From 8b1a85711fc936fef1c908eaa8b10533f880dff6 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 23 Feb 2024 20:22:14 +0100 Subject: [PATCH 001/117] archiver: unexport save/saveDir/saveTree methods --- internal/archiver/archiver.go | 22 +++++++++++----------- internal/archiver/archiver_test.go | 12 ++++++------ 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 77ddba7c4..9d7d68913 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -222,9 +222,9 @@ func (arch *Archiver) wrapLoadTreeError(id restic.ID, err error) error { return err } -// SaveDir stores a directory in the repo and returns the node. snPath is the +// saveDir stores a directory in the repo and returns the node. snPath is the // path within the current snapshot. -func (arch *Archiver) SaveDir(ctx context.Context, snPath string, dir string, fi os.FileInfo, previous *restic.Tree, complete CompleteFunc) (d FutureNode, err error) { +func (arch *Archiver) saveDir(ctx context.Context, snPath string, dir string, fi os.FileInfo, previous *restic.Tree, complete CompleteFunc) (d FutureNode, err error) { debug.Log("%v %v", snPath, dir) treeNode, err := arch.nodeFromFileInfo(snPath, dir, fi) @@ -250,7 +250,7 @@ func (arch *Archiver) SaveDir(ctx context.Context, snPath string, dir string, fi pathname := arch.FS.Join(dir, name) oldNode := previous.Find(name) snItem := join(snPath, name) - fn, excluded, err := arch.Save(ctx, snItem, pathname, oldNode) + fn, excluded, err := arch.save(ctx, snItem, pathname, oldNode) // return error early if possible if err != nil { @@ -334,14 +334,14 @@ func (arch *Archiver) allBlobsPresent(previous *restic.Node) bool { return true } -// Save saves a target (file or directory) to the repo. If the item is +// save saves a target (file or directory) to the repo. If the item is // excluded, this function returns a nil node and error, with excluded set to // true. // // Errors and completion needs to be handled by the caller. // // snPath is the path within the current snapshot. -func (arch *Archiver) Save(ctx context.Context, snPath, target string, previous *restic.Node) (fn FutureNode, excluded bool, err error) { +func (arch *Archiver) save(ctx context.Context, snPath, target string, previous *restic.Node) (fn FutureNode, excluded bool, err error) { start := time.Now() debug.Log("%v target %q, previous %v", snPath, target, previous) @@ -462,7 +462,7 @@ func (arch *Archiver) Save(ctx context.Context, snPath, target string, previous return FutureNode{}, false, err } - fn, err = arch.SaveDir(ctx, snPath, target, fi, oldSubtree, + fn, err = arch.saveDir(ctx, snPath, target, fi, oldSubtree, func(node *restic.Node, stats ItemStats) { arch.CompleteItem(snItem, previous, node, stats, time.Since(start)) }) @@ -545,9 +545,9 @@ func (arch *Archiver) statDir(dir string) (os.FileInfo, error) { return fi, nil } -// SaveTree stores a Tree in the repo, returned is the tree. snPath is the path +// saveTree stores a Tree in the repo, returned is the tree. snPath is the path // within the current snapshot. 
-func (arch *Archiver) SaveTree(ctx context.Context, snPath string, atree *Tree, previous *restic.Tree, complete CompleteFunc) (FutureNode, int, error) { +func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *Tree, previous *restic.Tree, complete CompleteFunc) (FutureNode, int, error) { var node *restic.Node if snPath != "/" { @@ -585,7 +585,7 @@ func (arch *Archiver) SaveTree(ctx context.Context, snPath string, atree *Tree, // this is a leaf node if subatree.Leaf() { - fn, excluded, err := arch.Save(ctx, join(snPath, name), subatree.Path, previous.Find(name)) + fn, excluded, err := arch.save(ctx, join(snPath, name), subatree.Path, previous.Find(name)) if err != nil { err = arch.error(subatree.Path, err) @@ -619,7 +619,7 @@ func (arch *Archiver) SaveTree(ctx context.Context, snPath string, atree *Tree, } // not a leaf node, archive subtree - fn, _, err := arch.SaveTree(ctx, join(snPath, name), &subatree, oldSubtree, func(n *restic.Node, is ItemStats) { + fn, _, err := arch.saveTree(ctx, join(snPath, name), &subatree, oldSubtree, func(n *restic.Node, is ItemStats) { arch.CompleteItem(snItem, oldNode, n, is, time.Since(start)) }) if err != nil { @@ -762,7 +762,7 @@ func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts Snaps arch.runWorkers(wgCtx, wg) debug.Log("starting snapshot") - fn, nodeCount, err := arch.SaveTree(wgCtx, "/", atree, arch.loadParentTree(wgCtx, opts.ParentSnapshot), func(_ *restic.Node, is ItemStats) { + fn, nodeCount, err := arch.saveTree(wgCtx, "/", atree, arch.loadParentTree(wgCtx, opts.ParentSnapshot), func(_ *restic.Node, is ItemStats) { arch.CompleteItem("/", nil, nil, is, time.Since(start)) }) if err != nil { diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 46ef44251..158768323 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -227,7 +227,7 @@ func TestArchiverSave(t *testing.T) { } arch.runWorkers(ctx, wg) - node, excluded, err := arch.Save(ctx, "/", filepath.Join(tempdir, "file"), nil) + node, excluded, err := arch.save(ctx, "/", filepath.Join(tempdir, "file"), nil) if err != nil { t.Fatal(err) } @@ -304,7 +304,7 @@ func TestArchiverSaveReaderFS(t *testing.T) { } arch.runWorkers(ctx, wg) - node, excluded, err := arch.Save(ctx, "/", filename, nil) + node, excluded, err := arch.save(ctx, "/", filename, nil) t.Logf("Save returned %v %v", node, err) if err != nil { t.Fatal(err) @@ -845,7 +845,7 @@ func TestArchiverSaveDir(t *testing.T) { t.Fatal(err) } - ft, err := arch.SaveDir(ctx, "/", test.target, fi, nil, nil) + ft, err := arch.saveDir(ctx, "/", test.target, fi, nil, nil) if err != nil { t.Fatal(err) } @@ -918,7 +918,7 @@ func TestArchiverSaveDirIncremental(t *testing.T) { t.Fatal(err) } - ft, err := arch.SaveDir(ctx, "/", tempdir, fi, nil, nil) + ft, err := arch.saveDir(ctx, "/", tempdir, fi, nil, nil) if err != nil { t.Fatal(err) } @@ -1107,7 +1107,7 @@ func TestArchiverSaveTree(t *testing.T) { t.Fatal(err) } - fn, _, err := arch.SaveTree(ctx, "/", atree, nil, nil) + fn, _, err := arch.saveTree(ctx, "/", atree, nil, nil) if err != nil { t.Fatal(err) } @@ -2236,7 +2236,7 @@ func TestRacyFileSwap(t *testing.T) { arch.runWorkers(ctx, wg) // fs.Track will panic if the file was not closed - _, excluded, err := arch.Save(ctx, "/", tempfile, nil) + _, excluded, err := arch.save(ctx, "/", tempfile, nil) if err == nil { t.Errorf("Save() should have failed") } From a59f654fa68dfe5b1a186b1fb81265ba8268c623 Mon Sep 17 00:00:00 2001 From: Michael 
Eischer Date: Thu, 22 Feb 2024 22:14:48 +0100 Subject: [PATCH 002/117] archiver: refactor summary collection from ui into the archiver --- cmd/restic/cmd_backup.go | 4 +- internal/archiver/archiver.go | 79 ++++++++++++++++++++++++----- internal/archiver/archiver_test.go | 21 +++++--- internal/archiver/testing.go | 2 +- internal/archiver/testing_test.go | 2 +- internal/dump/common_test.go | 2 +- internal/restorer/restorer_test.go | 2 +- internal/ui/backup/json.go | 2 +- internal/ui/backup/progress.go | 49 ++---------------- internal/ui/backup/progress_test.go | 5 +- internal/ui/backup/text.go | 2 +- 11 files changed, 92 insertions(+), 78 deletions(-) diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index 318d17796..4c03b7e8c 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -649,7 +649,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter if !gopts.JSON { progressPrinter.V("start backup on %v", targets) } - _, id, err := arch.Snapshot(ctx, targets, snapshotOpts) + _, id, summary, err := arch.Snapshot(ctx, targets, snapshotOpts) // cleanly shutdown all running goroutines cancel() @@ -663,7 +663,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter } // Report finished execution - progressReporter.Finish(id, opts.DryRun) + progressReporter.Finish(id, summary, opts.DryRun) if !gopts.JSON && !opts.DryRun { progressPrinter.P("snapshot %s saved\n", id.Str()) } diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 9d7d68913..b8d1d45bb 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -8,6 +8,7 @@ import ( "runtime" "sort" "strings" + "sync" "time" "github.com/restic/restic/internal/debug" @@ -40,6 +41,16 @@ type ItemStats struct { TreeSizeInRepo uint64 // sum of the bytes added to the repo (including compression and crypto overhead) } +type Summary struct { + Files, Dirs struct { + New uint + Changed uint + Unchanged uint + } + ProcessedBytes uint64 + ItemStats +} + // Add adds other to the current ItemStats. func (s *ItemStats) Add(other ItemStats) { s.DataBlobs += other.DataBlobs @@ -61,6 +72,8 @@ type Archiver struct { blobSaver *BlobSaver fileSaver *FileSaver treeSaver *TreeSaver + mu sync.Mutex + summary *Summary // Error is called for all errors that occur during backup. Error ErrorFunc @@ -182,6 +195,44 @@ func (arch *Archiver) error(item string, err error) error { return errf } +func (arch *Archiver) trackItem(item string, previous, current *restic.Node, s ItemStats, d time.Duration) { + arch.CompleteItem(item, previous, current, s, d) + + arch.mu.Lock() + defer arch.mu.Unlock() + + arch.summary.ItemStats.Add(s) + + if current != nil { + arch.summary.ProcessedBytes += current.Size + } else { + // last item or an error occurred + return + } + + switch current.Type { + case "dir": + switch { + case previous == nil: + arch.summary.Dirs.New++ + case previous.Equals(*current): + arch.summary.Dirs.Unchanged++ + default: + arch.summary.Dirs.Changed++ + } + + case "file": + switch { + case previous == nil: + arch.summary.Files.New++ + case previous.Equals(*current): + arch.summary.Files.Unchanged++ + default: + arch.summary.Files.Changed++ + } + } +} + // nodeFromFileInfo returns the restic node from an os.FileInfo. 
func (arch *Archiver) nodeFromFileInfo(snPath, filename string, fi os.FileInfo) (*restic.Node, error) { node, err := restic.NodeFromFileInfo(filename, fi) @@ -380,7 +431,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous if previous != nil && !fileChanged(fi, previous, arch.ChangeIgnoreFlags) { if arch.allBlobsPresent(previous) { debug.Log("%v hasn't changed, using old list of blobs", target) - arch.CompleteItem(snPath, previous, previous, ItemStats{}, time.Since(start)) + arch.trackItem(snPath, previous, previous, ItemStats{}, time.Since(start)) arch.CompleteBlob(previous.Size) node, err := arch.nodeFromFileInfo(snPath, target, fi) if err != nil { @@ -445,9 +496,9 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous fn = arch.fileSaver.Save(ctx, snPath, target, file, fi, func() { arch.StartFile(snPath) }, func() { - arch.CompleteItem(snPath, nil, nil, ItemStats{}, 0) + arch.trackItem(snPath, nil, nil, ItemStats{}, 0) }, func(node *restic.Node, stats ItemStats) { - arch.CompleteItem(snPath, previous, node, stats, time.Since(start)) + arch.trackItem(snPath, previous, node, stats, time.Since(start)) }) case fi.IsDir(): @@ -464,7 +515,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous fn, err = arch.saveDir(ctx, snPath, target, fi, oldSubtree, func(node *restic.Node, stats ItemStats) { - arch.CompleteItem(snItem, previous, node, stats, time.Since(start)) + arch.trackItem(snItem, previous, node, stats, time.Since(start)) }) if err != nil { debug.Log("SaveDir for %v returned error: %v", snPath, err) @@ -620,7 +671,7 @@ func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *Tree, // not a leaf node, archive subtree fn, _, err := arch.saveTree(ctx, join(snPath, name), &subatree, oldSubtree, func(n *restic.Node, is ItemStats) { - arch.CompleteItem(snItem, oldNode, n, is, time.Since(start)) + arch.trackItem(snItem, oldNode, n, is, time.Since(start)) }) if err != nil { return FutureNode{}, 0, err @@ -738,15 +789,17 @@ func (arch *Archiver) stopWorkers() { } // Snapshot saves several targets and returns a snapshot. 
-func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts SnapshotOptions) (*restic.Snapshot, restic.ID, error) { +func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts SnapshotOptions) (*restic.Snapshot, restic.ID, *Summary, error) { + arch.summary = &Summary{} + cleanTargets, err := resolveRelativeTargets(arch.FS, targets) if err != nil { - return nil, restic.ID{}, err + return nil, restic.ID{}, nil, err } atree, err := NewTree(arch.FS, cleanTargets) if err != nil { - return nil, restic.ID{}, err + return nil, restic.ID{}, nil, err } var rootTreeID restic.ID @@ -763,7 +816,7 @@ func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts Snaps debug.Log("starting snapshot") fn, nodeCount, err := arch.saveTree(wgCtx, "/", atree, arch.loadParentTree(wgCtx, opts.ParentSnapshot), func(_ *restic.Node, is ItemStats) { - arch.CompleteItem("/", nil, nil, is, time.Since(start)) + arch.trackItem("/", nil, nil, is, time.Since(start)) }) if err != nil { return err @@ -799,12 +852,12 @@ func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts Snaps }) err = wgUp.Wait() if err != nil { - return nil, restic.ID{}, err + return nil, restic.ID{}, nil, err } sn, err := restic.NewSnapshot(targets, opts.Tags, opts.Hostname, opts.Time) if err != nil { - return nil, restic.ID{}, err + return nil, restic.ID{}, nil, err } sn.ProgramVersion = opts.ProgramVersion @@ -816,8 +869,8 @@ func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts Snaps id, err := restic.SaveSnapshot(ctx, arch.Repo, sn) if err != nil { - return nil, restic.ID{}, err + return nil, restic.ID{}, nil, err } - return sn, id, nil + return sn, id, arch.summary, nil } diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 158768323..0ae7ca05f 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -226,6 +226,7 @@ func TestArchiverSave(t *testing.T) { return err } arch.runWorkers(ctx, wg) + arch.summary = &Summary{} node, excluded, err := arch.save(ctx, "/", filepath.Join(tempdir, "file"), nil) if err != nil { @@ -303,6 +304,7 @@ func TestArchiverSaveReaderFS(t *testing.T) { return err } arch.runWorkers(ctx, wg) + arch.summary = &Summary{} node, excluded, err := arch.save(ctx, "/", filename, nil) t.Logf("Save returned %v %v", node, err) @@ -831,6 +833,7 @@ func TestArchiverSaveDir(t *testing.T) { arch := New(repo, fs.Track{FS: fs.Local{}}, Options{}) arch.runWorkers(ctx, wg) + arch.summary = &Summary{} chdir := tempdir if test.chdir != "" { @@ -912,6 +915,7 @@ func TestArchiverSaveDirIncremental(t *testing.T) { arch := New(repo, fs.Track{FS: fs.Local{}}, Options{}) arch.runWorkers(ctx, wg) + arch.summary = &Summary{} fi, err := fs.Lstat(tempdir) if err != nil { @@ -1094,6 +1098,7 @@ func TestArchiverSaveTree(t *testing.T) { repo.StartPackUploader(ctx, wg) arch.runWorkers(ctx, wg) + arch.summary = &Summary{} back := restictest.Chdir(t, tempdir) defer back() @@ -1395,7 +1400,7 @@ func TestArchiverSnapshot(t *testing.T) { } t.Logf("targets: %v", targets) - sn, snapshotID, err := arch.Snapshot(ctx, targets, SnapshotOptions{Time: time.Now()}) + sn, snapshotID, _, err := arch.Snapshot(ctx, targets, SnapshotOptions{Time: time.Now()}) if err != nil { t.Fatal(err) } @@ -1543,7 +1548,7 @@ func TestArchiverSnapshotSelect(t *testing.T) { defer back() targets := []string{"."} - _, snapshotID, err := arch.Snapshot(ctx, targets, SnapshotOptions{Time: time.Now()}) + _, snapshotID, _, err := 
arch.Snapshot(ctx, targets, SnapshotOptions{Time: time.Now()}) if test.err != "" { if err == nil { t.Fatalf("expected error not found, got %v, wanted %q", err, test.err) @@ -1648,7 +1653,7 @@ func TestArchiverParent(t *testing.T) { back := restictest.Chdir(t, tempdir) defer back() - firstSnapshot, firstSnapshotID, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) + firstSnapshot, firstSnapshotID, _, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) if err != nil { t.Fatal(err) } @@ -1678,7 +1683,7 @@ func TestArchiverParent(t *testing.T) { Time: time.Now(), ParentSnapshot: firstSnapshot, } - _, secondSnapshotID, err := arch.Snapshot(ctx, []string{"."}, opts) + _, secondSnapshotID, _, err := arch.Snapshot(ctx, []string{"."}, opts) if err != nil { t.Fatal(err) } @@ -1814,7 +1819,7 @@ func TestArchiverErrorReporting(t *testing.T) { arch := New(repo, fs.Track{FS: fs.Local{}}, Options{}) arch.Error = test.errFn - _, snapshotID, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) + _, snapshotID, _, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) if test.mustError { if err != nil { t.Logf("found expected error (%v), skipping further checks", err) @@ -1887,7 +1892,7 @@ func TestArchiverContextCanceled(t *testing.T) { arch := New(repo, fs.Track{FS: fs.Local{}}, Options{}) - _, snapshotID, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) + _, snapshotID, _, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) if err != nil { t.Logf("found expected error (%v)", err) @@ -2026,7 +2031,7 @@ func TestArchiverAbortEarlyOnError(t *testing.T) { SaveBlobConcurrency: 1, }) - _, _, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) + _, _, _, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) if !errors.Is(err, test.err) { t.Errorf("expected error (%v) not found, got %v", test.err, err) } @@ -2054,7 +2059,7 @@ func snapshot(t testing.TB, repo restic.Repository, fs fs.FS, parent *restic.Sna Time: time.Now(), ParentSnapshot: parent, } - snapshot, _, err := arch.Snapshot(ctx, []string{filename}, sopts) + snapshot, _, _, err := arch.Snapshot(ctx, []string{filename}, sopts) if err != nil { t.Fatal(err) } diff --git a/internal/archiver/testing.go b/internal/archiver/testing.go index 111c1e68c..1feaa82fc 100644 --- a/internal/archiver/testing.go +++ b/internal/archiver/testing.go @@ -31,7 +31,7 @@ func TestSnapshot(t testing.TB, repo restic.Repository, path string, parent *res } opts.ParentSnapshot = sn } - sn, _, err := arch.Snapshot(context.TODO(), []string{path}, opts) + sn, _, _, err := arch.Snapshot(context.TODO(), []string{path}, opts) if err != nil { t.Fatal(err) } diff --git a/internal/archiver/testing_test.go b/internal/archiver/testing_test.go index ada7261f1..e48b41ec7 100644 --- a/internal/archiver/testing_test.go +++ b/internal/archiver/testing_test.go @@ -473,7 +473,7 @@ func TestTestEnsureSnapshot(t *testing.T) { Hostname: "localhost", Tags: []string{"test"}, } - _, id, err := arch.Snapshot(ctx, []string{"."}, opts) + _, id, _, err := arch.Snapshot(ctx, []string{"."}, opts) if err != nil { t.Fatal(err) } diff --git a/internal/dump/common_test.go b/internal/dump/common_test.go index 3ee9112af..afd19df63 100644 --- a/internal/dump/common_test.go +++ b/internal/dump/common_test.go @@ -78,7 +78,7 @@ func WriteTest(t *testing.T, format string, cd CheckDump) { back := rtest.Chdir(t, tmpdir) defer back() - 
sn, _, err := arch.Snapshot(ctx, []string{"."}, archiver.SnapshotOptions{}) + sn, _, _, err := arch.Snapshot(ctx, []string{"."}, archiver.SnapshotOptions{}) rtest.OK(t, err) tree, err := restic.LoadTree(ctx, repo, *sn.Tree) diff --git a/internal/restorer/restorer_test.go b/internal/restorer/restorer_test.go index c33214bc3..29f8920c5 100644 --- a/internal/restorer/restorer_test.go +++ b/internal/restorer/restorer_test.go @@ -840,7 +840,7 @@ func TestRestorerSparseFiles(t *testing.T) { rtest.OK(t, err) arch := archiver.New(repo, target, archiver.Options{}) - sn, _, err := arch.Snapshot(context.Background(), []string{"/zeros"}, + sn, _, _, err := arch.Snapshot(context.Background(), []string{"/zeros"}, archiver.SnapshotOptions{}) rtest.OK(t, err) diff --git a/internal/ui/backup/json.go b/internal/ui/backup/json.go index 10f0e91fa..3393a3d48 100644 --- a/internal/ui/backup/json.go +++ b/internal/ui/backup/json.go @@ -163,7 +163,7 @@ func (b *JSONProgress) ReportTotal(start time.Time, s archiver.ScanStats) { } // Finish prints the finishing messages. -func (b *JSONProgress) Finish(snapshotID restic.ID, start time.Time, summary *Summary, dryRun bool) { +func (b *JSONProgress) Finish(snapshotID restic.ID, start time.Time, summary *archiver.Summary, dryRun bool) { b.print(summaryOutput{ MessageType: "summary", FilesNew: summary.Files.New, diff --git a/internal/ui/backup/progress.go b/internal/ui/backup/progress.go index da0d401a3..1d494bf14 100644 --- a/internal/ui/backup/progress.go +++ b/internal/ui/backup/progress.go @@ -17,7 +17,7 @@ type ProgressPrinter interface { ScannerError(item string, err error) error CompleteItem(messageType string, item string, s archiver.ItemStats, d time.Duration) ReportTotal(start time.Time, s archiver.ScanStats) - Finish(snapshotID restic.ID, start time.Time, summary *Summary, dryRun bool) + Finish(snapshotID restic.ID, start time.Time, summary *archiver.Summary, dryRun bool) Reset() P(msg string, args ...interface{}) @@ -28,16 +28,6 @@ type Counter struct { Files, Dirs, Bytes uint64 } -type Summary struct { - Files, Dirs struct { - New uint - Changed uint - Unchanged uint - } - ProcessedBytes uint64 - archiver.ItemStats -} - // Progress reports progress for the `backup` command. type Progress struct { progress.Updater @@ -52,7 +42,6 @@ type Progress struct { processed, total Counter errors uint - summary Summary printer ProgressPrinter } @@ -126,16 +115,6 @@ func (p *Progress) CompleteBlob(bytes uint64) { // CompleteItem is the status callback function for the archiver when a // file/dir has been saved successfully. 
func (p *Progress) CompleteItem(item string, previous, current *restic.Node, s archiver.ItemStats, d time.Duration) { - p.mu.Lock() - p.summary.ItemStats.Add(s) - - // for the last item "/", current is nil - if current != nil { - p.summary.ProcessedBytes += current.Size - } - - p.mu.Unlock() - if current == nil { // error occurred, tell the status display to remove the line p.mu.Lock() @@ -153,21 +132,10 @@ func (p *Progress) CompleteItem(item string, previous, current *restic.Node, s a switch { case previous == nil: p.printer.CompleteItem("dir new", item, s, d) - p.mu.Lock() - p.summary.Dirs.New++ - p.mu.Unlock() - case previous.Equals(*current): p.printer.CompleteItem("dir unchanged", item, s, d) - p.mu.Lock() - p.summary.Dirs.Unchanged++ - p.mu.Unlock() - default: p.printer.CompleteItem("dir modified", item, s, d) - p.mu.Lock() - p.summary.Dirs.Changed++ - p.mu.Unlock() } case "file": @@ -179,21 +147,10 @@ func (p *Progress) CompleteItem(item string, previous, current *restic.Node, s a switch { case previous == nil: p.printer.CompleteItem("file new", item, s, d) - p.mu.Lock() - p.summary.Files.New++ - p.mu.Unlock() - case previous.Equals(*current): p.printer.CompleteItem("file unchanged", item, s, d) - p.mu.Lock() - p.summary.Files.Unchanged++ - p.mu.Unlock() - default: p.printer.CompleteItem("file modified", item, s, d) - p.mu.Lock() - p.summary.Files.Changed++ - p.mu.Unlock() } } } @@ -213,8 +170,8 @@ func (p *Progress) ReportTotal(item string, s archiver.ScanStats) { } // Finish prints the finishing messages. -func (p *Progress) Finish(snapshotID restic.ID, dryrun bool) { +func (p *Progress) Finish(snapshotID restic.ID, summary *archiver.Summary, dryrun bool) { // wait for the status update goroutine to shut down p.Updater.Done() - p.printer.Finish(snapshotID, p.start, &p.summary, dryrun) + p.printer.Finish(snapshotID, p.start, summary, dryrun) } diff --git a/internal/ui/backup/progress_test.go b/internal/ui/backup/progress_test.go index 79a56c91e..6b242a0f3 100644 --- a/internal/ui/backup/progress_test.go +++ b/internal/ui/backup/progress_test.go @@ -33,11 +33,10 @@ func (p *mockPrinter) CompleteItem(messageType string, _ string, _ archiver.Item } func (p *mockPrinter) ReportTotal(_ time.Time, _ archiver.ScanStats) {} -func (p *mockPrinter) Finish(id restic.ID, _ time.Time, summary *Summary, _ bool) { +func (p *mockPrinter) Finish(id restic.ID, _ time.Time, _ *archiver.Summary, _ bool) { p.Lock() defer p.Unlock() - _ = *summary // Should not be nil. p.id = id } @@ -64,7 +63,7 @@ func TestProgress(t *testing.T) { time.Sleep(10 * time.Millisecond) id := restic.NewRandomID() - prog.Finish(id, false) + prog.Finish(id, nil, false) if !prnt.dirUnchanged { t.Error(`"dir unchanged" event not seen`) diff --git a/internal/ui/backup/text.go b/internal/ui/backup/text.go index 215982cd4..00d025e51 100644 --- a/internal/ui/backup/text.go +++ b/internal/ui/backup/text.go @@ -126,7 +126,7 @@ func (b *TextProgress) Reset() { } // Finish prints the finishing messages. 
-func (b *TextProgress) Finish(_ restic.ID, start time.Time, summary *Summary, dryRun bool) { +func (b *TextProgress) Finish(_ restic.ID, start time.Time, summary *archiver.Summary, dryRun bool) { b.P("\n") b.P("Files: %5d new, %5d changed, %5d unmodified\n", summary.Files.New, summary.Files.Changed, summary.Files.Unchanged) b.P("Dirs: %5d new, %5d changed, %5d unmodified\n", summary.Dirs.New, summary.Dirs.Changed, summary.Dirs.Unchanged) From 86897314d54f68164c9fee330deb9772b0f4f6fa Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 22 Feb 2024 22:17:54 +0100 Subject: [PATCH 003/117] backup: expose data_added_in_repo in JSON output The value describes how much data was added after compression. Previously, it was only available in the text output. --- doc/075_scripting.rst | 4 +++- internal/ui/backup/json.go | 2 ++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index fda4b2d53..1c0f7a34f 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -163,7 +163,9 @@ Summary is the last output line in a successful backup. +---------------------------+---------------------------------------------------------+ | ``tree_blobs`` | Number of tree blobs | +---------------------------+---------------------------------------------------------+ -| ``data_added`` | Amount of data added, in bytes | +| ``data_added`` | Amount of (uncompressed) data added, in bytes | ++---------------------------+---------------------------------------------------------+ +| ``data_added_in_repo`` | Amount of data added (after compression), in bytes | +---------------------------+---------------------------------------------------------+ | ``total_files_processed`` | Total number of files processed | +---------------------------+---------------------------------------------------------+ diff --git a/internal/ui/backup/json.go b/internal/ui/backup/json.go index 3393a3d48..b3a8b44a7 100644 --- a/internal/ui/backup/json.go +++ b/internal/ui/backup/json.go @@ -175,6 +175,7 @@ func (b *JSONProgress) Finish(snapshotID restic.ID, start time.Time, summary *ar DataBlobs: summary.ItemStats.DataBlobs, TreeBlobs: summary.ItemStats.TreeBlobs, DataAdded: summary.ItemStats.DataSize + summary.ItemStats.TreeSize, + DataAddedInRepo: summary.ItemStats.DataSizeInRepo + summary.ItemStats.TreeSizeInRepo, TotalFilesProcessed: summary.Files.New + summary.Files.Changed + summary.Files.Unchanged, TotalBytesProcessed: summary.ProcessedBytes, TotalDuration: time.Since(start).Seconds(), @@ -230,6 +231,7 @@ type summaryOutput struct { DataBlobs int `json:"data_blobs"` TreeBlobs int `json:"tree_blobs"` DataAdded uint64 `json:"data_added"` + DataAddedInRepo uint64 `json:"data_added_in_repo"` TotalFilesProcessed uint `json:"total_files_processed"` TotalBytesProcessed uint64 `json:"total_bytes_processed"` TotalDuration float64 `json:"total_duration"` // in seconds From 38f91d3b5e045ada840aa4a1c91deb942e9ccdbe Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 22 Feb 2024 22:25:14 +0100 Subject: [PATCH 004/117] backup: store statistics in snapshot --- cmd/restic/cmd_backup.go | 2 ++ internal/archiver/archiver.go | 18 ++++++++++++++++++ internal/restic/snapshot.go | 22 +++++++++++++++++++++- 3 files changed, 41 insertions(+), 1 deletion(-) diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index 4c03b7e8c..acc4bddb1 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -451,6 +451,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts 
GlobalOptions, ter } timeStamp := time.Now() + backupStart := timeStamp if opts.TimeStamp != "" { timeStamp, err = time.ParseInLocation(TimeFormat, opts.TimeStamp, time.Local) if err != nil { @@ -640,6 +641,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter snapshotOpts := archiver.SnapshotOptions{ Excludes: opts.Excludes, Tags: opts.Tags.Flatten(), + BackupStart: backupStart, Time: timeStamp, Hostname: opts.Host, ParentSnapshot: parentSnapshot, diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index b8d1d45bb..2be7dddd5 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -739,6 +739,7 @@ type SnapshotOptions struct { Tags restic.TagList Hostname string Excludes []string + BackupStart time.Time Time time.Time ParentSnapshot *restic.Snapshot ProgramVersion string @@ -866,6 +867,23 @@ func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts Snaps sn.Parent = opts.ParentSnapshot.ID() } sn.Tree = &rootTreeID + sn.Summary = &restic.SnapshotSummary{ + BackupStart: opts.BackupStart, + BackupEnd: time.Now(), + + FilesNew: arch.summary.Files.New, + FilesChanged: arch.summary.Files.Changed, + FilesUnmodified: arch.summary.Files.Unchanged, + DirsNew: arch.summary.Dirs.New, + DirsChanged: arch.summary.Dirs.Changed, + DirsUnmodified: arch.summary.Dirs.Unchanged, + DataBlobs: arch.summary.ItemStats.DataBlobs, + TreeBlobs: arch.summary.ItemStats.TreeBlobs, + DataAdded: arch.summary.ItemStats.DataSize + arch.summary.ItemStats.TreeSize, + DataAddedInRepo: arch.summary.ItemStats.DataSizeInRepo + arch.summary.ItemStats.TreeSizeInRepo, + TotalFilesProcessed: arch.summary.Files.New + arch.summary.Files.Changed + arch.summary.Files.Unchanged, + TotalBytesProcessed: arch.summary.ProcessedBytes, + } id, err := restic.SaveSnapshot(ctx, arch.Repo, sn) if err != nil { diff --git a/internal/restic/snapshot.go b/internal/restic/snapshot.go index 8cf651d96..5ee308879 100644 --- a/internal/restic/snapshot.go +++ b/internal/restic/snapshot.go @@ -25,11 +25,31 @@ type Snapshot struct { Tags []string `json:"tags,omitempty"` Original *ID `json:"original,omitempty"` - ProgramVersion string `json:"program_version,omitempty"` + ProgramVersion string `json:"program_version,omitempty"` + Summary *SnapshotSummary `json:"summary,omitempty"` id *ID // plaintext ID, used during restore } +type SnapshotSummary struct { + BackupStart time.Time `json:"backup_start"` + BackupEnd time.Time `json:"backup_end"` + + // statistics from the backup json output + FilesNew uint `json:"files_new"` + FilesChanged uint `json:"files_changed"` + FilesUnmodified uint `json:"files_unmodified"` + DirsNew uint `json:"dirs_new"` + DirsChanged uint `json:"dirs_changed"` + DirsUnmodified uint `json:"dirs_unmodified"` + DataBlobs int `json:"data_blobs"` + TreeBlobs int `json:"tree_blobs"` + DataAdded uint64 `json:"data_added"` + DataAddedInRepo uint64 `json:"data_added_in_repo"` + TotalFilesProcessed uint `json:"total_files_processed"` + TotalBytesProcessed uint64 `json:"total_bytes_processed"` +} + // NewSnapshot returns an initialized snapshot struct for the current user and // time. 
func NewSnapshot(paths []string, tags []string, hostname string, time time.Time) (*Snapshot, error) { From b6520038fd882a075cf86310e98125d7ab01e5c8 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 22 Feb 2024 22:43:54 +0100 Subject: [PATCH 005/117] snapshots: Print snapshot size stored in snapshots --- cmd/restic/cmd_snapshots.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/cmd/restic/cmd_snapshots.go b/cmd/restic/cmd_snapshots.go index e94f2ed9b..d6199d47a 100644 --- a/cmd/restic/cmd_snapshots.go +++ b/cmd/restic/cmd_snapshots.go @@ -9,6 +9,7 @@ import ( "strings" "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui" "github.com/restic/restic/internal/ui/table" "github.com/spf13/cobra" ) @@ -163,6 +164,11 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, reasons []restic.Ke keepReasons[*id] = reasons[i] } } + // check if any snapshot contains a summary + hasSize := false + for _, sn := range list { + hasSize = hasSize || (sn.Summary != nil) + } // always sort the snapshots so that the newer ones are listed last sort.SliceStable(list, func(i, j int) bool { @@ -198,6 +204,9 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, reasons []restic.Ke tab.AddColumn("Reasons", `{{ join .Reasons "\n" }}`) } tab.AddColumn("Paths", `{{ join .Paths "\n" }}`) + if hasSize { + tab.AddColumn("Size", `{{ .Size }}`) + } } type snapshot struct { @@ -207,6 +216,7 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, reasons []restic.Ke Tags []string Reasons []string Paths []string + Size string } var multiline bool @@ -228,6 +238,10 @@ func PrintSnapshots(stdout io.Writer, list restic.Snapshots, reasons []restic.Ke multiline = true } + if sn.Summary != nil { + data.Size = ui.FormatBytes(sn.Summary.TotalBytesProcessed) + } + tab.AddRow(data) } From 681395955e9ae836b333420bc6697a6edbdd8922 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 23 Feb 2024 21:46:39 +0100 Subject: [PATCH 006/117] archiver: test backup summary calculation --- internal/archiver/archiver.go | 12 +-- internal/archiver/archiver_test.go | 139 +++++++++++++++++++++-------- 2 files changed, 108 insertions(+), 43 deletions(-) diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 2be7dddd5..39b05d0e6 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -41,12 +41,14 @@ type ItemStats struct { TreeSizeInRepo uint64 // sum of the bytes added to the repo (including compression and crypto overhead) } +type ChangeStats struct { + New uint + Changed uint + Unchanged uint +} + type Summary struct { - Files, Dirs struct { - New uint - Changed uint - Unchanged uint - } + Files, Dirs ChangeStats ProcessedBytes uint64 ItemStats } diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 0ae7ca05f..4430f8528 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -986,9 +986,9 @@ func TestArchiverSaveDirIncremental(t *testing.T) { // bothZeroOrNeither fails the test if only one of exp, act is zero. 
func bothZeroOrNeither(tb testing.TB, exp, act uint64) { + tb.Helper() if (exp == 0 && act != 0) || (exp != 0 && act == 0) { - _, file, line, _ := runtime.Caller(1) - tb.Fatalf("\033[31m%s:%d:\n\n\texp: %#v\n\n\tgot: %#v\033[39m\n\n", filepath.Base(file), line, exp, act) + restictest.Equals(tb, exp, act) } } @@ -1008,7 +1008,7 @@ func TestArchiverSaveTree(t *testing.T) { prepare func(t testing.TB) targets []string want TestDir - stat ItemStats + stat Summary }{ { src: TestDir{ @@ -1018,7 +1018,12 @@ func TestArchiverSaveTree(t *testing.T) { want: TestDir{ "targetfile": TestFile{Content: string("foobar")}, }, - stat: ItemStats{1, 6, 32 + 6, 0, 0, 0}, + stat: Summary{ + ItemStats: ItemStats{1, 6, 32 + 6, 0, 0, 0}, + ProcessedBytes: 6, + Files: ChangeStats{1, 0, 0}, + Dirs: ChangeStats{0, 0, 0}, + }, }, { src: TestDir{ @@ -1030,7 +1035,12 @@ func TestArchiverSaveTree(t *testing.T) { "targetfile": TestFile{Content: string("foobar")}, "filesymlink": TestSymlink{Target: "targetfile"}, }, - stat: ItemStats{1, 6, 32 + 6, 0, 0, 0}, + stat: Summary{ + ItemStats: ItemStats{1, 6, 32 + 6, 0, 0, 0}, + ProcessedBytes: 6, + Files: ChangeStats{1, 0, 0}, + Dirs: ChangeStats{0, 0, 0}, + }, }, { src: TestDir{ @@ -1050,7 +1060,12 @@ func TestArchiverSaveTree(t *testing.T) { "symlink": TestSymlink{Target: "subdir"}, }, }, - stat: ItemStats{0, 0, 0, 1, 0x154, 0x16a}, + stat: Summary{ + ItemStats: ItemStats{0, 0, 0, 1, 0x154, 0x16a}, + ProcessedBytes: 0, + Files: ChangeStats{0, 0, 0}, + Dirs: ChangeStats{1, 0, 0}, + }, }, { src: TestDir{ @@ -1074,7 +1089,12 @@ func TestArchiverSaveTree(t *testing.T) { }, }, }, - stat: ItemStats{1, 6, 32 + 6, 3, 0x47f, 0x4c1}, + stat: Summary{ + ItemStats: ItemStats{1, 6, 32 + 6, 3, 0x47f, 0x4c1}, + ProcessedBytes: 6, + Files: ChangeStats{1, 0, 0}, + Dirs: ChangeStats{3, 0, 0}, + }, }, } @@ -1086,14 +1106,6 @@ func TestArchiverSaveTree(t *testing.T) { arch := New(repo, testFS, Options{}) - var stat ItemStats - lock := &sync.Mutex{} - arch.CompleteItem = func(item string, previous, current *restic.Node, s ItemStats, d time.Duration) { - lock.Lock() - defer lock.Unlock() - stat.Add(s) - } - wg, ctx := errgroup.WithContext(context.TODO()) repo.StartPackUploader(ctx, wg) @@ -1139,11 +1151,15 @@ func TestArchiverSaveTree(t *testing.T) { want = test.src } TestEnsureTree(context.TODO(), t, "/", repo, treeID, want) + stat := arch.summary bothZeroOrNeither(t, uint64(test.stat.DataBlobs), uint64(stat.DataBlobs)) bothZeroOrNeither(t, uint64(test.stat.TreeBlobs), uint64(stat.TreeBlobs)) bothZeroOrNeither(t, test.stat.DataSize, stat.DataSize) bothZeroOrNeither(t, test.stat.DataSizeInRepo, stat.DataSizeInRepo) bothZeroOrNeither(t, test.stat.TreeSizeInRepo, stat.TreeSizeInRepo) + restictest.Equals(t, test.stat.ProcessedBytes, stat.ProcessedBytes) + restictest.Equals(t, test.stat.Files, stat.Files) + restictest.Equals(t, test.stat.Dirs, stat.Dirs) }) } } @@ -1623,15 +1639,64 @@ func (f MockFile) Read(p []byte) (int, error) { func TestArchiverParent(t *testing.T) { var tests = []struct { - src TestDir - read map[string]int // tracks number of times a file must have been read + src TestDir + modify func(path string) + statInitial Summary + statSecond Summary }{ { src: TestDir{ "targetfile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))}, }, - read: map[string]int{ - "targetfile": 1, + statInitial: Summary{ + Files: ChangeStats{1, 0, 0}, + Dirs: ChangeStats{0, 0, 0}, + ProcessedBytes: 2102152, + }, + statSecond: Summary{ + Files: ChangeStats{0, 0, 1}, + Dirs: ChangeStats{0, 0, 
0}, + ProcessedBytes: 2102152, + }, + }, + { + src: TestDir{ + "targetDir": TestDir{ + "targetfile": TestFile{Content: string(restictest.Random(888, 1234))}, + "targetfile2": TestFile{Content: string(restictest.Random(888, 1235))}, + }, + }, + statInitial: Summary{ + Files: ChangeStats{2, 0, 0}, + Dirs: ChangeStats{1, 0, 0}, + ProcessedBytes: 2469, + }, + statSecond: Summary{ + Files: ChangeStats{0, 0, 2}, + Dirs: ChangeStats{0, 0, 1}, + ProcessedBytes: 2469, + }, + }, + { + src: TestDir{ + "targetDir": TestDir{ + "targetfile": TestFile{Content: string(restictest.Random(888, 1234))}, + }, + "targetfile2": TestFile{Content: string(restictest.Random(888, 1235))}, + }, + modify: func(path string) { + remove(t, filepath.Join(path, "targetDir", "targetfile")) + save(t, filepath.Join(path, "targetfile2"), []byte("foobar")) + }, + statInitial: Summary{ + Files: ChangeStats{2, 0, 0}, + Dirs: ChangeStats{1, 0, 0}, + ProcessedBytes: 2469, + }, + statSecond: Summary{ + Files: ChangeStats{0, 1, 0}, + Dirs: ChangeStats{0, 1, 0}, + ProcessedBytes: 6, }, }, } @@ -1653,7 +1718,7 @@ func TestArchiverParent(t *testing.T) { back := restictest.Chdir(t, tempdir) defer back() - firstSnapshot, firstSnapshotID, _, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) + firstSnapshot, firstSnapshotID, summary, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) if err != nil { t.Fatal(err) } @@ -1678,33 +1743,31 @@ func TestArchiverParent(t *testing.T) { } return nil }) + restictest.Equals(t, test.statInitial.Files, summary.Files) + restictest.Equals(t, test.statInitial.Dirs, summary.Dirs) + restictest.Equals(t, test.statInitial.ProcessedBytes, summary.ProcessedBytes) + + if test.modify != nil { + test.modify(tempdir) + } opts := SnapshotOptions{ Time: time.Now(), ParentSnapshot: firstSnapshot, } - _, secondSnapshotID, _, err := arch.Snapshot(ctx, []string{"."}, opts) + testFS.bytesRead = map[string]int{} + _, secondSnapshotID, summary, err := arch.Snapshot(ctx, []string{"."}, opts) if err != nil { t.Fatal(err) } - // check that all files still been read exactly once - TestWalkFiles(t, ".", test.src, func(filename string, item interface{}) error { - file, ok := item.(TestFile) - if !ok { - return nil - } - - n, ok := testFS.bytesRead[filename] - if !ok { - t.Fatalf("file %v was not read at all", filename) - } - - if n != len(file.Content) { - t.Fatalf("file %v: read %v bytes, wanted %v bytes", filename, n, len(file.Content)) - } - return nil - }) + if test.modify == nil { + // check that no files were read this time + restictest.Equals(t, map[string]int{}, testFS.bytesRead) + } + restictest.Equals(t, test.statSecond.Files, summary.Files) + restictest.Equals(t, test.statSecond.Dirs, summary.Dirs) + restictest.Equals(t, test.statSecond.ProcessedBytes, summary.ProcessedBytes) t.Logf("second backup saved as %v", secondSnapshotID.Str()) t.Logf("testfs: %v", testFS) From a8f5684f68a8a8a9b301aae413fed9e444e0c753 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 23 Feb 2024 22:05:15 +0100 Subject: [PATCH 007/117] archiver: test statistics in snapshot --- internal/archiver/archiver_test.go | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 4430f8528..ac0ffa6d0 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -1637,6 +1637,21 @@ func (f MockFile) Read(p []byte) (int, error) { return n, err } +func 
checkSnapshotStats(t *testing.T, sn *restic.Snapshot, stat Summary) { + restictest.Equals(t, stat.Files.New, sn.Summary.FilesNew) + restictest.Equals(t, stat.Files.Changed, sn.Summary.FilesChanged) + restictest.Equals(t, stat.Files.Unchanged, sn.Summary.FilesUnmodified) + restictest.Equals(t, stat.Dirs.New, sn.Summary.DirsNew) + restictest.Equals(t, stat.Dirs.Changed, sn.Summary.DirsChanged) + restictest.Equals(t, stat.Dirs.Unchanged, sn.Summary.DirsUnmodified) + restictest.Equals(t, stat.ProcessedBytes, sn.Summary.TotalBytesProcessed) + restictest.Equals(t, stat.Files.New+stat.Files.Changed+stat.Files.Unchanged, sn.Summary.TotalFilesProcessed) + bothZeroOrNeither(t, uint64(stat.DataBlobs), uint64(sn.Summary.DataBlobs)) + bothZeroOrNeither(t, uint64(stat.TreeBlobs), uint64(sn.Summary.TreeBlobs)) + bothZeroOrNeither(t, uint64(stat.DataSize+stat.TreeSize), uint64(sn.Summary.DataAdded)) + bothZeroOrNeither(t, uint64(stat.DataSizeInRepo+stat.TreeSizeInRepo), uint64(sn.Summary.DataAddedInRepo)) +} + func TestArchiverParent(t *testing.T) { var tests = []struct { src TestDir @@ -1652,6 +1667,7 @@ func TestArchiverParent(t *testing.T) { Files: ChangeStats{1, 0, 0}, Dirs: ChangeStats{0, 0, 0}, ProcessedBytes: 2102152, + ItemStats: ItemStats{3, 0x201593, 0x201632, 1, 0, 0}, }, statSecond: Summary{ Files: ChangeStats{0, 0, 1}, @@ -1670,6 +1686,7 @@ func TestArchiverParent(t *testing.T) { Files: ChangeStats{2, 0, 0}, Dirs: ChangeStats{1, 0, 0}, ProcessedBytes: 2469, + ItemStats: ItemStats{2, 0xe1c, 0xcd9, 2, 0, 0}, }, statSecond: Summary{ Files: ChangeStats{0, 0, 2}, @@ -1692,11 +1709,13 @@ func TestArchiverParent(t *testing.T) { Files: ChangeStats{2, 0, 0}, Dirs: ChangeStats{1, 0, 0}, ProcessedBytes: 2469, + ItemStats: ItemStats{2, 0xe13, 0xcf8, 2, 0, 0}, }, statSecond: Summary{ Files: ChangeStats{0, 1, 0}, Dirs: ChangeStats{0, 1, 0}, ProcessedBytes: 6, + ItemStats: ItemStats{1, 0x305, 0x233, 2, 0, 0}, }, }, } @@ -1746,6 +1765,7 @@ func TestArchiverParent(t *testing.T) { restictest.Equals(t, test.statInitial.Files, summary.Files) restictest.Equals(t, test.statInitial.Dirs, summary.Dirs) restictest.Equals(t, test.statInitial.ProcessedBytes, summary.ProcessedBytes) + checkSnapshotStats(t, firstSnapshot, test.statInitial) if test.modify != nil { test.modify(tempdir) @@ -1756,7 +1776,7 @@ func TestArchiverParent(t *testing.T) { ParentSnapshot: firstSnapshot, } testFS.bytesRead = map[string]int{} - _, secondSnapshotID, summary, err := arch.Snapshot(ctx, []string{"."}, opts) + secondSnapshot, secondSnapshotID, summary, err := arch.Snapshot(ctx, []string{"."}, opts) if err != nil { t.Fatal(err) } @@ -1768,6 +1788,7 @@ func TestArchiverParent(t *testing.T) { restictest.Equals(t, test.statSecond.Files, summary.Files) restictest.Equals(t, test.statSecond.Dirs, summary.Dirs) restictest.Equals(t, test.statSecond.ProcessedBytes, summary.ProcessedBytes) + checkSnapshotStats(t, secondSnapshot, test.statSecond) t.Logf("second backup saved as %v", secondSnapshotID.Str()) t.Logf("testfs: %v", testFS) From 6a13e451b14406a7cb3d1e3fc1dbb37b5ee16819 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 23 Feb 2024 22:29:10 +0100 Subject: [PATCH 008/117] document snapshot statistics --- changelog/unreleased/issue-693 | 12 ++++++++ doc/045_working_with_repos.rst | 56 ++++++++++++++++++---------------- doc/075_scripting.rst | 37 ++++++++++++++++++++++ 3 files changed, 78 insertions(+), 27 deletions(-) create mode 100644 changelog/unreleased/issue-693 diff --git a/changelog/unreleased/issue-693 
b/changelog/unreleased/issue-693 new file mode 100644 index 000000000..054ae42ed --- /dev/null +++ b/changelog/unreleased/issue-693 @@ -0,0 +1,12 @@ +Enhancement: Support printing snapshot size in `snapshots` command + +The `snapshots` command now supports printing the snapshot size for snapshots +created using this or a future restic version. For this, the `backup` command +now stores the backup summary statistics in the snapshot. + +The text output of the `snapshots` command only shows the snapshot size. The +other statistics are only included in the JSON output. To inspect these +statistics use `restic snapshots --json` or `restic cat snapshot `. + +https://github.com/restic/restic/issues/693 +https://github.com/restic/restic/pull/4705 diff --git a/doc/045_working_with_repos.rst b/doc/045_working_with_repos.rst index 48e5985dc..85c022580 100644 --- a/doc/045_working_with_repos.rst +++ b/doc/045_working_with_repos.rst @@ -18,19 +18,21 @@ Working with repositories Listing all snapshots ===================== -Now, you can list all the snapshots stored in the repository: +Now, you can list all the snapshots stored in the repository. The size column +only exists for snapshots created using restic 0.17.0 or later. It reflects the +size of the contained files at the time when the snapshot was created. .. code-block:: console $ restic -r /srv/restic-repo snapshots enter password for repository: - ID Date Host Tags Directory - ---------------------------------------------------------------------- - 40dc1520 2015-05-08 21:38:30 kasimir /home/user/work - 79766175 2015-05-08 21:40:19 kasimir /home/user/work - bdbd3439 2015-05-08 21:45:17 luigi /home/art - 590c8fc8 2015-05-08 21:47:38 kazik /srv - 9f0bc19e 2015-05-08 21:46:11 luigi /srv + ID Date Host Tags Directory Size + ------------------------------------------------------------------------- + 40dc1520 2015-05-08 21:38:30 kasimir /home/user/work 20.643GiB + 79766175 2015-05-08 21:40:19 kasimir /home/user/work 20.645GiB + bdbd3439 2015-05-08 21:45:17 luigi /home/art 3.141GiB + 590c8fc8 2015-05-08 21:47:38 kazik /srv 580.200MiB + 9f0bc19e 2015-05-08 21:46:11 luigi /srv 572.180MiB You can filter the listing by directory path: @@ -38,10 +40,10 @@ You can filter the listing by directory path: $ restic -r /srv/restic-repo snapshots --path="/srv" enter password for repository: - ID Date Host Tags Directory - ---------------------------------------------------------------------- - 590c8fc8 2015-05-08 21:47:38 kazik /srv - 9f0bc19e 2015-05-08 21:46:11 luigi /srv + ID Date Host Tags Directory Size + ------------------------------------------------------------------- + 590c8fc8 2015-05-08 21:47:38 kazik /srv 580.200MiB + 9f0bc19e 2015-05-08 21:46:11 luigi /srv 572.180MiB Or filter by host: @@ -49,10 +51,10 @@ Or filter by host: $ restic -r /srv/restic-repo snapshots --host luigi enter password for repository: - ID Date Host Tags Directory - ---------------------------------------------------------------------- - bdbd3439 2015-05-08 21:45:17 luigi /home/art - 9f0bc19e 2015-05-08 21:46:11 luigi /srv + ID Date Host Tags Directory Size + ------------------------------------------------------------------- + bdbd3439 2015-05-08 21:45:17 luigi /home/art 3.141GiB + 9f0bc19e 2015-05-08 21:46:11 luigi /srv 572.180MiB Combining filters is also possible. 
@@ -64,21 +66,21 @@ Furthermore you can group the output by the same filters (host, paths, tags): enter password for repository: snapshots for (host [kasimir]) - ID Date Host Tags Directory - ---------------------------------------------------------------------- - 40dc1520 2015-05-08 21:38:30 kasimir /home/user/work - 79766175 2015-05-08 21:40:19 kasimir /home/user/work + ID Date Host Tags Directory Size + ------------------------------------------------------------------------ + 40dc1520 2015-05-08 21:38:30 kasimir /home/user/work 20.643GiB + 79766175 2015-05-08 21:40:19 kasimir /home/user/work 20.645GiB 2 snapshots snapshots for (host [luigi]) - ID Date Host Tags Directory - ---------------------------------------------------------------------- - bdbd3439 2015-05-08 21:45:17 luigi /home/art - 9f0bc19e 2015-05-08 21:46:11 luigi /srv + ID Date Host Tags Directory Size + ------------------------------------------------------------------- + bdbd3439 2015-05-08 21:45:17 luigi /home/art 3.141GiB + 9f0bc19e 2015-05-08 21:46:11 luigi /srv 572.180MiB 2 snapshots snapshots for (host [kazik]) - ID Date Host Tags Directory - ---------------------------------------------------------------------- - 590c8fc8 2015-05-08 21:47:38 kazik /srv + ID Date Host Tags Directory Size + ------------------------------------------------------------------- + 590c8fc8 2015-05-08 21:47:38 kazik /srv 580.200MiB 1 snapshots diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index 1c0f7a34f..86851c54b 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -553,11 +553,48 @@ The snapshots command returns a single JSON object, an array with objects of the +---------------------+--------------------------------------------------+ | ``program_version`` | restic version used to create snapshot | +---------------------+--------------------------------------------------+ +| ``summary`` | Snapshot statistics, see "Summary object" | ++---------------------+--------------------------------------------------+ | ``id`` | Snapshot ID | +---------------------+--------------------------------------------------+ | ``short_id`` | Snapshot ID, short form | +---------------------+--------------------------------------------------+ +Summary object + +The contained statistics reflect the information at the point in time when the snapshot +was created. 
+ ++---------------------------+---------------------------------------------------------+ +| ``backup_start`` | Time at which the backup was started | ++---------------------------+---------------------------------------------------------+ +| ``backup_end`` | Time at which the backup was completed | ++---------------------------+---------------------------------------------------------+ +| ``files_new`` | Number of new files | ++---------------------------+---------------------------------------------------------+ +| ``files_changed`` | Number of files that changed | ++---------------------------+---------------------------------------------------------+ +| ``files_unmodified`` | Number of files that did not change | ++---------------------------+---------------------------------------------------------+ +| ``dirs_new`` | Number of new directories | ++---------------------------+---------------------------------------------------------+ +| ``dirs_changed`` | Number of directories that changed | ++---------------------------+---------------------------------------------------------+ +| ``dirs_unmodified`` | Number of directories that did not change | ++---------------------------+---------------------------------------------------------+ +| ``data_blobs`` | Number of data blobs | ++---------------------------+---------------------------------------------------------+ +| ``tree_blobs`` | Number of tree blobs | ++---------------------------+---------------------------------------------------------+ +| ``data_added`` | Amount of (uncompressed) data added, in bytes | ++---------------------------+---------------------------------------------------------+ +| ``data_added_in_repo`` | Amount of data added (after compression), in bytes | ++---------------------------+---------------------------------------------------------+ +| ``total_files_processed`` | Total number of files processed | ++---------------------------+---------------------------------------------------------+ +| ``total_bytes_processed`` | Total number of bytes processed | ++---------------------------+---------------------------------------------------------+ + stats ----- From e71660cd1e5e990476911201bd336dafbaa13bdf Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 25 Feb 2024 20:40:52 +0100 Subject: [PATCH 009/117] backup: rename data_added_in_repo statistic to data_added_packed --- doc/075_scripting.rst | 4 ++-- internal/archiver/archiver.go | 2 +- internal/archiver/archiver_test.go | 2 +- internal/restic/snapshot.go | 2 +- internal/ui/backup/json.go | 4 ++-- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index 86851c54b..d51516cbe 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -165,7 +165,7 @@ Summary is the last output line in a successful backup. +---------------------------+---------------------------------------------------------+ | ``data_added`` | Amount of (uncompressed) data added, in bytes | +---------------------------+---------------------------------------------------------+ -| ``data_added_in_repo`` | Amount of data added (after compression), in bytes | +| ``data_added_packed`` | Amount of data added (after compression), in bytes | +---------------------------+---------------------------------------------------------+ | ``total_files_processed`` | Total number of files processed | +---------------------------+---------------------------------------------------------+ @@ -588,7 +588,7 @@ was created. 
+---------------------------+---------------------------------------------------------+ | ``data_added`` | Amount of (uncompressed) data added, in bytes | +---------------------------+---------------------------------------------------------+ -| ``data_added_in_repo`` | Amount of data added (after compression), in bytes | +| ``data_added_packed`` | Amount of data added (after compression), in bytes | +---------------------------+---------------------------------------------------------+ | ``total_files_processed`` | Total number of files processed | +---------------------------+---------------------------------------------------------+ diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 39b05d0e6..73cd7a12e 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -882,7 +882,7 @@ func (arch *Archiver) Snapshot(ctx context.Context, targets []string, opts Snaps DataBlobs: arch.summary.ItemStats.DataBlobs, TreeBlobs: arch.summary.ItemStats.TreeBlobs, DataAdded: arch.summary.ItemStats.DataSize + arch.summary.ItemStats.TreeSize, - DataAddedInRepo: arch.summary.ItemStats.DataSizeInRepo + arch.summary.ItemStats.TreeSizeInRepo, + DataAddedPacked: arch.summary.ItemStats.DataSizeInRepo + arch.summary.ItemStats.TreeSizeInRepo, TotalFilesProcessed: arch.summary.Files.New + arch.summary.Files.Changed + arch.summary.Files.Unchanged, TotalBytesProcessed: arch.summary.ProcessedBytes, } diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index ac0ffa6d0..5391a1036 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -1649,7 +1649,7 @@ func checkSnapshotStats(t *testing.T, sn *restic.Snapshot, stat Summary) { bothZeroOrNeither(t, uint64(stat.DataBlobs), uint64(sn.Summary.DataBlobs)) bothZeroOrNeither(t, uint64(stat.TreeBlobs), uint64(sn.Summary.TreeBlobs)) bothZeroOrNeither(t, uint64(stat.DataSize+stat.TreeSize), uint64(sn.Summary.DataAdded)) - bothZeroOrNeither(t, uint64(stat.DataSizeInRepo+stat.TreeSizeInRepo), uint64(sn.Summary.DataAddedInRepo)) + bothZeroOrNeither(t, uint64(stat.DataSizeInRepo+stat.TreeSizeInRepo), uint64(sn.Summary.DataAddedPacked)) } func TestArchiverParent(t *testing.T) { diff --git a/internal/restic/snapshot.go b/internal/restic/snapshot.go index 5ee308879..39ed80627 100644 --- a/internal/restic/snapshot.go +++ b/internal/restic/snapshot.go @@ -45,7 +45,7 @@ type SnapshotSummary struct { DataBlobs int `json:"data_blobs"` TreeBlobs int `json:"tree_blobs"` DataAdded uint64 `json:"data_added"` - DataAddedInRepo uint64 `json:"data_added_in_repo"` + DataAddedPacked uint64 `json:"data_added_packed"` TotalFilesProcessed uint `json:"total_files_processed"` TotalBytesProcessed uint64 `json:"total_bytes_processed"` } diff --git a/internal/ui/backup/json.go b/internal/ui/backup/json.go index b3a8b44a7..a14c7ccec 100644 --- a/internal/ui/backup/json.go +++ b/internal/ui/backup/json.go @@ -175,7 +175,7 @@ func (b *JSONProgress) Finish(snapshotID restic.ID, start time.Time, summary *ar DataBlobs: summary.ItemStats.DataBlobs, TreeBlobs: summary.ItemStats.TreeBlobs, DataAdded: summary.ItemStats.DataSize + summary.ItemStats.TreeSize, - DataAddedInRepo: summary.ItemStats.DataSizeInRepo + summary.ItemStats.TreeSizeInRepo, + DataAddedPacked: summary.ItemStats.DataSizeInRepo + summary.ItemStats.TreeSizeInRepo, TotalFilesProcessed: summary.Files.New + summary.Files.Changed + summary.Files.Unchanged, TotalBytesProcessed: summary.ProcessedBytes, TotalDuration: 
time.Since(start).Seconds(), @@ -231,7 +231,7 @@ type summaryOutput struct { DataBlobs int `json:"data_blobs"` TreeBlobs int `json:"tree_blobs"` DataAdded uint64 `json:"data_added"` - DataAddedInRepo uint64 `json:"data_added_in_repo"` + DataAddedPacked uint64 `json:"data_added_packed"` TotalFilesProcessed uint `json:"total_files_processed"` TotalBytesProcessed uint64 `json:"total_bytes_processed"` TotalDuration float64 `json:"total_duration"` // in seconds From e1a588b75c97d64ef293b7d78944fad8ddc13e43 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Mar 2024 01:17:20 +0000 Subject: [PATCH 010/117] build(deps): bump docker/login-action Bumps [docker/login-action](https://github.com/docker/login-action) from 3d58c274f17dffee475a5520cbe67f0a882c4dbb to 5139682d94efc37792e6b54386b5b470a68a4737. - [Release notes](https://github.com/docker/login-action/releases) - [Commits](https://github.com/docker/login-action/compare/3d58c274f17dffee475a5520cbe67f0a882c4dbb...5139682d94efc37792e6b54386b5b470a68a4737) --- updated-dependencies: - dependency-name: docker/login-action dependency-type: direct:production ... Signed-off-by: dependabot[bot] --- .github/workflows/docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index a19767849..d9df0b8ba 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -25,7 +25,7 @@ jobs: uses: actions/checkout@v4 - name: Log in to the Container registry - uses: docker/login-action@3d58c274f17dffee475a5520cbe67f0a882c4dbb + uses: docker/login-action@5139682d94efc37792e6b54386b5b470a68a4737 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} From 70c8aaa303fa555dc2099984941b100e61b8b36a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Mar 2024 01:17:27 +0000 Subject: [PATCH 011/117] build(deps): bump golangci/golangci-lint-action from 3 to 4 Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 3 to 4. - [Release notes](https://github.com/golangci/golangci-lint-action/releases) - [Commits](https://github.com/golangci/golangci-lint-action/compare/v3...v4) --- updated-dependencies: - dependency-name: golangci/golangci-lint-action dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 091d42e8a..134656d8a 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -257,7 +257,7 @@ jobs: uses: actions/checkout@v4 - name: golangci-lint - uses: golangci/golangci-lint-action@v3 + uses: golangci/golangci-lint-action@v4 with: # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. version: v1.56.1 From f185c80cf04c1464f139051f4ec7d99caa9a4542 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Mar 2024 01:24:46 +0000 Subject: [PATCH 012/117] build(deps): bump github.com/Azure/azure-sdk-for-go/sdk/storage/azblob Bumps [github.com/Azure/azure-sdk-for-go/sdk/storage/azblob](https://github.com/Azure/azure-sdk-for-go) from 1.2.1 to 1.3.1. 
- [Release notes](https://github.com/Azure/azure-sdk-for-go/releases) - [Changelog](https://github.com/Azure/azure-sdk-for-go/blob/main/documentation/release.md) - [Commits](https://github.com/Azure/azure-sdk-for-go/compare/sdk/azidentity/v1.2.1...sdk/azcore/v1.3.1) --- updated-dependencies: - dependency-name: github.com/Azure/azure-sdk-for-go/sdk/storage/azblob dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 16 ++++++++-------- go.sum | 33 ++++++++++++++++----------------- 2 files changed, 24 insertions(+), 25 deletions(-) diff --git a/go.mod b/go.mod index afcbc427b..31c0c6b27 100644 --- a/go.mod +++ b/go.mod @@ -2,9 +2,9 @@ module github.com/restic/restic require ( cloud.google.com/go/storage v1.37.0 - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 - github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 - github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 + github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 + github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 github.com/Backblaze/blazer v0.6.1 github.com/anacrolix/fuse v0.2.0 github.com/cenkalti/backoff/v4 v4.2.1 @@ -41,20 +41,20 @@ require ( cloud.google.com/go/compute v1.23.3 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect cloud.google.com/go/iam v1.1.5 // indirect - github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 // indirect - github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 // indirect + github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect + github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/felixge/fgprof v0.9.3 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/golang-jwt/jwt/v5 v5.0.0 // indirect + github.com/golang-jwt/jwt/v5 v5.2.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 // indirect github.com/google/s2a-go v0.1.7 // indirect - github.com/google/uuid v1.5.0 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/googleapis/gax-go/v2 v2.12.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -66,7 +66,7 @@ require ( github.com/minio/md5-simd v1.1.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/rs/xid v1.5.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect diff --git a/go.sum b/go.sum index fb70ac9d5..8bc8593c9 100644 --- a/go.sum +++ b/go.sum @@ -9,17 +9,17 @@ cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= cloud.google.com/go/storage v1.37.0 h1:WI8CsaFO8Q9KjPVtsZ5Cmi0dXV25zMoX0FklT7c3Jm4= cloud.google.com/go/storage v1.37.0/go.mod h1:i34TiT2IhiNDmcj65PqwCjcoUX7Z5pLzS8DEmoiFq1k= 
-github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1 h1:lGlwhPtrX6EVml1hO0ivjkUxsSyl4dsiw9qcA1k/3IQ= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.1/go.mod h1:RKUqNu35KJYcVG/fqTRqmuXJZYNhYkBrnC/hX7yGbTA= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0 h1:BMAjVKJM0U/CYF27gA0ZMmXGkOcvfFtD0oHVZ1TIPRI= -github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.4.0/go.mod h1:1fXstnBMas5kzG+S3q8UoJcmyU6nUeunJcMDHcRYHhs= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1 h1:6oNBlSdi1QqM1PNW7FPA6xOGA5UNsXnkaYZz9vdPGhA= -github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.1/go.mod h1:s4kgfzA0covAXNicZHDMN58jExvcng2mC/DepXiF1EI= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 h1:c4k2FIYIh4xtwqrQwV0Ct1v5+ehlNXj5NI/MWVsiTkQ= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2/go.mod h1:5FDJtLEO/GxwNgUxbwrY3LP0pEoThTQJtk2oysdXHxM= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= +github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= +github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2/go.mod h1:yInRyqWXAuaPrgI7p70+lDDgh3mlBohis29jGMISnmc= github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.5.0 h1:AifHbc4mg0x9zW52WOpKbsHaDKuRhlI7TVl47thgQ70= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1 h1:AMf7YbZOZIW5b66cXNHMWWT/zkjhz5+a+k/3x40EO7E= -github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.1/go.mod h1:uwfk06ZBcvL/g4VHNjurPfVln9NMbsk2XIZxJ+hu81k= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0 h1:hVeq+yCyUi+MsoO/CU95yqCIcdzra5ovzk8Q2BBpV2M= -github.com/AzureAD/microsoft-authentication-library-for-go v1.2.0/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 h1:fXPMAmuh0gDuRDey0atC8cXBuKIlqCzCkL8sm1n9Ov0= +github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1/go.mod h1:SUZc9YRRHfx2+FAQKNDGrssXehqLpxmwRv2mC/5ntj4= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 h1:DzHpqpoJVaCgOUdVHxE8QB52S6NiVdDQvGlny1qvPqA= +github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI= github.com/Backblaze/blazer v0.6.1 h1:xC9HyC7OcxRzzmtfRiikIEvq4HZYWjU6caFwX2EXw1s= github.com/Backblaze/blazer v0.6.1/go.mod h1:7/jrGx4O6OKOto6av+hLwelPR8rwZ+PLxQ5ZOiYAjwY= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -67,8 +67,8 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= -github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= -github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw= +github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -104,8 +104,8 @@ github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98/go.mod h1:czg5+yv1E0Z github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.5.0 h1:1p67kYwdtXjb0gL0BPiP1Av9wiZPo5A8z2cWkTZ+eyU= -github.com/google/uuid v1.5.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= @@ -142,8 +142,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/ncw/swift/v2 v2.0.2 h1:jx282pcAKFhmoZBSdMcCRFn9VWkoBIRsCpe+yZq7vEk= github.com/ncw/swift/v2 v2.0.2/go.mod h1:z0A9RVdYPjNjXVo2pDOPxZ4eu3oarO1P91fTItcb+Kg= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA= @@ -241,7 +241,6 @@ golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= From b5a9b5d0bc8ddce2ad3da3d985206799127df9e4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Mar 2024 01:25:00 +0000 Subject: [PATCH 013/117] build(deps): bump github.com/klauspost/compress from 1.17.6 to 1.17.7 Bumps [github.com/klauspost/compress](https://github.com/klauspost/compress) from 1.17.6 to 1.17.7. 
- [Release notes](https://github.com/klauspost/compress/releases) - [Changelog](https://github.com/klauspost/compress/blob/master/.goreleaser.yml) - [Commits](https://github.com/klauspost/compress/compare/v1.17.6...v1.17.7) --- updated-dependencies: - dependency-name: github.com/klauspost/compress dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index afcbc427b..60d018102 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/go-ole/go-ole v1.3.0 github.com/google/go-cmp v0.6.0 github.com/hashicorp/golang-lru/v2 v2.0.7 - github.com/klauspost/compress v1.17.6 + github.com/klauspost/compress v1.17.7 github.com/minio/minio-go/v7 v7.0.66 github.com/minio/sha256-simd v1.0.1 github.com/ncw/swift/v2 v2.0.2 diff --git a/go.sum b/go.sum index fb70ac9d5..d8d2d69b5 100644 --- a/go.sum +++ b/go.sum @@ -117,8 +117,8 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/klauspost/compress v1.17.6 h1:60eq2E/jlfwQXtvZEeBUYADs+BwKBWURIY+Gj2eRGjI= -github.com/klauspost/compress v1.17.6/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM= +github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= +github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= From 79e8ddac3ffce1d62e2f7affa7e7182ace104c75 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Mar 2024 01:25:41 +0000 Subject: [PATCH 014/117] build(deps): bump github.com/spf13/cobra from 1.7.0 to 1.8.0 Bumps [github.com/spf13/cobra](https://github.com/spf13/cobra) from 1.7.0 to 1.8.0. - [Release notes](https://github.com/spf13/cobra/releases) - [Commits](https://github.com/spf13/cobra/compare/v1.7.0...v1.8.0) --- updated-dependencies: - dependency-name: github.com/spf13/cobra dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index afcbc427b..fa4f40e78 100644 --- a/go.mod +++ b/go.mod @@ -22,7 +22,7 @@ require ( github.com/pkg/sftp v1.13.6 github.com/pkg/xattr v0.4.10-0.20221120235825-35026bbbd013 github.com/restic/chunker v0.4.0 - github.com/spf13/cobra v1.7.0 + github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 go.uber.org/automaxprocs v1.5.3 golang.org/x/crypto v0.18.0 diff --git a/go.sum b/go.sum index fb70ac9d5..8a5c5f915 100644 --- a/go.sum +++ b/go.sum @@ -37,7 +37,6 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -165,8 +164,8 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= -github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stephens2424/writerset v1.0.2/go.mod h1:aS2JhsMn6eA7e82oNmW4rfsgAOp9COBTTl8mzkwADnc= From 8e7f29ae28ca7eb7916dc0989303ddd65a14c6f7 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Mar 2024 01:25:47 +0000 Subject: [PATCH 015/117] build(deps): bump golang.org/x/oauth2 from 0.16.0 to 0.17.0 Bumps [golang.org/x/oauth2](https://github.com/golang/oauth2) from 0.16.0 to 0.17.0. - [Commits](https://github.com/golang/oauth2/compare/v0.16.0...v0.17.0) --- updated-dependencies: - dependency-name: golang.org/x/oauth2 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index afcbc427b..be0c90186 100644 --- a/go.mod +++ b/go.mod @@ -25,12 +25,12 @@ require ( github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 go.uber.org/automaxprocs v1.5.3 - golang.org/x/crypto v0.18.0 - golang.org/x/net v0.20.0 - golang.org/x/oauth2 v0.16.0 + golang.org/x/crypto v0.19.0 + golang.org/x/net v0.21.0 + golang.org/x/oauth2 v0.17.0 golang.org/x/sync v0.6.0 - golang.org/x/sys v0.16.0 - golang.org/x/term v0.16.0 + golang.org/x/sys v0.17.0 + golang.org/x/term v0.17.0 golang.org/x/text v0.14.0 golang.org/x/time v0.5.0 google.golang.org/api v0.157.0 diff --git a/go.sum b/go.sum index fb70ac9d5..b7745bf86 100644 --- a/go.sum +++ b/go.sum @@ -203,8 +203,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -222,11 +222,11 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= +golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -249,13 +249,13 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= From b48b1fa2c9a81f5cfd88841025510e7f7f2cb9ac Mon Sep 17 00:00:00 2001 From: Srigovind Nayak Date: Sun, 3 Mar 2024 16:18:13 +0530 Subject: [PATCH 016/117] docker: update the base image to golang:1.22-alpine --- docker/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index 978da7960..02b53261f 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.20-alpine AS builder +FROM golang:1.22-alpine AS builder WORKDIR /go/src/github.com/restic/restic From 608116817b3025bcc9539e4aff64c8badfc8fd23 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 7 Mar 2024 13:59:23 +0000 Subject: [PATCH 017/117] build(deps): bump cloud.google.com/go/storage from 1.37.0 to 1.39.0 Bumps [cloud.google.com/go/storage](https://github.com/googleapis/google-cloud-go) from 1.37.0 to 1.39.0. - [Release notes](https://github.com/googleapis/google-cloud-go/releases) - [Changelog](https://github.com/googleapis/google-cloud-go/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.37.0...spanner/v1.39.0) --- updated-dependencies: - dependency-name: cloud.google.com/go/storage dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 30 ++++++++++++++-------------- go.sum | 62 +++++++++++++++++++++++++++++----------------------------- 2 files changed, 46 insertions(+), 46 deletions(-) diff --git a/go.mod b/go.mod index a7381e07a..6e546974e 100644 --- a/go.mod +++ b/go.mod @@ -1,7 +1,7 @@ module github.com/restic/restic require ( - cloud.google.com/go/storage v1.37.0 + cloud.google.com/go/storage v1.39.0 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 @@ -33,21 +33,21 @@ require ( golang.org/x/term v0.17.0 golang.org/x/text v0.14.0 golang.org/x/time v0.5.0 - google.golang.org/api v0.157.0 + google.golang.org/api v0.166.0 ) require ( cloud.google.com/go v0.112.0 // indirect - cloud.google.com/go/compute v1.23.3 // indirect + cloud.google.com/go/compute v1.24.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.5 // indirect + cloud.google.com/go/iam v1.1.6 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/felixge/fgprof v0.9.3 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang-jwt/jwt/v5 v5.2.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect @@ -56,7 +56,7 @@ require ( github.com/google/s2a-go v0.1.7 // indirect github.com/google/uuid v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.0 // indirect + github.com/googleapis/gax-go/v2 v2.12.1 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/cpuid/v2 v2.2.6 // indirect @@ -71,16 +71,16 @@ require ( github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect - go.opentelemetry.io/otel v1.21.0 // indirect - go.opentelemetry.io/otel/metric v1.21.0 // indirect - go.opentelemetry.io/otel/trace v1.21.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 // indirect + go.opentelemetry.io/otel v1.23.0 // indirect + go.opentelemetry.io/otel/metric v1.23.0 // indirect + go.opentelemetry.io/otel/trace v1.23.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240122161410-6c6643bf1457 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac // indirect - google.golang.org/grpc v1.60.1 // indirect + google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240221002015-b0ce06bbee7c // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 // indirect + 
google.golang.org/grpc v1.61.1 // indirect google.golang.org/protobuf v1.32.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index dc62f67c9..668d6a339 100644 --- a/go.sum +++ b/go.sum @@ -1,14 +1,14 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= -cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= -cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= +cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= +cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/iam v1.1.5 h1:1jTsCu4bcsNsE4iiqNT5SHwrDRCfRmIaaaVFhRveTJI= -cloud.google.com/go/iam v1.1.5/go.mod h1:rB6P/Ic3mykPbFio+vo7403drjlgvoWfYpJhMXEbzv8= -cloud.google.com/go/storage v1.37.0 h1:WI8CsaFO8Q9KjPVtsZ5Cmi0dXV25zMoX0FklT7c3Jm4= -cloud.google.com/go/storage v1.37.0/go.mod h1:i34TiT2IhiNDmcj65PqwCjcoUX7Z5pLzS8DEmoiFq1k= +cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc= +cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI= +cloud.google.com/go/storage v1.39.0 h1:brbjUa4hbDHhpQf48tjqMaXEV+f1OGoaTmQau9tmCsA= +cloud.google.com/go/storage v1.39.0/go.mod h1:OAEj/WZwUYjA3YHQ10/YcN9ttGuEpLwvaoyBXIPikEk= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 h1:c4k2FIYIh4xtwqrQwV0Ct1v5+ehlNXj5NI/MWVsiTkQ= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2/go.mod h1:5FDJtLEO/GxwNgUxbwrY3LP0pEoThTQJtk2oysdXHxM= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= @@ -36,7 +36,7 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 h1:7To3pQ+pZo0i3dsWEbinPNFs5gPSBOsJtx3wTT94VBY= github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -60,8 +60,8 @@ github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNu github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 
h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= @@ -107,8 +107,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= -github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/googleapis/gax-go/v2 v2.12.1 h1:9F8GV9r9ztXyAi00gsMQHNoF51xPZm8uj1dpYt2ZETM= +github.com/googleapis/gax-go/v2 v2.12.1/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= @@ -184,17 +184,17 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= -go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= -go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= -go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= -go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 h1:P+/g8GpuJGYbOp2tAdKrIPUX9JO02q8Q0YNlHolpibA= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0/go.mod h1:tIKj3DbO8N9Y2xo52og3irLsPI4GW02DSMtrVgNMgxg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 h1:doUP+ExOpH3spVTLS0FcWGLnQrPct/hD/bCPbDRUEAU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0/go.mod h1:rdENBZMT2OE6Ne/KLwpiXudnAsbdrdBaqBvTN8M8BgA= +go.opentelemetry.io/otel v1.23.0 h1:Df0pqjqExIywbMCMTxkAwzjLZtRf+bBKLbUcpxO2C9E= +go.opentelemetry.io/otel v1.23.0/go.mod h1:YCycw9ZeKhcJFrb34iVSkyT0iczq/zYDtZYFufObyB0= +go.opentelemetry.io/otel/metric v1.23.0 h1:pazkx7ss4LFVVYSxYew7L5I6qvLXHA0Ap2pwV+9Cnpo= +go.opentelemetry.io/otel/metric v1.23.0/go.mod 
h1:MqUW2X2a6Q8RN96E2/nqNoT+z9BSms20Jb7Bbp+HiTo= go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= -go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= -go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= +go.opentelemetry.io/otel/trace v1.23.0 h1:37Ik5Ib7xfYVb4V1UtnT97T1jI+AoIYkJyPkuL4iJgI= +go.opentelemetry.io/otel/trace v1.23.0/go.mod h1:GSGTbIClEsuZrGIzoEHqsVfxgn5UkggkflQwDScNUsk= go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -275,8 +275,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= -google.golang.org/api v0.157.0 h1:ORAeqmbrrozeyw5NjnMxh7peHO0UzV4wWYSwZeCUb20= -google.golang.org/api v0.157.0/go.mod h1:+z4v4ufbZ1WEpld6yMGHyggs+PmAHiaLNj5ytP3N01g= +google.golang.org/api v0.166.0 h1:6m4NUwrZYhAaVIHZWxaKjw1L1vNAjtMwORmKRyEEo24= +google.golang.org/api v0.166.0/go.mod h1:4FcBc686KFi7QI/U51/2GKKevfZMpM17sCdibqe/bSA= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= @@ -284,19 +284,19 @@ google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac h1:ZL/Teoy/ZGnzyrqK/Optxxp2pmVh+fmJ97slxSRyzUg= -google.golang.org/genproto v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:+Rvu7ElI+aLzyDQhpHMFMMltsD6m7nqpuWDd2CwJw3k= -google.golang.org/genproto/googleapis/api v0.0.0-20240122161410-6c6643bf1457 h1:KHBtwE+eQc3+NxpjmRFlQ3pJQ2FNnhhgB9xOV8kyBuU= -google.golang.org/genproto/googleapis/api v0.0.0-20240122161410-6c6643bf1457/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac h1:nUQEQmH/csSvFECKYRv6HWEyypysidKl2I6Qpsglq/0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240116215550-a9fa1716bcac/go.mod h1:daQN87bsDqDoe316QbbvX60nMoJQa4r6Ds0ZuoAe5yA= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= +google.golang.org/genproto/googleapis/api v0.0.0-20240221002015-b0ce06bbee7c h1:9g7erC9qu44ks7UK4gDNlnk4kOxZG707xKm4jVniy6o= +google.golang.org/genproto/googleapis/api v0.0.0-20240221002015-b0ce06bbee7c/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= +google.golang.org/genproto/googleapis/rpc 
v0.0.0-20240213162025-012b6fc9bca9 h1:hZB7eLIaYlW9qXRfCq/qDaPdbeY3757uARz5Vvfv+cY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:YUWgXUFRPfoYK1IHMuxH5K6nPEXSCzIMljnQ59lLRCk= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= -google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= +google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY= +google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= From 5974a7949777bb0b17ec39b5ccadfb29897f2358 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 28 Jan 2024 16:15:32 +0100 Subject: [PATCH 018/117] features: add basic feature flag implementation --- cmd/restic/cmd_features.go | 51 +++++++++++++ internal/feature/features.go | 140 +++++++++++++++++++++++++++++++++++ internal/feature/registry.go | 15 ++++ internal/feature/testing.go | 29 ++++++++ 4 files changed, 235 insertions(+) create mode 100644 cmd/restic/cmd_features.go create mode 100644 internal/feature/features.go create mode 100644 internal/feature/registry.go create mode 100644 internal/feature/testing.go diff --git a/cmd/restic/cmd_features.go b/cmd/restic/cmd_features.go new file mode 100644 index 000000000..b1544b9d8 --- /dev/null +++ b/cmd/restic/cmd_features.go @@ -0,0 +1,51 @@ +package main + +import ( + "fmt" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" + "github.com/restic/restic/internal/ui/table" + + "github.com/spf13/cobra" +) + +// FIXME explain semantics + +var featuresCmd = &cobra.Command{ + Use: "features", + Short: "Print list of feature flags", + Long: ` +The "features" command prints a list of supported feature flags. + +EXIT STATUS +=========== + +Exit status is 0 if the command was successful, and non-zero if there was any error. 
+`, + Hidden: true, + DisableAutoGenTag: true, + RunE: func(_ *cobra.Command, args []string) error { + if len(args) != 0 { + return errors.Fatal("the feature command expects no arguments") + } + + fmt.Printf("All Feature Flags:\n") + flags := feature.Flag.List() + + tab := table.New() + tab.AddColumn("Name", "{{ .Name }}") + tab.AddColumn("Type", "{{ .Type }}") + tab.AddColumn("Default", "{{ .Default }}") + tab.AddColumn("Description", "{{ .Description }}") + + for _, flag := range flags { + tab.AddRow(flag) + } + return tab.Write(globalOptions.stdout) + }, +} + +func init() { + cmdRoot.AddCommand(featuresCmd) +} diff --git a/internal/feature/features.go b/internal/feature/features.go new file mode 100644 index 000000000..1e1f3785c --- /dev/null +++ b/internal/feature/features.go @@ -0,0 +1,140 @@ +package feature + +import ( + "fmt" + "sort" + "strconv" + "strings" +) + +type state string +type FlagName string + +const ( + // Alpha features are disabled by default. They do not guarantee any backwards compatibility and may change in arbitrary ways between restic versions. + Alpha state = "alpha" + // Beta features are enabled by default. They may still change, but incompatible changes should be avoided. + Beta state = "beta" + // Stable features are always enabled + Stable state = "stable" + // Deprecated features are always disabled + Deprecated state = "deprecated" +) + +type FlagDesc struct { + Type state + Description string +} + +type FlagSet struct { + flags map[FlagName]*FlagDesc + enabled map[FlagName]bool +} + +func New() *FlagSet { + return &FlagSet{} +} + +func getDefault(phase state) bool { + switch phase { + case Alpha, Deprecated: + return false + case Beta, Stable: + return true + default: + panic("unknown feature phase") + } +} + +func (f *FlagSet) SetFlags(flags map[FlagName]FlagDesc) { + f.flags = map[FlagName]*FlagDesc{} + f.enabled = map[FlagName]bool{} + + for name, flag := range flags { + fcopy := flag + f.flags[name] = &fcopy + f.enabled[name] = getDefault(fcopy.Type) + } +} + +func (f *FlagSet) Apply(flags string) error { + if flags == "" { + return nil + } + + selection := make(map[string]bool) + + for _, flag := range strings.Split(flags, ",") { + parts := strings.SplitN(flag, "=", 2) + + name := parts[0] + value := "true" + if len(parts) == 2 { + value = parts[1] + } + + isEnabled, err := strconv.ParseBool(value) + if err != nil { + return fmt.Errorf("failed to parse value %q for feature flag %v: %w", value, name, err) + } + + selection[name] = isEnabled + } + + for name, value := range selection { + fname := FlagName(name) + flag := f.flags[fname] + if flag == nil { + return fmt.Errorf("unknown feature flag %q", name) + } + + switch flag.Type { + case Alpha, Beta: + f.enabled[fname] = value + case Stable: + // FIXME print warning + case Deprecated: + // FIXME print warning + default: + panic("unknown feature phase") + } + } + + return nil +} + +func (f *FlagSet) Enabled(name FlagName) bool { + isEnabled, ok := f.enabled[name] + if !ok { + panic(fmt.Sprintf("unknown feature flag %v", name)) + } + + return isEnabled +} + +// Help contains information about a feature. 
+type Help struct { + Name string + Type string + Default bool + Description string +} + +func (f *FlagSet) List() []Help { + var help []Help + + for name, flag := range f.flags { + help = append(help, Help{ + Name: string(name), + Type: string(flag.Type), + Default: getDefault(flag.Type), + Description: flag.Description, + }) + } + + sort.Slice(help, func(i, j int) bool { + return strings.Compare(help[i].Name, help[j].Name) < 0 + }) + + return help +} diff --git a/internal/feature/registry.go b/internal/feature/registry.go new file mode 100644 index 000000000..7a9cbf560 --- /dev/null +++ b/internal/feature/registry.go @@ -0,0 +1,15 @@ +package feature + +// Flag is named such that checking for a feature uses `feature.Flag.Enabled(feature.ExampleFeature)`. +var Flag = New() + +// flag names are written in kebab-case +const ( + ExampleFeature FlagName = "example-feature" +) + +func init() { + Flag.SetFlags(map[FlagName]FlagDesc{ + ExampleFeature: {Type: Alpha, Description: "just for testing"}, + }) +} diff --git a/internal/feature/testing.go b/internal/feature/testing.go new file mode 100644 index 000000000..c13f52509 --- /dev/null +++ b/internal/feature/testing.go @@ -0,0 +1,29 @@ +package feature + +import ( + "fmt" + "testing" +) + +// TestSetFlag temporarily sets a feature flag to the given value until the +// returned function is called. +// +// Usage +// ``` +// defer TestSetFlag(t, features.Flags, features.ExampleFlag, true)() +// ``` +func TestSetFlag(t *testing.T, f *FlagSet, flag FlagName, value bool) func() { + current := f.Enabled(flag) + + if err := f.Apply(fmt.Sprintf("%s=%v", flag, value)); err != nil { + // not reachable + panic(err) + } + + return func() { + if err := f.Apply(fmt.Sprintf("%s=%v", flag, current)); err != nil { + // not reachable + panic(err) + } + } +} From 1c77c51a03f583d2e5169665e74b47668a2fe35c Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 28 Jan 2024 16:21:22 +0100 Subject: [PATCH 019/117] features: initialize based on RESTIC_FEATURES environment variable --- cmd/restic/main.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/cmd/restic/main.go b/cmd/restic/main.go index b31ce1bb4..1a11abc40 100644 --- a/cmd/restic/main.go +++ b/cmd/restic/main.go @@ -14,6 +14,7 @@ import ( "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/options" "github.com/restic/restic/internal/restic" ) @@ -103,10 +104,16 @@ func main() { // we can show the logs log.SetOutput(logBuffer) + err := feature.Flag.Apply(os.Getenv("RESTIC_FEATURES")) + if err != nil { + fmt.Fprintln(os.Stderr, err) + Exit(1) + } + debug.Log("main %#v", os.Args) debug.Log("restic %s compiled with %v on %v/%v", version, runtime.Version(), runtime.GOOS, runtime.GOARCH) - err := cmdRoot.ExecuteContext(internalGlobalCtx) + err = cmdRoot.ExecuteContext(internalGlobalCtx) switch { case restic.IsAlreadyLocked(err): From 70839155f247c74935d2ee900790a3a8fc4099d2 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 12 Feb 2024 21:34:37 +0100 Subject: [PATCH 020/117] features: add tests --- internal/feature/features_test.go | 139 ++++++++++++++++++++++++++++++ internal/feature/testing_test.go | 19 ++++ 2 files changed, 158 insertions(+) create mode 100644 internal/feature/features_test.go create mode 100644 internal/feature/testing_test.go diff --git a/internal/feature/features_test.go b/internal/feature/features_test.go new file mode 100644 index 
000000000..3611ac998 --- /dev/null +++ b/internal/feature/features_test.go @@ -0,0 +1,139 @@ +package feature_test + +import ( + "fmt" + "strings" + "testing" + + "github.com/restic/restic/internal/feature" + rtest "github.com/restic/restic/internal/test" +) + +var ( + alpha = feature.FlagName("alpha-feature") + beta = feature.FlagName("beta-feature") + stable = feature.FlagName("stable-feature") + deprecated = feature.FlagName("deprecated-feature") +) + +var testFlags = map[feature.FlagName]feature.FlagDesc{ + alpha: { + Type: feature.Alpha, + Description: "alpha", + }, + beta: { + Type: feature.Beta, + Description: "beta", + }, + stable: { + Type: feature.Stable, + Description: "stable", + }, + deprecated: { + Type: feature.Deprecated, + Description: "deprecated", + }, +} + +func buildTestFlagSet() *feature.FlagSet { + flags := feature.New() + flags.SetFlags(testFlags) + return flags +} + +func TestFeatureDefaults(t *testing.T) { + flags := buildTestFlagSet() + for _, exp := range []struct { + flag feature.FlagName + value bool + }{ + {alpha, false}, + {beta, true}, + {stable, true}, + {deprecated, false}, + } { + rtest.Assert(t, flags.Enabled(exp.flag) == exp.value, "expected flag %v to have value %v got %v", exp.flag, exp.value, flags.Enabled(exp.flag)) + } +} + +func TestEmptyApply(t *testing.T) { + flags := buildTestFlagSet() + rtest.OK(t, flags.Apply("")) + + rtest.Assert(t, !flags.Enabled(alpha), "expected alpha feature to be disabled") + rtest.Assert(t, flags.Enabled(beta), "expected beta feature to be enabled") +} + +func TestFeatureApply(t *testing.T) { + flags := buildTestFlagSet() + rtest.OK(t, flags.Apply(string(alpha))) + rtest.Assert(t, flags.Enabled(alpha), "expected alpha feature to be enabled") + + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=false", alpha))) + rtest.Assert(t, !flags.Enabled(alpha), "expected alpha feature to be disabled") + + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=true", alpha))) + rtest.Assert(t, flags.Enabled(alpha), "expected alpha feature to be enabled again") + + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=false", beta))) + rtest.Assert(t, !flags.Enabled(beta), "expected beta feature to be disabled") + + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=false", stable))) + rtest.Assert(t, flags.Enabled(stable), "expected stable feature to remain enabled") + + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=true", deprecated))) + rtest.Assert(t, !flags.Enabled(deprecated), "expected deprecated feature to remain disabled") +} + +func TestFeatureMultipleApply(t *testing.T) { + flags := buildTestFlagSet() + + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=true,%s=false", alpha, beta))) + rtest.Assert(t, flags.Enabled(alpha), "expected alpha feature to be enabled") + rtest.Assert(t, !flags.Enabled(beta), "expected beta feature to be disabled") +} + +func TestFeatureApplyInvalid(t *testing.T) { + flags := buildTestFlagSet() + + err := flags.Apply("invalid-flag") + rtest.Assert(t, err != nil && strings.Contains(err.Error(), "unknown feature flag"), "expected unknown feature flag error, got: %v", err) + + err = flags.Apply(fmt.Sprintf("%v=invalid", alpha)) + rtest.Assert(t, err != nil && strings.Contains(err.Error(), "failed to parse value"), "expected parsing error, got: %v", err) +} + +func assertPanic(t *testing.T) { + if r := recover(); r == nil { + t.Fatal("should have panicked") + } +} + +func TestFeatureQueryInvalid(t *testing.T) { + defer assertPanic(t) + + flags := buildTestFlagSet() + flags.Enabled("invalid-flag") +} + +func TestFeatureSetInvalidPhase(t *testing.T) { + defer 
assertPanic(t) + + flags := feature.New() + flags.SetFlags(map[feature.FlagName]feature.FlagDesc{ + "invalid": { + Type: "invalid", + }, + }) +} + +func TestFeatureList(t *testing.T) { + flags := buildTestFlagSet() + + rtest.Equals(t, []feature.Help{ + {string(alpha), string(feature.Alpha), false, "alpha"}, + {string(beta), string(feature.Beta), true, "beta"}, + {string(deprecated), string(feature.Deprecated), false, "deprecated"}, + {string(stable), string(feature.Stable), true, "stable"}, + }, flags.List()) +} diff --git a/internal/feature/testing_test.go b/internal/feature/testing_test.go new file mode 100644 index 000000000..f11b4bae4 --- /dev/null +++ b/internal/feature/testing_test.go @@ -0,0 +1,19 @@ +package feature_test + +import ( + "testing" + + "github.com/restic/restic/internal/feature" + rtest "github.com/restic/restic/internal/test" +) + +func TestSetFeatureFlag(t *testing.T) { + flags := buildTestFlagSet() + rtest.Assert(t, !flags.Enabled(alpha), "expected alpha feature to be disabled") + + restore := feature.TestSetFlag(t, flags, alpha, true) + rtest.Assert(t, flags.Enabled(alpha), "expected alpha feature to be enabled") + + restore() + rtest.Assert(t, !flags.Enabled(alpha), "expected alpha feature to be disabled again") +} From fe68d2cafb1098b7bee16983be57492ee91388cc Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 17 Feb 2024 21:41:07 +0100 Subject: [PATCH 021/117] add feature flag documentation --- changelog/unreleased/issue-4601 | 9 +++++++++ cmd/restic/cmd_features.go | 11 +++++++++-- doc/047_tuning_backup_parameters.rst | 28 +++++++++++++++++++++++++++- 3 files changed, 45 insertions(+), 3 deletions(-) create mode 100644 changelog/unreleased/issue-4601 diff --git a/changelog/unreleased/issue-4601 b/changelog/unreleased/issue-4601 new file mode 100644 index 000000000..f99dbe187 --- /dev/null +++ b/changelog/unreleased/issue-4601 @@ -0,0 +1,9 @@ +Enhancement: Add support for feature flags + +Restic now supports feature flags that can be used to enable and disable +experimental features. The flags can be set using the environment variable +`RESTIC_FEATURES`. To get a list of currently supported feature flags, +run the `features` command. + +https://github.com/restic/restic/issues/4601 +https://github.com/restic/restic/pull/4666 diff --git a/cmd/restic/cmd_features.go b/cmd/restic/cmd_features.go index b1544b9d8..8125d3e26 100644 --- a/cmd/restic/cmd_features.go +++ b/cmd/restic/cmd_features.go @@ -10,14 +10,21 @@ import ( "github.com/spf13/cobra" ) -// FIXME explain semantics - var featuresCmd = &cobra.Command{ Use: "features", Short: "Print list of feature flags", Long: ` The "features" command prints a list of supported feature flags. +To pass feature flags to restic, set the RESTIC_FEATURES environment variable +to "featureA=true,featureB=false". Specifying an unknown feature flag is an error. + +A feature can either be in alpha, beta, stable or deprecated state. +An _alpha_ feature is disabled by default and may change in arbitrary ways between restic versions or be removed. +A _beta_ feature is enabled by default, but still can change in minor ways or be removed. +A _stable_ feature is always enabled and cannot be disabled. The flag will be removed in a future restic version. +A _deprecated_ feature is always disabled and cannot be enabled. The flag will be removed in a future restic version. 
+ EXIT STATUS =========== diff --git a/doc/047_tuning_backup_parameters.rst b/doc/047_tuning_backup_parameters.rst index d8fb2c9b6..8456693e7 100644 --- a/doc/047_tuning_backup_parameters.rst +++ b/doc/047_tuning_backup_parameters.rst @@ -26,7 +26,8 @@ When you start a backup, restic will concurrently count the number of files and their total size, which is used to estimate how long it will take. This will cause some extra I/O, which can slow down backups of network file systems or FUSE mounts. To avoid this overhead at the cost of not seeing a progress -estimate, use the ``--no-scan`` option which disables this file scanning. +estimate, use the ``--no-scan`` option of the ``backup`` command which disables +this file scanning. Backend Connections =================== @@ -111,3 +112,28 @@ to disk. An operating system usually caches file write operations in memory and writes them to disk after a short delay. As larger pack files take longer to upload, this increases the chance of these files being written to disk. This can increase disk wear for SSDs. + + +Feature Flags +============= + +Feature flags allow disabling or enabling certain experimental restic features. The flags +can be specified via the ``RESTIC_FEATURES`` environment variable. The variable expects a +comma-separated list of ``key[=value],key2[=value2]`` pairs. The key is the name of a feature +flag. The value is optional and can contain either the value ``true`` (default if omitted) +or ``false``. The list of currently available feature flags is shown by the ``features`` +command. + +Restic will return an error if an invalid feature flag is specified. No longer relevant +feature flags may be removed in a future restic release. Thus, make sure to no longer +specify these flags. + +A feature can either be in alpha, beta, stable or deprecated state. + +- An _alpha_ feature is disabled by default and may change in arbitrary ways between restic + versions or be removed. +- A _beta_ feature is enabled by default, but still can change in minor ways or be removed. +- A _stable_ feature is always enabled and cannot be disabled. This allows for a transition + period after which the flag will be removed in a future restic version. +- A _deprecated_ feature is always disabled and cannot be enabled. The flag will be removed + in a future restic version.
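To make the flag semantics described above easier to follow, here is a minimal sketch (not part of the patch series) of how a feature-gated code path is consumed. It uses only names introduced by the patches above — `feature.Flag`, `feature.ExampleFeature` and the `RESTIC_FEATURES` environment variable parsed in cmd/restic/main.go — and it assumes it is compiled inside the restic module, since the `internal` package cannot be imported from elsewhere:

    package main

    import (
    	"fmt"

    	"github.com/restic/restic/internal/feature"
    )

    func main() {
    	// ExampleFeature is registered as an alpha flag in
    	// internal/feature/registry.go, so it defaults to disabled and is only
    	// active when the user opts in (in restic itself via the
    	// RESTIC_FEATURES environment variable handled in cmd/restic/main.go).
    	if feature.Flag.Enabled(feature.ExampleFeature) {
    		fmt.Println("example-feature is enabled: running experimental code path")
    		return
    	}
    	fmt.Println("example-feature is disabled: running default code path")
    }

Registering flags centrally in internal/feature/registry.go is what allows FlagSet.Apply to reject unknown flag names and Enabled to panic on unregistered ones, so a mistyped flag name fails loudly instead of silently selecting the wrong code path.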
From a9b64cd7ad92a44a9db00a917e381b762c7d7acb Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 17 Feb 2024 21:50:25 +0100 Subject: [PATCH 022/117] features: print warning for stable/depreacted feature flags --- cmd/restic/main.go | 4 +++- internal/feature/features.go | 6 +++--- internal/feature/features_test.go | 34 +++++++++++++++++++++---------- internal/feature/testing.go | 8 ++++++-- 4 files changed, 35 insertions(+), 17 deletions(-) diff --git a/cmd/restic/main.go b/cmd/restic/main.go index 1a11abc40..a4acb1cab 100644 --- a/cmd/restic/main.go +++ b/cmd/restic/main.go @@ -104,7 +104,9 @@ func main() { // we can show the logs log.SetOutput(logBuffer) - err := feature.Flag.Apply(os.Getenv("RESTIC_FEATURES")) + err := feature.Flag.Apply(os.Getenv("RESTIC_FEATURES"), func(s string) { + fmt.Fprintln(os.Stderr, s) + }) if err != nil { fmt.Fprintln(os.Stderr, err) Exit(1) diff --git a/internal/feature/features.go b/internal/feature/features.go index 1e1f3785c..e3b625e92 100644 --- a/internal/feature/features.go +++ b/internal/feature/features.go @@ -57,7 +57,7 @@ func (f *FlagSet) SetFlags(flags map[FlagName]FlagDesc) { } } -func (f *FlagSet) Apply(flags string) error { +func (f *FlagSet) Apply(flags string, logWarning func(string)) error { if flags == "" { return nil } @@ -92,9 +92,9 @@ func (f *FlagSet) Apply(flags string) error { case Alpha, Beta: f.enabled[fname] = value case Stable: - // FIXME print warning + logWarning(fmt.Sprintf("feature flag %q is always enabled and will be removed in a future release", fname)) case Deprecated: - // FIXME print warning + logWarning(fmt.Sprintf("feature flag %q is always disabled and will be removed in a future release", fname)) default: panic("unknown feature phase") } diff --git a/internal/feature/features_test.go b/internal/feature/features_test.go index 3611ac998..f5d405fa7 100644 --- a/internal/feature/features_test.go +++ b/internal/feature/features_test.go @@ -56,9 +56,13 @@ func TestFeatureDefaults(t *testing.T) { } } +func panicIfCalled(msg string) { + panic(msg) +} + func TestEmptyApply(t *testing.T) { flags := buildTestFlagSet() - rtest.OK(t, flags.Apply("")) + rtest.OK(t, flags.Apply("", panicIfCalled)) rtest.Assert(t, !flags.Enabled(alpha), "expected alpha feature to be disabled") rtest.Assert(t, flags.Enabled(beta), "expected beta feature to be enabled") @@ -66,29 +70,37 @@ func TestEmptyApply(t *testing.T) { func TestFeatureApply(t *testing.T) { flags := buildTestFlagSet() - rtest.OK(t, flags.Apply(string(alpha))) + rtest.OK(t, flags.Apply(string(alpha), panicIfCalled)) rtest.Assert(t, flags.Enabled(alpha), "expected alpha feature to be enabled") - rtest.OK(t, flags.Apply(fmt.Sprintf("%s=false", alpha))) + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=false", alpha), panicIfCalled)) rtest.Assert(t, !flags.Enabled(alpha), "expected alpha feature to be disabled") - rtest.OK(t, flags.Apply(fmt.Sprintf("%s=true", alpha))) + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=true", alpha), panicIfCalled)) rtest.Assert(t, flags.Enabled(alpha), "expected alpha feature to be enabled again") - rtest.OK(t, flags.Apply(fmt.Sprintf("%s=false", beta))) + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=false", beta), panicIfCalled)) rtest.Assert(t, !flags.Enabled(beta), "expected beta feature to be disabled") - rtest.OK(t, flags.Apply(fmt.Sprintf("%s=false", stable))) - rtest.Assert(t, flags.Enabled(stable), "expected stable feature to remain enabled") + logMsg := "" + log := func(msg string) { + logMsg = msg + } - rtest.OK(t, flags.Apply(fmt.Sprintf("%s=true", 
deprecated))) + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=false", stable), log)) + rtest.Assert(t, flags.Enabled(stable), "expected stable feature to remain enabled") + rtest.Assert(t, strings.Contains(logMsg, string(stable)), "unexpected log message for stable flag: %v", logMsg) + + logMsg = "" + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=true", deprecated), log)) rtest.Assert(t, !flags.Enabled(deprecated), "expected deprecated feature to remain disabled") + rtest.Assert(t, strings.Contains(logMsg, string(deprecated)), "unexpected log message for deprecated flag: %v", logMsg) } func TestFeatureMultipleApply(t *testing.T) { flags := buildTestFlagSet() - rtest.OK(t, flags.Apply(fmt.Sprintf("%s=true,%s=false", alpha, beta))) + rtest.OK(t, flags.Apply(fmt.Sprintf("%s=true,%s=false", alpha, beta), panicIfCalled)) rtest.Assert(t, flags.Enabled(alpha), "expected alpha feature to be enabled") rtest.Assert(t, !flags.Enabled(beta), "expected beta feature to be disabled") } @@ -96,10 +108,10 @@ func TestFeatureMultipleApply(t *testing.T) { func TestFeatureApplyInvalid(t *testing.T) { flags := buildTestFlagSet() - err := flags.Apply("invalid-flag") + err := flags.Apply("invalid-flag", panicIfCalled) rtest.Assert(t, err != nil && strings.Contains(err.Error(), "unknown feature flag"), "expected unknown feature flag error, got: %v", err) - err = flags.Apply(fmt.Sprintf("%v=invalid", alpha)) + err = flags.Apply(fmt.Sprintf("%v=invalid", alpha), panicIfCalled) rtest.Assert(t, err != nil && strings.Contains(err.Error(), "failed to parse value"), "expected parsing error, got: %v", err) } diff --git a/internal/feature/testing.go b/internal/feature/testing.go index c13f52509..b796e89b5 100644 --- a/internal/feature/testing.go +++ b/internal/feature/testing.go @@ -15,13 +15,17 @@ import ( func TestSetFlag(t *testing.T, f *FlagSet, flag FlagName, value bool) func() { current := f.Enabled(flag) - if err := f.Apply(fmt.Sprintf("%s=%v", flag, value)); err != nil { + panicIfCalled := func(msg string) { + panic(msg) + } + + if err := f.Apply(fmt.Sprintf("%s=%v", flag, value), panicIfCalled); err != nil { // not reachable panic(err) } return func() { - if err := f.Apply(fmt.Sprintf("%s=%v", flag, current)); err != nil { + if err := f.Apply(fmt.Sprintf("%s=%v", flag, current), panicIfCalled); err != nil { // not reachable panic(err) } From 1a8bf358f1ea7b6903ad76da6e31f3f01be0eb04 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 9 Mar 2024 18:20:52 +0100 Subject: [PATCH 023/117] index: deprecate legacy index format --- .../repo-restore-permissions-test.tar.gz | Bin 4174 -> 4256 bytes internal/feature/registry.go | 6 ++++-- internal/index/index.go | 6 ++++++ internal/index/index_test.go | 3 +++ 4 files changed, 13 insertions(+), 2 deletions(-) diff --git a/cmd/restic/testdata/repo-restore-permissions-test.tar.gz b/cmd/restic/testdata/repo-restore-permissions-test.tar.gz index 36aa62dbfb0ad52d5c0cafab528cfee393d388a2..dc8e9bc80e992fca9a59632cbb89344dcd870869 100644 GIT binary patch literal 4256 zcmV;R5MS>fiwFP!000001MS**I92<<2XI0~DaUEfl*r6p(;k${tZkmh6MI2cBcS8f1-|!^(ALS1s2#f$20REgm03kR|{!8JP z`~f^nfXD!i0FkH!3>izNpg5EYk!U0W1Q8I@;x`zSfCp%clM_ZL7zjlO2%fU|J&2+Z zME?CTJ_ii{i~O-T+^_QoK?nx9B>r_E|407j`3Ff-7%0gXrAn$wf*1f%!r+uZ*a8Gp zK~NO{lnHnQgW&N51q`T)!AL4fem)MKBoB0P`^7`HxPtD*^!?`wHxkpu!-cW9(v3=6 z{LG{<0=#_}KN?A@Vi5$wDoQdWRq=|F-jb?-q9l{#?z^}Zq-$ei3X&+g-V8%c3#N;O zjW5|#JG=~ddTdIdD-0W31(29pb~ff@Ka`}fkLhX2 zHgcw#niH%j8rBq+mjSBHG+q3KA^&#%U*Ud!g%HoxxHY6R^LA1>y+@-~$yf|j 
zIfwh6*RSgiKJAqKct*^zq)>`)ZoGh}FtT#Xe#|nH+^FG0y;EY8H4|kS;XNW1$qi@d zawF@Qt#|6yUR@Aaxz_Yz8B?8GGkMT`w$ej7RF#jIbazdkvS?adqRmU4E413s39IbP zO|lkZqYv26CW7f0itBYYW;~77PW#|2!vC=H{DvN1;+l;9^`L4gY($ zfM4nV004r&>Hi>%133MEDY%X74zboTV(JV^UF!1x`50-VNo?vfiUh0W!Qpr}l?5J= z_FWFqo&<%r=V z%Ur5Sze4YO-u1+(s4{oGQ`UX}qX8&icT_M)Yu7@dFbUrQ6i-9v4}@ z!80v$S;+7$r={l488j}Q~Rhk0I_?LKW~ockBObDlks?Zd{6lHVQuFhbf&}e zA|?;%PPdEs3~p3<>7lzb$nS=_H1TSK-w97_%%~uGL}fzroS*ditI8`xBj3&Fw=zrn zryq`ri1^)um7w`#`OP27DJna=Y8gL14Vj4Ok*s_MKkvaYN zF7LIS5=!hsHdf+7ukMsmYnNJGdd;ZGfby$(d0wec@~1b}9YLgi=Xl3<3gV049+M zSQ>&ws1ObZ0w^A!fm8^@V{pGS##j132m-&o|6jcR!*crnQb?RDl|yFsYbrVtl3PtY z6aCkFL%eL0zOdX6sQGeh?nYaw`Q}o+W%2uqLX`Ji z_1R}akIG7qe|I3#w)TSJnpl!pfuetjt#RSuG7KBV7H!er>Av~w_wmGRBDWf3=Gn;u zJX2s_dQC8ijo7k|xb6Aw;rQ4C!|KU`;sG0<0NIRN`85zS5V51c>zH)CLdnL7fUb&G zk#@5WFKW8HYWW`;ay-WQc$i9~&YnjdLnd{l zA@$$$xm`=otst#_xW%(MBmT@|C_QG^u`=bMWa;*Ymb&H#7*yL~tCgMZ!Gt|M0j!{g z(CL(0yJv1m+b_%u2MFEfRy`+`F&chISoG@KdZFHvBaF0gI6ULY;-YZ`9*=Hn6 zde-%s^2Pe;X1*m^bhzK^ByGSF1P$^HlAXm}$A|%R#^~9(lpo?69#5aE?@i7e6Np-FD1tW>Y>z1;JU2^DwC7G;k2AQcZryi9&CV%vueI96?LS_Ry6Sa{ zKFkQXeAOl?a|eyMocGA_Q|sB-Xh*TmX0ap609lkU4W zwi)V6t?RBZB$GprAM-z}l%C^Dd(j$lBy~4yMm{pJRh+OCIriq>O2{qNB${MP#@blXSkpC8!} z%rzZu`$Xd=1LVUbL@3r_sU2*Qh z&aC1xk?V%G^`>jR#O<6f8eG`nFr_vXGcQnTx-;d;)U@RA^o8QKnWVbAQOXH+&i8@a0U3V>CP>3d_jUQgsJTARSI?H6WgyCRDTjrQR zT1ks-i^jd|C$q6cr;~cUg!Q%9~Ag+pNxd)ca{hR~r%bqckfOWXzka}x^5CLg9(Txv^I48MgpbN@F&UwPmbG8`ZMiqaF~%D-VeWB|MwHY-l~Z!}FM<<;i=m^?l|qzmIs5+RTo| z4_&8zVx2RUeg5&oe3Z_@^188x==2B5tgLvbsJ*n^E-xK2*#(K;A`Du5fG<@(%Qx$e{7UNce%oQgzyi1Q4Dcn@YJfRt!jFs$L!553x{K`gWuzn@f@Ke6ph&Z=SMafj?;UdzdkckHZXBBSo_wKCUr zD0+xB8U3@5&ELV7`S0#Uar=gU0Qw{U|3VPp%lzj&|6dwk@^|s1qW*ue{5kjkOX9!e zkH?eoG#o-EU|=kcN}-@2L;w%~qJcP=hEf(=Gl<0ys3-+R5rjm;k||Ug8Bf4NI1GS- zG}`Zu@wMmw2#EW2{vh}ze@_4Z_70!$8tHzCz86ao(x;aRxddlka!)^0q|B^+FJzG^ zpL*ZIRrr&+;!}LtXhfrdb5{v!(BGtK-oY~0*0Hy2b6Ivi_tZO6=R|hw275^j-xReQ zcdcIz9TdoT)%qrgUy}FeJC+V_LGHltT!Jq<9iKQ^=$N}TY5QG14?EKqk>DTHyU`p` z)zYnd_TL`-Iscf#3m>7t7}xgZK;b6GO3T^oS7&SV2RzQE6w)ddQtd+fyLbt+lK34} zXJ?;}T1=mvT~%oIzNei{v^(<5O}tx~{CKm-T^0M=N#Bof74+s{YiCRyxK?HuxomPe z3OYEPJh(4{-q_h>zUkJ2+ZxK*7bS8>TqlY&Zs1b2pAJ;xROeN51sr9o)LCx)m=5rP z^S1RKlREd7C4_t@?6s%5U%AaKi5i_O&((YI%{VDyta|7={sKzPN;^#l z^u{-7>fCSQ=D)l*YCi2?Z=h0*;YvBeka~q4uoNeBz*4qd7g((Np^?DDg>OHsKCHqQLQp=gE zL08(iz7w6yeYl`qY-dogp~9Azb@BcNS>qHR8zrjn3qAcQ%l_1Y+{gefqu9Os-gK?K zhqvAJpX!U zCT^8qe;gX}cJEOFCb)=dlqgl?RZz83%~D`V?s05^tO z^QSKjwRT6wMz@bJtLd8a`LER+i>ys{81?M0emxV)6M9jH-b^H?w>R`5f_z=;B4hXa z$aIQkDN0R;xE~lbUoOg>-+R$V(Qd;gW$&vi1dHQ{Qx)aX1$%ad{q%ZzMZ%Q)L zt?Rz8@BRCIuitgC=)OLheBT!o7YqvZOABC-g%$zY^0D{Kw z$^COd{+IX<*FQp}<)UPlrHMe02;*V2o*!0yywY(ianw$Xy);YDw=Dg#>TO+AAH)1~ ztbEVPAmj43wLI$g<$*e`O3lFMYvr@K5W-BEw-0^e5~sH*ye)V<^39?&Sy%5Y~O#q?`W?a z75#XZosF^c<)>(?!M}QL{u5Y;e}8Y^@96=5BL4$0un_+s3gYGe#b7~yPkJ!>JN^KF z%Kza7{UHS5>Ax8KLw^7Z;~)yaz(Hgh4n;vzsB{dS29X(L90cJIGWQOOj>7^B6gNf@ zDhi?_I0Q@O-UsP)2%`M)7~k@LGzRlq{Xy=E0LAnF#UNCk%A%9~=`?vQc@PC4>L`pl z2opg-3k0MVD3rXK{4evsBzx1j&vU1Y+u`QJ_W!lRlgxJac4u)r zJ!uT?HJi!`_VwppTF7gm5d=c3$+P6Ouxj$Y@>+nJJe%z0&wUCqc5onoWU8?*3$IUP zyAvJ!DNNlEOE(`oFC;|M(%9WXUzLsv@pRP#OfA?9j-e)-f+3PZSgMA$h9;^3bX^0Y zwSfc6pWyCc;O6hcfHmp5+S=T4X=HygcLW<(4whg_rdnt+Frd4c5sP6T5&&UB^aysY z4%UFBnS%#`7-Z`~a`RzOV75M8)r`e(*E2MtQ0-s?S4Rj<;;@3PnITMHstMv~v0D}N zHFdD01_oRBTH9K2Ld?xAC_so?2uQWG)b#WuP|Wp-s^)+y9%S2HUOrUM?`Q!3g8w4`nwS3$W(eZ&6&2m}3Af3E*y0G|IZ2AS#vgA42?0f6A7@d4PS z>v2%gC=@%70xqOB5Si+RmAi9$8s-Tm9eHze_ykmV)z|ez-0FuXEpsNW%7`XIOVN`2 z{nkH|_eCBRZWr%=oWV&%ACzNR06vYr(|uW%=_H55e0)xA`G9=LhL=(w*Aw@~C096q 
z)v#9CGHK58i4fVN-Ef{W>o9I79X_DBHWa}>UOrRh9=z&~JoGL@<&a+Tb5epFzOm6J zW5xX%-AVKXrb6iem0_utceXiCM=E4y`))(;%h9&SKjHkiCCTAz5I9jD+PIh*X+Z4!EG z*x1c*fZ(>{+47}_3dPthGUEPwStqA#P8L;KZf(+imb^S%`jawaS7*Z6riie$kKTOl z%w5(`SwTKG`Qv=7p@!96)FTCNKO$xK(%HgL`=9CEahnBuaXl@E%WopBi1x1GZRtDsgC9RB6+VQIbfi`f@w*)ol8S=@(1p zkE`sBt)pWK9+|Hij!}(zWHE(_07ZxUn?F6L@Ri>6q_dKVxT$>g`v*_&_moho+8j}B zqbnCP(x3thq$d23RVJRS+YvjI5|S2fw}uw^@%SD$W5;;-!|1gx(&eEzNtc1Mhwp9E zVD?ky%3lk;t(Sp_MRV*=!lbZX%EX4QZPreUcY`(``~5u zvJlgXp~5tQ8H2`;`!mgs;+;Cx#UkuPk1d&yDisQFuNLAf8TbjXn-nRMtKNq%yBoNE zxsyQJ=j_Ccd?ytmMk(#KhM};fP*O@8H)yR06>Eo3^YK7xt&z5j37*=V`&T=0CATEFpNS| z>2wT@Lc?IOR66>99OGN{KZO3B{vd>cK%W0E26aBm5>mV=)aQD$7T2Y>zdh}cOJTs!b(Dd)^dG(FESE61)+&l3Xz7sF>S* zGJ3f(vG+1MFp&YXVN|L+&f|Ib*6|6jIA{wDjszt(@;dV-h#7lVK3PlLbLFYqtukA?uA z{)@qH^ryizI*m>R$p{rkCBqmx4hG2}h^C++kP6ZeItai383&>P7{pLv3=IM>R2q#8 zq9F)mVE^a{;9K=Szzz1_)&DT>{C{!q9MB1~H?m+Gy;8i=8T2TPEM*m&GE9{VYXnV&r;O6|f!RW?HME4RHjk(eAUc41ftgNl*ktnZ-bDM?bG@XJ^Nl*K<^uVFocvOu z)#VPz2cPz>%DYK2Exotr3G411pB3wkSr$DW{Buq=aPz?4ks~H9OZ~PMTsz&kF`!3$ zTG=+Nxm-~3)qt|;b)`66X*Y+tLMAeB%uW>F;HE+qjI3PZ9 zm1rSqP@#OK=2W#?&zsIZm&;Xok}lcKgX2jJF4sn)=T05AI&rW@p;5Ra?qqbw*_V;I z0lQw9rnvAcT5kz&J`>9{Rd?+KZ_j}8(e$~f8hdtDj5+!$wnXiSQJw6)}kRUoyC zfN$obY-?bqW(tJ$-*H*C>SpP(A6=o(Ungsy)dnMuedp(F5VH}PqB>>oq4b>*3jKzz zdyFO`@+03LF`j6X@p~nu{?6NYduYHS5P&#ch<6w4~0#}jK+lq=*#u{$(s{r3gTPc#x|^^ z=w(hke<*ZwyPP_wn1hzP*lj$n-qNXEcdBO4s#oK~+%_S_*==W~YVw3;-d_?zImI*j zCM2fq>H|Hh_>?cI%#UKGIIrfLyvi#*UyD7Ny8}u}s15{vWt=dgg8>0L7GQuh2*jc=e=x>3{2v5?-`D@#^&iah|HUA2rfe%RxmRCp zA1=T^s?o-yUu zhOC(#g21pU=gLQ%Rcz#jHKv>2==8_O9_rUk5|a&<>I2TO?wqfIklx5Gg+6IY^{S;( zW5J!3Ez)h)({F1!eQJfB&+P16^`HWUr?hr7m>%Gh6rR4x`Q&%}cH56;QVrK?weId~ z5Bj-{zxPyqg}tI$)QXRtC|P^0UeZY2DM3LQiQ*uy7f)#Ga>a7|s;Wj~T+ZJcmxj>_{uuc%7uw_SDwc<`9cU>bG0g;k6&P z^F?rAXhZl!@*SPYJ4#OTa}vR;?(%CDDP{~t9Ff>`W2$~t_vryvY6S8)Z)1t&iEPLRVWr?cx zUHYNNDI-aqS=U1ljtwx*oFWt3y_!47Ytc9{^Yi9OuCg8@ronEk!E-aod*T|NPZZUP zWNlpkys1yYUNo`!{bZVG^b))@7BAKoQ;vIMos{UrpCXAdzoBd2Q?2djlDW%X`;x-( zThTXs?zr`{g0J3iIF`AEVY)=@bt8xbLyvZ{TXjt!cL^<1PB((-6{hl{ZzZ0 z4w_vUSi7HZB7)SX_m1C9#;;)VhqE&sp(}c?DOTrxZVqM_UNJCi#yge>vc_abOHDq? z&exUX&2P^xDVM&5C)E?gePkV7FPUH5;ykWB9y2FeM%bR*H$EZXKXI|*ilOvQzGE|K z`*WH^L&Nl`ts?O9#!;npO4(K`#IIQMehz>P<9b}jna82;bt{+$0WI4atNorZCcCztG?TaPn zA)f=(>s~Ku2EO8ABB%mQ&QNRMgF4xn#2v&<#oMgkcR8%ce%3uZsiNOg;~j3}GS;vH zI}uSgm>(5rzf}WwJArDvB&fc3AN9#4lYIWLix;x@wT5UDKgDRZ&qeNuub|7wZDPN9 zIZ+Y*lA_yWoKqIFexx;HY3iT>X{T;*sL(b@HuUiD#8tfgmo3b+eYU5YKbrZ?UHv(- zFQt)l5c~QTZ8oropz`MH^jx&j{F1tnhJ&Y`Xar`*Lzmmi+8jgjyIL=ptvOgzJlk+@ z!~UM3mK>ug8HwFp99yf=HyMnigGGfNe{~Y%!Gi}69z1yP;K73j4<0;t@ZiCN2M-=R Yc<|uCg9i^DJpPaKUr7c%#{f_O02$#w+W-In diff --git a/internal/feature/registry.go b/internal/feature/registry.go index 7a9cbf560..620c9ec35 100644 --- a/internal/feature/registry.go +++ b/internal/feature/registry.go @@ -5,11 +5,13 @@ var Flag = New() // flag names are written in kebab-case const ( - ExampleFeature FlagName = "example-feature" + ExampleFeature FlagName = "example-feature" + DeprecateLegacyIndex FlagName = "deprecate-legacy-index" ) func init() { Flag.SetFlags(map[FlagName]FlagDesc{ - ExampleFeature: {Type: Alpha, Description: "just for testing"}, + ExampleFeature: {Type: Alpha, Description: "just for testing"}, + DeprecateLegacyIndex: {Type: Beta, Description: "disable support for index format used by restic 0.1.0. 
Use `restic repair index` to update the index if necessary."}, }) } diff --git a/internal/index/index.go b/internal/index/index.go index ecd481594..b571c55eb 100644 --- a/internal/index/index.go +++ b/internal/index/index.go @@ -3,12 +3,14 @@ package index import ( "context" "encoding/json" + "fmt" "io" "sync" "time" "github.com/restic/restic/internal/crypto" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/debug" @@ -515,6 +517,10 @@ func DecodeIndex(buf []byte, id restic.ID) (idx *Index, oldFormat bool, err erro debug.Log("Error %v", err) if isErrOldIndex(err) { + if feature.Flag.Enabled(feature.DeprecateLegacyIndex) { + return nil, false, fmt.Errorf("index seems to use the legacy format. update it using `restic repair index`") + } + debug.Log("index is probably old format, trying that") idx, err = decodeOldIndex(buf) return idx, err == nil, err diff --git a/internal/index/index_test.go b/internal/index/index_test.go index 4f0dbd2a0..78e4800ca 100644 --- a/internal/index/index_test.go +++ b/internal/index/index_test.go @@ -8,6 +8,7 @@ import ( "sync" "testing" + "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" @@ -427,6 +428,8 @@ func BenchmarkEncodeIndex(b *testing.B) { } func TestIndexUnserializeOld(t *testing.T) { + defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateLegacyIndex, false)() + idx, oldFormat, err := index.DecodeIndex(docOldExample, restic.NewRandomID()) rtest.OK(t, err) rtest.Assert(t, oldFormat, "old index format recognized as new format") From f8852f0eb6f63d08fddbabf19609fb530c67bb5e Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 9 Mar 2024 18:21:22 +0100 Subject: [PATCH 024/117] repair index: fix deletion of legacy indexes --- internal/index/index.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/index/index.go b/internal/index/index.go index b571c55eb..1fb2c155e 100644 --- a/internal/index/index.go +++ b/internal/index/index.go @@ -523,6 +523,7 @@ func DecodeIndex(buf []byte, id restic.ID) (idx *Index, oldFormat bool, err erro debug.Log("index is probably old format, trying that") idx, err = decodeOldIndex(buf) + idx.ids = append(idx.ids, id) return idx, err == nil, err } From 98a6817d013a96898718514132a752c95f81eebe Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 9 Mar 2024 18:35:00 +0100 Subject: [PATCH 025/117] add changelog for legacy index deprecation --- changelog/unreleased/issue-4602 | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 changelog/unreleased/issue-4602 diff --git a/changelog/unreleased/issue-4602 b/changelog/unreleased/issue-4602 new file mode 100644 index 000000000..3cba63876 --- /dev/null +++ b/changelog/unreleased/issue-4602 @@ -0,0 +1,13 @@ +Change: Deprecate legacy index format + +Support for the legacy index format used by restic before version 0.2.0 has +been depreacted and will be removed in the next minor restic version. You can +use `restic repair index` to update the index to the current format. + +It is possible to temporarily reenable support for the legacy index format by +setting the environment variable +`RESTIC_FEATURES=deprecate-legacy-index=false`. Note that this feature flag +will be removed in the next minor restic version. 
+ +https://github.com/restic/restic/issues/4602 +https://github.com/restic/restic/pull/4724 From 69ca12d2eba28b0744e30c5a367fddb607980b23 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 9 Mar 2024 18:36:33 +0100 Subject: [PATCH 026/117] check: treat legacy index format as errors --- cmd/restic/cmd_check.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index 990702b61..cbe388877 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -231,12 +231,17 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args errorsFound := false suggestIndexRebuild := false + suggestLegacyIndexRebuild := false mixedFound := false for _, hint := range hints { switch hint.(type) { - case *checker.ErrDuplicatePacks, *checker.ErrOldIndexFormat: + case *checker.ErrDuplicatePacks: Printf("%v\n", hint) suggestIndexRebuild = true + case *checker.ErrOldIndexFormat: + Warnf("error: %v\n", hint) + suggestLegacyIndexRebuild = true + errorsFound = true case *checker.ErrMixedPack: Printf("%v\n", hint) mixedFound = true @@ -247,7 +252,10 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args } if suggestIndexRebuild { - Printf("Duplicate packs/old indexes are non-critical, you can run `restic repair index' to correct this.\n") + Printf("Duplicate packs are non-critical, you can run `restic repair index' to correct this.\n") + } + if suggestLegacyIndexRebuild { + Warnf("Found indexes using the legacy format, you must run `restic repair index' to correct this.\n") } if mixedFound { Printf("Mixed packs with tree and data blobs are non-critical, you can run `restic prune` to correct this.\n") From 9f3e1462c0c2dd147c1774c6acd469b7d524ca5e Mon Sep 17 00:00:00 2001 From: Leo Heitmann Ruiz <148111793+leoheitmannruiz@users.noreply.github.com> Date: Sat, 9 Mar 2024 23:56:16 +0100 Subject: [PATCH 027/117] Minor README.md cleanups --- README.md | 18 +++++++----------- 1 file changed, 7 insertions(+), 11 deletions(-) diff --git a/README.md b/README.md index ad6b13cef..ef12f3e1b 100644 --- a/README.md +++ b/README.md @@ -10,8 +10,7 @@ For detailed usage and installation instructions check out the [documentation](h You can ask questions in our [Discourse forum](https://forum.restic.net). -Quick start ------------ +## Quick start Once you've [installed](https://restic.readthedocs.io/en/latest/020_installation.html) restic, start off with creating a repository for your backups: @@ -59,7 +58,7 @@ Therefore, restic supports the following backends for storing backups natively: Restic is a program that does backups right and was designed with the following principles in mind: -- **Easy:** Doing backups should be a frictionless process, otherwise +- **Easy**: Doing backups should be a frictionless process, otherwise you might be tempted to skip it. Restic should be easy to configure and use, so that, in the event of a data loss, you can just restore it. Likewise, restoring data should not be complicated. @@ -92,20 +91,17 @@ reproduce a byte identical version from the source code for that release. Instructions on how to do that are contained in the [builder repository](https://github.com/restic/builder). 
-News ----- +## News -You can follow the restic project on Mastodon [@resticbackup](https://fosstodon.org/@restic) or by subscribing to +You can follow the restic project on Mastodon [@resticbackup](https://fosstodon.org/@restic) or subscribe to the [project blog](https://restic.net/blog/). -License -------- +## License Restic is licensed under [BSD 2-Clause License](https://opensource.org/licenses/BSD-2-Clause). You can find the -complete text in [``LICENSE``](LICENSE). +complete text in [`LICENSE`](LICENSE). -Sponsorship ------------ +## Sponsorship Backend integration tests for Google Cloud Storage and Microsoft Azure Blob Storage are sponsored by [AppsCode](https://appscode.com)! From 00f762373fa625659f411bfa6a4ba5d590599bb3 Mon Sep 17 00:00:00 2001 From: Leo Heitmann Ruiz <148111793+leoheitmannruiz@users.noreply.github.com> Date: Sun, 10 Mar 2024 00:20:26 +0100 Subject: [PATCH 028/117] Capitalize Homebrew --- doc/020_installation.rst | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/doc/020_installation.rst b/doc/020_installation.rst index 0f1cd6c04..b21a2a66f 100644 --- a/doc/020_installation.rst +++ b/doc/020_installation.rst @@ -77,8 +77,7 @@ avoid any conflicts: macOS ===== -If you are using macOS, you can install restic using the -`homebrew `__ package manager: +If you are using macOS, you can install restic using `Homebrew `__: .. code-block:: console From ac948fccda125156e3e03768ec22f75974aceee3 Mon Sep 17 00:00:00 2001 From: avoidalone Date: Mon, 11 Mar 2024 14:35:12 +0800 Subject: [PATCH 029/117] fix some typos Signed-off-by: avoidalone --- CHANGELOG.md | 2 +- changelog/0.10.0_2020-09-19/pull-2195 | 2 +- doc/faq.rst | 2 +- internal/restorer/doc.go | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b8969a443..5fea763e3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3488,7 +3488,7 @@ restic users. The changes are ordered by importance. NOTE: This new implementation does not guarantee order in which blobs are written to the target files and, for example, the last blob of a file can be - written to the file before any of the preceeding file blobs. It is therefore + written to the file before any of the preceding file blobs. It is therefore possible to have gaps in the data written to the target files if restore fails or interrupted by the user. diff --git a/changelog/0.10.0_2020-09-19/pull-2195 b/changelog/0.10.0_2020-09-19/pull-2195 index a139aa4e1..7898568fa 100644 --- a/changelog/0.10.0_2020-09-19/pull-2195 +++ b/changelog/0.10.0_2020-09-19/pull-2195 @@ -10,7 +10,7 @@ https://github.com/restic/restic/issues/2244 NOTE: This new implementation does not guarantee order in which blobs are written to the target files and, for example, the last blob of a -file can be written to the file before any of the preceeding file blobs. +file can be written to the file before any of the preceding file blobs. It is therefore possible to have gaps in the data written to the target files if restore fails or interrupted by the user. diff --git a/doc/faq.rst b/doc/faq.rst index e8ef2de5e..8e56b5d9e 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -74,7 +74,7 @@ $ restic backup --exclude "~/documents" ~ This command will result in a complete backup of the current logged in user's home directory and it won't exclude the folder ``~/documents/`` - which is not what the user wanted to achieve. The problem is how the path to ``~/documents`` is passed to restic. 
-In order to spot an issue like this, you can make use of the following ruby command preceeding your restic command. +In order to spot an issue like this, you can make use of the following ruby command preceding your restic command. :: diff --git a/internal/restorer/doc.go b/internal/restorer/doc.go index 8d68d7161..e230f23f0 100644 --- a/internal/restorer/doc.go +++ b/internal/restorer/doc.go @@ -18,7 +18,7 @@ // // Implementation does not guarantee order in which blobs are written to the // target files and, for example, the last blob of a file can be written to the -// file before any of the preceeding file blobs. It is therefore possible to +// file before any of the preceding file blobs. It is therefore possible to // have gaps in the data written to the target files if restore fails or // interrupted by the user. package restorer From 521713fc94fba9f4d103725c198e487ac94f0881 Mon Sep 17 00:00:00 2001 From: Facundo Tuesca Date: Sat, 16 Mar 2024 18:54:27 +0100 Subject: [PATCH 030/117] doc: Add instructions to configure PowerShell completions --- doc/020_installation.rst | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/doc/020_installation.rst b/doc/020_installation.rst index b21a2a66f..da9ed8a3f 100644 --- a/doc/020_installation.rst +++ b/doc/020_installation.rst @@ -362,3 +362,18 @@ Example for using sudo to write a zsh completion script directly to the system-w the operating system used, e.g. ``/usr/share/bash-completion/completions/restic`` in Debian and derivatives. Please look up the correct path in the appropriate documentation. + +Example for setting up a powershell completion script for the local user's profile: + +.. code-block:: pwsh-session + + # Create profile if one does not exist + PS> If (!(Test-Path $PROFILE.CurrentUserAllHosts)) {New-Item -Path $PROFILE.CurrentUserAllHosts -Force} + + PS> $ProfileDir = (Get-Item $PROFILE.CurrentUserAllHosts).Directory + + # Generate Restic completions in the same directory as the profile + PS> restic generate --powershell-completion "$ProfileDir\restic-completion.ps1" + + # Append to the profile file the command to load Restic completions + PS> Add-Content -Path $Profile.CurrentUserAllHosts -Value "`r`nImport-Module $ProfileDir\restic-completion.ps1" From 5c4a4b4a30951f54fae5befdc834f4671464ddcc Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 28 Mar 2024 17:09:59 +0100 Subject: [PATCH 031/117] CI: Allow golangci-lint to annotate PRs --- .github/workflows/tests.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 134656d8a..e03ee3264 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -247,6 +247,10 @@ jobs: lint: name: lint runs-on: ubuntu-latest + permissions: + contents: read + # allow annotating code in the PR + checks: write steps: - name: Set up Go ${{ env.latest_go }} uses: actions/setup-go@v5 From 6e775d3787d4614e2805f15e46d62f1ed67195e6 Mon Sep 17 00:00:00 2001 From: Adam Eijdenberg Date: Mon, 29 Jan 2024 14:23:56 +1100 Subject: [PATCH 032/117] Enhancement: option to send HTTP over unix socket add tests for unix socket connection switch HTTP rest-server test to use any free port allow rest-server test graceful shutdown opportunity --- doc/030_preparing_a_new_repo.rst | 7 +- go.mod | 1 + go.sum | 13 ++ internal/backend/http_transport.go | 3 + internal/backend/rest/config_test.go | 7 + internal/backend/rest/rest_test.go | 162 ++++++++++++++++++------ internal/backend/rest/rest_unix_test.go | 30 +++++ 7 files 
changed, 182 insertions(+), 41 deletions(-) create mode 100644 internal/backend/rest/rest_unix_test.go diff --git a/doc/030_preparing_a_new_repo.rst b/doc/030_preparing_a_new_repo.rst index 8661f5904..0c50b65be 100644 --- a/doc/030_preparing_a_new_repo.rst +++ b/doc/030_preparing_a_new_repo.rst @@ -201,15 +201,16 @@ scheme like this: $ restic -r rest:http://host:8000/ init Depending on your REST server setup, you can use HTTPS protocol, -password protection, multiple repositories or any combination of -those features. The TCP/IP port is also configurable. Here -are some more examples: +unix socket, password protection, multiple repositories or any +combination of those features. The TCP/IP port is also configurable. +Here are some more examples: .. code-block:: console $ restic -r rest:https://host:8000/ init $ restic -r rest:https://user:pass@host:8000/ init $ restic -r rest:https://user:pass@host:8000/my_backup_repo/ init + $ restic -r rest:http+unix:///tmp/rest.socket:/my_backup_repo/ init The server username and password can be specified using environment variables as well: diff --git a/go.mod b/go.mod index 6e546974e..7121cdac4 100644 --- a/go.mod +++ b/go.mod @@ -17,6 +17,7 @@ require ( github.com/minio/minio-go/v7 v7.0.66 github.com/minio/sha256-simd v1.0.1 github.com/ncw/swift/v2 v2.0.2 + github.com/peterbourgon/unixtransport v0.0.4 github.com/pkg/errors v0.9.1 github.com/pkg/profile v1.7.0 github.com/pkg/sftp v1.13.6 diff --git a/go.sum b/go.sum index 668d6a339..1cd51cbac 100644 --- a/go.sum +++ b/go.sum @@ -128,6 +128,7 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= github.com/minio/minio-go/v7 v7.0.66 h1:bnTOXOHjOqv/gcMuiVbN9o2ngRItvqE774dG9nq0Dzw= @@ -141,6 +142,11 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/ncw/swift/v2 v2.0.2 h1:jx282pcAKFhmoZBSdMcCRFn9VWkoBIRsCpe+yZq7vEk= github.com/ncw/swift/v2 v2.0.2/go.mod h1:z0A9RVdYPjNjXVo2pDOPxZ4eu3oarO1P91fTItcb+Kg= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/peterbourgon/ff/v3 v3.3.1/go.mod h1:zjJVUhx+twciwfDl0zBcFzl4dW8axCRyXE/eKY9RztQ= +github.com/peterbourgon/unixtransport v0.0.4 h1:UTF0FxXCAglvoZz9jaGPYjEg52DjBLDYGMJvJni6Tfw= +github.com/peterbourgon/unixtransport v0.0.4/go.mod h1:o8aUkOCa8W/BIXpi15uKvbSabjtBh0JhSOJGSfoOhAU= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -210,6 +216,7 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -221,6 +228,7 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -231,6 +239,7 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -246,12 +255,14 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -271,6 +282,7 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod 
h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200423201157-2723c5de0d66/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -315,6 +327,7 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33 gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/internal/backend/http_transport.go b/internal/backend/http_transport.go index 9ee1c91f1..19b20dc6a 100644 --- a/internal/backend/http_transport.go +++ b/internal/backend/http_transport.go @@ -10,6 +10,7 @@ import ( "strings" "time" + "github.com/peterbourgon/unixtransport" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" ) @@ -82,6 +83,8 @@ func Transport(opts TransportOptions) (http.RoundTripper, error) { TLSClientConfig: &tls.Config{}, } + unixtransport.Register(tr) + if opts.InsecureTLS { tr.TLSClientConfig.InsecureSkipVerify = true } diff --git a/internal/backend/rest/config_test.go b/internal/backend/rest/config_test.go index 23ea9095b..13a1ebb13 100644 --- a/internal/backend/rest/config_test.go +++ b/internal/backend/rest/config_test.go @@ -31,6 +31,13 @@ var configTests = []test.ConfigTestData[Config]{ Connections: 5, }, }, + { + S: "rest:http+unix:///tmp/rest.socket:/my_backup_repo/", + Cfg: Config{ + URL: parseURL("http+unix:///tmp/rest.socket:/my_backup_repo/"), + Connections: 5, + }, + }, } func TestParseConfig(t *testing.T) { diff --git a/internal/backend/rest/rest_test.go b/internal/backend/rest/rest_test.go index 6a5b4f8a5..93b9a103e 100644 --- a/internal/backend/rest/rest_test.go +++ b/internal/backend/rest/rest_test.go @@ -1,11 +1,18 @@ +//go:build go1.20 +// +build go1.20 + package rest_test import ( + "bufio" "context" - "net" + "fmt" "net/url" "os" "os/exec" + "regexp" + "strings" + "syscall" "testing" "time" @@ -14,54 +21,133 @@ import ( rtest "github.com/restic/restic/internal/test" ) -func runRESTServer(ctx context.Context, t testing.TB, dir string) (*url.URL, func()) { +var ( + serverStartedRE = regexp.MustCompile("^start server on (.*)$") +) + +func runRESTServer(ctx context.Context, t testing.TB, dir, reqListenAddr string) (*url.URL, func()) { srv, err := exec.LookPath("rest-server") if err != nil { t.Skip(err) } - cmd := exec.CommandContext(ctx, srv, "--no-auth", "--path", dir) + // create our own context, so that our cleanup can cancel and wait for completion + // this will ensure any open ports, open unix sockets etc are properly closed + 
processCtx, cancel := context.WithCancel(ctx) + cmd := exec.CommandContext(processCtx, srv, "--no-auth", "--path", dir, "--listen", reqListenAddr) + + // this cancel func is called by when the process context is done + cmd.Cancel = func() error { + // we execute in a Go-routine as we know the caller will + // be waiting on a .Wait() regardless + go func() { + // try to send a graceful termination signal + if cmd.Process.Signal(syscall.SIGTERM) == nil { + // if we succeed, then wait a few seconds + time.Sleep(2 * time.Second) + } + // and then make sure it's killed either way, ignoring any error code + _ = cmd.Process.Kill() + }() + return nil + } + + // this is the cleanup function that we return the caller, + // which will cancel our process context, and then wait for it to finish + cleanup := func() { + cancel() + _ = cmd.Wait() + } + + // but in-case we don't finish this method, e.g. by calling t.Fatal() + // we also defer a call to clean it up ourselves, guarded by a flag to + // indicate that we returned the function to the caller to deal with. + callerWillCleanUp := false + defer func() { + if !callerWillCleanUp { + cleanup() + } + }() + + // send stdout to our std out cmd.Stdout = os.Stdout - cmd.Stderr = os.Stdout - if err := cmd.Start(); err != nil { - t.Fatal(err) - } - // wait until the TCP port is reachable - var success bool - for i := 0; i < 10; i++ { - time.Sleep(200 * time.Millisecond) - - c, err := net.Dial("tcp", "localhost:8000") - if err != nil { - continue - } - - success = true - if err := c.Close(); err != nil { - t.Fatal(err) - } - } - - if !success { - t.Fatal("unable to connect to rest server") - return nil, nil - } - - url, err := url.Parse("http://localhost:8000/restic-test/") + // capture stderr with a pipe, as we want to examine this output + // to determine when the server is started and listening. + cmdErr, err := cmd.StderrPipe() if err != nil { t.Fatal(err) } - cleanup := func() { - if err := cmd.Process.Kill(); err != nil { - t.Fatal(err) - } - - // ignore errors, we've killed the process - _ = cmd.Wait() + // start the rest-server + if err := cmd.Start(); err != nil { + t.Fatal(err) } + // create a channel to receive the actual listen address on + listenAddrCh := make(chan string) + go func() { + defer close(listenAddrCh) + matched := false + br := bufio.NewReader(cmdErr) + for { + line, err := br.ReadString('\n') + if err != nil { + // we ignore errors, as code that relies on this + // will happily fail via timeout and empty closed + // channel. 
+ return + } + + line = strings.Trim(line, "\r\n") + if !matched { + // look for the server started message, and return the address + // that it's listening on + matchedServerListen := serverStartedRE.FindSubmatch([]byte(line)) + if len(matchedServerListen) == 2 { + listenAddrCh <- string(matchedServerListen[1]) + matched = true + } + } + fmt.Fprintln(os.Stdout, line) // print all output to console + } + }() + + // wait for us to get an address, + // or the parent context to cancel, + // or for us to timeout + var actualListenAddr string + select { + case <-processCtx.Done(): + t.Fatal(context.Canceled) + case <-time.NewTimer(2 * time.Second).C: + t.Fatal(context.DeadlineExceeded) + case a, ok := <-listenAddrCh: + if !ok { + t.Fatal(context.Canceled) + } + actualListenAddr = a + } + + // this translate the address that the server is listening on + // to a URL suitable for us to connect to + var addrToConnectTo string + if strings.HasPrefix(reqListenAddr, "unix:") { + addrToConnectTo = fmt.Sprintf("http+unix://%s:/restic-test/", actualListenAddr) + } else { + // while we may listen on 0.0.0.0, we connect to localhost + addrToConnectTo = fmt.Sprintf("http://%s/restic-test/", strings.Replace(actualListenAddr, "0.0.0.0", "localhost", 1)) + } + + // parse to a URL + url, err := url.Parse(addrToConnectTo) + if err != nil { + t.Fatal(err) + } + + // indicate that we've completed successfully, and that the caller + // is responsible for calling cleanup + callerWillCleanUp = true return url, cleanup } @@ -91,7 +177,7 @@ func TestBackendREST(t *testing.T) { defer cancel() dir := rtest.TempDir(t) - serverURL, cleanup := runRESTServer(ctx, t, dir) + serverURL, cleanup := runRESTServer(ctx, t, dir, ":0") defer cleanup() newTestSuite(serverURL, false).RunTests(t) @@ -116,7 +202,7 @@ func BenchmarkBackendREST(t *testing.B) { defer cancel() dir := rtest.TempDir(t) - serverURL, cleanup := runRESTServer(ctx, t, dir) + serverURL, cleanup := runRESTServer(ctx, t, dir, ":0") defer cleanup() newTestSuite(serverURL, false).RunBenchmarks(t) diff --git a/internal/backend/rest/rest_unix_test.go b/internal/backend/rest/rest_unix_test.go new file mode 100644 index 000000000..85ef7a73d --- /dev/null +++ b/internal/backend/rest/rest_unix_test.go @@ -0,0 +1,30 @@ +//go:build !windows && go1.20 +// +build !windows,go1.20 + +package rest_test + +import ( + "context" + "fmt" + "path" + "testing" + + rtest "github.com/restic/restic/internal/test" +) + +func TestBackendRESTWithUnixSocket(t *testing.T) { + defer func() { + if t.Skipped() { + rtest.SkipDisallowed(t, "restic/backend/rest.TestBackendREST") + } + }() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + dir := rtest.TempDir(t) + serverURL, cleanup := runRESTServer(ctx, t, path.Join(dir, "data"), fmt.Sprintf("unix:%s", path.Join(dir, "sock"))) + defer cleanup() + + newTestSuite(serverURL, false).RunTests(t) +} From add37fcd9f58bb59c1e079ca13f1f35a1039a4ea Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 28 Mar 2024 16:57:36 +0100 Subject: [PATCH 033/117] CI: uses rest-server from master branch until unix sockets are released --- .github/workflows/tests.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 134656d8a..ba11d7fd2 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -74,7 +74,7 @@ jobs: - name: Get programs (Linux/macOS) run: | echo "build Go tools" - go install 
github.com/restic/rest-server/cmd/rest-server@latest + go install github.com/restic/rest-server/cmd/rest-server@master echo "install minio server" mkdir $HOME/bin @@ -106,7 +106,7 @@ jobs: $ProgressPreference = 'SilentlyContinue' echo "build Go tools" - go install github.com/restic/rest-server/... + go install github.com/restic/rest-server/cmd/rest-server@master echo "install minio server" mkdir $Env:USERPROFILE/bin From 6ac751918806921b8949da9bca480b7014d5eca8 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 28 Mar 2024 17:40:02 +0100 Subject: [PATCH 034/117] add changelog for rest unix socket support --- changelog/unreleased/issue-4287 | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 changelog/unreleased/issue-4287 diff --git a/changelog/unreleased/issue-4287 b/changelog/unreleased/issue-4287 new file mode 100644 index 000000000..df4fc5590 --- /dev/null +++ b/changelog/unreleased/issue-4287 @@ -0,0 +1,14 @@ +Enhancement: support connection to rest-server using unix socket + +Restic now supports connecting to rest-server using a unix socket for +rest-server version 0.13.0 or later. + +This allows running restic as follows: + +``` +rest-server --listen unix:/tmp/rest.socket --data /path/to/data & +restic -r rest:http+unix:///tmp/rest.socket:/my_backup_repo/ [...] +``` + +https://github.com/restic/restic/issues/4287 +https://github.com/restic/restic/pull/4655 From aee6d311f141d49ac620f71254d7f35f4d349f2a Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 28 Mar 2024 17:56:13 +0100 Subject: [PATCH 035/117] CI: update docker actions --- .github/workflows/docker.yml | 8 ++++---- .github/workflows/tests.yml | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index d9df0b8ba..71cfe1691 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -25,7 +25,7 @@ jobs: uses: actions/checkout@v4 - name: Log in to the Container registry - uses: docker/login-action@5139682d94efc37792e6b54386b5b470a68a4737 + uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 with: registry: ${{ env.REGISTRY }} username: ${{ github.actor }} @@ -33,7 +33,7 @@ jobs: - name: Extract metadata (tags, labels) for Docker id: meta - uses: docker/metadata-action@9ec57ed1fcdbf14dcef7dfbe97b2010124a938b7 + uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 with: images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} tags: | @@ -45,7 +45,7 @@ jobs: uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 + uses: docker/setup-buildx-action@2b51285047da1547ffb1b2203d8be4c0af6b1f20 - name: Ensure consistent binaries run: | @@ -55,7 +55,7 @@ jobs: if: github.ref != 'refs/heads/master' - name: Build and push Docker image - uses: docker/build-push-action@f2a1d5e99d037542a71f64918e516c093c6f3fc4 + uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 with: push: true context: . 
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 134656d8a..d8f5b308b 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -298,7 +298,7 @@ jobs: - name: Docker meta id: meta - uses: docker/metadata-action@v4 + uses: docker/metadata-action@v5 with: # list of Docker images to use as base name for tags images: | @@ -321,7 +321,7 @@ jobs: - name: Build and push id: docker_build - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 with: push: false context: . From 1497525e1555e2cf1aa149f3f1aef142ced39fc2 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 28 Mar 2024 18:15:50 +0100 Subject: [PATCH 036/117] CI: Update golangci-lint to version 1.57.1 --- .github/workflows/tests.yml | 2 +- cmd/restic/cmd_backup_integration_test.go | 2 ++ cmd/restic/cmd_tag_integration_test.go | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index e03ee3264..7c41fe4e5 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -264,7 +264,7 @@ jobs: uses: golangci/golangci-lint-action@v4 with: # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. - version: v1.56.1 + version: v1.57.1 args: --verbose --timeout 5m # only run golangci-lint for pull requests, otherwise ALL hints get diff --git a/cmd/restic/cmd_backup_integration_test.go b/cmd/restic/cmd_backup_integration_test.go index c60e9c543..0bc4a9eaa 100644 --- a/cmd/restic/cmd_backup_integration_test.go +++ b/cmd/restic/cmd_backup_integration_test.go @@ -406,6 +406,7 @@ func TestIncrementalBackup(t *testing.T) { t.Logf("repository grown by %d bytes", stat3.size-stat2.size) } +// nolint: staticcheck // false positive nil pointer dereference check func TestBackupTags(t *testing.T) { env, cleanup := withTestEnvironment(t) defer cleanup() @@ -441,6 +442,7 @@ func TestBackupTags(t *testing.T) { "expected parent to be %v, got %v", parent.ID, newest.Parent) } +// nolint: staticcheck // false positive nil pointer dereference check func TestBackupProgramVersion(t *testing.T) { env, cleanup := withTestEnvironment(t) defer cleanup() diff --git a/cmd/restic/cmd_tag_integration_test.go b/cmd/restic/cmd_tag_integration_test.go index 3b902c51e..6979f9c11 100644 --- a/cmd/restic/cmd_tag_integration_test.go +++ b/cmd/restic/cmd_tag_integration_test.go @@ -12,6 +12,7 @@ func testRunTag(t testing.TB, opts TagOptions, gopts GlobalOptions) { rtest.OK(t, runTag(context.TODO(), opts, gopts, []string{})) } +// nolint: staticcheck // false positive nil pointer dereference check func TestTag(t *testing.T) { env, cleanup := withTestEnvironment(t) defer cleanup() From 15555c9898c83838f80e29c3f1629568da0ca2b7 Mon Sep 17 00:00:00 2001 From: Facundo Tuesca Date: Thu, 28 Mar 2024 18:35:18 +0100 Subject: [PATCH 037/117] doc: Use consistent case for PROFILE env variable in PowerShell --- doc/020_installation.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/020_installation.rst b/doc/020_installation.rst index da9ed8a3f..17b581a87 100644 --- a/doc/020_installation.rst +++ b/doc/020_installation.rst @@ -376,4 +376,4 @@ Example for setting up a powershell completion script for the local user's profi PS> restic generate --powershell-completion "$ProfileDir\restic-completion.ps1" # Append to the profile file the command to load Restic completions - PS> Add-Content -Path $Profile.CurrentUserAllHosts -Value "`r`nImport-Module 
$ProfileDir\restic-completion.ps1" + PS> Add-Content -Path $PROFILE.CurrentUserAllHosts -Value "`r`nImport-Module $ProfileDir\restic-completion.ps1" From 2ba21fe72bf93f4caee3de1b2ea58bf0a50c7a64 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 9 Mar 2024 17:38:41 +0100 Subject: [PATCH 038/117] archiver: only store deviceID for hardlinks The deviceID can change e.g. when backing up from filesystem snapshot. It is only used for hardlink detection. Thus there it is not necessary to store it for everything else. --- internal/archiver/archiver.go | 6 ++++++ internal/archiver/archiver_test.go | 1 + internal/restic/node.go | 2 +- 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 77ddba7c4..63a1691b3 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -188,6 +188,12 @@ func (arch *Archiver) nodeFromFileInfo(snPath, filename string, fi os.FileInfo) if !arch.WithAtime { node.AccessTime = node.ModTime } + if node.Links == 1 || node.Type == "dir" { + // the DeviceID is only necessary for hardlinked files + // when using subvolumes or snapshots their deviceIDs tend to change which causes + // restic to upload new tree blobs + node.DeviceID = 0 + } // overwrite name to match that within the snapshot node.Name = path.Base(snPath) if err != nil { diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 46ef44251..3d50e555f 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -2153,6 +2153,7 @@ func TestMetadataChanged(t *testing.T) { sn, node2 := snapshot(t, repo, fs, nil, "testfile") // set some values so we can then compare the nodes + want.DeviceID = 0 want.Content = node2.Content want.Path = "" if len(want.ExtendedAttributes) == 0 { diff --git a/internal/restic/node.go b/internal/restic/node.go index cbe9ef363..e7688aada 100644 --- a/internal/restic/node.go +++ b/internal/restic/node.go @@ -82,7 +82,7 @@ type Node struct { User string `json:"user,omitempty"` Group string `json:"group,omitempty"` Inode uint64 `json:"inode,omitempty"` - DeviceID uint64 `json:"device_id,omitempty"` // device id of the file, stat.st_dev + DeviceID uint64 `json:"device_id,omitempty"` // device id of the file, stat.st_dev, only stored for hardlinks Size uint64 `json:"size,omitempty"` Links uint64 `json:"links,omitempty"` LinkTarget string `json:"linktarget,omitempty"` From a26d6ffa720af4dde2504c560ce838470bdf21ab Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 9 Mar 2024 17:44:48 +0100 Subject: [PATCH 039/117] archiver: move deviceID handling behind feature flag --- internal/archiver/archiver.go | 13 ++++++++----- internal/archiver/archiver_test.go | 3 +++ internal/feature/registry.go | 2 ++ 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 63a1691b3..19d16c4d3 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -12,6 +12,7 @@ import ( "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" "golang.org/x/sync/errgroup" @@ -188,11 +189,13 @@ func (arch *Archiver) nodeFromFileInfo(snPath, filename string, fi os.FileInfo) if !arch.WithAtime { node.AccessTime = node.ModTime } - if node.Links == 1 || node.Type == "dir" { - // the DeviceID is only necessary for 
hardlinked files - // when using subvolumes or snapshots their deviceIDs tend to change which causes - // restic to upload new tree blobs - node.DeviceID = 0 + if feature.Flag.Enabled(feature.DeviceIDForHardlinks) { + if node.Links == 1 || node.Type == "dir" { + // the DeviceID is only necessary for hardlinked files + // when using subvolumes or snapshots their deviceIDs tend to change which causes + // restic to upload new tree blobs + node.DeviceID = 0 + } } // overwrite name to match that within the snapshot node.Name = path.Base(snPath) diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 3d50e555f..411994911 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -19,6 +19,7 @@ import ( "github.com/restic/restic/internal/backend/mem" "github.com/restic/restic/internal/checker" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" @@ -2125,6 +2126,8 @@ const ( ) func TestMetadataChanged(t *testing.T) { + defer feature.TestSetFlag(t, feature.Flag, feature.DeviceIDForHardlinks, true)() + files := TestDir{ "testfile": TestFile{ Content: "foo bar test file", diff --git a/internal/feature/registry.go b/internal/feature/registry.go index 620c9ec35..6a9874786 100644 --- a/internal/feature/registry.go +++ b/internal/feature/registry.go @@ -7,11 +7,13 @@ var Flag = New() const ( ExampleFeature FlagName = "example-feature" DeprecateLegacyIndex FlagName = "deprecate-legacy-index" + DeviceIDForHardlinks FlagName = "device-id-for-hardlinks" ) func init() { Flag.SetFlags(map[FlagName]FlagDesc{ ExampleFeature: {Type: Alpha, Description: "just for testing"}, DeprecateLegacyIndex: {Type: Beta, Description: "disable support for index format used by restic 0.1.0. Use `restic repair index` to update the index if necessary."}, + DeviceIDForHardlinks: {Type: Alpha, Description: "store deviceID only for hardlinks to reduce metadata changes for example when using btrfs subvolumes. Will be removed in a future restic version after repository format 3 is available"}, }) } From a9b3d86c4f4d90a7b48ee02da6bf328b95ef4332 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 28 Mar 2024 18:44:45 +0100 Subject: [PATCH 040/117] features: remove example feature --- internal/feature/registry.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/internal/feature/registry.go b/internal/feature/registry.go index 6a9874786..4693b8909 100644 --- a/internal/feature/registry.go +++ b/internal/feature/registry.go @@ -5,14 +5,12 @@ var Flag = New() // flag names are written in kebab-case const ( - ExampleFeature FlagName = "example-feature" DeprecateLegacyIndex FlagName = "deprecate-legacy-index" DeviceIDForHardlinks FlagName = "device-id-for-hardlinks" ) func init() { Flag.SetFlags(map[FlagName]FlagDesc{ - ExampleFeature: {Type: Alpha, Description: "just for testing"}, DeprecateLegacyIndex: {Type: Beta, Description: "disable support for index format used by restic 0.1.0. Use `restic repair index` to update the index if necessary."}, DeviceIDForHardlinks: {Type: Alpha, Description: "store deviceID only for hardlinks to reduce metadata changes for example when using btrfs subvolumes. 
Will be removed in a future restic version after repository format 3 is available"}, }) From d705741571530d4a9b1b3ee61dddeaf211aea393 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 28 Mar 2024 19:11:56 +0100 Subject: [PATCH 041/117] backup: test that deviceID is only stored for hardlinks --- internal/archiver/archiver_unix_test.go | 48 +++++++++++++++++++++++++ internal/archiver/testing.go | 26 +++++++++++++- 2 files changed, 73 insertions(+), 1 deletion(-) diff --git a/internal/archiver/archiver_unix_test.go b/internal/archiver/archiver_unix_test.go index 7523f0749..2552b23e1 100644 --- a/internal/archiver/archiver_unix_test.go +++ b/internal/archiver/archiver_unix_test.go @@ -6,6 +6,12 @@ package archiver import ( "os" "syscall" + "testing" + + "github.com/restic/restic/internal/feature" + "github.com/restic/restic/internal/fs" + "github.com/restic/restic/internal/restic" + restictest "github.com/restic/restic/internal/test" ) type wrappedFileInfo struct { @@ -39,3 +45,45 @@ func wrapFileInfo(fi os.FileInfo) os.FileInfo { return res } + +func statAndSnapshot(t *testing.T, repo restic.Repository, name string) (*restic.Node, *restic.Node) { + fi := lstat(t, name) + want, err := restic.NodeFromFileInfo(name, fi) + restictest.OK(t, err) + + _, node := snapshot(t, repo, fs.Local{}, nil, name) + return want, node +} + +func TestHardlinkMetadata(t *testing.T) { + defer feature.TestSetFlag(t, feature.Flag, feature.DeviceIDForHardlinks, true)() + + files := TestDir{ + "testfile": TestFile{ + Content: "foo bar test file", + }, + "linktarget": TestFile{ + Content: "test file", + }, + "testlink": TestHardlink{ + Target: "./linktarget", + }, + "testdir": TestDir{}, + } + + tempdir, repo := prepareTempdirRepoSrc(t, files) + + back := restictest.Chdir(t, tempdir) + defer back() + + want, node := statAndSnapshot(t, repo, "testlink") + restictest.Assert(t, node.DeviceID == want.DeviceID, "device id mismatch expected %v got %v", want.DeviceID, node.DeviceID) + restictest.Assert(t, node.Links == want.Links, "link count mismatch expected %v got %v", want.Links, node.Links) + restictest.Assert(t, node.Inode == want.Inode, "inode mismatch expected %v got %v", want.Inode, node.Inode) + + _, node = statAndSnapshot(t, repo, "testfile") + restictest.Assert(t, node.DeviceID == 0, "device id mismatch for testfile expected %v got %v", 0, node.DeviceID) + + _, node = statAndSnapshot(t, repo, "testdir") + restictest.Assert(t, node.DeviceID == 0, "device id mismatch for testdir expected %v got %v", 0, node.DeviceID) +} diff --git a/internal/archiver/testing.go b/internal/archiver/testing.go index 111c1e68c..0bbd03a72 100644 --- a/internal/archiver/testing.go +++ b/internal/archiver/testing.go @@ -6,6 +6,7 @@ import ( "path" "path/filepath" "runtime" + "sort" "strings" "testing" "time" @@ -63,11 +64,29 @@ func (s TestSymlink) String() string { return "" } +// TestHardlink describes a hardlink created for a test. +type TestHardlink struct { + Target string +} + +func (s TestHardlink) String() string { + return "" +} + // TestCreateFiles creates a directory structure described by dir at target, // which must already exist. On Windows, symlinks aren't created. 
func TestCreateFiles(t testing.TB, target string, dir TestDir) { t.Helper() - for name, item := range dir { + + // ensure a stable order such that it can be guaranteed that a hardlink target already exists + var names []string + for name := range dir { + names = append(names, name) + } + sort.Strings(names) + + for _, name := range names { + item := dir[name] targetPath := filepath.Join(target, name) switch it := item.(type) { @@ -81,6 +100,11 @@ func TestCreateFiles(t testing.TB, target string, dir TestDir) { if err != nil { t.Fatal(err) } + case TestHardlink: + err := fs.Link(filepath.Join(target, filepath.FromSlash(it.Target)), targetPath) + if err != nil { + t.Fatal(err) + } case TestDir: err := fs.Mkdir(targetPath, 0755) if err != nil { From 21cf38fe96570e073b163a96ff6dedae073ad041 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 28 Mar 2024 19:25:52 +0100 Subject: [PATCH 042/117] add changelog for deviceID only for hardlinks --- changelog/unreleased/pull-4006 | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 changelog/unreleased/pull-4006 diff --git a/changelog/unreleased/pull-4006 b/changelog/unreleased/pull-4006 new file mode 100644 index 000000000..01f4ddb6e --- /dev/null +++ b/changelog/unreleased/pull-4006 @@ -0,0 +1,16 @@ +Enhancement: (alpha) Store deviceID only for hardlinks + +Set `RESTIC_FEATURES=device-id-for-hardlinks` to enable this alpha feature. +The feature flag will be removed after repository format version 3 becomes +available or be replaced with a different solution. + +When creating backups from a filesystem snapshot, for example created using +btrfs subvolumes, the deviceID of the filesystem changes compared to previous +snapshots. This prevented restic from deduplicating the directory metadata of +a snapshot. + +When this alpha feature is enabled, then the deviceID is only stored for +hardlinks. This significantly reduces the metadata duplication for most +backups. + +https://github.com/restic/restic/pull/4006 From cf81f8ced63bee4f1d56c95b1e6557af251e2dcb Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 28 Mar 2024 21:28:01 +0100 Subject: [PATCH 043/117] stats: only check for hardlinks for files with more than one link --- changelog/unreleased/pull-4503 | 1 + cmd/restic/cmd_stats.go | 11 +++++++---- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/changelog/unreleased/pull-4503 b/changelog/unreleased/pull-4503 index 3ce5c48e8..b52552d69 100644 --- a/changelog/unreleased/pull-4503 +++ b/changelog/unreleased/pull-4503 @@ -4,4 +4,5 @@ If files on different devices had the same inode id, then the `stats` command did not correctly calculate the snapshot size. This has been fixed. 
https://github.com/restic/restic/pull/4503 +https://github.com/restic/restic/pull/4006 https://forum.restic.net/t/possible-bug-in-stats/6461/8 diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go index d3078a419..b84620bab 100644 --- a/cmd/restic/cmd_stats.go +++ b/cmd/restic/cmd_stats.go @@ -270,11 +270,14 @@ func statsWalkTree(repo restic.Loader, opts StatsOptions, stats *statsContainer, // will still be restored stats.TotalFileCount++ - // if inodes are present, only count each inode once - // (hard links do not increase restore size) - if !hardLinkIndex.Has(node.Inode, node.DeviceID) || node.Inode == 0 { - hardLinkIndex.Add(node.Inode, node.DeviceID, struct{}{}) + if node.Links == 1 || node.Type == "dir" { stats.TotalSize += node.Size + } else { + // if hardlinks are present only count each deviceID+inode once + if !hardLinkIndex.Has(node.Inode, node.DeviceID) || node.Inode == 0 { + hardLinkIndex.Add(node.Inode, node.DeviceID, struct{}{}) + stats.TotalSize += node.Size + } } } From 118a69a84b5408201142ea06f6e25ae908527598 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 24 Feb 2024 15:19:02 +0100 Subject: [PATCH 044/117] lock: replace lockRepo(Exclusive) with openWith(Read/Write/Exclusive)Lock The new functions much better convey the intent behind the lock request. This allows cleanly integrating noLock (for read) and dryRun (write/exclusive) handling. There are only minor changes to existing behavior with two exceptions: - `tag` no longer accepts the `--no-lock` flag. As it replaces files in the repository, this always requires an exclusive lock. - `debug examine` now returns an error if both `--extract-pack` and `--no-lock` are given. --- cmd/restic/cmd_backup.go | 19 ++------------- cmd/restic/cmd_cat.go | 12 ++-------- cmd/restic/cmd_check.go | 16 ++++--------- cmd/restic/cmd_copy.go | 21 ++++------------- cmd/restic/cmd_debug.go | 28 +++++++--------------- cmd/restic/cmd_diff.go | 12 ++-------- cmd/restic/cmd_dump.go | 12 ++-------- cmd/restic/cmd_find.go | 12 ++-------- cmd/restic/cmd_forget.go | 16 ++++--------- cmd/restic/cmd_key_add.go | 9 ++----- cmd/restic/cmd_key_list.go | 12 ++-------- cmd/restic/cmd_key_passwd.go | 9 ++----- cmd/restic/cmd_key_remove.go | 13 +++------- cmd/restic/cmd_list.go | 12 ++-------- cmd/restic/cmd_migrate.go | 9 ++----- cmd/restic/cmd_mount.go | 12 ++-------- cmd/restic/cmd_prune.go | 9 ++----- cmd/restic/cmd_recover.go | 9 ++----- cmd/restic/cmd_repair_index.go | 9 ++----- cmd/restic/cmd_repair_packs.go | 9 ++----- cmd/restic/cmd_repair_snapshots.go | 15 ++---------- cmd/restic/cmd_restore.go | 12 ++-------- cmd/restic/cmd_rewrite.go | 31 ++++++++++-------------- cmd/restic/cmd_snapshots.go | 12 ++-------- cmd/restic/cmd_stats.go | 12 ++-------- cmd/restic/cmd_tag.go | 16 ++++--------- cmd/restic/lock.go | 38 ++++++++++++++++++++++++++---- cmd/restic/lock_test.go | 18 +++++++------- 28 files changed, 122 insertions(+), 292 deletions(-) diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index acc4bddb1..8b2f1f808 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -463,10 +463,11 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter Verbosef("open repository\n") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithAppendLock(ctx, gopts, opts.DryRun) if err != nil { return err } + defer unlock() var progressPrinter backup.ProgressPrinter if gopts.JSON { @@ -478,22 +479,6 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts 
GlobalOptions, ter calculateProgressInterval(!gopts.Quiet, gopts.JSON)) defer progressReporter.Done() - if opts.DryRun { - repo.SetDryRun() - } - - if !gopts.JSON { - progressPrinter.V("lock repository") - } - if !opts.DryRun { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } - // rejectByNameFuncs collect functions that can reject items from the backup based on path only rejectByNameFuncs, err := collectRejectByNameFuncs(opts, repo) if err != nil { diff --git a/cmd/restic/cmd_cat.go b/cmd/restic/cmd_cat.go index 92f58b2e7..ccec9b5d9 100644 --- a/cmd/restic/cmd_cat.go +++ b/cmd/restic/cmd_cat.go @@ -64,19 +64,11 @@ func runCat(ctx context.Context, gopts GlobalOptions, args []string) error { return err } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() tpe := args[0] diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index cbe388877..7bea641ae 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -204,20 +204,14 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args return code, nil }) - repo, err := OpenRepository(ctx, gopts) + if !gopts.NoLock { + Verbosef("create exclusive lock for repository\n") + } + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - Verbosef("create exclusive lock for repository\n") - var lock *restic.Lock - lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() chkr := checker.New(repo, opts.CheckUnused) err = chkr.LoadSnapshots(ctx) diff --git a/cmd/restic/cmd_copy.go b/cmd/restic/cmd_copy.go index 92922b42b..410134e41 100644 --- a/cmd/restic/cmd_copy.go +++ b/cmd/restic/cmd_copy.go @@ -62,30 +62,17 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args [] gopts, secondaryGopts = secondaryGopts, gopts } - srcRepo, err := OpenRepository(ctx, gopts) + ctx, srcRepo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } + defer unlock() - dstRepo, err := OpenRepository(ctx, secondaryGopts) - if err != nil { - return err - } - - if !gopts.NoLock { - var srcLock *restic.Lock - srcLock, ctx, err = lockRepo(ctx, srcRepo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(srcLock) - if err != nil { - return err - } - } - - dstLock, ctx, err := lockRepo(ctx, dstRepo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(dstLock) + ctx, dstRepo, unlock, err := openWithAppendLock(ctx, secondaryGopts, false) if err != nil { return err } + defer unlock() srcSnapshotLister, err := restic.MemorizeList(ctx, srcRepo, restic.SnapshotFile) if err != nil { diff --git a/cmd/restic/cmd_debug.go b/cmd/restic/cmd_debug.go index a87e7a0c5..3abb9d7eb 100644 --- a/cmd/restic/cmd_debug.go +++ b/cmd/restic/cmd_debug.go @@ -153,19 +153,11 @@ func runDebugDump(ctx context.Context, gopts GlobalOptions, args []string) error return errors.Fatal("type not specified") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - 
var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() tpe := args[0] @@ -442,10 +434,15 @@ func storePlainBlob(id restic.ID, prefix string, plain []byte) error { } func runDebugExamine(ctx context.Context, gopts GlobalOptions, opts DebugExamineOptions, args []string) error { - repo, err := OpenRepository(ctx, gopts) + if opts.ExtractPack && gopts.NoLock { + return fmt.Errorf("--extract-pack and --no-lock are mutually exclusive") + } + + ctx, repo, unlock, err := openWithAppendLock(ctx, gopts, gopts.NoLock) if err != nil { return err } + defer unlock() ids := make([]restic.ID, 0) for _, name := range args { @@ -464,15 +461,6 @@ func runDebugExamine(ctx context.Context, gopts GlobalOptions, opts DebugExamine return errors.Fatal("no pack files to examine") } - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } - bar := newIndexProgress(gopts.Quiet, gopts.JSON) err = repo.LoadIndex(ctx, bar) if err != nil { diff --git a/cmd/restic/cmd_diff.go b/cmd/restic/cmd_diff.go index 3bd29fa67..b156191dc 100644 --- a/cmd/restic/cmd_diff.go +++ b/cmd/restic/cmd_diff.go @@ -344,19 +344,11 @@ func runDiff(ctx context.Context, opts DiffOptions, gopts GlobalOptions, args [] return errors.Fatalf("specify two snapshot IDs") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() // cache snapshots listing be, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) diff --git a/cmd/restic/cmd_dump.go b/cmd/restic/cmd_dump.go index 9178f2abe..39e915b40 100644 --- a/cmd/restic/cmd_dump.go +++ b/cmd/restic/cmd_dump.go @@ -131,19 +131,11 @@ func runDump(ctx context.Context, opts DumpOptions, gopts GlobalOptions, args [] splittedPath := splitPath(path.Clean(pathToPrint)) - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() sn, subfolder, err := (&restic.SnapshotFilter{ Hosts: opts.Hosts, diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go index 7ea7c425a..e29fe30dc 100644 --- a/cmd/restic/cmd_find.go +++ b/cmd/restic/cmd_find.go @@ -563,19 +563,11 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args [] return errors.Fatal("cannot have several ID types") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) if err != nil { diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index 65ff449a3..f2fc1da8c 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -163,23 +163,15 @@ func runForget(ctx 
context.Context, opts ForgetOptions, pruneOptions PruneOption return err } - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - if gopts.NoLock && !opts.DryRun { return errors.Fatal("--no-lock is only applicable in combination with --dry-run for forget command") } - if !opts.DryRun || !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, opts.DryRun && gopts.NoLock) + if err != nil { + return err } + defer unlock() var snapshots restic.Snapshots removeSnIDs := restic.NewIDSet() diff --git a/cmd/restic/cmd_key_add.go b/cmd/restic/cmd_key_add.go index 43a38f4eb..83e0cab7f 100644 --- a/cmd/restic/cmd_key_add.go +++ b/cmd/restic/cmd_key_add.go @@ -50,16 +50,11 @@ func runKeyAdd(ctx context.Context, gopts GlobalOptions, opts KeyAddOptions, arg return fmt.Errorf("the key add command expects no arguments, only options - please see `restic help key add` for usage and flags") } - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - - lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) + ctx, repo, unlock, err := openWithAppendLock(ctx, gopts, false) if err != nil { return err } + defer unlock() return addKey(ctx, repo, gopts, opts) } diff --git a/cmd/restic/cmd_key_list.go b/cmd/restic/cmd_key_list.go index 2b3574281..9bddb5ed3 100644 --- a/cmd/restic/cmd_key_list.go +++ b/cmd/restic/cmd_key_list.go @@ -40,19 +40,11 @@ func runKeyList(ctx context.Context, gopts GlobalOptions, args []string) error { return fmt.Errorf("the key list command expects no arguments, only options - please see `restic help key list` for usage and flags") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() return listKeys(ctx, repo, gopts) } diff --git a/cmd/restic/cmd_key_passwd.go b/cmd/restic/cmd_key_passwd.go index cb916274c..70abca6dc 100644 --- a/cmd/restic/cmd_key_passwd.go +++ b/cmd/restic/cmd_key_passwd.go @@ -47,16 +47,11 @@ func runKeyPasswd(ctx context.Context, gopts GlobalOptions, opts KeyPasswdOption return fmt.Errorf("the key passwd command expects no arguments, only options - please see `restic help key passwd` for usage and flags") } - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - - lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { return err } + defer unlock() return changePassword(ctx, repo, gopts, opts) } diff --git a/cmd/restic/cmd_key_remove.go b/cmd/restic/cmd_key_remove.go index c8e303ffc..93babb4f3 100644 --- a/cmd/restic/cmd_key_remove.go +++ b/cmd/restic/cmd_key_remove.go @@ -37,20 +37,13 @@ func runKeyRemove(ctx context.Context, gopts GlobalOptions, args []string) error return fmt.Errorf("key remove expects one argument as the key id") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { return err } + defer unlock() - lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer 
unlockRepo(lock) - if err != nil { - return err - } - - idPrefix := args[0] - - return deleteKey(ctx, repo, idPrefix) + return deleteKey(ctx, repo, args[0]) } func deleteKey(ctx context.Context, repo *repository.Repository, idPrefix string) error { diff --git a/cmd/restic/cmd_list.go b/cmd/restic/cmd_list.go index becad7f0d..a3df0c98f 100644 --- a/cmd/restic/cmd_list.go +++ b/cmd/restic/cmd_list.go @@ -36,19 +36,11 @@ func runList(ctx context.Context, gopts GlobalOptions, args []string) error { return errors.Fatal("type not specified") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock || args[0] == "locks") if err != nil { return err } - - if !gopts.NoLock && args[0] != "locks" { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() var t restic.FileType switch args[0] { diff --git a/cmd/restic/cmd_migrate.go b/cmd/restic/cmd_migrate.go index fd2e762c0..c3f82b8dd 100644 --- a/cmd/restic/cmd_migrate.go +++ b/cmd/restic/cmd_migrate.go @@ -117,16 +117,11 @@ func applyMigrations(ctx context.Context, opts MigrateOptions, gopts GlobalOptio } func runMigrate(ctx context.Context, opts MigrateOptions, gopts GlobalOptions, args []string) error { - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - - lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { return err } + defer unlock() if len(args) == 0 { return checkMigrations(ctx, repo) diff --git a/cmd/restic/cmd_mount.go b/cmd/restic/cmd_mount.go index 5fd81b344..cb2b1142d 100644 --- a/cmd/restic/cmd_mount.go +++ b/cmd/restic/cmd_mount.go @@ -125,19 +125,11 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args debug.Log("start mount") defer debug.Log("finish mount") - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() bar := newIndexProgress(gopts.Quiet, gopts.JSON) err = repo.LoadIndex(ctx, bar) diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index 1b9352ea7..3a9a8c33c 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -148,10 +148,11 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions) error return errors.Fatal("disabled compression and `--repack-uncompressed` are mutually exclusive") } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { return err } + defer unlock() if repo.Connections() < 2 { return errors.Fatal("prune requires a backend connection limit of at least two") @@ -169,12 +170,6 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions) error opts.unsafeRecovery = true } - lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - return runPruneWithRepo(ctx, opts, gopts, repo, restic.NewIDSet()) } diff --git a/cmd/restic/cmd_recover.go b/cmd/restic/cmd_recover.go index b97a7582b..f9a4d419d 100644 --- a/cmd/restic/cmd_recover.go +++ b/cmd/restic/cmd_recover.go @@ 
-40,16 +40,11 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error { return err } - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - - lock, ctx, err := lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) + ctx, repo, unlock, err := openWithAppendLock(ctx, gopts, false) if err != nil { return err } + defer unlock() snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) if err != nil { diff --git a/cmd/restic/cmd_repair_index.go b/cmd/restic/cmd_repair_index.go index ea36f02f6..1ac743348 100644 --- a/cmd/restic/cmd_repair_index.go +++ b/cmd/restic/cmd_repair_index.go @@ -56,16 +56,11 @@ func init() { } func runRebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOptions) error { - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - - lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { return err } + defer unlock() return rebuildIndex(ctx, opts, gopts, repo) } diff --git a/cmd/restic/cmd_repair_packs.go b/cmd/restic/cmd_repair_packs.go index 521b5859f..00dee076b 100644 --- a/cmd/restic/cmd_repair_packs.go +++ b/cmd/restic/cmd_repair_packs.go @@ -52,16 +52,11 @@ func runRepairPacks(ctx context.Context, gopts GlobalOptions, term *termstatus.T return errors.Fatal("no ids specified") } - repo, err := OpenRepository(ctx, gopts) - if err != nil { - return err - } - - lock, ctx, err := lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { return err } + defer unlock() bar := newIndexProgress(gopts.Quiet, gopts.JSON) err = repo.LoadIndex(ctx, bar) diff --git a/cmd/restic/cmd_repair_snapshots.go b/cmd/restic/cmd_repair_snapshots.go index cc3d0eb85..4d9745e15 100644 --- a/cmd/restic/cmd_repair_snapshots.go +++ b/cmd/restic/cmd_repair_snapshots.go @@ -66,22 +66,11 @@ func init() { } func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOptions, args []string) error { - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, opts.DryRun) if err != nil { return err } - - if !opts.DryRun { - var lock *restic.Lock - var err error - lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } else { - repo.SetDryRun() - } + defer unlock() snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) if err != nil { diff --git a/cmd/restic/cmd_restore.go b/cmd/restic/cmd_restore.go index 58f257541..5161be50d 100644 --- a/cmd/restic/cmd_restore.go +++ b/cmd/restic/cmd_restore.go @@ -127,19 +127,11 @@ func runRestore(ctx context.Context, opts RestoreOptions, gopts GlobalOptions, debug.Log("restore %v to %v", snapshotIDString, opts.Target) - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() sn, subfolder, err := (&restic.SnapshotFilter{ Hosts: opts.Hosts, diff --git a/cmd/restic/cmd_rewrite.go b/cmd/restic/cmd_rewrite.go index 62624e75c..06d4ddbd1 100644 --- a/cmd/restic/cmd_rewrite.go +++ 
b/cmd/restic/cmd_rewrite.go @@ -256,27 +256,22 @@ func runRewrite(ctx context.Context, opts RewriteOptions, gopts GlobalOptions, a return errors.Fatal("Nothing to do: no excludes provided and no new metadata provided") } - repo, err := OpenRepository(ctx, gopts) + var ( + repo *repository.Repository + unlock func() + err error + ) + + if opts.Forget { + Verbosef("create exclusive lock for repository\n") + ctx, repo, unlock, err = openWithExclusiveLock(ctx, gopts, opts.DryRun) + } else { + ctx, repo, unlock, err = openWithAppendLock(ctx, gopts, opts.DryRun) + } if err != nil { return err } - - if !opts.DryRun { - var lock *restic.Lock - var err error - if opts.Forget { - Verbosef("create exclusive lock for repository\n") - lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - } else { - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - } - defer unlockRepo(lock) - if err != nil { - return err - } - } else { - repo.SetDryRun() - } + defer unlock() snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) if err != nil { diff --git a/cmd/restic/cmd_snapshots.go b/cmd/restic/cmd_snapshots.go index d6199d47a..1a9cd2232 100644 --- a/cmd/restic/cmd_snapshots.go +++ b/cmd/restic/cmd_snapshots.go @@ -59,19 +59,11 @@ func init() { } func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions, args []string) error { - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() var snapshots restic.Snapshots for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) { diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go index b84620bab..20d7a485c 100644 --- a/cmd/restic/cmd_stats.go +++ b/cmd/restic/cmd_stats.go @@ -80,19 +80,11 @@ func runStats(ctx context.Context, opts StatsOptions, gopts GlobalOptions, args return err } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } - - if !gopts.NoLock { - var lock *restic.Lock - lock, ctx, err = lockRepo(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } - } + defer unlock() snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) if err != nil { diff --git a/cmd/restic/cmd_tag.go b/cmd/restic/cmd_tag.go index 01f3ad8af..b0d139fa6 100644 --- a/cmd/restic/cmd_tag.go +++ b/cmd/restic/cmd_tag.go @@ -104,20 +104,12 @@ func runTag(ctx context.Context, opts TagOptions, gopts GlobalOptions, args []st return errors.Fatal("--set and --add/--remove cannot be given at the same time") } - repo, err := OpenRepository(ctx, gopts) + Verbosef("create exclusive lock for repository\n") + ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { - return err - } - - if !gopts.NoLock { - Verbosef("create exclusive lock for repository\n") - var lock *restic.Lock - lock, ctx, err = lockRepoExclusive(ctx, repo, gopts.RetryLock, gopts.JSON) - defer unlockRepo(lock) - if err != nil { - return err - } + return nil } + defer unlock() changeCnt := 0 for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) { diff --git a/cmd/restic/lock.go b/cmd/restic/lock.go index 600b7476f..29641e670 100644 --- 
a/cmd/restic/lock.go +++ b/cmd/restic/lock.go @@ -9,6 +9,7 @@ import ( "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" ) @@ -24,12 +25,41 @@ var globalLocks struct { sync.Once } -func lockRepo(ctx context.Context, repo restic.Repository, retryLock time.Duration, json bool) (*restic.Lock, context.Context, error) { - return lockRepository(ctx, repo, false, retryLock, json) +func internalOpenWithLocked(ctx context.Context, gopts GlobalOptions, dryRun bool, exclusive bool) (context.Context, *repository.Repository, func(), error) { + repo, err := OpenRepository(ctx, gopts) + if err != nil { + return nil, nil, nil, err + } + + unlock := func() {} + if !dryRun { + var lock *restic.Lock + lock, ctx, err = lockRepository(ctx, repo, exclusive, gopts.RetryLock, gopts.JSON) + unlock = func() { + unlockRepo(lock) + } + if err != nil { + return nil, nil, nil, err + } + } else { + repo.SetDryRun() + } + + return ctx, repo, unlock, nil } -func lockRepoExclusive(ctx context.Context, repo restic.Repository, retryLock time.Duration, json bool) (*restic.Lock, context.Context, error) { - return lockRepository(ctx, repo, true, retryLock, json) +func openWithReadLock(ctx context.Context, gopts GlobalOptions, noLock bool) (context.Context, *repository.Repository, func(), error) { + // TODO enfore read-only operations once the locking code has moved to the repository + return internalOpenWithLocked(ctx, gopts, noLock, false) +} + +func openWithAppendLock(ctx context.Context, gopts GlobalOptions, dryRun bool) (context.Context, *repository.Repository, func(), error) { + // TODO enfore non-exclusive operations once the locking code has moved to the repository + return internalOpenWithLocked(ctx, gopts, dryRun, false) +} + +func openWithExclusiveLock(ctx context.Context, gopts GlobalOptions, dryRun bool) (context.Context, *repository.Repository, func(), error) { + return internalOpenWithLocked(ctx, gopts, dryRun, true) } var ( diff --git a/cmd/restic/lock_test.go b/cmd/restic/lock_test.go index bf22db699..83d5f2a5e 100644 --- a/cmd/restic/lock_test.go +++ b/cmd/restic/lock_test.go @@ -37,7 +37,7 @@ func openLockTestRepo(t *testing.T, wrapper backendWrapper) (*repository.Reposit } func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, env *testEnvironment) (*restic.Lock, context.Context) { - lock, wrappedCtx, err := lockRepo(ctx, repo, env.gopts.RetryLock, env.gopts.JSON) + lock, wrappedCtx, err := lockRepository(ctx, repo, false, env.gopts.RetryLock, env.gopts.JSON) test.OK(t, err) test.OK(t, wrappedCtx.Err()) if lock.Stale() { @@ -94,10 +94,10 @@ func TestLockConflict(t *testing.T) { repo2, err := OpenRepository(context.TODO(), env.gopts) test.OK(t, err) - lock, _, err := lockRepoExclusive(context.Background(), repo, env.gopts.RetryLock, env.gopts.JSON) + lock, _, err := lockRepository(context.Background(), repo, true, env.gopts.RetryLock, env.gopts.JSON) test.OK(t, err) defer unlockRepo(lock) - _, _, err = lockRepo(context.Background(), repo2, env.gopts.RetryLock, env.gopts.JSON) + _, _, err = lockRepository(context.Background(), repo2, false, env.gopts.RetryLock, env.gopts.JSON) if err == nil { t.Fatal("second lock should have failed") } @@ -260,13 +260,13 @@ func TestLockWaitTimeout(t *testing.T) { repo, cleanup, env := openLockTestRepo(t, nil) defer cleanup() - elock, _, err := lockRepoExclusive(context.TODO(), repo, 
env.gopts.RetryLock, env.gopts.JSON) + elock, _, err := lockRepository(context.TODO(), repo, true, env.gopts.RetryLock, env.gopts.JSON) test.OK(t, err) retryLock := 200 * time.Millisecond start := time.Now() - lock, _, err := lockRepo(context.TODO(), repo, retryLock, env.gopts.JSON) + lock, _, err := lockRepository(context.TODO(), repo, false, retryLock, env.gopts.JSON) duration := time.Since(start) test.Assert(t, err != nil, @@ -284,7 +284,7 @@ func TestLockWaitCancel(t *testing.T) { repo, cleanup, env := openLockTestRepo(t, nil) defer cleanup() - elock, _, err := lockRepoExclusive(context.TODO(), repo, env.gopts.RetryLock, env.gopts.JSON) + elock, _, err := lockRepository(context.TODO(), repo, true, env.gopts.RetryLock, env.gopts.JSON) test.OK(t, err) retryLock := 200 * time.Millisecond @@ -294,7 +294,7 @@ func TestLockWaitCancel(t *testing.T) { ctx, cancel := context.WithCancel(context.TODO()) time.AfterFunc(cancelAfter, cancel) - lock, _, err := lockRepo(ctx, repo, retryLock, env.gopts.JSON) + lock, _, err := lockRepository(ctx, repo, false, retryLock, env.gopts.JSON) duration := time.Since(start) test.Assert(t, err != nil, @@ -312,7 +312,7 @@ func TestLockWaitSuccess(t *testing.T) { repo, cleanup, env := openLockTestRepo(t, nil) defer cleanup() - elock, _, err := lockRepoExclusive(context.TODO(), repo, env.gopts.RetryLock, env.gopts.JSON) + elock, _, err := lockRepository(context.TODO(), repo, true, env.gopts.RetryLock, env.gopts.JSON) test.OK(t, err) retryLock := 200 * time.Millisecond @@ -322,7 +322,7 @@ func TestLockWaitSuccess(t *testing.T) { test.OK(t, elock.Unlock()) }) - lock, _, err := lockRepo(context.TODO(), repo, retryLock, env.gopts.JSON) + lock, _, err := lockRepository(context.TODO(), repo, false, retryLock, env.gopts.JSON) test.OK(t, err) test.OK(t, lock.Unlock()) From cbb5f89252523f8ebe631ee53f326b75f810d6f8 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 24 Feb 2024 16:26:29 +0100 Subject: [PATCH 045/117] lock: move code to repository package --- cmd/restic/lock.go | 312 +----------------- internal/repository/lock.go | 301 +++++++++++++++++ .../repository}/lock_test.go | 120 ++++--- 3 files changed, 370 insertions(+), 363 deletions(-) create mode 100644 internal/repository/lock.go rename {cmd/restic => internal/repository}/lock_test.go (70%) diff --git a/cmd/restic/lock.go b/cmd/restic/lock.go index 29641e670..20ac4dd34 100644 --- a/cmd/restic/lock.go +++ b/cmd/restic/lock.go @@ -2,26 +2,13 @@ package main import ( "context" - "fmt" "sync" - "time" - "github.com/restic/restic/internal/backend" - "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" ) -type lockContext struct { - lock *restic.Lock - cancel context.CancelFunc - refreshWG sync.WaitGroup -} - var globalLocks struct { - locks map[*restic.Lock]*lockContext - sync.Mutex sync.Once } @@ -34,9 +21,20 @@ func internalOpenWithLocked(ctx context.Context, gopts GlobalOptions, dryRun boo unlock := func() {} if !dryRun { var lock *restic.Lock - lock, ctx, err = lockRepository(ctx, repo, exclusive, gopts.RetryLock, gopts.JSON) + + // make sure that a repository is unlocked properly and after cancel() was + // called by the cleanup handler in global.go + globalLocks.Do(func() { + AddCleanupHandler(repository.UnlockAll) + }) + + lock, ctx, err = repository.Lock(ctx, repo, exclusive, gopts.RetryLock, func(msg string) { + if !gopts.JSON { + Verbosef("%s", msg) + } + }, Warnf) unlock = func() 
{ - unlockRepo(lock) + repository.Unlock(lock) } if err != nil { return nil, nil, nil, err @@ -61,287 +59,3 @@ func openWithAppendLock(ctx context.Context, gopts GlobalOptions, dryRun bool) ( func openWithExclusiveLock(ctx context.Context, gopts GlobalOptions, dryRun bool) (context.Context, *repository.Repository, func(), error) { return internalOpenWithLocked(ctx, gopts, dryRun, true) } - -var ( - retrySleepStart = 5 * time.Second - retrySleepMax = 60 * time.Second -) - -func minDuration(a, b time.Duration) time.Duration { - if a <= b { - return a - } - return b -} - -// lockRepository wraps the ctx such that it is cancelled when the repository is unlocked -// cancelling the original context also stops the lock refresh -func lockRepository(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, json bool) (*restic.Lock, context.Context, error) { - // make sure that a repository is unlocked properly and after cancel() was - // called by the cleanup handler in global.go - globalLocks.Do(func() { - AddCleanupHandler(unlockAll) - }) - - lockFn := restic.NewLock - if exclusive { - lockFn = restic.NewExclusiveLock - } - - var lock *restic.Lock - var err error - - retrySleep := minDuration(retrySleepStart, retryLock) - retryMessagePrinted := false - retryTimeout := time.After(retryLock) - -retryLoop: - for { - lock, err = lockFn(ctx, repo) - if err != nil && restic.IsAlreadyLocked(err) { - - if !retryMessagePrinted { - if !json { - Verbosef("repo already locked, waiting up to %s for the lock\n", retryLock) - } - retryMessagePrinted = true - } - - debug.Log("repo already locked, retrying in %v", retrySleep) - retrySleepCh := time.After(retrySleep) - - select { - case <-ctx.Done(): - return nil, ctx, ctx.Err() - case <-retryTimeout: - debug.Log("repo already locked, timeout expired") - // Last lock attempt - lock, err = lockFn(ctx, repo) - break retryLoop - case <-retrySleepCh: - retrySleep = minDuration(retrySleep*2, retrySleepMax) - } - } else { - // anything else, either a successful lock or another error - break retryLoop - } - } - if restic.IsInvalidLock(err) { - return nil, ctx, errors.Fatalf("%v\n\nthe `unlock --remove-all` command can be used to remove invalid locks. Make sure that no other restic process is accessing the repository when running the command", err) - } - if err != nil { - return nil, ctx, fmt.Errorf("unable to create lock in backend: %w", err) - } - debug.Log("create lock %p (exclusive %v)", lock, exclusive) - - ctx, cancel := context.WithCancel(ctx) - lockInfo := &lockContext{ - lock: lock, - cancel: cancel, - } - lockInfo.refreshWG.Add(2) - refreshChan := make(chan struct{}) - forceRefreshChan := make(chan refreshLockRequest) - - globalLocks.Lock() - globalLocks.locks[lock] = lockInfo - go refreshLocks(ctx, repo.Backend(), lockInfo, refreshChan, forceRefreshChan) - go monitorLockRefresh(ctx, lockInfo, refreshChan, forceRefreshChan) - globalLocks.Unlock() - - return lock, ctx, err -} - -var refreshInterval = 5 * time.Minute - -// consider a lock refresh failed a bit before the lock actually becomes stale -// the difference allows to compensate for a small time drift between clients. 
-var refreshabilityTimeout = restic.StaleLockTimeout - refreshInterval*3/2 - -type refreshLockRequest struct { - result chan bool -} - -func refreshLocks(ctx context.Context, backend backend.Backend, lockInfo *lockContext, refreshed chan<- struct{}, forceRefresh <-chan refreshLockRequest) { - debug.Log("start") - lock := lockInfo.lock - ticker := time.NewTicker(refreshInterval) - lastRefresh := lock.Time - - defer func() { - ticker.Stop() - // ensure that the context was cancelled before removing the lock - lockInfo.cancel() - - // remove the lock from the repo - debug.Log("unlocking repository with lock %v", lock) - if err := lock.Unlock(); err != nil { - debug.Log("error while unlocking: %v", err) - Warnf("error while unlocking: %v", err) - } - - lockInfo.refreshWG.Done() - }() - - for { - select { - case <-ctx.Done(): - debug.Log("terminate") - return - - case req := <-forceRefresh: - debug.Log("trying to refresh stale lock") - // keep on going if our current lock still exists - success := tryRefreshStaleLock(ctx, backend, lock, lockInfo.cancel) - // inform refresh goroutine about forced refresh - select { - case <-ctx.Done(): - case req.result <- success: - } - - if success { - // update lock refresh time - lastRefresh = lock.Time - } - - case <-ticker.C: - if time.Since(lastRefresh) > refreshabilityTimeout { - // the lock is too old, wait until the expiry monitor cancels the context - continue - } - - debug.Log("refreshing locks") - err := lock.Refresh(context.TODO()) - if err != nil { - Warnf("unable to refresh lock: %v\n", err) - } else { - lastRefresh = lock.Time - // inform monitor goroutine about successful refresh - select { - case <-ctx.Done(): - case refreshed <- struct{}{}: - } - } - } - } -} - -func monitorLockRefresh(ctx context.Context, lockInfo *lockContext, refreshed <-chan struct{}, forceRefresh chan<- refreshLockRequest) { - // time.Now() might use a monotonic timer which is paused during standby - // convert to unix time to ensure we compare real time values - lastRefresh := time.Now().UnixNano() - pollDuration := 1 * time.Second - if refreshInterval < pollDuration { - // require for TestLockFailedRefresh - pollDuration = refreshInterval / 5 - } - // timers are paused during standby, which is a problem as the refresh timeout - // _must_ expire if the host was too long in standby. 
Thus fall back to periodic checks - // https://github.com/golang/go/issues/35012 - ticker := time.NewTicker(pollDuration) - defer func() { - ticker.Stop() - lockInfo.cancel() - lockInfo.refreshWG.Done() - }() - - var refreshStaleLockResult chan bool - - for { - select { - case <-ctx.Done(): - debug.Log("terminate expiry monitoring") - return - case <-refreshed: - if refreshStaleLockResult != nil { - // ignore delayed refresh notifications while the stale lock is refreshed - continue - } - lastRefresh = time.Now().UnixNano() - case <-ticker.C: - if time.Now().UnixNano()-lastRefresh < refreshabilityTimeout.Nanoseconds() || refreshStaleLockResult != nil { - continue - } - - debug.Log("trying to refreshStaleLock") - // keep on going if our current lock still exists - refreshReq := refreshLockRequest{ - result: make(chan bool), - } - refreshStaleLockResult = refreshReq.result - - // inform refresh goroutine about forced refresh - select { - case <-ctx.Done(): - case forceRefresh <- refreshReq: - } - case success := <-refreshStaleLockResult: - if success { - lastRefresh = time.Now().UnixNano() - refreshStaleLockResult = nil - continue - } - - Warnf("Fatal: failed to refresh lock in time\n") - return - } - } -} - -func tryRefreshStaleLock(ctx context.Context, be backend.Backend, lock *restic.Lock, cancel context.CancelFunc) bool { - freeze := backend.AsBackend[backend.FreezeBackend](be) - if freeze != nil { - debug.Log("freezing backend") - freeze.Freeze() - defer freeze.Unfreeze() - } - - err := lock.RefreshStaleLock(ctx) - if err != nil { - Warnf("failed to refresh stale lock: %v\n", err) - // cancel context while the backend is still frozen to prevent accidental modifications - cancel() - return false - } - - return true -} - -func unlockRepo(lock *restic.Lock) { - if lock == nil { - return - } - - globalLocks.Lock() - lockInfo, exists := globalLocks.locks[lock] - delete(globalLocks.locks, lock) - globalLocks.Unlock() - - if !exists { - debug.Log("unable to find lock %v in the global list of locks, ignoring", lock) - return - } - lockInfo.cancel() - lockInfo.refreshWG.Wait() -} - -func unlockAll(code int) (int, error) { - globalLocks.Lock() - locks := globalLocks.locks - debug.Log("unlocking %d locks", len(globalLocks.locks)) - for _, lockInfo := range globalLocks.locks { - lockInfo.cancel() - } - globalLocks.locks = make(map[*restic.Lock]*lockContext) - globalLocks.Unlock() - - for _, lockInfo := range locks { - lockInfo.refreshWG.Wait() - } - - return code, nil -} - -func init() { - globalLocks.locks = make(map[*restic.Lock]*lockContext) -} diff --git a/internal/repository/lock.go b/internal/repository/lock.go new file mode 100644 index 000000000..c64cb9222 --- /dev/null +++ b/internal/repository/lock.go @@ -0,0 +1,301 @@ +package repository + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/debug" + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/restic" +) + +type lockContext struct { + lock *restic.Lock + cancel context.CancelFunc + refreshWG sync.WaitGroup +} + +var globalLocks struct { + locks map[*restic.Lock]*lockContext + sync.Mutex +} + +var ( + retrySleepStart = 5 * time.Second + retrySleepMax = 60 * time.Second +) + +func minDuration(a, b time.Duration) time.Duration { + if a <= b { + return a + } + return b +} + +// Lock wraps the ctx such that it is cancelled when the repository is unlocked +// cancelling the original context also stops the lock refresh +func 
Lock(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*restic.Lock, context.Context, error) { + + lockFn := restic.NewLock + if exclusive { + lockFn = restic.NewExclusiveLock + } + + var lock *restic.Lock + var err error + + retrySleep := minDuration(retrySleepStart, retryLock) + retryMessagePrinted := false + retryTimeout := time.After(retryLock) + +retryLoop: + for { + lock, err = lockFn(ctx, repo) + if err != nil && restic.IsAlreadyLocked(err) { + + if !retryMessagePrinted { + printRetry(fmt.Sprintf("repo already locked, waiting up to %s for the lock\n", retryLock)) + retryMessagePrinted = true + } + + debug.Log("repo already locked, retrying in %v", retrySleep) + retrySleepCh := time.After(retrySleep) + + select { + case <-ctx.Done(): + return nil, ctx, ctx.Err() + case <-retryTimeout: + debug.Log("repo already locked, timeout expired") + // Last lock attempt + lock, err = lockFn(ctx, repo) + break retryLoop + case <-retrySleepCh: + retrySleep = minDuration(retrySleep*2, retrySleepMax) + } + } else { + // anything else, either a successful lock or another error + break retryLoop + } + } + if restic.IsInvalidLock(err) { + return nil, ctx, errors.Fatalf("%v\n\nthe `unlock --remove-all` command can be used to remove invalid locks. Make sure that no other restic process is accessing the repository when running the command", err) + } + if err != nil { + return nil, ctx, fmt.Errorf("unable to create lock in backend: %w", err) + } + debug.Log("create lock %p (exclusive %v)", lock, exclusive) + + ctx, cancel := context.WithCancel(ctx) + lockInfo := &lockContext{ + lock: lock, + cancel: cancel, + } + lockInfo.refreshWG.Add(2) + refreshChan := make(chan struct{}) + forceRefreshChan := make(chan refreshLockRequest) + + globalLocks.Lock() + globalLocks.locks[lock] = lockInfo + go refreshLocks(ctx, repo.Backend(), lockInfo, refreshChan, forceRefreshChan, logger) + go monitorLockRefresh(ctx, lockInfo, refreshChan, forceRefreshChan, logger) + globalLocks.Unlock() + + return lock, ctx, err +} + +var refreshInterval = 5 * time.Minute + +// consider a lock refresh failed a bit before the lock actually becomes stale +// the difference allows to compensate for a small time drift between clients. 
+var refreshabilityTimeout = restic.StaleLockTimeout - refreshInterval*3/2 + +type refreshLockRequest struct { + result chan bool +} + +func refreshLocks(ctx context.Context, backend backend.Backend, lockInfo *lockContext, refreshed chan<- struct{}, forceRefresh <-chan refreshLockRequest, logger func(format string, args ...interface{})) { + debug.Log("start") + lock := lockInfo.lock + ticker := time.NewTicker(refreshInterval) + lastRefresh := lock.Time + + defer func() { + ticker.Stop() + // ensure that the context was cancelled before removing the lock + lockInfo.cancel() + + // remove the lock from the repo + debug.Log("unlocking repository with lock %v", lock) + if err := lock.Unlock(); err != nil { + debug.Log("error while unlocking: %v", err) + logger("error while unlocking: %v", err) + } + + lockInfo.refreshWG.Done() + }() + + for { + select { + case <-ctx.Done(): + debug.Log("terminate") + return + + case req := <-forceRefresh: + debug.Log("trying to refresh stale lock") + // keep on going if our current lock still exists + success := tryRefreshStaleLock(ctx, backend, lock, lockInfo.cancel, logger) + // inform refresh goroutine about forced refresh + select { + case <-ctx.Done(): + case req.result <- success: + } + + if success { + // update lock refresh time + lastRefresh = lock.Time + } + + case <-ticker.C: + if time.Since(lastRefresh) > refreshabilityTimeout { + // the lock is too old, wait until the expiry monitor cancels the context + continue + } + + debug.Log("refreshing locks") + err := lock.Refresh(context.TODO()) + if err != nil { + logger("unable to refresh lock: %v\n", err) + } else { + lastRefresh = lock.Time + // inform monitor goroutine about successful refresh + select { + case <-ctx.Done(): + case refreshed <- struct{}{}: + } + } + } + } +} + +func monitorLockRefresh(ctx context.Context, lockInfo *lockContext, refreshed <-chan struct{}, forceRefresh chan<- refreshLockRequest, logger func(format string, args ...interface{})) { + // time.Now() might use a monotonic timer which is paused during standby + // convert to unix time to ensure we compare real time values + lastRefresh := time.Now().UnixNano() + pollDuration := 1 * time.Second + if refreshInterval < pollDuration { + // required for TestLockFailedRefresh + pollDuration = refreshInterval / 5 + } + // timers are paused during standby, which is a problem as the refresh timeout + // _must_ expire if the host was too long in standby. 
Thus fall back to periodic checks + // https://github.com/golang/go/issues/35012 + ticker := time.NewTicker(pollDuration) + defer func() { + ticker.Stop() + lockInfo.cancel() + lockInfo.refreshWG.Done() + }() + + var refreshStaleLockResult chan bool + + for { + select { + case <-ctx.Done(): + debug.Log("terminate expiry monitoring") + return + case <-refreshed: + if refreshStaleLockResult != nil { + // ignore delayed refresh notifications while the stale lock is refreshed + continue + } + lastRefresh = time.Now().UnixNano() + case <-ticker.C: + if time.Now().UnixNano()-lastRefresh < refreshabilityTimeout.Nanoseconds() || refreshStaleLockResult != nil { + continue + } + + debug.Log("trying to refreshStaleLock") + // keep on going if our current lock still exists + refreshReq := refreshLockRequest{ + result: make(chan bool), + } + refreshStaleLockResult = refreshReq.result + + // inform refresh goroutine about forced refresh + select { + case <-ctx.Done(): + case forceRefresh <- refreshReq: + } + case success := <-refreshStaleLockResult: + if success { + lastRefresh = time.Now().UnixNano() + refreshStaleLockResult = nil + continue + } + + logger("Fatal: failed to refresh lock in time\n") + return + } + } +} + +func tryRefreshStaleLock(ctx context.Context, be backend.Backend, lock *restic.Lock, cancel context.CancelFunc, logger func(format string, args ...interface{})) bool { + freeze := backend.AsBackend[backend.FreezeBackend](be) + if freeze != nil { + debug.Log("freezing backend") + freeze.Freeze() + defer freeze.Unfreeze() + } + + err := lock.RefreshStaleLock(ctx) + if err != nil { + logger("failed to refresh stale lock: %v\n", err) + // cancel context while the backend is still frozen to prevent accidental modifications + cancel() + return false + } + + return true +} + +func Unlock(lock *restic.Lock) { + if lock == nil { + return + } + + globalLocks.Lock() + lockInfo, exists := globalLocks.locks[lock] + delete(globalLocks.locks, lock) + globalLocks.Unlock() + + if !exists { + debug.Log("unable to find lock %v in the global list of locks, ignoring", lock) + return + } + lockInfo.cancel() + lockInfo.refreshWG.Wait() +} + +func UnlockAll(code int) (int, error) { + globalLocks.Lock() + locks := globalLocks.locks + debug.Log("unlocking %d locks", len(globalLocks.locks)) + for _, lockInfo := range globalLocks.locks { + lockInfo.cancel() + } + globalLocks.locks = make(map[*restic.Lock]*lockContext) + globalLocks.Unlock() + + for _, lockInfo := range locks { + lockInfo.refreshWG.Wait() + } + + return code, nil +} + +func init() { + globalLocks.locks = make(map[*restic.Lock]*lockContext) +} diff --git a/cmd/restic/lock_test.go b/internal/repository/lock_test.go similarity index 70% rename from cmd/restic/lock_test.go rename to internal/repository/lock_test.go index 83d5f2a5e..fb48a566f 100644 --- a/cmd/restic/lock_test.go +++ b/internal/repository/lock_test.go @@ -1,4 +1,4 @@ -package main +package repository import ( "context" @@ -10,34 +10,35 @@ import ( "time" "github.com/restic/restic/internal/backend" - "github.com/restic/restic/internal/backend/location" "github.com/restic/restic/internal/backend/mem" "github.com/restic/restic/internal/debug" - "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/test" + rtest "github.com/restic/restic/internal/test" ) -func openLockTestRepo(t *testing.T, wrapper backendWrapper) (*repository.Repository, func(), *testEnvironment) { - env, cleanup := withTestEnvironment(t) +type 
backendWrapper func(r backend.Backend) (backend.Backend, error) - reg := location.NewRegistry() - reg.Register(mem.NewFactory()) - env.gopts.backends = reg - env.gopts.Repo = "mem:" +func openLockTestRepo(t *testing.T, wrapper backendWrapper) restic.Repository { + be := backend.Backend(mem.New()) + // initialize repo + TestRepositoryWithBackend(t, be, 0, Options{}) + // reopen repository to allow injecting a backend wrapper if wrapper != nil { - env.gopts.backendTestHook = wrapper + var err error + be, err = wrapper(be) + rtest.OK(t, err) } - testRunInit(t, env.gopts) - repo, err := OpenRepository(context.TODO(), env.gopts) - test.OK(t, err) - return repo, cleanup, env + repo, err := New(be, Options{}) + rtest.OK(t, err) + rtest.OK(t, repo.SearchKey(context.TODO(), test.TestPassword, 1, "")) + return repo } -func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, env *testEnvironment) (*restic.Lock, context.Context) { - lock, wrappedCtx, err := lockRepository(ctx, repo, false, env.gopts.RetryLock, env.gopts.JSON) +func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, retryLock time.Duration) (*restic.Lock, context.Context) { + lock, wrappedCtx, err := Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) test.OK(t, wrappedCtx.Err()) if lock.Stale() { @@ -47,57 +48,54 @@ func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, } func TestLock(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() + repo := openLockTestRepo(t, nil) - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env) - unlockRepo(lock) + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) + Unlock(lock) if wrappedCtx.Err() == nil { t.Fatal("unlock did not cancel context") } } func TestLockCancel(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() + repo := openLockTestRepo(t, nil) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - lock, wrappedCtx := checkedLockRepo(ctx, t, repo, env) + lock, wrappedCtx := checkedLockRepo(ctx, t, repo, 0) cancel() if wrappedCtx.Err() == nil { t.Fatal("canceled parent context did not cancel context") } - // unlockRepo should not crash - unlockRepo(lock) + // Unlock should not crash + Unlock(lock) } func TestLockUnlockAll(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() + repo := openLockTestRepo(t, nil) - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env) - _, err := unlockAll(0) + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) + _, err := UnlockAll(0) test.OK(t, err) if wrappedCtx.Err() == nil { t.Fatal("canceled parent context did not cancel context") } - // unlockRepo should not crash - unlockRepo(lock) + // Unlock should not crash + Unlock(lock) } func TestLockConflict(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() - repo2, err := OpenRepository(context.TODO(), env.gopts) + repo := openLockTestRepo(t, nil) + repo2, err := New(repo.Backend(), Options{}) test.OK(t, err) + test.OK(t, repo2.SearchKey(context.TODO(), test.TestPassword, 1, "")) - lock, _, err := lockRepository(context.Background(), repo, true, env.gopts.RetryLock, env.gopts.JSON) + lock, _, err := Lock(context.Background(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) - defer unlockRepo(lock) - _, _, err = 
lockRepository(context.Background(), repo2, false, env.gopts.RetryLock, env.gopts.JSON) + defer Unlock(lock) + _, _, err = Lock(context.Background(), repo2, false, 0, func(msg string) {}, func(format string, args ...interface{}) {}) if err == nil { t.Fatal("second lock should have failed") } @@ -118,10 +116,9 @@ func (b *writeOnceBackend) Save(ctx context.Context, h backend.Handle, rd backen } func TestLockFailedRefresh(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { + repo := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { return &writeOnceBackend{Backend: r}, nil }) - defer cleanup() // reduce locking intervals to be suitable for testing ri, rt := refreshInterval, refreshabilityTimeout @@ -131,7 +128,7 @@ func TestLockFailedRefresh(t *testing.T) { refreshInterval, refreshabilityTimeout = ri, rt }() - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env) + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) select { case <-wrappedCtx.Done(): @@ -139,8 +136,8 @@ func TestLockFailedRefresh(t *testing.T) { case <-time.After(time.Second): t.Fatal("failed lock refresh did not cause context cancellation") } - // unlockRepo should not crash - unlockRepo(lock) + // Unlock should not crash + Unlock(lock) } type loggingBackend struct { @@ -156,13 +153,12 @@ func (b *loggingBackend) Save(ctx context.Context, h backend.Handle, rd backend. } func TestLockSuccessfulRefresh(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { + repo := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { return &loggingBackend{ Backend: r, t: t, }, nil }) - defer cleanup() t.Logf("test for successful lock refresh %v", time.Now()) // reduce locking intervals to be suitable for testing @@ -173,7 +169,7 @@ func TestLockSuccessfulRefresh(t *testing.T) { refreshInterval, refreshabilityTimeout = ri, rt }() - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env) + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) select { case <-wrappedCtx.Done(): @@ -189,8 +185,8 @@ func TestLockSuccessfulRefresh(t *testing.T) { case <-time.After(2 * refreshabilityTimeout): // expected lock refresh to work } - // unlockRepo should not crash - unlockRepo(lock) + // Unlock should not crash + Unlock(lock) } type slowBackend struct { @@ -209,11 +205,10 @@ func (b *slowBackend) Save(ctx context.Context, h backend.Handle, rd backend.Rew func TestLockSuccessfulStaleRefresh(t *testing.T) { var sb *slowBackend - repo, cleanup, env := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { + repo := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { sb = &slowBackend{Backend: r} return sb, nil }) - defer cleanup() t.Logf("test for successful lock refresh %v", time.Now()) // reduce locking intervals to be suitable for testing @@ -224,7 +219,7 @@ func TestLockSuccessfulStaleRefresh(t *testing.T) { refreshInterval, refreshabilityTimeout = ri, rt }() - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, env) + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) // delay lock refreshing long enough that the lock would expire sb.m.Lock() sb.sleep = refreshabilityTimeout + refreshInterval @@ -252,21 +247,20 @@ func TestLockSuccessfulStaleRefresh(t *testing.T) { // expected lock refresh to work } - // unlockRepo should not crash - unlockRepo(lock) + // Unlock 
should not crash + Unlock(lock) } func TestLockWaitTimeout(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() + repo := openLockTestRepo(t, nil) - elock, _, err := lockRepository(context.TODO(), repo, true, env.gopts.RetryLock, env.gopts.JSON) + elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) retryLock := 200 * time.Millisecond start := time.Now() - lock, _, err := lockRepository(context.TODO(), repo, false, retryLock, env.gopts.JSON) + lock, _, err := Lock(context.TODO(), repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) duration := time.Since(start) test.Assert(t, err != nil, @@ -281,10 +275,9 @@ func TestLockWaitTimeout(t *testing.T) { } func TestLockWaitCancel(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() + repo := openLockTestRepo(t, nil) - elock, _, err := lockRepository(context.TODO(), repo, true, env.gopts.RetryLock, env.gopts.JSON) + elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) retryLock := 200 * time.Millisecond @@ -294,7 +287,7 @@ func TestLockWaitCancel(t *testing.T) { ctx, cancel := context.WithCancel(context.TODO()) time.AfterFunc(cancelAfter, cancel) - lock, _, err := lockRepository(ctx, repo, false, retryLock, env.gopts.JSON) + lock, _, err := Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) duration := time.Since(start) test.Assert(t, err != nil, @@ -309,10 +302,9 @@ func TestLockWaitCancel(t *testing.T) { } func TestLockWaitSuccess(t *testing.T) { - repo, cleanup, env := openLockTestRepo(t, nil) - defer cleanup() + repo := openLockTestRepo(t, nil) - elock, _, err := lockRepository(context.TODO(), repo, true, env.gopts.RetryLock, env.gopts.JSON) + elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) retryLock := 200 * time.Millisecond @@ -322,7 +314,7 @@ func TestLockWaitSuccess(t *testing.T) { test.OK(t, elock.Unlock()) }) - lock, _, err := lockRepository(context.TODO(), repo, false, retryLock, env.gopts.JSON) + lock, _, err := Lock(context.TODO(), repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) test.OK(t, lock.Unlock()) From e8df50fa3c0e5e4ab69e5f21aeedbd9ba0c36dee Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 24 Feb 2024 16:45:57 +0100 Subject: [PATCH 046/117] repository: remove global list of locks --- cmd/restic/lock.go | 26 ++++++---------- internal/repository/lock.go | 52 +++++--------------------------- internal/repository/lock_test.go | 50 +++++++++++------------------- 3 files changed, 34 insertions(+), 94 deletions(-) diff --git a/cmd/restic/lock.go b/cmd/restic/lock.go index 20ac4dd34..69d433df1 100644 --- a/cmd/restic/lock.go +++ b/cmd/restic/lock.go @@ -2,16 +2,10 @@ package main import ( "context" - "sync" "github.com/restic/restic/internal/repository" - "github.com/restic/restic/internal/restic" ) -var globalLocks struct { - sync.Once -} - func internalOpenWithLocked(ctx context.Context, gopts GlobalOptions, dryRun bool, exclusive bool) (context.Context, *repository.Repository, func(), error) { repo, err := OpenRepository(ctx, gopts) if err != nil { @@ -20,22 +14,22 @@ func internalOpenWithLocked(ctx context.Context, gopts GlobalOptions, dryRun boo unlock := func() {} if 
!dryRun { - var lock *restic.Lock - - // make sure that a repository is unlocked properly and after cancel() was - // called by the cleanup handler in global.go - globalLocks.Do(func() { - AddCleanupHandler(repository.UnlockAll) - }) + var lock *repository.Unlocker lock, ctx, err = repository.Lock(ctx, repo, exclusive, gopts.RetryLock, func(msg string) { if !gopts.JSON { Verbosef("%s", msg) } }, Warnf) - unlock = func() { - repository.Unlock(lock) - } + + unlock = lock.Unlock + // make sure that a repository is unlocked properly and after cancel() was + // called by the cleanup handler in global.go + AddCleanupHandler(func(code int) (int, error) { + lock.Unlock() + return code, nil + }) + if err != nil { return nil, nil, nil, err } diff --git a/internal/repository/lock.go b/internal/repository/lock.go index c64cb9222..e3360cac0 100644 --- a/internal/repository/lock.go +++ b/internal/repository/lock.go @@ -18,11 +18,6 @@ type lockContext struct { refreshWG sync.WaitGroup } -var globalLocks struct { - locks map[*restic.Lock]*lockContext - sync.Mutex -} - var ( retrySleepStart = 5 * time.Second retrySleepMax = 60 * time.Second @@ -37,7 +32,7 @@ func minDuration(a, b time.Duration) time.Duration { // Lock wraps the ctx such that it is cancelled when the repository is unlocked // cancelling the original context also stops the lock refresh -func Lock(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*restic.Lock, context.Context, error) { +func Lock(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*Unlocker, context.Context, error) { lockFn := restic.NewLock if exclusive { @@ -97,13 +92,10 @@ retryLoop: refreshChan := make(chan struct{}) forceRefreshChan := make(chan refreshLockRequest) - globalLocks.Lock() - globalLocks.locks[lock] = lockInfo go refreshLocks(ctx, repo.Backend(), lockInfo, refreshChan, forceRefreshChan, logger) go monitorLockRefresh(ctx, lockInfo, refreshChan, forceRefreshChan, logger) - globalLocks.Unlock() - return lock, ctx, err + return &Unlocker{lockInfo}, ctx, nil } var refreshInterval = 5 * time.Minute @@ -261,41 +253,11 @@ func tryRefreshStaleLock(ctx context.Context, be backend.Backend, lock *restic.L return true } -func Unlock(lock *restic.Lock) { - if lock == nil { - return - } - - globalLocks.Lock() - lockInfo, exists := globalLocks.locks[lock] - delete(globalLocks.locks, lock) - globalLocks.Unlock() - - if !exists { - debug.Log("unable to find lock %v in the global list of locks, ignoring", lock) - return - } - lockInfo.cancel() - lockInfo.refreshWG.Wait() +type Unlocker struct { + info *lockContext } -func UnlockAll(code int) (int, error) { - globalLocks.Lock() - locks := globalLocks.locks - debug.Log("unlocking %d locks", len(globalLocks.locks)) - for _, lockInfo := range globalLocks.locks { - lockInfo.cancel() - } - globalLocks.locks = make(map[*restic.Lock]*lockContext) - globalLocks.Unlock() - - for _, lockInfo := range locks { - lockInfo.refreshWG.Wait() - } - - return code, nil -} - -func init() { - globalLocks.locks = make(map[*restic.Lock]*lockContext) +func (l *Unlocker) Unlock() { + l.info.cancel() + l.info.refreshWG.Wait() } diff --git a/internal/repository/lock_test.go b/internal/repository/lock_test.go index fb48a566f..2975ed7ff 100644 --- a/internal/repository/lock_test.go +++ b/internal/repository/lock_test.go @@ -37,11 
+37,11 @@ func openLockTestRepo(t *testing.T, wrapper backendWrapper) restic.Repository { return repo } -func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, retryLock time.Duration) (*restic.Lock, context.Context) { +func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, retryLock time.Duration) (*Unlocker, context.Context) { lock, wrappedCtx, err := Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) test.OK(t, wrappedCtx.Err()) - if lock.Stale() { + if lock.info.lock.Stale() { t.Fatal("lock returned stale lock") } return lock, wrappedCtx @@ -51,7 +51,7 @@ func TestLock(t *testing.T) { repo := openLockTestRepo(t, nil) lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) - Unlock(lock) + lock.Unlock() if wrappedCtx.Err() == nil { t.Fatal("unlock did not cancel context") } @@ -69,21 +69,7 @@ func TestLockCancel(t *testing.T) { } // Unlock should not crash - Unlock(lock) -} - -func TestLockUnlockAll(t *testing.T) { - repo := openLockTestRepo(t, nil) - - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) - _, err := UnlockAll(0) - test.OK(t, err) - if wrappedCtx.Err() == nil { - t.Fatal("canceled parent context did not cancel context") - } - - // Unlock should not crash - Unlock(lock) + lock.Unlock() } func TestLockConflict(t *testing.T) { @@ -94,7 +80,7 @@ func TestLockConflict(t *testing.T) { lock, _, err := Lock(context.Background(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) - defer Unlock(lock) + defer lock.Unlock() _, _, err = Lock(context.Background(), repo2, false, 0, func(msg string) {}, func(format string, args ...interface{}) {}) if err == nil { t.Fatal("second lock should have failed") @@ -137,7 +123,7 @@ func TestLockFailedRefresh(t *testing.T) { t.Fatal("failed lock refresh did not cause context cancellation") } // Unlock should not crash - Unlock(lock) + lock.Unlock() } type loggingBackend struct { @@ -186,7 +172,7 @@ func TestLockSuccessfulRefresh(t *testing.T) { // expected lock refresh to work } // Unlock should not crash - Unlock(lock) + lock.Unlock() } type slowBackend struct { @@ -248,19 +234,21 @@ func TestLockSuccessfulStaleRefresh(t *testing.T) { } // Unlock should not crash - Unlock(lock) + lock.Unlock() } func TestLockWaitTimeout(t *testing.T) { + t.Parallel() repo := openLockTestRepo(t, nil) elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) + defer elock.Unlock() retryLock := 200 * time.Millisecond start := time.Now() - lock, _, err := Lock(context.TODO(), repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) + _, _, err = Lock(context.TODO(), repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) duration := time.Since(start) test.Assert(t, err != nil, @@ -269,16 +257,15 @@ func TestLockWaitTimeout(t *testing.T) { "create normal lock with exclusively locked repo didn't return the correct error") test.Assert(t, retryLock <= duration && duration < retryLock*3/2, "create normal lock with exclusively locked repo didn't wait for the specified timeout") - - test.OK(t, lock.Unlock()) - test.OK(t, elock.Unlock()) } func TestLockWaitCancel(t *testing.T) { + t.Parallel() repo := openLockTestRepo(t, nil) elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) 
{}) test.OK(t, err) + defer elock.Unlock() retryLock := 200 * time.Millisecond cancelAfter := 40 * time.Millisecond @@ -287,7 +274,7 @@ func TestLockWaitCancel(t *testing.T) { ctx, cancel := context.WithCancel(context.TODO()) time.AfterFunc(cancelAfter, cancel) - lock, _, err := Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) + _, _, err = Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) duration := time.Since(start) test.Assert(t, err != nil, @@ -296,12 +283,10 @@ func TestLockWaitCancel(t *testing.T) { "create normal lock with exclusively locked repo didn't return the correct error") test.Assert(t, cancelAfter <= duration && duration < retryLock-10*time.Millisecond, "create normal lock with exclusively locked repo didn't return in time, duration %v", duration) - - test.OK(t, lock.Unlock()) - test.OK(t, elock.Unlock()) } func TestLockWaitSuccess(t *testing.T) { + t.Parallel() repo := openLockTestRepo(t, nil) elock, _, err := Lock(context.TODO(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) @@ -311,11 +296,10 @@ func TestLockWaitSuccess(t *testing.T) { unlockAfter := 40 * time.Millisecond time.AfterFunc(unlockAfter, func() { - test.OK(t, elock.Unlock()) + elock.Unlock() }) lock, _, err := Lock(context.TODO(), repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) - - test.OK(t, lock.Unlock()) + lock.Unlock() } From 044e8bf82157abcf9623465db608a1994c0e9dac Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 24 Feb 2024 17:07:14 +0100 Subject: [PATCH 047/117] repository: parallelize lock tests --- internal/repository/lock.go | 63 +++++++++++++++++------------- internal/repository/lock_test.go | 66 +++++++++++++++++--------------- 2 files changed, 72 insertions(+), 57 deletions(-) diff --git a/internal/repository/lock.go b/internal/repository/lock.go index e3360cac0..fd8214cd1 100644 --- a/internal/repository/lock.go +++ b/internal/repository/lock.go @@ -18,21 +18,31 @@ type lockContext struct { refreshWG sync.WaitGroup } -var ( - retrySleepStart = 5 * time.Second - retrySleepMax = 60 * time.Second -) +type locker struct { + retrySleepStart time.Duration + retrySleepMax time.Duration + refreshInterval time.Duration + refreshabilityTimeout time.Duration +} -func minDuration(a, b time.Duration) time.Duration { - if a <= b { - return a - } - return b +const defaultRefreshInterval = 5 * time.Minute + +var lockerInst = &locker{ + retrySleepStart: 5 * time.Second, + retrySleepMax: 60 * time.Second, + refreshInterval: defaultRefreshInterval, + // consider a lock refresh failed a bit before the lock actually becomes stale + // the difference allows to compensate for a small time drift between clients. 
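// [Editorial aside, not part of the patch] The field on the next line keeps the value
// that the removed package-level variable used to compute: refreshabilityTimeout =
// StaleLockTimeout - 1.5*defaultRefreshInterval. Assuming restic's usual
// StaleLockTimeout of 30 minutes, that works out to 30m - 7.5m = 22.5m, i.e. a lock
// refresh has to succeed well before other clients would start to treat the lock as stale.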
+ refreshabilityTimeout: restic.StaleLockTimeout - defaultRefreshInterval*3/2, +} + +func Lock(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*Unlocker, context.Context, error) { + return lockerInst.Lock(ctx, repo, exclusive, retryLock, printRetry, logger) } // Lock wraps the ctx such that it is cancelled when the repository is unlocked // cancelling the original context also stops the lock refresh -func Lock(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*Unlocker, context.Context, error) { +func (l *locker) Lock(ctx context.Context, repo restic.Repository, exclusive bool, retryLock time.Duration, printRetry func(msg string), logger func(format string, args ...interface{})) (*Unlocker, context.Context, error) { lockFn := restic.NewLock if exclusive { @@ -42,7 +52,7 @@ func Lock(ctx context.Context, repo restic.Repository, exclusive bool, retryLock var lock *restic.Lock var err error - retrySleep := minDuration(retrySleepStart, retryLock) + retrySleep := minDuration(l.retrySleepStart, retryLock) retryMessagePrinted := false retryTimeout := time.After(retryLock) @@ -68,7 +78,7 @@ retryLoop: lock, err = lockFn(ctx, repo) break retryLoop case <-retrySleepCh: - retrySleep = minDuration(retrySleep*2, retrySleepMax) + retrySleep = minDuration(retrySleep*2, l.retrySleepMax) } } else { // anything else, either a successful lock or another error @@ -92,26 +102,27 @@ retryLoop: refreshChan := make(chan struct{}) forceRefreshChan := make(chan refreshLockRequest) - go refreshLocks(ctx, repo.Backend(), lockInfo, refreshChan, forceRefreshChan, logger) - go monitorLockRefresh(ctx, lockInfo, refreshChan, forceRefreshChan, logger) + go l.refreshLocks(ctx, repo.Backend(), lockInfo, refreshChan, forceRefreshChan, logger) + go l.monitorLockRefresh(ctx, lockInfo, refreshChan, forceRefreshChan, logger) return &Unlocker{lockInfo}, ctx, nil } -var refreshInterval = 5 * time.Minute - -// consider a lock refresh failed a bit before the lock actually becomes stale -// the difference allows to compensate for a small time drift between clients. 
-var refreshabilityTimeout = restic.StaleLockTimeout - refreshInterval*3/2 +func minDuration(a, b time.Duration) time.Duration { + if a <= b { + return a + } + return b +} type refreshLockRequest struct { result chan bool } -func refreshLocks(ctx context.Context, backend backend.Backend, lockInfo *lockContext, refreshed chan<- struct{}, forceRefresh <-chan refreshLockRequest, logger func(format string, args ...interface{})) { +func (l *locker) refreshLocks(ctx context.Context, backend backend.Backend, lockInfo *lockContext, refreshed chan<- struct{}, forceRefresh <-chan refreshLockRequest, logger func(format string, args ...interface{})) { debug.Log("start") lock := lockInfo.lock - ticker := time.NewTicker(refreshInterval) + ticker := time.NewTicker(l.refreshInterval) lastRefresh := lock.Time defer func() { @@ -151,7 +162,7 @@ func refreshLocks(ctx context.Context, backend backend.Backend, lockInfo *lockCo } case <-ticker.C: - if time.Since(lastRefresh) > refreshabilityTimeout { + if time.Since(lastRefresh) > l.refreshabilityTimeout { // the lock is too old, wait until the expiry monitor cancels the context continue } @@ -172,14 +183,14 @@ func refreshLocks(ctx context.Context, backend backend.Backend, lockInfo *lockCo } } -func monitorLockRefresh(ctx context.Context, lockInfo *lockContext, refreshed <-chan struct{}, forceRefresh chan<- refreshLockRequest, logger func(format string, args ...interface{})) { +func (l *locker) monitorLockRefresh(ctx context.Context, lockInfo *lockContext, refreshed <-chan struct{}, forceRefresh chan<- refreshLockRequest, logger func(format string, args ...interface{})) { // time.Now() might use a monotonic timer which is paused during standby // convert to unix time to ensure we compare real time values lastRefresh := time.Now().UnixNano() pollDuration := 1 * time.Second - if refreshInterval < pollDuration { + if l.refreshInterval < pollDuration { // required for TestLockFailedRefresh - pollDuration = refreshInterval / 5 + pollDuration = l.refreshInterval / 5 } // timers are paused during standby, which is a problem as the refresh timeout // _must_ expire if the host was too long in standby. 
Thus fall back to periodic checks @@ -205,7 +216,7 @@ func monitorLockRefresh(ctx context.Context, lockInfo *lockContext, refreshed <- } lastRefresh = time.Now().UnixNano() case <-ticker.C: - if time.Now().UnixNano()-lastRefresh < refreshabilityTimeout.Nanoseconds() || refreshStaleLockResult != nil { + if time.Now().UnixNano()-lastRefresh < l.refreshabilityTimeout.Nanoseconds() || refreshStaleLockResult != nil { continue } diff --git a/internal/repository/lock_test.go b/internal/repository/lock_test.go index 2975ed7ff..360ee2b23 100644 --- a/internal/repository/lock_test.go +++ b/internal/repository/lock_test.go @@ -37,8 +37,8 @@ func openLockTestRepo(t *testing.T, wrapper backendWrapper) restic.Repository { return repo } -func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, retryLock time.Duration) (*Unlocker, context.Context) { - lock, wrappedCtx, err := Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) +func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, lockerInst *locker, retryLock time.Duration) (*Unlocker, context.Context) { + lock, wrappedCtx, err := lockerInst.Lock(ctx, repo, false, retryLock, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) test.OK(t, wrappedCtx.Err()) if lock.info.lock.Stale() { @@ -48,9 +48,10 @@ func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, } func TestLock(t *testing.T) { + t.Parallel() repo := openLockTestRepo(t, nil) - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, lockerInst, 0) lock.Unlock() if wrappedCtx.Err() == nil { t.Fatal("unlock did not cancel context") @@ -58,11 +59,12 @@ func TestLock(t *testing.T) { } func TestLockCancel(t *testing.T) { + t.Parallel() repo := openLockTestRepo(t, nil) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - lock, wrappedCtx := checkedLockRepo(ctx, t, repo, 0) + lock, wrappedCtx := checkedLockRepo(ctx, t, repo, lockerInst, 0) cancel() if wrappedCtx.Err() == nil { t.Fatal("canceled parent context did not cancel context") @@ -73,6 +75,7 @@ func TestLockCancel(t *testing.T) { } func TestLockConflict(t *testing.T) { + t.Parallel() repo := openLockTestRepo(t, nil) repo2, err := New(repo.Backend(), Options{}) test.OK(t, err) @@ -102,19 +105,19 @@ func (b *writeOnceBackend) Save(ctx context.Context, h backend.Handle, rd backen } func TestLockFailedRefresh(t *testing.T) { + t.Parallel() repo := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { return &writeOnceBackend{Backend: r}, nil }) // reduce locking intervals to be suitable for testing - ri, rt := refreshInterval, refreshabilityTimeout - refreshInterval = 20 * time.Millisecond - refreshabilityTimeout = 100 * time.Millisecond - defer func() { - refreshInterval, refreshabilityTimeout = ri, rt - }() - - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) + li := &locker{ + retrySleepStart: lockerInst.retrySleepStart, + retrySleepMax: lockerInst.retrySleepMax, + refreshInterval: 20 * time.Millisecond, + refreshabilityTimeout: 100 * time.Millisecond, + } + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, li, 0) select { case <-wrappedCtx.Done(): @@ -139,6 +142,7 @@ func (b *loggingBackend) Save(ctx context.Context, h backend.Handle, rd backend. 
} func TestLockSuccessfulRefresh(t *testing.T) { + t.Parallel() repo := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { return &loggingBackend{ Backend: r, @@ -148,14 +152,13 @@ func TestLockSuccessfulRefresh(t *testing.T) { t.Logf("test for successful lock refresh %v", time.Now()) // reduce locking intervals to be suitable for testing - ri, rt := refreshInterval, refreshabilityTimeout - refreshInterval = 60 * time.Millisecond - refreshabilityTimeout = 500 * time.Millisecond - defer func() { - refreshInterval, refreshabilityTimeout = ri, rt - }() - - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) + li := &locker{ + retrySleepStart: lockerInst.retrySleepStart, + retrySleepMax: lockerInst.retrySleepMax, + refreshInterval: 60 * time.Millisecond, + refreshabilityTimeout: 500 * time.Millisecond, + } + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, li, 0) select { case <-wrappedCtx.Done(): @@ -168,7 +171,7 @@ func TestLockSuccessfulRefresh(t *testing.T) { buf = buf[:n] t.Log(string(buf)) - case <-time.After(2 * refreshabilityTimeout): + case <-time.After(2 * li.refreshabilityTimeout): // expected lock refresh to work } // Unlock should not crash @@ -190,6 +193,7 @@ func (b *slowBackend) Save(ctx context.Context, h backend.Handle, rd backend.Rew } func TestLockSuccessfulStaleRefresh(t *testing.T) { + t.Parallel() var sb *slowBackend repo := openLockTestRepo(t, func(r backend.Backend) (backend.Backend, error) { sb = &slowBackend{Backend: r} @@ -198,17 +202,17 @@ func TestLockSuccessfulStaleRefresh(t *testing.T) { t.Logf("test for successful lock refresh %v", time.Now()) // reduce locking intervals to be suitable for testing - ri, rt := refreshInterval, refreshabilityTimeout - refreshInterval = 10 * time.Millisecond - refreshabilityTimeout = 50 * time.Millisecond - defer func() { - refreshInterval, refreshabilityTimeout = ri, rt - }() + li := &locker{ + retrySleepStart: lockerInst.retrySleepStart, + retrySleepMax: lockerInst.retrySleepMax, + refreshInterval: 10 * time.Millisecond, + refreshabilityTimeout: 50 * time.Millisecond, + } - lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, 0) + lock, wrappedCtx := checkedLockRepo(context.Background(), t, repo, li, 0) // delay lock refreshing long enough that the lock would expire sb.m.Lock() - sb.sleep = refreshabilityTimeout + refreshInterval + sb.sleep = li.refreshabilityTimeout + li.refreshInterval sb.m.Unlock() select { @@ -216,7 +220,7 @@ func TestLockSuccessfulStaleRefresh(t *testing.T) { // don't call t.Fatal to allow the lock to be properly cleaned up t.Error("lock refresh failed", time.Now()) - case <-time.After(refreshabilityTimeout): + case <-time.After(li.refreshabilityTimeout): } // reset slow backend sb.m.Lock() @@ -229,7 +233,7 @@ func TestLockSuccessfulStaleRefresh(t *testing.T) { // don't call t.Fatal to allow the lock to be properly cleaned up t.Error("lock refresh failed", time.Now()) - case <-time.After(3 * refreshabilityTimeout): + case <-time.After(3 * li.refreshabilityTimeout): // expected lock refresh to work } From 3ba1fa3cee58aabaee0550b19d2bcb9a103f82d0 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 24 Feb 2024 17:20:10 +0100 Subject: [PATCH 048/117] repository: remove a few global variables --- internal/repository/key.go | 2 +- internal/repository/testing.go | 13 +++++-------- 2 files changed, 6 insertions(+), 9 deletions(-) diff --git a/internal/repository/key.go b/internal/repository/key.go index d9f8d8e17..4d597da4d 100644 --- 
a/internal/repository/key.go +++ b/internal/repository/key.go @@ -47,7 +47,7 @@ type Key struct { // calibrated on the first run of AddKey(). var Params *crypto.Params -var ( +const ( // KDFTimeout specifies the maximum runtime for the KDF. KDFTimeout = 500 * time.Millisecond diff --git a/internal/repository/testing.go b/internal/repository/testing.go index dbbdbeb07..faa40c70a 100644 --- a/internal/repository/testing.go +++ b/internal/repository/testing.go @@ -17,13 +17,6 @@ import ( "github.com/restic/chunker" ) -// testKDFParams are the parameters for the KDF to be used during testing. -var testKDFParams = crypto.Params{ - N: 128, - R: 1, - P: 1, -} - type logger interface { Logf(format string, args ...interface{}) } @@ -31,7 +24,11 @@ type logger interface { // TestUseLowSecurityKDFParameters configures low-security KDF parameters for testing. func TestUseLowSecurityKDFParameters(t logger) { t.Logf("using low-security KDF parameters for test") - Params = &testKDFParams + Params = &crypto.Params{ + N: 128, + R: 1, + P: 1, + } } // TestBackend returns a fully configured in-memory backend. From dc441c57a76874bc503b71b9b2946b0af31a7934 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 24 Feb 2024 21:45:24 +0100 Subject: [PATCH 049/117] repository: unify repository initialization in tests Tests should use a helper from internal/repository/testing.go to construct a Repository object. --- internal/checker/checker_test.go | 36 +++++++------------------- internal/index/index_parallel_test.go | 4 +-- internal/repository/lock_test.go | 9 ++----- internal/repository/repository_test.go | 11 +++----- internal/repository/testing.go | 13 +++++++++- 5 files changed, 27 insertions(+), 46 deletions(-) diff --git a/internal/checker/checker_test.go b/internal/checker/checker_test.go index cca5a582c..b0fa4e3e3 100644 --- a/internal/checker/checker_test.go +++ b/internal/checker/checker_test.go @@ -72,11 +72,9 @@ func assertOnlyMixedPackHints(t *testing.T, hints []error) { } func TestCheckRepo(t *testing.T) { - repodir, cleanup := test.Env(t, checkerTestData) + repo, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - chkr := checker.New(repo, false) hints, errs := chkr.LoadIndex(context.TODO(), nil) if len(errs) > 0 { @@ -92,11 +90,9 @@ func TestCheckRepo(t *testing.T) { } func TestMissingPack(t *testing.T) { - repodir, cleanup := test.Env(t, checkerTestData) + repo, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - packHandle := backend.Handle{ Type: restic.PackFile, Name: "657f7fb64f6a854fff6fe9279998ee09034901eded4e6db9bcee0e59745bbce6", @@ -123,11 +119,9 @@ func TestMissingPack(t *testing.T) { } func TestUnreferencedPack(t *testing.T) { - repodir, cleanup := test.Env(t, checkerTestData) + repo, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - // index 3f1a only references pack 60e0 packID := "60e0438dcb978ec6860cc1f8c43da648170ee9129af8f650f876bad19f8f788e" indexHandle := backend.Handle{ @@ -156,11 +150,9 @@ func TestUnreferencedPack(t *testing.T) { } func TestUnreferencedBlobs(t *testing.T) { - repodir, cleanup := test.Env(t, checkerTestData) + repo, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - snapshotHandle := backend.Handle{ Type: restic.SnapshotFile, Name: 
"51d249d28815200d59e4be7b3f21a157b864dc343353df9d8e498220c2499b02", @@ -195,11 +187,9 @@ func TestUnreferencedBlobs(t *testing.T) { } func TestModifiedIndex(t *testing.T) { - repodir, cleanup := test.Env(t, checkerTestData) + repo, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - done := make(chan struct{}) defer close(done) @@ -274,11 +264,9 @@ func TestModifiedIndex(t *testing.T) { var checkerDuplicateIndexTestData = filepath.Join("testdata", "duplicate-packs-in-index-test-repo.tar.gz") func TestDuplicatePacksInIndex(t *testing.T) { - repodir, cleanup := test.Env(t, checkerDuplicateIndexTestData) + repo, cleanup := repository.TestFromFixture(t, checkerDuplicateIndexTestData) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - chkr := checker.New(repo, false) hints, errs := chkr.LoadIndex(context.TODO(), nil) if len(hints) == 0 { @@ -342,9 +330,7 @@ func TestCheckerModifiedData(t *testing.T) { t.Logf("archived as %v", sn.ID().Str()) beError := &errorBackend{Backend: repo.Backend()} - checkRepo, err := repository.New(beError, repository.Options{}) - test.OK(t, err) - test.OK(t, checkRepo.SearchKey(context.TODO(), test.TestPassword, 5, "")) + checkRepo := repository.TestOpenBackend(t, beError) chkr := checker.New(checkRepo, false) @@ -399,10 +385,8 @@ func (r *loadTreesOnceRepository) LoadTree(ctx context.Context, id restic.ID) (* } func TestCheckerNoDuplicateTreeDecodes(t *testing.T) { - repodir, cleanup := test.Env(t, checkerTestData) + repo, cleanup := repository.TestFromFixture(t, checkerTestData) defer cleanup() - - repo := repository.TestOpenLocal(t, repodir) checkRepo := &loadTreesOnceRepository{ Repository: repo, loadedTrees: restic.NewIDSet(), @@ -549,9 +533,7 @@ func TestCheckerBlobTypeConfusion(t *testing.T) { } func loadBenchRepository(t *testing.B) (*checker.Checker, restic.Repository, func()) { - repodir, cleanup := test.Env(t, checkerTestData) - - repo := repository.TestOpenLocal(t, repodir) + repo, cleanup := repository.TestFromFixture(t, checkerTestData) chkr := checker.New(repo, false) hints, errs := chkr.LoadIndex(context.TODO(), nil) diff --git a/internal/index/index_parallel_test.go b/internal/index/index_parallel_test.go index db4853e19..5cb8d299d 100644 --- a/internal/index/index_parallel_test.go +++ b/internal/index/index_parallel_test.go @@ -15,11 +15,9 @@ import ( var repoFixture = filepath.Join("..", "repository", "testdata", "test-repo.tar.gz") func TestRepositoryForAllIndexes(t *testing.T) { - repodir, cleanup := rtest.Env(t, repoFixture) + repo, cleanup := repository.TestFromFixture(t, repoFixture) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) - expectedIndexIDs := restic.NewIDSet() rtest.OK(t, repo.List(context.TODO(), restic.IndexFile, func(id restic.ID, size int64) error { expectedIndexIDs.Insert(id) diff --git a/internal/repository/lock_test.go b/internal/repository/lock_test.go index 360ee2b23..644fc6b37 100644 --- a/internal/repository/lock_test.go +++ b/internal/repository/lock_test.go @@ -31,10 +31,7 @@ func openLockTestRepo(t *testing.T, wrapper backendWrapper) restic.Repository { rtest.OK(t, err) } - repo, err := New(be, Options{}) - rtest.OK(t, err) - rtest.OK(t, repo.SearchKey(context.TODO(), test.TestPassword, 1, "")) - return repo + return TestOpenBackend(t, be) } func checkedLockRepo(ctx context.Context, t *testing.T, repo restic.Repository, lockerInst *locker, retryLock time.Duration) (*Unlocker, context.Context) { @@ -77,9 +74,7 @@ func 
TestLockCancel(t *testing.T) { func TestLockConflict(t *testing.T) { t.Parallel() repo := openLockTestRepo(t, nil) - repo2, err := New(repo.Backend(), Options{}) - test.OK(t, err) - test.OK(t, repo2.SearchKey(context.TODO(), test.TestPassword, 1, "")) + repo2 := TestOpenBackend(t, repo.Backend()) lock, _, err := Lock(context.Background(), repo, true, 0, func(msg string) {}, func(format string, args ...interface{}) {}) test.OK(t, err) diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index 0fa8e4d4a..98ff560fe 100644 --- a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go @@ -221,10 +221,9 @@ func benchmarkLoadUnpacked(b *testing.B, version uint) { var repoFixture = filepath.Join("testdata", "test-repo.tar.gz") func TestRepositoryLoadIndex(t *testing.T) { - repodir, cleanup := rtest.Env(t, repoFixture) + repo, cleanup := repository.TestFromFixture(t, repoFixture) defer cleanup() - repo := repository.TestOpenLocal(t, repodir) rtest.OK(t, repo.LoadIndex(context.TODO(), nil)) } @@ -243,7 +242,7 @@ func loadIndex(ctx context.Context, repo restic.LoaderUnpacked, id restic.ID) (* } func TestRepositoryLoadUnpackedBroken(t *testing.T) { - repodir, cleanup := rtest.Env(t, repoFixture) + repo, cleanup := repository.TestFromFixture(t, repoFixture) defer cleanup() data := rtest.Random(23, 12345) @@ -252,7 +251,6 @@ func TestRepositoryLoadUnpackedBroken(t *testing.T) { // damage buffer data[0] ^= 0xff - repo := repository.TestOpenLocal(t, repodir) // store broken file err := repo.Backend().Save(context.TODO(), h, backend.NewByteReader(data, nil)) rtest.OK(t, err) @@ -289,10 +287,7 @@ func TestRepositoryLoadUnpackedRetryBroken(t *testing.T) { be, err := local.Open(context.TODO(), local.Config{Path: repodir, Connections: 2}) rtest.OK(t, err) - repo, err := repository.New(&damageOnceBackend{Backend: be}, repository.Options{}) - rtest.OK(t, err) - err = repo.SearchKey(context.TODO(), rtest.TestPassword, 10, "") - rtest.OK(t, err) + repo := repository.TestOpenBackend(t, &damageOnceBackend{Backend: be}) rtest.OK(t, repo.LoadIndex(context.TODO(), nil)) } diff --git a/internal/repository/testing.go b/internal/repository/testing.go index faa40c70a..3a566565f 100644 --- a/internal/repository/testing.go +++ b/internal/repository/testing.go @@ -95,8 +95,15 @@ func TestRepositoryWithVersion(t testing.TB, version uint) restic.Repository { return TestRepositoryWithBackend(t, nil, version, opts) } +func TestFromFixture(t testing.TB, repoFixture string) (restic.Repository, func()) { + repodir, cleanup := test.Env(t, repoFixture) + repo := TestOpenLocal(t, repodir) + + return repo, cleanup +} + // TestOpenLocal opens a local repository. 
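// [Editorial aside, not part of the patch] A hedged sketch of how tests are expected to
// use the helper added above, based only on the signatures visible in this diff:
//
//	repo, cleanup := repository.TestFromFixture(t, repoFixture)
//	defer cleanup()
//
// and, when a test needs to wrap the backend, something like
// repository.TestOpenBackend(t, &damageOnceBackend{Backend: be}), as in the
// repository_test.go hunk above.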
-func TestOpenLocal(t testing.TB, dir string) (r restic.Repository) { +func TestOpenLocal(t testing.TB, dir string) restic.Repository { var be backend.Backend be, err := local.Open(context.TODO(), local.Config{Path: dir, Connections: 2}) if err != nil { @@ -105,6 +112,10 @@ func TestOpenLocal(t testing.TB, dir string) (r restic.Repository) { be = retry.New(be, 3, nil, nil) + return TestOpenBackend(t, be) +} + +func TestOpenBackend(t testing.TB, be backend.Backend) restic.Repository { repo, err := New(be, Options{}) if err != nil { t.Fatal(err) From d18726cd70527ff5d77f0d418585659834684756 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 24 Feb 2024 21:52:28 +0100 Subject: [PATCH 050/117] ls: add missing read lock As `ls` reads data from the repository, it must acquire a read lock unless `--no-lock` was specified. The old behavior is equivalent to `ls --no-lock`. --- cmd/restic/cmd_ls.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmd/restic/cmd_ls.go b/cmd/restic/cmd_ls.go index b0246625e..c4fb32de3 100644 --- a/cmd/restic/cmd_ls.go +++ b/cmd/restic/cmd_ls.go @@ -309,10 +309,11 @@ func runLs(ctx context.Context, opts LsOptions, gopts GlobalOptions, args []stri return false } - repo, err := OpenRepository(ctx, gopts) + ctx, repo, unlock, err := openWithReadLock(ctx, gopts, gopts.NoLock) if err != nil { return err } + defer unlock() snapshotLister, err := restic.MemorizeList(ctx, repo, restic.SnapshotFile) if err != nil { From 8155dbe711b9636ca7b02e7833595820600657a3 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 24 Feb 2024 21:54:39 +0100 Subject: [PATCH 051/117] correctly lock repository in integration tests --- cmd/restic/cmd_backup_integration_test.go | 18 ++-------- cmd/restic/cmd_mount_integration_test.go | 27 +++++++-------- cmd/restic/cmd_rewrite_integration_test.go | 7 ++-- cmd/restic/integration_helpers_test.go | 39 ++++++++++++++++------ cmd/restic/integration_test.go | 5 +-- 5 files changed, 52 insertions(+), 44 deletions(-) diff --git a/cmd/restic/cmd_backup_integration_test.go b/cmd/restic/cmd_backup_integration_test.go index 0bc4a9eaa..75de1341c 100644 --- a/cmd/restic/cmd_backup_integration_test.go +++ b/cmd/restic/cmd_backup_integration_test.go @@ -9,7 +9,6 @@ import ( "runtime" "testing" - "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" @@ -250,29 +249,18 @@ func TestBackupTreeLoadError(t *testing.T) { opts := BackupOptions{} // Backup a subdirectory first, such that we can remove the tree pack for the subdirectory testRunBackup(t, env.testdata, []string{"test"}, opts, env.gopts) - - r, err := OpenRepository(context.TODO(), env.gopts) - rtest.OK(t, err) - rtest.OK(t, r.LoadIndex(context.TODO(), nil)) - treePacks := restic.NewIDSet() - r.Index().Each(context.TODO(), func(pb restic.PackedBlob) { - if pb.Type == restic.TreeBlob { - treePacks.Insert(pb.PackID) - } - }) + treePacks := listTreePacks(env.gopts, t) testRunBackup(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) testRunCheck(t, env.gopts) // delete the subdirectory pack first - for id := range treePacks { - rtest.OK(t, r.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: id.String()})) - } + removePacks(env.gopts, t, treePacks) testRunRebuildIndex(t, env.gopts) // now the repo is missing the tree blob in the index; check should report this testRunCheckMustFail(t, env.gopts) // second 
backup should report an error but "heal" this situation - err = testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) + err := testRunBackupAssumeFailure(t, filepath.Dir(env.testdata), []string{filepath.Base(env.testdata)}, opts, env.gopts) rtest.Assert(t, err != nil, "backup should have reported an error for the subdirectory") testRunCheck(t, env.gopts) diff --git a/cmd/restic/cmd_mount_integration_test.go b/cmd/restic/cmd_mount_integration_test.go index d2025a395..590e15030 100644 --- a/cmd/restic/cmd_mount_integration_test.go +++ b/cmd/restic/cmd_mount_integration_test.go @@ -12,7 +12,6 @@ import ( "testing" "time" - "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" ) @@ -86,12 +85,12 @@ func listSnapshots(t testing.TB, dir string) []string { return names } -func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Repository, mountpoint, repodir string, snapshotIDs restic.IDs, expectedSnapshotsInFuseDir int) { +func checkSnapshots(t testing.TB, gopts GlobalOptions, mountpoint string, snapshotIDs restic.IDs, expectedSnapshotsInFuseDir int) { t.Logf("checking for %d snapshots: %v", len(snapshotIDs), snapshotIDs) var wg sync.WaitGroup wg.Add(1) - go testRunMount(t, global, mountpoint, &wg) + go testRunMount(t, gopts, mountpoint, &wg) waitForMount(t, mountpoint) defer wg.Wait() defer testRunUmount(t, mountpoint) @@ -100,7 +99,7 @@ func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Reposit t.Fatal(`virtual directory "snapshots" doesn't exist`) } - ids := listSnapshots(t, repodir) + ids := listSnapshots(t, gopts.Repo) t.Logf("found %v snapshots in repo: %v", len(ids), ids) namesInSnapshots := listSnapshots(t, mountpoint) @@ -124,6 +123,10 @@ func checkSnapshots(t testing.TB, global GlobalOptions, repo *repository.Reposit } } + _, repo, unlock, err := openWithReadLock(context.TODO(), gopts, false) + rtest.OK(t, err) + defer unlock() + for _, id := range snapshotIDs { snapshot, err := restic.LoadSnapshot(context.TODO(), repo, id) rtest.OK(t, err) @@ -166,10 +169,7 @@ func TestMount(t *testing.T) { testRunInit(t, env.gopts) - repo, err := OpenRepository(context.TODO(), env.gopts) - rtest.OK(t, err) - - checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, []restic.ID{}, 0) + checkSnapshots(t, env.gopts, env.mountpoint, []restic.ID{}, 0) rtest.SetupTarTestFixture(t, env.testdata, filepath.Join("testdata", "backup-data.tar.gz")) @@ -179,7 +179,7 @@ func TestMount(t *testing.T) { rtest.Assert(t, len(snapshotIDs) == 1, "expected one snapshot, got %v", snapshotIDs) - checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 2) + checkSnapshots(t, env.gopts, env.mountpoint, snapshotIDs, 2) // second backup, implicit incremental testRunBackup(t, "", []string{env.testdata}, BackupOptions{}, env.gopts) @@ -187,7 +187,7 @@ func TestMount(t *testing.T) { rtest.Assert(t, len(snapshotIDs) == 2, "expected two snapshots, got %v", snapshotIDs) - checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 3) + checkSnapshots(t, env.gopts, env.mountpoint, snapshotIDs, 3) // third backup, explicit incremental bopts := BackupOptions{Parent: snapshotIDs[0].String()} @@ -196,7 +196,7 @@ func TestMount(t *testing.T) { rtest.Assert(t, len(snapshotIDs) == 3, "expected three snapshots, got %v", snapshotIDs) - checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, snapshotIDs, 4) + 
checkSnapshots(t, env.gopts, env.mountpoint, snapshotIDs, 4) } func TestMountSameTimestamps(t *testing.T) { @@ -211,14 +211,11 @@ func TestMountSameTimestamps(t *testing.T) { rtest.SetupTarTestFixture(t, env.base, filepath.Join("testdata", "repo-same-timestamps.tar.gz")) - repo, err := OpenRepository(context.TODO(), env.gopts) - rtest.OK(t, err) - ids := []restic.ID{ restic.TestParseID("280303689e5027328889a06d718b729e96a1ce6ae9ef8290bff550459ae611ee"), restic.TestParseID("75ad6cdc0868e082f2596d5ab8705e9f7d87316f5bf5690385eeff8dbe49d9f5"), restic.TestParseID("5fd0d8b2ef0fa5d23e58f1e460188abb0f525c0f0c4af8365a1280c807a80a1b"), } - checkSnapshots(t, env.gopts, repo, env.mountpoint, env.repo, ids, 4) + checkSnapshots(t, env.gopts, env.mountpoint, ids, 4) } diff --git a/cmd/restic/cmd_rewrite_integration_test.go b/cmd/restic/cmd_rewrite_integration_test.go index 532855f57..71d6a60a5 100644 --- a/cmd/restic/cmd_rewrite_integration_test.go +++ b/cmd/restic/cmd_rewrite_integration_test.go @@ -78,8 +78,11 @@ func testRewriteMetadata(t *testing.T, metadata snapshotMetadataArgs) { createBasicRewriteRepo(t, env) testRunRewriteExclude(t, env.gopts, []string{}, true, metadata) - repo, _ := OpenRepository(context.TODO(), env.gopts) - snapshots, err := restic.TestLoadAllSnapshots(context.TODO(), repo, nil) + ctx, repo, unlock, err := openWithReadLock(context.TODO(), env.gopts, false) + rtest.OK(t, err) + defer unlock() + + snapshots, err := restic.TestLoadAllSnapshots(ctx, repo, nil) rtest.OK(t, err) rtest.Assert(t, len(snapshots) == 1, "expected one snapshot, got %v", len(snapshots)) newSnapshot := snapshots[0] diff --git a/cmd/restic/integration_helpers_test.go b/cmd/restic/integration_helpers_test.go index 184609d40..c87e1071e 100644 --- a/cmd/restic/integration_helpers_test.go +++ b/cmd/restic/integration_helpers_test.go @@ -232,47 +232,66 @@ func testSetupBackupData(t testing.TB, env *testEnvironment) string { } func listPacks(gopts GlobalOptions, t *testing.T) restic.IDSet { - r, err := OpenRepository(context.TODO(), gopts) + ctx, r, unlock, err := openWithReadLock(context.TODO(), gopts, false) rtest.OK(t, err) + defer unlock() packs := restic.NewIDSet() - rtest.OK(t, r.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error { + rtest.OK(t, r.List(ctx, restic.PackFile, func(id restic.ID, size int64) error { packs.Insert(id) return nil })) return packs } -func removePacks(gopts GlobalOptions, t testing.TB, remove restic.IDSet) { - r, err := OpenRepository(context.TODO(), gopts) +func listTreePacks(gopts GlobalOptions, t *testing.T) restic.IDSet { + ctx, r, unlock, err := openWithReadLock(context.TODO(), gopts, false) rtest.OK(t, err) + defer unlock() + + rtest.OK(t, r.LoadIndex(ctx, nil)) + treePacks := restic.NewIDSet() + r.Index().Each(ctx, func(pb restic.PackedBlob) { + if pb.Type == restic.TreeBlob { + treePacks.Insert(pb.PackID) + } + }) + + return treePacks +} + +func removePacks(gopts GlobalOptions, t testing.TB, remove restic.IDSet) { + ctx, r, unlock, err := openWithExclusiveLock(context.TODO(), gopts, false) + rtest.OK(t, err) + defer unlock() for id := range remove { - rtest.OK(t, r.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: id.String()})) + rtest.OK(t, r.Backend().Remove(ctx, backend.Handle{Type: restic.PackFile, Name: id.String()})) } } func removePacksExcept(gopts GlobalOptions, t testing.TB, keep restic.IDSet, removeTreePacks bool) { - r, err := OpenRepository(context.TODO(), gopts) + ctx, r, unlock, err := 
openWithExclusiveLock(context.TODO(), gopts, false) rtest.OK(t, err) + defer unlock() // Get all tree packs - rtest.OK(t, r.LoadIndex(context.TODO(), nil)) + rtest.OK(t, r.LoadIndex(ctx, nil)) treePacks := restic.NewIDSet() - r.Index().Each(context.TODO(), func(pb restic.PackedBlob) { + r.Index().Each(ctx, func(pb restic.PackedBlob) { if pb.Type == restic.TreeBlob { treePacks.Insert(pb.PackID) } }) // remove all packs containing data blobs - rtest.OK(t, r.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error { + rtest.OK(t, r.List(ctx, restic.PackFile, func(id restic.ID, size int64) error { if treePacks.Has(id) != removeTreePacks || keep.Has(id) { return nil } - return r.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: id.String()}) + return r.Backend().Remove(ctx, backend.Handle{Type: restic.PackFile, Name: id.String()}) })) } diff --git a/cmd/restic/integration_test.go b/cmd/restic/integration_test.go index 7cf8396a3..21be571e2 100644 --- a/cmd/restic/integration_test.go +++ b/cmd/restic/integration_test.go @@ -154,12 +154,13 @@ func TestFindListOnce(t *testing.T) { testRunBackup(t, "", []string{filepath.Join(env.testdata, "0", "0", "9", "3")}, opts, env.gopts) thirdSnapshot := restic.NewIDSet(testListSnapshots(t, env.gopts, 3)...) - repo, err := OpenRepository(context.TODO(), env.gopts) + ctx, repo, unlock, err := openWithReadLock(context.TODO(), env.gopts, false) rtest.OK(t, err) + defer unlock() snapshotIDs := restic.NewIDSet() // specify the two oldest snapshots explicitly and use "latest" to reference the newest one - for sn := range FindFilteredSnapshots(context.TODO(), repo, repo, &restic.SnapshotFilter{}, []string{ + for sn := range FindFilteredSnapshots(ctx, repo, repo, &restic.SnapshotFilter{}, []string{ secondSnapshot[0].String(), secondSnapshot[1].String()[:8], "latest", From 5e98f1e2eb49ef9dd84030611d6bc557b646d844 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 28 Mar 2024 23:14:32 +0100 Subject: [PATCH 052/117] repository: fix test setup race conditions --- internal/repository/key.go | 16 ++++++++-------- internal/repository/testing.go | 19 ++++++++++++------- internal/restic/config.go | 6 +++++- 3 files changed, 25 insertions(+), 16 deletions(-) diff --git a/internal/repository/key.go b/internal/repository/key.go index 4d597da4d..0604b44df 100644 --- a/internal/repository/key.go +++ b/internal/repository/key.go @@ -43,9 +43,9 @@ type Key struct { id restic.ID } -// Params tracks the parameters used for the KDF. If not set, it will be +// params tracks the parameters used for the KDF. If not set, it will be // calibrated on the first run of AddKey(). -var Params *crypto.Params +var params *crypto.Params const ( // KDFTimeout specifies the maximum runtime for the KDF. @@ -196,13 +196,13 @@ func LoadKey(ctx context.Context, s *Repository, id restic.ID) (k *Key, err erro // AddKey adds a new key to an already existing repository. 
func AddKey(ctx context.Context, s *Repository, password, username, hostname string, template *crypto.Key) (*Key, error) { // make sure we have valid KDF parameters - if Params == nil { + if params == nil { p, err := crypto.Calibrate(KDFTimeout, KDFMemory) if err != nil { return nil, errors.Wrap(err, "Calibrate") } - Params = &p + params = &p debug.Log("calibrated KDF parameters are %v", p) } @@ -213,9 +213,9 @@ func AddKey(ctx context.Context, s *Repository, password, username, hostname str Hostname: hostname, KDF: "scrypt", - N: Params.N, - R: Params.R, - P: Params.P, + N: params.N, + R: params.R, + P: params.P, } if newkey.Hostname == "" { @@ -237,7 +237,7 @@ func AddKey(ctx context.Context, s *Repository, password, username, hostname str } // call KDF to derive user key - newkey.user, err = crypto.KDF(*Params, newkey.Salt, password) + newkey.user, err = crypto.KDF(*params, newkey.Salt, password) if err != nil { return nil, err } diff --git a/internal/repository/testing.go b/internal/repository/testing.go index 3a566565f..874d179ce 100644 --- a/internal/repository/testing.go +++ b/internal/repository/testing.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "os" + "sync" "testing" "github.com/restic/restic/internal/backend" @@ -21,14 +22,18 @@ type logger interface { Logf(format string, args ...interface{}) } +var paramsOnce sync.Once + // TestUseLowSecurityKDFParameters configures low-security KDF parameters for testing. func TestUseLowSecurityKDFParameters(t logger) { t.Logf("using low-security KDF parameters for test") - Params = &crypto.Params{ - N: 128, - R: 1, - P: 1, - } + paramsOnce.Do(func() { + params = &crypto.Params{ + N: 128, + R: 1, + P: 1, + } + }) } // TestBackend returns a fully configured in-memory backend. @@ -36,7 +41,7 @@ func TestBackend(_ testing.TB) backend.Backend { return mem.New() } -const TestChunkerPol = chunker.Pol(0x3DA3358B4DC173) +const testChunkerPol = chunker.Pol(0x3DA3358B4DC173) // TestRepositoryWithBackend returns a repository initialized with a test // password. If be is nil, an in-memory backend is used. A constant polynomial @@ -55,7 +60,7 @@ func TestRepositoryWithBackend(t testing.TB, be backend.Backend, version uint, o t.Fatalf("TestRepository(): new repo failed: %v", err) } - cfg := restic.TestCreateConfig(t, TestChunkerPol, version) + cfg := restic.TestCreateConfig(t, testChunkerPol, version) err = repo.init(context.TODO(), test.TestPassword, cfg) if err != nil { t.Fatalf("TestRepository(): initialize repo failed: %v", err) diff --git a/internal/restic/config.go b/internal/restic/config.go index 67ee190bc..67af259ba 100644 --- a/internal/restic/config.go +++ b/internal/restic/config.go @@ -2,6 +2,7 @@ package restic import ( "context" + "sync" "testing" "github.com/restic/restic/internal/errors" @@ -67,12 +68,15 @@ func TestCreateConfig(t testing.TB, pol chunker.Pol, version uint) (cfg Config) } var checkPolynomial = true +var checkPolynomialOnce sync.Once // TestDisableCheckPolynomial disables the check that the polynomial used for // the chunker. func TestDisableCheckPolynomial(t testing.TB) { t.Logf("disabling check of the chunker polynomial") - checkPolynomial = false + checkPolynomialOnce.Do(func() { + checkPolynomial = false + }) } // LoadConfig returns loads, checks and returns the config for a repository. 
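Editorial note: the race fix in the patch above funnels every mutation of shared
package state (the KDF parameters and the chunker polynomial check) through
sync.Once. The stand-alone sketch below is not part of the patch series and uses
only illustrative names; it shows the same pattern in isolation and should compile
and run as-is:

package main

import (
	"fmt"
	"sync"
)

// params stands in for package-level state that several parallel tests may want
// to override, similar to the KDF parameters in the patch above.
var params *int

var paramsOnce sync.Once

// useLowSecurityParams lowers params exactly once; concurrent callers block
// until the first Do call has finished, so there is no data race on params.
func useLowSecurityParams() {
	paramsOnce.Do(func() {
		v := 128
		params = &v
	})
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			useLowSecurityParams()
		}()
	}
	wg.Wait()
	fmt.Println(*params) // always prints 128
}

The trade-off of sync.Once is that only the first caller takes effect; for
test-only toggles such as low-security KDF parameters that is exactly the
intended behaviour.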
From 07eb6c315b34bac8d698be41fecd0c26d542bb49 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Thu, 28 Mar 2024 23:46:58 +0100 Subject: [PATCH 053/117] add changelog for locking refactor --- changelog/unreleased/pull-4709 | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 changelog/unreleased/pull-4709 diff --git a/changelog/unreleased/pull-4709 b/changelog/unreleased/pull-4709 new file mode 100644 index 000000000..5ffb2a6a6 --- /dev/null +++ b/changelog/unreleased/pull-4709 @@ -0,0 +1,10 @@ +Bugfix: Correct `--no-lock` handling of `ls` and `tag` command + +The `ls` command never locked the repository. This has been fixed. The old +behavior is still supported using `ls --no-lock`. The latter invocation also +works with older restic versions. + +The `tag` command erroneously accepted the `--no-lock` command. The command +now always requires an exclusive lock. + +https://github.com/restic/restic/pull/4709 From ec2b79834aadc71352a49d5c4176e724c7fd18cf Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 29 Mar 2024 00:24:03 +0100 Subject: [PATCH 054/117] use consistent alias for interal/test package --- .golangci.yml | 10 +- cmd/restic/cmd_restore_integration_test.go | 4 +- internal/archiver/archiver_test.go | 128 ++++++++++----------- internal/archiver/archiver_unix_test.go | 16 +-- internal/archiver/scanner_test.go | 14 +-- internal/archiver/testing_test.go | 12 +- internal/archiver/tree_test.go | 6 +- internal/fs/stat_test.go | 4 +- internal/test/helpers.go | 5 +- 9 files changed, 103 insertions(+), 96 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index 7dc6a8e7f..e632965bb 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -38,6 +38,8 @@ linters: # ensure that http response bodies are closed - bodyclose + - importas + issues: # don't use the default exclude rules, this hides (among others) ignored # errors from Close() calls @@ -58,4 +60,10 @@ issues: exclude-rules: # revive: ignore unused parameters in tests - path: (_test\.go|testing\.go|backend/.*/tests\.go) - text: "unused-parameter:" \ No newline at end of file + text: "unused-parameter:" + +linters-settings: + importas: + alias: + - pkg: github.com/restic/restic/internal/test + alias: rtest diff --git a/cmd/restic/cmd_restore_integration_test.go b/cmd/restic/cmd_restore_integration_test.go index 2c7cbe1fb..04c13bc68 100644 --- a/cmd/restic/cmd_restore_integration_test.go +++ b/cmd/restic/cmd_restore_integration_test.go @@ -4,7 +4,7 @@ import ( "context" "fmt" "io" - mrand "math/rand" + "math/rand" "os" "path/filepath" "syscall" @@ -116,7 +116,7 @@ func TestRestore(t *testing.T) { for i := 0; i < 10; i++ { p := filepath.Join(env.testdata, fmt.Sprintf("foo/bar/testfile%v", i)) rtest.OK(t, os.MkdirAll(filepath.Dir(p), 0755)) - rtest.OK(t, appendRandomData(p, uint(mrand.Intn(2<<21)))) + rtest.OK(t, appendRandomData(p, uint(rand.Intn(2<<21)))) } opts := BackupOptions{} diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 91b26f3dd..841c8f2ce 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -23,12 +23,12 @@ import ( "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" - restictest "github.com/restic/restic/internal/test" + rtest "github.com/restic/restic/internal/test" "golang.org/x/sync/errgroup" ) func prepareTempdirRepoSrc(t testing.TB, src TestDir) (string, restic.Repository) { - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) repo 
:= repository.TestRepository(t) TestCreateFiles(t, tempdir, src) @@ -133,7 +133,7 @@ func TestArchiverSaveFile(t *testing.T) { var tests = []TestFile{ {Content: ""}, {Content: "foo"}, - {Content: string(restictest.Random(23, 12*1024*1024+1287898))}, + {Content: string(rtest.Random(23, 12*1024*1024+1287898))}, } for _, testfile := range tests { @@ -166,7 +166,7 @@ func TestArchiverSaveFileReaderFS(t *testing.T) { Data string }{ {Data: "foo"}, - {Data: string(restictest.Random(23, 12*1024*1024+1287898))}, + {Data: string(rtest.Random(23, 12*1024*1024+1287898))}, } for _, test := range tests { @@ -208,7 +208,7 @@ func TestArchiverSave(t *testing.T) { var tests = []TestFile{ {Content: ""}, {Content: "foo"}, - {Content: string(restictest.Random(23, 12*1024*1024+1287898))}, + {Content: string(rtest.Random(23, 12*1024*1024+1287898))}, } for _, testfile := range tests { @@ -277,7 +277,7 @@ func TestArchiverSaveReaderFS(t *testing.T) { Data string }{ {Data: "foo"}, - {Data: string(restictest.Random(23, 12*1024*1024+1287898))}, + {Data: string(rtest.Random(23, 12*1024*1024+1287898))}, } for _, test := range tests { @@ -354,7 +354,7 @@ func TestArchiverSaveReaderFS(t *testing.T) { func BenchmarkArchiverSaveFileSmall(b *testing.B) { const fileSize = 4 * 1024 d := TestDir{"file": TestFile{ - Content: string(restictest.Random(23, fileSize)), + Content: string(rtest.Random(23, fileSize)), }} b.SetBytes(fileSize) @@ -386,7 +386,7 @@ func BenchmarkArchiverSaveFileSmall(b *testing.B) { func BenchmarkArchiverSaveFileLarge(b *testing.B) { const fileSize = 40*1024*1024 + 1287898 d := TestDir{"file": TestFile{ - Content: string(restictest.Random(23, fileSize)), + Content: string(rtest.Random(23, fileSize)), }} b.SetBytes(fileSize) @@ -462,14 +462,14 @@ func appendToFile(t testing.TB, filename string, data []byte) { } func TestArchiverSaveFileIncremental(t *testing.T) { - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) repo := &blobCountingRepo{ Repository: repository.TestRepository(t), saved: make(map[restic.BlobHandle]uint), } - data := restictest.Random(23, 512*1024+887898) + data := rtest.Random(23, 512*1024+887898) testfile := filepath.Join(tempdir, "testfile") for i := 0; i < 3; i++ { @@ -512,12 +512,12 @@ func chmodTwice(t testing.TB, name string) { // POSIX says that ctime is updated "even if the file status does not // change", but let's make sure it does change, just in case. 
err := os.Chmod(name, 0700) - restictest.OK(t, err) + rtest.OK(t, err) sleep() err = os.Chmod(name, 0600) - restictest.OK(t, err) + rtest.OK(t, err) } func lstat(t testing.TB, name string) os.FileInfo { @@ -676,7 +676,7 @@ func TestFileChanged(t *testing.T) { t.Skip("don't run test on Windows") } - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) filename := filepath.Join(tempdir, "file") content := defaultContent @@ -712,7 +712,7 @@ func TestFileChanged(t *testing.T) { } func TestFilChangedSpecialCases(t *testing.T) { - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) filename := filepath.Join(tempdir, "file") content := []byte("foobar") @@ -746,12 +746,12 @@ func TestArchiverSaveDir(t *testing.T) { }{ { src: TestDir{ - "targetfile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))}, + "targetfile": TestFile{Content: string(rtest.Random(888, 2*1024*1024+5000))}, }, target: ".", want: TestDir{ "targetdir": TestDir{ - "targetfile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))}, + "targetfile": TestFile{Content: string(rtest.Random(888, 2*1024*1024+5000))}, }, }, }, @@ -761,8 +761,8 @@ func TestArchiverSaveDir(t *testing.T) { "foo": TestFile{Content: "foo"}, "emptyfile": TestFile{Content: ""}, "bar": TestFile{Content: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"}, - "largefile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))}, - "largerfile": TestFile{Content: string(restictest.Random(234, 5*1024*1024+5000))}, + "largefile": TestFile{Content: string(rtest.Random(888, 2*1024*1024+5000))}, + "largerfile": TestFile{Content: string(rtest.Random(234, 5*1024*1024+5000))}, }, }, target: "targetdir", @@ -841,7 +841,7 @@ func TestArchiverSaveDir(t *testing.T) { chdir = filepath.Join(chdir, test.chdir) } - back := restictest.Chdir(t, chdir) + back := rtest.Chdir(t, chdir) defer back() fi, err := fs.Lstat(test.target) @@ -899,7 +899,7 @@ func TestArchiverSaveDir(t *testing.T) { } func TestArchiverSaveDirIncremental(t *testing.T) { - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) repo := &blobCountingRepo{ Repository: repository.TestRepository(t), @@ -989,7 +989,7 @@ func TestArchiverSaveDirIncremental(t *testing.T) { func bothZeroOrNeither(tb testing.TB, exp, act uint64) { tb.Helper() if (exp == 0 && act != 0) || (exp != 0 && act == 0) { - restictest.Equals(tb, exp, act) + rtest.Equals(tb, exp, act) } } @@ -1113,7 +1113,7 @@ func TestArchiverSaveTree(t *testing.T) { arch.runWorkers(ctx, wg) arch.summary = &Summary{} - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() if test.prepare != nil { @@ -1158,9 +1158,9 @@ func TestArchiverSaveTree(t *testing.T) { bothZeroOrNeither(t, test.stat.DataSize, stat.DataSize) bothZeroOrNeither(t, test.stat.DataSizeInRepo, stat.DataSizeInRepo) bothZeroOrNeither(t, test.stat.TreeSizeInRepo, stat.TreeSizeInRepo) - restictest.Equals(t, test.stat.ProcessedBytes, stat.ProcessedBytes) - restictest.Equals(t, test.stat.Files, stat.Files) - restictest.Equals(t, test.stat.Dirs, stat.Dirs) + rtest.Equals(t, test.stat.ProcessedBytes, stat.ProcessedBytes) + rtest.Equals(t, test.stat.Files, stat.Files) + rtest.Equals(t, test.stat.Dirs, stat.Dirs) }) } } @@ -1408,7 +1408,7 @@ func TestArchiverSnapshot(t *testing.T) { chdir = filepath.Join(chdir, filepath.FromSlash(test.chdir)) } - back := restictest.Chdir(t, chdir) + back := rtest.Chdir(t, chdir) defer back() var targets []string @@ -1561,7 +1561,7 @@ func TestArchiverSnapshotSelect(t *testing.T) { arch := 
New(repo, fs.Track{FS: fs.Local{}}, Options{}) arch.Select = test.selFn - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() targets := []string{"."} @@ -1639,14 +1639,14 @@ func (f MockFile) Read(p []byte) (int, error) { } func checkSnapshotStats(t *testing.T, sn *restic.Snapshot, stat Summary) { - restictest.Equals(t, stat.Files.New, sn.Summary.FilesNew) - restictest.Equals(t, stat.Files.Changed, sn.Summary.FilesChanged) - restictest.Equals(t, stat.Files.Unchanged, sn.Summary.FilesUnmodified) - restictest.Equals(t, stat.Dirs.New, sn.Summary.DirsNew) - restictest.Equals(t, stat.Dirs.Changed, sn.Summary.DirsChanged) - restictest.Equals(t, stat.Dirs.Unchanged, sn.Summary.DirsUnmodified) - restictest.Equals(t, stat.ProcessedBytes, sn.Summary.TotalBytesProcessed) - restictest.Equals(t, stat.Files.New+stat.Files.Changed+stat.Files.Unchanged, sn.Summary.TotalFilesProcessed) + rtest.Equals(t, stat.Files.New, sn.Summary.FilesNew) + rtest.Equals(t, stat.Files.Changed, sn.Summary.FilesChanged) + rtest.Equals(t, stat.Files.Unchanged, sn.Summary.FilesUnmodified) + rtest.Equals(t, stat.Dirs.New, sn.Summary.DirsNew) + rtest.Equals(t, stat.Dirs.Changed, sn.Summary.DirsChanged) + rtest.Equals(t, stat.Dirs.Unchanged, sn.Summary.DirsUnmodified) + rtest.Equals(t, stat.ProcessedBytes, sn.Summary.TotalBytesProcessed) + rtest.Equals(t, stat.Files.New+stat.Files.Changed+stat.Files.Unchanged, sn.Summary.TotalFilesProcessed) bothZeroOrNeither(t, uint64(stat.DataBlobs), uint64(sn.Summary.DataBlobs)) bothZeroOrNeither(t, uint64(stat.TreeBlobs), uint64(sn.Summary.TreeBlobs)) bothZeroOrNeither(t, uint64(stat.DataSize+stat.TreeSize), uint64(sn.Summary.DataAdded)) @@ -1662,7 +1662,7 @@ func TestArchiverParent(t *testing.T) { }{ { src: TestDir{ - "targetfile": TestFile{Content: string(restictest.Random(888, 2*1024*1024+5000))}, + "targetfile": TestFile{Content: string(rtest.Random(888, 2*1024*1024+5000))}, }, statInitial: Summary{ Files: ChangeStats{1, 0, 0}, @@ -1679,8 +1679,8 @@ func TestArchiverParent(t *testing.T) { { src: TestDir{ "targetDir": TestDir{ - "targetfile": TestFile{Content: string(restictest.Random(888, 1234))}, - "targetfile2": TestFile{Content: string(restictest.Random(888, 1235))}, + "targetfile": TestFile{Content: string(rtest.Random(888, 1234))}, + "targetfile2": TestFile{Content: string(rtest.Random(888, 1235))}, }, }, statInitial: Summary{ @@ -1698,9 +1698,9 @@ func TestArchiverParent(t *testing.T) { { src: TestDir{ "targetDir": TestDir{ - "targetfile": TestFile{Content: string(restictest.Random(888, 1234))}, + "targetfile": TestFile{Content: string(rtest.Random(888, 1234))}, }, - "targetfile2": TestFile{Content: string(restictest.Random(888, 1235))}, + "targetfile2": TestFile{Content: string(rtest.Random(888, 1235))}, }, modify: func(path string) { remove(t, filepath.Join(path, "targetDir", "targetfile")) @@ -1735,7 +1735,7 @@ func TestArchiverParent(t *testing.T) { arch := New(repo, testFS, Options{}) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() firstSnapshot, firstSnapshotID, summary, err := arch.Snapshot(ctx, []string{"."}, SnapshotOptions{Time: time.Now()}) @@ -1763,9 +1763,9 @@ func TestArchiverParent(t *testing.T) { } return nil }) - restictest.Equals(t, test.statInitial.Files, summary.Files) - restictest.Equals(t, test.statInitial.Dirs, summary.Dirs) - restictest.Equals(t, test.statInitial.ProcessedBytes, summary.ProcessedBytes) + rtest.Equals(t, test.statInitial.Files, summary.Files) + rtest.Equals(t, 
test.statInitial.Dirs, summary.Dirs) + rtest.Equals(t, test.statInitial.ProcessedBytes, summary.ProcessedBytes) checkSnapshotStats(t, firstSnapshot, test.statInitial) if test.modify != nil { @@ -1784,11 +1784,11 @@ func TestArchiverParent(t *testing.T) { if test.modify == nil { // check that no files were read this time - restictest.Equals(t, map[string]int{}, testFS.bytesRead) + rtest.Equals(t, map[string]int{}, testFS.bytesRead) } - restictest.Equals(t, test.statSecond.Files, summary.Files) - restictest.Equals(t, test.statSecond.Dirs, summary.Dirs) - restictest.Equals(t, test.statSecond.ProcessedBytes, summary.ProcessedBytes) + rtest.Equals(t, test.statSecond.Files, summary.Files) + rtest.Equals(t, test.statSecond.Dirs, summary.Dirs) + rtest.Equals(t, test.statSecond.ProcessedBytes, summary.ProcessedBytes) checkSnapshotStats(t, secondSnapshot, test.statSecond) t.Logf("second backup saved as %v", secondSnapshotID.Str()) @@ -1894,7 +1894,7 @@ func TestArchiverErrorReporting(t *testing.T) { tempdir, repo := prepareTempdirRepoSrc(t, test.src) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() if test.prepare != nil { @@ -1964,7 +1964,7 @@ func TestArchiverContextCanceled(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) cancel() - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) TestCreateFiles(t, tempdir, TestDir{ "targetfile": TestFile{Content: "foobar"}, }) @@ -1972,7 +1972,7 @@ func TestArchiverContextCanceled(t *testing.T) { // Ensure that the archiver itself reports the canceled context and not just the backend repo := repository.TestRepositoryWithBackend(t, &noCancelBackend{mem.New()}, 0, repository.Options{}) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() arch := New(repo, fs.Track{FS: fs.Local{}}, Options{}) @@ -2058,16 +2058,16 @@ func TestArchiverAbortEarlyOnError(t *testing.T) { { src: TestDir{ "dir": TestDir{ - "file0": TestFile{Content: string(restictest.Random(0, 1024))}, - "file1": TestFile{Content: string(restictest.Random(1, 1024))}, - "file2": TestFile{Content: string(restictest.Random(2, 1024))}, - "file3": TestFile{Content: string(restictest.Random(3, 1024))}, - "file4": TestFile{Content: string(restictest.Random(4, 1024))}, - "file5": TestFile{Content: string(restictest.Random(5, 1024))}, - "file6": TestFile{Content: string(restictest.Random(6, 1024))}, - "file7": TestFile{Content: string(restictest.Random(7, 1024))}, - "file8": TestFile{Content: string(restictest.Random(8, 1024))}, - "file9": TestFile{Content: string(restictest.Random(9, 1024))}, + "file0": TestFile{Content: string(rtest.Random(0, 1024))}, + "file1": TestFile{Content: string(rtest.Random(1, 1024))}, + "file2": TestFile{Content: string(rtest.Random(2, 1024))}, + "file3": TestFile{Content: string(rtest.Random(3, 1024))}, + "file4": TestFile{Content: string(rtest.Random(4, 1024))}, + "file5": TestFile{Content: string(rtest.Random(5, 1024))}, + "file6": TestFile{Content: string(rtest.Random(6, 1024))}, + "file7": TestFile{Content: string(rtest.Random(7, 1024))}, + "file8": TestFile{Content: string(rtest.Random(8, 1024))}, + "file9": TestFile{Content: string(rtest.Random(9, 1024))}, }, }, wantOpen: map[string]uint{ @@ -2092,7 +2092,7 @@ func TestArchiverAbortEarlyOnError(t *testing.T) { tempdir, repo := prepareTempdirRepoSrc(t, test.src) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() testFS := &TrackFS{ @@ -2225,7 +2225,7 @@ func TestMetadataChanged(t 
*testing.T) { tempdir, repo := prepareTempdirRepoSrc(t, files) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() // get metadata @@ -2300,7 +2300,7 @@ func TestRacyFileSwap(t *testing.T) { tempdir, repo := prepareTempdirRepoSrc(t, files) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() // get metadata of current folder diff --git a/internal/archiver/archiver_unix_test.go b/internal/archiver/archiver_unix_test.go index 2552b23e1..9462420dd 100644 --- a/internal/archiver/archiver_unix_test.go +++ b/internal/archiver/archiver_unix_test.go @@ -11,7 +11,7 @@ import ( "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" - restictest "github.com/restic/restic/internal/test" + rtest "github.com/restic/restic/internal/test" ) type wrappedFileInfo struct { @@ -49,7 +49,7 @@ func wrapFileInfo(fi os.FileInfo) os.FileInfo { func statAndSnapshot(t *testing.T, repo restic.Repository, name string) (*restic.Node, *restic.Node) { fi := lstat(t, name) want, err := restic.NodeFromFileInfo(name, fi) - restictest.OK(t, err) + rtest.OK(t, err) _, node := snapshot(t, repo, fs.Local{}, nil, name) return want, node @@ -73,17 +73,17 @@ func TestHardlinkMetadata(t *testing.T) { tempdir, repo := prepareTempdirRepoSrc(t, files) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() want, node := statAndSnapshot(t, repo, "testlink") - restictest.Assert(t, node.DeviceID == want.DeviceID, "device id mismatch expected %v got %v", want.DeviceID, node.DeviceID) - restictest.Assert(t, node.Links == want.Links, "link count mismatch expected %v got %v", want.Links, node.Links) - restictest.Assert(t, node.Inode == want.Inode, "inode mismatch expected %v got %v", want.Inode, node.Inode) + rtest.Assert(t, node.DeviceID == want.DeviceID, "device id mismatch expected %v got %v", want.DeviceID, node.DeviceID) + rtest.Assert(t, node.Links == want.Links, "link count mismatch expected %v got %v", want.Links, node.Links) + rtest.Assert(t, node.Inode == want.Inode, "inode mismatch expected %v got %v", want.Inode, node.Inode) _, node = statAndSnapshot(t, repo, "testfile") - restictest.Assert(t, node.DeviceID == 0, "device id mismatch for testfile expected %v got %v", 0, node.DeviceID) + rtest.Assert(t, node.DeviceID == 0, "device id mismatch for testfile expected %v got %v", 0, node.DeviceID) _, node = statAndSnapshot(t, repo, "testdir") - restictest.Assert(t, node.DeviceID == 0, "device id mismatch for testdir expected %v got %v", 0, node.DeviceID) + rtest.Assert(t, node.DeviceID == 0, "device id mismatch for testdir expected %v got %v", 0, node.DeviceID) } diff --git a/internal/archiver/scanner_test.go b/internal/archiver/scanner_test.go index 1b4cd1f7f..b5b7057b8 100644 --- a/internal/archiver/scanner_test.go +++ b/internal/archiver/scanner_test.go @@ -9,7 +9,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/restic/restic/internal/fs" - restictest "github.com/restic/restic/internal/test" + rtest "github.com/restic/restic/internal/test" ) func TestScanner(t *testing.T) { @@ -81,10 +81,10 @@ func TestScanner(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) TestCreateFiles(t, tempdir, test.src) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() cur, err := os.Getwd() @@ -216,10 +216,10 @@ func TestScannerError(t *testing.T) 
{ ctx, cancel := context.WithCancel(context.Background()) defer cancel() - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) TestCreateFiles(t, tempdir, test.src) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() cur, err := os.Getwd() @@ -288,10 +288,10 @@ func TestScannerCancel(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) TestCreateFiles(t, tempdir, src) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() cur, err := os.Getwd() diff --git a/internal/archiver/testing_test.go b/internal/archiver/testing_test.go index e48b41ec7..ff3bd3668 100644 --- a/internal/archiver/testing_test.go +++ b/internal/archiver/testing_test.go @@ -11,7 +11,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/repository" - restictest "github.com/restic/restic/internal/test" + rtest "github.com/restic/restic/internal/test" ) // MockT passes through all logging functions from T, but catches Fail(), @@ -101,7 +101,7 @@ func TestTestCreateFiles(t *testing.T) { } for i, test := range tests { - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) t.Run("", func(t *testing.T) { tempdir := filepath.Join(tempdir, fmt.Sprintf("test-%d", i)) @@ -191,7 +191,7 @@ func TestTestWalkFiles(t *testing.T) { for _, test := range tests { t.Run("", func(t *testing.T) { - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) got := make(map[string]string) @@ -321,7 +321,7 @@ func TestTestEnsureFiles(t *testing.T) { for _, test := range tests { t.Run("", func(t *testing.T) { - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) createFilesAt(t, tempdir, test.files) subtestT := testing.TB(t) @@ -452,7 +452,7 @@ func TestTestEnsureSnapshot(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) targetDir := filepath.Join(tempdir, "target") err := fs.Mkdir(targetDir, 0700) @@ -462,7 +462,7 @@ func TestTestEnsureSnapshot(t *testing.T) { createFilesAt(t, targetDir, test.files) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() repo := repository.TestRepository(t) diff --git a/internal/archiver/tree_test.go b/internal/archiver/tree_test.go index 7852a4c2e..a9d2d97ff 100644 --- a/internal/archiver/tree_test.go +++ b/internal/archiver/tree_test.go @@ -8,7 +8,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/restic/restic/internal/fs" - restictest "github.com/restic/restic/internal/test" + rtest "github.com/restic/restic/internal/test" ) // debug.Log requires Tree.String. 
@@ -439,10 +439,10 @@ func TestTree(t *testing.T) { t.Skip("skip test on unix") } - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) TestCreateFiles(t, tempdir, test.src) - back := restictest.Chdir(t, tempdir) + back := rtest.Chdir(t, tempdir) defer back() tree, err := NewTree(fs.Local{}, test.targets) diff --git a/internal/fs/stat_test.go b/internal/fs/stat_test.go index a5ec77c7a..d52415c1d 100644 --- a/internal/fs/stat_test.go +++ b/internal/fs/stat_test.go @@ -5,11 +5,11 @@ import ( "path/filepath" "testing" - restictest "github.com/restic/restic/internal/test" + rtest "github.com/restic/restic/internal/test" ) func TestExtendedStat(t *testing.T) { - tempdir := restictest.TempDir(t) + tempdir := rtest.TempDir(t) filename := filepath.Join(tempdir, "file") err := os.WriteFile(filename, []byte("foobar"), 0640) if err != nil { diff --git a/internal/test/helpers.go b/internal/test/helpers.go index 242da6079..3387d36df 100644 --- a/internal/test/helpers.go +++ b/internal/test/helpers.go @@ -5,6 +5,7 @@ import ( "compress/gzip" "fmt" "io" + "math/rand" "os" "os/exec" "path/filepath" @@ -12,8 +13,6 @@ import ( "testing" "github.com/restic/restic/internal/errors" - - mrand "math/rand" ) // Assert fails the test if the condition is false. @@ -71,7 +70,7 @@ func Equals(tb testing.TB, exp, act interface{}, msgs ...string) { func Random(seed, count int) []byte { p := make([]byte, count) - rnd := mrand.New(mrand.NewSource(int64(seed))) + rnd := rand.New(rand.NewSource(int64(seed))) for i := 0; i < len(p); i += 8 { val := rnd.Int63() From df07814ec266d641466885318b6ed7889b4c889a Mon Sep 17 00:00:00 2001 From: Stephan Paul Date: Mon, 25 Mar 2024 15:42:15 +0100 Subject: [PATCH 055/117] forget json output: added id's in snapshots within reasons object In order to evaluate the keep reasons for snapshots, there should be also the id's to compare it with snapshots within the keep object. (See also Issue #3117) In order to avoid output parameters also changed function addJSONSnapshots to asJSONSnapshots --- changelog/unreleased/pull-4737 | 5 ++++ cmd/restic/cmd_forget.go | 46 ++++++++++++++++++++++++++-------- doc/075_scripting.rst | 14 +++++------ 3 files changed, 47 insertions(+), 18 deletions(-) create mode 100644 changelog/unreleased/pull-4737 diff --git a/changelog/unreleased/pull-4737 b/changelog/unreleased/pull-4737 new file mode 100644 index 000000000..2637c8f83 --- /dev/null +++ b/changelog/unreleased/pull-4737 @@ -0,0 +1,5 @@ +Enhancement: include snapshot id in reason field of forget JSON output + +The JSON output of the `forget` command now includes the `id` and `short_id` of a snapshot in the `reason` field. 
+ +https://github.com/restic/restic/pull/4737 diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index f2fc1da8c..d634576c0 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -245,16 +245,16 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption PrintSnapshots(globalOptions.stdout, keep, reasons, opts.Compact) Printf("\n") } - addJSONSnapshots(&fg.Keep, keep) + fg.Keep = asJSONSnapshots(keep) if len(remove) != 0 && !gopts.Quiet && !gopts.JSON { Printf("remove %d snapshots:\n", len(remove)) PrintSnapshots(globalOptions.stdout, remove, nil, opts.Compact) Printf("\n") } - addJSONSnapshots(&fg.Remove, remove) + fg.Remove = asJSONSnapshots(remove) - fg.Reasons = reasons + fg.Reasons = asJSONKeeps(reasons) jsonGroups = append(jsonGroups, &fg) @@ -302,23 +302,47 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption // ForgetGroup helps to print what is forgotten in JSON. type ForgetGroup struct { - Tags []string `json:"tags"` - Host string `json:"host"` - Paths []string `json:"paths"` - Keep []Snapshot `json:"keep"` - Remove []Snapshot `json:"remove"` - Reasons []restic.KeepReason `json:"reasons"` + Tags []string `json:"tags"` + Host string `json:"host"` + Paths []string `json:"paths"` + Keep []Snapshot `json:"keep"` + Remove []Snapshot `json:"remove"` + Reasons []KeepReason `json:"reasons"` } -func addJSONSnapshots(js *[]Snapshot, list restic.Snapshots) { +func asJSONSnapshots(list restic.Snapshots) []Snapshot { + var resultList []Snapshot for _, sn := range list { k := Snapshot{ Snapshot: sn, ID: sn.ID(), ShortID: sn.ID().Str(), } - *js = append(*js, k) + resultList = append(resultList, k) } + return resultList +} + +// KeepReason helps to print KeepReasons as JSON with Snapshots with their ID included. 
+type KeepReason struct { + Snapshot Snapshot `json:"snapshot"` + Matches []string `json:"matches"` +} + +func asJSONKeeps(list []restic.KeepReason) []KeepReason { + var resultList []KeepReason + for _, keep := range list { + k := KeepReason{ + Snapshot: Snapshot{ + Snapshot: keep.Snapshot, + ID: keep.Snapshot.ID(), + ShortID: keep.Snapshot.ID().Str(), + }, + Matches: keep.Matches, + } + resultList = append(resultList, k) + } + return resultList } func printJSONForget(stdout io.Writer, forgets []*ForgetGroup) error { diff --git a/doc/075_scripting.rst b/doc/075_scripting.rst index d51516cbe..28419c292 100644 --- a/doc/075_scripting.rst +++ b/doc/075_scripting.rst @@ -367,13 +367,13 @@ Snapshot object Reason object -+----------------+---------------------------------------------------------+ -| ``snapshot`` | Snapshot object, without ``id`` and ``short_id`` fields | -+----------------+---------------------------------------------------------+ -| ``matches`` | Array containing descriptions of the matching criteria | -+----------------+---------------------------------------------------------+ -| ``counters`` | Object containing counters used by the policies | -+----------------+---------------------------------------------------------+ ++----------------+-----------------------------------------------------------+ +| ``snapshot`` | Snapshot object, including ``id`` and ``short_id`` fields | ++----------------+-----------------------------------------------------------+ +| ``matches`` | Array containing descriptions of the matching criteria | ++----------------+-----------------------------------------------------------+ +| ``counters`` | Object containing counters used by the policies | ++----------------+-----------------------------------------------------------+ init From 5145c8f9c092d8815e516be231c20f2234764e45 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 31 Mar 2024 12:25:20 +0200 Subject: [PATCH 056/117] key list: include full key id in JSON output --- changelog/unreleased/issue-4744 | 9 +++++++++ cmd/restic/cmd_key_list.go | 6 ++++-- 2 files changed, 13 insertions(+), 2 deletions(-) create mode 100644 changelog/unreleased/issue-4744 diff --git a/changelog/unreleased/issue-4744 b/changelog/unreleased/issue-4744 new file mode 100644 index 000000000..b0ede1c5c --- /dev/null +++ b/changelog/unreleased/issue-4744 @@ -0,0 +1,9 @@ +Change: Include full key ID in JSON output of `key list` + +We have changed the JSON output of the `key list` command to include the full +key ID instead of just a shortened version, as the latter can be ambiguous +in some rare cases. To derive the short ID, please truncate the full ID down to +eight characters. 
+ +https://github.com/restic/restic/issues/4744 +https://github.com/restic/restic/pull/4745 diff --git a/cmd/restic/cmd_key_list.go b/cmd/restic/cmd_key_list.go index 9bddb5ed3..fcca6055a 100644 --- a/cmd/restic/cmd_key_list.go +++ b/cmd/restic/cmd_key_list.go @@ -53,6 +53,7 @@ func listKeys(ctx context.Context, s *repository.Repository, gopts GlobalOptions type keyInfo struct { Current bool `json:"current"` ID string `json:"id"` + ShortID string `json:"-"` UserName string `json:"userName"` HostName string `json:"hostName"` Created string `json:"created"` @@ -70,7 +71,8 @@ func listKeys(ctx context.Context, s *repository.Repository, gopts GlobalOptions key := keyInfo{ Current: id == s.KeyID(), - ID: id.Str(), + ID: id.String(), + ShortID: id.Str(), UserName: k.Username, HostName: k.Hostname, Created: k.Created.Local().Format(TimeFormat), @@ -91,7 +93,7 @@ func listKeys(ctx context.Context, s *repository.Repository, gopts GlobalOptions } tab := table.New() - tab.AddColumn(" ID", "{{if .Current}}*{{else}} {{end}}{{ .ID }}") + tab.AddColumn(" ID", "{{if .Current}}*{{else}} {{end}}{{ .ShortID }}") tab.AddColumn("User", "{{ .UserName }}") tab.AddColumn("Host", "{{ .HostName }}") tab.AddColumn("Created", "{{ .Created }}") From f8a72ac2a397f8d0a6109e8e5ebae85f079b23fb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Apr 2024 02:32:19 +0000 Subject: [PATCH 057/117] build(deps): bump github.com/Azure/azure-sdk-for-go/sdk/azcore Bumps [github.com/Azure/azure-sdk-for-go/sdk/azcore](https://github.com/Azure/azure-sdk-for-go) from 1.9.2 to 1.10.0. - [Release notes](https://github.com/Azure/azure-sdk-for-go/releases) - [Changelog](https://github.com/Azure/azure-sdk-for-go/blob/main/documentation/release.md) - [Commits](https://github.com/Azure/azure-sdk-for-go/compare/sdk/azcore/v1.9.2...sdk/azcore/v1.10.0) --- updated-dependencies: - dependency-name: github.com/Azure/azure-sdk-for-go/sdk/azcore dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 6e546974e..3ef0e89b6 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module github.com/restic/restic require ( cloud.google.com/go/storage v1.39.0 - github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 + github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 github.com/Backblaze/blazer v0.6.1 diff --git a/go.sum b/go.sum index 668d6a339..f4877ed35 100644 --- a/go.sum +++ b/go.sum @@ -9,8 +9,8 @@ cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc= cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI= cloud.google.com/go/storage v1.39.0 h1:brbjUa4hbDHhpQf48tjqMaXEV+f1OGoaTmQau9tmCsA= cloud.google.com/go/storage v1.39.0/go.mod h1:OAEj/WZwUYjA3YHQ10/YcN9ttGuEpLwvaoyBXIPikEk= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2 h1:c4k2FIYIh4xtwqrQwV0Ct1v5+ehlNXj5NI/MWVsiTkQ= -github.com/Azure/azure-sdk-for-go/sdk/azcore v1.9.2/go.mod h1:5FDJtLEO/GxwNgUxbwrY3LP0pEoThTQJtk2oysdXHxM= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 h1:n1DH8TPV4qqPTje2RcUBYwtrTWlabVp4n46+74X2pn4= +github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0/go.mod h1:HDcZnuGbiyppErN6lB+idp4CKhjbc8gwjto6OPpyggM= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1/go.mod h1:h8hyGFDsU5HMivxiS2iYFZsgDbU9OnnJ163x5UGVKYo= github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 h1:LqbJ/WzJUwBf8UiaSzgX7aMclParm9/5Vgp+TY51uBQ= From 96c602a6de89ab1adc67bc97a2ace9e541989afa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 3 Apr 2024 19:41:38 +0000 Subject: [PATCH 058/117] build(deps): bump golang.org/x/net from 0.21.0 to 0.23.0 Bumps [golang.org/x/net](https://github.com/golang/net) from 0.21.0 to 0.23.0. - [Commits](https://github.com/golang/net/compare/v0.21.0...v0.23.0) --- updated-dependencies: - dependency-name: golang.org/x/net dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] --- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index 7121cdac4..2aa48ebb7 100644 --- a/go.mod +++ b/go.mod @@ -26,12 +26,12 @@ require ( github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 go.uber.org/automaxprocs v1.5.3 - golang.org/x/crypto v0.19.0 - golang.org/x/net v0.21.0 + golang.org/x/crypto v0.21.0 + golang.org/x/net v0.23.0 golang.org/x/oauth2 v0.17.0 golang.org/x/sync v0.6.0 - golang.org/x/sys v0.17.0 - golang.org/x/term v0.17.0 + golang.org/x/sys v0.18.0 + golang.org/x/term v0.18.0 golang.org/x/text v0.14.0 golang.org/x/time v0.5.0 google.golang.org/api v0.166.0 diff --git a/go.sum b/go.sum index 1cd51cbac..895bd23c9 100644 --- a/go.sum +++ b/go.sum @@ -208,8 +208,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= -golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -229,8 +229,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= -golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= @@ -257,14 +257,14 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= -golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= 
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= -golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= From ba136b31b86eef304363fd5caec7935ef7343f1f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 3 Apr 2024 20:03:22 +0000 Subject: [PATCH 059/117] build(deps): bump cloud.google.com/go/storage from 1.39.0 to 1.40.0 Bumps [cloud.google.com/go/storage](https://github.com/googleapis/google-cloud-go) from 1.39.0 to 1.40.0. - [Release notes](https://github.com/googleapis/google-cloud-go/releases) - [Changelog](https://github.com/googleapis/google-cloud-go/blob/main/CHANGES.md) - [Commits](https://github.com/googleapis/google-cloud-go/compare/spanner/v1.39.0...spanner/v1.40.0) --- updated-dependencies: - dependency-name: cloud.google.com/go/storage dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 32 +++++++++++++-------------- go.sum | 68 ++++++++++++++++++++++++++++------------------------------ 2 files changed, 49 insertions(+), 51 deletions(-) diff --git a/go.mod b/go.mod index 1637f769a..c928b4a97 100644 --- a/go.mod +++ b/go.mod @@ -1,7 +1,7 @@ module github.com/restic/restic require ( - cloud.google.com/go/storage v1.39.0 + cloud.google.com/go/storage v1.40.0 github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.3.1 @@ -28,20 +28,20 @@ require ( go.uber.org/automaxprocs v1.5.3 golang.org/x/crypto v0.21.0 golang.org/x/net v0.23.0 - golang.org/x/oauth2 v0.17.0 + golang.org/x/oauth2 v0.18.0 golang.org/x/sync v0.6.0 golang.org/x/sys v0.18.0 golang.org/x/term v0.18.0 golang.org/x/text v0.14.0 golang.org/x/time v0.5.0 - google.golang.org/api v0.166.0 + google.golang.org/api v0.170.0 ) require ( - cloud.google.com/go v0.112.0 // indirect + cloud.google.com/go v0.112.1 // indirect cloud.google.com/go/compute v1.24.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v1.1.6 // indirect + cloud.google.com/go/iam v1.1.7 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.5.2 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v1.2.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect @@ -52,12 +52,12 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/golang-jwt/jwt/v5 v5.2.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/pprof v0.0.0-20230926050212-f7f687d19a98 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/google/uuid v1.6.0 // indirect 
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect - github.com/googleapis/gax-go/v2 v2.12.1 // indirect + github.com/googleapis/gax-go/v2 v2.12.3 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/cpuid/v2 v2.2.6 // indirect @@ -72,17 +72,17 @@ require ( github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 // indirect - go.opentelemetry.io/otel v1.23.0 // indirect - go.opentelemetry.io/otel/metric v1.23.0 // indirect - go.opentelemetry.io/otel/trace v1.23.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel v1.24.0 // indirect + go.opentelemetry.io/otel/metric v1.24.0 // indirect + go.opentelemetry.io/otel/trace v1.24.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240221002015-b0ce06bbee7c // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 // indirect - google.golang.org/grpc v1.61.1 // indirect - google.golang.org/protobuf v1.32.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2 // indirect + google.golang.org/grpc v1.62.1 // indirect + google.golang.org/protobuf v1.33.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 14951005a..1a7dc1a79 100644 --- a/go.sum +++ b/go.sum @@ -1,14 +1,14 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.112.0 h1:tpFCD7hpHFlQ8yPwT3x+QeXqc2T6+n6T+hmABHfDUSM= -cloud.google.com/go v0.112.0/go.mod h1:3jEEVwZ/MHU4djK5t5RHuKOA/GbLddgTdVubX1qnPD4= +cloud.google.com/go v0.112.1 h1:uJSeirPke5UNZHIb4SxfZklVSiWWVqW4oXlETwZziwM= +cloud.google.com/go v0.112.1/go.mod h1:+Vbu+Y1UU+I1rjmzeMOb/8RfkKJK2Gyxi1X6jJCZLo4= cloud.google.com/go/compute v1.24.0 h1:phWcR2eWzRJaL/kOiJwfFsPs4BaKq1j6vnpZrc1YlVg= cloud.google.com/go/compute v1.24.0/go.mod h1:kw1/T+h/+tK2LJK0wiPPx1intgdAM3j/g3hFDlscY40= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/iam v1.1.6 h1:bEa06k05IO4f4uJonbB5iAgKTPpABy1ayxaIZV/GHVc= -cloud.google.com/go/iam v1.1.6/go.mod h1:O0zxdPeGBoFdWW3HWmBxJsk0pfvNM/p/qa82rWOGTwI= -cloud.google.com/go/storage v1.39.0 h1:brbjUa4hbDHhpQf48tjqMaXEV+f1OGoaTmQau9tmCsA= -cloud.google.com/go/storage v1.39.0/go.mod h1:OAEj/WZwUYjA3YHQ10/YcN9ttGuEpLwvaoyBXIPikEk= +cloud.google.com/go/iam v1.1.7 h1:z4VHOhwKLF/+UYXAJDFwGtNF0b6gjsW1Pk9Ml0U/IoM= +cloud.google.com/go/iam v1.1.7/go.mod h1:J4PMPg8TtyurAUvSmPj8FF3EDgY1SPRZxcUGrn7WXGA= +cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw= +cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2ucUMY7Ir0g= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0 
h1:n1DH8TPV4qqPTje2RcUBYwtrTWlabVp4n46+74X2pn4= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.10.0/go.mod h1:HDcZnuGbiyppErN6lB+idp4CKhjbc8gwjto6OPpyggM= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.5.1 h1:sO0/P7g68FrryJzljemN+6GTssUXdANk6aJ7T1ZxnsQ= @@ -36,7 +36,6 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 h1:7To3pQ+pZo0i3dsWEbinPNFs5gPSBOsJtx3wTT94VBY= github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -54,7 +53,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= github.com/felixge/fgprof v0.9.3 h1:VvyZxILNuCiUCSXtPtYmmtGvb65nqXh2QFWc0Wpf2/g= github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= @@ -84,8 +82,8 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -107,8 +105,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.1 h1:9F8GV9r9ztXyAi00gsMQHNoF51xPZm8uj1dpYt2ZETM= -github.com/googleapis/gax-go/v2 v2.12.1/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= +github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA= +github.com/googleapis/gax-go/v2 v2.12.3/go.mod 
h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= @@ -190,17 +188,17 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0 h1:P+/g8GpuJGYbOp2tAdKrIPUX9JO02q8Q0YNlHolpibA= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.48.0/go.mod h1:tIKj3DbO8N9Y2xo52og3irLsPI4GW02DSMtrVgNMgxg= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0 h1:doUP+ExOpH3spVTLS0FcWGLnQrPct/hD/bCPbDRUEAU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.48.0/go.mod h1:rdENBZMT2OE6Ne/KLwpiXudnAsbdrdBaqBvTN8M8BgA= -go.opentelemetry.io/otel v1.23.0 h1:Df0pqjqExIywbMCMTxkAwzjLZtRf+bBKLbUcpxO2C9E= -go.opentelemetry.io/otel v1.23.0/go.mod h1:YCycw9ZeKhcJFrb34iVSkyT0iczq/zYDtZYFufObyB0= -go.opentelemetry.io/otel/metric v1.23.0 h1:pazkx7ss4LFVVYSxYew7L5I6qvLXHA0Ap2pwV+9Cnpo= -go.opentelemetry.io/otel/metric v1.23.0/go.mod h1:MqUW2X2a6Q8RN96E2/nqNoT+z9BSms20Jb7Bbp+HiTo= -go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= -go.opentelemetry.io/otel/trace v1.23.0 h1:37Ik5Ib7xfYVb4V1UtnT97T1jI+AoIYkJyPkuL4iJgI= -go.opentelemetry.io/otel/trace v1.23.0/go.mod h1:GSGTbIClEsuZrGIzoEHqsVfxgn5UkggkflQwDScNUsk= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= +go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= +go.opentelemetry.io/otel/sdk v1.22.0 h1:6coWHw9xw7EfClIC/+O31R8IY3/+EiRFHevmHafB2Gw= +go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -232,8 +230,8 @@ golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= -golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -287,8 +285,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= -google.golang.org/api v0.166.0 h1:6m4NUwrZYhAaVIHZWxaKjw1L1vNAjtMwORmKRyEEo24= -google.golang.org/api v0.166.0/go.mod h1:4FcBc686KFi7QI/U51/2GKKevfZMpM17sCdibqe/bSA= +google.golang.org/api v0.170.0 h1:zMaruDePM88zxZBG+NG8+reALO2rfLhe/JShitLyT48= +google.golang.org/api v0.170.0/go.mod h1:/xql9M2btF85xac/VAm4PsLMTLVGUOpq4BE9R8jyNy8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= @@ -298,17 +296,17 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y= google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= -google.golang.org/genproto/googleapis/api v0.0.0-20240221002015-b0ce06bbee7c h1:9g7erC9qu44ks7UK4gDNlnk4kOxZG707xKm4jVniy6o= -google.golang.org/genproto/googleapis/api v0.0.0-20240221002015-b0ce06bbee7c/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9 h1:hZB7eLIaYlW9qXRfCq/qDaPdbeY3757uARz5Vvfv+cY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:YUWgXUFRPfoYK1IHMuxH5K6nPEXSCzIMljnQ59lLRCk= +google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c h1:kaI7oewGK5YnVwj+Y+EJBO/YN1ht8iTL9XkFHtVZLsc= +google.golang.org/genproto/googleapis/api v0.0.0-20240314234333-6e1732d8331c/go.mod h1:VQW3tUculP/D4B+xVCo+VgSq8As6wA9ZjHl//pmk+6s= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2 h1:9IZDv+/GcI6u+a4jRFRLxQs0RUCfavGfoOgEW6jpkI0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240311132316-a219d84964c2/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.61.1 h1:kLAiWrZs7YeDM6MumDe7m3y4aM6wacLzM1Y/wiLP9XY= -google.golang.org/grpc v1.61.1/go.mod h1:VUbo7IFqmF1QtCAstipjG0GIoq49KvMe9+h1jFLBNJs= +google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= +google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -320,8 +318,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= From 09d2183351ce016fb2a5639014569f0cc1eb1926 Mon Sep 17 00:00:00 2001 From: Martin Geisler Date: Sun, 7 Apr 2024 18:05:53 +0200 Subject: [PATCH 060/117] doc: fix typo in 047_tuning_backup_parameters.rst --- doc/047_tuning_backup_parameters.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/047_tuning_backup_parameters.rst b/doc/047_tuning_backup_parameters.rst index 8456693e7..a6b998cfe 100644 --- a/doc/047_tuning_backup_parameters.rst +++ b/doc/047_tuning_backup_parameters.rst @@ -121,7 +121,7 @@ Feature flags allow disabling or enabling certain experimental restic features. can be specified via the ``RESTIC_FEATURES`` environment variable. The variable expects a comma-separated list of ``key[=value],key2[=value2]`` pairs. The key is the name of a feature flag. The value is optional and can contain either the value ``true`` (default if omitted) -or ``false``. The list of currently available feautre flags is shown by the ``features`` +or ``false``. The list of currently available feature flags is shown by the ``features`` command. Restic will return an error if an invalid feature flag is specified. No longer relevant From 0747cf5319775b3deca0ad0db1657e3179f10e06 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 10 Apr 2024 19:17:25 +0200 Subject: [PATCH 061/117] cache: ignore ErrNotExist during cleanup of old files Two restic processes running concurrently can try to remove the same files from the cache. This could cause one process to fail with an error if the other one has already remove a file that the current process also tries to delete. 
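
To illustrate the pattern this fix relies on, here is a minimal, self-contained sketch (not part of the patch: it uses the standard library os.Remove in place of restic's fs wrapper, and the helper name and example path are hypothetical; the actual change to internal/cache/file.go is in the diff below):

    package main

    import (
    	"errors"
    	"fmt"
    	"os"
    )

    // removeIgnoringMissing deletes path, but treats "file already gone" as
    // success, so two processes cleaning up the same cache entry cannot make
    // each other fail.
    func removeIgnoringMissing(path string) error {
    	if err := os.Remove(path); err != nil && !errors.Is(err, os.ErrNotExist) {
    		return err
    	}
    	return nil
    }

    func main() {
    	// Removing the same file twice (or a file another process already
    	// deleted) is harmless with this helper.
    	if err := removeIgnoringMissing("/tmp/example-cache-entry"); err != nil {
    		fmt.Println("cleanup failed:", err)
    	}
    }
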
--- changelog/unreleased/issue-4760 | 8 ++++++++ internal/cache/file.go | 3 ++- 2 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 changelog/unreleased/issue-4760 diff --git a/changelog/unreleased/issue-4760 b/changelog/unreleased/issue-4760 new file mode 100644 index 000000000..bb2d9c5b4 --- /dev/null +++ b/changelog/unreleased/issue-4760 @@ -0,0 +1,8 @@ +Bugfix: Fix possible error on concurrent cache cleanup + +If multiple restic processes concurrently cleaned up no longer existing files +from the cache, this could cause some of the processes to fail with an `no such +file or directory` error. This has been fixed. + +https://github.com/restic/restic/issues/4760 +https://github.com/restic/restic/pull/4761 diff --git a/internal/cache/file.go b/internal/cache/file.go index 48a38c1d3..1bfe922d2 100644 --- a/internal/cache/file.go +++ b/internal/cache/file.go @@ -165,7 +165,8 @@ func (c *Cache) Clear(t restic.FileType, valid restic.IDSet) error { continue } - if err = fs.Remove(c.filename(backend.Handle{Type: t, Name: id.String()})); err != nil { + // ignore ErrNotExist to gracefully handle multiple processes running Clear() concurrently + if err = fs.Remove(c.filename(backend.Handle{Type: t, Name: id.String()})); err != nil && !errors.Is(err, os.ErrNotExist) { return err } } From bf054c09d21a2f83d8974e34614eb700954057e5 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 31 Jan 2024 20:48:03 +0100 Subject: [PATCH 062/117] backup: Ignore xattr.list permission error for parent directories On FreeBSD, limited users may not be able to even list xattrs for the parent directories above the snapshot source paths. As this can cause the backup to fail, just ignore those errors. --- changelog/unreleased/issue-3600 | 11 ++++++++++ internal/archiver/archiver.go | 14 +++++++------ internal/archiver/archiver_test.go | 4 ++-- internal/archiver/archiver_unix_test.go | 2 +- internal/archiver/file_saver.go | 4 ++-- internal/archiver/file_saver_test.go | 4 ++-- internal/restic/node.go | 13 +++++++----- internal/restic/node_aix.go | 4 ++++ internal/restic/node_netbsd.go | 4 ++++ internal/restic/node_openbsd.go | 4 ++++ internal/restic/node_test.go | 10 ++++++--- internal/restic/node_unix_test.go | 2 +- internal/restic/node_windows.go | 4 ++++ internal/restic/node_windows_test.go | 2 +- internal/restic/node_xattr.go | 8 +++++++ internal/restic/node_xattr_test.go | 28 +++++++++++++++++++++++++ internal/restic/tree_test.go | 4 ++-- 17 files changed, 97 insertions(+), 25 deletions(-) create mode 100644 changelog/unreleased/issue-3600 create mode 100644 internal/restic/node_xattr_test.go diff --git a/changelog/unreleased/issue-3600 b/changelog/unreleased/issue-3600 new file mode 100644 index 000000000..0da66d382 --- /dev/null +++ b/changelog/unreleased/issue-3600 @@ -0,0 +1,11 @@ +Bugfix: `backup` works if xattrs above the backup target cannot be read + +When backup targets are specified using absolute paths, then `backup` also +includes information about the parent folders of the backup targets in the +snapshot. If the extended attributes for some of these folders could not be +read due to missing permissions, this caused the backup to fail. This has been +fixed. 
+ +https://github.com/restic/restic/issues/3600 +https://github.com/restic/restic/pull/4668 +https://forum.restic.net/t/parent-directories-above-the-snapshot-source-path-fatal-error-permission-denied/7216 diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 050a0e2c7..146ff3a7c 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -237,8 +237,8 @@ func (arch *Archiver) trackItem(item string, previous, current *restic.Node, s I } // nodeFromFileInfo returns the restic node from an os.FileInfo. -func (arch *Archiver) nodeFromFileInfo(snPath, filename string, fi os.FileInfo) (*restic.Node, error) { - node, err := restic.NodeFromFileInfo(filename, fi) +func (arch *Archiver) nodeFromFileInfo(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) { + node, err := restic.NodeFromFileInfo(filename, fi, ignoreXattrListError) if !arch.WithAtime { node.AccessTime = node.ModTime } @@ -289,7 +289,7 @@ func (arch *Archiver) wrapLoadTreeError(id restic.ID, err error) error { func (arch *Archiver) saveDir(ctx context.Context, snPath string, dir string, fi os.FileInfo, previous *restic.Tree, complete CompleteFunc) (d FutureNode, err error) { debug.Log("%v %v", snPath, dir) - treeNode, err := arch.nodeFromFileInfo(snPath, dir, fi) + treeNode, err := arch.nodeFromFileInfo(snPath, dir, fi, false) if err != nil { return FutureNode{}, err } @@ -444,7 +444,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous debug.Log("%v hasn't changed, using old list of blobs", target) arch.trackItem(snPath, previous, previous, ItemStats{}, time.Since(start)) arch.CompleteBlob(previous.Size) - node, err := arch.nodeFromFileInfo(snPath, target, fi) + node, err := arch.nodeFromFileInfo(snPath, target, fi, false) if err != nil { return FutureNode{}, false, err } @@ -540,7 +540,7 @@ func (arch *Archiver) save(ctx context.Context, snPath, target string, previous default: debug.Log(" %v other", target) - node, err := arch.nodeFromFileInfo(snPath, target, fi) + node, err := arch.nodeFromFileInfo(snPath, target, fi, false) if err != nil { return FutureNode{}, false, err } @@ -623,7 +623,9 @@ func (arch *Archiver) saveTree(ctx context.Context, snPath string, atree *Tree, } debug.Log("%v, dir node data loaded from %v", snPath, atree.FileInfoPath) - node, err = arch.nodeFromFileInfo(snPath, atree.FileInfoPath, fi) + // in some cases reading xattrs for directories above the backup target is not allowed + // thus ignore errors for such folders. 
+ node, err = arch.nodeFromFileInfo(snPath, atree.FileInfoPath, fi, true) if err != nil { return FutureNode{}, 0, err } diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 841c8f2ce..b1ea4b6b6 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -556,7 +556,7 @@ func rename(t testing.TB, oldname, newname string) { } func nodeFromFI(t testing.TB, filename string, fi os.FileInfo) *restic.Node { - node, err := restic.NodeFromFileInfo(filename, fi) + node, err := restic.NodeFromFileInfo(filename, fi, false) if err != nil { t.Fatal(err) } @@ -2230,7 +2230,7 @@ func TestMetadataChanged(t *testing.T) { // get metadata fi := lstat(t, "testfile") - want, err := restic.NodeFromFileInfo("testfile", fi) + want, err := restic.NodeFromFileInfo("testfile", fi, false) if err != nil { t.Fatal(err) } diff --git a/internal/archiver/archiver_unix_test.go b/internal/archiver/archiver_unix_test.go index 9462420dd..a6b1aad2e 100644 --- a/internal/archiver/archiver_unix_test.go +++ b/internal/archiver/archiver_unix_test.go @@ -48,7 +48,7 @@ func wrapFileInfo(fi os.FileInfo) os.FileInfo { func statAndSnapshot(t *testing.T, repo restic.Repository, name string) (*restic.Node, *restic.Node) { fi := lstat(t, name) - want, err := restic.NodeFromFileInfo(name, fi) + want, err := restic.NodeFromFileInfo(name, fi, false) rtest.OK(t, err) _, node := snapshot(t, repo, fs.Local{}, nil, name) diff --git a/internal/archiver/file_saver.go b/internal/archiver/file_saver.go index 7f11bff8a..d10334301 100644 --- a/internal/archiver/file_saver.go +++ b/internal/archiver/file_saver.go @@ -29,7 +29,7 @@ type FileSaver struct { CompleteBlob func(bytes uint64) - NodeFromFileInfo func(snPath, filename string, fi os.FileInfo) (*restic.Node, error) + NodeFromFileInfo func(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) } // NewFileSaver returns a new file saver. A worker pool with fileWorkers is @@ -156,7 +156,7 @@ func (s *FileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPat debug.Log("%v", snPath) - node, err := s.NodeFromFileInfo(snPath, f.Name(), fi) + node, err := s.NodeFromFileInfo(snPath, f.Name(), fi, false) if err != nil { _ = f.Close() completeError(err) diff --git a/internal/archiver/file_saver_test.go b/internal/archiver/file_saver_test.go index ced9d796e..409bdedd0 100644 --- a/internal/archiver/file_saver_test.go +++ b/internal/archiver/file_saver_test.go @@ -49,8 +49,8 @@ func startFileSaver(ctx context.Context, t testing.TB) (*FileSaver, context.Cont } s := NewFileSaver(ctx, wg, saveBlob, pol, workers, workers) - s.NodeFromFileInfo = func(snPath, filename string, fi os.FileInfo) (*restic.Node, error) { - return restic.NodeFromFileInfo(filename, fi) + s.NodeFromFileInfo = func(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error) { + return restic.NodeFromFileInfo(filename, fi, ignoreXattrListError) } return s, ctx, wg diff --git a/internal/restic/node.go b/internal/restic/node.go index e7688aada..9613cf3c2 100644 --- a/internal/restic/node.go +++ b/internal/restic/node.go @@ -134,7 +134,7 @@ func (node Node) String() string { // NodeFromFileInfo returns a new node from the given path and FileInfo. It // returns the first error that is encountered, together with a node. 
-func NodeFromFileInfo(path string, fi os.FileInfo) (*Node, error) { +func NodeFromFileInfo(path string, fi os.FileInfo, ignoreXattrListError bool) (*Node, error) { mask := os.ModePerm | os.ModeType | os.ModeSetuid | os.ModeSetgid | os.ModeSticky node := &Node{ Path: path, @@ -148,7 +148,7 @@ func NodeFromFileInfo(path string, fi os.FileInfo) (*Node, error) { node.Size = uint64(fi.Size()) } - err := node.fillExtra(path, fi) + err := node.fillExtra(path, fi, ignoreXattrListError) return node, err } @@ -675,7 +675,7 @@ func lookupGroup(gid uint32) string { return group } -func (node *Node) fillExtra(path string, fi os.FileInfo) error { +func (node *Node) fillExtra(path string, fi os.FileInfo, ignoreXattrListError bool) error { stat, ok := toStatT(fi.Sys()) if !ok { // fill minimal info with current values for uid, gid @@ -719,7 +719,7 @@ func (node *Node) fillExtra(path string, fi os.FileInfo) error { allowExtended, err := node.fillGenericAttributes(path, fi, stat) if allowExtended { // Skip processing ExtendedAttributes if allowExtended is false. - errEx := node.fillExtendedAttributes(path) + errEx := node.fillExtendedAttributes(path, ignoreXattrListError) if err == nil { err = errEx } else { @@ -729,10 +729,13 @@ func (node *Node) fillExtra(path string, fi os.FileInfo) error { return err } -func (node *Node) fillExtendedAttributes(path string) error { +func (node *Node) fillExtendedAttributes(path string, ignoreListError bool) error { xattrs, err := Listxattr(path) debug.Log("fillExtendedAttributes(%v) %v %v", path, xattrs, err) if err != nil { + if ignoreListError && IsListxattrPermissionError(err) { + return nil + } return err } diff --git a/internal/restic/node_aix.go b/internal/restic/node_aix.go index def46bd60..8ee9022c9 100644 --- a/internal/restic/node_aix.go +++ b/internal/restic/node_aix.go @@ -33,6 +33,10 @@ func Listxattr(path string) ([]string, error) { return nil, nil } +func IsListxattrPermissionError(_ error) bool { + return false +} + // Setxattr is a no-op on AIX. func Setxattr(path, name string, data []byte) error { return nil diff --git a/internal/restic/node_netbsd.go b/internal/restic/node_netbsd.go index 1a47299be..cf1fa36bd 100644 --- a/internal/restic/node_netbsd.go +++ b/internal/restic/node_netbsd.go @@ -23,6 +23,10 @@ func Listxattr(path string) ([]string, error) { return nil, nil } +func IsListxattrPermissionError(_ error) bool { + return false +} + // Setxattr is a no-op on netbsd. func Setxattr(path, name string, data []byte) error { return nil diff --git a/internal/restic/node_openbsd.go b/internal/restic/node_openbsd.go index e60eb9dc8..4f1c0dacb 100644 --- a/internal/restic/node_openbsd.go +++ b/internal/restic/node_openbsd.go @@ -23,6 +23,10 @@ func Listxattr(path string) ([]string, error) { return nil, nil } +func IsListxattrPermissionError(_ error) bool { + return false +} + // Setxattr is a no-op on openbsd. 
func Setxattr(path, name string, data []byte) error { return nil diff --git a/internal/restic/node_test.go b/internal/restic/node_test.go index d9fa02ac8..ea271faab 100644 --- a/internal/restic/node_test.go +++ b/internal/restic/node_test.go @@ -11,6 +11,7 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" "github.com/restic/restic/internal/test" rtest "github.com/restic/restic/internal/test" ) @@ -31,7 +32,7 @@ func BenchmarkNodeFillUser(t *testing.B) { t.ResetTimer() for i := 0; i < t.N; i++ { - _, err := NodeFromFileInfo(path, fi) + _, err := NodeFromFileInfo(path, fi, false) rtest.OK(t, err) } @@ -55,7 +56,7 @@ func BenchmarkNodeFromFileInfo(t *testing.B) { t.ResetTimer() for i := 0; i < t.N; i++ { - _, err := NodeFromFileInfo(path, fi) + _, err := NodeFromFileInfo(path, fi, false) if err != nil { t.Fatal(err) } @@ -227,8 +228,11 @@ func TestNodeRestoreAt(t *testing.T) { fi, err := os.Lstat(nodePath) rtest.OK(t, err) - n2, err := NodeFromFileInfo(nodePath, fi) + n2, err := NodeFromFileInfo(nodePath, fi, false) rtest.OK(t, err) + n3, err := NodeFromFileInfo(nodePath, fi, true) + rtest.OK(t, err) + rtest.Assert(t, n2.Equals(*n3), "unexpected node info mismatch %v", cmp.Diff(n2, n3)) rtest.Assert(t, test.Name == n2.Name, "%v: name doesn't match (%v != %v)", test.Type, test.Name, n2.Name) diff --git a/internal/restic/node_unix_test.go b/internal/restic/node_unix_test.go index 374326bf7..9ea7b1725 100644 --- a/internal/restic/node_unix_test.go +++ b/internal/restic/node_unix_test.go @@ -128,7 +128,7 @@ func TestNodeFromFileInfo(t *testing.T) { return } - node, err := NodeFromFileInfo(test.filename, fi) + node, err := NodeFromFileInfo(test.filename, fi, false) if err != nil { t.Fatal(err) } diff --git a/internal/restic/node_windows.go b/internal/restic/node_windows.go index 5875c3ccd..7766a1ddf 100644 --- a/internal/restic/node_windows.go +++ b/internal/restic/node_windows.go @@ -78,6 +78,10 @@ func Listxattr(path string) ([]string, error) { return nil, nil } +func IsListxattrPermissionError(_ error) bool { + return false +} + // Setxattr associates name and data together as an attribute of path. func Setxattr(path, name string, data []byte) error { return nil diff --git a/internal/restic/node_windows_test.go b/internal/restic/node_windows_test.go index 501d5a98a..5a5a0a61c 100644 --- a/internal/restic/node_windows_test.go +++ b/internal/restic/node_windows_test.go @@ -165,7 +165,7 @@ func restoreAndGetNode(t *testing.T, tempDir string, testNode Node, warningExpec fi, err := os.Lstat(testPath) test.OK(t, errors.Wrapf(err, "Could not Lstat for path: %s", testPath)) - nodeFromFileInfo, err := NodeFromFileInfo(testPath, fi) + nodeFromFileInfo, err := NodeFromFileInfo(testPath, fi, false) test.OK(t, errors.Wrapf(err, "Could not get NodeFromFileInfo for path: %s", testPath)) return testPath, nodeFromFileInfo diff --git a/internal/restic/node_xattr.go b/internal/restic/node_xattr.go index 0b2d5d552..8b080e74f 100644 --- a/internal/restic/node_xattr.go +++ b/internal/restic/node_xattr.go @@ -25,6 +25,14 @@ func Listxattr(path string) ([]string, error) { return l, handleXattrErr(err) } +func IsListxattrPermissionError(err error) bool { + var xerr *xattr.Error + if errors.As(err, &xerr) { + return xerr.Op == "xattr.list" && errors.Is(xerr.Err, os.ErrPermission) + } + return false +} + // Setxattr associates name and data together as an attribute of path. 
func Setxattr(path, name string, data []byte) error { return handleXattrErr(xattr.LSet(path, name, data)) diff --git a/internal/restic/node_xattr_test.go b/internal/restic/node_xattr_test.go new file mode 100644 index 000000000..5ce77bd28 --- /dev/null +++ b/internal/restic/node_xattr_test.go @@ -0,0 +1,28 @@ +//go:build darwin || freebsd || linux || solaris +// +build darwin freebsd linux solaris + +package restic + +import ( + "os" + "testing" + + "github.com/pkg/xattr" + rtest "github.com/restic/restic/internal/test" +) + +func TestIsListxattrPermissionError(t *testing.T) { + xerr := &xattr.Error{ + Op: "xattr.list", + Name: "test", + Err: os.ErrPermission, + } + err := handleXattrErr(xerr) + rtest.Assert(t, err != nil, "missing error") + rtest.Assert(t, IsListxattrPermissionError(err), "expected IsListxattrPermissionError to return true for %v", err) + + xerr.Err = os.ErrNotExist + err = handleXattrErr(xerr) + rtest.Assert(t, err != nil, "missing error") + rtest.Assert(t, !IsListxattrPermissionError(err), "expected IsListxattrPermissionError to return false for %v", err) +} diff --git a/internal/restic/tree_test.go b/internal/restic/tree_test.go index da674eb1c..67ecec897 100644 --- a/internal/restic/tree_test.go +++ b/internal/restic/tree_test.go @@ -86,7 +86,7 @@ func TestNodeComparison(t *testing.T) { fi, err := os.Lstat("tree_test.go") rtest.OK(t, err) - node, err := restic.NodeFromFileInfo("tree_test.go", fi) + node, err := restic.NodeFromFileInfo("tree_test.go", fi, false) rtest.OK(t, err) n2 := *node @@ -127,7 +127,7 @@ func TestTreeEqualSerialization(t *testing.T) { for _, fn := range files[:i] { fi, err := os.Lstat(fn) rtest.OK(t, err) - node, err := restic.NodeFromFileInfo(fn, fi) + node, err := restic.NodeFromFileInfo(fn, fi, false) rtest.OK(t, err) rtest.OK(t, tree.Insert(node)) From 591b421c4a7ef2862273c56eadd61246b37b3e85 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 29 Mar 2024 13:51:59 +0100 Subject: [PATCH 063/117] Deprecate s3legacy layout --- changelog/unreleased/issue-4602 | 14 ++++++++++++-- cmd/restic/cmd_restore_integration_test.go | 2 ++ internal/backend/layout/layout.go | 11 +++++++++++ internal/backend/layout/layout_test.go | 3 +++ internal/backend/local/config.go | 2 +- internal/backend/local/layout_test.go | 2 ++ internal/backend/s3/config.go | 2 +- internal/backend/sftp/config.go | 2 +- internal/backend/sftp/layout_test.go | 2 ++ internal/feature/registry.go | 10 ++++++---- 10 files changed, 41 insertions(+), 9 deletions(-) diff --git a/changelog/unreleased/issue-4602 b/changelog/unreleased/issue-4602 index 3cba63876..7532bcb1e 100644 --- a/changelog/unreleased/issue-4602 +++ b/changelog/unreleased/issue-4602 @@ -1,7 +1,7 @@ -Change: Deprecate legacy index format +Change: Deprecate legacy index format and s3legacy layout Support for the legacy index format used by restic before version 0.2.0 has -been depreacted and will be removed in the next minor restic version. You can +been deprecated and will be removed in the next minor restic version. You can use `restic repair index` to update the index to the current format. It is possible to temporarily reenable support for the legacy index format by @@ -9,5 +9,15 @@ setting the environment variable `RESTIC_FEATURES=deprecate-legacy-index=false`. Note that this feature flag will be removed in the next minor restic version. +Support for the s3legacy layout used for the S3 backend before restic 0.7.0 +has been deprecated and will be removed in the next minor restic version. 
You +can migrate your S3 repository using `RESTIC_FEATURES=deprecate-s3-legacy-layout=false restic migrate s3_layout`. + +It is possible to temporarily reenable support for the legacy s3layout by +setting the environment variable +`RESTIC_FEATURES=deprecate-s3-legacy-layout=false`. Note that this feature flag +will be removed in the next minor restic version. + https://github.com/restic/restic/issues/4602 https://github.com/restic/restic/pull/4724 +https://github.com/restic/restic/pull/4743 diff --git a/cmd/restic/cmd_restore_integration_test.go b/cmd/restic/cmd_restore_integration_test.go index 2c7cbe1fb..806c7584b 100644 --- a/cmd/restic/cmd_restore_integration_test.go +++ b/cmd/restic/cmd_restore_integration_test.go @@ -11,6 +11,7 @@ import ( "testing" "time" + "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/filter" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" @@ -274,6 +275,7 @@ func TestRestoreNoMetadataOnIgnoredIntermediateDirs(t *testing.T) { } func TestRestoreLocalLayout(t *testing.T) { + defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateS3LegacyLayout, false)() env, cleanup := withTestEnvironment(t) defer cleanup() diff --git a/internal/backend/layout/layout.go b/internal/backend/layout/layout.go index b600566a4..052fd66ca 100644 --- a/internal/backend/layout/layout.go +++ b/internal/backend/layout/layout.go @@ -10,6 +10,7 @@ import ( "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/feature" "github.com/restic/restic/internal/fs" "github.com/restic/restic/internal/restic" ) @@ -93,6 +94,8 @@ func hasBackendFile(ctx context.Context, fs Filesystem, dir string) (bool, error // cannot be detected automatically. var ErrLayoutDetectionFailed = errors.New("auto-detecting the filesystem layout failed") +var ErrLegacyLayoutFound = errors.New("detected legacy S3 layout. Use `RESTIC_FEATURES=deprecate-s3-legacy-layout=false restic migrate s3_layout` to migrate your repository") + // DetectLayout tries to find out which layout is used in a local (or sftp) // filesystem at the given path. If repo is nil, an instance of LocalFilesystem // is used. 
@@ -123,6 +126,10 @@ func DetectLayout(ctx context.Context, repo Filesystem, dir string) (Layout, err } if foundKeyFile && !foundKeysFile { + if feature.Flag.Enabled(feature.DeprecateS3LegacyLayout) { + return nil, ErrLegacyLayoutFound + } + debug.Log("found s3 layout at %v", dir) return &S3LegacyLayout{ Path: dir, @@ -145,6 +152,10 @@ func ParseLayout(ctx context.Context, repo Filesystem, layout, defaultLayout, pa Join: repo.Join, } case "s3legacy": + if feature.Flag.Enabled(feature.DeprecateS3LegacyLayout) { + return nil, ErrLegacyLayoutFound + } + l = &S3LegacyLayout{ Path: path, Join: repo.Join, diff --git a/internal/backend/layout/layout_test.go b/internal/backend/layout/layout_test.go index 998f5aeb6..55a0749c9 100644 --- a/internal/backend/layout/layout_test.go +++ b/internal/backend/layout/layout_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/feature" rtest "github.com/restic/restic/internal/test" ) @@ -352,6 +353,7 @@ func TestS3LegacyLayout(t *testing.T) { } func TestDetectLayout(t *testing.T) { + defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateS3LegacyLayout, false)() path := rtest.TempDir(t) var tests = []struct { @@ -389,6 +391,7 @@ func TestDetectLayout(t *testing.T) { } func TestParseLayout(t *testing.T) { + defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateS3LegacyLayout, false)() path := rtest.TempDir(t) var tests = []struct { diff --git a/internal/backend/local/config.go b/internal/backend/local/config.go index dc5e7948c..e08f05550 100644 --- a/internal/backend/local/config.go +++ b/internal/backend/local/config.go @@ -10,7 +10,7 @@ import ( // Config holds all information needed to open a local repository. type Config struct { Path string - Layout string `option:"layout" help:"use this backend directory layout (default: auto-detect)"` + Layout string `option:"layout" help:"use this backend directory layout (default: auto-detect) (deprecated)"` Connections uint `option:"connections" help:"set a limit for the number of concurrent operations (default: 2)"` } diff --git a/internal/backend/local/layout_test.go b/internal/backend/local/layout_test.go index 46f3996bb..00c91376a 100644 --- a/internal/backend/local/layout_test.go +++ b/internal/backend/local/layout_test.go @@ -6,10 +6,12 @@ import ( "testing" "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/feature" rtest "github.com/restic/restic/internal/test" ) func TestLayout(t *testing.T) { + defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateS3LegacyLayout, false)() path := rtest.TempDir(t) var tests = []struct { diff --git a/internal/backend/s3/config.go b/internal/backend/s3/config.go index b4d44399f..4aea4c3d1 100644 --- a/internal/backend/s3/config.go +++ b/internal/backend/s3/config.go @@ -20,7 +20,7 @@ type Config struct { Secret options.SecretString Bucket string Prefix string - Layout string `option:"layout" help:"use this backend layout (default: auto-detect)"` + Layout string `option:"layout" help:"use this backend layout (default: auto-detect) (deprecated)"` StorageClass string `option:"storage-class" help:"set S3 storage class (STANDARD, STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING or REDUCED_REDUNDANCY)"` Connections uint `option:"connections" help:"set a limit for the number of concurrent connections (default: 5)"` diff --git a/internal/backend/sftp/config.go b/internal/backend/sftp/config.go index 65af50d19..aa8ac7bff 100644 --- a/internal/backend/sftp/config.go +++ 
b/internal/backend/sftp/config.go @@ -13,7 +13,7 @@ import ( type Config struct { User, Host, Port, Path string - Layout string `option:"layout" help:"use this backend directory layout (default: auto-detect)"` + Layout string `option:"layout" help:"use this backend directory layout (default: auto-detect) (deprecated)"` Command string `option:"command" help:"specify command to create sftp connection"` Args string `option:"args" help:"specify arguments for ssh"` diff --git a/internal/backend/sftp/layout_test.go b/internal/backend/sftp/layout_test.go index 9cf24a753..8bb7eac01 100644 --- a/internal/backend/sftp/layout_test.go +++ b/internal/backend/sftp/layout_test.go @@ -8,6 +8,7 @@ import ( "github.com/restic/restic/internal/backend" "github.com/restic/restic/internal/backend/sftp" + "github.com/restic/restic/internal/feature" rtest "github.com/restic/restic/internal/test" ) @@ -16,6 +17,7 @@ func TestLayout(t *testing.T) { t.Skip("sftp server binary not available") } + defer feature.TestSetFlag(t, feature.Flag, feature.DeprecateS3LegacyLayout, false)() path := rtest.TempDir(t) var tests = []struct { diff --git a/internal/feature/registry.go b/internal/feature/registry.go index 4693b8909..2d2e45edf 100644 --- a/internal/feature/registry.go +++ b/internal/feature/registry.go @@ -5,13 +5,15 @@ var Flag = New() // flag names are written in kebab-case const ( - DeprecateLegacyIndex FlagName = "deprecate-legacy-index" - DeviceIDForHardlinks FlagName = "device-id-for-hardlinks" + DeprecateLegacyIndex FlagName = "deprecate-legacy-index" + DeprecateS3LegacyLayout FlagName = "deprecate-s3-legacy-layout" + DeviceIDForHardlinks FlagName = "device-id-for-hardlinks" ) func init() { Flag.SetFlags(map[FlagName]FlagDesc{ - DeprecateLegacyIndex: {Type: Beta, Description: "disable support for index format used by restic 0.1.0. Use `restic repair index` to update the index if necessary."}, - DeviceIDForHardlinks: {Type: Alpha, Description: "store deviceID only for hardlinks to reduce metadata changes for example when using btrfs subvolumes. Will be removed in a future restic version after repository format 3 is available"}, + DeprecateLegacyIndex: {Type: Beta, Description: "disable support for index format used by restic 0.1.0. Use `restic repair index` to update the index if necessary."}, + DeprecateS3LegacyLayout: {Type: Beta, Description: "disable support for S3 legacy layout used up to restic 0.7.0. Use `RESTIC_FEATURES=deprecate-s3-legacy-layout=false restic migrate s3_layout` to migrate your S3 repository if necessary."}, + DeviceIDForHardlinks: {Type: Alpha, Description: "store deviceID only for hardlinks to reduce metadata changes for example when using btrfs subvolumes. Will be removed in a future restic version after repository format 3 is available"}, }) } From 739d11c2eba4f85e66254e0daadde9c9300a22c7 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 6 Apr 2024 00:11:27 +0200 Subject: [PATCH 064/117] forget: replace usage of DeleteFilesChecked This simplifies refactoring prune into the repository package. 
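The new code performs the snapshot deletions through restic.ParallelRemove with a caller-supplied per-file callback, so the deletion logic no longer has to live in the cmd-level DeleteFilesChecked helper. A minimal sketch of that callback-driven, parallel removal pattern (hypothetical names and a generic errgroup-based worker pool, not restic's actual implementation; the real call is the restic.ParallelRemove shown in the diff below):

    import (
        "context"

        "golang.org/x/sync/errgroup"
    )

    // removeAll deletes every id concurrently and hands each result to report,
    // so the caller decides how to warn about failures or count progress.
    func removeAll(ctx context.Context, ids []string,
        remove func(ctx context.Context, id string) error,
        report func(id string, err error) error) error {
        g, ctx := errgroup.WithContext(ctx)
        g.SetLimit(8) // bound the number of concurrent deletions
        for _, id := range ids {
            id := id // capture the loop variable for the goroutine
            g.Go(func() error {
                return report(id, remove(ctx, id))
            })
        }
        return g.Wait()
    }

In the change below, forget passes a callback that warns when a snapshot file cannot be removed and logs the removed ID at high verbosity, matching what DeleteFilesChecked used to print.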
--- cmd/restic/cmd_forget.go | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index d634576c0..080b17472 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -267,7 +267,17 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption if len(removeSnIDs) > 0 { if !opts.DryRun { - err := DeleteFilesChecked(ctx, gopts, repo, removeSnIDs, restic.SnapshotFile) + bar := newProgressMax(!gopts.JSON && !gopts.Quiet, 0, "files deleted") + err := restic.ParallelRemove(ctx, repo, removeSnIDs, restic.SnapshotFile, func(id restic.ID, err error) error { + if err != nil { + Warnf("unable to remove %v/%v from the repository\n", restic.SnapshotFile, id) + } + if !gopts.JSON && gopts.verbosity > 2 { + Verbosef("removed %v/%v\n", restic.SnapshotFile, id) + } + return nil + }, bar) + bar.Done() if err != nil { return err } From 32a234b67ede617e7e6b83496dbbe3aeedacb25b Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 6 Apr 2024 00:51:20 +0200 Subject: [PATCH 065/117] prune/forget/repair index: convert output to use progress.Printer --- cmd/restic/cmd_forget.go | 53 +++--- cmd/restic/cmd_forget_integration_test.go | 5 +- cmd/restic/cmd_prune.go | 162 +++++++++--------- cmd/restic/cmd_prune_integration_test.go | 17 +- cmd/restic/cmd_repair_index.go | 37 ++-- .../cmd_repair_index_integration_test.go | 14 +- cmd/restic/cmd_repair_packs.go | 6 +- cmd/restic/delete.go | 21 +-- cmd/restic/integration_test.go | 9 +- 9 files changed, 176 insertions(+), 148 deletions(-) diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index 080b17472..f6fc5379c 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -8,6 +8,7 @@ import ( "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui/termstatus" "github.com/spf13/cobra" ) @@ -33,7 +34,9 @@ Exit status is 0 if the command was successful, and non-zero if there was any er `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, args []string) error { - return runForget(cmd.Context(), forgetOptions, forgetPruneOptions, globalOptions, args) + term, cancel := setupTermstatus() + defer cancel() + return runForget(cmd.Context(), forgetOptions, forgetPruneOptions, globalOptions, term, args) }, } @@ -152,7 +155,7 @@ func verifyForgetOptions(opts *ForgetOptions) error { return nil } -func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOptions, gopts GlobalOptions, args []string) error { +func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOptions, gopts GlobalOptions, term *termstatus.Terminal, args []string) error { err := verifyForgetOptions(&opts) if err != nil { return err @@ -173,6 +176,12 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption } defer unlock() + verbosity := gopts.verbosity + if gopts.JSON { + verbosity = 0 + } + printer := newTerminalProgressPrinter(verbosity, term) + var snapshots restic.Snapshots removeSnIDs := restic.NewIDSet() @@ -210,15 +219,11 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption } if policy.Empty() && len(args) == 0 { - if !gopts.JSON { - Verbosef("no policy was specified, no snapshots will be removed\n") - } + printer.P("no policy was specified, no snapshots will be removed\n") } if !policy.Empty() { - if !gopts.JSON { - Verbosef("Applying Policy: %v\n", policy) - } + printer.P("Applying Policy: %v\n", 
policy) for k, snapshotGroup := range snapshotGroups { if gopts.Verbose >= 1 && !gopts.JSON { @@ -241,16 +246,16 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption keep, remove, reasons := restic.ApplyPolicy(snapshotGroup, policy) if len(keep) != 0 && !gopts.Quiet && !gopts.JSON { - Printf("keep %d snapshots:\n", len(keep)) + printer.P("keep %d snapshots:\n", len(keep)) PrintSnapshots(globalOptions.stdout, keep, reasons, opts.Compact) - Printf("\n") + printer.P("\n") } fg.Keep = asJSONSnapshots(keep) if len(remove) != 0 && !gopts.Quiet && !gopts.JSON { - Printf("remove %d snapshots:\n", len(remove)) + printer.P("remove %d snapshots:\n", len(remove)) PrintSnapshots(globalOptions.stdout, remove, nil, opts.Compact) - Printf("\n") + printer.P("\n") } fg.Remove = asJSONSnapshots(remove) @@ -267,14 +272,12 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption if len(removeSnIDs) > 0 { if !opts.DryRun { - bar := newProgressMax(!gopts.JSON && !gopts.Quiet, 0, "files deleted") + bar := printer.NewCounter("files deleted") err := restic.ParallelRemove(ctx, repo, removeSnIDs, restic.SnapshotFile, func(id restic.ID, err error) error { if err != nil { - Warnf("unable to remove %v/%v from the repository\n", restic.SnapshotFile, id) - } - if !gopts.JSON && gopts.verbosity > 2 { - Verbosef("removed %v/%v\n", restic.SnapshotFile, id) + printer.E("unable to remove %v/%v from the repository\n", restic.SnapshotFile, id) } + printer.VV("removed %v/%v\n", restic.SnapshotFile, id) return nil }, bar) bar.Done() @@ -282,9 +285,7 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption return err } } else { - if !gopts.JSON { - Printf("Would have removed the following snapshots:\n%v\n\n", removeSnIDs) - } + printer.P("Would have removed the following snapshots:\n%v\n\n", removeSnIDs) } } @@ -296,15 +297,13 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption } if len(removeSnIDs) > 0 && opts.Prune { - if !gopts.JSON { - if opts.DryRun { - Verbosef("%d snapshots would be removed, running prune dry run\n", len(removeSnIDs)) - } else { - Verbosef("%d snapshots have been removed, running prune\n", len(removeSnIDs)) - } + if opts.DryRun { + printer.P("%d snapshots would be removed, running prune dry run\n", len(removeSnIDs)) + } else { + printer.P("%d snapshots have been removed, running prune\n", len(removeSnIDs)) } pruneOptions.DryRun = opts.DryRun - return runPruneWithRepo(ctx, pruneOptions, gopts, repo, removeSnIDs) + return runPruneWithRepo(ctx, pruneOptions, gopts, repo, removeSnIDs, term) } return nil diff --git a/cmd/restic/cmd_forget_integration_test.go b/cmd/restic/cmd_forget_integration_test.go index 1c027a240..e4cdb744e 100644 --- a/cmd/restic/cmd_forget_integration_test.go +++ b/cmd/restic/cmd_forget_integration_test.go @@ -5,6 +5,7 @@ import ( "testing" rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/termstatus" ) func testRunForget(t testing.TB, gopts GlobalOptions, args ...string) { @@ -12,5 +13,7 @@ func testRunForget(t testing.TB, gopts GlobalOptions, args ...string) { pruneOpts := PruneOptions{ MaxUnused: "5%", } - rtest.OK(t, runForget(context.TODO(), opts, pruneOpts, gopts, args)) + rtest.OK(t, withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runForget(context.TODO(), opts, pruneOpts, gopts, term, args) + })) } diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index 3a9a8c33c..833e72ae7 
100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -16,6 +16,7 @@ import ( "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui" "github.com/restic/restic/internal/ui/progress" + "github.com/restic/restic/internal/ui/termstatus" "github.com/spf13/cobra" ) @@ -38,7 +39,9 @@ Exit status is 0 if the command was successful, and non-zero if there was any er `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - return runPrune(cmd.Context(), pruneOptions, globalOptions) + term, cancel := setupTermstatus() + defer cancel() + return runPrune(cmd.Context(), pruneOptions, globalOptions, term) }, } @@ -138,7 +141,7 @@ func verifyPruneOptions(opts *PruneOptions) error { return nil } -func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions) error { +func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, term *termstatus.Terminal) error { err := verifyPruneOptions(&opts) if err != nil { return err @@ -170,10 +173,10 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions) error opts.unsafeRecovery = true } - return runPruneWithRepo(ctx, opts, gopts, repo, restic.NewIDSet()) + return runPruneWithRepo(ctx, opts, gopts, repo, restic.NewIDSet(), term) } -func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo *repository.Repository, ignoreSnapshots restic.IDSet) error { +func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo *repository.Repository, ignoreSnapshots restic.IDSet, term *termstatus.Terminal) error { // we do not need index updates while pruning! repo.DisableAutoIndexUpdate() @@ -181,24 +184,26 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption Print("warning: running prune without a cache, this may be very slow!\n") } - Verbosef("loading indexes...\n") + printer := newTerminalProgressPrinter(gopts.verbosity, term) + + printer.P("loading indexes...\n") // loading the index before the snapshots is ok, as we use an exclusive lock here - bar := newIndexProgress(gopts.Quiet, gopts.JSON) + bar := newIndexTerminalProgress(gopts.Quiet, gopts.JSON, term) err := repo.LoadIndex(ctx, bar) if err != nil { return err } - plan, stats, err := planPrune(ctx, opts, repo, ignoreSnapshots, gopts.Quiet) + plan, stats, err := planPrune(ctx, opts, repo, ignoreSnapshots, printer) if err != nil { return err } if opts.DryRun { - Verbosef("\nWould have made the following changes:") + printer.P("\nWould have made the following changes:") } - err = printPruneStats(stats) + err = printPruneStats(printer, stats) if err != nil { return err } @@ -206,7 +211,7 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption // Trigger GC to reset garbage collection threshold runtime.GC() - return doPrune(ctx, opts, gopts, repo, plan) + return doPrune(ctx, opts, repo, plan, printer) } type pruneStats struct { @@ -264,22 +269,22 @@ type packInfoWithID struct { // planPrune selects which files to rewrite and which to delete and which blobs to keep. // Also some summary statistics are returned. 
-func planPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, ignoreSnapshots restic.IDSet, quiet bool) (prunePlan, pruneStats, error) { +func planPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, ignoreSnapshots restic.IDSet, printer progress.Printer) (prunePlan, pruneStats, error) { var stats pruneStats - usedBlobs, err := getUsedBlobs(ctx, repo, ignoreSnapshots, quiet) + usedBlobs, err := getUsedBlobs(ctx, repo, ignoreSnapshots, printer) if err != nil { return prunePlan{}, stats, err } - Verbosef("searching used packs...\n") - keepBlobs, indexPack, err := packInfoFromIndex(ctx, repo.Index(), usedBlobs, &stats) + printer.P("searching used packs...\n") + keepBlobs, indexPack, err := packInfoFromIndex(ctx, repo.Index(), usedBlobs, &stats, printer) if err != nil { return prunePlan{}, stats, err } - Verbosef("collecting packs for deletion and repacking\n") - plan, err := decidePackAction(ctx, opts, repo, indexPack, &stats, quiet) + printer.P("collecting packs for deletion and repacking\n") + plan, err := decidePackAction(ctx, opts, repo, indexPack, &stats, printer) if err != nil { return prunePlan{}, stats, err } @@ -308,7 +313,7 @@ func planPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, i return plan, stats, nil } -func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs restic.CountedBlobSet, stats *pruneStats) (restic.CountedBlobSet, map[restic.ID]packInfo, error) { +func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs restic.CountedBlobSet, stats *pruneStats, printer progress.Printer) (restic.CountedBlobSet, map[restic.ID]packInfo, error) { // iterate over all blobs in index to find out which blobs are duplicates // The counter in usedBlobs describes how many instances of the blob exist in the repository index // Thus 0 == blob is missing, 1 == blob exists once, >= 2 == duplicates exist @@ -337,7 +342,7 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re } if len(missingBlobs) != 0 { - Warnf("%v not found in the index\n\n"+ + printer.E("%v not found in the index\n\n"+ "Integrity check failed: Data seems to be missing.\n"+ "Will not start prune to prevent (additional) data loss!\n"+ "Please report this error (along with the output of the 'prune' run) at\n"+ @@ -458,7 +463,7 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re return usedBlobs, indexPack, nil } -func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Repository, indexPack map[restic.ID]packInfo, stats *pruneStats, quiet bool) (prunePlan, error) { +func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Repository, indexPack map[restic.ID]packInfo, stats *pruneStats, printer progress.Printer) (prunePlan, error) { removePacksFirst := restic.NewIDSet() removePacks := restic.NewIDSet() repackPacks := restic.NewIDSet() @@ -474,12 +479,13 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi } // loop over all packs and decide what to do - bar := newProgressMax(!quiet, uint64(len(indexPack)), "packs processed") + bar := printer.NewCounter("packs processed") + bar.SetMax(uint64(len(indexPack))) err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error { p, ok := indexPack[id] if !ok { // Pack was not referenced in index and is not used => immediately remove! 
- Verboseff("will remove pack %v as it is unused and not indexed\n", id.Str()) + printer.V("will remove pack %v as it is unused and not indexed\n", id.Str()) removePacksFirst.Insert(id) stats.size.unref += uint64(packSize) return nil @@ -489,7 +495,7 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi // Pack size does not fit and pack is needed => error // If the pack is not needed, this is no error, the pack can // and will be simply removed, see below. - Warnf("pack %s: calculated size %d does not match real size %d\nRun 'restic repair index'.\n", + printer.E("pack %s: calculated size %d does not match real size %d\nRun 'restic repair index'.\n", id.Str(), p.unusedSize+p.usedSize, packSize) return errorSizeNotMatching } @@ -562,16 +568,16 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi } if len(indexPack) != 0 { - Warnf("The index references %d needed pack files which are missing from the repository:\n", len(indexPack)) + printer.E("The index references %d needed pack files which are missing from the repository:\n", len(indexPack)) for id := range indexPack { - Warnf(" %v\n", id) + printer.E(" %v\n", id) } return prunePlan{}, errorPacksMissing } if len(ignorePacks) != 0 { - Warnf("Missing but unneeded pack files are referenced in the index, will be repaired\n") + printer.E("Missing but unneeded pack files are referenced in the index, will be repaired\n") for id := range ignorePacks { - Warnf("will forget missing pack file %v\n", id) + printer.E("will forget missing pack file %v\n", id) } } @@ -657,43 +663,43 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi } // printPruneStats prints out the statistics -func printPruneStats(stats pruneStats) error { - Verboseff("\nused: %10d blobs / %s\n", stats.blobs.used, ui.FormatBytes(stats.size.used)) +func printPruneStats(printer progress.Printer, stats pruneStats) error { + printer.V("\nused: %10d blobs / %s\n", stats.blobs.used, ui.FormatBytes(stats.size.used)) if stats.blobs.duplicate > 0 { - Verboseff("duplicates: %10d blobs / %s\n", stats.blobs.duplicate, ui.FormatBytes(stats.size.duplicate)) + printer.V("duplicates: %10d blobs / %s\n", stats.blobs.duplicate, ui.FormatBytes(stats.size.duplicate)) } - Verboseff("unused: %10d blobs / %s\n", stats.blobs.unused, ui.FormatBytes(stats.size.unused)) + printer.V("unused: %10d blobs / %s\n", stats.blobs.unused, ui.FormatBytes(stats.size.unused)) if stats.size.unref > 0 { - Verboseff("unreferenced: %s\n", ui.FormatBytes(stats.size.unref)) + printer.V("unreferenced: %s\n", ui.FormatBytes(stats.size.unref)) } totalBlobs := stats.blobs.used + stats.blobs.unused + stats.blobs.duplicate totalSize := stats.size.used + stats.size.duplicate + stats.size.unused + stats.size.unref unusedSize := stats.size.duplicate + stats.size.unused - Verboseff("total: %10d blobs / %s\n", totalBlobs, ui.FormatBytes(totalSize)) - Verboseff("unused size: %s of total size\n", ui.FormatPercent(unusedSize, totalSize)) + printer.V("total: %10d blobs / %s\n", totalBlobs, ui.FormatBytes(totalSize)) + printer.V("unused size: %s of total size\n", ui.FormatPercent(unusedSize, totalSize)) - Verbosef("\nto repack: %10d blobs / %s\n", stats.blobs.repack, ui.FormatBytes(stats.size.repack)) - Verbosef("this removes: %10d blobs / %s\n", stats.blobs.repackrm, ui.FormatBytes(stats.size.repackrm)) - Verbosef("to delete: %10d blobs / %s\n", stats.blobs.remove, ui.FormatBytes(stats.size.remove+stats.size.unref)) + printer.P("\nto repack: %10d 
blobs / %s\n", stats.blobs.repack, ui.FormatBytes(stats.size.repack)) + printer.P("this removes: %10d blobs / %s\n", stats.blobs.repackrm, ui.FormatBytes(stats.size.repackrm)) + printer.P("to delete: %10d blobs / %s\n", stats.blobs.remove, ui.FormatBytes(stats.size.remove+stats.size.unref)) totalPruneSize := stats.size.remove + stats.size.repackrm + stats.size.unref - Verbosef("total prune: %10d blobs / %s\n", stats.blobs.remove+stats.blobs.repackrm, ui.FormatBytes(totalPruneSize)) + printer.P("total prune: %10d blobs / %s\n", stats.blobs.remove+stats.blobs.repackrm, ui.FormatBytes(totalPruneSize)) if stats.size.uncompressed > 0 { - Verbosef("not yet compressed: %s\n", ui.FormatBytes(stats.size.uncompressed)) + printer.P("not yet compressed: %s\n", ui.FormatBytes(stats.size.uncompressed)) } - Verbosef("remaining: %10d blobs / %s\n", totalBlobs-(stats.blobs.remove+stats.blobs.repackrm), ui.FormatBytes(totalSize-totalPruneSize)) + printer.P("remaining: %10d blobs / %s\n", totalBlobs-(stats.blobs.remove+stats.blobs.repackrm), ui.FormatBytes(totalSize-totalPruneSize)) unusedAfter := unusedSize - stats.size.remove - stats.size.repackrm - Verbosef("unused size after prune: %s (%s of remaining size)\n", + printer.P("unused size after prune: %s (%s of remaining size)\n", ui.FormatBytes(unusedAfter), ui.FormatPercent(unusedAfter, totalSize-totalPruneSize)) - Verbosef("\n") - Verboseff("totally used packs: %10d\n", stats.packs.used) - Verboseff("partly used packs: %10d\n", stats.packs.partlyUsed) - Verboseff("unused packs: %10d\n\n", stats.packs.unused) + printer.P("\n") + printer.V("totally used packs: %10d\n", stats.packs.used) + printer.V("partly used packs: %10d\n", stats.packs.partlyUsed) + printer.V("unused packs: %10d\n\n", stats.packs.unused) - Verboseff("to keep: %10d packs\n", stats.packs.keep) - Verboseff("to repack: %10d packs\n", stats.packs.repack) - Verboseff("to delete: %10d packs\n", stats.packs.remove) + printer.V("to keep: %10d packs\n", stats.packs.keep) + printer.V("to repack: %10d packs\n", stats.packs.repack) + printer.V("to delete: %10d packs\n", stats.packs.remove) if stats.packs.unref > 0 { - Verboseff("to delete: %10d unreferenced packs\n\n", stats.packs.unref) + printer.V("to delete: %10d unreferenced packs\n\n", stats.packs.unref) } return nil } @@ -704,29 +710,28 @@ func printPruneStats(stats pruneStats) error { // - rebuild the index while ignoring all files that will be deleted // - delete the files // plan.removePacks and plan.ignorePacks are modified in this function. -func doPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo restic.Repository, plan prunePlan) (err error) { +func doPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, plan prunePlan, printer progress.Printer) (err error) { if opts.DryRun { - if !gopts.JSON && gopts.verbosity >= 2 { - Printf("Repeated prune dry-runs can report slightly different amounts of data to keep or repack. This is expected behavior.\n\n") - if len(plan.removePacksFirst) > 0 { - Printf("Would have removed the following unreferenced packs:\n%v\n\n", plan.removePacksFirst) - } - Printf("Would have repacked and removed the following packs:\n%v\n\n", plan.repackPacks) - Printf("Would have removed the following no longer used packs:\n%v\n\n", plan.removePacks) + printer.V("Repeated prune dry-runs can report slightly different amounts of data to keep or repack. 
This is expected behavior.\n\n") + if len(plan.removePacksFirst) > 0 { + printer.V("Would have removed the following unreferenced packs:\n%v\n\n", plan.removePacksFirst) } + printer.V("Would have repacked and removed the following packs:\n%v\n\n", plan.repackPacks) + printer.V("Would have removed the following no longer used packs:\n%v\n\n", plan.removePacks) // Always quit here if DryRun was set! return nil } // unreferenced packs can be safely deleted first if len(plan.removePacksFirst) != 0 { - Verbosef("deleting unreferenced packs\n") - DeleteFiles(ctx, gopts, repo, plan.removePacksFirst, restic.PackFile) + printer.P("deleting unreferenced packs\n") + DeleteFiles(ctx, repo, plan.removePacksFirst, restic.PackFile, printer) } if len(plan.repackPacks) != 0 { - Verbosef("repacking packs\n") - bar := newProgressMax(!gopts.Quiet, uint64(len(plan.repackPacks)), "packs repacked") + printer.P("repacking packs\n") + bar := printer.NewCounter("packs repacked") + bar.SetMax(uint64(len(plan.repackPacks))) _, err := repository.Repack(ctx, repo, repo, plan.repackPacks, plan.keepBlobs, bar) bar.Done() if err != nil { @@ -737,7 +742,7 @@ func doPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo r plan.removePacks.Merge(plan.repackPacks) if len(plan.keepBlobs) != 0 { - Warnf("%v was not repacked\n\n"+ + printer.E("%v was not repacked\n\n"+ "Integrity check failed.\n"+ "Please report this error (along with the output of the 'prune' run) at\n"+ "https://github.com/restic/restic/issues/new/choose\n", plan.keepBlobs) @@ -755,56 +760,54 @@ func doPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo r } if opts.unsafeRecovery { - Verbosef("deleting index files\n") + printer.P("deleting index files\n") indexFiles := repo.Index().(*index.MasterIndex).IDs() - err = DeleteFilesChecked(ctx, gopts, repo, indexFiles, restic.IndexFile) + err = DeleteFilesChecked(ctx, repo, indexFiles, restic.IndexFile, printer) if err != nil { return errors.Fatalf("%s", err) } } else if len(plan.ignorePacks) != 0 { - err = rebuildIndexFiles(ctx, gopts, repo, plan.ignorePacks, nil, false) + err = rebuildIndexFiles(ctx, repo, plan.ignorePacks, nil, false, printer) if err != nil { return errors.Fatalf("%s", err) } } if len(plan.removePacks) != 0 { - Verbosef("removing %d old packs\n", len(plan.removePacks)) - DeleteFiles(ctx, gopts, repo, plan.removePacks, restic.PackFile) + printer.P("removing %d old packs\n", len(plan.removePacks)) + DeleteFiles(ctx, repo, plan.removePacks, restic.PackFile, printer) } if opts.unsafeRecovery { - err = rebuildIndexFiles(ctx, gopts, repo, plan.ignorePacks, nil, true) + err = rebuildIndexFiles(ctx, repo, plan.ignorePacks, nil, true, printer) if err != nil { return errors.Fatalf("%s", err) } } - Verbosef("done\n") + printer.P("done\n") return nil } -func rebuildIndexFiles(ctx context.Context, gopts GlobalOptions, repo restic.Repository, removePacks restic.IDSet, extraObsolete restic.IDs, skipDeletion bool) error { - Verbosef("rebuilding index\n") +func rebuildIndexFiles(ctx context.Context, repo restic.Repository, removePacks restic.IDSet, extraObsolete restic.IDs, skipDeletion bool, printer progress.Printer) error { + printer.P("rebuilding index\n") - bar := newProgressMax(!gopts.Quiet, 0, "packs processed") + bar := printer.NewCounter("packs processed") return repo.Index().Save(ctx, repo, removePacks, extraObsolete, restic.MasterIndexSaveOpts{ SaveProgress: bar, DeleteProgress: func() *progress.Counter { - return newProgressMax(!gopts.Quiet, 0, "old indexes 
deleted") + return printer.NewCounter("old indexes deleted") }, DeleteReport: func(id restic.ID, _ error) { - if gopts.verbosity > 2 { - Verbosef("removed index %v\n", id.String()) - } + printer.VV("removed index %v\n", id.String()) }, SkipDeletion: skipDeletion, }) } -func getUsedBlobs(ctx context.Context, repo restic.Repository, ignoreSnapshots restic.IDSet, quiet bool) (usedBlobs restic.CountedBlobSet, err error) { +func getUsedBlobs(ctx context.Context, repo restic.Repository, ignoreSnapshots restic.IDSet, printer progress.Printer) (usedBlobs restic.CountedBlobSet, err error) { var snapshotTrees restic.IDs - Verbosef("loading all snapshots...\n") + printer.P("loading all snapshots...\n") err = restic.ForAllSnapshots(ctx, repo, repo, ignoreSnapshots, func(id restic.ID, sn *restic.Snapshot, err error) error { if err != nil { @@ -819,11 +822,12 @@ func getUsedBlobs(ctx context.Context, repo restic.Repository, ignoreSnapshots r return nil, errors.Fatalf("failed loading snapshot: %v", err) } - Verbosef("finding data that is still in use for %d snapshots\n", len(snapshotTrees)) + printer.P("finding data that is still in use for %d snapshots\n", len(snapshotTrees)) usedBlobs = restic.NewCountedBlobSet() - bar := newProgressMax(!quiet, uint64(len(snapshotTrees)), "snapshots") + bar := printer.NewCounter("snapshots") + bar.SetMax(uint64(len(snapshotTrees))) defer bar.Done() err = restic.FindUsedBlobs(ctx, repo, snapshotTrees, usedBlobs, bar) diff --git a/cmd/restic/cmd_prune_integration_test.go b/cmd/restic/cmd_prune_integration_test.go index ebfa7ae4e..4c21940c4 100644 --- a/cmd/restic/cmd_prune_integration_test.go +++ b/cmd/restic/cmd_prune_integration_test.go @@ -8,6 +8,7 @@ import ( "github.com/restic/restic/internal/backend" rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/termstatus" ) func testRunPrune(t testing.TB, gopts GlobalOptions, opts PruneOptions) { @@ -16,7 +17,9 @@ func testRunPrune(t testing.TB, gopts GlobalOptions, opts PruneOptions) { defer func() { gopts.backendTestHook = oldHook }() - rtest.OK(t, runPrune(context.TODO(), opts, gopts)) + rtest.OK(t, withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runPrune(context.TODO(), opts, gopts, term) + })) } func TestPrune(t *testing.T) { @@ -84,7 +87,9 @@ func testRunForgetJSON(t testing.TB, gopts GlobalOptions, args ...string) { pruneOpts := PruneOptions{ MaxUnused: "5%", } - return runForget(context.TODO(), opts, pruneOpts, gopts, args) + return withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runForget(context.TODO(), opts, pruneOpts, gopts, term, args) + }) }) rtest.OK(t, err) @@ -138,7 +143,9 @@ func TestPruneWithDamagedRepository(t *testing.T) { env.gopts.backendTestHook = oldHook }() // prune should fail - rtest.Assert(t, runPrune(context.TODO(), pruneDefaultOptions, env.gopts) == errorPacksMissing, + rtest.Assert(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runPrune(context.TODO(), pruneDefaultOptions, env.gopts, term) + }) == errorPacksMissing, "prune should have reported index not complete error") } @@ -218,7 +225,9 @@ func testEdgeCaseRepo(t *testing.T, tarfile string, optionsCheck CheckOptions, o testRunPrune(t, env.gopts, optionsPrune) testRunCheck(t, env.gopts) } else { - rtest.Assert(t, runPrune(context.TODO(), optionsPrune, env.gopts) != nil, + rtest.Assert(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) 
error { + return runPrune(context.TODO(), optionsPrune, env.gopts, term) + }) != nil, "prune should have reported an error") } } diff --git a/cmd/restic/cmd_repair_index.go b/cmd/restic/cmd_repair_index.go index 1ac743348..19db1d03f 100644 --- a/cmd/restic/cmd_repair_index.go +++ b/cmd/restic/cmd_repair_index.go @@ -7,6 +7,8 @@ import ( "github.com/restic/restic/internal/pack" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui/progress" + "github.com/restic/restic/internal/ui/termstatus" "github.com/spf13/cobra" "github.com/spf13/pflag" ) @@ -25,7 +27,9 @@ Exit status is 0 if the command was successful, and non-zero if there was any er `, DisableAutoGenTag: true, RunE: func(cmd *cobra.Command, _ []string) error { - return runRebuildIndex(cmd.Context(), repairIndexOptions, globalOptions) + term, cancel := setupTermstatus() + defer cancel() + return runRebuildIndex(cmd.Context(), repairIndexOptions, globalOptions, term) }, } @@ -55,17 +59,19 @@ func init() { } } -func runRebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOptions) error { +func runRebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOptions, term *termstatus.Terminal) error { ctx, repo, unlock, err := openWithExclusiveLock(ctx, gopts, false) if err != nil { return err } defer unlock() - return rebuildIndex(ctx, opts, gopts, repo) + printer := newTerminalProgressPrinter(gopts.verbosity, term) + + return rebuildIndex(ctx, opts, repo, printer) } -func rebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOptions, repo *repository.Repository) error { +func rebuildIndex(ctx context.Context, opts RepairIndexOptions, repo *repository.Repository, printer progress.Printer) error { var obsoleteIndexes restic.IDs packSizeFromList := make(map[restic.ID]int64) packSizeFromIndex := make(map[restic.ID]int64) @@ -81,11 +87,11 @@ func rebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOpti return err } } else { - Verbosef("loading indexes...\n") + printer.P("loading indexes...\n") mi := index.NewMasterIndex() err := index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, _ bool, err error) error { if err != nil { - Warnf("removing invalid index %v: %v\n", id, err) + printer.E("removing invalid index %v: %v\n", id, err) obsoleteIndexes = append(obsoleteIndexes, id) return nil } @@ -109,7 +115,7 @@ func rebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOpti packSizeFromIndex = pack.Size(ctx, repo.Index(), false) } - Verbosef("getting pack files to read...\n") + printer.P("getting pack files to read...\n") err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error { size, ok := packSizeFromIndex[id] if !ok || size != packSize { @@ -118,9 +124,9 @@ func rebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOpti removePacks.Insert(id) } if !ok { - Warnf("adding pack file to index %v\n", id) + printer.E("adding pack file to index %v\n", id) } else if size != packSize { - Warnf("reindexing pack file %v with unexpected size %v instead of %v\n", id, packSize, size) + printer.E("reindexing pack file %v with unexpected size %v instead of %v\n", id, packSize, size) } delete(packSizeFromIndex, id) return nil @@ -132,12 +138,13 @@ func rebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOpti // forget pack files that are referenced in the index but do not exist // when rebuilding the index 
removePacks.Insert(id) - Warnf("removing not found pack file %v\n", id) + printer.E("removing not found pack file %v\n", id) } if len(packSizeFromList) > 0 { - Verbosef("reading pack files\n") - bar := newProgressMax(!gopts.Quiet, uint64(len(packSizeFromList)), "packs") + printer.P("reading pack files\n") + bar := printer.NewCounter("packs") + bar.SetMax(uint64(len(packSizeFromList))) invalidFiles, err := repo.CreateIndexFromPacks(ctx, packSizeFromList, bar) bar.Done() if err != nil { @@ -145,15 +152,15 @@ func rebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOpti } for _, id := range invalidFiles { - Verboseff("skipped incomplete pack file: %v\n", id) + printer.V("skipped incomplete pack file: %v\n", id) } } - err = rebuildIndexFiles(ctx, gopts, repo, removePacks, obsoleteIndexes, false) + err = rebuildIndexFiles(ctx, repo, removePacks, obsoleteIndexes, false, printer) if err != nil { return err } - Verbosef("done\n") + printer.P("done\n") return nil } diff --git a/cmd/restic/cmd_repair_index_integration_test.go b/cmd/restic/cmd_repair_index_integration_test.go index e3271361a..e1a3dfe03 100644 --- a/cmd/restic/cmd_repair_index_integration_test.go +++ b/cmd/restic/cmd_repair_index_integration_test.go @@ -13,12 +13,15 @@ import ( "github.com/restic/restic/internal/index" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/termstatus" ) func testRunRebuildIndex(t testing.TB, gopts GlobalOptions) { rtest.OK(t, withRestoreGlobalOptions(func() error { - globalOptions.stdout = io.Discard - return runRebuildIndex(context.TODO(), RepairIndexOptions{}, gopts) + return withTermStatus(gopts, func(ctx context.Context, term *termstatus.Terminal) error { + globalOptions.stdout = io.Discard + return runRebuildIndex(context.TODO(), RepairIndexOptions{}, gopts, term) + }) })) } @@ -126,12 +129,13 @@ func TestRebuildIndexFailsOnAppendOnly(t *testing.T) { rtest.SetupTarTestFixture(t, env.base, datafile) err := withRestoreGlobalOptions(func() error { - globalOptions.stdout = io.Discard - env.gopts.backendTestHook = func(r backend.Backend) (backend.Backend, error) { return &appendOnlyBackend{r}, nil } - return runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts) + return withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { + globalOptions.stdout = io.Discard + return runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts, term) + }) }) if err == nil { diff --git a/cmd/restic/cmd_repair_packs.go b/cmd/restic/cmd_repair_packs.go index 00dee076b..7489e1b3c 100644 --- a/cmd/restic/cmd_repair_packs.go +++ b/cmd/restic/cmd_repair_packs.go @@ -58,14 +58,14 @@ func runRepairPacks(ctx context.Context, gopts GlobalOptions, term *termstatus.T } defer unlock() - bar := newIndexProgress(gopts.Quiet, gopts.JSON) + printer := newTerminalProgressPrinter(gopts.verbosity, term) + + bar := newIndexTerminalProgress(gopts.Quiet, gopts.JSON, term) err = repo.LoadIndex(ctx, bar) if err != nil { return errors.Fatalf("%s", err) } - printer := newTerminalProgressPrinter(gopts.verbosity, term) - printer.P("saving backup copies of pack files to current folder") for id := range ids { f, err := os.OpenFile("pack-"+id.String(), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0o666) diff --git a/cmd/restic/delete.go b/cmd/restic/delete.go index c3a7e039d..34f71d91a 100644 --- a/cmd/restic/delete.go +++ b/cmd/restic/delete.go @@ -4,38 +4,35 @@ import ( "context" 
"github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui/progress" ) // DeleteFiles deletes the given fileList of fileType in parallel // it will print a warning if there is an error, but continue deleting the remaining files -func DeleteFiles(ctx context.Context, gopts GlobalOptions, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType) { - _ = deleteFiles(ctx, gopts, true, repo, fileList, fileType) +func DeleteFiles(ctx context.Context, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType, printer progress.Printer) { + _ = deleteFiles(ctx, true, repo, fileList, fileType, printer) } // DeleteFilesChecked deletes the given fileList of fileType in parallel // if an error occurs, it will cancel and return this error -func DeleteFilesChecked(ctx context.Context, gopts GlobalOptions, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType) error { - return deleteFiles(ctx, gopts, false, repo, fileList, fileType) +func DeleteFilesChecked(ctx context.Context, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType, printer progress.Printer) error { + return deleteFiles(ctx, false, repo, fileList, fileType, printer) } // deleteFiles deletes the given fileList of fileType in parallel // if ignoreError=true, it will print a warning if there was an error, else it will abort. -func deleteFiles(ctx context.Context, gopts GlobalOptions, ignoreError bool, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType) error { - bar := newProgressMax(!gopts.JSON && !gopts.Quiet, 0, "files deleted") +func deleteFiles(ctx context.Context, ignoreError bool, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType, printer progress.Printer) error { + bar := printer.NewCounter("files deleted") defer bar.Done() return restic.ParallelRemove(ctx, repo, fileList, fileType, func(id restic.ID, err error) error { if err != nil { - if !gopts.JSON { - Warnf("unable to remove %v/%v from the repository\n", fileType, id) - } + printer.E("unable to remove %v/%v from the repository\n", fileType, id) if !ignoreError { return err } } - if !gopts.JSON && gopts.verbosity > 2 { - Verbosef("removed %v/%v\n", fileType, id) - } + printer.VV("removed %v/%v\n", fileType, id) return nil }, bar) } diff --git a/cmd/restic/integration_test.go b/cmd/restic/integration_test.go index 21be571e2..a7b66add8 100644 --- a/cmd/restic/integration_test.go +++ b/cmd/restic/integration_test.go @@ -12,6 +12,7 @@ import ( "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/termstatus" ) func TestCheckRestoreNoLock(t *testing.T) { @@ -88,8 +89,12 @@ func TestListOnce(t *testing.T) { testRunPrune(t, env.gopts, pruneOpts) rtest.OK(t, runCheck(context.TODO(), checkOpts, env.gopts, nil)) - rtest.OK(t, runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts)) - rtest.OK(t, runRebuildIndex(context.TODO(), RepairIndexOptions{ReadAllPacks: true}, env.gopts)) + rtest.OK(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runRebuildIndex(context.TODO(), RepairIndexOptions{}, env.gopts, term) + })) + rtest.OK(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { + return runRebuildIndex(context.TODO(), RepairIndexOptions{ReadAllPacks: true}, env.gopts, term) + })) } type writeToOnly struct { From 866ddf5698503c3a4159c43c95494f434bfc0f1f 
Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 6 Apr 2024 19:12:04 +0200 Subject: [PATCH 066/117] repair index: refactor code into repository package --- cmd/restic/cmd_repair_index.go | 96 +--------------------- internal/repository/repair_index.go | 118 ++++++++++++++++++++++++++++ 2 files changed, 121 insertions(+), 93 deletions(-) create mode 100644 internal/repository/repair_index.go diff --git a/cmd/restic/cmd_repair_index.go b/cmd/restic/cmd_repair_index.go index 19db1d03f..50ba16e33 100644 --- a/cmd/restic/cmd_repair_index.go +++ b/cmd/restic/cmd_repair_index.go @@ -3,11 +3,7 @@ package main import ( "context" - "github.com/restic/restic/internal/index" - "github.com/restic/restic/internal/pack" "github.com/restic/restic/internal/repository" - "github.com/restic/restic/internal/restic" - "github.com/restic/restic/internal/ui/progress" "github.com/restic/restic/internal/ui/termstatus" "github.com/spf13/cobra" "github.com/spf13/pflag" @@ -68,99 +64,13 @@ func runRebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalO printer := newTerminalProgressPrinter(gopts.verbosity, term) - return rebuildIndex(ctx, opts, repo, printer) -} - -func rebuildIndex(ctx context.Context, opts RepairIndexOptions, repo *repository.Repository, printer progress.Printer) error { - var obsoleteIndexes restic.IDs - packSizeFromList := make(map[restic.ID]int64) - packSizeFromIndex := make(map[restic.ID]int64) - removePacks := restic.NewIDSet() - - if opts.ReadAllPacks { - // get list of old index files but start with empty index - err := repo.List(ctx, restic.IndexFile, func(id restic.ID, _ int64) error { - obsoleteIndexes = append(obsoleteIndexes, id) - return nil - }) - if err != nil { - return err - } - } else { - printer.P("loading indexes...\n") - mi := index.NewMasterIndex() - err := index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, _ bool, err error) error { - if err != nil { - printer.E("removing invalid index %v: %v\n", id, err) - obsoleteIndexes = append(obsoleteIndexes, id) - return nil - } - - mi.Insert(idx) - return nil - }) - if err != nil { - return err - } - - err = mi.MergeFinalIndexes() - if err != nil { - return err - } - - err = repo.SetIndex(mi) - if err != nil { - return err - } - packSizeFromIndex = pack.Size(ctx, repo.Index(), false) - } - - printer.P("getting pack files to read...\n") - err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error { - size, ok := packSizeFromIndex[id] - if !ok || size != packSize { - // Pack was not referenced in index or size does not match - packSizeFromList[id] = packSize - removePacks.Insert(id) - } - if !ok { - printer.E("adding pack file to index %v\n", id) - } else if size != packSize { - printer.E("reindexing pack file %v with unexpected size %v instead of %v\n", id, packSize, size) - } - delete(packSizeFromIndex, id) - return nil - }) + err = repository.RepairIndex(ctx, repo, repository.RepairIndexOptions{ + ReadAllPacks: opts.ReadAllPacks, + }, printer) if err != nil { return err } - for id := range packSizeFromIndex { - // forget pack files that are referenced in the index but do not exist - // when rebuilding the index - removePacks.Insert(id) - printer.E("removing not found pack file %v\n", id) - } - if len(packSizeFromList) > 0 { - printer.P("reading pack files\n") - bar := printer.NewCounter("packs") - bar.SetMax(uint64(len(packSizeFromList))) - invalidFiles, err := repo.CreateIndexFromPacks(ctx, packSizeFromList, bar) - bar.Done() - if err != nil { - return err - } - - 
for _, id := range invalidFiles { - printer.V("skipped incomplete pack file: %v\n", id) - } - } - - err = rebuildIndexFiles(ctx, repo, removePacks, obsoleteIndexes, false, printer) - if err != nil { - return err - } printer.P("done\n") - return nil } diff --git a/internal/repository/repair_index.go b/internal/repository/repair_index.go new file mode 100644 index 000000000..7cf598fa5 --- /dev/null +++ b/internal/repository/repair_index.go @@ -0,0 +1,118 @@ +package repository + +import ( + "context" + + "github.com/restic/restic/internal/index" + "github.com/restic/restic/internal/pack" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui/progress" +) + +type RepairIndexOptions struct { + ReadAllPacks bool +} + +func RepairIndex(ctx context.Context, repo *Repository, opts RepairIndexOptions, printer progress.Printer) error { + var obsoleteIndexes restic.IDs + packSizeFromList := make(map[restic.ID]int64) + packSizeFromIndex := make(map[restic.ID]int64) + removePacks := restic.NewIDSet() + + if opts.ReadAllPacks { + // get list of old index files but start with empty index + err := repo.List(ctx, restic.IndexFile, func(id restic.ID, _ int64) error { + obsoleteIndexes = append(obsoleteIndexes, id) + return nil + }) + if err != nil { + return err + } + } else { + printer.P("loading indexes...\n") + mi := index.NewMasterIndex() + err := index.ForAllIndexes(ctx, repo, repo, func(id restic.ID, idx *index.Index, _ bool, err error) error { + if err != nil { + printer.E("removing invalid index %v: %v\n", id, err) + obsoleteIndexes = append(obsoleteIndexes, id) + return nil + } + + mi.Insert(idx) + return nil + }) + if err != nil { + return err + } + + err = mi.MergeFinalIndexes() + if err != nil { + return err + } + + err = repo.SetIndex(mi) + if err != nil { + return err + } + packSizeFromIndex = pack.Size(ctx, repo.Index(), false) + } + + printer.P("getting pack files to read...\n") + err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error { + size, ok := packSizeFromIndex[id] + if !ok || size != packSize { + // Pack was not referenced in index or size does not match + packSizeFromList[id] = packSize + removePacks.Insert(id) + } + if !ok { + printer.E("adding pack file to index %v\n", id) + } else if size != packSize { + printer.E("reindexing pack file %v with unexpected size %v instead of %v\n", id, packSize, size) + } + delete(packSizeFromIndex, id) + return nil + }) + if err != nil { + return err + } + for id := range packSizeFromIndex { + // forget pack files that are referenced in the index but do not exist + // when rebuilding the index + removePacks.Insert(id) + printer.E("removing not found pack file %v\n", id) + } + + if len(packSizeFromList) > 0 { + printer.P("reading pack files\n") + bar := printer.NewCounter("packs") + bar.SetMax(uint64(len(packSizeFromList))) + invalidFiles, err := repo.CreateIndexFromPacks(ctx, packSizeFromList, bar) + bar.Done() + if err != nil { + return err + } + + for _, id := range invalidFiles { + printer.V("skipped incomplete pack file: %v\n", id) + } + } + + return rebuildIndexFiles(ctx, repo, removePacks, obsoleteIndexes, false, printer) +} + +func rebuildIndexFiles(ctx context.Context, repo restic.Repository, removePacks restic.IDSet, extraObsolete restic.IDs, skipDeletion bool, printer progress.Printer) error { + printer.P("rebuilding index\n") + + bar := printer.NewCounter("packs processed") + return repo.Index().Save(ctx, repo, removePacks, extraObsolete, restic.MasterIndexSaveOpts{ + SaveProgress: 
bar, + DeleteProgress: func() *progress.Counter { + return printer.NewCounter("old indexes deleted") + }, + DeleteReport: func(id restic.ID, _ error) { + printer.VV("removed index %v\n", id.String()) + }, + SkipDeletion: skipDeletion, + }) +} From df9d4b455d0154589b2d317d7aa3eac4a350dd34 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 6 Apr 2024 19:17:28 +0200 Subject: [PATCH 067/117] prune: prepare for moving code to repository package --- cmd/restic/cmd_prune.go | 212 ++++++++++++++++++++-------------------- 1 file changed, 107 insertions(+), 105 deletions(-) diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index 833e72ae7..eef66cc8e 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -194,7 +194,9 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption return err } - plan, stats, err := planPrune(ctx, opts, repo, ignoreSnapshots, printer) + plan, stats, err := PlanPrune(ctx, opts, repo, func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error) { + return getUsedBlobs(ctx, repo, ignoreSnapshots, printer) + }, printer) if err != nil { return err } @@ -211,40 +213,40 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption // Trigger GC to reset garbage collection threshold runtime.GC() - return doPrune(ctx, opts, repo, plan, printer) + return DoPrune(ctx, opts, repo, plan, printer) } -type pruneStats struct { - blobs struct { - used uint - duplicate uint - unused uint - remove uint - repack uint - repackrm uint +type PruneStats struct { + Blobs struct { + Used uint + Duplicate uint + Unused uint + Remove uint + Repack uint + Repackrm uint } - size struct { - used uint64 - duplicate uint64 - unused uint64 - remove uint64 - repack uint64 - repackrm uint64 - unref uint64 - uncompressed uint64 + Size struct { + Used uint64 + Duplicate uint64 + Unused uint64 + Remove uint64 + Repack uint64 + Repackrm uint64 + Unref uint64 + Uncompressed uint64 } - packs struct { - used uint - unused uint - partlyUsed uint - unref uint - keep uint - repack uint - remove uint + Packs struct { + Used uint + Unused uint + PartlyUsed uint + Unref uint + Keep uint + Repack uint + Remove uint } } -type prunePlan struct { +type PrunePlan struct { removePacksFirst restic.IDSet // packs to remove first (unreferenced packs) repackPacks restic.IDSet // packs to repack keepBlobs restic.CountedBlobSet // blobs to keep during repacking @@ -267,26 +269,26 @@ type packInfoWithID struct { mustCompress bool } -// planPrune selects which files to rewrite and which to delete and which blobs to keep. +// PlanPrune selects which files to rewrite and which to delete and which blobs to keep. // Also some summary statistics are returned. 
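Before the signature change in the next hunk, it helps to see the call shape it enables: snapshot filtering moves out of planPrune and into a closure supplied by the caller, as in the runPruneWithRepo hunk earlier in this commit. A sketch under that assumption; pruneWithSnapshotFilter is a hypothetical wrapper living in package main at this stage, while PlanPrune, DoPrune, printPruneStats and getUsedBlobs are the functions visible in this patch:

    // hypothetical wrapper, for illustration only
    func pruneWithSnapshotFilter(ctx context.Context, opts PruneOptions, repo restic.Repository,
        ignoreSnapshots restic.IDSet, printer progress.Printer) error {

        plan, stats, err := PlanPrune(ctx, opts, repo,
            func(ctx context.Context, repo restic.Repository) (restic.CountedBlobSet, error) {
                // the closure decides which blobs count as used; here it simply
                // delegates to getUsedBlobs, skipping the given snapshots
                return getUsedBlobs(ctx, repo, ignoreSnapshots, printer)
            }, printer)
        if err != nil {
            return err
        }

        if err := printPruneStats(printer, stats); err != nil {
            return err
        }
        return DoPrune(ctx, opts, repo, plan, printer)
    }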
-func planPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, ignoreSnapshots restic.IDSet, printer progress.Printer) (prunePlan, pruneStats, error) { - var stats pruneStats +func PlanPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, getUsedBlobs func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error), printer progress.Printer) (PrunePlan, PruneStats, error) { + var stats PruneStats - usedBlobs, err := getUsedBlobs(ctx, repo, ignoreSnapshots, printer) + usedBlobs, err := getUsedBlobs(ctx, repo) if err != nil { - return prunePlan{}, stats, err + return PrunePlan{}, stats, err } printer.P("searching used packs...\n") keepBlobs, indexPack, err := packInfoFromIndex(ctx, repo.Index(), usedBlobs, &stats, printer) if err != nil { - return prunePlan{}, stats, err + return PrunePlan{}, stats, err } printer.P("collecting packs for deletion and repacking\n") plan, err := decidePackAction(ctx, opts, repo, indexPack, &stats, printer) if err != nil { - return prunePlan{}, stats, err + return PrunePlan{}, stats, err } if len(plan.repackPacks) != 0 { @@ -313,7 +315,7 @@ func planPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, i return plan, stats, nil } -func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs restic.CountedBlobSet, stats *pruneStats, printer progress.Printer) (restic.CountedBlobSet, map[restic.ID]packInfo, error) { +func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs restic.CountedBlobSet, stats *PruneStats, printer progress.Printer) (restic.CountedBlobSet, map[restic.ID]packInfo, error) { // iterate over all blobs in index to find out which blobs are duplicates // The counter in usedBlobs describes how many instances of the blob exist in the repository index // Thus 0 == blob is missing, 1 == blob exists once, >= 2 == duplicates exist @@ -384,20 +386,20 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re ip.unusedBlobs++ // count as duplicate, will later on change one copy to be counted as used - stats.size.duplicate += size - stats.blobs.duplicate++ + stats.Size.Duplicate += size + stats.Blobs.Duplicate++ case dupCount == 1: // used blob, not duplicate ip.usedSize += size ip.usedBlobs++ - stats.size.used += size - stats.blobs.used++ + stats.Size.Used += size + stats.Blobs.Used++ default: // unused blob ip.unusedSize += size ip.unusedBlobs++ - stats.size.unused += size - stats.blobs.unused++ + stats.Size.Unused += size + stats.Blobs.Unused++ } if !blob.IsCompressed() { ip.uncompressed = true @@ -431,10 +433,10 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re ip.unusedSize -= size ip.unusedBlobs-- // same for the global statistics - stats.size.used += size - stats.blobs.used++ - stats.size.duplicate -= size - stats.blobs.duplicate-- + stats.Size.Used += size + stats.Blobs.Used++ + stats.Size.Duplicate -= size + stats.Blobs.Duplicate-- // let other occurrences remain marked as unused usedBlobs[bh] = 1 default: @@ -463,7 +465,7 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re return usedBlobs, indexPack, nil } -func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Repository, indexPack map[restic.ID]packInfo, stats *pruneStats, printer progress.Printer) (prunePlan, error) { +func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Repository, indexPack map[restic.ID]packInfo, stats *PruneStats, printer 
progress.Printer) (PrunePlan, error) { removePacksFirst := restic.NewIDSet() removePacks := restic.NewIDSet() repackPacks := restic.NewIDSet() @@ -487,7 +489,7 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi // Pack was not referenced in index and is not used => immediately remove! printer.V("will remove pack %v as it is unused and not indexed\n", id.Str()) removePacksFirst.Insert(id) - stats.size.unref += uint64(packSize) + stats.Size.Unref += uint64(packSize) return nil } @@ -503,15 +505,15 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi // statistics switch { case p.usedBlobs == 0: - stats.packs.unused++ + stats.Packs.Unused++ case p.unusedBlobs == 0: - stats.packs.used++ + stats.Packs.Used++ default: - stats.packs.partlyUsed++ + stats.Packs.PartlyUsed++ } if p.uncompressed { - stats.size.uncompressed += p.unusedSize + p.usedSize + stats.Size.Uncompressed += p.unusedSize + p.usedSize } mustCompress := false if repoVersion >= 2 { @@ -525,17 +527,17 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi case p.usedBlobs == 0: // All blobs in pack are no longer used => remove pack! removePacks.Insert(id) - stats.blobs.remove += p.unusedBlobs - stats.size.remove += p.unusedSize + stats.Blobs.Remove += p.unusedBlobs + stats.Size.Remove += p.unusedSize case opts.RepackCachableOnly && p.tpe == restic.DataBlob: // if this is a data pack and --repack-cacheable-only is set => keep pack! - stats.packs.keep++ + stats.Packs.Keep++ case p.unusedBlobs == 0 && p.tpe != restic.InvalidBlob && !mustCompress: if packSize >= int64(targetPackSize) { // All blobs in pack are used and not mixed => keep pack! - stats.packs.keep++ + stats.Packs.Keep++ } else { repackSmallCandidates = append(repackSmallCandidates, packInfoWithID{ID: id, packInfo: p, mustCompress: mustCompress}) } @@ -551,7 +553,7 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi }) bar.Done() if err != nil { - return prunePlan{}, err + return PrunePlan{}, err } // At this point indexPacks contains only missing packs! @@ -561,8 +563,8 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi for id, p := range indexPack { if p.usedBlobs == 0 { ignorePacks.Insert(id) - stats.blobs.remove += p.unusedBlobs - stats.size.remove += p.unusedSize + stats.Blobs.Remove += p.unusedBlobs + stats.Size.Remove += p.unusedSize delete(indexPack, id) } } @@ -572,7 +574,7 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi for id := range indexPack { printer.E(" %v\n", id) } - return prunePlan{}, errorPacksMissing + return PrunePlan{}, errorPacksMissing } if len(ignorePacks) != 0 { printer.E("Missing but unneeded pack files are referenced in the index, will be repaired\n") @@ -584,7 +586,7 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi if len(repackSmallCandidates) < 10 { // too few small files to be worth the trouble, this also prevents endlessly repacking // if there is just a single pack file below the target size - stats.packs.keep += uint(len(repackSmallCandidates)) + stats.Packs.Keep += uint(len(repackSmallCandidates)) } else { repackCandidates = append(repackCandidates, repackSmallCandidates...) 
} @@ -612,26 +614,26 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi repack := func(id restic.ID, p packInfo) { repackPacks.Insert(id) - stats.blobs.repack += p.unusedBlobs + p.usedBlobs - stats.size.repack += p.unusedSize + p.usedSize - stats.blobs.repackrm += p.unusedBlobs - stats.size.repackrm += p.unusedSize + stats.Blobs.Repack += p.unusedBlobs + p.usedBlobs + stats.Size.Repack += p.unusedSize + p.usedSize + stats.Blobs.Repackrm += p.unusedBlobs + stats.Size.Repackrm += p.unusedSize if p.uncompressed { - stats.size.uncompressed -= p.unusedSize + p.usedSize + stats.Size.Uncompressed -= p.unusedSize + p.usedSize } } // calculate limit for number of unused bytes in the repo after repacking - maxUnusedSizeAfter := opts.maxUnusedBytes(stats.size.used) + maxUnusedSizeAfter := opts.maxUnusedBytes(stats.Size.Used) for _, p := range repackCandidates { - reachedUnusedSizeAfter := (stats.size.unused-stats.size.remove-stats.size.repackrm < maxUnusedSizeAfter) - reachedRepackSize := stats.size.repack+p.unusedSize+p.usedSize >= opts.MaxRepackBytes + reachedUnusedSizeAfter := (stats.Size.Unused-stats.Size.Remove-stats.Size.Repackrm < maxUnusedSizeAfter) + reachedRepackSize := stats.Size.Repack+p.unusedSize+p.usedSize >= opts.MaxRepackBytes packIsLargeEnough := p.unusedSize+p.usedSize >= uint64(targetPackSize) switch { case reachedRepackSize: - stats.packs.keep++ + stats.Packs.Keep++ case p.tpe != restic.DataBlob, p.mustCompress: // repacking non-data packs / uncompressed-trees is only limited by repackSize @@ -639,23 +641,23 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi case reachedUnusedSizeAfter && packIsLargeEnough: // for all other packs stop repacking if tolerated unused size is reached. 
- stats.packs.keep++ + stats.Packs.Keep++ default: repack(p.ID, p.packInfo) } } - stats.packs.unref = uint(len(removePacksFirst)) - stats.packs.repack = uint(len(repackPacks)) - stats.packs.remove = uint(len(removePacks)) + stats.Packs.Unref = uint(len(removePacksFirst)) + stats.Packs.Repack = uint(len(repackPacks)) + stats.Packs.Remove = uint(len(removePacks)) if repo.Config().Version < 2 { // compression not supported for repository format version 1 - stats.size.uncompressed = 0 + stats.Size.Uncompressed = 0 } - return prunePlan{removePacksFirst: removePacksFirst, + return PrunePlan{removePacksFirst: removePacksFirst, removePacks: removePacks, repackPacks: repackPacks, ignorePacks: ignorePacks, @@ -663,54 +665,54 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi } // printPruneStats prints out the statistics -func printPruneStats(printer progress.Printer, stats pruneStats) error { - printer.V("\nused: %10d blobs / %s\n", stats.blobs.used, ui.FormatBytes(stats.size.used)) - if stats.blobs.duplicate > 0 { - printer.V("duplicates: %10d blobs / %s\n", stats.blobs.duplicate, ui.FormatBytes(stats.size.duplicate)) +func printPruneStats(printer progress.Printer, stats PruneStats) error { + printer.V("\nused: %10d blobs / %s\n", stats.Blobs.Used, ui.FormatBytes(stats.Size.Used)) + if stats.Blobs.Duplicate > 0 { + printer.V("duplicates: %10d blobs / %s\n", stats.Blobs.Duplicate, ui.FormatBytes(stats.Size.Duplicate)) } - printer.V("unused: %10d blobs / %s\n", stats.blobs.unused, ui.FormatBytes(stats.size.unused)) - if stats.size.unref > 0 { - printer.V("unreferenced: %s\n", ui.FormatBytes(stats.size.unref)) + printer.V("unused: %10d blobs / %s\n", stats.Blobs.Unused, ui.FormatBytes(stats.Size.Unused)) + if stats.Size.Unref > 0 { + printer.V("unreferenced: %s\n", ui.FormatBytes(stats.Size.Unref)) } - totalBlobs := stats.blobs.used + stats.blobs.unused + stats.blobs.duplicate - totalSize := stats.size.used + stats.size.duplicate + stats.size.unused + stats.size.unref - unusedSize := stats.size.duplicate + stats.size.unused + totalBlobs := stats.Blobs.Used + stats.Blobs.Unused + stats.Blobs.Duplicate + totalSize := stats.Size.Used + stats.Size.Duplicate + stats.Size.Unused + stats.Size.Unref + unusedSize := stats.Size.Duplicate + stats.Size.Unused printer.V("total: %10d blobs / %s\n", totalBlobs, ui.FormatBytes(totalSize)) printer.V("unused size: %s of total size\n", ui.FormatPercent(unusedSize, totalSize)) - printer.P("\nto repack: %10d blobs / %s\n", stats.blobs.repack, ui.FormatBytes(stats.size.repack)) - printer.P("this removes: %10d blobs / %s\n", stats.blobs.repackrm, ui.FormatBytes(stats.size.repackrm)) - printer.P("to delete: %10d blobs / %s\n", stats.blobs.remove, ui.FormatBytes(stats.size.remove+stats.size.unref)) - totalPruneSize := stats.size.remove + stats.size.repackrm + stats.size.unref - printer.P("total prune: %10d blobs / %s\n", stats.blobs.remove+stats.blobs.repackrm, ui.FormatBytes(totalPruneSize)) - if stats.size.uncompressed > 0 { - printer.P("not yet compressed: %s\n", ui.FormatBytes(stats.size.uncompressed)) + printer.P("\nto repack: %10d blobs / %s\n", stats.Blobs.Repack, ui.FormatBytes(stats.Size.Repack)) + printer.P("this removes: %10d blobs / %s\n", stats.Blobs.Repackrm, ui.FormatBytes(stats.Size.Repackrm)) + printer.P("to delete: %10d blobs / %s\n", stats.Blobs.Remove, ui.FormatBytes(stats.Size.Remove+stats.Size.Unref)) + totalPruneSize := stats.Size.Remove + stats.Size.Repackrm + stats.Size.Unref + printer.P("total prune: %10d blobs / 
%s\n", stats.Blobs.Remove+stats.Blobs.Repackrm, ui.FormatBytes(totalPruneSize)) + if stats.Size.Uncompressed > 0 { + printer.P("not yet compressed: %s\n", ui.FormatBytes(stats.Size.Uncompressed)) } - printer.P("remaining: %10d blobs / %s\n", totalBlobs-(stats.blobs.remove+stats.blobs.repackrm), ui.FormatBytes(totalSize-totalPruneSize)) - unusedAfter := unusedSize - stats.size.remove - stats.size.repackrm + printer.P("remaining: %10d blobs / %s\n", totalBlobs-(stats.Blobs.Remove+stats.Blobs.Repackrm), ui.FormatBytes(totalSize-totalPruneSize)) + unusedAfter := unusedSize - stats.Size.Remove - stats.Size.Repackrm printer.P("unused size after prune: %s (%s of remaining size)\n", ui.FormatBytes(unusedAfter), ui.FormatPercent(unusedAfter, totalSize-totalPruneSize)) printer.P("\n") - printer.V("totally used packs: %10d\n", stats.packs.used) - printer.V("partly used packs: %10d\n", stats.packs.partlyUsed) - printer.V("unused packs: %10d\n\n", stats.packs.unused) + printer.V("totally used packs: %10d\n", stats.Packs.Used) + printer.V("partly used packs: %10d\n", stats.Packs.PartlyUsed) + printer.V("unused packs: %10d\n\n", stats.Packs.Unused) - printer.V("to keep: %10d packs\n", stats.packs.keep) - printer.V("to repack: %10d packs\n", stats.packs.repack) - printer.V("to delete: %10d packs\n", stats.packs.remove) - if stats.packs.unref > 0 { - printer.V("to delete: %10d unreferenced packs\n\n", stats.packs.unref) + printer.V("to keep: %10d packs\n", stats.Packs.Keep) + printer.V("to repack: %10d packs\n", stats.Packs.Repack) + printer.V("to delete: %10d packs\n", stats.Packs.Remove) + if stats.Packs.Unref > 0 { + printer.V("to delete: %10d unreferenced packs\n\n", stats.Packs.Unref) } return nil } -// doPrune does the actual pruning: +// DoPrune does the actual pruning: // - remove unreferenced packs first // - repack given pack files while keeping the given blobs // - rebuild the index while ignoring all files that will be deleted // - delete the files // plan.removePacks and plan.ignorePacks are modified in this function. -func doPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, plan prunePlan, printer progress.Printer) (err error) { +func DoPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, plan PrunePlan, printer progress.Printer) (err error) { if opts.DryRun { printer.V("Repeated prune dry-runs can report slightly different amounts of data to keep or repack. 
This is expected behavior.\n\n") if len(plan.removePacksFirst) > 0 { From fc3b548625793579cdb05bd38587744bd3f21b00 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 6 Apr 2024 19:21:21 +0200 Subject: [PATCH 068/117] prune: move logic into repository package --- cmd/restic/cmd_prune.go | 576 +--------------------- cmd/restic/cmd_prune_integration_test.go | 3 +- cmd/restic/delete.go | 38 -- internal/repository/prune.go | 581 +++++++++++++++++++++++ 4 files changed, 599 insertions(+), 599 deletions(-) delete mode 100644 cmd/restic/delete.go create mode 100644 internal/repository/prune.go diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index eef66cc8e..578414f52 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -4,14 +4,11 @@ import ( "context" "math" "runtime" - "sort" "strconv" "strings" "github.com/restic/restic/internal/debug" "github.com/restic/restic/internal/errors" - "github.com/restic/restic/internal/index" - "github.com/restic/restic/internal/pack" "github.com/restic/restic/internal/repository" "github.com/restic/restic/internal/restic" "github.com/restic/restic/internal/ui" @@ -21,10 +18,6 @@ import ( "github.com/spf13/cobra" ) -var errorIndexIncomplete = errors.Fatal("index is not complete") -var errorPacksMissing = errors.Fatal("packs from index missing in repo") -var errorSizeNotMatching = errors.Fatal("pack size does not match calculated size from index") - var cmdPrune = &cobra.Command{ Use: "prune [flags]", Short: "Remove unneeded data from the repository", @@ -194,14 +187,26 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption return err } - plan, stats, err := PlanPrune(ctx, opts, repo, func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error) { + popts := repository.PruneOptions{ + DryRun: opts.DryRun, + UnsafeRecovery: opts.unsafeRecovery, + + MaxUnusedBytes: opts.maxUnusedBytes, + MaxRepackBytes: opts.MaxRepackBytes, + + RepackCachableOnly: opts.RepackCachableOnly, + RepackSmall: opts.RepackSmall, + RepackUncompressed: opts.RepackUncompressed, + } + + plan, stats, err := repository.PlanPrune(ctx, popts, repo, func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error) { return getUsedBlobs(ctx, repo, ignoreSnapshots, printer) }, printer) if err != nil { return err } - if opts.DryRun { + if popts.DryRun { printer.P("\nWould have made the following changes:") } @@ -213,459 +218,11 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption // Trigger GC to reset garbage collection threshold runtime.GC() - return DoPrune(ctx, opts, repo, plan, printer) -} - -type PruneStats struct { - Blobs struct { - Used uint - Duplicate uint - Unused uint - Remove uint - Repack uint - Repackrm uint - } - Size struct { - Used uint64 - Duplicate uint64 - Unused uint64 - Remove uint64 - Repack uint64 - Repackrm uint64 - Unref uint64 - Uncompressed uint64 - } - Packs struct { - Used uint - Unused uint - PartlyUsed uint - Unref uint - Keep uint - Repack uint - Remove uint - } -} - -type PrunePlan struct { - removePacksFirst restic.IDSet // packs to remove first (unreferenced packs) - repackPacks restic.IDSet // packs to repack - keepBlobs restic.CountedBlobSet // blobs to keep during repacking - removePacks restic.IDSet // packs to remove - ignorePacks restic.IDSet // packs to ignore when rebuilding the index -} - -type packInfo struct { - usedBlobs uint - unusedBlobs uint - usedSize uint64 - unusedSize uint64 - tpe 
restic.BlobType - uncompressed bool -} - -type packInfoWithID struct { - ID restic.ID - packInfo - mustCompress bool -} - -// PlanPrune selects which files to rewrite and which to delete and which blobs to keep. -// Also some summary statistics are returned. -func PlanPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, getUsedBlobs func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error), printer progress.Printer) (PrunePlan, PruneStats, error) { - var stats PruneStats - - usedBlobs, err := getUsedBlobs(ctx, repo) - if err != nil { - return PrunePlan{}, stats, err - } - - printer.P("searching used packs...\n") - keepBlobs, indexPack, err := packInfoFromIndex(ctx, repo.Index(), usedBlobs, &stats, printer) - if err != nil { - return PrunePlan{}, stats, err - } - - printer.P("collecting packs for deletion and repacking\n") - plan, err := decidePackAction(ctx, opts, repo, indexPack, &stats, printer) - if err != nil { - return PrunePlan{}, stats, err - } - - if len(plan.repackPacks) != 0 { - blobCount := keepBlobs.Len() - // when repacking, we do not want to keep blobs which are - // already contained in kept packs, so delete them from keepBlobs - repo.Index().Each(ctx, func(blob restic.PackedBlob) { - if plan.removePacks.Has(blob.PackID) || plan.repackPacks.Has(blob.PackID) { - return - } - keepBlobs.Delete(blob.BlobHandle) - }) - - if keepBlobs.Len() < blobCount/2 { - // replace with copy to shrink map to necessary size if there's a chance to benefit - keepBlobs = keepBlobs.Copy() - } - } else { - // keepBlobs is only needed if packs are repacked - keepBlobs = nil - } - plan.keepBlobs = keepBlobs - - return plan, stats, nil -} - -func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs restic.CountedBlobSet, stats *PruneStats, printer progress.Printer) (restic.CountedBlobSet, map[restic.ID]packInfo, error) { - // iterate over all blobs in index to find out which blobs are duplicates - // The counter in usedBlobs describes how many instances of the blob exist in the repository index - // Thus 0 == blob is missing, 1 == blob exists once, >= 2 == duplicates exist - idx.Each(ctx, func(blob restic.PackedBlob) { - bh := blob.BlobHandle - count, ok := usedBlobs[bh] - if ok { - if count < math.MaxUint8 { - // don't overflow, but saturate count at 255 - // this can lead to a non-optimal pack selection, but won't cause - // problems otherwise - count++ - } - - usedBlobs[bh] = count - } - }) - - // Check if all used blobs have been found in index - missingBlobs := restic.NewBlobSet() - for bh, count := range usedBlobs { - if count == 0 { - // blob does not exist in any pack files - missingBlobs.Insert(bh) - } - } - - if len(missingBlobs) != 0 { - printer.E("%v not found in the index\n\n"+ - "Integrity check failed: Data seems to be missing.\n"+ - "Will not start prune to prevent (additional) data loss!\n"+ - "Please report this error (along with the output of the 'prune' run) at\n"+ - "https://github.com/restic/restic/issues/new/choose\n", missingBlobs) - return nil, nil, errorIndexIncomplete - } - - indexPack := make(map[restic.ID]packInfo) - - // save computed pack header size - for pid, hdrSize := range pack.Size(ctx, idx, true) { - // initialize tpe with NumBlobTypes to indicate it's not set - indexPack[pid] = packInfo{tpe: restic.NumBlobTypes, usedSize: uint64(hdrSize)} - } - - hasDuplicates := false - // iterate over all blobs in index to generate packInfo - idx.Each(ctx, func(blob restic.PackedBlob) { - ip := 
indexPack[blob.PackID] - - // Set blob type if not yet set - if ip.tpe == restic.NumBlobTypes { - ip.tpe = blob.Type - } - - // mark mixed packs with "Invalid blob type" - if ip.tpe != blob.Type { - ip.tpe = restic.InvalidBlob - } - - bh := blob.BlobHandle - size := uint64(blob.Length) - dupCount := usedBlobs[bh] - switch { - case dupCount >= 2: - hasDuplicates = true - // mark as unused for now, we will later on select one copy - ip.unusedSize += size - ip.unusedBlobs++ - - // count as duplicate, will later on change one copy to be counted as used - stats.Size.Duplicate += size - stats.Blobs.Duplicate++ - case dupCount == 1: // used blob, not duplicate - ip.usedSize += size - ip.usedBlobs++ - - stats.Size.Used += size - stats.Blobs.Used++ - default: // unused blob - ip.unusedSize += size - ip.unusedBlobs++ - - stats.Size.Unused += size - stats.Blobs.Unused++ - } - if !blob.IsCompressed() { - ip.uncompressed = true - } - // update indexPack - indexPack[blob.PackID] = ip - }) - - // if duplicate blobs exist, those will be set to either "used" or "unused": - // - mark only one occurrence of duplicate blobs as used - // - if there are already some used blobs in a pack, possibly mark duplicates in this pack as "used" - // - if there are no used blobs in a pack, possibly mark duplicates as "unused" - if hasDuplicates { - // iterate again over all blobs in index (this is pretty cheap, all in-mem) - idx.Each(ctx, func(blob restic.PackedBlob) { - bh := blob.BlobHandle - count, ok := usedBlobs[bh] - // skip non-duplicate, aka. normal blobs - // count == 0 is used to mark that this was a duplicate blob with only a single occurrence remaining - if !ok || count == 1 { - return - } - - ip := indexPack[blob.PackID] - size := uint64(blob.Length) - switch { - case ip.usedBlobs > 0, count == 0: - // other used blobs in pack or "last" occurrence -> transition to used - ip.usedSize += size - ip.usedBlobs++ - ip.unusedSize -= size - ip.unusedBlobs-- - // same for the global statistics - stats.Size.Used += size - stats.Blobs.Used++ - stats.Size.Duplicate -= size - stats.Blobs.Duplicate-- - // let other occurrences remain marked as unused - usedBlobs[bh] = 1 - default: - // remain unused and decrease counter - count-- - if count == 1 { - // setting count to 1 would lead to forgetting that this blob had duplicates - // thus use the special value zero. This will select the last instance of the blob for keeping. - count = 0 - } - usedBlobs[bh] = count - } - // update indexPack - indexPack[blob.PackID] = ip - }) - } - - // Sanity check. If no duplicates exist, all blobs have value 1. After handling - // duplicates, this also applies to duplicates. 
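The counter scheme above is compact but easy to misread, so a short worked trace may help; it only restates what the two passes over the index do, and the code moves verbatim into internal/repository/prune.go later in this commit. Suppose blob B is referenced by a snapshot and stored in packs P1, P2 and P3, and that P2 also contains other used blobs:

    after getUsedBlobs:       usedBlobs[B] = 0   (referenced, nothing counted yet)
    after the counting pass:  usedBlobs[B] = 3   (one increment per copy, saturating at 255)
    packInfo pass:            all three copies are provisionally marked unused (dupCount >= 2)
    duplicate resolution, visiting the copies in index order:
      copy in P1: P1 holds no used blobs and count != 0  -> stays unused, count 3 -> 2
      copy in P2: P2 already holds used blobs            -> becomes used, usedBlobs[B] = 1
      copy in P3: count == 1                             -> skipped, stays unused

Had no pack contained other used blobs, the counter would have stepped 3 -> 2 -> 0 (a decrement that would reach 1 is replaced by the special value 0), and the last copy visited would have been kept because count == 0. Either way exactly one copy ends up counted as used, which is what the sanity check below asserts.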
- for _, count := range usedBlobs { - if count != 1 { - panic("internal error during blob selection") - } - } - - return usedBlobs, indexPack, nil -} - -func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Repository, indexPack map[restic.ID]packInfo, stats *PruneStats, printer progress.Printer) (PrunePlan, error) { - removePacksFirst := restic.NewIDSet() - removePacks := restic.NewIDSet() - repackPacks := restic.NewIDSet() - - var repackCandidates []packInfoWithID - var repackSmallCandidates []packInfoWithID - repoVersion := repo.Config().Version - // only repack very small files by default - targetPackSize := repo.PackSize() / 25 - if opts.RepackSmall { - // consider files with at least 80% of the target size as large enough - targetPackSize = repo.PackSize() / 5 * 4 - } - - // loop over all packs and decide what to do - bar := printer.NewCounter("packs processed") - bar.SetMax(uint64(len(indexPack))) - err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error { - p, ok := indexPack[id] - if !ok { - // Pack was not referenced in index and is not used => immediately remove! - printer.V("will remove pack %v as it is unused and not indexed\n", id.Str()) - removePacksFirst.Insert(id) - stats.Size.Unref += uint64(packSize) - return nil - } - - if p.unusedSize+p.usedSize != uint64(packSize) && p.usedBlobs != 0 { - // Pack size does not fit and pack is needed => error - // If the pack is not needed, this is no error, the pack can - // and will be simply removed, see below. - printer.E("pack %s: calculated size %d does not match real size %d\nRun 'restic repair index'.\n", - id.Str(), p.unusedSize+p.usedSize, packSize) - return errorSizeNotMatching - } - - // statistics - switch { - case p.usedBlobs == 0: - stats.Packs.Unused++ - case p.unusedBlobs == 0: - stats.Packs.Used++ - default: - stats.Packs.PartlyUsed++ - } - - if p.uncompressed { - stats.Size.Uncompressed += p.unusedSize + p.usedSize - } - mustCompress := false - if repoVersion >= 2 { - // repo v2: always repack tree blobs if uncompressed - // compress data blobs if requested - mustCompress = (p.tpe == restic.TreeBlob || opts.RepackUncompressed) && p.uncompressed - } - - // decide what to do - switch { - case p.usedBlobs == 0: - // All blobs in pack are no longer used => remove pack! - removePacks.Insert(id) - stats.Blobs.Remove += p.unusedBlobs - stats.Size.Remove += p.unusedSize - - case opts.RepackCachableOnly && p.tpe == restic.DataBlob: - // if this is a data pack and --repack-cacheable-only is set => keep pack! - stats.Packs.Keep++ - - case p.unusedBlobs == 0 && p.tpe != restic.InvalidBlob && !mustCompress: - if packSize >= int64(targetPackSize) { - // All blobs in pack are used and not mixed => keep pack! - stats.Packs.Keep++ - } else { - repackSmallCandidates = append(repackSmallCandidates, packInfoWithID{ID: id, packInfo: p, mustCompress: mustCompress}) - } - - default: - // all other packs are candidates for repacking - repackCandidates = append(repackCandidates, packInfoWithID{ID: id, packInfo: p, mustCompress: mustCompress}) - } - - delete(indexPack, id) - bar.Add(1) - return nil - }) - bar.Done() - if err != nil { - return PrunePlan{}, err - } - - // At this point indexPacks contains only missing packs! 
- - // missing packs that are not needed can be ignored - ignorePacks := restic.NewIDSet() - for id, p := range indexPack { - if p.usedBlobs == 0 { - ignorePacks.Insert(id) - stats.Blobs.Remove += p.unusedBlobs - stats.Size.Remove += p.unusedSize - delete(indexPack, id) - } - } - - if len(indexPack) != 0 { - printer.E("The index references %d needed pack files which are missing from the repository:\n", len(indexPack)) - for id := range indexPack { - printer.E(" %v\n", id) - } - return PrunePlan{}, errorPacksMissing - } - if len(ignorePacks) != 0 { - printer.E("Missing but unneeded pack files are referenced in the index, will be repaired\n") - for id := range ignorePacks { - printer.E("will forget missing pack file %v\n", id) - } - } - - if len(repackSmallCandidates) < 10 { - // too few small files to be worth the trouble, this also prevents endlessly repacking - // if there is just a single pack file below the target size - stats.Packs.Keep += uint(len(repackSmallCandidates)) - } else { - repackCandidates = append(repackCandidates, repackSmallCandidates...) - } - - // Sort repackCandidates such that packs with highest ratio unused/used space are picked first. - // This is equivalent to sorting by unused / total space. - // Instead of unused[i] / used[i] > unused[j] / used[j] we use - // unused[i] * used[j] > unused[j] * used[i] as uint32*uint32 < uint64 - // Moreover packs containing trees and too small packs are sorted to the beginning - sort.Slice(repackCandidates, func(i, j int) bool { - pi := repackCandidates[i].packInfo - pj := repackCandidates[j].packInfo - switch { - case pi.tpe != restic.DataBlob && pj.tpe == restic.DataBlob: - return true - case pj.tpe != restic.DataBlob && pi.tpe == restic.DataBlob: - return false - case pi.unusedSize+pi.usedSize < uint64(targetPackSize) && pj.unusedSize+pj.usedSize >= uint64(targetPackSize): - return true - case pj.unusedSize+pj.usedSize < uint64(targetPackSize) && pi.unusedSize+pi.usedSize >= uint64(targetPackSize): - return false - } - return pi.unusedSize*pj.usedSize > pj.unusedSize*pi.usedSize - }) - - repack := func(id restic.ID, p packInfo) { - repackPacks.Insert(id) - stats.Blobs.Repack += p.unusedBlobs + p.usedBlobs - stats.Size.Repack += p.unusedSize + p.usedSize - stats.Blobs.Repackrm += p.unusedBlobs - stats.Size.Repackrm += p.unusedSize - if p.uncompressed { - stats.Size.Uncompressed -= p.unusedSize + p.usedSize - } - } - - // calculate limit for number of unused bytes in the repo after repacking - maxUnusedSizeAfter := opts.maxUnusedBytes(stats.Size.Used) - - for _, p := range repackCandidates { - reachedUnusedSizeAfter := (stats.Size.Unused-stats.Size.Remove-stats.Size.Repackrm < maxUnusedSizeAfter) - reachedRepackSize := stats.Size.Repack+p.unusedSize+p.usedSize >= opts.MaxRepackBytes - packIsLargeEnough := p.unusedSize+p.usedSize >= uint64(targetPackSize) - - switch { - case reachedRepackSize: - stats.Packs.Keep++ - - case p.tpe != restic.DataBlob, p.mustCompress: - // repacking non-data packs / uncompressed-trees is only limited by repackSize - repack(p.ID, p.packInfo) - - case reachedUnusedSizeAfter && packIsLargeEnough: - // for all other packs stop repacking if tolerated unused size is reached. 
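The comparator deleted here (and re-added unchanged in internal/repository/prune.go below) ranks packs by their unused/used ratio without dividing: cross-multiplying keeps the comparison in integer arithmetic, and since pack sizes fit comfortably in 32 bits, as the original comment notes, the 64-bit products cannot overflow. A quick numeric check:

    pack i: unused = 6 MiB, used = 2 MiB   (ratio 3.0)
    pack j: unused = 5 MiB, used = 4 MiB   (ratio 1.25)
    compare 6*4 = 24 with 5*2 = 10: 24 > 10, so pack i sorts first,
    matching 3.0 > 1.25 without any division or floating point.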
- stats.Packs.Keep++ - - default: - repack(p.ID, p.packInfo) - } - } - - stats.Packs.Unref = uint(len(removePacksFirst)) - stats.Packs.Repack = uint(len(repackPacks)) - stats.Packs.Remove = uint(len(removePacks)) - - if repo.Config().Version < 2 { - // compression not supported for repository format version 1 - stats.Size.Uncompressed = 0 - } - - return PrunePlan{removePacksFirst: removePacksFirst, - removePacks: removePacks, - repackPacks: repackPacks, - ignorePacks: ignorePacks, - }, nil + return repository.DoPrune(ctx, popts, repo, plan, printer) } // printPruneStats prints out the statistics -func printPruneStats(printer progress.Printer, stats PruneStats) error { +func printPruneStats(printer progress.Printer, stats repository.PruneStats) error { printer.V("\nused: %10d blobs / %s\n", stats.Blobs.Used, ui.FormatBytes(stats.Size.Used)) if stats.Blobs.Duplicate > 0 { printer.V("duplicates: %10d blobs / %s\n", stats.Blobs.Duplicate, ui.FormatBytes(stats.Size.Duplicate)) @@ -706,107 +263,6 @@ func printPruneStats(printer progress.Printer, stats PruneStats) error { return nil } -// DoPrune does the actual pruning: -// - remove unreferenced packs first -// - repack given pack files while keeping the given blobs -// - rebuild the index while ignoring all files that will be deleted -// - delete the files -// plan.removePacks and plan.ignorePacks are modified in this function. -func DoPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, plan PrunePlan, printer progress.Printer) (err error) { - if opts.DryRun { - printer.V("Repeated prune dry-runs can report slightly different amounts of data to keep or repack. This is expected behavior.\n\n") - if len(plan.removePacksFirst) > 0 { - printer.V("Would have removed the following unreferenced packs:\n%v\n\n", plan.removePacksFirst) - } - printer.V("Would have repacked and removed the following packs:\n%v\n\n", plan.repackPacks) - printer.V("Would have removed the following no longer used packs:\n%v\n\n", plan.removePacks) - // Always quit here if DryRun was set! 
- return nil - } - - // unreferenced packs can be safely deleted first - if len(plan.removePacksFirst) != 0 { - printer.P("deleting unreferenced packs\n") - DeleteFiles(ctx, repo, plan.removePacksFirst, restic.PackFile, printer) - } - - if len(plan.repackPacks) != 0 { - printer.P("repacking packs\n") - bar := printer.NewCounter("packs repacked") - bar.SetMax(uint64(len(plan.repackPacks))) - _, err := repository.Repack(ctx, repo, repo, plan.repackPacks, plan.keepBlobs, bar) - bar.Done() - if err != nil { - return errors.Fatal(err.Error()) - } - - // Also remove repacked packs - plan.removePacks.Merge(plan.repackPacks) - - if len(plan.keepBlobs) != 0 { - printer.E("%v was not repacked\n\n"+ - "Integrity check failed.\n"+ - "Please report this error (along with the output of the 'prune' run) at\n"+ - "https://github.com/restic/restic/issues/new/choose\n", plan.keepBlobs) - return errors.Fatal("internal error: blobs were not repacked") - } - - // allow GC of the blob set - plan.keepBlobs = nil - } - - if len(plan.ignorePacks) == 0 { - plan.ignorePacks = plan.removePacks - } else { - plan.ignorePacks.Merge(plan.removePacks) - } - - if opts.unsafeRecovery { - printer.P("deleting index files\n") - indexFiles := repo.Index().(*index.MasterIndex).IDs() - err = DeleteFilesChecked(ctx, repo, indexFiles, restic.IndexFile, printer) - if err != nil { - return errors.Fatalf("%s", err) - } - } else if len(plan.ignorePacks) != 0 { - err = rebuildIndexFiles(ctx, repo, plan.ignorePacks, nil, false, printer) - if err != nil { - return errors.Fatalf("%s", err) - } - } - - if len(plan.removePacks) != 0 { - printer.P("removing %d old packs\n", len(plan.removePacks)) - DeleteFiles(ctx, repo, plan.removePacks, restic.PackFile, printer) - } - - if opts.unsafeRecovery { - err = rebuildIndexFiles(ctx, repo, plan.ignorePacks, nil, true, printer) - if err != nil { - return errors.Fatalf("%s", err) - } - } - - printer.P("done\n") - return nil -} - -func rebuildIndexFiles(ctx context.Context, repo restic.Repository, removePacks restic.IDSet, extraObsolete restic.IDs, skipDeletion bool, printer progress.Printer) error { - printer.P("rebuilding index\n") - - bar := printer.NewCounter("packs processed") - return repo.Index().Save(ctx, repo, removePacks, extraObsolete, restic.MasterIndexSaveOpts{ - SaveProgress: bar, - DeleteProgress: func() *progress.Counter { - return printer.NewCounter("old indexes deleted") - }, - DeleteReport: func(id restic.ID, _ error) { - printer.VV("removed index %v\n", id.String()) - }, - SkipDeletion: skipDeletion, - }) -} - func getUsedBlobs(ctx context.Context, repo restic.Repository, ignoreSnapshots restic.IDSet, printer progress.Printer) (usedBlobs restic.CountedBlobSet, err error) { var snapshotTrees restic.IDs printer.P("loading all snapshots...\n") diff --git a/cmd/restic/cmd_prune_integration_test.go b/cmd/restic/cmd_prune_integration_test.go index 4c21940c4..f5d2e1f6b 100644 --- a/cmd/restic/cmd_prune_integration_test.go +++ b/cmd/restic/cmd_prune_integration_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/repository" rtest "github.com/restic/restic/internal/test" "github.com/restic/restic/internal/ui/termstatus" ) @@ -145,7 +146,7 @@ func TestPruneWithDamagedRepository(t *testing.T) { // prune should fail rtest.Assert(t, withTermStatus(env.gopts, func(ctx context.Context, term *termstatus.Terminal) error { return runPrune(context.TODO(), pruneDefaultOptions, env.gopts, term) - }) == errorPacksMissing, + }) == 
repository.ErrPacksMissing, "prune should have reported index not complete error") } diff --git a/cmd/restic/delete.go b/cmd/restic/delete.go deleted file mode 100644 index 34f71d91a..000000000 --- a/cmd/restic/delete.go +++ /dev/null @@ -1,38 +0,0 @@ -package main - -import ( - "context" - - "github.com/restic/restic/internal/restic" - "github.com/restic/restic/internal/ui/progress" -) - -// DeleteFiles deletes the given fileList of fileType in parallel -// it will print a warning if there is an error, but continue deleting the remaining files -func DeleteFiles(ctx context.Context, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType, printer progress.Printer) { - _ = deleteFiles(ctx, true, repo, fileList, fileType, printer) -} - -// DeleteFilesChecked deletes the given fileList of fileType in parallel -// if an error occurs, it will cancel and return this error -func DeleteFilesChecked(ctx context.Context, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType, printer progress.Printer) error { - return deleteFiles(ctx, false, repo, fileList, fileType, printer) -} - -// deleteFiles deletes the given fileList of fileType in parallel -// if ignoreError=true, it will print a warning if there was an error, else it will abort. -func deleteFiles(ctx context.Context, ignoreError bool, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType, printer progress.Printer) error { - bar := printer.NewCounter("files deleted") - defer bar.Done() - - return restic.ParallelRemove(ctx, repo, fileList, fileType, func(id restic.ID, err error) error { - if err != nil { - printer.E("unable to remove %v/%v from the repository\n", fileType, id) - if !ignoreError { - return err - } - } - printer.VV("removed %v/%v\n", fileType, id) - return nil - }, bar) -} diff --git a/internal/repository/prune.go b/internal/repository/prune.go new file mode 100644 index 000000000..653705bf4 --- /dev/null +++ b/internal/repository/prune.go @@ -0,0 +1,581 @@ +package repository + +import ( + "context" + "math" + "sort" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/index" + "github.com/restic/restic/internal/pack" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/ui/progress" +) + +var ErrIndexIncomplete = errors.Fatal("index is not complete") +var ErrPacksMissing = errors.Fatal("packs from index missing in repo") +var ErrSizeNotMatching = errors.Fatal("pack size does not match calculated size from index") + +// PruneOptions collects all options for the cleanup command. 
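The struct that follows keeps policy out of the repository package: rather than a percentage flag, it takes MaxUnusedBytes as a callback, so the caller decides how much unused space is acceptable relative to the data that remains. A sketch of filling it in; the 5 percent policy is only an example, the real values come from the prune command's flags as shown in the runPruneWithRepo hunk earlier:

    package example // illustration only

    import (
        "math"

        "github.com/restic/restic/internal/repository"
    )

    // examplePruneOptions builds a PruneOptions value by hand; a real caller
    // derives these numbers from command-line flags.
    func examplePruneOptions() repository.PruneOptions {
        return repository.PruneOptions{
            MaxRepackBytes: math.MaxUint64, // no limit on how much data may be repacked
            // tolerate roughly 5% unused space after pruning:
            // unused <= used * 5/95 is the same as unused <= 5% of (used + unused)
            MaxUnusedBytes: func(used uint64) uint64 { return used * 5 / 95 },
        }
    }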
+type PruneOptions struct { + DryRun bool + UnsafeRecovery bool + + MaxUnusedBytes func(used uint64) (unused uint64) // calculates the number of unused bytes after repacking, according to MaxUnused + MaxRepackBytes uint64 + + RepackCachableOnly bool + RepackSmall bool + RepackUncompressed bool +} + +type PruneStats struct { + Blobs struct { + Used uint + Duplicate uint + Unused uint + Remove uint + Repack uint + Repackrm uint + } + Size struct { + Used uint64 + Duplicate uint64 + Unused uint64 + Remove uint64 + Repack uint64 + Repackrm uint64 + Unref uint64 + Uncompressed uint64 + } + Packs struct { + Used uint + Unused uint + PartlyUsed uint + Unref uint + Keep uint + Repack uint + Remove uint + } +} + +type PrunePlan struct { + removePacksFirst restic.IDSet // packs to remove first (unreferenced packs) + repackPacks restic.IDSet // packs to repack + keepBlobs restic.CountedBlobSet // blobs to keep during repacking + removePacks restic.IDSet // packs to remove + ignorePacks restic.IDSet // packs to ignore when rebuilding the index +} + +type packInfo struct { + usedBlobs uint + unusedBlobs uint + usedSize uint64 + unusedSize uint64 + tpe restic.BlobType + uncompressed bool +} + +type packInfoWithID struct { + ID restic.ID + packInfo + mustCompress bool +} + +// PlanPrune selects which files to rewrite and which to delete and which blobs to keep. +// Also some summary statistics are returned. +func PlanPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, getUsedBlobs func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error), printer progress.Printer) (PrunePlan, PruneStats, error) { + var stats PruneStats + + usedBlobs, err := getUsedBlobs(ctx, repo) + if err != nil { + return PrunePlan{}, stats, err + } + + printer.P("searching used packs...\n") + keepBlobs, indexPack, err := packInfoFromIndex(ctx, repo.Index(), usedBlobs, &stats, printer) + if err != nil { + return PrunePlan{}, stats, err + } + + printer.P("collecting packs for deletion and repacking\n") + plan, err := decidePackAction(ctx, opts, repo, indexPack, &stats, printer) + if err != nil { + return PrunePlan{}, stats, err + } + + if len(plan.repackPacks) != 0 { + blobCount := keepBlobs.Len() + // when repacking, we do not want to keep blobs which are + // already contained in kept packs, so delete them from keepBlobs + repo.Index().Each(ctx, func(blob restic.PackedBlob) { + if plan.removePacks.Has(blob.PackID) || plan.repackPacks.Has(blob.PackID) { + return + } + keepBlobs.Delete(blob.BlobHandle) + }) + + if keepBlobs.Len() < blobCount/2 { + // replace with copy to shrink map to necessary size if there's a chance to benefit + keepBlobs = keepBlobs.Copy() + } + } else { + // keepBlobs is only needed if packs are repacked + keepBlobs = nil + } + plan.keepBlobs = keepBlobs + + return plan, stats, nil +} + +func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs restic.CountedBlobSet, stats *PruneStats, printer progress.Printer) (restic.CountedBlobSet, map[restic.ID]packInfo, error) { + // iterate over all blobs in index to find out which blobs are duplicates + // The counter in usedBlobs describes how many instances of the blob exist in the repository index + // Thus 0 == blob is missing, 1 == blob exists once, >= 2 == duplicates exist + idx.Each(ctx, func(blob restic.PackedBlob) { + bh := blob.BlobHandle + count, ok := usedBlobs[bh] + if ok { + if count < math.MaxUint8 { + // don't overflow, but saturate count at 255 + // this can lead to a non-optimal pack 
selection, but won't cause + // problems otherwise + count++ + } + + usedBlobs[bh] = count + } + }) + + // Check if all used blobs have been found in index + missingBlobs := restic.NewBlobSet() + for bh, count := range usedBlobs { + if count == 0 { + // blob does not exist in any pack files + missingBlobs.Insert(bh) + } + } + + if len(missingBlobs) != 0 { + printer.E("%v not found in the index\n\n"+ + "Integrity check failed: Data seems to be missing.\n"+ + "Will not start prune to prevent (additional) data loss!\n"+ + "Please report this error (along with the output of the 'prune' run) at\n"+ + "https://github.com/restic/restic/issues/new/choose\n", missingBlobs) + return nil, nil, ErrIndexIncomplete + } + + indexPack := make(map[restic.ID]packInfo) + + // save computed pack header size + for pid, hdrSize := range pack.Size(ctx, idx, true) { + // initialize tpe with NumBlobTypes to indicate it's not set + indexPack[pid] = packInfo{tpe: restic.NumBlobTypes, usedSize: uint64(hdrSize)} + } + + hasDuplicates := false + // iterate over all blobs in index to generate packInfo + idx.Each(ctx, func(blob restic.PackedBlob) { + ip := indexPack[blob.PackID] + + // Set blob type if not yet set + if ip.tpe == restic.NumBlobTypes { + ip.tpe = blob.Type + } + + // mark mixed packs with "Invalid blob type" + if ip.tpe != blob.Type { + ip.tpe = restic.InvalidBlob + } + + bh := blob.BlobHandle + size := uint64(blob.Length) + dupCount := usedBlobs[bh] + switch { + case dupCount >= 2: + hasDuplicates = true + // mark as unused for now, we will later on select one copy + ip.unusedSize += size + ip.unusedBlobs++ + + // count as duplicate, will later on change one copy to be counted as used + stats.Size.Duplicate += size + stats.Blobs.Duplicate++ + case dupCount == 1: // used blob, not duplicate + ip.usedSize += size + ip.usedBlobs++ + + stats.Size.Used += size + stats.Blobs.Used++ + default: // unused blob + ip.unusedSize += size + ip.unusedBlobs++ + + stats.Size.Unused += size + stats.Blobs.Unused++ + } + if !blob.IsCompressed() { + ip.uncompressed = true + } + // update indexPack + indexPack[blob.PackID] = ip + }) + + // if duplicate blobs exist, those will be set to either "used" or "unused": + // - mark only one occurrence of duplicate blobs as used + // - if there are already some used blobs in a pack, possibly mark duplicates in this pack as "used" + // - if there are no used blobs in a pack, possibly mark duplicates as "unused" + if hasDuplicates { + // iterate again over all blobs in index (this is pretty cheap, all in-mem) + idx.Each(ctx, func(blob restic.PackedBlob) { + bh := blob.BlobHandle + count, ok := usedBlobs[bh] + // skip non-duplicate, aka. normal blobs + // count == 0 is used to mark that this was a duplicate blob with only a single occurrence remaining + if !ok || count == 1 { + return + } + + ip := indexPack[blob.PackID] + size := uint64(blob.Length) + switch { + case ip.usedBlobs > 0, count == 0: + // other used blobs in pack or "last" occurrence -> transition to used + ip.usedSize += size + ip.usedBlobs++ + ip.unusedSize -= size + ip.unusedBlobs-- + // same for the global statistics + stats.Size.Used += size + stats.Blobs.Used++ + stats.Size.Duplicate -= size + stats.Blobs.Duplicate-- + // let other occurrences remain marked as unused + usedBlobs[bh] = 1 + default: + // remain unused and decrease counter + count-- + if count == 1 { + // setting count to 1 would lead to forgetting that this blob had duplicates + // thus use the special value zero. 
This will select the last instance of the blob for keeping. + count = 0 + } + usedBlobs[bh] = count + } + // update indexPack + indexPack[blob.PackID] = ip + }) + } + + // Sanity check. If no duplicates exist, all blobs have value 1. After handling + // duplicates, this also applies to duplicates. + for _, count := range usedBlobs { + if count != 1 { + panic("internal error during blob selection") + } + } + + return usedBlobs, indexPack, nil +} + +func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Repository, indexPack map[restic.ID]packInfo, stats *PruneStats, printer progress.Printer) (PrunePlan, error) { + removePacksFirst := restic.NewIDSet() + removePacks := restic.NewIDSet() + repackPacks := restic.NewIDSet() + + var repackCandidates []packInfoWithID + var repackSmallCandidates []packInfoWithID + repoVersion := repo.Config().Version + // only repack very small files by default + targetPackSize := repo.PackSize() / 25 + if opts.RepackSmall { + // consider files with at least 80% of the target size as large enough + targetPackSize = repo.PackSize() / 5 * 4 + } + + // loop over all packs and decide what to do + bar := printer.NewCounter("packs processed") + bar.SetMax(uint64(len(indexPack))) + err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error { + p, ok := indexPack[id] + if !ok { + // Pack was not referenced in index and is not used => immediately remove! + printer.V("will remove pack %v as it is unused and not indexed\n", id.Str()) + removePacksFirst.Insert(id) + stats.Size.Unref += uint64(packSize) + return nil + } + + if p.unusedSize+p.usedSize != uint64(packSize) && p.usedBlobs != 0 { + // Pack size does not fit and pack is needed => error + // If the pack is not needed, this is no error, the pack can + // and will be simply removed, see below. + printer.E("pack %s: calculated size %d does not match real size %d\nRun 'restic repair index'.\n", + id.Str(), p.unusedSize+p.usedSize, packSize) + return ErrSizeNotMatching + } + + // statistics + switch { + case p.usedBlobs == 0: + stats.Packs.Unused++ + case p.unusedBlobs == 0: + stats.Packs.Used++ + default: + stats.Packs.PartlyUsed++ + } + + if p.uncompressed { + stats.Size.Uncompressed += p.unusedSize + p.usedSize + } + mustCompress := false + if repoVersion >= 2 { + // repo v2: always repack tree blobs if uncompressed + // compress data blobs if requested + mustCompress = (p.tpe == restic.TreeBlob || opts.RepackUncompressed) && p.uncompressed + } + + // decide what to do + switch { + case p.usedBlobs == 0: + // All blobs in pack are no longer used => remove pack! + removePacks.Insert(id) + stats.Blobs.Remove += p.unusedBlobs + stats.Size.Remove += p.unusedSize + + case opts.RepackCachableOnly && p.tpe == restic.DataBlob: + // if this is a data pack and --repack-cacheable-only is set => keep pack! + stats.Packs.Keep++ + + case p.unusedBlobs == 0 && p.tpe != restic.InvalidBlob && !mustCompress: + if packSize >= int64(targetPackSize) { + // All blobs in pack are used and not mixed => keep pack! 
+ stats.Packs.Keep++ + } else { + repackSmallCandidates = append(repackSmallCandidates, packInfoWithID{ID: id, packInfo: p, mustCompress: mustCompress}) + } + + default: + // all other packs are candidates for repacking + repackCandidates = append(repackCandidates, packInfoWithID{ID: id, packInfo: p, mustCompress: mustCompress}) + } + + delete(indexPack, id) + bar.Add(1) + return nil + }) + bar.Done() + if err != nil { + return PrunePlan{}, err + } + + // At this point indexPacks contains only missing packs! + + // missing packs that are not needed can be ignored + ignorePacks := restic.NewIDSet() + for id, p := range indexPack { + if p.usedBlobs == 0 { + ignorePacks.Insert(id) + stats.Blobs.Remove += p.unusedBlobs + stats.Size.Remove += p.unusedSize + delete(indexPack, id) + } + } + + if len(indexPack) != 0 { + printer.E("The index references %d needed pack files which are missing from the repository:\n", len(indexPack)) + for id := range indexPack { + printer.E(" %v\n", id) + } + return PrunePlan{}, ErrPacksMissing + } + if len(ignorePacks) != 0 { + printer.E("Missing but unneeded pack files are referenced in the index, will be repaired\n") + for id := range ignorePacks { + printer.E("will forget missing pack file %v\n", id) + } + } + + if len(repackSmallCandidates) < 10 { + // too few small files to be worth the trouble, this also prevents endlessly repacking + // if there is just a single pack file below the target size + stats.Packs.Keep += uint(len(repackSmallCandidates)) + } else { + repackCandidates = append(repackCandidates, repackSmallCandidates...) + } + + // Sort repackCandidates such that packs with highest ratio unused/used space are picked first. + // This is equivalent to sorting by unused / total space. + // Instead of unused[i] / used[i] > unused[j] / used[j] we use + // unused[i] * used[j] > unused[j] * used[i] as uint32*uint32 < uint64 + // Moreover packs containing trees and too small packs are sorted to the beginning + sort.Slice(repackCandidates, func(i, j int) bool { + pi := repackCandidates[i].packInfo + pj := repackCandidates[j].packInfo + switch { + case pi.tpe != restic.DataBlob && pj.tpe == restic.DataBlob: + return true + case pj.tpe != restic.DataBlob && pi.tpe == restic.DataBlob: + return false + case pi.unusedSize+pi.usedSize < uint64(targetPackSize) && pj.unusedSize+pj.usedSize >= uint64(targetPackSize): + return true + case pj.unusedSize+pj.usedSize < uint64(targetPackSize) && pi.unusedSize+pi.usedSize >= uint64(targetPackSize): + return false + } + return pi.unusedSize*pj.usedSize > pj.unusedSize*pi.usedSize + }) + + repack := func(id restic.ID, p packInfo) { + repackPacks.Insert(id) + stats.Blobs.Repack += p.unusedBlobs + p.usedBlobs + stats.Size.Repack += p.unusedSize + p.usedSize + stats.Blobs.Repackrm += p.unusedBlobs + stats.Size.Repackrm += p.unusedSize + if p.uncompressed { + stats.Size.Uncompressed -= p.unusedSize + p.usedSize + } + } + + // calculate limit for number of unused bytes in the repo after repacking + maxUnusedSizeAfter := opts.MaxUnusedBytes(stats.Size.Used) + + for _, p := range repackCandidates { + reachedUnusedSizeAfter := (stats.Size.Unused-stats.Size.Remove-stats.Size.Repackrm < maxUnusedSizeAfter) + reachedRepackSize := stats.Size.Repack+p.unusedSize+p.usedSize >= opts.MaxRepackBytes + packIsLargeEnough := p.unusedSize+p.usedSize >= uint64(targetPackSize) + + switch { + case reachedRepackSize: + stats.Packs.Keep++ + + case p.tpe != restic.DataBlob, p.mustCompress: + // repacking non-data packs / uncompressed-trees is only 
limited by repackSize + repack(p.ID, p.packInfo) + + case reachedUnusedSizeAfter && packIsLargeEnough: + // for all other packs stop repacking if tolerated unused size is reached. + stats.Packs.Keep++ + + default: + repack(p.ID, p.packInfo) + } + } + + stats.Packs.Unref = uint(len(removePacksFirst)) + stats.Packs.Repack = uint(len(repackPacks)) + stats.Packs.Remove = uint(len(removePacks)) + + if repo.Config().Version < 2 { + // compression not supported for repository format version 1 + stats.Size.Uncompressed = 0 + } + + return PrunePlan{removePacksFirst: removePacksFirst, + removePacks: removePacks, + repackPacks: repackPacks, + ignorePacks: ignorePacks, + }, nil +} + +// DoPrune does the actual pruning: +// - remove unreferenced packs first +// - repack given pack files while keeping the given blobs +// - rebuild the index while ignoring all files that will be deleted +// - delete the files +// plan.removePacks and plan.ignorePacks are modified in this function. +func DoPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, plan PrunePlan, printer progress.Printer) (err error) { + if opts.DryRun { + printer.V("Repeated prune dry-runs can report slightly different amounts of data to keep or repack. This is expected behavior.\n\n") + if len(plan.removePacksFirst) > 0 { + printer.V("Would have removed the following unreferenced packs:\n%v\n\n", plan.removePacksFirst) + } + printer.V("Would have repacked and removed the following packs:\n%v\n\n", plan.repackPacks) + printer.V("Would have removed the following no longer used packs:\n%v\n\n", plan.removePacks) + // Always quit here if DryRun was set! + return nil + } + + // unreferenced packs can be safely deleted first + if len(plan.removePacksFirst) != 0 { + printer.P("deleting unreferenced packs\n") + _ = deleteFiles(ctx, true, repo, plan.removePacksFirst, restic.PackFile, printer) + } + + if len(plan.repackPacks) != 0 { + printer.P("repacking packs\n") + bar := printer.NewCounter("packs repacked") + bar.SetMax(uint64(len(plan.repackPacks))) + _, err := Repack(ctx, repo, repo, plan.repackPacks, plan.keepBlobs, bar) + bar.Done() + if err != nil { + return errors.Fatal(err.Error()) + } + + // Also remove repacked packs + plan.removePacks.Merge(plan.repackPacks) + + if len(plan.keepBlobs) != 0 { + printer.E("%v was not repacked\n\n"+ + "Integrity check failed.\n"+ + "Please report this error (along with the output of the 'prune' run) at\n"+ + "https://github.com/restic/restic/issues/new/choose\n", plan.keepBlobs) + return errors.Fatal("internal error: blobs were not repacked") + } + + // allow GC of the blob set + plan.keepBlobs = nil + } + + if len(plan.ignorePacks) == 0 { + plan.ignorePacks = plan.removePacks + } else { + plan.ignorePacks.Merge(plan.removePacks) + } + + if opts.UnsafeRecovery { + printer.P("deleting index files\n") + indexFiles := repo.Index().(*index.MasterIndex).IDs() + err = deleteFiles(ctx, false, repo, indexFiles, restic.IndexFile, printer) + if err != nil { + return errors.Fatalf("%s", err) + } + } else if len(plan.ignorePacks) != 0 { + err = rebuildIndexFiles(ctx, repo, plan.ignorePacks, nil, false, printer) + if err != nil { + return errors.Fatalf("%s", err) + } + } + + if len(plan.removePacks) != 0 { + printer.P("removing %d old packs\n", len(plan.removePacks)) + _ = deleteFiles(ctx, true, repo, plan.removePacks, restic.PackFile, printer) + } + + if opts.UnsafeRecovery { + err = rebuildIndexFiles(ctx, repo, plan.ignorePacks, nil, true, printer) + if err != nil { + return errors.Fatalf("%s", err) + } 
+ } + + printer.P("done\n") + return nil +} + +// deleteFiles deletes the given fileList of fileType in parallel +// if ignoreError=true, it will print a warning if there was an error, else it will abort. +func deleteFiles(ctx context.Context, ignoreError bool, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType, printer progress.Printer) error { + bar := printer.NewCounter("files deleted") + defer bar.Done() + + return restic.ParallelRemove(ctx, repo, fileList, fileType, func(id restic.ID, err error) error { + if err != nil { + printer.E("unable to remove %v/%v from the repository\n", fileType, id) + if !ignoreError { + return err + } + } + printer.VV("removed %v/%v\n", fileType, id) + return nil + }, bar) +} From 85e4021619c58b73fd017758b0565e12b459028f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 6 Apr 2024 19:49:03 +0200 Subject: [PATCH 069/117] prune: move additional option checks to repository --- cmd/restic/cmd_prune.go | 8 -------- cmd/restic/cmd_prune_integration_test.go | 2 +- internal/repository/prune.go | 12 ++++++++++++ 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index 578414f52..0ade0b39d 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -150,14 +150,6 @@ func runPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, term } defer unlock() - if repo.Connections() < 2 { - return errors.Fatal("prune requires a backend connection limit of at least two") - } - - if repo.Config().Version < 2 && opts.RepackUncompressed { - return errors.Fatal("compression requires at least repository format version 2") - } - if opts.UnsafeNoSpaceRecovery != "" { repoID := repo.Config().ID if opts.UnsafeNoSpaceRecovery != repoID { diff --git a/cmd/restic/cmd_prune_integration_test.go b/cmd/restic/cmd_prune_integration_test.go index f5d2e1f6b..715adea9a 100644 --- a/cmd/restic/cmd_prune_integration_test.go +++ b/cmd/restic/cmd_prune_integration_test.go @@ -35,7 +35,7 @@ func testPruneVariants(t *testing.T, unsafeNoSpaceRecovery bool) { } t.Run("0"+suffix, func(t *testing.T) { opts := PruneOptions{MaxUnused: "0%", unsafeRecovery: unsafeNoSpaceRecovery} - checkOpts := CheckOptions{ReadData: true, CheckUnused: true} + checkOpts := CheckOptions{ReadData: true, CheckUnused: !unsafeNoSpaceRecovery} testPrune(t, opts, checkOpts) }) diff --git a/internal/repository/prune.go b/internal/repository/prune.go index 653705bf4..5ebe91f03 100644 --- a/internal/repository/prune.go +++ b/internal/repository/prune.go @@ -2,6 +2,7 @@ package repository import ( "context" + "fmt" "math" "sort" @@ -87,6 +88,17 @@ type packInfoWithID struct { func PlanPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, getUsedBlobs func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error), printer progress.Printer) (PrunePlan, PruneStats, error) { var stats PruneStats + if opts.UnsafeRecovery { + // prevent repacking data to make sure users cannot get stuck. 
+ opts.MaxRepackBytes = 0 + } + if repo.Connections() < 2 { + return PrunePlan{}, stats, fmt.Errorf("prune requires a backend connection limit of at least two") + } + if repo.Config().Version < 2 && opts.RepackUncompressed { + return PrunePlan{}, stats, fmt.Errorf("compression requires at least repository format version 2") + } + usedBlobs, err := getUsedBlobs(ctx, repo) if err != nil { return PrunePlan{}, stats, err From 4c9a10ca3736ff862daa7dfcd4da6cfed57f116d Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 10 Apr 2024 21:31:53 +0200 Subject: [PATCH 070/117] repair packs: deduplicate index rebuild --- internal/repository/repair_pack.go | 14 +------------- 1 file changed, 1 insertion(+), 13 deletions(-) diff --git a/internal/repository/repair_pack.go b/internal/repository/repair_pack.go index 2e0368899..a4261517a 100644 --- a/internal/repository/repair_pack.go +++ b/internal/repository/repair_pack.go @@ -60,19 +60,7 @@ func RepairPacks(ctx context.Context, repo restic.Repository, ids restic.IDSet, } // remove salvaged packs from index - printer.P("rebuilding index") - - bar = printer.NewCounter("packs processed") - err = repo.Index().Save(ctx, repo, ids, nil, restic.MasterIndexSaveOpts{ - SaveProgress: bar, - DeleteProgress: func() *progress.Counter { - return printer.NewCounter("old indexes deleted") - }, - DeleteReport: func(id restic.ID, _ error) { - printer.VV("removed index %v", id.String()) - }, - }) - + err = rebuildIndexFiles(ctx, repo, ids, nil, false, printer) if err != nil { return err } From 7ba5e95a82adc59c91c5cf5bc6970cc04cd456d5 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 14 Apr 2024 11:42:26 +0200 Subject: [PATCH 071/117] check: allow tests to only verify pack&index integrity --- internal/archiver/archiver_test.go | 10 +++++----- internal/checker/testing.go | 24 +++++++++++++----------- internal/restic/testing_test.go | 2 +- 3 files changed, 19 insertions(+), 17 deletions(-) diff --git a/internal/archiver/archiver_test.go b/internal/archiver/archiver_test.go index 841c8f2ce..71fae003f 100644 --- a/internal/archiver/archiver_test.go +++ b/internal/archiver/archiver_test.go @@ -1430,7 +1430,7 @@ func TestArchiverSnapshot(t *testing.T) { } TestEnsureSnapshot(t, repo, snapshotID, want) - checker.TestCheckRepo(t, repo) + checker.TestCheckRepo(t, repo, false) // check that the snapshot contains the targets with absolute paths for i, target := range sn.Paths { @@ -1590,7 +1590,7 @@ func TestArchiverSnapshotSelect(t *testing.T) { } TestEnsureSnapshot(t, repo, snapshotID, want) - checker.TestCheckRepo(t, repo) + checker.TestCheckRepo(t, repo, false) }) } } @@ -1794,7 +1794,7 @@ func TestArchiverParent(t *testing.T) { t.Logf("second backup saved as %v", secondSnapshotID.Str()) t.Logf("testfs: %v", testFS) - checker.TestCheckRepo(t, repo) + checker.TestCheckRepo(t, repo, false) }) } } @@ -1927,7 +1927,7 @@ func TestArchiverErrorReporting(t *testing.T) { } TestEnsureSnapshot(t, repo, snapshotID, want) - checker.TestCheckRepo(t, repo) + checker.TestCheckRepo(t, repo, false) }) } } @@ -2288,7 +2288,7 @@ func TestMetadataChanged(t *testing.T) { // make sure the content matches TestEnsureFileContent(context.Background(), t, repo, "testfile", node3, files["testfile"].(TestFile)) - checker.TestCheckRepo(t, repo) + checker.TestCheckRepo(t, repo, false) } func TestRacyFileSwap(t *testing.T) { diff --git a/internal/checker/testing.go b/internal/checker/testing.go index fe1679393..9e949af02 100644 --- a/internal/checker/testing.go +++ b/internal/checker/testing.go @@ 
-8,7 +8,7 @@ import ( ) // TestCheckRepo runs the checker on repo. -func TestCheckRepo(t testing.TB, repo restic.Repository) { +func TestCheckRepo(t testing.TB, repo restic.Repository, skipStructure bool) { chkr := New(repo, true) hints, errs := chkr.LoadIndex(context.TODO(), nil) @@ -33,18 +33,20 @@ func TestCheckRepo(t testing.TB, repo restic.Repository) { t.Error(err) } - // structure - errChan = make(chan error) - go chkr.Structure(context.TODO(), nil, errChan) + if !skipStructure { + // structure + errChan = make(chan error) + go chkr.Structure(context.TODO(), nil, errChan) - for err := range errChan { - t.Error(err) - } + for err := range errChan { + t.Error(err) + } - // unused blobs - blobs := chkr.UnusedBlobs(context.TODO()) - if len(blobs) > 0 { - t.Errorf("unused blobs found: %v", blobs) + // unused blobs + blobs := chkr.UnusedBlobs(context.TODO()) + if len(blobs) > 0 { + t.Errorf("unused blobs found: %v", blobs) + } } // read data diff --git a/internal/restic/testing_test.go b/internal/restic/testing_test.go index ae8f8dd34..0a0c43892 100644 --- a/internal/restic/testing_test.go +++ b/internal/restic/testing_test.go @@ -45,7 +45,7 @@ func TestCreateSnapshot(t *testing.T) { t.Fatalf("snapshot has zero tree ID") } - checker.TestCheckRepo(t, repo) + checker.TestCheckRepo(t, repo, false) } func BenchmarkTestCreateSnapshot(t *testing.B) { From 35277b7797d60aed97a85703c919b8f7ca7c8dcc Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 14 Apr 2024 11:43:18 +0200 Subject: [PATCH 072/117] backend/mem: cleanup not found error message --- internal/backend/mem/mem_backend.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/internal/backend/mem/mem_backend.go b/internal/backend/mem/mem_backend.go index eea5b060e..8b115b187 100644 --- a/internal/backend/mem/mem_backend.go +++ b/internal/backend/mem/mem_backend.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "encoding/base64" + "fmt" "hash" "io" "net/http" @@ -41,7 +42,7 @@ func NewFactory() location.Factory { ) } -var errNotFound = errors.New("not found") +var errNotFound = fmt.Errorf("not found") const connectionCount = 2 From eda9f7beb45931b68f1844cdf2516ba4f5c6fe4b Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 14 Apr 2024 11:44:13 +0200 Subject: [PATCH 073/117] ui/progress: add helper to print messages during tests --- internal/ui/progress/printer.go | 35 +++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/internal/ui/progress/printer.go b/internal/ui/progress/printer.go index a671621e9..a2bc4c4b5 100644 --- a/internal/ui/progress/printer.go +++ b/internal/ui/progress/printer.go @@ -1,5 +1,7 @@ package progress +import "testing" + // A Printer can can return a new counter or print messages // at different log levels. // It must be safe to call its methods from concurrent goroutines. @@ -28,3 +30,36 @@ func (*NoopPrinter) P(_ string, _ ...interface{}) {} func (*NoopPrinter) V(_ string, _ ...interface{}) {} func (*NoopPrinter) VV(_ string, _ ...interface{}) {} + +// TestPrinter prints messages during testing +type TestPrinter struct { + t testing.TB +} + +func NewTestPrinter(t testing.TB) *TestPrinter { + return &TestPrinter{ + t: t, + } +} + +var _ Printer = (*TestPrinter)(nil) + +func (p *TestPrinter) NewCounter(_ string) *Counter { + return nil +} + +func (p *TestPrinter) E(msg string, args ...interface{}) { + p.t.Logf("error: "+msg, args...) +} + +func (p *TestPrinter) P(msg string, args ...interface{}) { + p.t.Logf("print: "+msg, args...) 
+} + +func (p *TestPrinter) V(msg string, args ...interface{}) { + p.t.Logf("verbose: "+msg, args...) +} + +func (p *TestPrinter) VV(msg string, args ...interface{}) { + p.t.Logf("verbose2: "+msg, args...) +} From c65459cd8a1ca1044e20a923fc94be0beb62859f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 14 Apr 2024 11:48:40 +0200 Subject: [PATCH 074/117] repository: speed up tests --- internal/repository/repack_test.go | 18 +++++++++++++----- internal/repository/repair_pack_test.go | 2 +- internal/repository/repository_test.go | 5 ++--- 3 files changed, 16 insertions(+), 9 deletions(-) diff --git a/internal/repository/repack_test.go b/internal/repository/repack_test.go index e5e46ac2a..e3b4a5996 100644 --- a/internal/repository/repack_test.go +++ b/internal/repository/repack_test.go @@ -18,7 +18,7 @@ func randomSize(min, max int) int { return rand.Intn(max-min) + min } -func createRandomBlobs(t testing.TB, repo restic.Repository, blobs int, pData float32) { +func createRandomBlobs(t testing.TB, repo restic.Repository, blobs int, pData float32, smallBlobs bool) { var wg errgroup.Group repo.StartPackUploader(context.TODO(), &wg) @@ -30,7 +30,11 @@ func createRandomBlobs(t testing.TB, repo restic.Repository, blobs int, pData fl if rand.Float32() < pData { tpe = restic.DataBlob - length = randomSize(10*1024, 1024*1024) // 10KiB to 1MiB of data + if smallBlobs { + length = randomSize(1*1024, 20*1024) // 1KiB to 20KiB of data + } else { + length = randomSize(10*1024, 1024*1024) // 10KiB to 1MiB of data + } } else { tpe = restic.TreeBlob length = randomSize(1*1024, 20*1024) // 1KiB to 20KiB @@ -219,7 +223,9 @@ func testRepack(t *testing.T, version uint) { rand.Seed(seed) t.Logf("rand seed is %v", seed) - createRandomBlobs(t, repo, 100, 0.7) + // add a small amount of blobs twice to create multiple pack files + createRandomBlobs(t, repo, 10, 0.7, false) + createRandomBlobs(t, repo, 10, 0.7, false) packsBefore := listPacks(t, repo) @@ -302,7 +308,9 @@ func testRepackCopy(t *testing.T, version uint) { rand.Seed(seed) t.Logf("rand seed is %v", seed) - createRandomBlobs(t, repo, 100, 0.7) + // add a small amount of blobs twice to create multiple pack files + createRandomBlobs(t, repo, 10, 0.7, false) + createRandomBlobs(t, repo, 10, 0.7, false) flush(t, repo) _, keepBlobs := selectBlobs(t, repo, 0.2) @@ -343,7 +351,7 @@ func testRepackWrongBlob(t *testing.T, version uint) { rand.Seed(seed) t.Logf("rand seed is %v", seed) - createRandomBlobs(t, repo, 5, 0.7) + createRandomBlobs(t, repo, 5, 0.7, false) createRandomWrongBlob(t, repo) // just keep all blobs, but also rewrite every pack diff --git a/internal/repository/repair_pack_test.go b/internal/repository/repair_pack_test.go index b950245aa..c5cdf5ed5 100644 --- a/internal/repository/repair_pack_test.go +++ b/internal/repository/repair_pack_test.go @@ -109,7 +109,7 @@ func testRepairBrokenPack(t *testing.T, version uint) { rand.Seed(seed) t.Logf("rand seed is %v", seed) - createRandomBlobs(t, repo, 5, 0.7) + createRandomBlobs(t, repo, 5, 0.7, true) packsBefore := listPacks(t, repo) blobsBefore := listBlobs(repo) diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index 98ff560fe..b013c4823 100644 --- a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go @@ -242,8 +242,7 @@ func loadIndex(ctx context.Context, repo restic.LoaderUnpacked, id restic.ID) (* } func TestRepositoryLoadUnpackedBroken(t *testing.T) { - repo, cleanup := repository.TestFromFixture(t, repoFixture) - 
defer cleanup() + repo := repository.TestRepository(t) data := rtest.Random(23, 12345) id := restic.Hash(data) @@ -252,7 +251,7 @@ func TestRepositoryLoadUnpackedBroken(t *testing.T) { data[0] ^= 0xff // store broken file - err := repo.Backend().Save(context.TODO(), h, backend.NewByteReader(data, nil)) + err := repo.Backend().Save(context.TODO(), h, backend.NewByteReader(data, repo.Backend().Hasher())) rtest.OK(t, err) // without a retry backend this will just return an error that the file is broken From b25fc2c89d2eb36710fd48749c0584883c85aca5 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 14 Apr 2024 11:49:04 +0200 Subject: [PATCH 075/117] repository: remove redundant flushes from tests --- internal/repository/repack_test.go | 9 --------- 1 file changed, 9 deletions(-) diff --git a/internal/repository/repack_test.go b/internal/repository/repack_test.go index e3b4a5996..cd40c3174 100644 --- a/internal/repository/repack_test.go +++ b/internal/repository/repack_test.go @@ -170,12 +170,6 @@ func repack(t *testing.T, repo restic.Repository, packs restic.IDSet, blobs rest } } -func flush(t *testing.T, repo restic.Repository) { - if err := repo.Flush(context.TODO()); err != nil { - t.Fatalf("repo.SaveIndex() %v", err) - } -} - func rebuildIndex(t *testing.T, repo restic.Repository) { err := repo.SetIndex(index.NewMasterIndex()) rtest.OK(t, err) @@ -239,8 +233,6 @@ func testRepack(t *testing.T, version uint) { packsBefore, packsAfter) } - flush(t, repo) - removeBlobs, keepBlobs := selectBlobs(t, repo, 0.2) removePacks := findPacksForBlobs(t, repo, removeBlobs) @@ -311,7 +303,6 @@ func testRepackCopy(t *testing.T, version uint) { // add a small amount of blobs twice to create multiple pack files createRandomBlobs(t, repo, 10, 0.7, false) createRandomBlobs(t, repo, 10, 0.7, false) - flush(t, repo) _, keepBlobs := selectBlobs(t, repo, 0.2) copyPacks := findPacksForBlobs(t, repo, keepBlobs) From 7d1b9cde3438c71aa11e37fa3fa5279aa15d7ae4 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 14 Apr 2024 11:49:33 +0200 Subject: [PATCH 076/117] repository: use normal Init method in tests --- internal/repository/testing.go | 7 +++++-- internal/restic/config.go | 16 ---------------- 2 files changed, 5 insertions(+), 18 deletions(-) diff --git a/internal/repository/testing.go b/internal/repository/testing.go index 874d179ce..9fb643a46 100644 --- a/internal/repository/testing.go +++ b/internal/repository/testing.go @@ -60,8 +60,11 @@ func TestRepositoryWithBackend(t testing.TB, be backend.Backend, version uint, o t.Fatalf("TestRepository(): new repo failed: %v", err) } - cfg := restic.TestCreateConfig(t, testChunkerPol, version) - err = repo.init(context.TODO(), test.TestPassword, cfg) + if version == 0 { + version = restic.StableRepoVersion + } + pol := testChunkerPol + err = repo.Init(context.TODO(), version, test.TestPassword, &pol) if err != nil { t.Fatalf("TestRepository(): initialize repo failed: %v", err) } diff --git a/internal/restic/config.go b/internal/restic/config.go index 67af259ba..3fb61cc13 100644 --- a/internal/restic/config.go +++ b/internal/restic/config.go @@ -51,22 +51,6 @@ func CreateConfig(version uint) (Config, error) { return cfg, nil } -// TestCreateConfig creates a config for use within tests. 
-func TestCreateConfig(t testing.TB, pol chunker.Pol, version uint) (cfg Config) { - cfg.ChunkerPolynomial = pol - - cfg.ID = NewRandomID().String() - if version == 0 { - version = StableRepoVersion - } - if version < MinRepoVersion || version > MaxRepoVersion { - t.Fatalf("version %d is out of range", version) - } - cfg.Version = version - - return cfg -} - var checkPolynomial = true var checkPolynomialOnce sync.Once From 310db03c0e90c1a448599daf32036166be906345 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 14 Apr 2024 11:51:27 +0200 Subject: [PATCH 077/117] repair index: improve log output if index cannot be deleted The operation will always fail with an error if an index cannot be deleted. Thus, this change is purely cosmetic. --- internal/repository/repair_index.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/internal/repository/repair_index.go b/internal/repository/repair_index.go index 7cf598fa5..ccf8bcdb0 100644 --- a/internal/repository/repair_index.go +++ b/internal/repository/repair_index.go @@ -110,8 +110,12 @@ func rebuildIndexFiles(ctx context.Context, repo restic.Repository, removePacks DeleteProgress: func() *progress.Counter { return printer.NewCounter("old indexes deleted") }, - DeleteReport: func(id restic.ID, _ error) { - printer.VV("removed index %v\n", id.String()) + DeleteReport: func(id restic.ID, err error) { + if err != nil { + printer.VV("failed to remove index %v: %v\n", id.String(), err) + } else { + printer.VV("removed index %v\n", id.String()) + } }, SkipDeletion: skipDeletion, }) From 8d507c1372aed2285bcc2231782204d516d5b1ed Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 14 Apr 2024 11:53:08 +0200 Subject: [PATCH 078/117] repository: add basic test for RepairIndex --- internal/repository/repack_test.go | 6 +- internal/repository/repair_index_test.go | 79 ++++++++++++++++++++++++ 2 files changed, 84 insertions(+), 1 deletion(-) create mode 100644 internal/repository/repair_index_test.go diff --git a/internal/repository/repack_test.go b/internal/repository/repack_test.go index cd40c3174..2f7867101 100644 --- a/internal/repository/repack_test.go +++ b/internal/repository/repack_test.go @@ -125,8 +125,12 @@ func selectBlobs(t *testing.T, repo restic.Repository, p float32) (list1, list2 } func listPacks(t *testing.T, repo restic.Lister) restic.IDSet { + return listFiles(t, repo, restic.PackFile) +} + +func listFiles(t *testing.T, repo restic.Lister, tpe backend.FileType) restic.IDSet { list := restic.NewIDSet() - err := repo.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error { + err := repo.List(context.TODO(), tpe, func(id restic.ID, size int64) error { list.Insert(id) return nil }) diff --git a/internal/repository/repair_index_test.go b/internal/repository/repair_index_test.go new file mode 100644 index 000000000..adaee3832 --- /dev/null +++ b/internal/repository/repair_index_test.go @@ -0,0 +1,79 @@ +package repository_test + +import ( + "context" + "testing" + + "github.com/restic/restic/internal/backend" + "github.com/restic/restic/internal/checker" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/progress" +) + +func listIndex(t *testing.T, repo restic.Lister) restic.IDSet { + return listFiles(t, repo, restic.IndexFile) +} + +func testRebuildIndex(t *testing.T, readAllPacks bool, damage func(t *testing.T, repo *repository.Repository)) { + repo := 
repository.TestRepository(t).(*repository.Repository) + createRandomBlobs(t, repo, 4, 0.5, true) + createRandomBlobs(t, repo, 5, 0.5, true) + indexes := listIndex(t, repo) + t.Logf("old indexes %v", indexes) + + damage(t, repo) + + repo = repository.TestOpenBackend(t, repo.Backend()).(*repository.Repository) + rtest.OK(t, repository.RepairIndex(context.TODO(), repo, repository.RepairIndexOptions{ + ReadAllPacks: readAllPacks, + }, &progress.NoopPrinter{})) + + newIndexes := listIndex(t, repo) + old := indexes.Intersect(newIndexes) + rtest.Assert(t, len(old) == 0, "expected old indexes to be removed, found %v", old) + + checker.TestCheckRepo(t, repo, true) +} + +func TestRebuildIndex(t *testing.T) { + for _, test := range []struct { + name string + damage func(t *testing.T, repo *repository.Repository) + }{ + { + "valid index", + func(t *testing.T, repo *repository.Repository) {}, + }, + { + "damaged index", + func(t *testing.T, repo *repository.Repository) { + index := listIndex(t, repo).List()[0] + replaceFile(t, repo, backend.Handle{Type: restic.IndexFile, Name: index.String()}, func(b []byte) []byte { + b[0] ^= 0xff + return b + }) + }, + }, + { + "missing index", + func(t *testing.T, repo *repository.Repository) { + index := listIndex(t, repo).List()[0] + rtest.OK(t, repo.Backend().Remove(context.TODO(), backend.Handle{Type: restic.IndexFile, Name: index.String()})) + }, + }, + { + "missing pack", + func(t *testing.T, repo *repository.Repository) { + pack := listPacks(t, repo).List()[0] + rtest.OK(t, repo.Backend().Remove(context.TODO(), backend.Handle{Type: restic.PackFile, Name: pack.String()})) + }, + }, + } { + t.Run(test.name, func(t *testing.T) { + testRebuildIndex(t, false, test.damage) + testRebuildIndex(t, true, test.damage) + }) + } +} From d8622c86eb40c4c3751a4fb818a36ecbe62cd291 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 14 Apr 2024 12:32:29 +0200 Subject: [PATCH 079/117] prune: clean up internal interface --- cmd/restic/cmd_prune.go | 6 +++--- internal/repository/prune.go | 40 +++++++++++++++++++++++++----------- 2 files changed, 31 insertions(+), 15 deletions(-) diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index 0ade0b39d..ea5acddf3 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -191,7 +191,7 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption RepackUncompressed: opts.RepackUncompressed, } - plan, stats, err := repository.PlanPrune(ctx, popts, repo, func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error) { + plan, err := repository.PlanPrune(ctx, popts, repo, func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error) { return getUsedBlobs(ctx, repo, ignoreSnapshots, printer) }, printer) if err != nil { @@ -202,7 +202,7 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption printer.P("\nWould have made the following changes:") } - err = printPruneStats(printer, stats) + err = printPruneStats(printer, plan.Stats()) if err != nil { return err } @@ -210,7 +210,7 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption // Trigger GC to reset garbage collection threshold runtime.GC() - return repository.DoPrune(ctx, popts, repo, plan, printer) + return plan.Execute(ctx, printer) } // printPruneStats prints out the statistics diff --git a/internal/repository/prune.go b/internal/repository/prune.go index 5ebe91f03..d34f3c88f 100644 --- 
a/internal/repository/prune.go +++ b/internal/repository/prune.go @@ -66,6 +66,10 @@ type PrunePlan struct { keepBlobs restic.CountedBlobSet // blobs to keep during repacking removePacks restic.IDSet // packs to remove ignorePacks restic.IDSet // packs to ignore when rebuilding the index + + repo restic.Repository + stats PruneStats + opts PruneOptions } type packInfo struct { @@ -85,7 +89,7 @@ type packInfoWithID struct { // PlanPrune selects which files to rewrite and which to delete and which blobs to keep. // Also some summary statistics are returned. -func PlanPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, getUsedBlobs func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error), printer progress.Printer) (PrunePlan, PruneStats, error) { +func PlanPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, getUsedBlobs func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error), printer progress.Printer) (*PrunePlan, error) { var stats PruneStats if opts.UnsafeRecovery { @@ -93,27 +97,27 @@ func PlanPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, g opts.MaxRepackBytes = 0 } if repo.Connections() < 2 { - return PrunePlan{}, stats, fmt.Errorf("prune requires a backend connection limit of at least two") + return nil, fmt.Errorf("prune requires a backend connection limit of at least two") } if repo.Config().Version < 2 && opts.RepackUncompressed { - return PrunePlan{}, stats, fmt.Errorf("compression requires at least repository format version 2") + return nil, fmt.Errorf("compression requires at least repository format version 2") } usedBlobs, err := getUsedBlobs(ctx, repo) if err != nil { - return PrunePlan{}, stats, err + return nil, err } printer.P("searching used packs...\n") keepBlobs, indexPack, err := packInfoFromIndex(ctx, repo.Index(), usedBlobs, &stats, printer) if err != nil { - return PrunePlan{}, stats, err + return nil, err } printer.P("collecting packs for deletion and repacking\n") plan, err := decidePackAction(ctx, opts, repo, indexPack, &stats, printer) if err != nil { - return PrunePlan{}, stats, err + return nil, err } if len(plan.repackPacks) != 0 { @@ -137,7 +141,11 @@ func PlanPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, g } plan.keepBlobs = keepBlobs - return plan, stats, nil + plan.repo = repo + plan.stats = stats + plan.opts = opts + + return &plan, nil } func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs restic.CountedBlobSet, stats *PruneStats, printer progress.Printer) (restic.CountedBlobSet, map[restic.ID]packInfo, error) { @@ -489,14 +497,18 @@ func decidePackAction(ctx context.Context, opts PruneOptions, repo restic.Reposi }, nil } -// DoPrune does the actual pruning: +func (plan *PrunePlan) Stats() PruneStats { + return plan.stats +} + +// Execute does the actual pruning: // - remove unreferenced packs first // - repack given pack files while keeping the given blobs // - rebuild the index while ignoring all files that will be deleted // - delete the files // plan.removePacks and plan.ignorePacks are modified in this function. 
-func DoPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, plan PrunePlan, printer progress.Printer) (err error) { - if opts.DryRun { +func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) (err error) { + if plan.opts.DryRun { printer.V("Repeated prune dry-runs can report slightly different amounts of data to keep or repack. This is expected behavior.\n\n") if len(plan.removePacksFirst) > 0 { printer.V("Would have removed the following unreferenced packs:\n%v\n\n", plan.removePacksFirst) @@ -507,6 +519,10 @@ func DoPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, pla return nil } + repo := plan.repo + // make sure the plan can only be used once + plan.repo = nil + // unreferenced packs can be safely deleted first if len(plan.removePacksFirst) != 0 { printer.P("deleting unreferenced packs\n") @@ -544,7 +560,7 @@ func DoPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, pla plan.ignorePacks.Merge(plan.removePacks) } - if opts.UnsafeRecovery { + if plan.opts.UnsafeRecovery { printer.P("deleting index files\n") indexFiles := repo.Index().(*index.MasterIndex).IDs() err = deleteFiles(ctx, false, repo, indexFiles, restic.IndexFile, printer) @@ -563,7 +579,7 @@ func DoPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, pla _ = deleteFiles(ctx, true, repo, plan.removePacks, restic.PackFile, printer) } - if opts.UnsafeRecovery { + if plan.opts.UnsafeRecovery { err = rebuildIndexFiles(ctx, repo, plan.ignorePacks, nil, true, printer) if err != nil { return errors.Fatalf("%s", err) From 038586dc9d5908f363a617a748b266e241df2540 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 14 Apr 2024 12:32:48 +0200 Subject: [PATCH 080/117] repository: add minimal test for prune --- internal/repository/prune_test.go | 93 +++++++++++++++++++++++++++++++ 1 file changed, 93 insertions(+) create mode 100644 internal/repository/prune_test.go diff --git a/internal/repository/prune_test.go b/internal/repository/prune_test.go new file mode 100644 index 000000000..13bf58adc --- /dev/null +++ b/internal/repository/prune_test.go @@ -0,0 +1,93 @@ +package repository_test + +import ( + "context" + "math" + "testing" + + "github.com/restic/restic/internal/checker" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" + "github.com/restic/restic/internal/ui/progress" +) + +func testPrune(t *testing.T, opts repository.PruneOptions, errOnUnused bool) { + repo := repository.TestRepository(t).(*repository.Repository) + createRandomBlobs(t, repo, 4, 0.5, true) + createRandomBlobs(t, repo, 5, 0.5, true) + keep, _ := selectBlobs(t, repo, 0.5) + + plan, err := repository.PlanPrune(context.TODO(), opts, repo, func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error) { + return restic.NewCountedBlobSet(keep.List()...), nil + }, &progress.NoopPrinter{}) + rtest.OK(t, err) + + rtest.OK(t, plan.Execute(context.TODO(), &progress.NoopPrinter{})) + + repo = repository.TestOpenBackend(t, repo.Backend()).(*repository.Repository) + checker.TestCheckRepo(t, repo, true) + + if errOnUnused { + existing := listBlobs(repo) + rtest.Assert(t, existing.Equals(keep), "unexpected blobs, wanted %v got %v", keep, existing) + } +} + +func TestPrune(t *testing.T) { + for _, test := range []struct { + name string + opts repository.PruneOptions + errOnUnused bool + }{ + { + name: "0", + opts: repository.PruneOptions{ + MaxRepackBytes: 
math.MaxUint64, + MaxUnusedBytes: func(used uint64) (unused uint64) { return 0 }, + }, + errOnUnused: true, + }, + { + name: "50", + opts: repository.PruneOptions{ + MaxRepackBytes: math.MaxUint64, + MaxUnusedBytes: func(used uint64) (unused uint64) { return used / 2 }, + }, + }, + { + name: "unlimited", + opts: repository.PruneOptions{ + MaxRepackBytes: math.MaxUint64, + MaxUnusedBytes: func(used uint64) (unused uint64) { return math.MaxUint64 }, + }, + }, + { + name: "cachableonly", + opts: repository.PruneOptions{ + MaxRepackBytes: math.MaxUint64, + MaxUnusedBytes: func(used uint64) (unused uint64) { return used / 20 }, + RepackCachableOnly: true, + }, + }, + { + name: "small", + opts: repository.PruneOptions{ + MaxRepackBytes: math.MaxUint64, + MaxUnusedBytes: func(used uint64) (unused uint64) { return math.MaxUint64 }, + RepackSmall: true, + }, + errOnUnused: true, + }, + } { + t.Run(test.name, func(t *testing.T) { + testPrune(t, test.opts, test.errOnUnused) + }) + t.Run(test.name+"-recovery", func(t *testing.T) { + opts := test.opts + opts.UnsafeRecovery = true + // unsafeNoSpaceRecovery does not repack partially used pack files + testPrune(t, opts, false) + }) + } +} From defd7ae729f3832e566ec618868112d615207b4a Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 14 Apr 2024 13:46:21 +0200 Subject: [PATCH 081/117] prune/repair index: reset in-memory index after command The current in-memory index becomes stale after prune or repair index have run. Thus, just drop the in-memory index altogether once these commands have finished. --- internal/repository/prune.go | 7 +++++++ internal/repository/repair_index.go | 9 ++++++++- internal/repository/repository.go | 18 +++++++++++++++--- internal/restic/repository.go | 1 + 4 files changed, 31 insertions(+), 4 deletions(-) diff --git a/internal/repository/prune.go b/internal/repository/prune.go index d34f3c88f..8900fffaa 100644 --- a/internal/repository/prune.go +++ b/internal/repository/prune.go @@ -586,6 +586,13 @@ func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) (e } } + if err != nil { + return err + } + + // drop outdated in-memory index + repo.ClearIndex() + printer.P("done\n") return nil } diff --git a/internal/repository/repair_index.go b/internal/repository/repair_index.go index ccf8bcdb0..63e104132 100644 --- a/internal/repository/repair_index.go +++ b/internal/repository/repair_index.go @@ -98,7 +98,14 @@ func RepairIndex(ctx context.Context, repo *Repository, opts RepairIndexOptions, } } - return rebuildIndexFiles(ctx, repo, removePacks, obsoleteIndexes, false, printer) + err = rebuildIndexFiles(ctx, repo, removePacks, obsoleteIndexes, false, printer) + if err != nil { + return err + } + + // drop outdated in-memory index + repo.ClearIndex() + return nil } func rebuildIndexFiles(ctx context.Context, repo restic.Repository, removePacks restic.IDSet, extraObsolete restic.IDs, skipDeletion bool, printer progress.Printer) error { diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 8e34c7125..f163c6a19 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -142,9 +142,6 @@ func (r *Repository) DisableAutoIndexUpdate() { // setConfig assigns the given config and updates the repository parameters accordingly func (r *Repository) setConfig(cfg restic.Config) { r.cfg = cfg - if r.cfg.Version >= 2 { - r.idx.MarkCompressed() - } } // Config returns the repository configuration. 
@@ -637,9 +634,21 @@ func (r *Repository) Index() restic.MasterIndex { // SetIndex instructs the repository to use the given index. func (r *Repository) SetIndex(i restic.MasterIndex) error { r.idx = i.(*index.MasterIndex) + r.configureIndex() return r.prepareCache() } +func (r *Repository) ClearIndex() { + r.idx = index.NewMasterIndex() + r.configureIndex() +} + +func (r *Repository) configureIndex() { + if r.cfg.Version >= 2 { + r.idx.MarkCompressed() + } +} + // LoadIndex loads all index files from the backend in parallel and stores them func (r *Repository) LoadIndex(ctx context.Context, p *progress.Counter) error { debug.Log("Loading index") @@ -662,6 +671,9 @@ func (r *Repository) LoadIndex(ctx context.Context, p *progress.Counter) error { defer p.Done() } + // reset in-memory index before loading it from the repository + r.ClearIndex() + err = index.ForAllIndexes(ctx, indexList, r, func(_ restic.ID, idx *index.Index, _ bool, err error) error { if err != nil { return err diff --git a/internal/restic/repository.go b/internal/restic/repository.go index 66cc22ea9..89c54ffbb 100644 --- a/internal/restic/repository.go +++ b/internal/restic/repository.go @@ -26,6 +26,7 @@ type Repository interface { Index() MasterIndex LoadIndex(context.Context, *progress.Counter) error + ClearIndex() SetIndex(MasterIndex) error LookupBlobSize(ID, BlobType) (uint, bool) From 09587e6c08a84fff1687715d03d3b27b8dd6c9ed Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 14 Apr 2024 13:57:19 +0200 Subject: [PATCH 082/117] repository: duplicate a few blobs in prune tests --- internal/repository/prune_test.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/internal/repository/prune_test.go b/internal/repository/prune_test.go index 13bf58adc..bff221f49 100644 --- a/internal/repository/prune_test.go +++ b/internal/repository/prune_test.go @@ -10,6 +10,7 @@ import ( "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" "github.com/restic/restic/internal/ui/progress" + "golang.org/x/sync/errgroup" ) func testPrune(t *testing.T, opts repository.PruneOptions, errOnUnused bool) { @@ -18,6 +19,17 @@ func testPrune(t *testing.T, opts repository.PruneOptions, errOnUnused bool) { createRandomBlobs(t, repo, 5, 0.5, true) keep, _ := selectBlobs(t, repo, 0.5) + var wg errgroup.Group + repo.StartPackUploader(context.TODO(), &wg) + // duplicate a few blobs to exercise those code paths + for blob := range keep { + buf, err := repo.LoadBlob(context.TODO(), blob.Type, blob.ID, nil) + rtest.OK(t, err) + _, _, _, err = repo.SaveBlob(context.TODO(), blob.Type, buf, blob.ID, true) + rtest.OK(t, err) + } + rtest.OK(t, repo.Flush(context.TODO())) + plan, err := repository.PlanPrune(context.TODO(), opts, repo, func(ctx context.Context, repo restic.Repository) (usedBlobs restic.CountedBlobSet, err error) { return restic.NewCountedBlobSet(keep.List()...), nil }, &progress.NoopPrinter{}) From c9191ea72c569dfcc647d6349137447625e19195 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 14 Apr 2024 14:17:40 +0200 Subject: [PATCH 083/117] forget: cleanup verbose output on snapshot deletion error --- cmd/restic/cmd_forget.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index f6fc5379c..9018da211 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -276,8 +276,9 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption err := restic.ParallelRemove(ctx, repo, 
removeSnIDs, restic.SnapshotFile, func(id restic.ID, err error) error { if err != nil { printer.E("unable to remove %v/%v from the repository\n", restic.SnapshotFile, id) + } else { + printer.VV("removed %v/%v\n", restic.SnapshotFile, id) } - printer.VV("removed %v/%v\n", restic.SnapshotFile, id) return nil }, bar) bar.Done() From 001bb716764c8ec62674b7e610a003f00a626fd3 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 17 Apr 2024 18:32:30 +0200 Subject: [PATCH 084/117] repair packs: Properly close backup files --- cmd/restic/cmd_repair_packs.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cmd/restic/cmd_repair_packs.go b/cmd/restic/cmd_repair_packs.go index 00dee076b..a7b448d85 100644 --- a/cmd/restic/cmd_repair_packs.go +++ b/cmd/restic/cmd_repair_packs.go @@ -82,6 +82,10 @@ func runRepairPacks(ctx context.Context, gopts GlobalOptions, term *termstatus.T return err }) if err != nil { + _ = f.Close() + return err + } + if err := f.Close(); err != nil { return err } } From a82ed71de7a35b64a1925799c12812a0163dfea3 Mon Sep 17 00:00:00 2001 From: coderwander <770732124@qq.com> Date: Tue, 16 Apr 2024 11:22:26 +0800 Subject: [PATCH 085/117] Fix struct names Signed-off-by: coderwander <770732124@qq.com> --- internal/fs/vss_windows.go | 2 +- internal/walker/walker_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/fs/vss_windows.go b/internal/fs/vss_windows.go index 8c9b8942b..d75567d25 100644 --- a/internal/fs/vss_windows.go +++ b/internal/fs/vss_windows.go @@ -190,7 +190,7 @@ func (e *vssError) Error() string { return fmt.Sprintf("VSS error: %s: %s (%#x)", e.text, e.hresult.Str(), e.hresult) } -// VssError encapsulates errors returned from calling VSS api. +// vssTextError encapsulates errors returned from calling VSS api. type vssTextError struct { text string } diff --git a/internal/walker/walker_test.go b/internal/walker/walker_test.go index 0f0009107..75f80e57f 100644 --- a/internal/walker/walker_test.go +++ b/internal/walker/walker_test.go @@ -13,7 +13,7 @@ import ( // TestTree is used to construct a list of trees for testing the walker. type TestTree map[string]interface{} -// TestNode is used to test the walker. +// TestFile is used to test the walker. type TestFile struct { Size uint64 } From 6aced61c72b3efafc4da065a035684bfcd1e514b Mon Sep 17 00:00:00 2001 From: will-ca <37680486+will-ca@users.noreply.github.com> Date: Thu, 18 Apr 2024 07:29:55 +0000 Subject: [PATCH 086/117] Tiny docs wording clarification. --- cmd/restic/cmd_stats.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go index 20d7a485c..6bf0dbf19 100644 --- a/cmd/restic/cmd_stats.go +++ b/cmd/restic/cmd_stats.go @@ -38,7 +38,7 @@ depending on what you are trying to calculate. The modes are: * restore-size: (default) Counts the size of the restored files. -* files-by-contents: Counts total size of files, where a file is +* files-by-contents: Counts total size of unique files, where a file is considered unique if it has unique contents. * raw-data: Counts the size of blobs in the repository, regardless of how many files reference them. 
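The "repair packs: Properly close backup files" change above (patch 084) relies on a common Go pattern for files opened for writing: on the error path the file is closed on a best-effort basis so that the original error is the one reported, while on the success path the error returned by Close is checked, because buffered data may only reach disk when the file is closed. The following standalone sketch illustrates that pattern under stated assumptions; the writeBackup helper and the file name are illustrative only and not part of restic:

package main

import (
	"fmt"
	"os"
)

// writeBackup shows the Close handling used in the repair-packs fix:
// ignore the Close error on the failure path, check it on success.
// Helper and file name are hypothetical, for illustration only.
func writeBackup(name string, data []byte) error {
	f, err := os.Create(name)
	if err != nil {
		return err
	}

	if _, err := f.Write(data); err != nil {
		// best effort: the write error is the one worth reporting
		_ = f.Close()
		return err
	}

	// Pending data may only be flushed when the file is closed,
	// so a Close error must not be ignored on the success path.
	if err := f.Close(); err != nil {
		return err
	}
	return nil
}

func main() {
	if err := writeBackup("example.bak", []byte("demo")); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}

Checking Close only on the success path keeps the more informative write error from being masked, which is the same design choice the patch makes for the backup files written during pack repair.
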
From 10355c3fb69db2e5740525c261fad49e0f9a451e Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 19 Apr 2024 20:48:43 +0200 Subject: [PATCH 087/117] repository: Better error message if blob is larger than 4GB --- internal/index/index.go | 5 ++--- internal/repository/repository.go | 5 +++++ 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/internal/index/index.go b/internal/index/index.go index 1fb2c155e..73128f7bb 100644 --- a/internal/index/index.go +++ b/internal/index/index.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "io" + "math" "sync" "time" @@ -69,11 +70,9 @@ func (idx *Index) addToPacks(id restic.ID) int { return len(idx.packs) - 1 } -const maxuint32 = 1<<32 - 1 - func (idx *Index) store(packIndex int, blob restic.Blob) { // assert that offset and length fit into uint32! - if blob.Offset > maxuint32 || blob.Length > maxuint32 || blob.UncompressedLength > maxuint32 { + if blob.Offset > math.MaxUint32 || blob.Length > math.MaxUint32 || blob.UncompressedLength > math.MaxUint32 { panic("offset or length does not fit in uint32. You have packs > 4GB!") } diff --git a/internal/repository/repository.go b/internal/repository/repository.go index 8e34c7125..4198e574f 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -6,6 +6,7 @@ import ( "context" "fmt" "io" + "math" "os" "runtime" "sort" @@ -917,6 +918,10 @@ func (r *Repository) Close() error { // occupies in the repo (compressed or not, including encryption overhead). func (r *Repository) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (newID restic.ID, known bool, size int, err error) { + if int64(len(buf)) > math.MaxUint32 { + return restic.ID{}, false, 0, fmt.Errorf("blob is larger than 4GB") + } + // compute plaintext hash if not already set if id.IsNull() { // Special case the hash calculation for all zero chunks. This is especially From 6c6dceade37a2072e2c728136e0b3e4a6ea94202 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 19 Apr 2024 22:26:14 +0200 Subject: [PATCH 088/117] global: unify backend open and create --- cmd/restic/global.go | 52 +++++++++++++++++--------------------------- 1 file changed, 20 insertions(+), 32 deletions(-) diff --git a/cmd/restic/global.go b/cmd/restic/global.go index cc47496f3..c93fb4bce 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -570,16 +570,13 @@ func parseConfig(loc location.Location, opts options.Options) (interface{}, erro return cfg, nil } -// Open the backend specified by a location config. 
-func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (backend.Backend, error) { +func innerOpen(ctx context.Context, s string, gopts GlobalOptions, opts options.Options, create bool) (backend.Backend, error) { debug.Log("parsing location %v", location.StripPassword(gopts.backends, s)) loc, err := location.Parse(gopts.backends, s) if err != nil { return nil, errors.Fatalf("parsing repository location failed: %v", err) } - var be backend.Backend - cfg, err := parseConfig(loc, opts) if err != nil { return nil, err @@ -599,7 +596,13 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio return nil, errors.Fatalf("invalid backend: %q", loc.Scheme) } - be, err = factory.Open(ctx, cfg, rt, lim) + var be backend.Backend + if create { + be, err = factory.Create(ctx, cfg, rt, nil) + } else { + be, err = factory.Open(ctx, cfg, rt, lim) + } + if err != nil { return nil, errors.Fatalf("unable to open repository at %v: %v", location.StripPassword(gopts.backends, s), err) } @@ -615,6 +618,17 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio } } + return be, nil +} + +// Open the backend specified by a location config. +func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (backend.Backend, error) { + + be, err := innerOpen(ctx, s, gopts, opts, false) + if err != nil { + return nil, err + } + // check if config is there fi, err := be.Stat(ctx, backend.Handle{Type: restic.ConfigFile}) if err != nil { @@ -630,31 +644,5 @@ func open(ctx context.Context, s string, gopts GlobalOptions, opts options.Optio // Create the backend specified by URI. func create(ctx context.Context, s string, gopts GlobalOptions, opts options.Options) (backend.Backend, error) { - debug.Log("parsing location %v", location.StripPassword(gopts.backends, s)) - loc, err := location.Parse(gopts.backends, s) - if err != nil { - return nil, err - } - - cfg, err := parseConfig(loc, opts) - if err != nil { - return nil, err - } - - rt, err := backend.Transport(globalOptions.TransportOptions) - if err != nil { - return nil, errors.Fatal(err.Error()) - } - - factory := gopts.backends.Lookup(loc.Scheme) - if factory == nil { - return nil, errors.Fatalf("invalid backend: %q", loc.Scheme) - } - - be, err := factory.Create(ctx, cfg, rt, nil) - if err != nil { - return nil, err - } - - return logger.New(sema.NewBackend(be)), nil + return innerOpen(ctx, s, gopts, opts, true) } From 21a7cb405c82c850db475e2410fba4015fc605b4 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 29 Mar 2024 23:28:59 +0100 Subject: [PATCH 089/117] check: replace cleanup handler --- cmd/restic/cmd_check.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index 7bea641ae..83ebf89a6 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -199,10 +199,7 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args } cleanup := prepareCheckCache(opts, &gopts) - AddCleanupHandler(func(code int) (int, error) { - cleanup() - return code, nil - }) + defer cleanup() if !gopts.NoLock { Verbosef("create exclusive lock for repository\n") From 93135dc705ba6f749dcb4c8eaa965a4c9cc4c456 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 29 Mar 2024 23:29:49 +0100 Subject: [PATCH 090/117] lock: drop cleanup handler --- cmd/restic/lock.go | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) diff --git a/cmd/restic/lock.go b/cmd/restic/lock.go 
index 69d433df1..99e199a67 100644 --- a/cmd/restic/lock.go +++ b/cmd/restic/lock.go @@ -21,18 +21,11 @@ func internalOpenWithLocked(ctx context.Context, gopts GlobalOptions, dryRun boo Verbosef("%s", msg) } }, Warnf) - - unlock = lock.Unlock - // make sure that a repository is unlocked properly and after cancel() was - // called by the cleanup handler in global.go - AddCleanupHandler(func(code int) (int, error) { - lock.Unlock() - return code, nil - }) - if err != nil { return nil, nil, nil, err } + + unlock = lock.Unlock } else { repo.SetDryRun() } From 86c7909f41cc8e3e748a6447831c7c7243a37b69 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 30 Mar 2024 14:26:12 +0100 Subject: [PATCH 091/117] mount: use standalone shutdown hook via goroutine --- cmd/restic/cmd_mount.go | 48 ++++++++++++------------ cmd/restic/cmd_mount_integration_test.go | 3 +- 2 files changed, 25 insertions(+), 26 deletions(-) diff --git a/cmd/restic/cmd_mount.go b/cmd/restic/cmd_mount.go index cb2b1142d..1e4bff03e 100644 --- a/cmd/restic/cmd_mount.go +++ b/cmd/restic/cmd_mount.go @@ -152,28 +152,15 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args } } - AddCleanupHandler(func(code int) (int, error) { - debug.Log("running umount cleanup handler for mount at %v", mountpoint) - err := umount(mountpoint) - if err != nil { - Warnf("unable to umount (maybe already umounted or still in use?): %v\n", err) - } - // replace error code of sigint - if code == 130 { - code = 0 - } - return code, nil - }) + systemFuse.Debug = func(msg interface{}) { + debug.Log("fuse: %v", msg) + } c, err := systemFuse.Mount(mountpoint, mountOptions...) if err != nil { return err } - systemFuse.Debug = func(msg interface{}) { - debug.Log("fuse: %v", msg) - } - cfg := fuse.Config{ OwnerIsRoot: opts.OwnerRoot, Filter: opts.SnapshotFilter, @@ -187,15 +174,26 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args Printf("When finished, quit with Ctrl-c here or umount the mountpoint.\n") debug.Log("serving mount at %v", mountpoint) - err = fs.Serve(c, root) - if err != nil { - return err + + done := make(chan struct{}) + + go func() { + defer close(done) + err = fs.Serve(c, root) + }() + + select { + case <-ctx.Done(): + debug.Log("running umount cleanup handler for mount at %v", mountpoint) + err := systemFuse.Unmount(mountpoint) + if err != nil { + Warnf("unable to umount (maybe already umounted or still in use?): %v\n", err) + } + + return nil + case <-done: + // clean shutdown, nothing to do } - <-c.Ready - return c.MountError -} - -func umount(mountpoint string) error { - return systemFuse.Unmount(mountpoint) + return err } diff --git a/cmd/restic/cmd_mount_integration_test.go b/cmd/restic/cmd_mount_integration_test.go index 590e15030..d764b4e4f 100644 --- a/cmd/restic/cmd_mount_integration_test.go +++ b/cmd/restic/cmd_mount_integration_test.go @@ -12,6 +12,7 @@ import ( "testing" "time" + systemFuse "github.com/anacrolix/fuse" "github.com/restic/restic/internal/restic" rtest "github.com/restic/restic/internal/test" ) @@ -65,7 +66,7 @@ func testRunMount(t testing.TB, gopts GlobalOptions, dir string, wg *sync.WaitGr func testRunUmount(t testing.TB, dir string) { var err error for i := 0; i < mountWait; i++ { - if err = umount(dir); err == nil { + if err = systemFuse.Unmount(dir); err == nil { t.Logf("directory %v umounted", dir) return } From eb710a28e8574972d71c0b898897c409f62d6563 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 29 Mar 2024 23:52:45 +0100 Subject: [PATCH 
092/117] use standalone shutdown hook for readPasswordTerminal move terminal restoration into readPasswordTerminal --- cmd/restic/cmd_copy.go | 2 +- cmd/restic/cmd_init.go | 4 +- cmd/restic/cmd_key_add.go | 6 +- cmd/restic/cmd_key_passwd.go | 2 +- cmd/restic/global.go | 99 ++++++++++++++----------------- cmd/restic/secondary_repo.go | 5 +- cmd/restic/secondary_repo_test.go | 5 +- 7 files changed, 58 insertions(+), 65 deletions(-) diff --git a/cmd/restic/cmd_copy.go b/cmd/restic/cmd_copy.go index 410134e41..de3958def 100644 --- a/cmd/restic/cmd_copy.go +++ b/cmd/restic/cmd_copy.go @@ -53,7 +53,7 @@ func init() { } func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []string) error { - secondaryGopts, isFromRepo, err := fillSecondaryGlobalOpts(opts.secondaryRepoOptions, gopts, "destination") + secondaryGopts, isFromRepo, err := fillSecondaryGlobalOpts(ctx, opts.secondaryRepoOptions, gopts, "destination") if err != nil { return err } diff --git a/cmd/restic/cmd_init.go b/cmd/restic/cmd_init.go index 7154279e8..e6ea69441 100644 --- a/cmd/restic/cmd_init.go +++ b/cmd/restic/cmd_init.go @@ -80,7 +80,7 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args [] return err } - gopts.password, err = ReadPasswordTwice(gopts, + gopts.password, err = ReadPasswordTwice(ctx, gopts, "enter password for new repository: ", "enter password again: ") if err != nil { @@ -131,7 +131,7 @@ func runInit(ctx context.Context, opts InitOptions, gopts GlobalOptions, args [] func maybeReadChunkerPolynomial(ctx context.Context, opts InitOptions, gopts GlobalOptions) (*chunker.Pol, error) { if opts.CopyChunkerParameters { - otherGopts, _, err := fillSecondaryGlobalOpts(opts.secondaryRepoOptions, gopts, "secondary") + otherGopts, _, err := fillSecondaryGlobalOpts(ctx, opts.secondaryRepoOptions, gopts, "secondary") if err != nil { return nil, err } diff --git a/cmd/restic/cmd_key_add.go b/cmd/restic/cmd_key_add.go index 83e0cab7f..306754627 100644 --- a/cmd/restic/cmd_key_add.go +++ b/cmd/restic/cmd_key_add.go @@ -60,7 +60,7 @@ func runKeyAdd(ctx context.Context, gopts GlobalOptions, opts KeyAddOptions, arg } func addKey(ctx context.Context, repo *repository.Repository, gopts GlobalOptions, opts KeyAddOptions) error { - pw, err := getNewPassword(gopts, opts.NewPasswordFile) + pw, err := getNewPassword(ctx, gopts, opts.NewPasswordFile) if err != nil { return err } @@ -83,7 +83,7 @@ func addKey(ctx context.Context, repo *repository.Repository, gopts GlobalOption // testKeyNewPassword is used to set a new password during integration testing. 
var testKeyNewPassword string -func getNewPassword(gopts GlobalOptions, newPasswordFile string) (string, error) { +func getNewPassword(ctx context.Context, gopts GlobalOptions, newPasswordFile string) (string, error) { if testKeyNewPassword != "" { return testKeyNewPassword, nil } @@ -97,7 +97,7 @@ func getNewPassword(gopts GlobalOptions, newPasswordFile string) (string, error) newopts := gopts newopts.password = "" - return ReadPasswordTwice(newopts, + return ReadPasswordTwice(ctx, newopts, "enter new password: ", "enter password again: ") } diff --git a/cmd/restic/cmd_key_passwd.go b/cmd/restic/cmd_key_passwd.go index 70abca6dc..0836c4cfe 100644 --- a/cmd/restic/cmd_key_passwd.go +++ b/cmd/restic/cmd_key_passwd.go @@ -57,7 +57,7 @@ func runKeyPasswd(ctx context.Context, gopts GlobalOptions, opts KeyPasswdOption } func changePassword(ctx context.Context, repo *repository.Repository, gopts GlobalOptions, opts KeyPasswdOptions) error { - pw, err := getNewPassword(gopts, opts.NewPasswordFile) + pw, err := getNewPassword(ctx, gopts, opts.NewPasswordFile) if err != nil { return err } diff --git a/cmd/restic/global.go b/cmd/restic/global.go index cc47496f3..9f1ec85a2 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -96,7 +96,6 @@ var globalOptions = GlobalOptions{ stderr: os.Stderr, } -var isReadingPassword bool var internalGlobalCtx context.Context func init() { @@ -165,8 +164,6 @@ func init() { // parse target pack size from env, on error the default value will be used targetPackSize, _ := strconv.ParseUint(os.Getenv("RESTIC_PACK_SIZE"), 10, 32) globalOptions.PackSize = uint(targetPackSize) - - restoreTerminal() } func stdinIsTerminal() bool { @@ -191,40 +188,6 @@ func stdoutTerminalWidth() int { return w } -// restoreTerminal installs a cleanup handler that restores the previous -// terminal state on exit. This handler is only intended to restore the -// terminal configuration if restic exits after receiving a signal. A regular -// program execution must revert changes to the terminal configuration itself. -// The terminal configuration is only restored while reading a password. -func restoreTerminal() { - if !term.IsTerminal(int(os.Stdout.Fd())) { - return - } - - fd := int(os.Stdout.Fd()) - state, err := term.GetState(fd) - if err != nil { - fmt.Fprintf(os.Stderr, "unable to get terminal state: %v\n", err) - return - } - - AddCleanupHandler(func(code int) (int, error) { - // Restoring the terminal configuration while restic runs in the - // background, causes restic to get stopped on unix systems with - // a SIGTTOU signal. Thus only restore the terminal settings if - // they might have been modified, which is the case while reading - // a password. - if !isReadingPassword { - return code, nil - } - err := term.Restore(fd, state) - if err != nil { - fmt.Fprintf(os.Stderr, "unable to restore terminal state: %v\n", err) - } - return code, err - }) -} - // ClearLine creates a platform dependent string to clear the current // line, so it can be overwritten. // @@ -333,24 +296,48 @@ func readPassword(in io.Reader) (password string, err error) { // readPasswordTerminal reads the password from the given reader which must be a // tty. Prompt is printed on the writer out before attempting to read the -// password. -func readPasswordTerminal(in *os.File, out io.Writer, prompt string) (password string, err error) { - fmt.Fprint(out, prompt) - isReadingPassword = true - buf, err := term.ReadPassword(int(in.Fd())) - isReadingPassword = false - fmt.Fprintln(out) +// password. 
If the context is canceled, the function leaks the password reading +// goroutine. +func readPasswordTerminal(ctx context.Context, in *os.File, out *os.File, prompt string) (password string, err error) { + fd := int(out.Fd()) + state, err := term.GetState(fd) + if err != nil { + fmt.Fprintf(os.Stderr, "unable to get terminal state: %v\n", err) + return "", err + } + + done := make(chan struct{}) + var buf []byte + + go func() { + defer close(done) + fmt.Fprint(out, prompt) + buf, err = term.ReadPassword(int(in.Fd())) + fmt.Fprintln(out) + }() + + select { + case <-ctx.Done(): + err := term.Restore(fd, state) + if err != nil { + fmt.Fprintf(os.Stderr, "unable to restore terminal state: %v\n", err) + } + return "", ctx.Err() + case <-done: + // clean shutdown, nothing to do + } + if err != nil { return "", errors.Wrap(err, "ReadPassword") } - password = string(buf) - return password, nil + return string(buf), nil } // ReadPassword reads the password from a password file, the environment -// variable RESTIC_PASSWORD or prompts the user. -func ReadPassword(opts GlobalOptions, prompt string) (string, error) { +// variable RESTIC_PASSWORD or prompts the user. If the context is canceled, +// the function leaks the password reading goroutine. +func ReadPassword(ctx context.Context, opts GlobalOptions, prompt string) (string, error) { if opts.password != "" { return opts.password, nil } @@ -361,7 +348,7 @@ func ReadPassword(opts GlobalOptions, prompt string) (string, error) { ) if stdinIsTerminal() { - password, err = readPasswordTerminal(os.Stdin, os.Stderr, prompt) + password, err = readPasswordTerminal(ctx, os.Stdin, os.Stderr, prompt) } else { password, err = readPassword(os.Stdin) Verbosef("reading repository password from stdin\n") @@ -379,14 +366,15 @@ func ReadPassword(opts GlobalOptions, prompt string) (string, error) { } // ReadPasswordTwice calls ReadPassword two times and returns an error when the -// passwords don't match. -func ReadPasswordTwice(gopts GlobalOptions, prompt1, prompt2 string) (string, error) { - pw1, err := ReadPassword(gopts, prompt1) +// passwords don't match. If the context is canceled, the function leaks the +// password reading goroutine. +func ReadPasswordTwice(ctx context.Context, gopts GlobalOptions, prompt1, prompt2 string) (string, error) { + pw1, err := ReadPassword(ctx, gopts, prompt1) if err != nil { return "", err } if stdinIsTerminal() { - pw2, err := ReadPassword(gopts, prompt2) + pw2, err := ReadPassword(ctx, gopts, prompt2) if err != nil { return "", err } @@ -469,7 +457,10 @@ func OpenRepository(ctx context.Context, opts GlobalOptions) (*repository.Reposi } for ; passwordTriesLeft > 0; passwordTriesLeft-- { - opts.password, err = ReadPassword(opts, "enter password for repository: ") + opts.password, err = ReadPassword(ctx, opts, "enter password for repository: ") + if ctx.Err() != nil { + return nil, ctx.Err() + } if err != nil && passwordTriesLeft > 1 { opts.password = "" fmt.Printf("%s. 
Try again\n", err) diff --git a/cmd/restic/secondary_repo.go b/cmd/restic/secondary_repo.go index 4c46b60df..2afd36a81 100644 --- a/cmd/restic/secondary_repo.go +++ b/cmd/restic/secondary_repo.go @@ -1,6 +1,7 @@ package main import ( + "context" "os" "github.com/restic/restic/internal/errors" @@ -56,7 +57,7 @@ func initSecondaryRepoOptions(f *pflag.FlagSet, opts *secondaryRepoOptions, repo opts.PasswordCommand = os.Getenv("RESTIC_FROM_PASSWORD_COMMAND") } -func fillSecondaryGlobalOpts(opts secondaryRepoOptions, gopts GlobalOptions, repoPrefix string) (GlobalOptions, bool, error) { +func fillSecondaryGlobalOpts(ctx context.Context, opts secondaryRepoOptions, gopts GlobalOptions, repoPrefix string) (GlobalOptions, bool, error) { if opts.Repo == "" && opts.RepositoryFile == "" && opts.LegacyRepo == "" && opts.LegacyRepositoryFile == "" { return GlobalOptions{}, false, errors.Fatal("Please specify a source repository location (--from-repo or --from-repository-file)") } @@ -109,7 +110,7 @@ func fillSecondaryGlobalOpts(opts secondaryRepoOptions, gopts GlobalOptions, rep return GlobalOptions{}, false, err } } - dstGopts.password, err = ReadPassword(dstGopts, "enter password for "+repoPrefix+" repository: ") + dstGopts.password, err = ReadPassword(ctx, dstGopts, "enter password for "+repoPrefix+" repository: ") if err != nil { return GlobalOptions{}, false, err } diff --git a/cmd/restic/secondary_repo_test.go b/cmd/restic/secondary_repo_test.go index ff1a10b03..aa511ca99 100644 --- a/cmd/restic/secondary_repo_test.go +++ b/cmd/restic/secondary_repo_test.go @@ -1,6 +1,7 @@ package main import ( + "context" "os" "path/filepath" "testing" @@ -170,7 +171,7 @@ func TestFillSecondaryGlobalOpts(t *testing.T) { // Test all valid cases for _, testCase := range validSecondaryRepoTestCases { - DstGOpts, isFromRepo, err := fillSecondaryGlobalOpts(testCase.Opts, gOpts, "destination") + DstGOpts, isFromRepo, err := fillSecondaryGlobalOpts(context.TODO(), testCase.Opts, gOpts, "destination") rtest.OK(t, err) rtest.Equals(t, DstGOpts, testCase.DstGOpts) rtest.Equals(t, isFromRepo, testCase.FromRepo) @@ -178,7 +179,7 @@ func TestFillSecondaryGlobalOpts(t *testing.T) { // Test all invalid cases for _, testCase := range invalidSecondaryRepoTestCases { - _, _, err := fillSecondaryGlobalOpts(testCase.Opts, gOpts, "destination") + _, _, err := fillSecondaryGlobalOpts(context.TODO(), testCase.Opts, gOpts, "destination") rtest.Assert(t, err != nil, "Expected error, but function did not return an error") } } From 699ef5e9def04297fbd40d08c197d1f9e9a8cdaa Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 5 Apr 2024 23:16:15 +0200 Subject: [PATCH 093/117] debug: replace cleanup handler usage in profiling setup --- cmd/restic/global_debug.go | 82 ++++++++++++++++++------------------ cmd/restic/global_release.go | 3 ++ cmd/restic/main.go | 3 ++ 3 files changed, 47 insertions(+), 41 deletions(-) diff --git a/cmd/restic/global_debug.go b/cmd/restic/global_debug.go index b798074d1..502b2cf6e 100644 --- a/cmd/restic/global_debug.go +++ b/cmd/restic/global_debug.go @@ -15,23 +15,28 @@ import ( "github.com/pkg/profile" ) -var ( - listenProfile string - memProfilePath string - cpuProfilePath string - traceProfilePath string - blockProfilePath string - insecure bool -) +type ProfileOptions struct { + listen string + memPath string + cpuPath string + tracePath string + blockPath string + insecure bool +} + +var profileOpts ProfileOptions +var prof interface { + Stop() +} func init() { f := cmdRoot.PersistentFlags() - 
f.StringVar(&listenProfile, "listen-profile", "", "listen on this `address:port` for memory profiling") - f.StringVar(&memProfilePath, "mem-profile", "", "write memory profile to `dir`") - f.StringVar(&cpuProfilePath, "cpu-profile", "", "write cpu profile to `dir`") - f.StringVar(&traceProfilePath, "trace-profile", "", "write trace to `dir`") - f.StringVar(&blockProfilePath, "block-profile", "", "write block profile to `dir`") - f.BoolVar(&insecure, "insecure-kdf", false, "use insecure KDF settings") + f.StringVar(&profileOpts.listen, "listen-profile", "", "listen on this `address:port` for memory profiling") + f.StringVar(&profileOpts.memPath, "mem-profile", "", "write memory profile to `dir`") + f.StringVar(&profileOpts.cpuPath, "cpu-profile", "", "write cpu profile to `dir`") + f.StringVar(&profileOpts.tracePath, "trace-profile", "", "write trace to `dir`") + f.StringVar(&profileOpts.blockPath, "block-profile", "", "write block profile to `dir`") + f.BoolVar(&profileOpts.insecure, "insecure-kdf", false, "use insecure KDF settings") } type fakeTestingTB struct{} @@ -41,10 +46,10 @@ func (fakeTestingTB) Logf(msg string, args ...interface{}) { } func runDebug() error { - if listenProfile != "" { - fmt.Fprintf(os.Stderr, "running profile HTTP server on %v\n", listenProfile) + if profileOpts.listen != "" { + fmt.Fprintf(os.Stderr, "running profile HTTP server on %v\n", profileOpts.listen) go func() { - err := http.ListenAndServe(listenProfile, nil) + err := http.ListenAndServe(profileOpts.listen, nil) if err != nil { fmt.Fprintf(os.Stderr, "profile HTTP server listen failed: %v\n", err) } @@ -52,16 +57,16 @@ func runDebug() error { } profilesEnabled := 0 - if memProfilePath != "" { + if profileOpts.memPath != "" { profilesEnabled++ } - if cpuProfilePath != "" { + if profileOpts.cpuPath != "" { profilesEnabled++ } - if traceProfilePath != "" { + if profileOpts.tracePath != "" { profilesEnabled++ } - if blockProfilePath != "" { + if profileOpts.blockPath != "" { profilesEnabled++ } @@ -69,30 +74,25 @@ func runDebug() error { return errors.Fatal("only one profile (memory, CPU, trace, or block) may be activated at the same time") } - var prof interface { - Stop() + if profileOpts.memPath != "" { + prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.MemProfile, profile.ProfilePath(profileOpts.memPath)) + } else if profileOpts.cpuPath != "" { + prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.CPUProfile, profile.ProfilePath(profileOpts.cpuPath)) + } else if profileOpts.tracePath != "" { + prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.TraceProfile, profile.ProfilePath(profileOpts.tracePath)) + } else if profileOpts.blockPath != "" { + prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.BlockProfile, profile.ProfilePath(profileOpts.blockPath)) } - if memProfilePath != "" { - prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.MemProfile, profile.ProfilePath(memProfilePath)) - } else if cpuProfilePath != "" { - prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.CPUProfile, profile.ProfilePath(cpuProfilePath)) - } else if traceProfilePath != "" { - prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.TraceProfile, profile.ProfilePath(traceProfilePath)) - } else if blockProfilePath != "" { - prof = profile.Start(profile.Quiet, profile.NoShutdownHook, profile.BlockProfile, profile.ProfilePath(blockProfilePath)) - } - - if prof != nil { - AddCleanupHandler(func(code int) (int, error) { 
- prof.Stop() - return code, nil - }) - } - - if insecure { + if profileOpts.insecure { repository.TestUseLowSecurityKDFParameters(fakeTestingTB{}) } return nil } + +func stopDebug() { + if prof != nil { + prof.Stop() + } +} diff --git a/cmd/restic/global_release.go b/cmd/restic/global_release.go index 7cb2e6caf..1dab5a293 100644 --- a/cmd/restic/global_release.go +++ b/cmd/restic/global_release.go @@ -5,3 +5,6 @@ package main // runDebug is a noop without the debug tag. func runDebug() error { return nil } + +// stopDebug is a noop without the debug tag. +func stopDebug() {} diff --git a/cmd/restic/main.go b/cmd/restic/main.go index a4acb1cab..308a432b5 100644 --- a/cmd/restic/main.go +++ b/cmd/restic/main.go @@ -74,6 +74,9 @@ The full documentation can be found at https://restic.readthedocs.io/ . // enabled) return runDebug() }, + PersistentPostRun: func(_ *cobra.Command, _ []string) { + stopDebug() + }, } // Distinguish commands that need the password from those that work without, From 6f2a4dea210c98169c4b6658a6cf42091d182424 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 29 Mar 2024 23:58:48 +0100 Subject: [PATCH 094/117] remove global shutdown hook --- cmd/restic/cleanup.go | 88 ++++++++++--------------------------------- cmd/restic/global.go | 11 ------ cmd/restic/main.go | 17 +++++++-- 3 files changed, 33 insertions(+), 83 deletions(-) diff --git a/cmd/restic/cleanup.go b/cmd/restic/cleanup.go index 5a6cf79e1..90ea93b92 100644 --- a/cmd/restic/cleanup.go +++ b/cmd/restic/cleanup.go @@ -1,89 +1,41 @@ package main import ( + "context" "os" "os/signal" - "sync" "syscall" "github.com/restic/restic/internal/debug" ) -var cleanupHandlers struct { - sync.Mutex - list []func(code int) (int, error) - done bool - ch chan os.Signal +func createGlobalContext() context.Context { + ctx, cancel := context.WithCancel(context.Background()) + + ch := make(chan os.Signal, 1) + go cleanupHandler(ch, cancel) + signal.Notify(ch, syscall.SIGINT, syscall.SIGTERM) + + return ctx } -func init() { - cleanupHandlers.ch = make(chan os.Signal, 1) - go CleanupHandler(cleanupHandlers.ch) - signal.Notify(cleanupHandlers.ch, syscall.SIGINT, syscall.SIGTERM) -} +// cleanupHandler handles the SIGINT and SIGTERM signals. +func cleanupHandler(c <-chan os.Signal, cancel context.CancelFunc) { + s := <-c + debug.Log("signal %v received, cleaning up", s) + Warnf("%ssignal %v received, cleaning up\n", clearLine(0), s) -// AddCleanupHandler adds the function f to the list of cleanup handlers so -// that it is executed when all the cleanup handlers are run, e.g. when SIGINT -// is received. 
-func AddCleanupHandler(f func(code int) (int, error)) { - cleanupHandlers.Lock() - defer cleanupHandlers.Unlock() - - // reset the done flag for integration tests - cleanupHandlers.done = false - - cleanupHandlers.list = append(cleanupHandlers.list, f) -} - -// RunCleanupHandlers runs all registered cleanup handlers -func RunCleanupHandlers(code int) int { - cleanupHandlers.Lock() - defer cleanupHandlers.Unlock() - - if cleanupHandlers.done { - return code + if val, _ := os.LookupEnv("RESTIC_DEBUG_STACKTRACE_SIGINT"); val != "" { + _, _ = os.Stderr.WriteString("\n--- STACKTRACE START ---\n\n") + _, _ = os.Stderr.WriteString(debug.DumpStacktrace()) + _, _ = os.Stderr.WriteString("\n--- STACKTRACE END ---\n") } - cleanupHandlers.done = true - for _, f := range cleanupHandlers.list { - var err error - code, err = f(code) - if err != nil { - Warnf("error in cleanup handler: %v\n", err) - } - } - cleanupHandlers.list = nil - return code + cancel() } -// CleanupHandler handles the SIGINT and SIGTERM signals. -func CleanupHandler(c <-chan os.Signal) { - for s := range c { - debug.Log("signal %v received, cleaning up", s) - Warnf("%ssignal %v received, cleaning up\n", clearLine(0), s) - - if val, _ := os.LookupEnv("RESTIC_DEBUG_STACKTRACE_SIGINT"); val != "" { - _, _ = os.Stderr.WriteString("\n--- STACKTRACE START ---\n\n") - _, _ = os.Stderr.WriteString(debug.DumpStacktrace()) - _, _ = os.Stderr.WriteString("\n--- STACKTRACE END ---\n") - } - - code := 0 - - if s == syscall.SIGINT || s == syscall.SIGTERM { - code = 130 - } else { - code = 1 - } - - Exit(code) - } -} - -// Exit runs the cleanup handlers and then terminates the process with the -// given exit code. +// Exit terminates the process with the given exit code. func Exit(code int) { - code = RunCleanupHandlers(code) debug.Log("exiting with status code %d", code) os.Exit(code) } diff --git a/cmd/restic/global.go b/cmd/restic/global.go index 9f1ec85a2..5b21871dc 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -96,8 +96,6 @@ var globalOptions = GlobalOptions{ stderr: os.Stderr, } -var internalGlobalCtx context.Context - func init() { backends := location.NewRegistry() backends.Register(azure.NewFactory()) @@ -111,15 +109,6 @@ func init() { backends.Register(swift.NewFactory()) globalOptions.backends = backends - var cancel context.CancelFunc - internalGlobalCtx, cancel = context.WithCancel(context.Background()) - AddCleanupHandler(func(code int) (int, error) { - // Must be called before the unlock cleanup handler to ensure that the latter is - // not blocked due to limited number of backend connections, see #1434 - cancel() - return code, nil - }) - f := cmdRoot.PersistentFlags() f.StringVarP(&globalOptions.Repo, "repo", "r", "", "`repository` to backup to or restore from (default: $RESTIC_REPOSITORY)") f.StringVarP(&globalOptions.RepositoryFile, "repository-file", "", "", "`file` to read the repository location from (default: $RESTIC_REPOSITORY_FILE)") diff --git a/cmd/restic/main.go b/cmd/restic/main.go index 308a432b5..56ddf74a4 100644 --- a/cmd/restic/main.go +++ b/cmd/restic/main.go @@ -3,6 +3,7 @@ package main import ( "bufio" "bytes" + "context" "fmt" "log" "os" @@ -118,7 +119,13 @@ func main() { debug.Log("main %#v", os.Args) debug.Log("restic %s compiled with %v on %v/%v", version, runtime.Version(), runtime.GOOS, runtime.GOARCH) - err = cmdRoot.ExecuteContext(internalGlobalCtx) + + ctx := createGlobalContext() + err = cmdRoot.ExecuteContext(ctx) + + if err == nil { + err = ctx.Err() + } switch { case 
restic.IsAlreadyLocked(err): @@ -140,11 +147,13 @@ func main() { } var exitCode int - switch err { - case nil: + switch { + case err == nil: exitCode = 0 - case ErrInvalidSourceData: + case err == ErrInvalidSourceData: exitCode = 3 + case errors.Is(err, context.Canceled): + exitCode = 130 default: exitCode = 1 } From 910927670f135ae08810710631f1126900743abc Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 5 Apr 2024 22:23:08 +0200 Subject: [PATCH 095/117] mount: fix exit code on cancellation --- cmd/restic/cmd_mount.go | 2 +- cmd/restic/main.go | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/cmd/restic/cmd_mount.go b/cmd/restic/cmd_mount.go index 1e4bff03e..5a10447f3 100644 --- a/cmd/restic/cmd_mount.go +++ b/cmd/restic/cmd_mount.go @@ -190,7 +190,7 @@ func runMount(ctx context.Context, opts MountOptions, gopts GlobalOptions, args Warnf("unable to umount (maybe already umounted or still in use?): %v\n", err) } - return nil + return ErrOK case <-done: // clean shutdown, nothing to do } diff --git a/cmd/restic/main.go b/cmd/restic/main.go index 56ddf74a4..82517c31a 100644 --- a/cmd/restic/main.go +++ b/cmd/restic/main.go @@ -25,6 +25,8 @@ func init() { _, _ = maxprocs.Set() } +var ErrOK = errors.New("ok") + // cmdRoot is the base command when no other command has been specified. var cmdRoot = &cobra.Command{ Use: "restic", @@ -125,6 +127,9 @@ func main() { if err == nil { err = ctx.Err() + } else if err == ErrOK { + // ErrOK overwrites context cancelation errors + err = nil } switch { From 31624aeffd4cf9a06afbb16db7ffd7e899187bb6 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 30 Mar 2024 00:19:58 +0100 Subject: [PATCH 096/117] Improve command shutdown on context cancellation --- cmd/restic/cmd_check.go | 14 +++++++++++++- cmd/restic/cmd_copy.go | 5 ++++- cmd/restic/cmd_find.go | 3 +++ cmd/restic/cmd_forget.go | 7 +++++++ cmd/restic/cmd_prune.go | 3 +++ cmd/restic/cmd_recover.go | 6 ++++++ cmd/restic/cmd_repair_snapshots.go | 3 +++ cmd/restic/cmd_rewrite.go | 3 +++ cmd/restic/cmd_snapshots.go | 3 +++ cmd/restic/cmd_stats.go | 5 ++--- cmd/restic/cmd_tag.go | 3 +++ internal/archiver/archiver.go | 1 + internal/archiver/tree_saver.go | 4 ++++ internal/index/master_index.go | 3 +++ internal/repository/prune.go | 18 ++++++++++++++++++ internal/repository/repack.go | 2 +- internal/repository/repository.go | 3 +++ 17 files changed, 80 insertions(+), 6 deletions(-) diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index 83ebf89a6..38623c305 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -219,6 +219,9 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args Verbosef("load indexes\n") bar := newIndexProgress(gopts.Quiet, gopts.JSON) hints, errs := chkr.LoadIndex(ctx, bar) + if ctx.Err() != nil { + return ctx.Err() + } errorsFound := false suggestIndexRebuild := false @@ -280,6 +283,9 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args if orphanedPacks > 0 { Verbosef("%d additional files were found in the repo, which likely contain duplicate data.\nThis is non-critical, you can run `restic prune` to correct this.\n", orphanedPacks) } + if ctx.Err() != nil { + return ctx.Err() + } Verbosef("check snapshots, trees and blobs\n") errChan = make(chan error) @@ -313,6 +319,9 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args // Must happen after `errChan` is read from in the above loop to avoid // deadlocking in the case of errors. 
wg.Wait() + if ctx.Err() != nil { + return ctx.Err() + } if opts.CheckUnused { for _, id := range chkr.UnusedBlobs(ctx) { @@ -392,10 +401,13 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args doReadData(packs) } + if ctx.Err() != nil { + return ctx.Err() + } + if errorsFound { return errors.Fatal("repository contains errors") } - Verbosef("no errors were found\n") return nil diff --git a/cmd/restic/cmd_copy.go b/cmd/restic/cmd_copy.go index de3958def..ad6c58a25 100644 --- a/cmd/restic/cmd_copy.go +++ b/cmd/restic/cmd_copy.go @@ -103,6 +103,9 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args [] // also consider identical snapshot copies dstSnapshotByOriginal[*sn.ID()] = append(dstSnapshotByOriginal[*sn.ID()], sn) } + if ctx.Err() != nil { + return ctx.Err() + } // remember already processed trees across all snapshots visitedTrees := restic.NewIDSet() @@ -147,7 +150,7 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args [] } Verbosef("snapshot %s saved\n", newID.Str()) } - return nil + return ctx.Err() } func similarSnapshots(sna *restic.Snapshot, snb *restic.Snapshot) bool { diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go index e29fe30dc..77b651c5e 100644 --- a/cmd/restic/cmd_find.go +++ b/cmd/restic/cmd_find.go @@ -608,6 +608,9 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args [] for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &opts.SnapshotFilter, opts.Snapshots) { filteredSnapshots = append(filteredSnapshots, sn) } + if ctx.Err() != nil { + return ctx.Err() + } sort.Slice(filteredSnapshots, func(i, j int) bool { return filteredSnapshots[i].Time.Before(filteredSnapshots[j].Time) diff --git a/cmd/restic/cmd_forget.go b/cmd/restic/cmd_forget.go index 9018da211..92eeed4a1 100644 --- a/cmd/restic/cmd_forget.go +++ b/cmd/restic/cmd_forget.go @@ -188,6 +188,9 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) { snapshots = append(snapshots, sn) } + if ctx.Err() != nil { + return ctx.Err() + } var jsonGroups []*ForgetGroup @@ -270,6 +273,10 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption } } + if ctx.Err() != nil { + return ctx.Err() + } + if len(removeSnIDs) > 0 { if !opts.DryRun { bar := printer.NewCounter("files deleted") diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index ea5acddf3..cbec100df 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -197,6 +197,9 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption if err != nil { return err } + if ctx.Err() != nil { + return ctx.Err() + } if popts.DryRun { printer.P("\nWould have made the following changes:") diff --git a/cmd/restic/cmd_recover.go b/cmd/restic/cmd_recover.go index f9a4d419d..cac29a60c 100644 --- a/cmd/restic/cmd_recover.go +++ b/cmd/restic/cmd_recover.go @@ -66,11 +66,17 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error { trees[blob.Blob.ID] = false } }) + if ctx.Err() != nil { + return ctx.Err() + } Verbosef("load %d trees\n", len(trees)) bar = newProgressMax(!gopts.Quiet, uint64(len(trees)), "trees loaded") for id := range trees { tree, err := restic.LoadTree(ctx, repo, id) + if ctx.Err() != nil { + return ctx.Err() + } if err != nil { Warnf("unable to load tree %v: %v\n", id.Str(), err) continue diff --git a/cmd/restic/cmd_repair_snapshots.go 
b/cmd/restic/cmd_repair_snapshots.go index 4d9745e15..b200d100a 100644 --- a/cmd/restic/cmd_repair_snapshots.go +++ b/cmd/restic/cmd_repair_snapshots.go @@ -145,6 +145,9 @@ func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOpt changedCount++ } } + if ctx.Err() != nil { + return ctx.Err() + } Verbosef("\n") if changedCount == 0 { diff --git a/cmd/restic/cmd_rewrite.go b/cmd/restic/cmd_rewrite.go index 06d4ddbd1..38a868c5c 100644 --- a/cmd/restic/cmd_rewrite.go +++ b/cmd/restic/cmd_rewrite.go @@ -294,6 +294,9 @@ func runRewrite(ctx context.Context, opts RewriteOptions, gopts GlobalOptions, a changedCount++ } } + if ctx.Err() != nil { + return ctx.Err() + } Verbosef("\n") if changedCount == 0 { diff --git a/cmd/restic/cmd_snapshots.go b/cmd/restic/cmd_snapshots.go index 1a9cd2232..faa86d3a6 100644 --- a/cmd/restic/cmd_snapshots.go +++ b/cmd/restic/cmd_snapshots.go @@ -69,6 +69,9 @@ func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) { snapshots = append(snapshots, sn) } + if ctx.Err() != nil { + return ctx.Err() + } snapshotGroups, grouped, err := restic.GroupSnapshots(snapshots, opts.GroupBy) if err != nil { return err diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go index 6bf0dbf19..2647d78e5 100644 --- a/cmd/restic/cmd_stats.go +++ b/cmd/restic/cmd_stats.go @@ -117,9 +117,8 @@ func runStats(ctx context.Context, opts StatsOptions, gopts GlobalOptions, args return fmt.Errorf("error walking snapshot: %v", err) } } - - if err != nil { - return err + if ctx.Err() != nil { + return ctx.Err() } if opts.countMode == countModeRawData { diff --git a/cmd/restic/cmd_tag.go b/cmd/restic/cmd_tag.go index b0d139fa6..3bf386f2c 100644 --- a/cmd/restic/cmd_tag.go +++ b/cmd/restic/cmd_tag.go @@ -122,6 +122,9 @@ func runTag(ctx context.Context, opts TagOptions, gopts GlobalOptions, args []st changeCnt++ } } + if ctx.Err() != nil { + return ctx.Err() + } if changeCnt == 0 { Verbosef("no snapshots were modified\n") } else { diff --git a/internal/archiver/archiver.go b/internal/archiver/archiver.go index 146ff3a7c..c1f73eea6 100644 --- a/internal/archiver/archiver.go +++ b/internal/archiver/archiver.go @@ -380,6 +380,7 @@ func (fn *FutureNode) take(ctx context.Context) futureNodeResult { return res } case <-ctx.Done(): + return futureNodeResult{err: ctx.Err()} } return futureNodeResult{err: errors.Errorf("no result")} } diff --git a/internal/archiver/tree_saver.go b/internal/archiver/tree_saver.go index eae524a78..9c11b48f0 100644 --- a/internal/archiver/tree_saver.go +++ b/internal/archiver/tree_saver.go @@ -90,6 +90,10 @@ func (s *TreeSaver) save(ctx context.Context, job *saveTreeJob) (*restic.Node, I // return the error if it wasn't ignored if fnr.err != nil { debug.Log("err for %v: %v", fnr.snPath, fnr.err) + if fnr.err == context.Canceled { + return nil, stats, fnr.err + } + fnr.err = s.errFn(fnr.target, fnr.err) if fnr.err == nil { // ignore error diff --git a/internal/index/master_index.go b/internal/index/master_index.go index 4c114b955..9833f9a55 100644 --- a/internal/index/master_index.go +++ b/internal/index/master_index.go @@ -320,6 +320,9 @@ func (mi *MasterIndex) Save(ctx context.Context, repo restic.Repository, exclude newIndex = NewIndex() } } + if wgCtx.Err() != nil { + return wgCtx.Err() + } } err := newIndex.AddToSupersedes(extraObsolete...) 
diff --git a/internal/repository/prune.go b/internal/repository/prune.go index 8900fffaa..39eb30317 100644 --- a/internal/repository/prune.go +++ b/internal/repository/prune.go @@ -130,6 +130,9 @@ func PlanPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, g } keepBlobs.Delete(blob.BlobHandle) }) + if ctx.Err() != nil { + return nil, ctx.Err() + } if keepBlobs.Len() < blobCount/2 { // replace with copy to shrink map to necessary size if there's a chance to benefit @@ -166,6 +169,9 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re usedBlobs[bh] = count } }) + if ctx.Err() != nil { + return nil, nil, ctx.Err() + } // Check if all used blobs have been found in index missingBlobs := restic.NewBlobSet() @@ -240,6 +246,9 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re // update indexPack indexPack[blob.PackID] = ip }) + if ctx.Err() != nil { + return nil, nil, ctx.Err() + } // if duplicate blobs exist, those will be set to either "used" or "unused": // - mark only one occurrence of duplicate blobs as used @@ -286,6 +295,9 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re indexPack[blob.PackID] = ip }) } + if ctx.Err() != nil { + return nil, nil, ctx.Err() + } // Sanity check. If no duplicates exist, all blobs have value 1. After handling // duplicates, this also applies to duplicates. @@ -528,6 +540,9 @@ func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) (e printer.P("deleting unreferenced packs\n") _ = deleteFiles(ctx, true, repo, plan.removePacksFirst, restic.PackFile, printer) } + if ctx.Err() != nil { + return ctx.Err() + } if len(plan.repackPacks) != 0 { printer.P("repacking packs\n") @@ -578,6 +593,9 @@ func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) (e printer.P("removing %d old packs\n", len(plan.removePacks)) _ = deleteFiles(ctx, true, repo, plan.removePacks, restic.PackFile, printer) } + if ctx.Err() != nil { + return ctx.Err() + } if plan.opts.UnsafeRecovery { err = rebuildIndexFiles(ctx, repo, plan.ignorePacks, nil, true, printer) diff --git a/internal/repository/repack.go b/internal/repository/repack.go index 5588984f6..53656252a 100644 --- a/internal/repository/repack.go +++ b/internal/repository/repack.go @@ -72,7 +72,7 @@ func repack(ctx context.Context, repo restic.Repository, dstRepo restic.Reposito return wgCtx.Err() } } - return nil + return wgCtx.Err() }) worker := func() error { diff --git a/internal/repository/repository.go b/internal/repository/repository.go index ae4528d80..a43971266 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -713,6 +713,9 @@ func (r *Repository) LoadIndex(ctx context.Context, p *progress.Counter) error { return errors.New("index uses feature not supported by repository version 1") } } + if ctx.Err() != nil { + return ctx.Err() + } // remove index files from the cache which have been removed in the repo return r.prepareCache() From 940a3159b5b3bf84ae84c4a1766f6c2359344804 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Fri, 5 Apr 2024 22:20:14 +0200 Subject: [PATCH 097/117] let index.Each() and pack.Size() return error on canceled context This forces a caller to actually check that the function did complete. 
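A minimal sketch of the resulting caller pattern (the helper below is hypothetical and only
illustrates the idea; it assumes the usual internal/restic imports). Because Each() now returns
ctx.Err() when the context is canceled, callers can no longer mistake an aborted iteration for
a complete one:

    // countDataBlobs is a hypothetical example of the enforced pattern: the
    // error returned by Each() is propagated, so a canceled context is
    // reported instead of silently yielding a partial count.
    func countDataBlobs(ctx context.Context, repo restic.Repository) (int, error) {
        count := 0
        err := repo.Index().Each(ctx, func(pb restic.PackedBlob) {
            if pb.Type == restic.DataBlob {
                count++
            }
        })
        return count, err
    }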
--- cmd/restic/cmd_check.go | 6 ++++- cmd/restic/cmd_find.go | 14 +++++++---- cmd/restic/cmd_list.go | 3 +-- cmd/restic/cmd_recover.go | 6 ++--- cmd/restic/cmd_stats.go | 11 +++++---- cmd/restic/integration_helpers_test.go | 8 +++---- internal/checker/checker.go | 26 ++++++++++++-------- internal/checker/checker_test.go | 3 ++- internal/checker/testing.go | 5 +++- internal/index/index.go | 3 ++- internal/index/index_test.go | 8 +++---- internal/index/master_index.go | 16 +++++++------ internal/index/master_index_test.go | 12 +++++----- internal/pack/pack.go | 6 ++--- internal/repository/prune.go | 32 ++++++++++++++----------- internal/repository/repair_index.go | 5 +++- internal/repository/repair_pack_test.go | 2 +- internal/repository/repository.go | 5 +++- internal/repository/repository_test.go | 4 ++-- internal/restic/repository.go | 4 ++-- 20 files changed, 107 insertions(+), 72 deletions(-) diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index 38623c305..c44edae7e 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -324,7 +324,11 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args } if opts.CheckUnused { - for _, id := range chkr.UnusedBlobs(ctx) { + unused, err := chkr.UnusedBlobs(ctx) + if err != nil { + return err + } + for _, id := range unused { Verbosef("unused blob %v\n", id) errorsFound = true } diff --git a/cmd/restic/cmd_find.go b/cmd/restic/cmd_find.go index 77b651c5e..81df0ab98 100644 --- a/cmd/restic/cmd_find.go +++ b/cmd/restic/cmd_find.go @@ -439,7 +439,10 @@ func (f *Finder) packsToBlobs(ctx context.Context, packs []string) error { if err != errAllPacksFound { // try to resolve unknown pack ids from the index - packIDs = f.indexPacksToBlobs(ctx, packIDs) + packIDs, err = f.indexPacksToBlobs(ctx, packIDs) + if err != nil { + return err + } } if len(packIDs) > 0 { @@ -456,13 +459,13 @@ func (f *Finder) packsToBlobs(ctx context.Context, packs []string) error { return nil } -func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struct{}) map[string]struct{} { +func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struct{}) (map[string]struct{}, error) { wctx, cancel := context.WithCancel(ctx) defer cancel() // remember which packs were found in the index indexPackIDs := make(map[string]struct{}) - f.repo.Index().Each(wctx, func(pb restic.PackedBlob) { + err := f.repo.Index().Each(wctx, func(pb restic.PackedBlob) { idStr := pb.PackID.String() // keep entry in packIDs as Each() returns individual index entries matchingID := false @@ -481,6 +484,9 @@ func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struc indexPackIDs[idStr] = struct{}{} } }) + if err != nil { + return nil, err + } for id := range indexPackIDs { delete(packIDs, id) @@ -493,7 +499,7 @@ func (f *Finder) indexPacksToBlobs(ctx context.Context, packIDs map[string]struc } Warnf("some pack files are missing from the repository, getting their blobs from the repository index: %v\n\n", list) } - return packIDs + return packIDs, nil } func (f *Finder) findObjectPack(id string, t restic.BlobType) { diff --git a/cmd/restic/cmd_list.go b/cmd/restic/cmd_list.go index a3df0c98f..27f59b4ab 100644 --- a/cmd/restic/cmd_list.go +++ b/cmd/restic/cmd_list.go @@ -59,10 +59,9 @@ func runList(ctx context.Context, gopts GlobalOptions, args []string) error { if err != nil { return err } - idx.Each(ctx, func(blobs restic.PackedBlob) { + return idx.Each(ctx, func(blobs restic.PackedBlob) { Printf("%v %v\n", 
blobs.Type, blobs.ID) }) - return nil }) default: return errors.Fatal("invalid type") diff --git a/cmd/restic/cmd_recover.go b/cmd/restic/cmd_recover.go index cac29a60c..debaa4e5b 100644 --- a/cmd/restic/cmd_recover.go +++ b/cmd/restic/cmd_recover.go @@ -61,13 +61,13 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error { // tree. If it is not referenced, we have a root tree. trees := make(map[restic.ID]bool) - repo.Index().Each(ctx, func(blob restic.PackedBlob) { + err = repo.Index().Each(ctx, func(blob restic.PackedBlob) { if blob.Type == restic.TreeBlob { trees[blob.Blob.ID] = false } }) - if ctx.Err() != nil { - return ctx.Err() + if err != nil { + return err } Verbosef("load %d trees\n", len(trees)) diff --git a/cmd/restic/cmd_stats.go b/cmd/restic/cmd_stats.go index 2647d78e5..a7891e5b0 100644 --- a/cmd/restic/cmd_stats.go +++ b/cmd/restic/cmd_stats.go @@ -351,7 +351,10 @@ func statsDebug(ctx context.Context, repo restic.Repository) error { Warnf("File Type: %v\n%v\n", t, hist) } - hist := statsDebugBlobs(ctx, repo) + hist, err := statsDebugBlobs(ctx, repo) + if err != nil { + return err + } for _, t := range []restic.BlobType{restic.DataBlob, restic.TreeBlob} { Warnf("Blob Type: %v\n%v\n\n", t, hist[t]) } @@ -369,17 +372,17 @@ func statsDebugFileType(ctx context.Context, repo restic.Lister, tpe restic.File return hist, err } -func statsDebugBlobs(ctx context.Context, repo restic.Repository) [restic.NumBlobTypes]*sizeHistogram { +func statsDebugBlobs(ctx context.Context, repo restic.Repository) ([restic.NumBlobTypes]*sizeHistogram, error) { var hist [restic.NumBlobTypes]*sizeHistogram for i := 0; i < len(hist); i++ { hist[i] = newSizeHistogram(2 * chunker.MaxSize) } - repo.Index().Each(ctx, func(pb restic.PackedBlob) { + err := repo.Index().Each(ctx, func(pb restic.PackedBlob) { hist[pb.Type].Add(uint64(pb.Length)) }) - return hist + return hist, err } type sizeClass struct { diff --git a/cmd/restic/integration_helpers_test.go b/cmd/restic/integration_helpers_test.go index c87e1071e..e7a90dd56 100644 --- a/cmd/restic/integration_helpers_test.go +++ b/cmd/restic/integration_helpers_test.go @@ -252,11 +252,11 @@ func listTreePacks(gopts GlobalOptions, t *testing.T) restic.IDSet { rtest.OK(t, r.LoadIndex(ctx, nil)) treePacks := restic.NewIDSet() - r.Index().Each(ctx, func(pb restic.PackedBlob) { + rtest.OK(t, r.Index().Each(ctx, func(pb restic.PackedBlob) { if pb.Type == restic.TreeBlob { treePacks.Insert(pb.PackID) } - }) + })) return treePacks } @@ -280,11 +280,11 @@ func removePacksExcept(gopts GlobalOptions, t testing.TB, keep restic.IDSet, rem rtest.OK(t, r.LoadIndex(ctx, nil)) treePacks := restic.NewIDSet() - r.Index().Each(ctx, func(pb restic.PackedBlob) { + rtest.OK(t, r.Index().Each(ctx, func(pb restic.PackedBlob) { if pb.Type == restic.TreeBlob { treePacks.Insert(pb.PackID) } - }) + })) // remove all packs containing data blobs rtest.OK(t, r.List(ctx, restic.PackFile, func(id restic.ID, size int64) error { diff --git a/internal/checker/checker.go b/internal/checker/checker.go index 28f55ce3a..1057341bc 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -106,9 +106,9 @@ func (c *Checker) LoadSnapshots(ctx context.Context) error { return err } -func computePackTypes(ctx context.Context, idx restic.MasterIndex) map[restic.ID]restic.BlobType { +func computePackTypes(ctx context.Context, idx restic.MasterIndex) (map[restic.ID]restic.BlobType, error) { packs := make(map[restic.ID]restic.BlobType) - idx.Each(ctx, func(pb restic.PackedBlob) { + 
err := idx.Each(ctx, func(pb restic.PackedBlob) { tpe, exists := packs[pb.PackID] if exists { if pb.Type != tpe { @@ -119,7 +119,7 @@ func computePackTypes(ctx context.Context, idx restic.MasterIndex) map[restic.ID } packs[pb.PackID] = tpe }) - return packs + return packs, err } // LoadIndex loads all index files. @@ -169,7 +169,7 @@ func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []e debug.Log("process blobs") cnt := 0 - index.Each(ctx, func(blob restic.PackedBlob) { + err = index.Each(ctx, func(blob restic.PackedBlob) { cnt++ if _, ok := packToIndex[blob.PackID]; !ok { @@ -179,7 +179,7 @@ func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []e }) debug.Log("%d blobs processed", cnt) - return nil + return err }) if err != nil { errs = append(errs, err) @@ -193,8 +193,14 @@ func (c *Checker) LoadIndex(ctx context.Context, p *progress.Counter) (hints []e } // compute pack size using index entries - c.packs = pack.Size(ctx, c.masterIndex, false) - packTypes := computePackTypes(ctx, c.masterIndex) + c.packs, err = pack.Size(ctx, c.masterIndex, false) + if err != nil { + return hints, append(errs, err) + } + packTypes, err := computePackTypes(ctx, c.masterIndex) + if err != nil { + return hints, append(errs, err) + } debug.Log("checking for duplicate packs") for packID := range c.packs { @@ -484,7 +490,7 @@ func (c *Checker) checkTree(id restic.ID, tree *restic.Tree) (errs []error) { } // UnusedBlobs returns all blobs that have never been referenced. -func (c *Checker) UnusedBlobs(ctx context.Context) (blobs restic.BlobHandles) { +func (c *Checker) UnusedBlobs(ctx context.Context) (blobs restic.BlobHandles, err error) { if !c.trackUnused { panic("only works when tracking blob references") } @@ -495,7 +501,7 @@ func (c *Checker) UnusedBlobs(ctx context.Context) (blobs restic.BlobHandles) { ctx, cancel := context.WithCancel(ctx) defer cancel() - c.repo.Index().Each(ctx, func(blob restic.PackedBlob) { + err = c.repo.Index().Each(ctx, func(blob restic.PackedBlob) { h := restic.BlobHandle{ID: blob.ID, Type: blob.Type} if !c.blobRefs.M.Has(h) { debug.Log("blob %v not referenced", h) @@ -503,7 +509,7 @@ func (c *Checker) UnusedBlobs(ctx context.Context) (blobs restic.BlobHandles) { } }) - return blobs + return blobs, err } // CountPacks returns the number of packs in the repository. 
diff --git a/internal/checker/checker_test.go b/internal/checker/checker_test.go index b0fa4e3e3..9746e9f5d 100644 --- a/internal/checker/checker_test.go +++ b/internal/checker/checker_test.go @@ -180,7 +180,8 @@ func TestUnreferencedBlobs(t *testing.T) { test.OKs(t, checkPacks(chkr)) test.OKs(t, checkStruct(chkr)) - blobs := chkr.UnusedBlobs(context.TODO()) + blobs, err := chkr.UnusedBlobs(context.TODO()) + test.OK(t, err) sort.Sort(blobs) test.Equals(t, unusedBlobsBySnapshot, blobs) diff --git a/internal/checker/testing.go b/internal/checker/testing.go index 9e949af02..d0014398f 100644 --- a/internal/checker/testing.go +++ b/internal/checker/testing.go @@ -43,7 +43,10 @@ func TestCheckRepo(t testing.TB, repo restic.Repository, skipStructure bool) { } // unused blobs - blobs := chkr.UnusedBlobs(context.TODO()) + blobs, err := chkr.UnusedBlobs(context.TODO()) + if err != nil { + t.Error(err) + } if len(blobs) > 0 { t.Errorf("unused blobs found: %v", blobs) } diff --git a/internal/index/index.go b/internal/index/index.go index 73128f7bb..1c20fe38d 100644 --- a/internal/index/index.go +++ b/internal/index/index.go @@ -218,7 +218,7 @@ func (idx *Index) AddToSupersedes(ids ...restic.ID) error { // Each passes all blobs known to the index to the callback fn. This blocks any // modification of the index. -func (idx *Index) Each(ctx context.Context, fn func(restic.PackedBlob)) { +func (idx *Index) Each(ctx context.Context, fn func(restic.PackedBlob)) error { idx.m.Lock() defer idx.m.Unlock() @@ -232,6 +232,7 @@ func (idx *Index) Each(ctx context.Context, fn func(restic.PackedBlob)) { return true }) } + return ctx.Err() } type EachByPackResult struct { diff --git a/internal/index/index_test.go b/internal/index/index_test.go index 78e4800ca..bafd95c48 100644 --- a/internal/index/index_test.go +++ b/internal/index/index_test.go @@ -339,7 +339,7 @@ func TestIndexUnserialize(t *testing.T) { rtest.Equals(t, oldIdx, idx.Supersedes()) - blobs := listPack(idx, exampleLookupTest.packID) + blobs := listPack(t, idx, exampleLookupTest.packID) if len(blobs) != len(exampleLookupTest.blobs) { t.Fatalf("expected %d blobs in pack, got %d", len(exampleLookupTest.blobs), len(blobs)) } @@ -356,12 +356,12 @@ func TestIndexUnserialize(t *testing.T) { } } -func listPack(idx *index.Index, id restic.ID) (pbs []restic.PackedBlob) { - idx.Each(context.TODO(), func(pb restic.PackedBlob) { +func listPack(t testing.TB, idx *index.Index, id restic.ID) (pbs []restic.PackedBlob) { + rtest.OK(t, idx.Each(context.TODO(), func(pb restic.PackedBlob) { if pb.PackID.Equal(id) { pbs = append(pbs, pb) } - }) + })) return pbs } diff --git a/internal/index/master_index.go b/internal/index/master_index.go index 9833f9a55..d99a3434d 100644 --- a/internal/index/master_index.go +++ b/internal/index/master_index.go @@ -223,13 +223,16 @@ func (mi *MasterIndex) finalizeFullIndexes() []*Index { // Each runs fn on all blobs known to the index. When the context is cancelled, // the index iteration return immediately. This blocks any modification of the index. -func (mi *MasterIndex) Each(ctx context.Context, fn func(restic.PackedBlob)) { +func (mi *MasterIndex) Each(ctx context.Context, fn func(restic.PackedBlob)) error { mi.idxMutex.RLock() defer mi.idxMutex.RUnlock() for _, idx := range mi.idx { - idx.Each(ctx, fn) + if err := idx.Each(ctx, fn); err != nil { + return err + } } + return nil } // MergeFinalIndexes merges all final indexes together. 
@@ -429,10 +432,6 @@ func (mi *MasterIndex) ListPacks(ctx context.Context, packs restic.IDSet) <-chan defer close(out) // only resort a part of the index to keep the memory overhead bounded for i := byte(0); i < 16; i++ { - if ctx.Err() != nil { - return - } - packBlob := make(map[restic.ID][]restic.Blob) for pack := range packs { if pack[0]&0xf == i { @@ -442,11 +441,14 @@ func (mi *MasterIndex) ListPacks(ctx context.Context, packs restic.IDSet) <-chan if len(packBlob) == 0 { continue } - mi.Each(ctx, func(pb restic.PackedBlob) { + err := mi.Each(ctx, func(pb restic.PackedBlob) { if packs.Has(pb.PackID) && pb.PackID[0]&0xf == i { packBlob[pb.PackID] = append(packBlob[pb.PackID], pb.Blob) } }) + if err != nil { + return + } // pass on packs for packID, pbs := range packBlob { diff --git a/internal/index/master_index_test.go b/internal/index/master_index_test.go index dcf6a94f6..fe0364c61 100644 --- a/internal/index/master_index_test.go +++ b/internal/index/master_index_test.go @@ -166,9 +166,9 @@ func TestMasterMergeFinalIndexes(t *testing.T) { rtest.Equals(t, 1, idxCount) blobCount := 0 - mIdx.Each(context.TODO(), func(pb restic.PackedBlob) { + rtest.OK(t, mIdx.Each(context.TODO(), func(pb restic.PackedBlob) { blobCount++ - }) + })) rtest.Equals(t, 2, blobCount) blobs := mIdx.Lookup(bhInIdx1) @@ -198,9 +198,9 @@ func TestMasterMergeFinalIndexes(t *testing.T) { rtest.Equals(t, []restic.PackedBlob{blob2}, blobs) blobCount = 0 - mIdx.Each(context.TODO(), func(pb restic.PackedBlob) { + rtest.OK(t, mIdx.Each(context.TODO(), func(pb restic.PackedBlob) { blobCount++ - }) + })) rtest.Equals(t, 2, blobCount) } @@ -319,9 +319,9 @@ func BenchmarkMasterIndexEach(b *testing.B) { for i := 0; i < b.N; i++ { entries := 0 - mIdx.Each(context.TODO(), func(pb restic.PackedBlob) { + rtest.OK(b, mIdx.Each(context.TODO(), func(pb restic.PackedBlob) { entries++ - }) + })) } } diff --git a/internal/pack/pack.go b/internal/pack/pack.go index cd118ab03..53631a6fb 100644 --- a/internal/pack/pack.go +++ b/internal/pack/pack.go @@ -389,10 +389,10 @@ func CalculateHeaderSize(blobs []restic.Blob) int { // If onlyHdr is set to true, only the size of the header is returned // Note that this function only gives correct sizes, if there are no // duplicates in the index. 
-func Size(ctx context.Context, mi restic.MasterIndex, onlyHdr bool) map[restic.ID]int64 { +func Size(ctx context.Context, mi restic.MasterIndex, onlyHdr bool) (map[restic.ID]int64, error) { packSize := make(map[restic.ID]int64) - mi.Each(ctx, func(blob restic.PackedBlob) { + err := mi.Each(ctx, func(blob restic.PackedBlob) { size, ok := packSize[blob.PackID] if !ok { size = headerSize @@ -403,5 +403,5 @@ func Size(ctx context.Context, mi restic.MasterIndex, onlyHdr bool) map[restic.I packSize[blob.PackID] = size + int64(CalculateEntrySize(blob.Blob)) }) - return packSize + return packSize, err } diff --git a/internal/repository/prune.go b/internal/repository/prune.go index 39eb30317..77811e321 100644 --- a/internal/repository/prune.go +++ b/internal/repository/prune.go @@ -124,14 +124,14 @@ func PlanPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, g blobCount := keepBlobs.Len() // when repacking, we do not want to keep blobs which are // already contained in kept packs, so delete them from keepBlobs - repo.Index().Each(ctx, func(blob restic.PackedBlob) { + err := repo.Index().Each(ctx, func(blob restic.PackedBlob) { if plan.removePacks.Has(blob.PackID) || plan.repackPacks.Has(blob.PackID) { return } keepBlobs.Delete(blob.BlobHandle) }) - if ctx.Err() != nil { - return nil, ctx.Err() + if err != nil { + return nil, err } if keepBlobs.Len() < blobCount/2 { @@ -155,7 +155,7 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re // iterate over all blobs in index to find out which blobs are duplicates // The counter in usedBlobs describes how many instances of the blob exist in the repository index // Thus 0 == blob is missing, 1 == blob exists once, >= 2 == duplicates exist - idx.Each(ctx, func(blob restic.PackedBlob) { + err := idx.Each(ctx, func(blob restic.PackedBlob) { bh := blob.BlobHandle count, ok := usedBlobs[bh] if ok { @@ -169,8 +169,8 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re usedBlobs[bh] = count } }) - if ctx.Err() != nil { - return nil, nil, ctx.Err() + if err != nil { + return nil, nil, err } // Check if all used blobs have been found in index @@ -194,14 +194,18 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re indexPack := make(map[restic.ID]packInfo) // save computed pack header size - for pid, hdrSize := range pack.Size(ctx, idx, true) { + sz, err := pack.Size(ctx, idx, true) + if err != nil { + return nil, nil, err + } + for pid, hdrSize := range sz { // initialize tpe with NumBlobTypes to indicate it's not set indexPack[pid] = packInfo{tpe: restic.NumBlobTypes, usedSize: uint64(hdrSize)} } hasDuplicates := false // iterate over all blobs in index to generate packInfo - idx.Each(ctx, func(blob restic.PackedBlob) { + err = idx.Each(ctx, func(blob restic.PackedBlob) { ip := indexPack[blob.PackID] // Set blob type if not yet set @@ -246,8 +250,8 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re // update indexPack indexPack[blob.PackID] = ip }) - if ctx.Err() != nil { - return nil, nil, ctx.Err() + if err != nil { + return nil, nil, err } // if duplicate blobs exist, those will be set to either "used" or "unused": @@ -256,7 +260,7 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re // - if there are no used blobs in a pack, possibly mark duplicates as "unused" if hasDuplicates { // iterate again over all blobs in index (this is pretty cheap, all in-mem) - idx.Each(ctx, func(blob 
restic.PackedBlob) { + err = idx.Each(ctx, func(blob restic.PackedBlob) { bh := blob.BlobHandle count, ok := usedBlobs[bh] // skip non-duplicate, aka. normal blobs @@ -294,9 +298,9 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re // update indexPack indexPack[blob.PackID] = ip }) - } - if ctx.Err() != nil { - return nil, nil, ctx.Err() + if err != nil { + return nil, nil, err + } } // Sanity check. If no duplicates exist, all blobs have value 1. After handling diff --git a/internal/repository/repair_index.go b/internal/repository/repair_index.go index 63e104132..a6e732b44 100644 --- a/internal/repository/repair_index.go +++ b/internal/repository/repair_index.go @@ -54,7 +54,10 @@ func RepairIndex(ctx context.Context, repo *Repository, opts RepairIndexOptions, if err != nil { return err } - packSizeFromIndex = pack.Size(ctx, repo.Index(), false) + packSizeFromIndex, err = pack.Size(ctx, repo.Index(), false) + if err != nil { + return err + } } printer.P("getting pack files to read...\n") diff --git a/internal/repository/repair_pack_test.go b/internal/repository/repair_pack_test.go index c5cdf5ed5..078017d21 100644 --- a/internal/repository/repair_pack_test.go +++ b/internal/repository/repair_pack_test.go @@ -17,7 +17,7 @@ import ( func listBlobs(repo restic.Repository) restic.BlobSet { blobs := restic.NewBlobSet() - repo.Index().Each(context.TODO(), func(pb restic.PackedBlob) { + _ = repo.Index().Each(context.TODO(), func(pb restic.PackedBlob) { blobs.Insert(pb.BlobHandle) }) return blobs diff --git a/internal/repository/repository.go b/internal/repository/repository.go index a43971266..cac1551c4 100644 --- a/internal/repository/repository.go +++ b/internal/repository/repository.go @@ -704,11 +704,14 @@ func (r *Repository) LoadIndex(ctx context.Context, p *progress.Counter) error { defer cancel() invalidIndex := false - r.idx.Each(ctx, func(blob restic.PackedBlob) { + err := r.idx.Each(ctx, func(blob restic.PackedBlob) { if blob.IsCompressed() { invalidIndex = true } }) + if err != nil { + return err + } if invalidIndex { return errors.New("index uses feature not supported by repository version 1") } diff --git a/internal/repository/repository_test.go b/internal/repository/repository_test.go index b013c4823..48a56a1fd 100644 --- a/internal/repository/repository_test.go +++ b/internal/repository/repository_test.go @@ -370,13 +370,13 @@ func testRepositoryIncrementalIndex(t *testing.T, version uint) { idx, err := loadIndex(context.TODO(), repo, id) rtest.OK(t, err) - idx.Each(context.TODO(), func(pb restic.PackedBlob) { + rtest.OK(t, idx.Each(context.TODO(), func(pb restic.PackedBlob) { if _, ok := packEntries[pb.PackID]; !ok { packEntries[pb.PackID] = make(map[restic.ID]struct{}) } packEntries[pb.PackID][id] = struct{}{} - }) + })) return nil }) if err != nil { diff --git a/internal/restic/repository.go b/internal/restic/repository.go index 89c54ffbb..7a3389e00 100644 --- a/internal/restic/repository.go +++ b/internal/restic/repository.go @@ -103,8 +103,8 @@ type MasterIndex interface { Lookup(BlobHandle) []PackedBlob // Each runs fn on all blobs known to the index. When the context is cancelled, - // the index iteration return immediately. This blocks any modification of the index. - Each(ctx context.Context, fn func(PackedBlob)) + // the index iteration returns immediately with ctx.Err(). This blocks any modification of the index. 
+ Each(ctx context.Context, fn func(PackedBlob)) error ListPacks(ctx context.Context, packs IDSet) <-chan PackBlobs Save(ctx context.Context, repo Repository, excludePacks IDSet, extraObsolete IDs, opts MasterIndexSaveOpts) error From 484dbb1cf49d6348ff0d35e0bf88126e851e63ba Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 30 Mar 2024 14:28:59 +0100 Subject: [PATCH 098/117] get rid of a few global variables --- cmd/restic/global.go | 2 +- cmd/restic/main.go | 3 +-- helpers/prepare-release/main.go | 6 +++--- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/cmd/restic/global.go b/cmd/restic/global.go index 5b21871dc..40083ad69 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -43,7 +43,7 @@ import ( "golang.org/x/term" ) -var version = "0.16.4-dev (compiled manually)" +const version = "0.16.4-dev (compiled manually)" // TimeFormat is the format used for all timestamps printed by restic. const TimeFormat = "2006-01-02 15:04:05" diff --git a/cmd/restic/main.go b/cmd/restic/main.go index 82517c31a..e847b8156 100644 --- a/cmd/restic/main.go +++ b/cmd/restic/main.go @@ -94,8 +94,6 @@ func needsPassword(cmd string) bool { } } -var logBuffer = bytes.NewBuffer(nil) - func tweakGoGC() { // lower GOGC from 100 to 50, unless it was manually overwritten by the user oldValue := godebug.SetGCPercent(50) @@ -108,6 +106,7 @@ func main() { tweakGoGC() // install custom global logger into a buffer, if an error occurs // we can show the logs + logBuffer := bytes.NewBuffer(nil) log.SetOutput(logBuffer) err := feature.Flag.Apply(os.Getenv("RESTIC_FEATURES"), func(s string) { diff --git a/helpers/prepare-release/main.go b/helpers/prepare-release/main.go index baf8aa2ba..703d85e70 100644 --- a/helpers/prepare-release/main.go +++ b/helpers/prepare-release/main.go @@ -303,7 +303,7 @@ func generateFiles() { } } -var versionPattern = `var version = ".*"` +var versionPattern = `const version = ".*"` const versionCodeFile = "cmd/restic/global.go" @@ -313,7 +313,7 @@ func updateVersion() { die("unable to write version to file: %v", err) } - newVersion := fmt.Sprintf("var version = %q", opts.Version) + newVersion := fmt.Sprintf("const version = %q", opts.Version) replace(versionCodeFile, versionPattern, newVersion) if len(uncommittedChanges("VERSION")) > 0 || len(uncommittedChanges(versionCodeFile)) > 0 { @@ -323,7 +323,7 @@ func updateVersion() { } func updateVersionDev() { - newVersion := fmt.Sprintf(`var version = "%s-dev (compiled manually)"`, opts.Version) + newVersion := fmt.Sprintf(`const version = "%s-dev (compiled manually)"`, opts.Version) replace(versionCodeFile, versionPattern, newVersion) msg("committing cmd/restic/global.go with dev version") From 5f263752d7f41bbec8e889e3e9bb27c5a20ad2ff Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 24 Apr 2024 20:42:30 +0200 Subject: [PATCH 099/117] init: also apply limiter for non-HTTP backend --- cmd/restic/global.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/restic/global.go b/cmd/restic/global.go index c93fb4bce..eded479ad 100644 --- a/cmd/restic/global.go +++ b/cmd/restic/global.go @@ -598,7 +598,7 @@ func innerOpen(ctx context.Context, s string, gopts GlobalOptions, opts options. 
var be backend.Backend if create { - be, err = factory.Create(ctx, cfg, rt, nil) + be, err = factory.Create(ctx, cfg, rt, lim) } else { be, err = factory.Open(ctx, cfg, rt, lim) } From 871ea1eaf3dcd8da4641401425a10953d8fbcd27 Mon Sep 17 00:00:00 2001 From: Altan Orhon Date: Mon, 18 Mar 2024 13:20:45 -0700 Subject: [PATCH 100/117] Add support for specifying --host via environment variable This commit adds support for specifying the `--host` option via the `RESTIC_HOST` environment variable. This is done by extending option processing in `cmd_backup.go` and for `restic.SnapshotFilter` in `find.go`. --- changelog/unreleased/issue-4733 | 9 +++++++++ cmd/restic/cmd_backup.go | 7 ++++++- cmd/restic/find.go | 19 +++++++++++++++++-- 3 files changed, 32 insertions(+), 3 deletions(-) create mode 100644 changelog/unreleased/issue-4733 diff --git a/changelog/unreleased/issue-4733 b/changelog/unreleased/issue-4733 new file mode 100644 index 000000000..250c2ba68 --- /dev/null +++ b/changelog/unreleased/issue-4733 @@ -0,0 +1,9 @@ +Enhancement: Allow specifying `--host` via environment variable + +Restic commands that operate on snapshots, such as `restic backup` and +`restic snapshots`, support the `--host` flag to specify the hostname for +grouoping snapshots. They now permit selecting the hostname via the +environment variable `RESTIC_HOST`. `--host` still takes precedence over the +environment variable. + +https://github.com/restic/restic/issues/4733 \ No newline at end of file diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index 8b2f1f808..d3e5a8546 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -114,7 +114,7 @@ func init() { f.BoolVar(&backupOptions.StdinCommand, "stdin-from-command", false, "interpret arguments as command to execute and store its stdout") f.Var(&backupOptions.Tags, "tag", "add `tags` for the new snapshot in the format `tag[,tag,...]` (can be specified multiple times)") f.UintVar(&backupOptions.ReadConcurrency, "read-concurrency", 0, "read `n` files concurrently (default: $RESTIC_READ_CONCURRENCY or 2)") - f.StringVarP(&backupOptions.Host, "host", "H", "", "set the `hostname` for the snapshot manually. To prevent an expensive rescan use the \"parent\" flag") + f.StringVarP(&backupOptions.Host, "host", "H", "", "set the `hostname` for the snapshot manually (default: $RESTIC_HOST). 
To prevent an expensive rescan use the \"parent\" flag") f.StringVar(&backupOptions.Host, "hostname", "", "set the `hostname` for the snapshot manually") err := f.MarkDeprecated("hostname", "use --host") if err != nil { @@ -137,6 +137,11 @@ func init() { // parse read concurrency from env, on error the default value will be used readConcurrency, _ := strconv.ParseUint(os.Getenv("RESTIC_READ_CONCURRENCY"), 10, 32) backupOptions.ReadConcurrency = uint(readConcurrency) + + // parse host from env, if not exists or empty the default value will be used + if host := os.Getenv("RESTIC_HOST"); host != "" { + backupOptions.Host = host + } } // filterExisting returns a slice of all existing items, or an error if no diff --git a/cmd/restic/find.go b/cmd/restic/find.go index a990b458d..7c28b3be5 100644 --- a/cmd/restic/find.go +++ b/cmd/restic/find.go @@ -2,6 +2,7 @@ package main import ( "context" + "os" "github.com/restic/restic/internal/restic" "github.com/spf13/pflag" @@ -14,17 +15,31 @@ func initMultiSnapshotFilter(flags *pflag.FlagSet, filt *restic.SnapshotFilter, if !addHostShorthand { hostShorthand = "" } - flags.StringArrayVarP(&filt.Hosts, "host", hostShorthand, nil, "only consider snapshots for this `host` (can be specified multiple times)") + flags.StringArrayVarP(&filt.Hosts, "host", hostShorthand, nil, "only consider snapshots for this `host` (can be specified multiple times) (default: $RESTIC_HOST)") flags.Var(&filt.Tags, "tag", "only consider snapshots including `tag[,tag,...]` (can be specified multiple times)") flags.StringArrayVar(&filt.Paths, "path", nil, "only consider snapshots including this (absolute) `path` (can be specified multiple times)") + + if len(filt.Hosts) == 0 { + // parse host from env, if not exists or empty the default value will be used + if host := os.Getenv("RESTIC_HOST"); host != "" { + filt.Hosts = []string{host} + } + } } // initSingleSnapshotFilter is used for commands that work on a single snapshot // MUST be combined with restic.FindFilteredSnapshot func initSingleSnapshotFilter(flags *pflag.FlagSet, filt *restic.SnapshotFilter) { - flags.StringArrayVarP(&filt.Hosts, "host", "H", nil, "only consider snapshots for this `host`, when snapshot ID \"latest\" is given (can be specified multiple times)") + flags.StringArrayVarP(&filt.Hosts, "host", "H", nil, "only consider snapshots for this `host`, when snapshot ID \"latest\" is given (can be specified multiple times) (default: $RESTIC_HOST)") flags.Var(&filt.Tags, "tag", "only consider snapshots including `tag[,tag,...]`, when snapshot ID \"latest\" is given (can be specified multiple times)") flags.StringArrayVar(&filt.Paths, "path", nil, "only consider snapshots including this (absolute) `path`, when snapshot ID \"latest\" is given (can be specified multiple times)") + + if len(filt.Hosts) == 0 { + // parse host from env, if not exists or empty the default value will be used + if host := os.Getenv("RESTIC_HOST"); host != "" { + filt.Hosts = []string{host} + } + } } // FindFilteredSnapshots yields Snapshots, either given explicitly by `snapshotIDs` or filtered from the list of all snapshots. 
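The change above boils down to a simple pattern: register the `--host` flag as usual, then seed its backing variable from `RESTIC_HOST` before the flag set is parsed, so that an explicit `--host` on the command line still wins. A minimal, self-contained sketch of that pattern follows; the flag set and program are illustrative only, not restic code, and only the flag name, shorthand and the `RESTIC_HOST` variable are taken from the patch.

package main

import (
	"fmt"
	"os"

	"github.com/spf13/pflag"
)

func main() {
	var hosts []string
	flags := pflag.NewFlagSet("example", pflag.ExitOnError)
	flags.StringArrayVarP(&hosts, "host", "H", nil, "only consider snapshots for this host")

	// Seed the default from the environment before parsing. If --host is
	// passed on the command line, Parse replaces this value, so the flag
	// takes precedence over the environment variable.
	if host := os.Getenv("RESTIC_HOST"); host != "" {
		hosts = []string{host}
	}

	_ = flags.Parse(os.Args[1:])
	fmt.Println(hosts)
}

Running the sketch as `RESTIC_HOST=mylaptop ./example` prints `[mylaptop]`, while `RESTIC_HOST=mylaptop ./example --host other` prints `[other]`, which matches the precedence described in the changelog entry.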
From 347e9d07657f358c297a21aabb6224c38502a08f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Wed, 24 Apr 2024 21:49:34 +0200 Subject: [PATCH 101/117] complete RESITC_HOST environment handling & test --- changelog/unreleased/issue-4733 | 3 +- cmd/restic/find.go | 16 ++++----- cmd/restic/find_test.go | 61 +++++++++++++++++++++++++++++++++ 3 files changed, 69 insertions(+), 11 deletions(-) create mode 100644 cmd/restic/find_test.go diff --git a/changelog/unreleased/issue-4733 b/changelog/unreleased/issue-4733 index 250c2ba68..1fc271587 100644 --- a/changelog/unreleased/issue-4733 +++ b/changelog/unreleased/issue-4733 @@ -6,4 +6,5 @@ grouoping snapshots. They now permit selecting the hostname via the environment variable `RESTIC_HOST`. `--host` still takes precedence over the environment variable. -https://github.com/restic/restic/issues/4733 \ No newline at end of file +https://github.com/restic/restic/issues/4733 +https://github.com/restic/restic/pull/4734 diff --git a/cmd/restic/find.go b/cmd/restic/find.go index 7c28b3be5..c7754d5d9 100644 --- a/cmd/restic/find.go +++ b/cmd/restic/find.go @@ -19,11 +19,9 @@ func initMultiSnapshotFilter(flags *pflag.FlagSet, filt *restic.SnapshotFilter, flags.Var(&filt.Tags, "tag", "only consider snapshots including `tag[,tag,...]` (can be specified multiple times)") flags.StringArrayVar(&filt.Paths, "path", nil, "only consider snapshots including this (absolute) `path` (can be specified multiple times)") - if len(filt.Hosts) == 0 { - // parse host from env, if not exists or empty the default value will be used - if host := os.Getenv("RESTIC_HOST"); host != "" { - filt.Hosts = []string{host} - } + // set default based on env if set + if host := os.Getenv("RESTIC_HOST"); host != "" { + filt.Hosts = []string{host} } } @@ -34,11 +32,9 @@ func initSingleSnapshotFilter(flags *pflag.FlagSet, filt *restic.SnapshotFilter) flags.Var(&filt.Tags, "tag", "only consider snapshots including `tag[,tag,...]`, when snapshot ID \"latest\" is given (can be specified multiple times)") flags.StringArrayVar(&filt.Paths, "path", nil, "only consider snapshots including this (absolute) `path`, when snapshot ID \"latest\" is given (can be specified multiple times)") - if len(filt.Hosts) == 0 { - // parse host from env, if not exists or empty the default value will be used - if host := os.Getenv("RESTIC_HOST"); host != "" { - filt.Hosts = []string{host} - } + // set default based on env if set + if host := os.Getenv("RESTIC_HOST"); host != "" { + filt.Hosts = []string{host} } } diff --git a/cmd/restic/find_test.go b/cmd/restic/find_test.go new file mode 100644 index 000000000..a98a14f04 --- /dev/null +++ b/cmd/restic/find_test.go @@ -0,0 +1,61 @@ +package main + +import ( + "testing" + + "github.com/restic/restic/internal/restic" + rtest "github.com/restic/restic/internal/test" + "github.com/spf13/pflag" +) + +func TestSnapshotFilter(t *testing.T) { + for _, test := range []struct { + name string + args []string + expected []string + env string + }{ + { + "no value", + []string{}, + nil, + "", + }, + { + "args only", + []string{"--host", "abc"}, + []string{"abc"}, + "", + }, + { + "env default", + []string{}, + []string{"def"}, + "def", + }, + { + "both", + []string{"--host", "abc"}, + []string{"abc"}, + "def", + }, + } { + t.Run(test.name, func(t *testing.T) { + t.Setenv("RESTIC_HOST", test.env) + + for _, mode := range []bool{false, true} { + set := pflag.NewFlagSet("test", pflag.PanicOnError) + flt := &restic.SnapshotFilter{} + if mode { + initMultiSnapshotFilter(set, flt, 
false) + } else { + initSingleSnapshotFilter(set, flt) + } + err := set.Parse(test.args) + rtest.OK(t, err) + + rtest.Equals(t, test.expected, flt.Hosts, "unexpected hosts") + } + }) + } +} From 78dbc5ec5871a9995a25abe7c091fc1a9f104417 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Fri, 6 Nov 2020 03:41:02 +0300 Subject: [PATCH 102/117] vss: Add initial support for extended options --- cmd/restic/cmd_backup.go | 13 +++++++++++-- internal/fs/fs_local_vss.go | 30 +++++++++++++++++++++++++++++- 2 files changed, 40 insertions(+), 3 deletions(-) diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index d3e5a8546..5329a928c 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -445,7 +445,16 @@ func findParentSnapshot(ctx context.Context, repo restic.ListerLoaderUnpacked, o } func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, term *termstatus.Terminal, args []string) error { - err := opts.Check(gopts, args) + var vsscfg fs.VSSConfig + var err error + + if runtime.GOOS == "windows" { + if vsscfg, err = fs.ParseVSSConfig(gopts.extended); err != nil { + return err + } + } + + err = opts.Check(gopts, args) if err != nil { return err } @@ -557,7 +566,7 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter } } - localVss := fs.NewLocalVss(errorHandler, messageHandler) + localVss := fs.NewLocalVss(errorHandler, messageHandler, vsscfg) defer localVss.DeleteSnapshots() targetFS = localVss } diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index aa3522aea..f68e2ff28 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -3,12 +3,40 @@ package fs import ( "os" "path/filepath" + "runtime" "strings" "sync" "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/options" ) +// VSSConfig holds extended options of windows volume shadow copy service. +type VSSConfig struct { +} + +func init() { + if runtime.GOOS == "windows" { + options.Register("vss", VSSConfig{}) + } +} + +// NewVSSConfig returns a new VSSConfig with the default values filled in. +func NewVSSConfig() VSSConfig { + return VSSConfig{} +} + +// ParseVSSConfig parses a VSS extended options to VSSConfig struct. +func ParseVSSConfig(o options.Options) (VSSConfig, error) { + cfg := NewVSSConfig() + o = o.Extract("vss") + if err := o.Apply("vss", &cfg); err != nil { + return VSSConfig{}, err + } + + return cfg, nil +} + // ErrorHandler is used to report errors via callback type ErrorHandler func(item string, err error) error @@ -31,7 +59,7 @@ var _ FS = &LocalVss{} // NewLocalVss creates a new wrapper around the windows filesystem using volume // shadow copy service to access locked files. -func NewLocalVss(msgError ErrorHandler, msgMessage MessageHandler) *LocalVss { +func NewLocalVss(msgError ErrorHandler, msgMessage MessageHandler, cfg VSSConfig) *LocalVss { return &LocalVss{ FS: Local{}, snapshots: make(map[string]VssSnapshot), From 7470e5356e04424cb891bd6cdea9071a005f30dd Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Fri, 6 Nov 2020 03:41:02 +0300 Subject: [PATCH 103/117] vss: Add "timeout" option Changing multiple "callAsyncFunctionAndWait" with fixed timeout to calculated timeout based on deadline. 
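Put differently, instead of handing every VSS call the same fixed number of milliseconds, the change fixes a single deadline up front and gives each subsequent wait only the time that is still left, failing early once the deadline has passed. A rough, self-contained sketch of that pattern follows; `waitStep` and `runWithDeadline` are illustrative stand-ins rather than functions from this patch, and only the step names are taken from it.

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitStep stands in for an asynchronous VSS call that is handed a time budget,
// similar in spirit to callAsyncFunctionAndWait in the patch below.
func waitStep(name string, budget time.Duration) error {
	fmt.Printf("%s may spend at most %v\n", name, budget.Round(time.Millisecond))
	return nil
}

// runWithDeadline derives each step's budget from one overall deadline instead
// of granting every step the same fixed timeout.
func runWithDeadline(total time.Duration, steps []string) error {
	deadline := time.Now().Add(total)
	for _, name := range steps {
		remaining := time.Until(deadline)
		if remaining <= 0 {
			return errors.New(name + ": deadline exceeded")
		}
		if err := waitStep(name, remaining); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	_ = runWithDeadline(120*time.Second, []string{"GatherWriterMetadata", "PrepareForBackup", "DoSnapshotSet"})
}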
--- internal/fs/fs_local_vss.go | 10 ++++++++-- internal/fs/vss.go | 4 +++- internal/fs/vss_windows.go | 37 +++++++++++++++++++++++++------------ 3 files changed, 36 insertions(+), 15 deletions(-) diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index f68e2ff28..1f6001782 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -6,6 +6,7 @@ import ( "runtime" "strings" "sync" + "time" "github.com/restic/restic/internal/errors" "github.com/restic/restic/internal/options" @@ -13,6 +14,7 @@ import ( // VSSConfig holds extended options of windows volume shadow copy service. type VSSConfig struct { + Timeout time.Duration `option:"timeout" help:"time that the VSS can spend creating snapshots before timing out"` } func init() { @@ -23,7 +25,9 @@ func init() { // NewVSSConfig returns a new VSSConfig with the default values filled in. func NewVSSConfig() VSSConfig { - return VSSConfig{} + return VSSConfig{ + Timeout: time.Second * 120, + } } // ParseVSSConfig parses a VSS extended options to VSSConfig struct. @@ -52,6 +56,7 @@ type LocalVss struct { mutex sync.RWMutex msgError ErrorHandler msgMessage MessageHandler + timeout time.Duration } // statically ensure that LocalVss implements FS. @@ -66,6 +71,7 @@ func NewLocalVss(msgError ErrorHandler, msgMessage MessageHandler, cfg VSSConfig failedSnapshots: make(map[string]struct{}), msgError: msgError, msgMessage: msgMessage, + timeout: cfg.Timeout, } } @@ -144,7 +150,7 @@ func (fs *LocalVss) snapshotPath(path string) string { vssVolume := volumeNameLower + string(filepath.Separator) fs.msgMessage("creating VSS snapshot for [%s]\n", vssVolume) - if snapshot, err := NewVssSnapshot(vssVolume, 120, fs.msgError); err != nil { + if snapshot, err := NewVssSnapshot(vssVolume, fs.timeout, fs.msgError); err != nil { _ = fs.msgError(vssVolume, errors.Errorf("failed to create snapshot for [%s]: %s", vssVolume, err)) fs.failedSnapshots[volumeNameLower] = struct{}{} diff --git a/internal/fs/vss.go b/internal/fs/vss.go index 5f0ea36d9..92143883d 100644 --- a/internal/fs/vss.go +++ b/internal/fs/vss.go @@ -4,6 +4,8 @@ package fs import ( + "time" + "github.com/restic/restic/internal/errors" ) @@ -34,7 +36,7 @@ func HasSufficientPrivilegesForVSS() error { // NewVssSnapshot creates a new vss snapshot. If creating the snapshots doesn't // finish within the timeout an error is returned. func NewVssSnapshot( - _ string, _ uint, _ ErrorHandler) (VssSnapshot, error) { + _ string, _ time.Duration, _ ErrorHandler) (VssSnapshot, error) { return VssSnapshot{}, errors.New("VSS snapshots are only supported on windows") } diff --git a/internal/fs/vss_windows.go b/internal/fs/vss_windows.go index d75567d25..4e7f10385 100644 --- a/internal/fs/vss_windows.go +++ b/internal/fs/vss_windows.go @@ -9,6 +9,7 @@ import ( "runtime" "strings" "syscall" + "time" "unsafe" ole "github.com/go-ole/go-ole" @@ -617,8 +618,13 @@ func (vssAsync *IVSSAsync) QueryStatus() (HRESULT, uint32) { // WaitUntilAsyncFinished waits until either the async call is finished or // the given timeout is reached. 
-func (vssAsync *IVSSAsync) WaitUntilAsyncFinished(millis uint32) error { - hresult := vssAsync.Wait(millis) +func (vssAsync *IVSSAsync) WaitUntilAsyncFinished(timeout time.Duration) error { + const maxTimeout = 2147483647 * time.Millisecond + if timeout > maxTimeout { + timeout = maxTimeout + } + + hresult := vssAsync.Wait(uint32(timeout.Milliseconds())) err := newVssErrorIfResultNotOK("Wait() failed", hresult) if err != nil { vssAsync.Cancel() @@ -677,7 +683,7 @@ type VssSnapshot struct { snapshotProperties VssSnapshotProperties snapshotDeviceObject string mountPointInfo map[string]MountPoint - timeoutInMillis uint32 + timeout time.Duration } // GetSnapshotDeviceObject returns root path to access the snapshot files @@ -730,7 +736,7 @@ func HasSufficientPrivilegesForVSS() error { // NewVssSnapshot creates a new vss snapshot. If creating the snapshots doesn't // finish within the timeout an error is returned. func NewVssSnapshot( - volume string, timeoutInSeconds uint, msgError ErrorHandler) (VssSnapshot, error) { + volume string, timeout time.Duration, msgError ErrorHandler) (VssSnapshot, error) { is64Bit, err := isRunningOn64BitWindows() if err != nil { @@ -744,7 +750,7 @@ func NewVssSnapshot( runtime.GOARCH)) } - timeoutInMillis := uint32(timeoutInSeconds * 1000) + deadline := time.Now().Add(timeout) oleIUnknown, err := initializeVssCOMInterface() if oleIUnknown != nil { @@ -796,7 +802,7 @@ func NewVssSnapshot( } err = callAsyncFunctionAndWait(iVssBackupComponents.GatherWriterMetadata, - "GatherWriterMetadata", timeoutInMillis) + "GatherWriterMetadata", deadline) if err != nil { iVssBackupComponents.Release() return VssSnapshot{}, err @@ -854,7 +860,7 @@ func NewVssSnapshot( } err = callAsyncFunctionAndWait(iVssBackupComponents.PrepareForBackup, "PrepareForBackup", - timeoutInMillis) + deadline) if err != nil { // After calling PrepareForBackup one needs to call AbortBackup() before releasing the VSS // instance for proper cleanup. @@ -865,7 +871,7 @@ func NewVssSnapshot( } err = callAsyncFunctionAndWait(iVssBackupComponents.DoSnapshotSet, "DoSnapshotSet", - timeoutInMillis) + deadline) if err != nil { iVssBackupComponents.AbortBackup() iVssBackupComponents.Release() @@ -901,7 +907,7 @@ func NewVssSnapshot( } return VssSnapshot{iVssBackupComponents, snapshotSetID, snapshotProperties, - snapshotProperties.GetSnapshotDeviceObject(), mountPointInfo, timeoutInMillis}, nil + snapshotProperties.GetSnapshotDeviceObject(), mountPointInfo, time.Until(deadline)}, nil } // Delete deletes the created snapshot. @@ -922,8 +928,10 @@ func (p *VssSnapshot) Delete() error { if p.iVssBackupComponents != nil { defer p.iVssBackupComponents.Release() + deadline := time.Now().Add(p.timeout) + err = callAsyncFunctionAndWait(p.iVssBackupComponents.BackupComplete, "BackupComplete", - p.timeoutInMillis) + deadline) if err != nil { return err } @@ -945,7 +953,7 @@ type asyncCallFunc func() (*IVSSAsync, error) // callAsyncFunctionAndWait calls an async functions and waits for it to either // finish or timeout. 
-func callAsyncFunctionAndWait(function asyncCallFunc, name string, timeoutInMillis uint32) error { +func callAsyncFunctionAndWait(function asyncCallFunc, name string, deadline time.Time) error { iVssAsync, err := function() if err != nil { return err @@ -955,7 +963,12 @@ func callAsyncFunctionAndWait(function asyncCallFunc, name string, timeoutInMill return newVssTextError(fmt.Sprintf("%s() returned nil", name)) } - err = iVssAsync.WaitUntilAsyncFinished(timeoutInMillis) + timeout := time.Until(deadline) + if timeout <= 0 { + return newVssTextError(fmt.Sprintf("%s() deadline exceeded", name)) + } + + err = iVssAsync.WaitUntilAsyncFinished(timeout) iVssAsync.Release() return err } From c4f67c00644ebb344b08e8038868674f7fee0981 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Fri, 6 Nov 2020 03:41:02 +0300 Subject: [PATCH 104/117] vss: Add volume filtering Add options to exclude all mountpoints and arbitrary volumes from snapshotting. --- internal/fs/fs_local_vss.go | 113 +++++++++++++++++++++++++++--------- internal/fs/vss.go | 8 ++- internal/fs/vss_windows.go | 78 +++++++++++++++++-------- 3 files changed, 148 insertions(+), 51 deletions(-) diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index 1f6001782..0e73092f2 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -14,7 +14,9 @@ import ( // VSSConfig holds extended options of windows volume shadow copy service. type VSSConfig struct { - Timeout time.Duration `option:"timeout" help:"time that the VSS can spend creating snapshots before timing out"` + ExcludeAllMountPoints bool `option:"excludeallmountpoints" help:"exclude mountpoints from snapshotting on all volumes"` + ExcludeVolumes string `option:"excludevolumes" help:"semicolon separated list of volumes to exclude from snapshotting (ex. 'c:\\;e:\\mnt;\\\\?\\Volume{...}')"` + Timeout time.Duration `option:"timeout" help:"time that the VSS can spend creating snapshot before timing out"` } func init() { @@ -47,31 +49,59 @@ type ErrorHandler func(item string, err error) error // MessageHandler is used to report errors/messages via callbacks. type MessageHandler func(msg string, args ...interface{}) +// VolumeFilter is used to filter volumes by it's mount point or GUID path. +type VolumeFilter func(volume string) bool + // LocalVss is a wrapper around the local file system which uses windows volume // shadow copy service (VSS) in a transparent way. type LocalVss struct { FS - snapshots map[string]VssSnapshot - failedSnapshots map[string]struct{} - mutex sync.RWMutex - msgError ErrorHandler - msgMessage MessageHandler - timeout time.Duration + snapshots map[string]VssSnapshot + failedSnapshots map[string]struct{} + mutex sync.RWMutex + msgError ErrorHandler + msgMessage MessageHandler + excludeAllMountPoints bool + excludeVolumes map[string]struct{} + timeout time.Duration } // statically ensure that LocalVss implements FS. var _ FS = &LocalVss{} +// parseMountPoints try to convert semicolon separated list of mount points +// to map of lowercased volume GUID pathes. Mountpoints already in volume +// GUID path format will be validated and normalized. 
+func parseMountPoints(list string, msgError ErrorHandler) (volumes map[string]struct{}) { + if list == "" { + return + } + for _, s := range strings.Split(list, ";") { + if v, err := GetVolumeNameForVolumeMountPoint(s); err != nil { + msgError(s, errors.Errorf("failed to parse vss.excludevolumes [%s]: %s", s, err)) + } else { + if volumes == nil { + volumes = make(map[string]struct{}) + } + volumes[strings.ToLower(v)] = struct{}{} + } + } + + return +} + // NewLocalVss creates a new wrapper around the windows filesystem using volume // shadow copy service to access locked files. func NewLocalVss(msgError ErrorHandler, msgMessage MessageHandler, cfg VSSConfig) *LocalVss { return &LocalVss{ - FS: Local{}, - snapshots: make(map[string]VssSnapshot), - failedSnapshots: make(map[string]struct{}), - msgError: msgError, - msgMessage: msgMessage, - timeout: cfg.Timeout, + FS: Local{}, + snapshots: make(map[string]VssSnapshot), + failedSnapshots: make(map[string]struct{}), + msgError: msgError, + msgMessage: msgMessage, + excludeAllMountPoints: cfg.ExcludeAllMountPoints, + excludeVolumes: parseMountPoints(cfg.ExcludeVolumes, msgError), + timeout: cfg.Timeout, } } @@ -112,6 +142,24 @@ func (fs *LocalVss) Lstat(name string) (os.FileInfo, error) { return os.Lstat(fs.snapshotPath(name)) } +// isMountPointExcluded is true if given mountpoint excluded by user. +func (fs *LocalVss) isMountPointExcluded(mountPoint string) bool { + if fs.excludeVolumes == nil { + return false + } + + volume, err := GetVolumeNameForVolumeMountPoint(mountPoint) + if err != nil { + fs.msgError(mountPoint, errors.Errorf("failed to get volume from mount point [%s]: %s", mountPoint, err)) + + return false + } + + _, ok := fs.excludeVolumes[strings.ToLower(volume)] + + return ok +} + // snapshotPath returns the path inside a VSS snapshots if it already exists. // If the path is not yet available as a snapshot, a snapshot is created. 
// If creation of a snapshot fails the file's original path is returned as @@ -148,23 +196,36 @@ func (fs *LocalVss) snapshotPath(path string) string { if !snapshotExists && !snapshotFailed { vssVolume := volumeNameLower + string(filepath.Separator) - fs.msgMessage("creating VSS snapshot for [%s]\n", vssVolume) - if snapshot, err := NewVssSnapshot(vssVolume, fs.timeout, fs.msgError); err != nil { - _ = fs.msgError(vssVolume, errors.Errorf("failed to create snapshot for [%s]: %s", - vssVolume, err)) + if fs.isMountPointExcluded(vssVolume) { + fs.msgMessage("snapshots for [%s] excluded by user\n", vssVolume) fs.failedSnapshots[volumeNameLower] = struct{}{} } else { - fs.snapshots[volumeNameLower] = snapshot - fs.msgMessage("successfully created snapshot for [%s]\n", vssVolume) - if len(snapshot.mountPointInfo) > 0 { - fs.msgMessage("mountpoints in snapshot volume [%s]:\n", vssVolume) - for mp, mpInfo := range snapshot.mountPointInfo { - info := "" - if !mpInfo.IsSnapshotted() { - info = " (not snapshotted)" + fs.msgMessage("creating VSS snapshot for [%s]\n", vssVolume) + + var filter VolumeFilter + if !fs.excludeAllMountPoints { + filter = func(volume string) bool { + return !fs.isMountPointExcluded(volume) + } + } + + if snapshot, err := NewVssSnapshot(vssVolume, fs.timeout, filter, fs.msgError); err != nil { + fs.msgError(vssVolume, errors.Errorf("failed to create snapshot for [%s]: %s", + vssVolume, err)) + fs.failedSnapshots[volumeNameLower] = struct{}{} + } else { + fs.snapshots[volumeNameLower] = snapshot + fs.msgMessage("successfully created snapshot for [%s]\n", vssVolume) + if len(snapshot.mountPointInfo) > 0 { + fs.msgMessage("mountpoints in snapshot volume [%s]:\n", vssVolume) + for mp, mpInfo := range snapshot.mountPointInfo { + info := "" + if !mpInfo.IsSnapshotted() { + info = " (not snapshotted)" + } + fs.msgMessage(" - %s%s\n", mp, info) } - fs.msgMessage(" - %s%s\n", mp, info) } } } diff --git a/internal/fs/vss.go b/internal/fs/vss.go index 92143883d..838bdf79b 100644 --- a/internal/fs/vss.go +++ b/internal/fs/vss.go @@ -33,10 +33,16 @@ func HasSufficientPrivilegesForVSS() error { return errors.New("VSS snapshots are only supported on windows") } +// GetVolumeNameForVolumeMountPoint clear input parameter +// and calls the equivalent windows api. +func GetVolumeNameForVolumeMountPoint(mountPoint string) (string, error) { + return mountPoint, nil +} + // NewVssSnapshot creates a new vss snapshot. If creating the snapshots doesn't // finish within the timeout an error is returned. func NewVssSnapshot( - _ string, _ time.Duration, _ ErrorHandler) (VssSnapshot, error) { + _ string, _ time.Duration, _ VolumeFilter, _ ErrorHandler) (VssSnapshot, error) { return VssSnapshot{}, errors.New("VSS snapshots are only supported on windows") } diff --git a/internal/fs/vss_windows.go b/internal/fs/vss_windows.go index 4e7f10385..4ed289366 100644 --- a/internal/fs/vss_windows.go +++ b/internal/fs/vss_windows.go @@ -733,10 +733,33 @@ func HasSufficientPrivilegesForVSS() error { return err } +// GetVolumeNameForVolumeMountPoint clear input parameter +// and calls the equivalent windows api. 
+func GetVolumeNameForVolumeMountPoint(mountPoint string) (string, error) { + if mountPoint != "" && mountPoint[len(mountPoint)-1] != filepath.Separator { + mountPoint += string(filepath.Separator) + } + + mountPointPointer, err := syscall.UTF16PtrFromString(mountPoint) + if err != nil { + return mountPoint, err + } + + // A reasonable size for the buffer to accommodate the largest possible + // volume GUID path is 50 characters. + volumeNameBuffer := make([]uint16, 50) + if err := windows.GetVolumeNameForVolumeMountPoint( + mountPointPointer, &volumeNameBuffer[0], 50); err != nil { + return mountPoint, err + } + + return syscall.UTF16ToString(volumeNameBuffer), nil +} + // NewVssSnapshot creates a new vss snapshot. If creating the snapshots doesn't // finish within the timeout an error is returned. func NewVssSnapshot( - volume string, timeout time.Duration, msgError ErrorHandler) (VssSnapshot, error) { + volume string, timeout time.Duration, filter VolumeFilter, msgError ErrorHandler) (VssSnapshot, error) { is64Bit, err := isRunningOn64BitWindows() if err != nil { @@ -828,35 +851,42 @@ func NewVssSnapshot( return VssSnapshot{}, err } - mountPoints, err := enumerateMountedFolders(volume) - if err != nil { - iVssBackupComponents.Release() - return VssSnapshot{}, newVssTextError(fmt.Sprintf( - "failed to enumerate mount points for volume %s: %s", volume, err)) - } - mountPointInfo := make(map[string]MountPoint) - for _, mountPoint := range mountPoints { - // ensure every mountpoint is available even without a valid - // snapshot because we need to consider this when backing up files - mountPointInfo[mountPoint] = MountPoint{isSnapshotted: false} - - if isSupported, err := iVssBackupComponents.IsVolumeSupported(mountPoint); err != nil { - continue - } else if !isSupported { - continue - } - - var mountPointSnapshotSetID ole.GUID - err := iVssBackupComponents.AddToSnapshotSet(mountPoint, &mountPointSnapshotSetID) + // if filter==nil just don't process mount points for this volume at all + if filter != nil { + mountPoints, err := enumerateMountedFolders(volume) if err != nil { iVssBackupComponents.Release() - return VssSnapshot{}, err + + return VssSnapshot{}, newVssTextError(fmt.Sprintf( + "failed to enumerate mount points for volume %s: %s", volume, err)) } - mountPointInfo[mountPoint] = MountPoint{isSnapshotted: true, - snapshotSetID: mountPointSnapshotSetID} + for _, mountPoint := range mountPoints { + // ensure every mountpoint is available even without a valid + // snapshot because we need to consider this when backing up files + mountPointInfo[mountPoint] = MountPoint{isSnapshotted: false} + + if !filter(mountPoint) { + continue + } else if isSupported, err := iVssBackupComponents.IsVolumeSupported(mountPoint); err != nil { + continue + } else if !isSupported { + continue + } + + var mountPointSnapshotSetID ole.GUID + err := iVssBackupComponents.AddToSnapshotSet(mountPoint, &mountPointSnapshotSetID) + if err != nil { + iVssBackupComponents.Release() + + return VssSnapshot{}, err + } + + mountPointInfo[mountPoint] = MountPoint{isSnapshotted: true, + snapshotSetID: mountPointSnapshotSetID} + } } err = callAsyncFunctionAndWait(iVssBackupComponents.PrepareForBackup, "PrepareForBackup", From 9182e6bab55c87703d11987c19b9fdb463aa6a74 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Fri, 6 Nov 2020 06:18:30 +0300 Subject: [PATCH 105/117] vss: Update docs and changelog --- changelog/unreleased/pull-3067 | 18 ++++++++++++++++++ doc/040_backup.rst | 18 
++++++++++++++++++ 2 files changed, 36 insertions(+) create mode 100644 changelog/unreleased/pull-3067 diff --git a/changelog/unreleased/pull-3067 b/changelog/unreleased/pull-3067 new file mode 100644 index 000000000..a56c045fa --- /dev/null +++ b/changelog/unreleased/pull-3067 @@ -0,0 +1,18 @@ +Enhancement: Add options to configure Windows Shadow Copy Service + +Restic always used 120 sec. timeout and unconditionally created VSS snapshots +for all volume mount points on disk. Now this behavior can be fine-tuned by +new options, like exclude user specific volumes and mount points or completely +disable auto snapshotting of volume mount points. + +For example: + + restic backup --use-fs-snapshot -o vss.timeout=5m -o vss.excludeallmountpoints=true + +changes timeout to five minutes and disable snapshotting of mount points on all volumes, and + + restic backup --use-fs-snapshot -o vss.excludevolumes="d:\;c:\mnt\;\\?\Volume{e2e0315d-9066-4f97-8343-eb5659b35762}" + +excludes drive `D:`, mount point `C:\MNT` and specific volume from VSS snapshotting. + +https://github.com/restic/restic/pull/3067 diff --git a/doc/040_backup.rst b/doc/040_backup.rst index d0bd4b2e2..d1bb39f96 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -56,6 +56,24 @@ snapshot for each volume that contains files to backup. Files are read from the VSS snapshot instead of the regular filesystem. This allows to backup files that are exclusively locked by another process during the backup. +You can use three additional options to change VSS behaviour: + + * ``-o vss.timeout`` specifies timeout for VSS snapshot creation, the default value is 120 seconds + * ``-o vss.excludeallmountpoints`` disable auto snapshotting of all volume mount points + * ``-o vss.excludevolumes`` allows excluding specific volumes or volume mount points from snapshotting + +E.g., 2.5 minutes timeout with mount points snapshotting disabled can be specified as + +.. code-block:: console + + -o vss.timeout=2m30s -o vss.excludeallmountpoints=true + +and excluding drive ``D:\``, mount point ``C:\mnt`` and volume ``\\?\Volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}\`` as + +.. code-block:: console + + -o vss.excludevolumes="d:;c:\MNT\;\\?\volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}" + By default VSS ignores Outlook OST files. This is not a restriction of restic but the default Windows VSS configuration. 
The files not to snapshot are configured in the Windows registry under the following key: From 9d3d915e2c9a349630ac9d9272962b3ef801db39 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Fri, 6 Nov 2020 22:36:48 +0300 Subject: [PATCH 106/117] vss: Add some tests --- internal/fs/fs_local_vss_test.go | 211 +++++++++++++++++++++++++++++++ 1 file changed, 211 insertions(+) create mode 100644 internal/fs/fs_local_vss_test.go diff --git a/internal/fs/fs_local_vss_test.go b/internal/fs/fs_local_vss_test.go new file mode 100644 index 000000000..ce07fee3c --- /dev/null +++ b/internal/fs/fs_local_vss_test.go @@ -0,0 +1,211 @@ +// +build windows + +package fs + +import ( + "fmt" + "regexp" + "strings" + "testing" + "time" + + "github.com/restic/restic/internal/options" +) + +func matchStrings(ptrs []string, strs []string) bool { + if len(ptrs) != len(strs) { + return false + } + + for i, p := range ptrs { + matched, err := regexp.MatchString(p, strs[i]) + if err != nil { + panic(err) + } + if !matched { + return false + } + } + + return true +} + +func matchMap(strs []string, m map[string]struct{}) bool { + if len(strs) != len(m) { + return false + } + + for _, s := range strs { + if _, ok := m[s]; !ok { + return false + } + } + + return true +} + +func TestVSSConfig(t *testing.T) { + type config struct { + excludeAllMountPoints bool + timeout time.Duration + } + setTests := []struct { + input options.Options + output config + }{ + { + options.Options{ + "vss.timeout": "6h38m42s", + }, + config{ + timeout: 23922000000000, + }, + }, + { + options.Options{ + "vss.excludeallmountpoints": "t", + }, + config{ + excludeAllMountPoints: true, + timeout: 120000000000, + }, + }, + { + options.Options{ + "vss.excludeallmountpoints": "0", + "vss.excludevolumes": "", + "vss.timeout": "120s", + }, + config{ + timeout: 120000000000, + }, + }, + } + for i, test := range setTests { + t.Run(fmt.Sprintf("test-%d", i), func(t *testing.T) { + cfg, err := ParseVSSConfig(test.input) + if err != nil { + t.Fatal(err) + } + + errorHandler := func(item string, err error) error { + t.Fatalf("unexpected error (%v)", err) + + return nil + } + messageHandler := func(msg string, args ...interface{}) { + t.Fatalf("unexpected message (%s)", fmt.Sprintf(msg, args)) + } + + dst := NewLocalVss(errorHandler, messageHandler, cfg) + + if dst.excludeAllMountPoints != test.output.excludeAllMountPoints || + dst.excludeVolumes != nil || dst.timeout != test.output.timeout { + t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v", test.output, dst) + } + }) + } +} + +func TestParseMountPoints(t *testing.T) { + volumeMatch := regexp.MustCompile(`^\\\\\?\\Volume\{[0-9a-f]{8}(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}\}\\$`) + + // It's not a good idea to test functions based on GetVolumeNameForVolumeMountPoint by calling + // GetVolumeNameForVolumeMountPoint itself, but we have restricted test environment: + // cannot manage volumes and can only be sure that the mount point C:\ exists + sysVolume, err := GetVolumeNameForVolumeMountPoint("C:") + if err != nil { + t.Fatal(err) + } + // We don't know a valid volume GUID path for C:\, but we'll at least check its format + if !volumeMatch.MatchString(sysVolume) { + t.Fatalf("invalid volume GUID path: %s", sysVolume) + } + sysVolumeMutated := strings.ToUpper(sysVolume[:len(sysVolume)-1]) + sysVolumeMatch := strings.ToLower(sysVolume) + + type check struct { + volume string + result bool + } + setTests := []struct { + input options.Options + output []string + checks []check + 
errors []string + }{ + { + options.Options{ + "vss.excludevolumes": `c:;c:\;` + sysVolume + `;` + sysVolumeMutated, + }, + []string{ + sysVolumeMatch, + }, + []check{ + {`c:\`, true}, + {`c:`, true}, + {sysVolume, true}, + {sysVolumeMutated, true}, + }, + []string{}, + }, + { + options.Options{ + "vss.excludevolumes": `z:\nonexistent;c:;c:\windows\;\\?\Volume{39b9cac2-bcdb-4d51-97c8-0d0677d607fb}\`, + }, + []string{ + sysVolumeMatch, + }, + []check{ + {`c:\windows\`, false}, + {`\\?\Volume{39b9cac2-bcdb-4d51-97c8-0d0677d607fb}\`, false}, + {`c:`, true}, + {``, false}, + }, + []string{ + `failed to parse vss\.excludevolumes \[z:\\nonexistent\]:.*`, + `failed to parse vss\.excludevolumes \[c:\\windows\\\]:.*`, + `failed to parse vss\.excludevolumes \[\\\\\?\\Volume\{39b9cac2-bcdb-4d51-97c8-0d0677d607fb\}\\\]:.*`, + `failed to get volume from mount point \[c:\\windows\\\]:.*`, + `failed to get volume from mount point \[\\\\\?\\Volume\{39b9cac2-bcdb-4d51-97c8-0d0677d607fb\}\\\]:.*`, + `failed to get volume from mount point \[\]:.*`, + }, + }, + } + + for i, test := range setTests { + t.Run(fmt.Sprintf("test-%d", i), func(t *testing.T) { + cfg, err := ParseVSSConfig(test.input) + if err != nil { + t.Fatal(err) + } + + var log []string + errorHandler := func(item string, err error) error { + log = append(log, strings.TrimSpace(err.Error())) + + return nil + } + messageHandler := func(msg string, args ...interface{}) { + t.Fatalf("unexpected message (%s)", fmt.Sprintf(msg, args)) + } + + dst := NewLocalVss(errorHandler, messageHandler, cfg) + + if !matchMap(test.output, dst.excludeVolumes) { + t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v", + test.output, dst.excludeVolumes) + } + + for _, c := range test.checks { + if dst.isMountPointExcluded(c.volume) != c.result { + t.Fatalf(`wrong check: isMountPointExcluded("%s") != %v`, c.volume, c.result) + } + } + + if !matchStrings(test.errors, log) { + t.Fatalf("wrong log, want:\n %#v\ngot:\n %#v", test.errors, log) + } + }) + } +} From 88c509e3e9c301d72e0aabf9abd3c7c13344b090 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Tue, 10 Nov 2020 06:48:05 +0300 Subject: [PATCH 107/117] vss: Change `ErrorHandler` signature We don't need `error` here: the only existing implementation of `ErrorHandler` always call `Backup.Error` and all implementations of `Backup.Error` always return nil. --- cmd/restic/cmd_backup.go | 4 ++-- internal/fs/fs_local_vss.go | 6 +++--- internal/fs/fs_local_vss_test.go | 8 ++------ 3 files changed, 7 insertions(+), 11 deletions(-) diff --git a/cmd/restic/cmd_backup.go b/cmd/restic/cmd_backup.go index 5329a928c..19b96e9b0 100644 --- a/cmd/restic/cmd_backup.go +++ b/cmd/restic/cmd_backup.go @@ -556,8 +556,8 @@ func runBackup(ctx context.Context, opts BackupOptions, gopts GlobalOptions, ter return err } - errorHandler := func(item string, err error) error { - return progressReporter.Error(item, err) + errorHandler := func(item string, err error) { + _ = progressReporter.Error(item, err) } messageHandler := func(msg string, args ...interface{}) { diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index 0e73092f2..230e14a1f 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -43,8 +43,8 @@ func ParseVSSConfig(o options.Options) (VSSConfig, error) { return cfg, nil } -// ErrorHandler is used to report errors via callback -type ErrorHandler func(item string, err error) error +// ErrorHandler is used to report errors via callback. 
+type ErrorHandler func(item string, err error) // MessageHandler is used to report errors/messages via callbacks. type MessageHandler func(msg string, args ...interface{}) @@ -114,7 +114,7 @@ func (fs *LocalVss) DeleteSnapshots() { for volumeName, snapshot := range fs.snapshots { if err := snapshot.Delete(); err != nil { - _ = fs.msgError(volumeName, errors.Errorf("failed to delete VSS snapshot: %s", err)) + fs.msgError(volumeName, errors.Errorf("failed to delete VSS snapshot: %s", err)) activeSnapshots[volumeName] = snapshot } } diff --git a/internal/fs/fs_local_vss_test.go b/internal/fs/fs_local_vss_test.go index ce07fee3c..6beb35b98 100644 --- a/internal/fs/fs_local_vss_test.go +++ b/internal/fs/fs_local_vss_test.go @@ -88,10 +88,8 @@ func TestVSSConfig(t *testing.T) { t.Fatal(err) } - errorHandler := func(item string, err error) error { + errorHandler := func(item string, err error) { t.Fatalf("unexpected error (%v)", err) - - return nil } messageHandler := func(msg string, args ...interface{}) { t.Fatalf("unexpected message (%s)", fmt.Sprintf(msg, args)) @@ -181,10 +179,8 @@ func TestParseMountPoints(t *testing.T) { } var log []string - errorHandler := func(item string, err error) error { + errorHandler := func(item string, err error) { log = append(log, strings.TrimSpace(err.Error())) - - return nil } messageHandler := func(msg string, args ...interface{}) { t.Fatalf("unexpected message (%s)", fmt.Sprintf(msg, args)) From 3bac1f0135f6c238e1ac90bfd14cb50eb83521c3 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Sun, 31 Jan 2021 00:34:41 +0300 Subject: [PATCH 108/117] vss: Fix issues reported by linters --- internal/fs/fs_local_vss.go | 4 +--- internal/fs/vss_windows.go | 43 +++++++++++++++++++++++-------------- 2 files changed, 28 insertions(+), 19 deletions(-) diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index 230e14a1f..5f55dcfd1 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -165,7 +165,6 @@ func (fs *LocalVss) isMountPointExcluded(mountPoint string) bool { // If creation of a snapshot fails the file's original path is returned as // a fallback. func (fs *LocalVss) snapshotPath(path string) string { - fixPath := fixpath(path) if strings.HasPrefix(fixPath, `\\?\UNC\`) { @@ -268,9 +267,8 @@ func (fs *LocalVss) snapshotPath(path string) string { snapshotPath = fs.Join(snapshot.GetSnapshotDeviceObject(), strings.TrimPrefix(fixPath, volumeName)) if snapshotPath == snapshot.GetSnapshotDeviceObject() { - snapshotPath = snapshotPath + string(filepath.Separator) + snapshotPath += string(filepath.Separator) } - } else { // no snapshot is available for the requested path: // -> try to backup without a snapshot diff --git a/internal/fs/vss_windows.go b/internal/fs/vss_windows.go index 4ed289366..424548a74 100644 --- a/internal/fs/vss_windows.go +++ b/internal/fs/vss_windows.go @@ -21,6 +21,7 @@ import ( type HRESULT uint // HRESULT constant values necessary for using VSS api. +//nolint:golint const ( S_OK HRESULT = 0x00000000 E_ACCESSDENIED HRESULT = 0x80070005 @@ -256,6 +257,7 @@ type IVssBackupComponents struct { } // IVssBackupComponentsVTable is the vtable for IVssBackupComponents. 
+// nolint:structcheck type IVssBackupComponentsVTable struct { ole.IUnknownVtbl getWriterComponentsCount uintptr @@ -415,7 +417,7 @@ func (vss *IVssBackupComponents) AddToSnapshotSet(volumeName string, idSnapshot panic(err) } - var result uintptr = 0 + var result uintptr if runtime.GOARCH == "386" { id := (*[4]uintptr)(unsafe.Pointer(ole.IID_NULL)) @@ -479,9 +481,9 @@ func (vss *IVssBackupComponents) DoSnapshotSet() (*IVSSAsync, error) { // DeleteSnapshots calls the equivalent VSS api. func (vss *IVssBackupComponents) DeleteSnapshots(snapshotID ole.GUID) (int32, ole.GUID, error) { - var deletedSnapshots int32 = 0 + var deletedSnapshots int32 var nondeletedSnapshotID ole.GUID - var result uintptr = 0 + var result uintptr if runtime.GOARCH == "386" { id := (*[4]uintptr)(unsafe.Pointer(&snapshotID)) @@ -505,7 +507,7 @@ func (vss *IVssBackupComponents) DeleteSnapshots(snapshotID ole.GUID) (int32, ol // GetSnapshotProperties calls the equivalent VSS api. func (vss *IVssBackupComponents) GetSnapshotProperties(snapshotID ole.GUID, properties *VssSnapshotProperties) error { - var result uintptr = 0 + var result uintptr if runtime.GOARCH == "386" { id := (*[4]uintptr)(unsafe.Pointer(&snapshotID)) @@ -528,8 +530,8 @@ func vssFreeSnapshotProperties(properties *VssSnapshotProperties) error { if err != nil { return err } - - proc.Call(uintptr(unsafe.Pointer(properties))) + // this function always succeeds and returns no value + _, _, _ = proc.Call(uintptr(unsafe.Pointer(properties))) return nil } @@ -544,6 +546,7 @@ func (vss *IVssBackupComponents) BackupComplete() (*IVSSAsync, error) { } // VssSnapshotProperties defines the properties of a VSS snapshot as part of the VSS api. +// nolint:structcheck type VssSnapshotProperties struct { snapshotID ole.GUID snapshotSetID ole.GUID @@ -700,7 +703,12 @@ func initializeVssCOMInterface() (*ole.IUnknown, error) { } // ensure COM is initialized before use - ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED) + if err = ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED); err != nil { + // CoInitializeEx returns 1 if COM is already initialized + if oleErr, ok := err.(*ole.OleError); !ok || oleErr.Code() != 1 { + return nil, err + } + } var oleIUnknown *ole.IUnknown result, _, _ := vssInstance.Call(uintptr(unsafe.Pointer(&oleIUnknown))) @@ -761,7 +769,6 @@ func GetVolumeNameForVolumeMountPoint(mountPoint string) (string, error) { func NewVssSnapshot( volume string, timeout time.Duration, filter VolumeFilter, msgError ErrorHandler) (VssSnapshot, error) { is64Bit, err := isRunningOn64BitWindows() - if err != nil { return VssSnapshot{}, newVssTextError(fmt.Sprintf( "Failed to detect windows architecture: %s", err.Error())) @@ -884,8 +891,10 @@ func NewVssSnapshot( return VssSnapshot{}, err } - mountPointInfo[mountPoint] = MountPoint{isSnapshotted: true, - snapshotSetID: mountPointSnapshotSetID} + mountPointInfo[mountPoint] = MountPoint{ + isSnapshotted: true, + snapshotSetID: mountPointSnapshotSetID, + } } } @@ -903,7 +912,7 @@ func NewVssSnapshot( err = callAsyncFunctionAndWait(iVssBackupComponents.DoSnapshotSet, "DoSnapshotSet", deadline) if err != nil { - iVssBackupComponents.AbortBackup() + _ = iVssBackupComponents.AbortBackup() iVssBackupComponents.Release() return VssSnapshot{}, err } @@ -911,13 +920,12 @@ func NewVssSnapshot( var snapshotProperties VssSnapshotProperties err = iVssBackupComponents.GetSnapshotProperties(snapshotSetID, &snapshotProperties) if err != nil { - iVssBackupComponents.AbortBackup() + _ = iVssBackupComponents.AbortBackup() 
iVssBackupComponents.Release() return VssSnapshot{}, err } for mountPoint, info := range mountPointInfo { - if !info.isSnapshotted { continue } @@ -936,8 +944,10 @@ func NewVssSnapshot( mountPointInfo[mountPoint] = info } - return VssSnapshot{iVssBackupComponents, snapshotSetID, snapshotProperties, - snapshotProperties.GetSnapshotDeviceObject(), mountPointInfo, time.Until(deadline)}, nil + return VssSnapshot{ + iVssBackupComponents, snapshotSetID, snapshotProperties, + snapshotProperties.GetSnapshotDeviceObject(), mountPointInfo, time.Until(deadline), + }, nil } // Delete deletes the created snapshot. @@ -968,7 +978,7 @@ func (p *VssSnapshot) Delete() error { if _, _, e := p.iVssBackupComponents.DeleteSnapshots(p.snapshotID); e != nil { err = newVssTextError(fmt.Sprintf("Failed to delete snapshot: %s", e.Error())) - p.iVssBackupComponents.AbortBackup() + _ = p.iVssBackupComponents.AbortBackup() if err != nil { return err } @@ -1079,6 +1089,7 @@ func enumerateMountedFolders(volume string) ([]string, error) { return mountedFolders, nil } + // nolint:errcheck defer windows.FindVolumeMountPointClose(handle) volumeMountPoint := syscall.UTF16ToString(volumeMountPointBuffer) From bb0f93ef3d3ac6dd8b86928571adf2583dd443b7 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Mon, 22 Mar 2021 23:31:19 +0300 Subject: [PATCH 109/117] vss: Add "provider" option --- internal/fs/fs_local_vss.go | 5 +- internal/fs/vss.go | 2 +- internal/fs/vss_windows.go | 168 +++++++++++++++++++++++++++++++++--- 3 files changed, 160 insertions(+), 15 deletions(-) diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index 5f55dcfd1..de30bcedb 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -17,6 +17,7 @@ type VSSConfig struct { ExcludeAllMountPoints bool `option:"excludeallmountpoints" help:"exclude mountpoints from snapshotting on all volumes"` ExcludeVolumes string `option:"excludevolumes" help:"semicolon separated list of volumes to exclude from snapshotting (ex. 'c:\\;e:\\mnt;\\\\?\\Volume{...}')"` Timeout time.Duration `option:"timeout" help:"time that the VSS can spend creating snapshot before timing out"` + Provider string `option:"provider" help:"VSS provider identifier which will be used for snapshotting"` } func init() { @@ -64,6 +65,7 @@ type LocalVss struct { excludeAllMountPoints bool excludeVolumes map[string]struct{} timeout time.Duration + provider string } // statically ensure that LocalVss implements FS. @@ -102,6 +104,7 @@ func NewLocalVss(msgError ErrorHandler, msgMessage MessageHandler, cfg VSSConfig excludeAllMountPoints: cfg.ExcludeAllMountPoints, excludeVolumes: parseMountPoints(cfg.ExcludeVolumes, msgError), timeout: cfg.Timeout, + provider: cfg.Provider, } } @@ -209,7 +212,7 @@ func (fs *LocalVss) snapshotPath(path string) string { } } - if snapshot, err := NewVssSnapshot(vssVolume, fs.timeout, filter, fs.msgError); err != nil { + if snapshot, err := NewVssSnapshot(fs.provider, vssVolume, fs.timeout, filter, fs.msgError); err != nil { fs.msgError(vssVolume, errors.Errorf("failed to create snapshot for [%s]: %s", vssVolume, err)) fs.failedSnapshots[volumeNameLower] = struct{}{} diff --git a/internal/fs/vss.go b/internal/fs/vss.go index 838bdf79b..a54475480 100644 --- a/internal/fs/vss.go +++ b/internal/fs/vss.go @@ -41,7 +41,7 @@ func GetVolumeNameForVolumeMountPoint(mountPoint string) (string, error) { // NewVssSnapshot creates a new vss snapshot. 
If creating the snapshots doesn't // finish within the timeout an error is returned. -func NewVssSnapshot( +func NewVssSnapshot(_ string, _ string, _ time.Duration, _ VolumeFilter, _ ErrorHandler) (VssSnapshot, error) { return VssSnapshot{}, errors.New("VSS snapshots are only supported on windows") } diff --git a/internal/fs/vss_windows.go b/internal/fs/vss_windows.go index 424548a74..18aea419d 100644 --- a/internal/fs/vss_windows.go +++ b/internal/fs/vss_windows.go @@ -367,7 +367,7 @@ func (vss *IVssBackupComponents) convertToVSSAsync( } // IsVolumeSupported calls the equivalent VSS api. -func (vss *IVssBackupComponents) IsVolumeSupported(volumeName string) (bool, error) { +func (vss *IVssBackupComponents) IsVolumeSupported(providerID *ole.GUID, volumeName string) (bool, error) { volumeNamePointer, err := syscall.UTF16PtrFromString(volumeName) if err != nil { panic(err) @@ -377,7 +377,7 @@ func (vss *IVssBackupComponents) IsVolumeSupported(volumeName string) (bool, err var result uintptr if runtime.GOARCH == "386" { - id := (*[4]uintptr)(unsafe.Pointer(ole.IID_NULL)) + id := (*[4]uintptr)(unsafe.Pointer(providerID)) result, _, _ = syscall.Syscall9(vss.getVTable().isVolumeSupported, 7, uintptr(unsafe.Pointer(vss)), id[0], id[1], id[2], id[3], @@ -385,7 +385,7 @@ func (vss *IVssBackupComponents) IsVolumeSupported(volumeName string) (bool, err 0) } else { result, _, _ = syscall.Syscall6(vss.getVTable().isVolumeSupported, 4, - uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(ole.IID_NULL)), + uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(providerID)), uintptr(unsafe.Pointer(volumeNamePointer)), uintptr(unsafe.Pointer(&isSupportedRaw)), 0, 0) } @@ -411,7 +411,7 @@ func (vss *IVssBackupComponents) StartSnapshotSet() (ole.GUID, error) { } // AddToSnapshotSet calls the equivalent VSS api. -func (vss *IVssBackupComponents) AddToSnapshotSet(volumeName string, idSnapshot *ole.GUID) error { +func (vss *IVssBackupComponents) AddToSnapshotSet(volumeName string, providerID *ole.GUID, idSnapshot *ole.GUID) error { volumeNamePointer, err := syscall.UTF16PtrFromString(volumeName) if err != nil { panic(err) @@ -420,15 +420,15 @@ func (vss *IVssBackupComponents) AddToSnapshotSet(volumeName string, idSnapshot var result uintptr if runtime.GOARCH == "386" { - id := (*[4]uintptr)(unsafe.Pointer(ole.IID_NULL)) + id := (*[4]uintptr)(unsafe.Pointer(providerID)) result, _, _ = syscall.Syscall9(vss.getVTable().addToSnapshotSet, 7, - uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(volumeNamePointer)), id[0], id[1], - id[2], id[3], uintptr(unsafe.Pointer(idSnapshot)), 0, 0) + uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(volumeNamePointer)), + id[0], id[1], id[2], id[3], uintptr(unsafe.Pointer(idSnapshot)), 0, 0) } else { result, _, _ = syscall.Syscall6(vss.getVTable().addToSnapshotSet, 4, uintptr(unsafe.Pointer(vss)), uintptr(unsafe.Pointer(volumeNamePointer)), - uintptr(unsafe.Pointer(ole.IID_NULL)), uintptr(unsafe.Pointer(idSnapshot)), 0, 0) + uintptr(unsafe.Pointer(providerID)), uintptr(unsafe.Pointer(idSnapshot)), 0, 0) } return newVssErrorIfResultNotOK("AddToSnapshotSet() failed", HRESULT(result)) @@ -535,6 +535,13 @@ func vssFreeSnapshotProperties(properties *VssSnapshotProperties) error { return nil } +func vssFreeProviderProperties(p *VssProviderProperties) { + ole.CoTaskMemFree(uintptr(unsafe.Pointer(p.providerName))) + p.providerName = nil + ole.CoTaskMemFree(uintptr(unsafe.Pointer(p.providerVersion))) + p.providerName = nil +} + // BackupComplete calls the equivalent VSS api. 
func (vss *IVssBackupComponents) BackupComplete() (*IVSSAsync, error) { var oleIUnknown *ole.IUnknown @@ -563,6 +570,17 @@ type VssSnapshotProperties struct { status uint } +// VssProviderProperties defines the properties of a VSS provider as part of the VSS api. +// nolint:structcheck +type VssProviderProperties struct { + providerID ole.GUID + providerName *uint16 + providerType uint32 + providerVersion *uint16 + providerVersionID ole.GUID + classID ole.GUID +} + // GetSnapshotDeviceObject returns root path to access the snapshot files // and folders. func (p *VssSnapshotProperties) GetSnapshotDeviceObject() string { @@ -660,6 +678,75 @@ func (vssAsync *IVSSAsync) WaitUntilAsyncFinished(timeout time.Duration) error { return nil } +// UIID_IVSS_ADMIN defines the GUID of IVSSAdmin. +var ( + UIID_IVSS_ADMIN = ole.NewGUID("{77ED5996-2F63-11d3-8A39-00C04F72D8E3}") + CLSID_VSS_COORDINATOR = ole.NewGUID("{E579AB5F-1CC4-44b4-BED9-DE0991FF0623}") +) + +// IVSSAdmin VSS api interface. +type IVSSAdmin struct { + ole.IUnknown +} + +// IVSSAdminVTable is the vtable for IVSSAdmin. +// nolint:structcheck +type IVSSAdminVTable struct { + ole.IUnknownVtbl + registerProvider uintptr + unregisterProvider uintptr + queryProviders uintptr + abortAllSnapshotsInProgress uintptr +} + +// getVTable returns the vtable for IVSSAdmin. +func (vssAdmin *IVSSAdmin) getVTable() *IVSSAdminVTable { + return (*IVSSAdminVTable)(unsafe.Pointer(vssAdmin.RawVTable)) +} + +// QueryProviders calls the equivalent VSS api. +func (vssAdmin *IVSSAdmin) QueryProviders() (*IVssEnumObject, error) { + var enum *IVssEnumObject + + result, _, _ := syscall.Syscall(vssAdmin.getVTable().queryProviders, 2, + uintptr(unsafe.Pointer(vssAdmin)), uintptr(unsafe.Pointer(&enum)), 0) + + return enum, newVssErrorIfResultNotOK("QueryProviders() failed", HRESULT(result)) +} + +// IVssEnumObject VSS api interface. +type IVssEnumObject struct { + ole.IUnknown +} + +// IVssEnumObjectVTable is the vtable for IVssEnumObject. +// nolint:structcheck +type IVssEnumObjectVTable struct { + ole.IUnknownVtbl + next uintptr + skip uintptr + reset uintptr + clone uintptr +} + +// getVTable returns the vtable for IVssEnumObject. +func (vssEnum *IVssEnumObject) getVTable() *IVssEnumObjectVTable { + return (*IVssEnumObjectVTable)(unsafe.Pointer(vssEnum.RawVTable)) +} + +// Next calls the equivalent VSS api. +func (vssEnum *IVssEnumObject) Next(count uint, props unsafe.Pointer) (uint, error) { + var fetched uint32 + result, _, _ := syscall.Syscall6(vssEnum.getVTable().next, 4, + uintptr(unsafe.Pointer(vssEnum)), uintptr(count), uintptr(props), + uintptr(unsafe.Pointer(&fetched)), 0, 0) + if result == 1 { + return uint(fetched), nil + } + + return uint(fetched), newVssErrorIfResultNotOK("Next() failed", HRESULT(result)) +} + // MountPoint wraps all information of a snapshot of a mountpoint on a volume. type MountPoint struct { isSnapshotted bool @@ -766,7 +853,7 @@ func GetVolumeNameForVolumeMountPoint(mountPoint string) (string, error) { // NewVssSnapshot creates a new vss snapshot. If creating the snapshots doesn't // finish within the timeout an error is returned. 
-func NewVssSnapshot( +func NewVssSnapshot(provider string, volume string, timeout time.Duration, filter VolumeFilter, msgError ErrorHandler) (VssSnapshot, error) { is64Bit, err := isRunningOn64BitWindows() if err != nil { @@ -814,6 +901,12 @@ func NewVssSnapshot( iVssBackupComponents := (*IVssBackupComponents)(unsafe.Pointer(comInterface)) + providerID, err := getProviderID(provider) + if err != nil { + iVssBackupComponents.Release() + return VssSnapshot{}, err + } + if err := iVssBackupComponents.InitializeForBackup(); err != nil { iVssBackupComponents.Release() return VssSnapshot{}, err @@ -838,7 +931,7 @@ func NewVssSnapshot( return VssSnapshot{}, err } - if isSupported, err := iVssBackupComponents.IsVolumeSupported(volume); err != nil { + if isSupported, err := iVssBackupComponents.IsVolumeSupported(providerID, volume); err != nil { iVssBackupComponents.Release() return VssSnapshot{}, err } else if !isSupported { @@ -853,7 +946,7 @@ func NewVssSnapshot( return VssSnapshot{}, err } - if err := iVssBackupComponents.AddToSnapshotSet(volume, &snapshotSetID); err != nil { + if err := iVssBackupComponents.AddToSnapshotSet(volume, providerID, &snapshotSetID); err != nil { iVssBackupComponents.Release() return VssSnapshot{}, err } @@ -877,14 +970,14 @@ func NewVssSnapshot( if !filter(mountPoint) { continue - } else if isSupported, err := iVssBackupComponents.IsVolumeSupported(mountPoint); err != nil { + } else if isSupported, err := iVssBackupComponents.IsVolumeSupported(providerID, mountPoint); err != nil { continue } else if !isSupported { continue } var mountPointSnapshotSetID ole.GUID - err := iVssBackupComponents.AddToSnapshotSet(mountPoint, &mountPointSnapshotSetID) + err := iVssBackupComponents.AddToSnapshotSet(mountPoint, providerID, &mountPointSnapshotSetID) if err != nil { iVssBackupComponents.Release() @@ -988,6 +1081,55 @@ func (p *VssSnapshot) Delete() error { return nil } +func getProviderID(provider string) (*ole.GUID, error) { + comInterface, err := ole.CreateInstance(CLSID_VSS_COORDINATOR, UIID_IVSS_ADMIN) + if err != nil { + return nil, err + } + defer comInterface.Release() + + vssAdmin := (*IVSSAdmin)(unsafe.Pointer(comInterface)) + + providerLower := strings.ToLower(provider) + switch providerLower { + case "": + return ole.IID_NULL, nil + case "ms": + return ole.NewGUID("{b5946137-7b9f-4925-af80-51abd60b20d5}"), nil + } + + enum, err := vssAdmin.QueryProviders() + if err != nil { + return nil, err + } + defer enum.Release() + + id := ole.NewGUID(provider) + + var props struct { + objectType uint32 + provider VssProviderProperties + } + for { + count, err := enum.Next(1, unsafe.Pointer(&props)) + if err != nil { + return nil, err + } + + if count < 1 { + return nil, errors.Errorf(`invalid VSS provider "%s"`, provider) + } + + name := ole.UTF16PtrToString(props.provider.providerName) + vssFreeProviderProperties(&props.provider) + + if id != nil && *id == props.provider.providerID || + id == nil && providerLower == strings.ToLower(name) { + return &props.provider.providerID, nil + } + } +} + // asyncCallFunc is the callback type for callAsyncFunctionAndWait. 
type asyncCallFunc func() (*IVSSAsync, error) From 739d3243d9f8e17b0f80440aaec9bacdcb046745 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Tue, 23 Mar 2021 07:41:45 +0300 Subject: [PATCH 110/117] vss: Update docs and changelog --- changelog/unreleased/pull-3067 | 4 ++++ doc/040_backup.rst | 17 ++++++++++++++++- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/changelog/unreleased/pull-3067 b/changelog/unreleased/pull-3067 index a56c045fa..52f04c5e0 100644 --- a/changelog/unreleased/pull-3067 +++ b/changelog/unreleased/pull-3067 @@ -15,4 +15,8 @@ changes timeout to five minutes and disable snapshotting of mount points on all excludes drive `D:`, mount point `C:\MNT` and specific volume from VSS snapshotting. + restic backup --use-fs-snapshot -o vss.provider={b5946137-7b9f-4925-af80-51abd60b20d5} + +uses 'Microsoft Software Shadow Copy provider 1.0' instead of the default provider. + https://github.com/restic/restic/pull/3067 diff --git a/doc/040_backup.rst b/doc/040_backup.rst index d1bb39f96..7360bb0e0 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -56,11 +56,12 @@ snapshot for each volume that contains files to backup. Files are read from the VSS snapshot instead of the regular filesystem. This allows to backup files that are exclusively locked by another process during the backup. -You can use three additional options to change VSS behaviour: +You can use additional options to change VSS behaviour: * ``-o vss.timeout`` specifies timeout for VSS snapshot creation, the default value is 120 seconds * ``-o vss.excludeallmountpoints`` disable auto snapshotting of all volume mount points * ``-o vss.excludevolumes`` allows excluding specific volumes or volume mount points from snapshotting + * ``-o vss.provider`` specifies VSS provider used for snapshotting E.g., 2.5 minutes timeout with mount points snapshotting disabled can be specified as @@ -74,6 +75,20 @@ and excluding drive ``D:\``, mount point ``C:\mnt`` and volume ``\\?\Volume{04ce -o vss.excludevolumes="d:;c:\MNT\;\\?\volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}" +VSS provider can be specified by GUID + +.. code-block:: console + + -o vss.provider={3f900f90-00e9-440e-873a-96ca5eb079e5} + +or by name + +.. code-block:: console + + -o vss.provider="Hyper-V IC Software Shadow Copy Provider" + +Also ``MS`` can be used as alias for ``Microsoft Software Shadow Copy provider 1.0``. + By default VSS ignores Outlook OST files. This is not a restriction of restic but the default Windows VSS configuration. 
The files not to snapshot are configured in the Windows registry under the following key: From 0a8f9c5d9cef798902a49e63da0a51be0e14d095 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Fri, 30 Apr 2021 18:01:40 +0300 Subject: [PATCH 111/117] vss: Add tests for "provider" option --- internal/fs/fs_local_vss_test.go | 88 ++++++++++++++++++++++++++++++-- 1 file changed, 84 insertions(+), 4 deletions(-) diff --git a/internal/fs/fs_local_vss_test.go b/internal/fs/fs_local_vss_test.go index 6beb35b98..cff881151 100644 --- a/internal/fs/fs_local_vss_test.go +++ b/internal/fs/fs_local_vss_test.go @@ -9,6 +9,7 @@ import ( "testing" "time" + ole "github.com/go-ole/go-ole" "github.com/restic/restic/internal/options" ) @@ -18,6 +19,9 @@ func matchStrings(ptrs []string, strs []string) bool { } for i, p := range ptrs { + if p == "" { + return false + } matched, err := regexp.MatchString(p, strs[i]) if err != nil { panic(err) @@ -48,6 +52,7 @@ func TestVSSConfig(t *testing.T) { type config struct { excludeAllMountPoints bool timeout time.Duration + provider string } setTests := []struct { input options.Options @@ -55,19 +60,23 @@ func TestVSSConfig(t *testing.T) { }{ { options.Options{ - "vss.timeout": "6h38m42s", + "vss.timeout": "6h38m42s", + "vss.provider": "Ms", }, config{ - timeout: 23922000000000, + timeout: 23922000000000, + provider: "Ms", }, }, { options.Options{ "vss.excludeallmountpoints": "t", + "vss.provider": "{b5946137-7b9f-4925-af80-51abd60b20d5}", }, config{ excludeAllMountPoints: true, timeout: 120000000000, + provider: "{b5946137-7b9f-4925-af80-51abd60b20d5}", }, }, { @@ -75,9 +84,11 @@ func TestVSSConfig(t *testing.T) { "vss.excludeallmountpoints": "0", "vss.excludevolumes": "", "vss.timeout": "120s", + "vss.provider": "Microsoft Software Shadow Copy provider 1.0", }, config{ - timeout: 120000000000, + timeout: 120000000000, + provider: "Microsoft Software Shadow Copy provider 1.0", }, }, } @@ -98,7 +109,8 @@ func TestVSSConfig(t *testing.T) { dst := NewLocalVss(errorHandler, messageHandler, cfg) if dst.excludeAllMountPoints != test.output.excludeAllMountPoints || - dst.excludeVolumes != nil || dst.timeout != test.output.timeout { + dst.excludeVolumes != nil || dst.timeout != test.output.timeout || + dst.provider != test.output.provider { t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v", test.output, dst) } }) @@ -205,3 +217,71 @@ func TestParseMountPoints(t *testing.T) { }) } } + +func TestParseProvider(t *testing.T) { + msProvider := ole.NewGUID("{b5946137-7b9f-4925-af80-51abd60b20d5}") + setTests := []struct { + provider string + id *ole.GUID + result string + }{ + { + "", + ole.IID_NULL, + "", + }, + { + "mS", + msProvider, + "", + }, + { + "{B5946137-7b9f-4925-Af80-51abD60b20d5}", + msProvider, + "", + }, + { + "Microsoft Software Shadow Copy provider 1.0", + msProvider, + "", + }, + { + "{04560982-3d7d-4bbc-84f7-0712f833a28f}", + nil, + `invalid VSS provider "{04560982-3d7d-4bbc-84f7-0712f833a28f}"`, + }, + { + "non-existent provider", + nil, + `invalid VSS provider "non-existent provider"`, + }, + } + + _ = ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED) + + for i, test := range setTests { + t.Run(fmt.Sprintf("test-%d", i), func(t *testing.T) { + id, err := getProviderID(test.provider) + + if err != nil && id != nil { + t.Fatalf("err!=nil but id=%v", id) + } + + if test.result != "" || err != nil { + var result string + if err != nil { + result = err.Error() + } + matched, err := regexp.MatchString(test.result, result) + if err != nil { 
+ panic(err) + } + if !matched || test.result == "" { + t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v", test.result, result) + } + } else if !ole.IsEqualGUID(id, test.id) { + t.Fatalf("wrong id, want:\n %s\ngot:\n %s", test.id.String(), id.String()) + } + }) + } +} From 5703e5a6526fda6ab7856876bedaf07cffcce752 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Mon, 29 Apr 2024 01:18:46 +0300 Subject: [PATCH 112/117] Fix texts and comments --- changelog/unreleased/pull-3067 | 12 ++++++------ doc/040_backup.rst | 12 ++++++------ internal/fs/fs_local_vss_test.go | 4 +++- internal/fs/vss.go | 2 +- internal/fs/vss_windows.go | 2 +- 5 files changed, 17 insertions(+), 15 deletions(-) diff --git a/changelog/unreleased/pull-3067 b/changelog/unreleased/pull-3067 index 52f04c5e0..855c7f2be 100644 --- a/changelog/unreleased/pull-3067 +++ b/changelog/unreleased/pull-3067 @@ -2,21 +2,21 @@ Enhancement: Add options to configure Windows Shadow Copy Service Restic always used 120 sec. timeout and unconditionally created VSS snapshots for all volume mount points on disk. Now this behavior can be fine-tuned by -new options, like exclude user specific volumes and mount points or completely +new options, like exclude specific volumes and mount points or completely disable auto snapshotting of volume mount points. For example: - restic backup --use-fs-snapshot -o vss.timeout=5m -o vss.excludeallmountpoints=true - + restic backup --use-fs-snapshot -o vss.timeout=5m -o vss.exclude-all-mount-points=true + changes timeout to five minutes and disable snapshotting of mount points on all volumes, and - restic backup --use-fs-snapshot -o vss.excludevolumes="d:\;c:\mnt\;\\?\Volume{e2e0315d-9066-4f97-8343-eb5659b35762}" + restic backup --use-fs-snapshot -o vss.exclude-volumes="d:\;c:\mnt\;\\?\Volume{e2e0315d-9066-4f97-8343-eb5659b35762}" -excludes drive `D:`, mount point `C:\MNT` and specific volume from VSS snapshotting. +excludes drive `d:`, mount point `c:\mnt` and specific volume from VSS snapshotting. restic backup --use-fs-snapshot -o vss.provider={b5946137-7b9f-4925-af80-51abd60b20d5} - + uses 'Microsoft Software Shadow Copy provider 1.0' instead of the default provider. https://github.com/restic/restic/pull/3067 diff --git a/doc/040_backup.rst b/doc/040_backup.rst index 7360bb0e0..50de954ef 100644 --- a/doc/040_backup.rst +++ b/doc/040_backup.rst @@ -59,21 +59,21 @@ exclusively locked by another process during the backup. You can use additional options to change VSS behaviour: * ``-o vss.timeout`` specifies timeout for VSS snapshot creation, the default value is 120 seconds - * ``-o vss.excludeallmountpoints`` disable auto snapshotting of all volume mount points - * ``-o vss.excludevolumes`` allows excluding specific volumes or volume mount points from snapshotting + * ``-o vss.exclude-all-mount-points`` disable auto snapshotting of all volume mount points + * ``-o vss.exclude-volumes`` allows excluding specific volumes or volume mount points from snapshotting * ``-o vss.provider`` specifies VSS provider used for snapshotting -E.g., 2.5 minutes timeout with mount points snapshotting disabled can be specified as +For example a 2.5 minutes timeout with snapshotting of mount points disabled can be specified as .. 
code-block:: console - -o vss.timeout=2m30s -o vss.excludeallmountpoints=true + -o vss.timeout=2m30s -o vss.exclude-all-mount-points=true -and excluding drive ``D:\``, mount point ``C:\mnt`` and volume ``\\?\Volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}\`` as +and excluding drive ``d:\``, mount point ``c:\mnt`` and volume ``\\?\Volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}\`` as .. code-block:: console - -o vss.excludevolumes="d:;c:\MNT\;\\?\volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}" + -o vss.exclude-volumes="d:;c:\mnt\;\\?\volume{04ce0545-3391-11e0-ba2f-806e6f6e6963}" VSS provider can be specified by GUID diff --git a/internal/fs/fs_local_vss_test.go b/internal/fs/fs_local_vss_test.go index cff881151..23e86b911 100644 --- a/internal/fs/fs_local_vss_test.go +++ b/internal/fs/fs_local_vss_test.go @@ -127,10 +127,12 @@ func TestParseMountPoints(t *testing.T) { if err != nil { t.Fatal(err) } - // We don't know a valid volume GUID path for C:\, but we'll at least check its format + // We don't know a valid volume GUID path for c:\, but we'll at least check its format if !volumeMatch.MatchString(sysVolume) { t.Fatalf("invalid volume GUID path: %s", sysVolume) } + // Changing the case and removing trailing backslash allows tests + // the equality of different ways of writing a volume name sysVolumeMutated := strings.ToUpper(sysVolume[:len(sysVolume)-1]) sysVolumeMatch := strings.ToLower(sysVolume) diff --git a/internal/fs/vss.go b/internal/fs/vss.go index a54475480..8bfffab71 100644 --- a/internal/fs/vss.go +++ b/internal/fs/vss.go @@ -33,7 +33,7 @@ func HasSufficientPrivilegesForVSS() error { return errors.New("VSS snapshots are only supported on windows") } -// GetVolumeNameForVolumeMountPoint clear input parameter +// GetVolumeNameForVolumeMountPoint add trailing backslash to input parameter // and calls the equivalent windows api. func GetVolumeNameForVolumeMountPoint(mountPoint string) (string, error) { return mountPoint, nil diff --git a/internal/fs/vss_windows.go b/internal/fs/vss_windows.go index 18aea419d..91c60c4ba 100644 --- a/internal/fs/vss_windows.go +++ b/internal/fs/vss_windows.go @@ -828,7 +828,7 @@ func HasSufficientPrivilegesForVSS() error { return err } -// GetVolumeNameForVolumeMountPoint clear input parameter +// GetVolumeNameForVolumeMountPoint add trailing backslash to input parameter // and calls the equivalent windows api. func GetVolumeNameForVolumeMountPoint(mountPoint string) (string, error) { if mountPoint != "" && mountPoint[len(mountPoint)-1] != filepath.Separator { From 24330c19a8be55fc4d4f89d9f4b912b066e0fa32 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Mon, 29 Apr 2024 01:21:33 +0300 Subject: [PATCH 113/117] Use kebab case in option names --- internal/fs/fs_local_vss.go | 6 +++--- internal/fs/fs_local_vss_test.go | 22 +++++++++++----------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index de30bcedb..0f983d136 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -14,8 +14,8 @@ import ( // VSSConfig holds extended options of windows volume shadow copy service. type VSSConfig struct { - ExcludeAllMountPoints bool `option:"excludeallmountpoints" help:"exclude mountpoints from snapshotting on all volumes"` - ExcludeVolumes string `option:"excludevolumes" help:"semicolon separated list of volumes to exclude from snapshotting (ex. 
'c:\\;e:\\mnt;\\\\?\\Volume{...}')"` + ExcludeAllMountPoints bool `option:"exclude-all-mount-points" help:"exclude mountpoints from snapshotting on all volumes"` + ExcludeVolumes string `option:"exclude-volumes" help:"semicolon separated list of volumes to exclude from snapshotting (ex. 'c:\\;e:\\mnt;\\\\?\\Volume{...}')"` Timeout time.Duration `option:"timeout" help:"time that the VSS can spend creating snapshot before timing out"` Provider string `option:"provider" help:"VSS provider identifier which will be used for snapshotting"` } @@ -80,7 +80,7 @@ func parseMountPoints(list string, msgError ErrorHandler) (volumes map[string]st } for _, s := range strings.Split(list, ";") { if v, err := GetVolumeNameForVolumeMountPoint(s); err != nil { - msgError(s, errors.Errorf("failed to parse vss.excludevolumes [%s]: %s", s, err)) + msgError(s, errors.Errorf("failed to parse vss.exclude-volumes [%s]: %s", s, err)) } else { if volumes == nil { volumes = make(map[string]struct{}) diff --git a/internal/fs/fs_local_vss_test.go b/internal/fs/fs_local_vss_test.go index 23e86b911..9e11b6c6e 100644 --- a/internal/fs/fs_local_vss_test.go +++ b/internal/fs/fs_local_vss_test.go @@ -70,8 +70,8 @@ func TestVSSConfig(t *testing.T) { }, { options.Options{ - "vss.excludeallmountpoints": "t", - "vss.provider": "{b5946137-7b9f-4925-af80-51abd60b20d5}", + "vss.exclude-all-mount-points": "t", + "vss.provider": "{b5946137-7b9f-4925-af80-51abd60b20d5}", }, config{ excludeAllMountPoints: true, @@ -81,10 +81,10 @@ func TestVSSConfig(t *testing.T) { }, { options.Options{ - "vss.excludeallmountpoints": "0", - "vss.excludevolumes": "", - "vss.timeout": "120s", - "vss.provider": "Microsoft Software Shadow Copy provider 1.0", + "vss.exclude-all-mount-points": "0", + "vss.exclude-volumes": "", + "vss.timeout": "120s", + "vss.provider": "Microsoft Software Shadow Copy provider 1.0", }, config{ timeout: 120000000000, @@ -148,7 +148,7 @@ func TestParseMountPoints(t *testing.T) { }{ { options.Options{ - "vss.excludevolumes": `c:;c:\;` + sysVolume + `;` + sysVolumeMutated, + "vss.exclude-volumes": `c:;c:\;` + sysVolume + `;` + sysVolumeMutated, }, []string{ sysVolumeMatch, @@ -163,7 +163,7 @@ func TestParseMountPoints(t *testing.T) { }, { options.Options{ - "vss.excludevolumes": `z:\nonexistent;c:;c:\windows\;\\?\Volume{39b9cac2-bcdb-4d51-97c8-0d0677d607fb}\`, + "vss.exclude-volumes": `z:\nonexistent;c:;c:\windows\;\\?\Volume{39b9cac2-bcdb-4d51-97c8-0d0677d607fb}\`, }, []string{ sysVolumeMatch, @@ -175,9 +175,9 @@ func TestParseMountPoints(t *testing.T) { {``, false}, }, []string{ - `failed to parse vss\.excludevolumes \[z:\\nonexistent\]:.*`, - `failed to parse vss\.excludevolumes \[c:\\windows\\\]:.*`, - `failed to parse vss\.excludevolumes \[\\\\\?\\Volume\{39b9cac2-bcdb-4d51-97c8-0d0677d607fb\}\\\]:.*`, + `failed to parse vss\.exclude-volumes \[z:\\nonexistent\]:.*`, + `failed to parse vss\.exclude-volumes \[c:\\windows\\\]:.*`, + `failed to parse vss\.exclude-volumes \[\\\\\?\\Volume\{39b9cac2-bcdb-4d51-97c8-0d0677d607fb\}\\\]:.*`, `failed to get volume from mount point \[c:\\windows\\\]:.*`, `failed to get volume from mount point \[\\\\\?\\Volume\{39b9cac2-bcdb-4d51-97c8-0d0677d607fb\}\\\]:.*`, `failed to get volume from mount point \[\]:.*`, From 90b168eb6cde4fe1afd6aad68185d6abcca3b806 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Mon, 29 Apr 2024 01:23:50 +0300 Subject: [PATCH 114/117] isMountPointExcluded to isMountPointIncluded --- internal/fs/fs_local_vss.go | 22 
++++++++++------------ internal/fs/fs_local_vss_test.go | 20 ++++++++++---------- 2 files changed, 20 insertions(+), 22 deletions(-) diff --git a/internal/fs/fs_local_vss.go b/internal/fs/fs_local_vss.go index 0f983d136..48ab165f1 100644 --- a/internal/fs/fs_local_vss.go +++ b/internal/fs/fs_local_vss.go @@ -145,22 +145,20 @@ func (fs *LocalVss) Lstat(name string) (os.FileInfo, error) { return os.Lstat(fs.snapshotPath(name)) } -// isMountPointExcluded is true if given mountpoint excluded by user. -func (fs *LocalVss) isMountPointExcluded(mountPoint string) bool { +// isMountPointIncluded is true if given mountpoint included by user. +func (fs *LocalVss) isMountPointIncluded(mountPoint string) bool { if fs.excludeVolumes == nil { - return false + return true } volume, err := GetVolumeNameForVolumeMountPoint(mountPoint) if err != nil { fs.msgError(mountPoint, errors.Errorf("failed to get volume from mount point [%s]: %s", mountPoint, err)) - - return false + return true } _, ok := fs.excludeVolumes[strings.ToLower(volume)] - - return ok + return !ok } // snapshotPath returns the path inside a VSS snapshots if it already exists. @@ -199,20 +197,20 @@ func (fs *LocalVss) snapshotPath(path string) string { if !snapshotExists && !snapshotFailed { vssVolume := volumeNameLower + string(filepath.Separator) - if fs.isMountPointExcluded(vssVolume) { + if !fs.isMountPointIncluded(vssVolume) { fs.msgMessage("snapshots for [%s] excluded by user\n", vssVolume) fs.failedSnapshots[volumeNameLower] = struct{}{} } else { fs.msgMessage("creating VSS snapshot for [%s]\n", vssVolume) - var filter VolumeFilter + var includeVolume VolumeFilter if !fs.excludeAllMountPoints { - filter = func(volume string) bool { - return !fs.isMountPointExcluded(volume) + includeVolume = func(volume string) bool { + return fs.isMountPointIncluded(volume) } } - if snapshot, err := NewVssSnapshot(fs.provider, vssVolume, fs.timeout, filter, fs.msgError); err != nil { + if snapshot, err := NewVssSnapshot(fs.provider, vssVolume, fs.timeout, includeVolume, fs.msgError); err != nil { fs.msgError(vssVolume, errors.Errorf("failed to create snapshot for [%s]: %s", vssVolume, err)) fs.failedSnapshots[volumeNameLower] = struct{}{} diff --git a/internal/fs/fs_local_vss_test.go b/internal/fs/fs_local_vss_test.go index 9e11b6c6e..c25ce4535 100644 --- a/internal/fs/fs_local_vss_test.go +++ b/internal/fs/fs_local_vss_test.go @@ -154,10 +154,10 @@ func TestParseMountPoints(t *testing.T) { sysVolumeMatch, }, []check{ - {`c:\`, true}, - {`c:`, true}, - {sysVolume, true}, - {sysVolumeMutated, true}, + {`c:\`, false}, + {`c:`, false}, + {sysVolume, false}, + {sysVolumeMutated, false}, }, []string{}, }, @@ -169,10 +169,10 @@ func TestParseMountPoints(t *testing.T) { sysVolumeMatch, }, []check{ - {`c:\windows\`, false}, - {`\\?\Volume{39b9cac2-bcdb-4d51-97c8-0d0677d607fb}\`, false}, - {`c:`, true}, - {``, false}, + {`c:\windows\`, true}, + {`\\?\Volume{39b9cac2-bcdb-4d51-97c8-0d0677d607fb}\`, true}, + {`c:`, false}, + {``, true}, }, []string{ `failed to parse vss\.exclude-volumes \[z:\\nonexistent\]:.*`, @@ -208,8 +208,8 @@ func TestParseMountPoints(t *testing.T) { } for _, c := range test.checks { - if dst.isMountPointExcluded(c.volume) != c.result { - t.Fatalf(`wrong check: isMountPointExcluded("%s") != %v`, c.volume, c.result) + if dst.isMountPointIncluded(c.volume) != c.result { + t.Fatalf(`wrong check: isMountPointIncluded("%s") != %v`, c.volume, c.result) } } From 7ee889bb0d0dcf0292745975454ed53d94cdb0a9 Mon Sep 17 00:00:00 2001 From: DRON-666 
<64691982+DRON-666@users.noreply.github.com> Date: Mon, 29 Apr 2024 01:25:25 +0300 Subject: [PATCH 115/117] Use S_FALSE and MaxInt --- internal/fs/vss_windows.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/internal/fs/vss_windows.go b/internal/fs/vss_windows.go index 91c60c4ba..e8c5dc561 100644 --- a/internal/fs/vss_windows.go +++ b/internal/fs/vss_windows.go @@ -5,6 +5,7 @@ package fs import ( "fmt" + "math" "path/filepath" "runtime" "strings" @@ -24,6 +25,7 @@ type HRESULT uint //nolint:golint const ( S_OK HRESULT = 0x00000000 + S_FALSE HRESULT = 0x00000001 E_ACCESSDENIED HRESULT = 0x80070005 E_OUTOFMEMORY HRESULT = 0x8007000E E_INVALIDARG HRESULT = 0x80070057 @@ -640,7 +642,7 @@ func (vssAsync *IVSSAsync) QueryStatus() (HRESULT, uint32) { // WaitUntilAsyncFinished waits until either the async call is finished or // the given timeout is reached. func (vssAsync *IVSSAsync) WaitUntilAsyncFinished(timeout time.Duration) error { - const maxTimeout = 2147483647 * time.Millisecond + const maxTimeout = math.MaxInt32 * time.Millisecond if timeout > maxTimeout { timeout = maxTimeout } @@ -740,7 +742,7 @@ func (vssEnum *IVssEnumObject) Next(count uint, props unsafe.Pointer) (uint, err result, _, _ := syscall.Syscall6(vssEnum.getVTable().next, 4, uintptr(unsafe.Pointer(vssEnum)), uintptr(count), uintptr(props), uintptr(unsafe.Pointer(&fetched)), 0, 0) - if result == 1 { + if HRESULT(result) == S_FALSE { return uint(fetched), nil } @@ -791,8 +793,8 @@ func initializeVssCOMInterface() (*ole.IUnknown, error) { // ensure COM is initialized before use if err = ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED); err != nil { - // CoInitializeEx returns 1 if COM is already initialized - if oleErr, ok := err.(*ole.OleError); !ok || oleErr.Code() != 1 { + // CoInitializeEx returns S_FALSE if COM is already initialized + if oleErr, ok := err.(*ole.OleError); !ok || HRESULT(oleErr.Code()) != S_FALSE { return nil, err } } From 125dba23c5e8e453eb7a5d784ac262cacca4f3c9 Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Mon, 29 Apr 2024 01:27:34 +0300 Subject: [PATCH 116/117] Rearange code --- internal/fs/fs_local_vss_test.go | 6 +----- internal/fs/vss_windows.go | 30 +++++++++++++++--------------- 2 files changed, 16 insertions(+), 20 deletions(-) diff --git a/internal/fs/fs_local_vss_test.go b/internal/fs/fs_local_vss_test.go index c25ce4535..60262c873 100644 --- a/internal/fs/fs_local_vss_test.go +++ b/internal/fs/fs_local_vss_test.go @@ -274,11 +274,7 @@ func TestParseProvider(t *testing.T) { if err != nil { result = err.Error() } - matched, err := regexp.MatchString(test.result, result) - if err != nil { - panic(err) - } - if !matched || test.result == "" { + if test.result != result || test.result == "" { t.Fatalf("wrong result, want:\n %#v\ngot:\n %#v", test.result, result) } } else if !ole.IsEqualGUID(id, test.id) { diff --git a/internal/fs/vss_windows.go b/internal/fs/vss_windows.go index e8c5dc561..0b51b00f3 100644 --- a/internal/fs/vss_windows.go +++ b/internal/fs/vss_windows.go @@ -537,13 +537,6 @@ func vssFreeSnapshotProperties(properties *VssSnapshotProperties) error { return nil } -func vssFreeProviderProperties(p *VssProviderProperties) { - ole.CoTaskMemFree(uintptr(unsafe.Pointer(p.providerName))) - p.providerName = nil - ole.CoTaskMemFree(uintptr(unsafe.Pointer(p.providerVersion))) - p.providerName = nil -} - // BackupComplete calls the equivalent VSS api. 
func (vss *IVssBackupComponents) BackupComplete() (*IVSSAsync, error) { var oleIUnknown *ole.IUnknown @@ -583,6 +576,13 @@ type VssProviderProperties struct { classID ole.GUID } +func vssFreeProviderProperties(p *VssProviderProperties) { + ole.CoTaskMemFree(uintptr(unsafe.Pointer(p.providerName))) + p.providerName = nil + ole.CoTaskMemFree(uintptr(unsafe.Pointer(p.providerVersion))) + p.providerVersion = nil +} + // GetSnapshotDeviceObject returns root path to access the snapshot files // and folders. func (p *VssSnapshotProperties) GetSnapshotDeviceObject() string { @@ -1084,14 +1084,6 @@ func (p *VssSnapshot) Delete() error { } func getProviderID(provider string) (*ole.GUID, error) { - comInterface, err := ole.CreateInstance(CLSID_VSS_COORDINATOR, UIID_IVSS_ADMIN) - if err != nil { - return nil, err - } - defer comInterface.Release() - - vssAdmin := (*IVSSAdmin)(unsafe.Pointer(comInterface)) - providerLower := strings.ToLower(provider) switch providerLower { case "": @@ -1100,6 +1092,14 @@ func getProviderID(provider string) (*ole.GUID, error) { return ole.NewGUID("{b5946137-7b9f-4925-af80-51abd60b20d5}"), nil } + comInterface, err := ole.CreateInstance(CLSID_VSS_COORDINATOR, UIID_IVSS_ADMIN) + if err != nil { + return nil, err + } + defer comInterface.Release() + + vssAdmin := (*IVSSAdmin)(unsafe.Pointer(comInterface)) + enum, err := vssAdmin.QueryProviders() if err != nil { return nil, err From ccd35565ee10a12a7698f088aa37df1035fc230d Mon Sep 17 00:00:00 2001 From: DRON-666 <64691982+DRON-666@users.noreply.github.com> Date: Mon, 29 Apr 2024 01:48:22 +0300 Subject: [PATCH 117/117] s/sec./seconds --- changelog/unreleased/pull-3067 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog/unreleased/pull-3067 b/changelog/unreleased/pull-3067 index 855c7f2be..fbdcfd7e5 100644 --- a/changelog/unreleased/pull-3067 +++ b/changelog/unreleased/pull-3067 @@ -1,6 +1,6 @@ Enhancement: Add options to configure Windows Shadow Copy Service -Restic always used 120 sec. timeout and unconditionally created VSS snapshots +Restic always used 120 seconds timeout and unconditionally created VSS snapshots for all volume mount points on disk. Now this behavior can be fine-tuned by new options, like exclude specific volumes and mount points or completely disable auto snapshotting of volume mount points.
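
---

Note (not part of the patches above): the options introduced and renamed in this series can be combined in a single invocation. The command below is an illustrative sketch only — the excluded drive and mount point are placeholder values reused from the examples in the changelog and documentation, and the backup target is omitted as in those examples:

    restic backup --use-fs-snapshot -o vss.timeout=5m -o vss.exclude-all-mount-points=true -o vss.exclude-volumes="d:\;c:\mnt\" -o vss.provider=MS

Here ``MS`` is the alias resolved by getProviderID() to the GUID of 'Microsoft Software Shadow Copy provider 1.0'; a full provider GUID or provider name, as shown in doc/040_backup.rst, works equally well.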