
Merge pull request #4921 from MichaelEischer/restorer-bugs

restore: fix cancelation and partial updates of large files
Michael Eischer 2024-07-14 11:38:58 +02:00 committed by GitHub
commit 1a45f05e19
5 changed files with 192 additions and 81 deletions
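
The changes below apply two recurring patterns: long-running restore loops now poll ctx.Err() so a cancelation takes effect promptly, and offset/progress bookkeeping for large files now covers every blob, including blobs that are already up to date. A minimal, self-contained Go sketch of the cancelation pattern (illustrative only, not restic code):

package main

import (
	"context"
	"fmt"
	"time"
)

// processItems mirrors the pattern added to the restorer's inner loops:
// check ctx.Err() on every iteration so a canceled context stops the work
// promptly instead of only after the whole pass has finished.
func processItems(ctx context.Context, items []int) error {
	for range items {
		if ctx.Err() != nil {
			return ctx.Err() // context.Canceled or context.DeadlineExceeded
		}
		// per-item work would go here
	}
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	time.Sleep(2 * time.Millisecond)
	fmt.Println(processItems(ctx, make([]int, 1000))) // context deadline exceeded
}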

View File

@@ -21,3 +21,4 @@ https://github.com/restic/restic/issues/2662
 https://github.com/restic/restic/pull/4837
 https://github.com/restic/restic/pull/4838
 https://github.com/restic/restic/pull/4864
+https://github.com/restic/restic/pull/4921

View File

@@ -134,10 +134,14 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error {
 		}
 		fileOffset := int64(0)
 		err := r.forEachBlob(fileBlobs, func(packID restic.ID, blob restic.Blob, idx int) {
-			if largeFile && !file.state.HasMatchingBlob(idx) {
-				packsMap[packID] = append(packsMap[packID], fileBlobInfo{id: blob.ID, offset: fileOffset})
-				fileOffset += int64(blob.DataLength())
+			if largeFile {
+				if !file.state.HasMatchingBlob(idx) {
+					packsMap[packID] = append(packsMap[packID], fileBlobInfo{id: blob.ID, offset: fileOffset})
+				} else {
+					r.reportBlobProgress(file, uint64(blob.DataLength()))
+				}
 			}
+			fileOffset += int64(blob.DataLength())
 			pack, ok := packs[packID]
 			if !ok {
 				pack = &packInfo{
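
The fix in this hunk: fileOffset now advances for every blob of a large file, not only for blobs that still need downloading. Previously, when a partially restored file skipped an already matching blob, the offsets of all following blobs came out too small. A toy sketch of the invariant (hypothetical types, not restic code):

package main

import "fmt"

// blobRef is a hypothetical stand-in for one entry in a file's content list.
type blobRef struct {
	length   int64
	upToDate bool
}

// planWrites returns the offsets that still need to be written. The offset
// advances for every blob, including up-to-date ones; advancing it only for
// scheduled blobs shifts every later blob of a partially restored file.
func planWrites(blobs []blobRef) []int64 {
	var offsets []int64
	var offset int64
	for _, b := range blobs {
		if !b.upToDate {
			offsets = append(offsets, offset)
		}
		offset += b.length // advance unconditionally
	}
	return offsets
}

func main() {
	blobs := []blobRef{{10, true}, {20, false}, {30, false}}
	fmt.Println(planWrites(blobs)) // [10 30]: the second write starts after the skipped blob
}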
@@ -192,6 +196,7 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error {

 	// the main restore loop
 	wg.Go(func() error {
+		defer close(downloadCh)
 		for _, id := range packOrder {
 			pack := packs[id]
 			// allow garbage collection of packInfo
@@ -203,7 +208,6 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error {
 				debug.Log("Scheduled download pack %s", pack.id.Str())
 			}
 		}
-		close(downloadCh)
 		return nil
 	})
@@ -244,8 +248,12 @@ func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) error {
 		if fileBlobs, ok := file.blobs.(restic.IDs); ok {
 			fileOffset := int64(0)
 			err := r.forEachBlob(fileBlobs, func(packID restic.ID, blob restic.Blob, idx int) {
-				if packID.Equal(pack.id) && !file.state.HasMatchingBlob(idx) {
-					addBlob(blob, fileOffset)
+				if packID.Equal(pack.id) {
+					if !file.state.HasMatchingBlob(idx) {
+						addBlob(blob, fileOffset)
+					} else {
+						r.reportBlobProgress(file, uint64(blob.DataLength()))
+					}
 				}
 				fileOffset += int64(blob.DataLength())
 			})
@@ -273,10 +281,13 @@ func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) error {
 }

 func (r *fileRestorer) sanitizeError(file *fileInfo, err error) error {
-	if err != nil {
-		err = r.Error(file.location, err)
-	}
-	return err
+	switch err {
+	case nil, context.Canceled, context.DeadlineExceeded:
+		// Context errors are permanent.
+		return err
+	default:
+		return r.Error(file.location, err)
+	}
 }

 func (r *fileRestorer) reportError(blobs blobToFileOffsetsMapping, processedBlobs restic.BlobSet, err error) error {
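
sanitizeError routes errors through the restorer's per-file error callback but lets context.Canceled and context.DeadlineExceeded pass through unchanged, since an error handler that chooses to continue must not swallow a cancelation. A small sketch of the pattern, with illustrative names rather than restic's actual API:

package main

import (
	"context"
	"errors"
	"fmt"
)

// sanitize treats context errors as permanent and returns them as-is; every
// other error is handed to a caller-provided callback, which may downgrade
// it to nil ("log and continue").
func sanitize(onError func(location string, err error) error, location string, err error) error {
	switch err {
	case nil, context.Canceled, context.DeadlineExceeded:
		return err // never swallow cancelation
	default:
		return onError(location, err)
	}
}

func main() {
	ignoreAll := func(location string, err error) error {
		fmt.Printf("ignoring error at %s: %v\n", location, err)
		return nil
	}
	fmt.Println(sanitize(ignoreAll, "/a", errors.New("damaged blob"))) // nil: handler continued
	fmt.Println(sanitize(ignoreAll, "/b", context.Canceled))           // context.Canceled: propagated
}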
@@ -324,6 +335,11 @@ func (r *fileRestorer) downloadBlobs(ctx context.Context, packID restic.ID,
 		}
 		for file, offsets := range blob.files {
 			for _, offset := range offsets {
+				// avoid long cancelation delays for frequently used blobs
+				if ctx.Err() != nil {
+					return ctx.Err()
+				}
+
 				writeToFile := func() error {
 					// this looks overly complicated and needs explanation
 					// two competing requirements:
@@ -341,11 +357,7 @@ func (r *fileRestorer) downloadBlobs(ctx context.Context, packID restic.ID,
 						createSize = file.size
 					}
 					writeErr := r.filesWriter.writeToFile(r.targetPath(file.location), blobData, offset, createSize, file.sparse)
-					action := restore.ActionFileUpdated
-					if file.state == nil {
-						action = restore.ActionFileRestored
-					}
-					r.progress.AddProgress(file.location, action, uint64(len(blobData)), uint64(file.size))
+					r.reportBlobProgress(file, uint64(len(blobData)))
 					return writeErr
 				}
 				err := r.sanitizeError(file, writeToFile())
@@ -357,3 +369,11 @@ func (r *fileRestorer) downloadBlobs(ctx context.Context, packID restic.ID,
 		return nil
 	})
 }
+
+func (r *fileRestorer) reportBlobProgress(file *fileInfo, blobSize uint64) {
+	action := restore.ActionFileUpdated
+	if file.state == nil {
+		action = restore.ActionFileRestored
+	}
+	r.progress.AddProgress(file.location, action, uint64(blobSize), uint64(file.size))
+}
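
reportBlobProgress centralizes the progress accounting and, together with the earlier hunks, is now also invoked for blobs that are already up to date, so the reported bytes of a partially restored file still add up to the file size. A toy illustration (not restic code):

package main

import "fmt"

// If only rewritten blobs are reported, the progress of a partially
// up-to-date large file never reaches its total; reporting every blob
// exactly once (written or already correct) keeps the counters consistent.
func main() {
	blobSizes := []uint64{512, 1024, 256}
	upToDate := []bool{true, false, true}

	var total, reportedBefore, reportedNow uint64
	for i, s := range blobSizes {
		total += s
		if !upToDate[i] {
			reportedBefore += s // old behavior: only blobs scheduled for download
		}
		reportedNow += s // new behavior: every blob is reported exactly once
	}
	fmt.Printf("total %d, reported before %d, reported now %d\n", total, reportedBefore, reportedNow)
}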

View File

@@ -115,11 +115,7 @@ type treeVisitor struct {
 	leaveDir  func(node *restic.Node, target, location string, entries []string) error
 }

-// traverseTree traverses a tree from the repo and calls treeVisitor.
-// target is the path in the file system, location within the snapshot.
-func (res *Restorer) traverseTree(ctx context.Context, target string, treeID restic.ID, visitor treeVisitor) error {
-	location := string(filepath.Separator)
-	sanitizeError := func(err error) error {
+func (res *Restorer) sanitizeError(location string, err error) error {
 	switch err {
 	case nil, context.Canceled, context.DeadlineExceeded:
 		// Context errors are permanent.
@@ -129,8 +125,13 @@ func (res *Restorer) traverseTree(ctx context.Context, target string, treeID res
 	}
 }

+// traverseTree traverses a tree from the repo and calls treeVisitor.
+// target is the path in the file system, location within the snapshot.
+func (res *Restorer) traverseTree(ctx context.Context, target string, treeID restic.ID, visitor treeVisitor) error {
+	location := string(filepath.Separator)
+
 	if visitor.enterDir != nil {
-		err := sanitizeError(visitor.enterDir(nil, target, location))
+		err := res.sanitizeError(location, visitor.enterDir(nil, target, location))
 		if err != nil {
 			return err
 		}
@@ -140,7 +141,7 @@ func (res *Restorer) traverseTree(ctx context.Context, target string, treeID res
 		return err
 	}
 	if hasRestored && visitor.leaveDir != nil {
-		err = sanitizeError(visitor.leaveDir(nil, target, location, childFilenames))
+		err = res.sanitizeError(location, visitor.leaveDir(nil, target, location, childFilenames))
 	}

 	return err
@@ -151,13 +152,17 @@ func (res *Restorer) traverseTreeInner(ctx context.Context, target, location str
 	tree, err := restic.LoadTree(ctx, res.repo, treeID)
 	if err != nil {
 		debug.Log("error loading tree %v: %v", treeID, err)
-		return nil, hasRestored, res.Error(location, err)
+		return nil, hasRestored, res.sanitizeError(location, err)
 	}

 	if res.opts.Delete {
 		filenames = make([]string, 0, len(tree.Nodes))
 	}
 	for i, node := range tree.Nodes {
+		if ctx.Err() != nil {
+			return nil, hasRestored, ctx.Err()
+		}
+
 		// allow GC of tree node
 		tree.Nodes[i] = nil
 		if res.opts.Delete {
@@ -171,7 +176,7 @@ func (res *Restorer) traverseTreeInner(ctx context.Context, target, location str
 		nodeName := filepath.Base(filepath.Join(string(filepath.Separator), node.Name))
 		if nodeName != node.Name {
 			debug.Log("node %q has invalid name %q", node.Name, nodeName)
-			err := res.Error(location, errors.Errorf("invalid child node name %s", node.Name))
+			err := res.sanitizeError(location, errors.Errorf("invalid child node name %s", node.Name))
 			if err != nil {
 				return nil, hasRestored, err
 			}
@@ -186,7 +191,7 @@ func (res *Restorer) traverseTreeInner(ctx context.Context, target, location str
 		if target == nodeTarget || !fs.HasPathPrefix(target, nodeTarget) {
 			debug.Log("target: %v %v", target, nodeTarget)
 			debug.Log("node %q has invalid target path %q", node.Name, nodeTarget)
-			err := res.Error(nodeLocation, errors.New("node has invalid path"))
+			err := res.sanitizeError(nodeLocation, errors.New("node has invalid path"))
 			if err != nil {
 				return nil, hasRestored, err
 			}
@@ -207,23 +212,13 @@ func (res *Restorer) traverseTreeInner(ctx context.Context, target, location str
 			hasRestored = true
 		}

-		sanitizeError := func(err error) error {
-			switch err {
-			case nil, context.Canceled, context.DeadlineExceeded:
-				// Context errors are permanent.
-				return err
-			default:
-				return res.Error(nodeLocation, err)
-			}
-		}
-
 		if node.Type == "dir" {
 			if node.Subtree == nil {
 				return nil, hasRestored, errors.Errorf("Dir without subtree in tree %v", treeID.Str())
 			}

 			if selectedForRestore && visitor.enterDir != nil {
-				err = sanitizeError(visitor.enterDir(node, nodeTarget, nodeLocation))
+				err = res.sanitizeError(nodeLocation, visitor.enterDir(node, nodeTarget, nodeLocation))
 				if err != nil {
 					return nil, hasRestored, err
 				}
@@ -236,7 +231,7 @@ func (res *Restorer) traverseTreeInner(ctx context.Context, target, location str
 			if childMayBeSelected {
 				childFilenames, childHasRestored, err = res.traverseTreeInner(ctx, nodeTarget, nodeLocation, *node.Subtree, visitor)
-				err = sanitizeError(err)
+				err = res.sanitizeError(nodeLocation, err)
 				if err != nil {
 					return nil, hasRestored, err
 				}
@@ -249,7 +244,7 @@ func (res *Restorer) traverseTreeInner(ctx context.Context, target, location str
 			// metadata need to be restore when leaving the directory in both cases
 			// selected for restore or any child of any subtree have been restored
 			if (selectedForRestore || childHasRestored) && visitor.leaveDir != nil {
-				err = sanitizeError(visitor.leaveDir(node, nodeTarget, nodeLocation, childFilenames))
+				err = res.sanitizeError(nodeLocation, visitor.leaveDir(node, nodeTarget, nodeLocation, childFilenames))
 				if err != nil {
 					return nil, hasRestored, err
 				}
@@ -259,7 +254,7 @@ func (res *Restorer) traverseTreeInner(ctx context.Context, target, location str
 		}

 		if selectedForRestore {
-			err = sanitizeError(visitor.visitNode(node, nodeTarget, nodeLocation))
+			err = res.sanitizeError(nodeLocation, visitor.visitNode(node, nodeTarget, nodeLocation))
 			if err != nil {
 				return nil, hasRestored, err
 			}
@@ -368,7 +363,7 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error {
 	err = res.traverseTree(ctx, dst, *res.sn.Tree, treeVisitor{
 		enterDir: func(_ *restic.Node, target, location string) error {
 			debug.Log("first pass, enterDir: mkdir %q, leaveDir should restore metadata", location)
-			if location != "/" {
+			if location != string(filepath.Separator) {
 				res.opts.Progress.AddFile(0)
 			}
 			return res.ensureDir(target)
@@ -394,7 +389,7 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error {
 				idx.Add(node.Inode, node.DeviceID, location)
 			}

-			buf, err = res.withOverwriteCheck(node, target, location, false, buf, func(updateMetadataOnly bool, matches *fileState) error {
+			buf, err = res.withOverwriteCheck(ctx, node, target, location, false, buf, func(updateMetadataOnly bool, matches *fileState) error {
 				if updateMetadataOnly {
 					res.opts.Progress.AddSkippedFile(location, node.Size)
 				} else {
@@ -434,14 +429,14 @@ func (res *Restorer) RestoreTo(ctx context.Context, dst string) error {
 		visitNode: func(node *restic.Node, target, location string) error {
 			debug.Log("second pass, visitNode: restore node %q", location)
 			if node.Type != "file" {
-				_, err := res.withOverwriteCheck(node, target, location, false, nil, func(_ bool, _ *fileState) error {
+				_, err := res.withOverwriteCheck(ctx, node, target, location, false, nil, func(_ bool, _ *fileState) error {
 					return res.restoreNodeTo(ctx, node, target, location)
 				})
 				return err
 			}

 			if idx.Has(node.Inode, node.DeviceID) && idx.Value(node.Inode, node.DeviceID) != location {
-				_, err := res.withOverwriteCheck(node, target, location, true, nil, func(_ bool, _ *fileState) error {
+				_, err := res.withOverwriteCheck(ctx, node, target, location, true, nil, func(_ bool, _ *fileState) error {
 					return res.restoreHardlinkAt(node, filerestorer.targetPath(idx.Value(node.Inode, node.DeviceID)), target, location)
 				})
 				return err
@@ -528,7 +523,7 @@ func (res *Restorer) hasRestoredFile(location string) (metadataOnly bool, ok boo
 	return metadataOnly, ok
 }

-func (res *Restorer) withOverwriteCheck(node *restic.Node, target, location string, isHardlink bool, buf []byte, cb func(updateMetadataOnly bool, matches *fileState) error) ([]byte, error) {
+func (res *Restorer) withOverwriteCheck(ctx context.Context, node *restic.Node, target, location string, isHardlink bool, buf []byte, cb func(updateMetadataOnly bool, matches *fileState) error) ([]byte, error) {
 	overwrite, err := shouldOverwrite(res.opts.Overwrite, node, target)
 	if err != nil {
 		return buf, err
@@ -545,7 +540,7 @@ func (res *Restorer) withOverwriteCheck(ctx context.Context, node *restic.Node, target, location stri
 	updateMetadataOnly := false
 	if node.Type == "file" && !isHardlink {
 		// if a file fails to verify, then matches is nil which results in restoring from scratch
-		matches, buf, _ = res.verifyFile(target, node, false, res.opts.Overwrite == OverwriteIfChanged, buf)
+		matches, buf, _ = res.verifyFile(ctx, target, node, false, res.opts.Overwrite == OverwriteIfChanged, buf)
 		// skip files that are already correct completely
 		updateMetadataOnly = !matches.NeedsRestore()
 	}
@@ -628,10 +623,8 @@ func (res *Restorer) VerifyFiles(ctx context.Context, dst string) (int, error) {
 		g.Go(func() (err error) {
 			var buf []byte
 			for job := range work {
-				_, buf, err = res.verifyFile(job.path, job.node, true, false, buf)
-				if err != nil {
-					err = res.Error(job.path, err)
-				}
+				_, buf, err = res.verifyFile(ctx, job.path, job.node, true, false, buf)
+				err = res.sanitizeError(job.path, err)
 				if err != nil || ctx.Err() != nil {
 					break
 				}
@@ -676,7 +669,7 @@ func (s *fileState) HasMatchingBlob(i int) bool {
 // buf and the first return value are scratch space, passed around for reuse.
 // Reusing buffers prevents the verifier goroutines allocating all of RAM and
 // flushing the filesystem cache (at least on Linux).
-func (res *Restorer) verifyFile(target string, node *restic.Node, failFast bool, trustMtime bool, buf []byte) (*fileState, []byte, error) {
+func (res *Restorer) verifyFile(ctx context.Context, target string, node *restic.Node, failFast bool, trustMtime bool, buf []byte) (*fileState, []byte, error) {
 	f, err := fs.OpenFile(target, fs.O_RDONLY|fs.O_NOFOLLOW, 0)
 	if err != nil {
 		return nil, buf, err
@@ -707,6 +700,9 @@ func (res *Restorer) verifyFile(ctx context.Context, target string, node *restic.Node, failFast bool,
 	matches := make([]bool, len(node.Content))
 	var offset int64
 	for i, blobID := range node.Content {
+		if ctx.Err() != nil {
+			return nil, buf, ctx.Err()
+		}
 		length, found := res.repo.LookupBlobSize(restic.DataBlob, blobID)
 		if !found {
 			return nil, buf, errors.Errorf("Unable to fetch blob %s", blobID)

View File

@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"context"
 	"encoding/json"
+	"fmt"
 	"io"
 	"math"
 	"os"
@@ -21,6 +22,7 @@ import (
 	"github.com/restic/restic/internal/repository"
 	"github.com/restic/restic/internal/restic"
 	rtest "github.com/restic/restic/internal/test"
+	restoreui "github.com/restic/restic/internal/ui/restore"

 	"golang.org/x/sync/errgroup"
 )
@@ -32,6 +34,7 @@ type Snapshot struct {

 type File struct {
 	Data      string
+	DataParts []string
 	Links     uint64
 	Inode     uint64
 	Mode      os.FileMode
@@ -59,11 +62,11 @@ type FileAttributes struct {
 	Encrypted bool
 }

-func saveFile(t testing.TB, repo restic.BlobSaver, node File) restic.ID {
+func saveFile(t testing.TB, repo restic.BlobSaver, data string) restic.ID {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()

-	id, _, _, err := repo.SaveBlob(ctx, restic.DataBlob, []byte(node.Data), restic.ID{}, false)
+	id, _, _, err := repo.SaveBlob(ctx, restic.DataBlob, []byte(data), restic.ID{}, false)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -80,17 +83,24 @@ func saveDir(t testing.TB, repo restic.BlobSaver, nodes map[string]Node, inode u
 		inode++
 		switch node := n.(type) {
 		case File:
-			fi := n.(File).Inode
+			fi := node.Inode
 			if fi == 0 {
 				fi = inode
 			}
-			lc := n.(File).Links
+			lc := node.Links
 			if lc == 0 {
 				lc = 1
 			}
 			fc := []restic.ID{}
-			if len(n.(File).Data) > 0 {
-				fc = append(fc, saveFile(t, repo, node))
+			size := 0
+			if len(node.Data) > 0 {
+				size = len(node.Data)
+				fc = append(fc, saveFile(t, repo, node.Data))
+			} else if len(node.DataParts) > 0 {
+				for _, part := range node.DataParts {
+					fc = append(fc, saveFile(t, repo, part))
+					size += len(part)
+				}
 			}
 			mode := node.Mode
 			if mode == 0 {
@@ -104,22 +114,21 @@ func saveDir(t testing.TB, repo restic.BlobSaver, nodes map[string]Node, inode u
 				UID:               uint32(os.Getuid()),
 				GID:               uint32(os.Getgid()),
 				Content:           fc,
-				Size:              uint64(len(n.(File).Data)),
+				Size:              uint64(size),
 				Inode:             fi,
 				Links:             lc,
 				GenericAttributes: getGenericAttributes(node.attributes, false),
 			})
 			rtest.OK(t, err)
 		case Symlink:
-			symlink := n.(Symlink)
 			err := tree.Insert(&restic.Node{
 				Type:       "symlink",
 				Mode:       os.ModeSymlink | 0o777,
-				ModTime:    symlink.ModTime,
+				ModTime:    node.ModTime,
 				Name:       name,
 				UID:        uint32(os.Getuid()),
 				GID:        uint32(os.Getgid()),
-				LinkTarget: symlink.Target,
+				LinkTarget: node.Target,
 				Inode:      inode,
 				Links:      1,
 			})
@@ -932,7 +941,7 @@ func TestRestorerSparseFiles(t *testing.T) {
 		len(zeros), blocks, 100*sparsity)
 }

-func saveSnapshotsAndOverwrite(t *testing.T, baseSnapshot Snapshot, overwriteSnapshot Snapshot, options Options) string {
+func saveSnapshotsAndOverwrite(t *testing.T, baseSnapshot Snapshot, overwriteSnapshot Snapshot, baseOptions, overwriteOptions Options) string {
 	repo := repository.TestRepository(t)
 	tempdir := filepath.Join(rtest.TempDir(t), "target")
 	ctx, cancel := context.WithCancel(context.Background())
@@ -942,13 +951,13 @@ func saveSnapshotsAndOverwrite(t *testing.T, baseSnapshot Snapshot, overwriteSna
 	sn, id := saveSnapshot(t, repo, baseSnapshot, noopGetGenericAttributes)
 	t.Logf("base snapshot saved as %v", id.Str())

-	res := NewRestorer(repo, sn, options)
+	res := NewRestorer(repo, sn, baseOptions)
 	rtest.OK(t, res.RestoreTo(ctx, tempdir))

 	// overwrite snapshot
 	sn, id = saveSnapshot(t, repo, overwriteSnapshot, noopGetGenericAttributes)
 	t.Logf("overwrite snapshot saved as %v", id.Str())
-	res = NewRestorer(repo, sn, options)
+	res = NewRestorer(repo, sn, overwriteOptions)
 	rtest.OK(t, res.RestoreTo(ctx, tempdir))

 	_, err := res.VerifyFiles(ctx, tempdir)
@@ -970,7 +979,20 @@ func TestRestorerSparseOverwrite(t *testing.T) {
 		},
 	}

-	saveSnapshotsAndOverwrite(t, baseSnapshot, sparseSnapshot, Options{Sparse: true, Overwrite: OverwriteAlways})
+	opts := Options{Sparse: true, Overwrite: OverwriteAlways}
+	saveSnapshotsAndOverwrite(t, baseSnapshot, sparseSnapshot, opts, opts)
+}
+
+type printerMock struct {
+	s restoreui.State
+}
+
+func (p *printerMock) Update(_ restoreui.State, _ time.Duration) {
+}
+func (p *printerMock) CompleteItem(action restoreui.ItemAction, item string, size uint64) {
+}
+func (p *printerMock) Finish(s restoreui.State, _ time.Duration) {
+	p.s = s
 }

 func TestRestorerOverwriteBehavior(t *testing.T) {
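
printerMock moves into the shared test file (it is removed from the unix-only test file at the end of this commit) and simply records the final restoreui.State passed to Finish. A fragment mirroring how the tests below wire it up; repo, sn, ctx, tempdir and expectedState are assumed to exist:

	mock := &printerMock{}
	progress := restoreui.NewProgress(mock, 0)

	res := NewRestorer(repo, sn, Options{Overwrite: OverwriteAlways, Progress: progress})
	rtest.OK(t, res.RestoreTo(ctx, tempdir))

	// Finish flushes the final counters into the mock, which the test then compares.
	progress.Finish()
	rtest.Equals(t, expectedState, mock.s)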
@@ -1000,6 +1022,7 @@ func TestRestorerOverwriteBehavior(t *testing.T) {
 	var tests = []struct {
 		Overwrite OverwriteBehavior
 		Files     map[string]string
+		Progress  restoreui.State
 	}{
 		{
 			Overwrite: OverwriteAlways,
@@ -1007,6 +1030,14 @@ func TestRestorerOverwriteBehavior(t *testing.T) {
 				"foo":          "content: new\n",
 				"dirtest/file": "content: file2\n",
 			},
+			Progress: restoreui.State{
+				FilesFinished:   3,
+				FilesTotal:      3,
+				FilesSkipped:    0,
+				AllBytesWritten: 28,
+				AllBytesTotal:   28,
+				AllBytesSkipped: 0,
+			},
 		},
 		{
 			Overwrite: OverwriteIfChanged,
@@ -1014,6 +1045,14 @@ func TestRestorerOverwriteBehavior(t *testing.T) {
 				"foo":          "content: new\n",
 				"dirtest/file": "content: file2\n",
 			},
+			Progress: restoreui.State{
+				FilesFinished:   3,
+				FilesTotal:      3,
+				FilesSkipped:    0,
+				AllBytesWritten: 28,
+				AllBytesTotal:   28,
+				AllBytesSkipped: 0,
+			},
 		},
 		{
 			Overwrite: OverwriteIfNewer,
@@ -1021,6 +1060,14 @@ func TestRestorerOverwriteBehavior(t *testing.T) {
 				"foo":          "content: new\n",
 				"dirtest/file": "content: file\n",
 			},
+			Progress: restoreui.State{
+				FilesFinished:   2,
+				FilesTotal:      2,
+				FilesSkipped:    1,
+				AllBytesWritten: 13,
+				AllBytesTotal:   13,
+				AllBytesSkipped: 15,
+			},
 		},
 		{
 			Overwrite: OverwriteNever,
@@ -1028,12 +1075,22 @@ func TestRestorerOverwriteBehavior(t *testing.T) {
 				"foo":          "content: foo\n",
 				"dirtest/file": "content: file\n",
 			},
+			Progress: restoreui.State{
+				FilesFinished:   1,
+				FilesTotal:      1,
+				FilesSkipped:    2,
+				AllBytesWritten: 0,
+				AllBytesTotal:   0,
+				AllBytesSkipped: 28,
+			},
 		},
 	}

 	for _, test := range tests {
 		t.Run("", func(t *testing.T) {
-			tempdir := saveSnapshotsAndOverwrite(t, baseSnapshot, overwriteSnapshot, Options{Overwrite: test.Overwrite})
+			mock := &printerMock{}
+			progress := restoreui.NewProgress(mock, 0)
+			tempdir := saveSnapshotsAndOverwrite(t, baseSnapshot, overwriteSnapshot, Options{}, Options{Overwrite: test.Overwrite, Progress: progress})

 			for filename, content := range test.Files {
 				data, err := os.ReadFile(filepath.Join(tempdir, filepath.FromSlash(filename)))
@@ -1046,10 +1103,57 @@ func TestRestorerOverwriteBehavior(t *testing.T) {
 					t.Errorf("file %v has wrong content: want %q, got %q", filename, content, data)
 				}
 			}
+
+			progress.Finish()
+			rtest.Equals(t, test.Progress, mock.s)
 		})
 	}
 }

+func TestRestorerOverwritePartial(t *testing.T) {
+	parts := make([]string, 100)
+	size := 0
+	for i := 0; i < len(parts); i++ {
+		parts[i] = fmt.Sprint(i)
+		size += len(parts[i])
+		if i < 8 {
+			// small file
+			size += len(parts[i])
+		}
+	}
+
+	// the data of both snapshots is stored in different pack files
+	// thus both small an foo in the overwriteSnapshot contain blobs from
+	// two different pack files. This tests basic handling of blobs from
+	// different pack files.
+	baseTime := time.Now()
+	baseSnapshot := Snapshot{
+		Nodes: map[string]Node{
+			"foo":   File{DataParts: parts[0:5], ModTime: baseTime},
+			"small": File{DataParts: parts[0:5], ModTime: baseTime},
+		},
+	}
+	overwriteSnapshot := Snapshot{
+		Nodes: map[string]Node{
+			"foo":   File{DataParts: parts, ModTime: baseTime},
+			"small": File{DataParts: parts[0:8], ModTime: baseTime},
+		},
+	}
+
+	mock := &printerMock{}
+	progress := restoreui.NewProgress(mock, 0)
+	saveSnapshotsAndOverwrite(t, baseSnapshot, overwriteSnapshot, Options{}, Options{Overwrite: OverwriteAlways, Progress: progress})
+	progress.Finish()
+	rtest.Equals(t, restoreui.State{
+		FilesFinished:   2,
+		FilesTotal:      2,
+		FilesSkipped:    0,
+		AllBytesWritten: uint64(size),
+		AllBytesTotal:   uint64(size),
+		AllBytesSkipped: 0,
+	}, mock.s)
+}
+
 func TestRestorerOverwriteSpecial(t *testing.T) {
 	baseTime := time.Now()
 	baseSnapshot := Snapshot{
@@ -1080,7 +1184,8 @@ func TestRestorerOverwriteSpecial(t *testing.T) {
 		"file": "foo2",
 	}

-	tempdir := saveSnapshotsAndOverwrite(t, baseSnapshot, overwriteSnapshot, Options{Overwrite: OverwriteAlways})
+	opts := Options{Overwrite: OverwriteAlways}
+	tempdir := saveSnapshotsAndOverwrite(t, baseSnapshot, overwriteSnapshot, opts, opts)

 	for filename, content := range files {
 		data, err := os.ReadFile(filepath.Join(tempdir, filepath.FromSlash(filename)))
@@ -1257,6 +1362,7 @@ func TestRestoreOverwriteDirectory(t *testing.T) {
 				"dir": File{Data: "content: file\n"},
 			},
 		},
+		Options{},
 		Options{Delete: true},
 	)
 }

View File

@@ -65,18 +65,6 @@ func getBlockCount(t *testing.T, filename string) int64 {
 	return st.Blocks
 }

-type printerMock struct {
-	s restoreui.State
-}
-
-func (p *printerMock) Update(_ restoreui.State, _ time.Duration) {
-}
-func (p *printerMock) CompleteItem(action restoreui.ItemAction, item string, size uint64) {
-}
-func (p *printerMock) Finish(s restoreui.State, _ time.Duration) {
-	p.s = s
-}
-
 func TestRestorerProgressBar(t *testing.T) {
 	testRestorerProgressBar(t, false)
 }