
Merge pull request #4624 from MichaelEischer/better-restorer-error-reporting

Improve restorer error reporting
Michael Eischer 2024-01-07 11:20:29 +01:00 committed by GitHub
commit e4a7eb09ef
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 119 additions and 15 deletions

View File

@@ -0,0 +1,11 @@
+Bugfix: Correct restore progress information if an error occurs
+
+If an error occurred while restoring a snapshot, this could cause the restore
+progress bar to show incorrect information. In addition, if a data file could
+not be loaded completely, then errors would also be reported for some already
+restored files.
+
+We have improved the error reporting of the restore command to be more accurate.
+
+https://github.com/restic/restic/pull/4624
+https://forum.restic.net/t/errors-restoring-with-restic-on-windows-server-s3/6943
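A minimal standalone sketch (plain Go, not restic code) of the failure mode this entry describes: if a failed pack download is retried and the callback re-runs for blobs that already succeeded, progress is counted twice and errors get attributed to files that were in fact restored.

package main

import "fmt"

func main() {
	delivered := 0
	handleBlob := func(name string) { delivered++ }

	// first attempt: blobs A and B reach the callback, then the download dies
	for _, blob := range []string{"A", "B"} {
		handleBlob(blob)
	}

	// a naive retry replays the whole pack and re-delivers A and B
	for _, blob := range []string{"A", "B", "C"} {
		handleBlob(blob)
	}

	fmt.Println(delivered) // 5, although the pack holds only 3 blobs
}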

View File

@@ -881,9 +881,9 @@ type BackendLoadFn func(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error
 const maxUnusedRange = 4 * 1024 * 1024
 
 // StreamPack loads the listed blobs from the specified pack file. The plaintext blob is passed to
-// the handleBlobFn callback or an error if decryption failed or the blob hash does not match. In
-// case of download errors handleBlobFn might be called multiple times for the same blob. If the
-// callback returns an error, then StreamPack will abort and not retry it.
+// the handleBlobFn callback or an error if decryption failed or the blob hash does not match.
+// handleBlobFn is never called multiple times for the same blob. If the callback returns an error,
+// then StreamPack will abort and not retry it.
 func StreamPack(ctx context.Context, beLoad BackendLoadFn, key *crypto.Key, packID restic.ID, blobs []restic.Blob, handleBlobFn func(blob restic.BlobHandle, buf []byte, err error) error) error {
 	if len(blobs) == 0 {
 		// nothing to do
@@ -945,7 +945,9 @@ func streamPackPart(ctx context.Context, beLoad BackendLoadFn, key *crypto.Key,
 	currentBlobEnd := dataStart
 	var buf []byte
 	var decode []byte
-	for _, entry := range blobs {
+	for len(blobs) > 0 {
+		entry := blobs[0]
+
 		skipBytes := int(entry.Offset - currentBlobEnd)
 		if skipBytes < 0 {
 			return errors.Errorf("overlapping blobs in pack %v", packID)
@@ -1008,6 +1010,8 @@
 				cancel()
 				return backoff.Permanent(err)
 			}
+			// ensure that each blob is only passed once to handleBlobFn
+			blobs = blobs[1:]
 		}
 		return nil
 	})
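The change above hinges on advancing the blobs slice only after a blob has been handed to handleBlobFn: when the surrounding retry logic re-runs the download, the loop resumes at the first undelivered blob instead of replaying the whole pack. A simplified sketch of that pattern (hypothetical names, not the actual StreamPack implementation):

// deliverOnce streams blobs and hands each one to handle exactly once, even
// if downloads must be retried. Download failures abort the inner loop and
// are retried; already delivered blobs were dropped from the slice, so a
// retry resumes at the first undelivered blob.
func deliverOnce(blobs []string, download func(string) ([]byte, error), handle func(string, []byte) error, maxRetries int) error {
	for attempt := 0; ; attempt++ {
		for len(blobs) > 0 {
			data, err := download(blobs[0])
			if err != nil {
				if attempt < maxRetries {
					break // transient download error: retry the remaining blobs
				}
				return err
			}
			if err := handle(blobs[0], data); err != nil {
				return err // callback errors are permanent and never retried
			}
			blobs = blobs[1:] // delivered: never passed to handle again
		}
		if len(blobs) == 0 {
			return nil
		}
	}
}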

View File

@@ -5,6 +5,7 @@ import (
 	"context"
 	"crypto/sha256"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"math/rand"
@@ -14,6 +15,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/cenkalti/backoff/v4"
 	"github.com/google/go-cmp/cmp"
 	"github.com/klauspost/compress/zstd"
 	"github.com/restic/restic/internal/backend"
@@ -529,7 +531,9 @@ func testStreamPack(t *testing.T, version uint) {
 	packfileBlobs, packfile := buildPackfileWithoutHeader(blobSizes, &key, compress)
 
 	loadCalls := 0
-	load := func(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
+	shortFirstLoad := false
+
+	loadBytes := func(length int, offset int64) []byte {
 		data := packfile
 
 		if offset > int64(len(data)) {
@@ -541,12 +545,34 @@
 		if length > len(data) {
 			length = len(data)
 		}
 
-		data = data[:length]
+		if shortFirstLoad {
+			length /= 2
+			shortFirstLoad = false
+		}
+
+		return data[:length]
+	}
+
+	load := func(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
+		data := loadBytes(length, offset)
+		if shortFirstLoad {
+			data = data[:len(data)/2]
+			shortFirstLoad = false
+		}
 
 		loadCalls++
-
-		return fn(bytes.NewReader(data))
+		err := fn(bytes.NewReader(data))
+		if err == nil {
+			return nil
+		}
+		var permanent *backoff.PermanentError
+		if errors.As(err, &permanent) {
+			return err
+		}
+
+		// retry loading once
+		return fn(bytes.NewReader(loadBytes(length, offset)))
 	}
 
 	// first, test regular usage
@@ -554,19 +580,21 @@
 	tests := []struct {
 		blobs []restic.Blob
 		calls int
+		shortFirstLoad bool
 	}{
-		{packfileBlobs[1:2], 1},
-		{packfileBlobs[2:5], 1},
-		{packfileBlobs[2:8], 1},
+		{packfileBlobs[1:2], 1, false},
+		{packfileBlobs[2:5], 1, false},
+		{packfileBlobs[2:8], 1, false},
 		{[]restic.Blob{
 			packfileBlobs[0],
 			packfileBlobs[4],
 			packfileBlobs[2],
-		}, 1},
+		}, 1, false},
 		{[]restic.Blob{
 			packfileBlobs[0],
 			packfileBlobs[len(packfileBlobs)-1],
-		}, 2},
+		}, 2, false},
+		{packfileBlobs[:], 1, true},
 	}
 
 	for _, test := range tests {
@@ -593,6 +621,7 @@
 			}
 
 			loadCalls = 0
+			shortFirstLoad = test.shortFirstLoad
 			err = repository.StreamPack(ctx, load, &key, restic.ID{}, test.blobs, handleBlob)
 			if err != nil {
 				t.Fatal(err)
@@ -605,6 +634,7 @@
 			})
 		}
 	})
+	shortFirstLoad = false
 
 	// next, test invalid uses, which should return an error
 	t.Run("invalid", func(t *testing.T) {

View File

@@ -246,7 +246,10 @@ func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) error {
 		return err
 	}
 
+	// track already processed blobs for precise error reporting
+	processedBlobs := restic.NewBlobSet()
 	err := repository.StreamPack(ctx, r.packLoader, r.key, pack.id, blobList, func(h restic.BlobHandle, blobData []byte, err error) error {
+		processedBlobs.Insert(h)
 		blob := blobs[h.ID]
 		if err != nil {
 			for file := range blob.files {
@@ -292,7 +295,19 @@ func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) error {
 	})
 
 	if err != nil {
-		for file := range pack.files {
+		// only report error for not yet processed blobs
+		affectedFiles := make(map[*fileInfo]struct{})
+		for _, blob := range blobList {
+			if processedBlobs.Has(blob.BlobHandle) {
+				continue
+			}
+			blob := blobs[blob.ID]
+			for file := range blob.files {
+				affectedFiles[file] = struct{}{}
+			}
+		}
+
+		for file := range affectedFiles {
 			if errFile := sanitizeError(file, err); errFile != nil {
 				return errFile
 			}
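The pattern in this hunk records every blob that reached the StreamPack callback and, on failure, blames only files that still depend on an undelivered blob. The same idea, sketched independently of restic's internal types (fileInfo, blobList and the maps below are stand-ins, not the real types):

// affectedFiles returns the files that still depend on a blob which never
// reached the callback. Files whose blobs were all processed have already
// received an individual success or error and must not be reported again.
func affectedFiles(blobList []string, processed map[string]bool, filesOf map[string][]string) map[string]struct{} {
	affected := make(map[string]struct{})
	for _, blob := range blobList {
		if processed[blob] {
			continue // blob reached the callback; its files are settled
		}
		for _, file := range filesOf[blob] {
			affected[file] = struct{}{}
		}
	}
	return affected
}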

View File

@@ -317,3 +317,47 @@
 	rtest.OK(t, err)
 	verifyRestore(t, r, repo)
 }
+
+func TestFatalDownloadError(t *testing.T) {
+	tempdir := rtest.TempDir(t)
+	content := []TestFile{
+		{
+			name: "file1",
+			blobs: []TestBlob{
+				{"data1-1", "pack1"},
+				{"data1-2", "pack1"},
+			},
+		},
+		{
+			name: "file2",
+			blobs: []TestBlob{
+				{"data2-1", "pack1"},
+				{"data2-2", "pack1"},
+				{"data2-3", "pack1"},
+			},
+		}}
+
+	repo := newTestRepo(content)
+
+	loader := repo.loader
+	repo.loader = func(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
+		// only return half the data to break file2
+		return loader(ctx, h, length/2, offset, fn)
+	}
+
+	r := newFileRestorer(tempdir, repo.loader, repo.key, repo.Lookup, 2, false, nil)
+	r.files = repo.files
+
+	var errors []string
+	r.Error = func(s string, e error) error {
+		// ignore errors as in the `restore` command
+		errors = append(errors, s)
+		return nil
+	}
+
+	err := r.restoreFiles(context.TODO())
+	rtest.OK(t, err)
+
+	rtest.Assert(t, len(errors) == 1, "unexpected number of restore errors, expected: 1, got: %v", len(errors))
+	rtest.Assert(t, errors[0] == "file2", "expected error for file2, got: %v", errors[0])
+}