
Merge pull request #4644 from MichaelEischer/refactor-repair-packs

Refactor and test `repair packs`
Michael Eischer 2024-01-27 13:00:51 +01:00 committed by GitHub
commit 3424088274
17 changed files with 482 additions and 278 deletions

View File

@ -12,7 +12,6 @@ import (
"runtime"
"strconv"
"strings"
"sync"
"time"
"github.com/spf13/cobra"
@ -25,7 +24,6 @@ import (
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/textfile"
"github.com/restic/restic/internal/ui"
"github.com/restic/restic/internal/ui/backup"
"github.com/restic/restic/internal/ui/termstatus"
)
@ -56,31 +54,9 @@ Exit status is 3 if some source data could not be read (incomplete snapshot crea
},
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
var wg sync.WaitGroup
cancelCtx, cancel := context.WithCancel(ctx)
defer func() {
// shutdown termstatus
cancel()
wg.Wait()
}()
term := termstatus.New(globalOptions.stdout, globalOptions.stderr, globalOptions.Quiet)
wg.Add(1)
go func() {
defer wg.Done()
term.Run(cancelCtx)
}()
// use the terminal for stdout/stderr
prevStdout, prevStderr := globalOptions.stdout, globalOptions.stderr
defer func() {
globalOptions.stdout, globalOptions.stderr = prevStdout, prevStderr
}()
stdioWrapper := ui.NewStdioWrapper(term)
globalOptions.stdout, globalOptions.stderr = stdioWrapper.Stdout(), stdioWrapper.Stderr()
return runBackup(ctx, backupOptions, globalOptions, term, args)
term, cancel := setupTermstatus()
defer cancel()
return runBackup(cmd.Context(), backupOptions, globalOptions, term, args)
},
}

View File

@ -15,6 +15,7 @@ import (
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui"
"github.com/restic/restic/internal/ui/progress"
"github.com/spf13/cobra"
)
@ -766,7 +767,7 @@ func doPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo r
return errors.Fatalf("%s", err)
}
} else if len(plan.ignorePacks) != 0 {
err = rebuildIndexFiles(ctx, gopts, repo, plan.ignorePacks, nil)
err = rebuildIndexFiles(ctx, gopts, repo, plan.ignorePacks, nil, false)
if err != nil {
return errors.Fatalf("%s", err)
}
@ -778,7 +779,7 @@ func doPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo r
}
if opts.unsafeRecovery {
_, err = writeIndexFiles(ctx, gopts, repo, plan.ignorePacks, nil)
err = rebuildIndexFiles(ctx, gopts, repo, plan.ignorePacks, nil, true)
if err != nil {
return errors.Fatalf("%s", err)
}
@ -788,23 +789,22 @@ func doPrune(ctx context.Context, opts PruneOptions, gopts GlobalOptions, repo r
return nil
}
func writeIndexFiles(ctx context.Context, gopts GlobalOptions, repo restic.Repository, removePacks restic.IDSet, extraObsolete restic.IDs) (restic.IDSet, error) {
func rebuildIndexFiles(ctx context.Context, gopts GlobalOptions, repo restic.Repository, removePacks restic.IDSet, extraObsolete restic.IDs, skipDeletion bool) error {
Verbosef("rebuilding index\n")
bar := newProgressMax(!gopts.Quiet, 0, "packs processed")
obsoleteIndexes, err := repo.Index().Save(ctx, repo, removePacks, extraObsolete, bar)
bar.Done()
return obsoleteIndexes, err
}
func rebuildIndexFiles(ctx context.Context, gopts GlobalOptions, repo restic.Repository, removePacks restic.IDSet, extraObsolete restic.IDs) error {
obsoleteIndexes, err := writeIndexFiles(ctx, gopts, repo, removePacks, extraObsolete)
if err != nil {
return err
}
Verbosef("deleting obsolete index files\n")
return DeleteFilesChecked(ctx, gopts, repo, obsoleteIndexes, restic.IndexFile)
return repo.Index().Save(ctx, repo, removePacks, extraObsolete, restic.MasterIndexSaveOpts{
SaveProgress: bar,
DeleteProgress: func() *progress.Counter {
return newProgressMax(!gopts.Quiet, 0, "old indexes deleted")
},
DeleteReport: func(id restic.ID, err error) {
if gopts.verbosity > 2 {
Verbosef("removed index %v\n", id.String())
}
},
SkipDeletion: skipDeletion,
})
}
func getUsedBlobs(ctx context.Context, repo restic.Repository, ignoreSnapshots restic.IDSet, quiet bool) (usedBlobs restic.CountedBlobSet, err error) {

View File

@ -154,7 +154,7 @@ func rebuildIndex(ctx context.Context, opts RepairIndexOptions, gopts GlobalOpti
}
}
err = rebuildIndexFiles(ctx, gopts, repo, removePacks, obsoleteIndexes)
err = rebuildIndexFiles(ctx, gopts, repo, removePacks, obsoleteIndexes, false)
if err != nil {
return err
}

View File

@ -9,8 +9,8 @@ import (
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui/termstatus"
"github.com/spf13/cobra"
"golang.org/x/sync/errgroup"
)
var cmdRepairPacks = &cobra.Command{
@ -29,7 +29,9 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
return runRepairPacks(cmd.Context(), globalOptions, args)
term, cancel := setupTermstatus()
defer cancel()
return runRepairPacks(cmd.Context(), globalOptions, term, args)
},
}
@ -37,7 +39,7 @@ func init() {
cmdRepair.AddCommand(cmdRepairPacks)
}
func runRepairPacks(ctx context.Context, gopts GlobalOptions, args []string) error {
func runRepairPacks(ctx context.Context, gopts GlobalOptions, term *termstatus.Terminal, args []string) error {
// FIXME discuss and add proper feature flag mechanism
flag, _ := os.LookupEnv("RESTIC_FEATURES")
if flag != "repair-packs-v1" {
@ -68,21 +70,19 @@ func runRepairPacks(ctx context.Context, gopts GlobalOptions, args []string) err
return err
}
return repairPacks(ctx, gopts, repo, ids)
}
func repairPacks(ctx context.Context, gopts GlobalOptions, repo *repository.Repository, ids restic.IDSet) error {
bar := newIndexProgress(gopts.Quiet, gopts.JSON)
err := repo.LoadIndex(ctx, bar)
err = repo.LoadIndex(ctx, bar)
if err != nil {
return errors.Fatalf("%s", err)
}
Warnf("saving backup copies of pack files in current folder\n")
printer := newTerminalProgressPrinter(gopts.verbosity, term)
printer.P("saving backup copies of pack files to current folder")
for id := range ids {
f, err := os.OpenFile("pack-"+id.String(), os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0o666)
if err != nil {
return errors.Fatalf("%s", err)
return err
}
err = repo.Backend().Load(ctx, backend.Handle{Type: restic.PackFile, Name: id.String()}, 0, 0, func(rd io.Reader) error {
@ -94,66 +94,15 @@ func repairPacks(ctx context.Context, gopts GlobalOptions, repo *repository.Repo
return err
})
if err != nil {
return errors.Fatalf("%s", err)
return err
}
}
wg, wgCtx := errgroup.WithContext(ctx)
repo.StartPackUploader(wgCtx, wg)
repo.DisableAutoIndexUpdate()
Warnf("salvaging intact data from specified pack files\n")
bar = newProgressMax(!gopts.Quiet, uint64(len(ids)), "pack files")
defer bar.Done()
wg.Go(func() error {
// examine all data the indexes have for the pack file
for b := range repo.Index().ListPacks(wgCtx, ids) {
blobs := b.Blobs
if len(blobs) == 0 {
Warnf("no blobs found for pack %v\n", b.PackID)
bar.Add(1)
continue
}
err = repo.LoadBlobsFromPack(wgCtx, b.PackID, blobs, func(blob restic.BlobHandle, buf []byte, err error) error {
if err != nil {
// Fallback path
buf, err = repo.LoadBlob(wgCtx, blob.Type, blob.ID, nil)
if err != nil {
Warnf("failed to load blob %v: %v\n", blob.ID, err)
return nil
}
}
id, _, _, err := repo.SaveBlob(wgCtx, blob.Type, buf, restic.ID{}, true)
if !id.Equal(blob.ID) {
panic("pack id mismatch during upload")
}
return err
})
if err != nil {
return err
}
bar.Add(1)
}
return repo.Flush(wgCtx)
})
if err := wg.Wait(); err != nil {
return errors.Fatalf("%s", err)
}
bar.Done()
// remove salvaged packs from index
err = rebuildIndexFiles(ctx, gopts, repo, ids, nil)
err = repository.RepairPacks(ctx, repo, ids, printer)
if err != nil {
return errors.Fatalf("%s", err)
}
// cleanup
Warnf("removing salvaged pack files\n")
DeleteFiles(ctx, gopts, repo, ids, restic.PackFile)
Warnf("\nUse `restic repair snapshots --forget` to remove the corrupted data blobs from all snapshots\n")
return nil
}

View File

@ -3,7 +3,6 @@ package main
import (
"context"
"strings"
"sync"
"time"
"github.com/restic/restic/internal/debug"
@ -38,31 +37,9 @@ Exit status is 0 if the command was successful, and non-zero if there was any er
`,
DisableAutoGenTag: true,
RunE: func(cmd *cobra.Command, args []string) error {
ctx := cmd.Context()
var wg sync.WaitGroup
cancelCtx, cancel := context.WithCancel(ctx)
defer func() {
// shutdown termstatus
cancel()
wg.Wait()
}()
term := termstatus.New(globalOptions.stdout, globalOptions.stderr, globalOptions.Quiet)
wg.Add(1)
go func() {
defer wg.Done()
term.Run(cancelCtx)
}()
// allow usage of warnf / verbosef
prevStdout, prevStderr := globalOptions.stdout, globalOptions.stderr
defer func() {
globalOptions.stdout, globalOptions.stderr = prevStdout, prevStderr
}()
stdioWrapper := ui.NewStdioWrapper(term)
globalOptions.stdout, globalOptions.stderr = stdioWrapper.Stdout(), stdioWrapper.Stderr()
return runRestore(ctx, restoreOptions, globalOptions, term, args)
term, cancel := setupTermstatus()
defer cancel()
return runRestore(cmd.Context(), restoreOptions, globalOptions, term, args)
},
}

View File

@ -3,9 +3,6 @@ package main
import (
"context"
"golang.org/x/sync/errgroup"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/restic"
)
@ -24,46 +21,21 @@ func DeleteFilesChecked(ctx context.Context, gopts GlobalOptions, repo restic.Re
// deleteFiles deletes the given fileList of fileType in parallel.
// If ignoreError is true, it prints a warning on errors; otherwise it aborts.
func deleteFiles(ctx context.Context, gopts GlobalOptions, ignoreError bool, repo restic.Repository, fileList restic.IDSet, fileType restic.FileType) error {
totalCount := len(fileList)
fileChan := make(chan restic.ID)
wg, ctx := errgroup.WithContext(ctx)
wg.Go(func() error {
defer close(fileChan)
for id := range fileList {
select {
case fileChan <- id:
case <-ctx.Done():
return ctx.Err()
bar := newProgressMax(!gopts.JSON && !gopts.Quiet, 0, "files deleted")
defer bar.Done()
return restic.ParallelRemove(ctx, repo, fileList, fileType, func(id restic.ID, err error) error {
if err != nil {
if !gopts.JSON {
Warnf("unable to remove %v/%v from the repository\n", fileType, id)
}
if !ignoreError {
return err
}
}
if !gopts.JSON && gopts.verbosity > 2 {
Verbosef("removed %v/%v\n", fileType, id)
}
return nil
})
bar := newProgressMax(!gopts.JSON && !gopts.Quiet, uint64(totalCount), "files deleted")
defer bar.Done()
// deleting files is IO-bound
workerCount := repo.Connections()
for i := 0; i < int(workerCount); i++ {
wg.Go(func() error {
for id := range fileChan {
h := backend.Handle{Type: fileType, Name: id.String()}
err := repo.Backend().Remove(ctx, h)
if err != nil {
if !gopts.JSON {
Warnf("unable to remove %v from the repository\n", h)
}
if !ignoreError {
return err
}
}
if !gopts.JSON && gopts.verbosity > 2 {
Verbosef("removed %v\n", h)
}
bar.Add(1)
}
return nil
})
}
err := wg.Wait()
return err
}, bar)
}

View File

@ -30,7 +30,7 @@ func calculateProgressInterval(show bool, json bool) time.Duration {
}
// newTerminalProgressMax returns a progress.Counter that prints to stdout or terminal if provided.
func newGenericProgressMax(show bool, max uint64, description string, print func(status string)) *progress.Counter {
func newGenericProgressMax(show bool, max uint64, description string, print func(status string, final bool)) *progress.Counter {
if !show {
return nil
}
@ -46,16 +46,18 @@ func newGenericProgressMax(show bool, max uint64, description string, print func
ui.FormatDuration(d), ui.FormatPercent(v, max), v, max, description)
}
print(status)
if final {
fmt.Print("\n")
}
print(status, final)
})
}
func newTerminalProgressMax(show bool, max uint64, description string, term *termstatus.Terminal) *progress.Counter {
return newGenericProgressMax(show, max, description, func(status string) {
term.SetStatus([]string{status})
return newGenericProgressMax(show, max, description, func(status string, final bool) {
if final {
term.SetStatus([]string{})
term.Print(status)
} else {
term.SetStatus([]string{status})
}
})
}
@ -64,7 +66,7 @@ func newProgressMax(show bool, max uint64, description string) *progress.Counter
return newGenericProgressMax(show, max, description, printProgress)
}
func printProgress(status string) {
func printProgress(status string, final bool) {
canUpdateStatus := stdoutCanUpdateStatus()
@ -95,6 +97,9 @@ func printProgress(status string) {
}
_, _ = os.Stdout.Write([]byte(clear + status + carriageControl))
if final {
_, _ = os.Stdout.Write([]byte("\n"))
}
}
func newIndexProgress(quiet bool, json bool) *progress.Counter {
@ -104,3 +109,21 @@ func newIndexProgress(quiet bool, json bool) *progress.Counter {
func newIndexTerminalProgress(quiet bool, json bool, term *termstatus.Terminal) *progress.Counter {
return newTerminalProgressMax(!quiet && !json && stdoutIsTerminal(), 0, "index files loaded", term)
}
type terminalProgressPrinter struct {
term *termstatus.Terminal
ui.Message
show bool
}
func (t *terminalProgressPrinter) NewCounter(description string) *progress.Counter {
return newTerminalProgressMax(t.show, 0, description, t.term)
}
func newTerminalProgressPrinter(verbosity uint, term *termstatus.Terminal) progress.Printer {
return &terminalProgressPrinter{
term: term,
Message: *ui.NewMessage(term, verbosity),
show: verbosity > 0,
}
}

cmd/restic/termstatus.go (new file)
View File

@ -0,0 +1,43 @@
package main
import (
"context"
"sync"
"github.com/restic/restic/internal/ui"
"github.com/restic/restic/internal/ui/termstatus"
)
// setupTermstatus creates a new termstatus and reroutes globalOptions.{stdout,stderr} to it.
// The returned function must be called to shut down the termstatus.
//
// Expected usage:
// ```
// term, cancel := setupTermstatus()
// defer cancel()
// // do stuff
// ```
func setupTermstatus() (*termstatus.Terminal, func()) {
var wg sync.WaitGroup
// only shutdown once cancel is called to ensure that no output is lost
cancelCtx, cancel := context.WithCancel(context.Background())
term := termstatus.New(globalOptions.stdout, globalOptions.stderr, globalOptions.Quiet)
wg.Add(1)
go func() {
defer wg.Done()
term.Run(cancelCtx)
}()
// use the termstatus for stdout/stderr
prevStdout, prevStderr := globalOptions.stdout, globalOptions.stderr
stdioWrapper := ui.NewStdioWrapper(term)
globalOptions.stdout, globalOptions.stderr = stdioWrapper.Stdout(), stdioWrapper.Stderr()
return term, func() {
// shutdown termstatus
globalOptions.stdout, globalOptions.stderr = prevStdout, prevStderr
cancel()
wg.Wait()
}
}
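
With this helper in place, each termstatus-based command reduces to the same pattern, as the backup, restore and repair packs hunks above show; condensed here for reference (backup shown, the other commands differ only in the run function they call):

```
RunE: func(cmd *cobra.Command, args []string) error {
	// reroute globalOptions.{stdout,stderr} through the terminal
	term, cancel := setupTermstatus()
	// cancel restores stdout/stderr and waits for the terminal goroutine to stop
	defer cancel()
	return runBackup(cmd.Context(), backupOptions, globalOptions, term, args)
},
```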

View File

@ -9,7 +9,6 @@ import (
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui/progress"
"golang.org/x/sync/errgroup"
)
@ -267,23 +266,22 @@ func (mi *MasterIndex) MergeFinalIndexes() error {
// Save saves all known indexes to index files, leaving out any
// packs whose ID is contained in packBlacklist from finalized indexes.
// The new index contains the IDs of all known indexes in the "supersedes"
// field. The IDs are also returned in the IDSet obsolete.
// After calling this function, you should remove the obsolete index files.
func (mi *MasterIndex) Save(ctx context.Context, repo restic.SaverUnpacked, packBlacklist restic.IDSet, extraObsolete restic.IDs, p *progress.Counter) (obsolete restic.IDSet, err error) {
p.SetMax(uint64(len(mi.Packs(packBlacklist))))
// It also removes the old index files and those listed in extraObsolete.
func (mi *MasterIndex) Save(ctx context.Context, repo restic.Repository, excludePacks restic.IDSet, extraObsolete restic.IDs, opts restic.MasterIndexSaveOpts) error {
p := opts.SaveProgress
p.SetMax(uint64(len(mi.Packs(excludePacks))))
mi.idxMutex.Lock()
defer mi.idxMutex.Unlock()
debug.Log("start rebuilding index of %d indexes, pack blacklist: %v", len(mi.idx), packBlacklist)
debug.Log("start rebuilding index of %d indexes, excludePacks: %v", len(mi.idx), excludePacks)
newIndex := NewIndex()
obsolete = restic.NewIDSet()
obsolete := restic.NewIDSet()
// track spawned goroutines using wg, create a new context which is
// cancelled as soon as an error occurs.
wg, ctx := errgroup.WithContext(ctx)
wg, wgCtx := errgroup.WithContext(ctx)
ch := make(chan *Index)
@ -310,21 +308,21 @@ func (mi *MasterIndex) Save(ctx context.Context, repo restic.SaverUnpacked, pack
debug.Log("adding index %d", i)
for pbs := range idx.EachByPack(ctx, packBlacklist) {
for pbs := range idx.EachByPack(wgCtx, excludePacks) {
newIndex.StorePack(pbs.PackID, pbs.Blobs)
p.Add(1)
if IndexFull(newIndex, mi.compress) {
select {
case ch <- newIndex:
case <-ctx.Done():
return ctx.Err()
case <-wgCtx.Done():
return wgCtx.Err()
}
newIndex = NewIndex()
}
}
}
err = newIndex.AddToSupersedes(extraObsolete...)
err := newIndex.AddToSupersedes(extraObsolete...)
if err != nil {
return err
}
@ -332,7 +330,7 @@ func (mi *MasterIndex) Save(ctx context.Context, repo restic.SaverUnpacked, pack
select {
case ch <- newIndex:
case <-ctx.Done():
case <-wgCtx.Done():
}
return nil
})
@ -341,7 +339,7 @@ func (mi *MasterIndex) Save(ctx context.Context, repo restic.SaverUnpacked, pack
worker := func() error {
for idx := range ch {
idx.Finalize()
if _, err := SaveIndex(ctx, repo, idx); err != nil {
if _, err := SaveIndex(wgCtx, repo, idx); err != nil {
return err
}
}
@ -354,9 +352,27 @@ func (mi *MasterIndex) Save(ctx context.Context, repo restic.SaverUnpacked, pack
for i := 0; i < workerCount; i++ {
wg.Go(worker)
}
err = wg.Wait()
err := wg.Wait()
p.Done()
if err != nil {
return err
}
return obsolete, err
if opts.SkipDeletion {
return nil
}
p = nil
if opts.DeleteProgress != nil {
p = opts.DeleteProgress()
}
defer p.Done()
return restic.ParallelRemove(ctx, repo, obsolete, restic.IndexFile, func(id restic.ID, err error) error {
if opts.DeleteReport != nil {
opts.DeleteReport(id, err)
}
return err
}, p)
}
// SaveIndex saves an index in the repository.

View File

@ -8,7 +8,6 @@ import (
"testing"
"time"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/checker"
"github.com/restic/restic/internal/crypto"
"github.com/restic/restic/internal/index"
@ -363,20 +362,11 @@ func testIndexSave(t *testing.T, version uint) {
t.Fatal(err)
}
obsoletes, err := repo.Index().Save(context.TODO(), repo, nil, nil, nil)
err = repo.Index().Save(context.TODO(), repo, nil, nil, restic.MasterIndexSaveOpts{})
if err != nil {
t.Fatalf("unable to save new index: %v", err)
}
for id := range obsoletes {
t.Logf("remove index %v", id.Str())
h := backend.Handle{Type: restic.IndexFile, Name: id.String()}
err = repo.Backend().Remove(context.TODO(), h)
if err != nil {
t.Errorf("error removing index %v: %v", id, err)
}
}
checker := checker.New(repo, false)
err = checker.LoadSnapshots(context.TODO())
if err != nil {

View File

@ -62,7 +62,7 @@ func createRandomBlobs(t testing.TB, repo restic.Repository, blobs int, pData fl
}
}
func createRandomWrongBlob(t testing.TB, repo restic.Repository) {
func createRandomWrongBlob(t testing.TB, repo restic.Repository) restic.BlobHandle {
length := randomSize(10*1024, 1024*1024) // 10KiB to 1MiB of data
buf := make([]byte, length)
rand.Read(buf)
@ -80,6 +80,7 @@ func createRandomWrongBlob(t testing.TB, repo restic.Repository) {
if err := repo.Flush(context.Background()); err != nil {
t.Fatalf("repo.Flush() returned error %v", err)
}
return restic.BlobHandle{ID: id, Type: restic.DataBlob}
}
// selectBlobs splits the list of all blobs randomly into two lists. A blob
@ -173,39 +174,27 @@ func flush(t *testing.T, repo restic.Repository) {
func rebuildIndex(t *testing.T, repo restic.Repository) {
err := repo.SetIndex(index.NewMasterIndex())
if err != nil {
t.Fatal(err)
}
rtest.OK(t, err)
packs := make(map[restic.ID]int64)
err = repo.List(context.TODO(), restic.PackFile, func(id restic.ID, size int64) error {
packs[id] = size
return nil
})
if err != nil {
t.Fatal(err)
}
rtest.OK(t, err)
_, err = repo.(*repository.Repository).CreateIndexFromPacks(context.TODO(), packs, nil)
if err != nil {
t.Fatal(err)
}
rtest.OK(t, err)
var obsoleteIndexes restic.IDs
err = repo.List(context.TODO(), restic.IndexFile, func(id restic.ID, size int64) error {
h := backend.Handle{
Type: restic.IndexFile,
Name: id.String(),
}
return repo.Backend().Remove(context.TODO(), h)
obsoleteIndexes = append(obsoleteIndexes, id)
return nil
})
if err != nil {
t.Fatal(err)
}
rtest.OK(t, err)
_, err = repo.Index().Save(context.TODO(), repo, restic.NewIDSet(), nil, nil)
if err != nil {
t.Fatal(err)
}
err = repo.Index().Save(context.TODO(), repo, restic.NewIDSet(), obsoleteIndexes, restic.MasterIndexSaveOpts{})
rtest.OK(t, err)
}
func reloadIndex(t *testing.T, repo restic.Repository) {

View File

@ -0,0 +1,88 @@
package repository
import (
"context"
"errors"
"io"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/ui/progress"
"golang.org/x/sync/errgroup"
)
func RepairPacks(ctx context.Context, repo restic.Repository, ids restic.IDSet, printer progress.Printer) error {
wg, wgCtx := errgroup.WithContext(ctx)
repo.StartPackUploader(wgCtx, wg)
printer.P("salvaging intact data from specified pack files")
bar := printer.NewCounter("pack files")
bar.SetMax(uint64(len(ids)))
defer bar.Done()
wg.Go(func() error {
// examine all data the indexes have for the pack file
for b := range repo.Index().ListPacks(wgCtx, ids) {
blobs := b.Blobs
if len(blobs) == 0 {
printer.E("no blobs found for pack %v", b.PackID)
bar.Add(1)
continue
}
err := repo.LoadBlobsFromPack(wgCtx, b.PackID, blobs, func(blob restic.BlobHandle, buf []byte, err error) error {
if err != nil {
// Fallback path
buf, err = repo.LoadBlob(wgCtx, blob.Type, blob.ID, nil)
if err != nil {
printer.E("failed to load blob %v: %v", blob.ID, err)
return nil
}
}
id, _, _, err := repo.SaveBlob(wgCtx, blob.Type, buf, restic.ID{}, true)
if !id.Equal(blob.ID) {
panic("pack id mismatch during upload")
}
return err
})
// ignore truncated file parts
if err != nil && !errors.Is(err, io.ErrUnexpectedEOF) {
return err
}
bar.Add(1)
}
return repo.Flush(wgCtx)
})
err := wg.Wait()
bar.Done()
if err != nil {
return err
}
// remove salvaged packs from index
printer.P("rebuilding index")
bar = printer.NewCounter("packs processed")
err = repo.Index().Save(ctx, repo, ids, nil, restic.MasterIndexSaveOpts{
SaveProgress: bar,
DeleteProgress: func() *progress.Counter {
return printer.NewCounter("old indexes deleted")
},
DeleteReport: func(id restic.ID, err error) {
printer.VV("removed index %v", id.String())
},
})
if err != nil {
return err
}
// cleanup
printer.P("removing salvaged pack files")
// if we fail to delete the damaged pack files, then prune will remove them later on
bar = printer.NewCounter("files deleted")
_ = restic.ParallelRemove(ctx, repo, ids, restic.PackFile, nil, bar)
bar.Done()
return nil
}
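
The exported helper needs only a loaded repository, the set of damaged pack IDs, and a progress.Printer; a minimal calling sketch, assuming ctx, repo and ids are already set up (the command above passes a terminal-backed printer, the test below a NoopPrinter):

```
// salvage readable blobs from the damaged packs, rewrite the index,
// and delete the damaged pack files; progress output is discarded here
if err := repository.RepairPacks(ctx, repo, ids, &progress.NoopPrinter{}); err != nil {
	return err
}
```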

View File

@ -0,0 +1,130 @@
package repository_test
import (
"context"
"math/rand"
"testing"
"time"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/index"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/test"
rtest "github.com/restic/restic/internal/test"
"github.com/restic/restic/internal/ui/progress"
)
func listBlobs(repo restic.Repository) restic.BlobSet {
blobs := restic.NewBlobSet()
repo.Index().Each(context.TODO(), func(pb restic.PackedBlob) {
blobs.Insert(pb.BlobHandle)
})
return blobs
}
func replaceFile(t *testing.T, repo restic.Repository, h backend.Handle, damage func([]byte) []byte) {
buf, err := backend.LoadAll(context.TODO(), nil, repo.Backend(), h)
test.OK(t, err)
buf = damage(buf)
test.OK(t, repo.Backend().Remove(context.TODO(), h))
test.OK(t, repo.Backend().Save(context.TODO(), h, backend.NewByteReader(buf, repo.Backend().Hasher())))
}
func TestRepairBrokenPack(t *testing.T) {
repository.TestAllVersions(t, testRepairBrokenPack)
}
func testRepairBrokenPack(t *testing.T, version uint) {
tests := []struct {
name string
damage func(repo restic.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet)
}{
{
"valid pack",
func(repo restic.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) {
return packsBefore, restic.NewBlobSet()
},
},
{
"broken pack",
func(repo restic.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) {
wrongBlob := createRandomWrongBlob(t, repo)
damagedPacks := findPacksForBlobs(t, repo, restic.NewBlobSet(wrongBlob))
return damagedPacks, restic.NewBlobSet(wrongBlob)
},
},
{
"partially broken pack",
func(repo restic.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) {
// damage one of the pack files
damagedID := packsBefore.List()[0]
replaceFile(t, repo, backend.Handle{Type: backend.PackFile, Name: damagedID.String()},
func(buf []byte) []byte {
buf[0] ^= 0xff
return buf
})
// find blob that starts at offset 0
var damagedBlob restic.BlobHandle
for blobs := range repo.Index().ListPacks(context.TODO(), restic.NewIDSet(damagedID)) {
for _, blob := range blobs.Blobs {
if blob.Offset == 0 {
damagedBlob = blob.BlobHandle
}
}
}
return restic.NewIDSet(damagedID), restic.NewBlobSet(damagedBlob)
},
}, {
"truncated pack",
func(repo restic.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) {
// damage one of the pack files
damagedID := packsBefore.List()[0]
replaceFile(t, repo, backend.Handle{Type: backend.PackFile, Name: damagedID.String()},
func(buf []byte) []byte {
buf = buf[0:10]
return buf
})
// all blobs in the file are broken
damagedBlobs := restic.NewBlobSet()
for blobs := range repo.Index().ListPacks(context.TODO(), restic.NewIDSet(damagedID)) {
for _, blob := range blobs.Blobs {
damagedBlobs.Insert(blob.BlobHandle)
}
}
return restic.NewIDSet(damagedID), damagedBlobs
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
repo := repository.TestRepositoryWithVersion(t, version)
seed := time.Now().UnixNano()
rand.Seed(seed)
t.Logf("rand seed is %v", seed)
createRandomBlobs(t, repo, 5, 0.7)
packsBefore := listPacks(t, repo)
blobsBefore := listBlobs(repo)
toRepair, damagedBlobs := test.damage(repo, packsBefore)
rtest.OK(t, repository.RepairPacks(context.TODO(), repo, toRepair, &progress.NoopPrinter{}))
// reload index
rtest.OK(t, repo.SetIndex(index.NewMasterIndex()))
rtest.OK(t, repo.LoadIndex(context.TODO(), nil))
packsAfter := listPacks(t, repo)
blobsAfter := listBlobs(repo)
rtest.Assert(t, len(packsAfter.Intersect(toRepair)) == 0, "some damaged packs were not removed")
rtest.Assert(t, len(packsBefore.Sub(toRepair).Sub(packsAfter)) == 0, "not-damaged packs were removed")
rtest.Assert(t, blobsBefore.Sub(damagedBlobs).Equals(blobsAfter), "diverging blob lists")
})
}
}

View File

@ -28,10 +28,19 @@ var testSizes = []int{5, 23, 2<<18 + 23, 1 << 20}
var rnd = rand.New(rand.NewSource(time.Now().UnixNano()))
func TestSave(t *testing.T) {
repository.TestAllVersions(t, testSave)
repository.TestAllVersions(t, testSavePassID)
repository.TestAllVersions(t, testSaveCalculateID)
}
func testSave(t *testing.T, version uint) {
func testSavePassID(t *testing.T, version uint) {
testSave(t, version, false)
}
func testSaveCalculateID(t *testing.T, version uint) {
testSave(t, version, true)
}
func testSave(t *testing.T, version uint, calculateID bool) {
repo := repository.TestRepositoryWithVersion(t, version)
for _, size := range testSizes {
@ -45,51 +54,14 @@ func testSave(t *testing.T, version uint) {
repo.StartPackUploader(context.TODO(), &wg)
// save
sid, _, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, data, restic.ID{}, false)
inputID := restic.ID{}
if !calculateID {
inputID = id
}
sid, _, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, data, inputID, false)
rtest.OK(t, err)
rtest.Equals(t, id, sid)
rtest.OK(t, repo.Flush(context.Background()))
// rtest.OK(t, repo.SaveIndex())
// read back
buf, err := repo.LoadBlob(context.TODO(), restic.DataBlob, id, nil)
rtest.OK(t, err)
rtest.Equals(t, size, len(buf))
rtest.Assert(t, len(buf) == len(data),
"number of bytes read back does not match: expected %d, got %d",
len(data), len(buf))
rtest.Assert(t, bytes.Equal(buf, data),
"data does not match: expected %02x, got %02x",
data, buf)
}
}
func TestSaveFrom(t *testing.T) {
repository.TestAllVersions(t, testSaveFrom)
}
func testSaveFrom(t *testing.T, version uint) {
repo := repository.TestRepositoryWithVersion(t, version)
for _, size := range testSizes {
data := make([]byte, size)
_, err := io.ReadFull(rnd, data)
rtest.OK(t, err)
id := restic.Hash(data)
var wg errgroup.Group
repo.StartPackUploader(context.TODO(), &wg)
// save
id2, _, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, data, id, false)
rtest.OK(t, err)
rtest.Equals(t, id, id2)
rtest.OK(t, repo.Flush(context.Background()))
// read back

View File

@ -3,7 +3,9 @@ package restic
import (
"context"
"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/debug"
"github.com/restic/restic/internal/ui/progress"
"golang.org/x/sync/errgroup"
)
@ -50,3 +52,43 @@ func ParallelList(ctx context.Context, r Lister, t FileType, parallelism uint, f
return wg.Wait()
}
// ParallelRemove deletes the given fileList of fileType in parallel.
// If the callback returns an error, the operation aborts.
func ParallelRemove(ctx context.Context, repo Repository, fileList IDSet, fileType FileType, report func(id ID, err error) error, bar *progress.Counter) error {
fileChan := make(chan ID)
wg, ctx := errgroup.WithContext(ctx)
wg.Go(func() error {
defer close(fileChan)
for id := range fileList {
select {
case fileChan <- id:
case <-ctx.Done():
return ctx.Err()
}
}
return nil
})
bar.SetMax(uint64(len(fileList)))
// deleting files is IO-bound
workerCount := repo.Connections()
for i := 0; i < int(workerCount); i++ {
wg.Go(func() error {
for id := range fileChan {
h := backend.Handle{Type: fileType, Name: id.String()}
err := repo.Backend().Remove(ctx, h)
if report != nil {
err = report(id, err)
}
if err != nil {
return err
}
bar.Add(1)
}
return nil
})
}
return wg.Wait()
}
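
As a usage sketch (not part of this commit), a caller could remove a set of snapshot files while only logging individual failures; ctx, repo and the IDSet ids are assumed to exist, and the final counter argument may be nil since nil counters act as no-ops:

```
err := restic.ParallelRemove(ctx, repo, ids, restic.SnapshotFile,
	func(id restic.ID, err error) error {
		if err != nil {
			// log and continue; returning the error instead would abort all workers
			fmt.Printf("failed to remove snapshot %v: %v\n", id, err)
		}
		return nil
	}, nil)
```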

View File

@ -89,6 +89,13 @@ type PackBlobs struct {
Blobs []Blob
}
type MasterIndexSaveOpts struct {
SaveProgress *progress.Counter
DeleteProgress func() *progress.Counter
DeleteReport func(id ID, err error)
SkipDeletion bool
}
// MasterIndex keeps track of which blobs are stored in which files.
type MasterIndex interface {
Has(BlobHandle) bool
@ -99,7 +106,7 @@ type MasterIndex interface {
Each(ctx context.Context, fn func(PackedBlob))
ListPacks(ctx context.Context, packs IDSet) <-chan PackBlobs
Save(ctx context.Context, repo SaverUnpacked, packBlacklist IDSet, extraObsolete IDs, p *progress.Counter) (obsolete IDSet, err error)
Save(ctx context.Context, repo Repository, excludePacks IDSet, extraObsolete IDs, opts MasterIndexSaveOpts) error
}
// Lister allows listing files in a backend.

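To illustrate the new options struct, a caller that rebuilds the index but keeps the superseded index files in place (the prune --unsafe-recovery case above) might call Save roughly as follows; ctx, repo, removePacks and the saveBar counter are assumed to exist:

```
err := repo.Index().Save(ctx, repo, removePacks, nil, restic.MasterIndexSaveOpts{
	SaveProgress: saveBar, // counts packs while the new index files are written
	DeleteReport: func(id restic.ID, err error) {
		// called once per obsolete index file; not reached when SkipDeletion is set
	},
	SkipDeletion: true, // leave the old index files in place
})
```
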
View File

@ -0,0 +1,30 @@
package progress
// A Printer can return a new counter or print messages
// at different log levels.
// It must be safe to call its methods from concurrent goroutines.
type Printer interface {
NewCounter(description string) *Counter
E(msg string, args ...interface{})
P(msg string, args ...interface{})
V(msg string, args ...interface{})
VV(msg string, args ...interface{})
}
// NoopPrinter discards all messages
type NoopPrinter struct{}
var _ Printer = (*NoopPrinter)(nil)
func (*NoopPrinter) NewCounter(_ string) *Counter {
return nil
}
func (*NoopPrinter) E(_ string, _ ...interface{}) {}
func (*NoopPrinter) P(_ string, _ ...interface{}) {}
func (*NoopPrinter) V(_ string, _ ...interface{}) {}
func (*NoopPrinter) VV(_ string, _ ...interface{}) {}
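
For illustration only, a minimal stdout-backed Printer (hypothetical, not part of this commit) shows how little the interface demands; it would live in the same package and additionally import "fmt":

```
// stdoutPrinter writes every message to standard output and requests no live counters.
type stdoutPrinter struct{}

var _ Printer = (*stdoutPrinter)(nil)

func (*stdoutPrinter) NewCounter(_ string) *Counter {
	// a nil *Counter is treated as a no-op counter,
	// which is what NoopPrinter relies on as well
	return nil
}

func (*stdoutPrinter) E(msg string, args ...interface{})  { fmt.Printf("error: "+msg+"\n", args...) }
func (*stdoutPrinter) P(msg string, args ...interface{})  { fmt.Printf(msg+"\n", args...) }
func (*stdoutPrinter) V(msg string, args ...interface{})  { fmt.Printf(msg+"\n", args...) }
func (*stdoutPrinter) VV(msg string, args ...interface{}) { fmt.Printf(msg+"\n", args...) }
```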