
Improve command shutdown on context cancellation

Michael Eischer 2024-03-30 00:19:58 +01:00
parent 910927670f
commit 31624aeffd
17 changed files with 80 additions and 6 deletions
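
The recurring pattern in these changes: restic's snapshot and index iterators (such as the FindFilteredSnapshots loops and the per-blob callbacks below) stop early and silently when the context is cancelled, so each command now checks ctx.Err() after such loops and returns the cancellation error instead of continuing with partial data. A minimal, self-contained sketch of that idea, assuming hypothetical stand-ins (loadSnapshots and runCommand are illustrative, not restic APIs):

package main

import (
	"context"
	"fmt"
)

// loadSnapshots is a stand-in for an iterator like FindFilteredSnapshots:
// on cancellation it simply stops sending and closes the channel; it does
// not report an error itself.
func loadSnapshots(ctx context.Context) <-chan string {
	ch := make(chan string)
	go func() {
		defer close(ch)
		for i := 0; i < 3; i++ {
			select {
			case <-ctx.Done():
				return // cancellation ends the stream early and silently
			case ch <- fmt.Sprintf("snapshot-%d", i):
			}
		}
	}()
	return ch
}

func runCommand(ctx context.Context) error {
	var snapshots []string
	for sn := range loadSnapshots(ctx) {
		snapshots = append(snapshots, sn)
	}
	// Without this check a cancelled run would continue with a partial
	// (possibly empty) snapshot list; this is the kind of check the
	// commit adds after such loops in the individual commands.
	if ctx.Err() != nil {
		return ctx.Err()
	}
	fmt.Printf("processing %d snapshots\n", len(snapshots))
	return nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // simulate Ctrl+C arriving before or while the command runs
	if err := runCommand(ctx); err != nil {
		fmt.Println("command aborted:", err) // prints "context canceled"
	}
}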

View File

@@ -219,6 +219,9 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
Verbosef("load indexes\n")
bar := newIndexProgress(gopts.Quiet, gopts.JSON)
hints, errs := chkr.LoadIndex(ctx, bar)
if ctx.Err() != nil {
return ctx.Err()
}
errorsFound := false
suggestIndexRebuild := false
@@ -280,6 +283,9 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
if orphanedPacks > 0 {
Verbosef("%d additional files were found in the repo, which likely contain duplicate data.\nThis is non-critical, you can run `restic prune` to correct this.\n", orphanedPacks)
}
if ctx.Err() != nil {
return ctx.Err()
}
Verbosef("check snapshots, trees and blobs\n")
errChan = make(chan error)
@@ -313,6 +319,9 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
// Must happen after `errChan` is read from in the above loop to avoid
// deadlocking in the case of errors.
wg.Wait()
if ctx.Err() != nil {
return ctx.Err()
}
if opts.CheckUnused {
for _, id := range chkr.UnusedBlobs(ctx) {
@@ -392,10 +401,13 @@ func runCheck(ctx context.Context, opts CheckOptions, gopts GlobalOptions, args
doReadData(packs)
}
if ctx.Err() != nil {
return ctx.Err()
}
if errorsFound {
return errors.Fatal("repository contains errors")
}
Verbosef("no errors were found\n")
return nil

View File

@@ -103,6 +103,9 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []
// also consider identical snapshot copies
dstSnapshotByOriginal[*sn.ID()] = append(dstSnapshotByOriginal[*sn.ID()], sn)
}
if ctx.Err() != nil {
return ctx.Err()
}
// remember already processed trees across all snapshots
visitedTrees := restic.NewIDSet()
@@ -147,7 +150,7 @@ func runCopy(ctx context.Context, opts CopyOptions, gopts GlobalOptions, args []
}
Verbosef("snapshot %s saved\n", newID.Str())
}
-return nil
+return ctx.Err()
}
func similarSnapshots(sna *restic.Snapshot, snb *restic.Snapshot) bool {

View File

@@ -608,6 +608,9 @@ func runFind(ctx context.Context, opts FindOptions, gopts GlobalOptions, args []
for sn := range FindFilteredSnapshots(ctx, snapshotLister, repo, &opts.SnapshotFilter, opts.Snapshots) {
filteredSnapshots = append(filteredSnapshots, sn)
}
if ctx.Err() != nil {
return ctx.Err()
}
sort.Slice(filteredSnapshots, func(i, j int) bool {
return filteredSnapshots[i].Time.Before(filteredSnapshots[j].Time)

View File

@@ -188,6 +188,9 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption
for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) {
snapshots = append(snapshots, sn)
}
if ctx.Err() != nil {
return ctx.Err()
}
var jsonGroups []*ForgetGroup
@@ -270,6 +273,10 @@ func runForget(ctx context.Context, opts ForgetOptions, pruneOptions PruneOption
}
}
if ctx.Err() != nil {
return ctx.Err()
}
if len(removeSnIDs) > 0 {
if !opts.DryRun {
bar := printer.NewCounter("files deleted")

View File

@@ -197,6 +197,9 @@ func runPruneWithRepo(ctx context.Context, opts PruneOptions, gopts GlobalOption
if err != nil {
return err
}
if ctx.Err() != nil {
return ctx.Err()
}
if popts.DryRun {
printer.P("\nWould have made the following changes:")

View File

@@ -66,11 +66,17 @@ func runRecover(ctx context.Context, gopts GlobalOptions) error {
trees[blob.Blob.ID] = false
}
})
if ctx.Err() != nil {
return ctx.Err()
}
Verbosef("load %d trees\n", len(trees))
bar = newProgressMax(!gopts.Quiet, uint64(len(trees)), "trees loaded")
for id := range trees {
tree, err := restic.LoadTree(ctx, repo, id)
if ctx.Err() != nil {
return ctx.Err()
}
if err != nil {
Warnf("unable to load tree %v: %v\n", id.Str(), err)
continue

View File

@@ -145,6 +145,9 @@ func runRepairSnapshots(ctx context.Context, gopts GlobalOptions, opts RepairOpt
changedCount++
}
}
if ctx.Err() != nil {
return ctx.Err()
}
Verbosef("\n")
if changedCount == 0 {

View File

@@ -294,6 +294,9 @@ func runRewrite(ctx context.Context, opts RewriteOptions, gopts GlobalOptions, a
changedCount++
}
}
if ctx.Err() != nil {
return ctx.Err()
}
Verbosef("\n")
if changedCount == 0 {

View File

@@ -69,6 +69,9 @@ func runSnapshots(ctx context.Context, opts SnapshotOptions, gopts GlobalOptions
for sn := range FindFilteredSnapshots(ctx, repo, repo, &opts.SnapshotFilter, args) {
snapshots = append(snapshots, sn)
}
if ctx.Err() != nil {
return ctx.Err()
}
snapshotGroups, grouped, err := restic.GroupSnapshots(snapshots, opts.GroupBy)
if err != nil {
return err

View File

@@ -117,9 +117,8 @@ func runStats(ctx context.Context, opts StatsOptions, gopts GlobalOptions, args
return fmt.Errorf("error walking snapshot: %v", err)
}
}
-if err != nil {
-return err
+if ctx.Err() != nil {
+return ctx.Err()
}
if opts.countMode == countModeRawData {

View File

@@ -122,6 +122,9 @@ func runTag(ctx context.Context, opts TagOptions, gopts GlobalOptions, args []st
changeCnt++
}
}
if ctx.Err() != nil {
return ctx.Err()
}
if changeCnt == 0 {
Verbosef("no snapshots were modified\n")
} else {

View File

@@ -380,6 +380,7 @@ func (fn *FutureNode) take(ctx context.Context) futureNodeResult {
return res
}
case <-ctx.Done():
return futureNodeResult{err: ctx.Err()}
}
return futureNodeResult{err: errors.Errorf("no result")}
}

View File

@@ -90,6 +90,10 @@ func (s *TreeSaver) save(ctx context.Context, job *saveTreeJob) (*restic.Node, I
// return the error if it wasn't ignored
if fnr.err != nil {
debug.Log("err for %v: %v", fnr.snPath, fnr.err)
if fnr.err == context.Canceled {
return nil, stats, fnr.err
}
fnr.err = s.errFn(fnr.target, fnr.err)
if fnr.err == nil {
// ignore error

View File

@@ -320,6 +320,9 @@ func (mi *MasterIndex) Save(ctx context.Context, repo restic.Repository, exclude
newIndex = NewIndex()
}
}
if wgCtx.Err() != nil {
return wgCtx.Err()
}
}
err := newIndex.AddToSupersedes(extraObsolete...)

View File

@@ -130,6 +130,9 @@ func PlanPrune(ctx context.Context, opts PruneOptions, repo restic.Repository, g
}
keepBlobs.Delete(blob.BlobHandle)
})
if ctx.Err() != nil {
return nil, ctx.Err()
}
if keepBlobs.Len() < blobCount/2 {
// replace with copy to shrink map to necessary size if there's a chance to benefit
@@ -166,6 +169,9 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re
usedBlobs[bh] = count
}
})
if ctx.Err() != nil {
return nil, nil, ctx.Err()
}
// Check if all used blobs have been found in index
missingBlobs := restic.NewBlobSet()
@@ -240,6 +246,9 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re
// update indexPack
indexPack[blob.PackID] = ip
})
if ctx.Err() != nil {
return nil, nil, ctx.Err()
}
// if duplicate blobs exist, those will be set to either "used" or "unused":
// - mark only one occurrence of duplicate blobs as used
@@ -286,6 +295,9 @@ func packInfoFromIndex(ctx context.Context, idx restic.MasterIndex, usedBlobs re
indexPack[blob.PackID] = ip
})
}
if ctx.Err() != nil {
return nil, nil, ctx.Err()
}
// Sanity check. If no duplicates exist, all blobs have value 1. After handling
// duplicates, this also applies to duplicates.
@@ -528,6 +540,9 @@ func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) (e
printer.P("deleting unreferenced packs\n")
_ = deleteFiles(ctx, true, repo, plan.removePacksFirst, restic.PackFile, printer)
}
if ctx.Err() != nil {
return ctx.Err()
}
if len(plan.repackPacks) != 0 {
printer.P("repacking packs\n")
@@ -578,6 +593,9 @@ func (plan *PrunePlan) Execute(ctx context.Context, printer progress.Printer) (e
printer.P("removing %d old packs\n", len(plan.removePacks))
_ = deleteFiles(ctx, true, repo, plan.removePacks, restic.PackFile, printer)
}
if ctx.Err() != nil {
return ctx.Err()
}
if plan.opts.UnsafeRecovery {
err = rebuildIndexFiles(ctx, repo, plan.ignorePacks, nil, true, printer)

View File

@@ -72,7 +72,7 @@ func repack(ctx context.Context, repo restic.Repository, dstRepo restic.Reposito
return wgCtx.Err()
}
}
-return nil
+return wgCtx.Err()
})
worker := func() error {

View File

@@ -713,6 +713,9 @@ func (r *Repository) LoadIndex(ctx context.Context, p *progress.Counter) error {
return errors.New("index uses feature not supported by repository version 1")
}
}
if ctx.Err() != nil {
return ctx.Err()
}
// remove index files from the cache which have been removed in the repo
return r.prepareCache()