Merge pull request #1036 from restic/prune-remove-invalid-files
prune: Remove invalid files
This commit is contained in commit c4592f577a.
@@ -40,6 +40,12 @@ Small changes
    https://github.com/restic/restic/issues/1023
    https://github.com/restic/restic/pull/1025
 
+ * The `prune` command has been improved and will now remove invalid pack
+   files, for example files that have not been uploaded completely because a
+   backup was interrupted.
+   https://github.com/restic/restic/issues/1029
+   https://github.com/restic/restic/pull/1036
+
 Important Changes in 0.6.1
 ==========================
 
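In short, pruneRepository now receives the IDs of pack files that could not be listed from index.New and schedules them for deletion alongside the unreferenced packs. The block below is a condensed sketch of the flow introduced by the hunks that follow; it reuses the identifiers from the diff but elides the rest of pruneRepository, so treat it as orientation rather than a verbatim excerpt.

    // Condensed sketch (identifiers as in the hunks below; unrelated code elided).
    // index.New now also reports the pack files it could not list.
    idx, invalidFiles, err := index.New(ctx, repo, restic.NewIDSet(), bar)
    if err != nil {
        return err
    }

    // Each unreadable pack file is announced to the user ...
    for _, id := range invalidFiles {
        Warnf("incomplete pack file (will be removed): %v\n", id)
    }

    // ... and later added to the set of packs scheduled for removal.
    removePacks := restic.NewIDSet()

    Verbosef("will remove %d invalid files\n", len(invalidFiles))
    for _, id := range invalidFiles {
        removePacks.Insert(id)
    }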
@@ -106,11 +106,15 @@ func pruneRepository(gopts GlobalOptions, repo restic.Repository) error {
 	Verbosef("building new index for repo\n")
 
 	bar := newProgressMax(!gopts.Quiet, uint64(stats.packs), "packs")
-	idx, err := index.New(ctx, repo, restic.NewIDSet(), bar)
+	idx, invalidFiles, err := index.New(ctx, repo, restic.NewIDSet(), bar)
 	if err != nil {
 		return err
 	}
 
+	for _, id := range invalidFiles {
+		Warnf("incomplete pack file (will be removed): %v\n", id)
+	}
+
 	blobs := 0
 	for _, pack := range idx.Packs {
 		stats.bytes += pack.Size
@@ -196,6 +200,12 @@ func pruneRepository(gopts GlobalOptions, repo restic.Repository) error {
 
 	// find packs that are unneeded
 	removePacks := restic.NewIDSet()
+
+	Verbosef("will remove %d invalid files\n", len(invalidFiles))
+	for _, id := range invalidFiles {
+		removePacks.Insert(id)
+	}
+
 	for packID, p := range idx.Packs {
 
 		hasActiveBlob := false
@@ -50,7 +50,7 @@ func rebuildIndex(ctx context.Context, repo restic.Repository, ignorePacks resti
 	}
 
 	bar := newProgressMax(!globalOptions.Quiet, packs-uint64(len(ignorePacks)), "packs")
-	idx, err := index.New(ctx, repo, ignorePacks, bar)
+	idx, _, err := index.New(ctx, repo, ignorePacks, bar)
 	if err != nil {
 		return err
 	}
@@ -8,6 +8,7 @@ import (
 	"restic"
 	"restic/debug"
 	"restic/list"
+	"restic/pack"
 	"restic/worker"
 
 	"restic/errors"
@@ -33,22 +34,29 @@ func newIndex() *Index {
 	}
 }
 
-// New creates a new index for repo from scratch.
-func New(ctx context.Context, repo restic.Repository, ignorePacks restic.IDSet, p *restic.Progress) (*Index, error) {
+// New creates a new index for repo from scratch. InvalidFiles contains all IDs
+// of files that cannot be listed successfully.
+func New(ctx context.Context, repo restic.Repository, ignorePacks restic.IDSet, p *restic.Progress) (idx *Index, invalidFiles restic.IDs, err error) {
 	p.Start()
 	defer p.Done()
 
 	ch := make(chan worker.Job)
 	go list.AllPacks(ctx, repo, ignorePacks, ch)
 
-	idx := newIndex()
+	idx = newIndex()
 
 	for job := range ch {
 		p.Report(restic.Stat{Blobs: 1})
 
 		packID := job.Data.(restic.ID)
 		if job.Error != nil {
-			fmt.Fprintf(os.Stderr, "unable to list pack %v: %v\n", packID.Str(), job.Error)
+			cause := errors.Cause(job.Error)
+			if _, ok := cause.(pack.InvalidFileError); ok {
+				invalidFiles = append(invalidFiles, packID)
+				continue
+			}
+
+			fmt.Fprintf(os.Stderr, "pack file cannot be listed %v: %v\n", packID.Str(), job.Error)
 			continue
 		}
 
@@ -58,11 +66,11 @@ func New(ctx context.Context, repo restic.Repository, ignorePacks restic.IDSet,
 
 		err := idx.AddPack(packID, j.Size(), j.Entries())
 		if err != nil {
-			return nil, err
+			return nil, nil, err
 		}
 	}
 
-	return idx, nil
+	return idx, invalidFiles, nil
 }
 
 type packJSON struct {
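The key change in index.New is how listing errors are classified: errors.Cause (restic's errors package re-exports this from github.com/pkg/errors, whose Wrap and Cause are used in the sketch) unwinds any wrapping, and the concrete cause is type-asserted against pack.InvalidFileError; only that case is recorded as an invalid file, while every other error is still just reported. Below is a minimal, self-contained sketch of this pattern, using a stand-in error type rather than restic's actual pack package.

    package main

    import (
        "fmt"

        "github.com/pkg/errors"
    )

    // InvalidFileError stands in for pack.InvalidFileError; the real type is
    // defined in restic's pack package.
    type InvalidFileError struct{ Message string }

    func (e InvalidFileError) Error() string { return e.Message }

    // listPack simulates a pack listing that fails because the file is
    // truncated, with extra context wrapped around the original error.
    func listPack() error {
        return errors.Wrap(InvalidFileError{Message: "file is too short"}, "readHeader")
    }

    func main() {
        err := listPack()

        // errors.Cause strips the wrapping added by errors.Wrap so the
        // concrete type underneath can be inspected, as in index.New above.
        if _, ok := errors.Cause(err).(InvalidFileError); ok {
            fmt.Println("invalid pack file, remember it for removal")
            return
        }
        fmt.Println("other listing error:", err)
    }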
@@ -43,7 +43,7 @@ func TestIndexNew(t *testing.T) {
 	repo, cleanup := createFilledRepo(t, 3, 0)
 	defer cleanup()
 
-	idx, err := New(context.TODO(), repo, restic.NewIDSet(), nil)
+	idx, _, err := New(context.TODO(), repo, restic.NewIDSet(), nil)
 	if err != nil {
 		t.Fatalf("New() returned error %v", err)
 	}
@@ -70,7 +70,7 @@ func TestIndexLoad(t *testing.T) {
 
 	validateIndex(t, repo, loadIdx)
 
-	newIdx, err := New(context.TODO(), repo, restic.NewIDSet(), nil)
+	newIdx, _, err := New(context.TODO(), repo, restic.NewIDSet(), nil)
 	if err != nil {
 		t.Fatalf("New() returned error %v", err)
 	}
@@ -134,7 +134,7 @@ func BenchmarkIndexNew(b *testing.B) {
 	b.ResetTimer()
 
 	for i := 0; i < b.N; i++ {
-		idx, err := New(context.TODO(), repo, restic.NewIDSet(), nil)
+		idx, _, err := New(context.TODO(), repo, restic.NewIDSet(), nil)
 
 		if err != nil {
 			b.Fatalf("New() returned error %v", err)
@@ -151,7 +151,7 @@ func BenchmarkIndexSave(b *testing.B) {
 	repo, cleanup := repository.TestRepository(b)
 	defer cleanup()
 
-	idx, err := New(context.TODO(), repo, restic.NewIDSet(), nil)
+	idx, _, err := New(context.TODO(), repo, restic.NewIDSet(), nil)
 	test.OK(b, err)
 
 	for i := 0; i < 8000; i++ {
@@ -184,7 +184,7 @@ func TestIndexDuplicateBlobs(t *testing.T) {
 	repo, cleanup := createFilledRepo(t, 3, 0.01)
 	defer cleanup()
 
-	idx, err := New(context.TODO(), repo, restic.NewIDSet(), nil)
+	idx, _, err := New(context.TODO(), repo, restic.NewIDSet(), nil)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -147,7 +147,7 @@ func saveIndex(t *testing.T, repo restic.Repository) {
 }
 
 func rebuildIndex(t *testing.T, repo restic.Repository) {
-	idx, err := index.New(context.TODO(), repo, restic.NewIDSet(), nil)
+	idx, _, err := index.New(context.TODO(), repo, restic.NewIDSet(), nil)
 	if err != nil {
 		t.Fatal(err)
 	}