From 92816fa9664797aba4bc75a94f06479b2e4fad5a Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 11 Apr 2022 21:06:37 +0200 Subject: [PATCH 01/15] init: Enable compression support by default --- internal/restic/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/restic/config.go b/internal/restic/config.go index 6df32e2ef..ae4be0aa3 100644 --- a/internal/restic/config.go +++ b/internal/restic/config.go @@ -23,7 +23,7 @@ const MaxRepoVersion = 2 // StableRepoVersion is the version that is written to the config when a repository // is newly created with Init(). -const StableRepoVersion = 1 +const StableRepoVersion = 2 // JSONUnpackedLoader loads unpacked JSON. type JSONUnpackedLoader interface { From 3af6c180e402bd81becca48acc8ec897cb4542d7 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 31 Mar 2022 21:11:56 +0200 Subject: [PATCH 02/15] Improve migrate command --- cmd/restic/cmd_migrate.go | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/cmd/restic/cmd_migrate.go b/cmd/restic/cmd_migrate.go index 4af98005e..f82439715 100644 --- a/cmd/restic/cmd_migrate.go +++ b/cmd/restic/cmd_migrate.go @@ -8,11 +8,12 @@ import ( ) var cmdMigrate = &cobra.Command{ - Use: "migrate [flags] [name]", + Use: "migrate [flags] [migration name] [...]", Short: "Apply migrations", Long: ` -The "migrate" command applies migrations to a repository. When no migration -name is explicitly given, a list of migrations that can be applied is printed. +The "migrate" command checks which migrations can be applied for a repository +and prints a list with available migration names. If one or more migration +names are specified, these migrations are applied. EXIT STATUS =========== @@ -41,6 +42,8 @@ func init() { func checkMigrations(opts MigrateOptions, gopts GlobalOptions, repo restic.Repository) error { ctx := gopts.ctx Printf("available migrations:\n") + found := false + for _, m := range migrations.All { ok, err := m.Check(ctx, repo) if err != nil { @@ -48,10 +51,15 @@ func checkMigrations(opts MigrateOptions, gopts GlobalOptions, repo restic.Repos } if ok { - Printf(" %v: %v\n", m.Name(), m.Desc()) + Printf(" %v\t%v\n", m.Name(), m.Desc()) + found = true } } + if !found { + Printf("no migrations found") + } + return nil } From 82ed5a3a157e869b04caea0933834de7d9af9d56 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 31 Mar 2022 21:12:13 +0200 Subject: [PATCH 03/15] Add repo upgrade migration --- internal/migrations/upgrade_repo_v2.go | 46 ++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 internal/migrations/upgrade_repo_v2.go diff --git a/internal/migrations/upgrade_repo_v2.go b/internal/migrations/upgrade_repo_v2.go new file mode 100644 index 000000000..7f8f8111d --- /dev/null +++ b/internal/migrations/upgrade_repo_v2.go @@ -0,0 +1,46 @@ +package migrations + +import ( + "context" + "fmt" + + "github.com/restic/restic/internal/restic" +) + +func init() { + register(&UpgradeRepoV2{}) +} + +type UpgradeRepoV2 struct{} + +func (*UpgradeRepoV2) Name() string { + return "upgrade_repo_v2" +} + +func (*UpgradeRepoV2) Desc() string { + return "upgrade a repository to version 2" +} + +func (*UpgradeRepoV2) Check(ctx context.Context, repo restic.Repository) (bool, error) { + isV1 := repo.Config().Version == 1 + return isV1, nil +} + +func (*UpgradeRepoV2) Apply(ctx context.Context, repo restic.Repository) error { + cfg := repo.Config() + cfg.Version = 2 + + h := restic.Handle{Type: restic.ConfigFile} + + err 
:= repo.Backend().Remove(ctx, h) + if err != nil { + return fmt.Errorf("remove old config file failed: %w", err) + } + + _, err = repo.SaveJSONUnpacked(ctx, restic.ConfigFile, cfg) + if err != nil { + return fmt.Errorf("save new config file failed: %w", err) + } + + return nil +} From a5f1d318ac853bfab7fe9ec7d67727346f53b26f Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Thu, 31 Mar 2022 21:29:19 +0200 Subject: [PATCH 04/15] Try to make repo upgrade migration more failsafe --- internal/migrations/upgrade_repo_v2.go | 57 +++++++++++++++++++++++--- 1 file changed, 52 insertions(+), 5 deletions(-) diff --git a/internal/migrations/upgrade_repo_v2.go b/internal/migrations/upgrade_repo_v2.go index 7f8f8111d..cf5ae8425 100644 --- a/internal/migrations/upgrade_repo_v2.go +++ b/internal/migrations/upgrade_repo_v2.go @@ -3,6 +3,10 @@ package migrations import ( "context" "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" "github.com/restic/restic/internal/restic" ) @@ -26,17 +30,19 @@ func (*UpgradeRepoV2) Check(ctx context.Context, repo restic.Repository) (bool, return isV1, nil } -func (*UpgradeRepoV2) Apply(ctx context.Context, repo restic.Repository) error { - cfg := repo.Config() - cfg.Version = 2 - +func (*UpgradeRepoV2) upgrade(ctx context.Context, repo restic.Repository) error { h := restic.Handle{Type: restic.ConfigFile} + // now remove the original file err := repo.Backend().Remove(ctx, h) if err != nil { - return fmt.Errorf("remove old config file failed: %w", err) + return fmt.Errorf("remove config failed: %w", err) } + // upgrade config + cfg := repo.Config() + cfg.Version = 2 + _, err = repo.SaveJSONUnpacked(ctx, restic.ConfigFile, cfg) if err != nil { return fmt.Errorf("save new config file failed: %w", err) @@ -44,3 +50,44 @@ func (*UpgradeRepoV2) Apply(ctx context.Context, repo restic.Repository) error { return nil } + +func (m *UpgradeRepoV2) Apply(ctx context.Context, repo restic.Repository) error { + tempdir, err := ioutil.TempDir("", "restic-migrate-upgrade-repo-v2-") + if err != nil { + return fmt.Errorf("create temp dir failed: %w", err) + } + + h := restic.Handle{Type: restic.ConfigFile} + + // read raw config file and save it to a temp dir, just in case + var rawConfigFile []byte + err = repo.Backend().Load(ctx, h, 0, 0, func(rd io.Reader) (err error) { + rawConfigFile, err = ioutil.ReadAll(rd) + return err + }) + if err != nil { + return fmt.Errorf("load config file failed: %w", err) + } + + err = ioutil.WriteFile(filepath.Join(tempdir, "config.old"), rawConfigFile, 0600) + if err != nil { + return fmt.Errorf("write config file backup to %v failed: %w", tempdir, err) + } + + // run the upgrade + err = m.upgrade(ctx, repo) + if err != nil { + // try contingency methods, reupload the original file + _ = repo.Backend().Remove(ctx, h) + uploadError := repo.Backend().Save(ctx, h, restic.NewByteReader(rawConfigFile, nil)) + if uploadError != nil { + return fmt.Errorf("error uploading config (%w), re-uploading old config filed failed as well (%v) but there is a backup in %v", err, uploadError, tempdir) + } + + return fmt.Errorf("error uploading config (%w), re-uploadid old config, there is a backup in %v", err, tempdir) + } + + _ = os.Remove(backupFileName) + _ = os.Remove(tempdir) + return nil +} From 8c244214bff928eacdd82b0c0715e9f4a24c38c2 Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 3 Apr 2022 14:48:39 +0200 Subject: [PATCH 05/15] Add tests for upgrade migration --- internal/migrations/upgrade_repo_v2.go | 42 ++++++-- 
internal/migrations/upgrade_repo_v2_test.go | 112 ++++++++++++++++++++ 2 files changed, 147 insertions(+), 7 deletions(-) create mode 100644 internal/migrations/upgrade_repo_v2_test.go diff --git a/internal/migrations/upgrade_repo_v2.go b/internal/migrations/upgrade_repo_v2.go index cf5ae8425..ada77444e 100644 --- a/internal/migrations/upgrade_repo_v2.go +++ b/internal/migrations/upgrade_repo_v2.go @@ -15,6 +15,26 @@ func init() { register(&UpgradeRepoV2{}) } +type UpgradeRepoV2Error struct { + UploadNewConfigError error + ReuploadOldConfigError error + + BackupFilePath string +} + +func (err *UpgradeRepoV2Error) Error() string { + if err.ReuploadOldConfigError != nil { + return fmt.Sprintf("error uploading config (%v), re-uploading old config filed failed as well (%v), but there is a backup of the config file in %v", err.UploadNewConfigError, err.ReuploadOldConfigError, err.BackupFilePath) + } + + return fmt.Sprintf("error uploading config (%v), re-uploaded old config was successful, there is a backup of the config file in %v", err.UploadNewConfigError, err.BackupFilePath) +} + +func (err *UpgradeRepoV2Error) Unwrap() error { + // consider the original upload error as the primary cause + return err.UploadNewConfigError +} + type UpgradeRepoV2 struct{} func (*UpgradeRepoV2) Name() string { @@ -69,7 +89,8 @@ func (m *UpgradeRepoV2) Apply(ctx context.Context, repo restic.Repository) error return fmt.Errorf("load config file failed: %w", err) } - err = ioutil.WriteFile(filepath.Join(tempdir, "config.old"), rawConfigFile, 0600) + backupFileName := filepath.Join(tempdir, "config") + err = ioutil.WriteFile(backupFileName, rawConfigFile, 0600) if err != nil { return fmt.Errorf("write config file backup to %v failed: %w", tempdir, err) } @@ -77,14 +98,21 @@ func (m *UpgradeRepoV2) Apply(ctx context.Context, repo restic.Repository) error // run the upgrade err = m.upgrade(ctx, repo) if err != nil { - // try contingency methods, reupload the original file - _ = repo.Backend().Remove(ctx, h) - uploadError := repo.Backend().Save(ctx, h, restic.NewByteReader(rawConfigFile, nil)) - if uploadError != nil { - return fmt.Errorf("error uploading config (%w), re-uploading old config filed failed as well (%v) but there is a backup in %v", err, uploadError, tempdir) + + // build an error we can return to the caller + repoError := &UpgradeRepoV2Error{ + UploadNewConfigError: err, + BackupFilePath: backupFileName, } - return fmt.Errorf("error uploading config (%w), re-uploadid old config, there is a backup in %v", err, tempdir) + // try contingency methods, reupload the original file + _ = repo.Backend().Remove(ctx, h) + err = repo.Backend().Save(ctx, h, restic.NewByteReader(rawConfigFile, nil)) + if err != nil { + repoError.ReuploadOldConfigError = err + } + + return repoError } _ = os.Remove(backupFileName) diff --git a/internal/migrations/upgrade_repo_v2_test.go b/internal/migrations/upgrade_repo_v2_test.go new file mode 100644 index 000000000..0d86d265c --- /dev/null +++ b/internal/migrations/upgrade_repo_v2_test.go @@ -0,0 +1,112 @@ +package migrations + +import ( + "context" + "os" + "path/filepath" + "sync" + "testing" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/test" +) + +func TestUpgradeRepoV2(t *testing.T) { + repo, cleanup := repository.TestRepositoryWithVersion(t, 1) + defer cleanup() + + if repo.Config().Version != 1 { + t.Fatal("test repo has wrong version") + } + + 
m := &UpgradeRepoV2{} + + ok, err := m.Check(context.Background(), repo) + if err != nil { + t.Fatal(err) + } + + if !ok { + t.Fatal("migration check returned false") + } + + err = m.Apply(context.Background(), repo) + if err != nil { + t.Fatal(err) + } +} + +type failBackend struct { + restic.Backend + + mu sync.Mutex + ConfigFileSavesUntilError uint +} + +func (be *failBackend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error { + if h.Type != restic.ConfigFile { + return be.Backend.Save(ctx, h, rd) + } + + be.mu.Lock() + if be.ConfigFileSavesUntilError == 0 { + be.mu.Unlock() + return errors.New("failure induced for testing") + } + + be.ConfigFileSavesUntilError-- + be.mu.Unlock() + + return be.Backend.Save(ctx, h, rd) +} + +func TestUpgradeRepoV2Failure(t *testing.T) { + be, cleanup := repository.TestBackend(t) + defer cleanup() + + // wrap backend so that it fails upgrading the config after the initial write + be = &failBackend{ + ConfigFileSavesUntilError: 1, + Backend: be, + } + + repo, cleanup := repository.TestRepositoryWithBackend(t, be, 1) + defer cleanup() + + if repo.Config().Version != 1 { + t.Fatal("test repo has wrong version") + } + + m := &UpgradeRepoV2{} + + ok, err := m.Check(context.Background(), repo) + if err != nil { + t.Fatal(err) + } + + if !ok { + t.Fatal("migration check returned false") + } + + err = m.Apply(context.Background(), repo) + if err == nil { + t.Fatal("expected error returned from Apply(), got nil") + } + + upgradeErr := err.(*UpgradeRepoV2Error) + if upgradeErr.UploadNewConfigError == nil { + t.Fatal("expected upload error, got nil") + } + + if upgradeErr.ReuploadOldConfigError == nil { + t.Fatal("expected reupload error, got nil") + } + + if upgradeErr.BackupFilePath == "" { + t.Fatal("no backup file path found") + } + test.OK(t, os.Remove(upgradeErr.BackupFilePath)) + test.OK(t, os.Remove(filepath.Dir(upgradeErr.BackupFilePath))) +} From c8c0d659ec6ee630c1d3848e1bc07491832fb11e Mon Sep 17 00:00:00 2001 From: Alexander Neumann Date: Sun, 3 Apr 2022 17:26:13 +0200 Subject: [PATCH 06/15] Add migration to compress all data --- internal/migrations/compress_repo_v2.go | 102 ++++++++++++++++++++++++ 1 file changed, 102 insertions(+) create mode 100644 internal/migrations/compress_repo_v2.go diff --git a/internal/migrations/compress_repo_v2.go b/internal/migrations/compress_repo_v2.go new file mode 100644 index 000000000..b6986d29e --- /dev/null +++ b/internal/migrations/compress_repo_v2.go @@ -0,0 +1,102 @@ +package migrations + +import ( + "context" + "fmt" + + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" +) + +func init() { + register(&CompressRepoV2{}) +} + +type CompressRepoV2 struct{} + +func (*CompressRepoV2) Name() string { + return "compress_all_data" +} + +func (*CompressRepoV2) Desc() string { + return "compress all data in the repo" +} + +func (*CompressRepoV2) Check(ctx context.Context, repo restic.Repository) (bool, error) { + // only do very fast checks on the version here, we don't want the list of + // available migrations to take long to load + if repo.Config().Version < 2 { + return false, nil + } + + return true, nil +} + +// Apply requires that the repository must be already locked exclusively, this +// is done by the caller, so we can just go ahead, rewrite the packs as they +// are, remove the packs and rebuild the index. 
+func (*CompressRepoV2) Apply(ctx context.Context, repo restic.Repository) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + err := repo.LoadIndex(ctx) + if err != nil { + return fmt.Errorf("index load failed: %w", err) + } + + packsWithUncompressedData := restic.NewIDSet() + keepBlobs := restic.NewBlobSet() + + for blob := range repo.Index().Each(ctx) { + keepBlobs.Insert(blob.BlobHandle) + + if blob.UncompressedLength != 0 { + // blob is already compressed, ignore + continue + + } + + // remember pack ID + packsWithUncompressedData.Insert(blob.PackID) + } + + if len(packsWithUncompressedData) == 0 { + // nothing to do + return nil + } + + // don't upload new indexes until we're done + repo.(*repository.Repository).DisableAutoIndexUpdate() + obsoletePacks, err := repository.Repack(ctx, repo, repo, packsWithUncompressedData, keepBlobs, nil) + if err != nil { + return fmt.Errorf("repack failed: %w", err) + } + + if len(obsoletePacks) != len(packsWithUncompressedData) { + return fmt.Errorf("Repack() return other packs, %d != %d", len(obsoletePacks), len(packsWithUncompressedData)) + } + + // build new index + idx := repo.Index().(*repository.MasterIndex) + obsoleteIndexes, err := idx.Save(ctx, repo, obsoletePacks, nil, nil) + if err != nil { + return fmt.Errorf("saving new index failed: %w", err) + } + + // remove data + for id := range obsoleteIndexes { + err = repo.Backend().Remove(ctx, restic.Handle{Name: id.String(), Type: restic.IndexFile}) + if err != nil { + return fmt.Errorf("remove file failed: %w", err) + } + } + + for id := range obsoletePacks { + err = repo.Backend().Remove(ctx, restic.Handle{Name: id.String(), Type: restic.PackFile}) + if err != nil { + return fmt.Errorf("remove file failed: %w", err) + } + } + + return nil +} From 54067431020f275bd68f71cf29ac248f9107cffe Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 10 Apr 2022 11:57:01 +0200 Subject: [PATCH 07/15] prune: Automatically repack uncompressed trees for repo v2 Tree packs are cached locally at clients and thus benefit a lot from being compressed. Ensure this be having prune always repack pack files containing uncompressed trees. 
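
For reference, the repack rule this patch adds can be condensed into a small
predicate. The sketch below is illustrative only: the function name and
parameters are invented for readability, and the real check is written inline
in prune() in cmd/restic/cmd_prune.go.

    package sketch

    import "github.com/restic/restic/internal/restic"

    // mustCompressPack condenses the repack decision added by this patch.
    func mustCompressPack(repoVersion uint, packType restic.BlobType, hasUncompressedBlobs bool) bool {
    	if repoVersion < 2 {
    		// compression is only available with repository format version 2
    		return false
    	}
    	// tree packs are cached on clients, so repack them whenever they
    	// still contain uncompressed blobs
    	return packType == restic.TreeBlob && hasUncompressedBlobs
    }

Data packs are left alone at this point; a follow-up patch in this series adds
an explicit --repack-uncompressed option for them.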
--- cmd/restic/cmd_prune.go | 17 +++- internal/migrations/compress_repo_v2.go | 102 ------------------------ 2 files changed, 14 insertions(+), 105 deletions(-) delete mode 100644 internal/migrations/compress_repo_v2.go diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index a6a8d0bde..71e1a21af 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -191,6 +191,7 @@ type packInfo struct { usedSize uint64 unusedSize uint64 tpe restic.BlobType + uncompressed bool } type packInfoWithID struct { @@ -299,6 +300,9 @@ func prune(opts PruneOptions, gopts GlobalOptions, repo restic.Repository, usedB ip.unusedSize += size ip.unusedBlobs++ } + if !blob.IsCompressed() { + ip.uncompressed = true + } // update indexPack indexPack[blob.PackID] = ip } @@ -318,6 +322,8 @@ func prune(opts PruneOptions, gopts GlobalOptions, repo restic.Repository, usedB } } + repoVersion := repo.Config().Version + // loop over all packs and decide what to do bar := newProgressMax(!gopts.Quiet, uint64(len(indexPack)), "packs processed") err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error { @@ -350,6 +356,11 @@ func prune(opts PruneOptions, gopts GlobalOptions, repo restic.Repository, usedB stats.packs.partlyUsed++ } + // repo v2: always repack tree blobs if uncompressed + mustCompress := repoVersion >= 2 && p.tpe == restic.TreeBlob && p.uncompressed + // use a flag that pack must be compressed + p.uncompressed = mustCompress + // decide what to do switch { case p.usedBlobs == 0 && p.duplicateBlobs == 0: @@ -362,7 +373,7 @@ func prune(opts PruneOptions, gopts GlobalOptions, repo restic.Repository, usedB // if this is a data pack and --repack-cacheable-only is set => keep pack! keep(p) - case p.unusedBlobs == 0 && p.duplicateBlobs == 0 && p.tpe != restic.InvalidBlob: + case p.unusedBlobs == 0 && p.duplicateBlobs == 0 && p.tpe != restic.InvalidBlob && !mustCompress: // All blobs in pack are used and not duplicates/mixed => keep pack! 
keep(p) @@ -447,8 +458,8 @@ func prune(opts PruneOptions, gopts GlobalOptions, repo restic.Repository, usedB case reachedRepackSize: keep(p.packInfo) - case p.duplicateBlobs > 0, p.tpe != restic.DataBlob: - // repacking duplicates/non-data is only limited by repackSize + case p.duplicateBlobs > 0, p.tpe != restic.DataBlob, p.uncompressed: + // repacking duplicates/non-data/uncompressed-trees is only limited by repackSize repack(p.ID, p.packInfo) case reachedUnusedSizeAfter: diff --git a/internal/migrations/compress_repo_v2.go b/internal/migrations/compress_repo_v2.go deleted file mode 100644 index b6986d29e..000000000 --- a/internal/migrations/compress_repo_v2.go +++ /dev/null @@ -1,102 +0,0 @@ -package migrations - -import ( - "context" - "fmt" - - "github.com/restic/restic/internal/repository" - "github.com/restic/restic/internal/restic" -) - -func init() { - register(&CompressRepoV2{}) -} - -type CompressRepoV2 struct{} - -func (*CompressRepoV2) Name() string { - return "compress_all_data" -} - -func (*CompressRepoV2) Desc() string { - return "compress all data in the repo" -} - -func (*CompressRepoV2) Check(ctx context.Context, repo restic.Repository) (bool, error) { - // only do very fast checks on the version here, we don't want the list of - // available migrations to take long to load - if repo.Config().Version < 2 { - return false, nil - } - - return true, nil -} - -// Apply requires that the repository must be already locked exclusively, this -// is done by the caller, so we can just go ahead, rewrite the packs as they -// are, remove the packs and rebuild the index. -func (*CompressRepoV2) Apply(ctx context.Context, repo restic.Repository) error { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - err := repo.LoadIndex(ctx) - if err != nil { - return fmt.Errorf("index load failed: %w", err) - } - - packsWithUncompressedData := restic.NewIDSet() - keepBlobs := restic.NewBlobSet() - - for blob := range repo.Index().Each(ctx) { - keepBlobs.Insert(blob.BlobHandle) - - if blob.UncompressedLength != 0 { - // blob is already compressed, ignore - continue - - } - - // remember pack ID - packsWithUncompressedData.Insert(blob.PackID) - } - - if len(packsWithUncompressedData) == 0 { - // nothing to do - return nil - } - - // don't upload new indexes until we're done - repo.(*repository.Repository).DisableAutoIndexUpdate() - obsoletePacks, err := repository.Repack(ctx, repo, repo, packsWithUncompressedData, keepBlobs, nil) - if err != nil { - return fmt.Errorf("repack failed: %w", err) - } - - if len(obsoletePacks) != len(packsWithUncompressedData) { - return fmt.Errorf("Repack() return other packs, %d != %d", len(obsoletePacks), len(packsWithUncompressedData)) - } - - // build new index - idx := repo.Index().(*repository.MasterIndex) - obsoleteIndexes, err := idx.Save(ctx, repo, obsoletePacks, nil, nil) - if err != nil { - return fmt.Errorf("saving new index failed: %w", err) - } - - // remove data - for id := range obsoleteIndexes { - err = repo.Backend().Remove(ctx, restic.Handle{Name: id.String(), Type: restic.IndexFile}) - if err != nil { - return fmt.Errorf("remove file failed: %w", err) - } - } - - for id := range obsoletePacks { - err = repo.Backend().Remove(ctx, restic.Handle{Name: id.String(), Type: restic.PackFile}) - if err != nil { - return fmt.Errorf("remove file failed: %w", err) - } - } - - return nil -} From 381bd94c6c0eb5f873fab5d51f36dcacdc918c4f Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 1 May 2022 12:02:05 +0200 Subject: [PATCH 08/15] prune: 
Add option to repack uncompressed data --- cmd/restic/cmd_prune.go | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index 71e1a21af..3bc904bfc 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -51,6 +51,7 @@ type PruneOptions struct { MaxRepackBytes uint64 RepackCachableOnly bool + RepackUncompressed bool } var pruneOptions PruneOptions @@ -68,6 +69,7 @@ func addPruneOptions(c *cobra.Command) { f.StringVar(&pruneOptions.MaxUnused, "max-unused", "5%", "tolerate given `limit` of unused data (absolute value in bytes with suffixes k/K, m/M, g/G, t/T, a value in % or the word 'unlimited')") f.StringVar(&pruneOptions.MaxRepackSize, "max-repack-size", "", "maximum `size` to repack (allowed suffixes: k/K, m/M, g/G, t/T)") f.BoolVar(&pruneOptions.RepackCachableOnly, "repack-cacheable-only", false, "only repack packs which are cacheable") + f.BoolVar(&pruneOptions.RepackUncompressed, "repack-uncompressed", false, "repack all uncompressed data") } func verifyPruneOptions(opts *PruneOptions) error { @@ -135,6 +137,10 @@ func runPrune(opts PruneOptions, gopts GlobalOptions) error { return err } + if opts.RepackUncompressed && gopts.Compression == repository.CompressionOff { + return errors.Fatal("disabled compression and `--repack-uncompressed` are mutually exclusive") + } + repo, err := OpenRepository(gopts) if err != nil { return err @@ -144,6 +150,10 @@ func runPrune(opts PruneOptions, gopts GlobalOptions) error { return errors.Fatal("prune requires a backend connection limit of at least two") } + if repo.Config().Version < 2 && opts.RepackUncompressed { + return errors.Fatal("compression requires at least repository format version 2") + } + if opts.UnsafeNoSpaceRecovery != "" { repoID := repo.Config().ID if opts.UnsafeNoSpaceRecovery != repoID { @@ -356,8 +366,12 @@ func prune(opts PruneOptions, gopts GlobalOptions, repo restic.Repository, usedB stats.packs.partlyUsed++ } - // repo v2: always repack tree blobs if uncompressed - mustCompress := repoVersion >= 2 && p.tpe == restic.TreeBlob && p.uncompressed + mustCompress := false + if repoVersion >= 2 { + // repo v2: always repack tree blobs if uncompressed + // compress data blobs if requested + mustCompress = (p.tpe == restic.TreeBlob || opts.RepackUncompressed) && p.uncompressed + } // use a flag that pack must be compressed p.uncompressed = mustCompress From 7559d2f105632dd55c7595cd33d93110a4868f24 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 1 May 2022 14:30:14 +0200 Subject: [PATCH 09/15] Document repository version and minimum restic version --- doc/030_preparing_a_new_repo.rst | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/doc/030_preparing_a_new_repo.rst b/doc/030_preparing_a_new_repo.rst index 00bc637f5..9eb870565 100644 --- a/doc/030_preparing_a_new_repo.rst +++ b/doc/030_preparing_a_new_repo.rst @@ -40,7 +40,17 @@ options exist: be used to explicitely set the version for the new repository. By default, the current stable version is used. Have a look at the `design documentation `__ for - details. + details. The alias ``latest`` will always point to the latest repository version. + The below table shows which restic version is required to use a certain + repository version and shows new features introduced by the repository format. 
+ ++--------------------+------------------------+---------------------+ +| Repository version | Minimum restic version | Major new features | ++====================+========================+=====================+ +| ``1`` | any version | | ++--------------------+------------------------+---------------------+ +| ``2`` | >= 0.14.0 | Compression support | ++--------------------+------------------------+---------------------+ Local From e36a40db10e58ccc15c791988db1ec453648f14c Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 1 May 2022 20:07:29 +0200 Subject: [PATCH 10/15] upgrade_repo_v2: Use atomic replace for supported backends --- internal/backend/azure/azure.go | 5 ++++ internal/backend/b2/b2.go | 5 ++++ internal/backend/dryrun/dry_backend.go | 4 ++++ internal/backend/gs/gs.go | 5 ++++ internal/backend/local/local.go | 5 ++++ internal/backend/mem/mem_backend.go | 5 ++++ internal/backend/rest/rest.go | 6 +++++ internal/backend/s3/s3.go | 5 ++++ internal/backend/sftp/sftp.go | 6 +++++ internal/backend/swift/swift.go | 5 ++++ internal/migrations/upgrade_repo_v2.go | 12 ++++++---- internal/mock/backend.go | 33 ++++++++++++++++---------- internal/restic/backend.go | 3 +++ 13 files changed, 82 insertions(+), 17 deletions(-) diff --git a/internal/backend/azure/azure.go b/internal/backend/azure/azure.go index 243a1eaef..ff89a6b01 100644 --- a/internal/backend/azure/azure.go +++ b/internal/backend/azure/azure.go @@ -125,6 +125,11 @@ func (be *Backend) Hasher() hash.Hash { return md5.New() } +// HasAtomicReplace returns whether Save() can atomically replace files +func (be *Backend) HasAtomicReplace() bool { + return true +} + // Path returns the path in the bucket that is used for this backend. func (be *Backend) Path() string { return be.prefix diff --git a/internal/backend/b2/b2.go b/internal/backend/b2/b2.go index 6108aaf5c..7f8019a74 100644 --- a/internal/backend/b2/b2.go +++ b/internal/backend/b2/b2.go @@ -147,6 +147,11 @@ func (be *b2Backend) Hasher() hash.Hash { return nil } +// HasAtomicReplace returns whether Save() can atomically replace files +func (be *b2Backend) HasAtomicReplace() bool { + return true +} + // IsNotExist returns true if the error is caused by a non-existing file. func (be *b2Backend) IsNotExist(err error) bool { return b2.IsNotExist(errors.Cause(err)) diff --git a/internal/backend/dryrun/dry_backend.go b/internal/backend/dryrun/dry_backend.go index 44eee9a45..31012df43 100644 --- a/internal/backend/dryrun/dry_backend.go +++ b/internal/backend/dryrun/dry_backend.go @@ -67,6 +67,10 @@ func (be *Backend) Hasher() hash.Hash { return be.b.Hasher() } +func (be *Backend) HasAtomicReplace() bool { + return be.b.HasAtomicReplace() +} + func (be *Backend) IsNotExist(err error) bool { return be.b.IsNotExist(err) } diff --git a/internal/backend/gs/gs.go b/internal/backend/gs/gs.go index c87211be3..92de75887 100644 --- a/internal/backend/gs/gs.go +++ b/internal/backend/gs/gs.go @@ -201,6 +201,11 @@ func (be *Backend) Hasher() hash.Hash { return md5.New() } +// HasAtomicReplace returns whether Save() can atomically replace files +func (be *Backend) HasAtomicReplace() bool { + return true +} + // Path returns the path in the bucket that is used for this backend. 
func (be *Backend) Path() string { return be.prefix diff --git a/internal/backend/local/local.go b/internal/backend/local/local.go index 77bc026ad..22fb8c8e5 100644 --- a/internal/backend/local/local.go +++ b/internal/backend/local/local.go @@ -102,6 +102,11 @@ func (b *Local) Hasher() hash.Hash { return nil } +// HasAtomicReplace returns whether Save() can atomically replace files +func (b *Local) HasAtomicReplace() bool { + return true +} + // IsNotExist returns true if the error is caused by a non existing file. func (b *Local) IsNotExist(err error) bool { return errors.Is(err, os.ErrNotExist) diff --git a/internal/backend/mem/mem_backend.go b/internal/backend/mem/mem_backend.go index 69476b693..b14149d52 100644 --- a/internal/backend/mem/mem_backend.go +++ b/internal/backend/mem/mem_backend.go @@ -268,6 +268,11 @@ func (be *MemoryBackend) Hasher() hash.Hash { return md5.New() } +// HasAtomicReplace returns whether Save() can atomically replace files +func (be *MemoryBackend) HasAtomicReplace() bool { + return false +} + // Delete removes all data in the backend. func (be *MemoryBackend) Delete(ctx context.Context) error { be.m.Lock() diff --git a/internal/backend/rest/rest.go b/internal/backend/rest/rest.go index 1e372229a..b9824bb53 100644 --- a/internal/backend/rest/rest.go +++ b/internal/backend/rest/rest.go @@ -121,6 +121,12 @@ func (b *Backend) Hasher() hash.Hash { return nil } +// HasAtomicReplace returns whether Save() can atomically replace files +func (b *Backend) HasAtomicReplace() bool { + // rest-server prevents overwriting + return false +} + // Save stores data in the backend at the handle. func (b *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error { if err := h.Valid(); err != nil { diff --git a/internal/backend/s3/s3.go b/internal/backend/s3/s3.go index 1bdf2d795..ac1a1d5ce 100644 --- a/internal/backend/s3/s3.go +++ b/internal/backend/s3/s3.go @@ -269,6 +269,11 @@ func (be *Backend) Hasher() hash.Hash { return nil } +// HasAtomicReplace returns whether Save() can atomically replace files +func (be *Backend) HasAtomicReplace() bool { + return true +} + // Path returns the path in the bucket that is used for this backend. func (be *Backend) Path() string { return be.cfg.Prefix diff --git a/internal/backend/sftp/sftp.go b/internal/backend/sftp/sftp.go index a8a2a185d..ebbaaddad 100644 --- a/internal/backend/sftp/sftp.go +++ b/internal/backend/sftp/sftp.go @@ -267,6 +267,12 @@ func (r *SFTP) Hasher() hash.Hash { return nil } +// HasAtomicReplace returns whether Save() can atomically replace files +func (r *SFTP) HasAtomicReplace() bool { + // we use sftp's 'Rename()' in 'Save()' which does not allow overwriting + return false +} + // Join joins the given paths and cleans them afterwards. This always uses // forward slashes, which is required by sftp. func Join(parts ...string) string { diff --git a/internal/backend/swift/swift.go b/internal/backend/swift/swift.go index 6157002b5..b127cb832 100644 --- a/internal/backend/swift/swift.go +++ b/internal/backend/swift/swift.go @@ -129,6 +129,11 @@ func (be *beSwift) Hasher() hash.Hash { return md5.New() } +// HasAtomicReplace returns whether Save() can atomically replace files +func (be *beSwift) HasAtomicReplace() bool { + return true +} + // Load runs fn with a reader that yields the contents of the file at h at the // given offset. 
func (be *beSwift) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error { diff --git a/internal/migrations/upgrade_repo_v2.go b/internal/migrations/upgrade_repo_v2.go index ada77444e..02806e468 100644 --- a/internal/migrations/upgrade_repo_v2.go +++ b/internal/migrations/upgrade_repo_v2.go @@ -53,17 +53,19 @@ func (*UpgradeRepoV2) Check(ctx context.Context, repo restic.Repository) (bool, func (*UpgradeRepoV2) upgrade(ctx context.Context, repo restic.Repository) error { h := restic.Handle{Type: restic.ConfigFile} - // now remove the original file - err := repo.Backend().Remove(ctx, h) - if err != nil { - return fmt.Errorf("remove config failed: %w", err) + if !repo.Backend().HasAtomicReplace() { + // remove the original file for backends which do not support atomic overwriting + err := repo.Backend().Remove(ctx, h) + if err != nil { + return fmt.Errorf("remove config failed: %w", err) + } } // upgrade config cfg := repo.Config() cfg.Version = 2 - _, err = repo.SaveJSONUnpacked(ctx, restic.ConfigFile, cfg) + _, err := repo.SaveJSONUnpacked(ctx, restic.ConfigFile, cfg) if err != nil { return fmt.Errorf("save new config file failed: %w", err) } diff --git a/internal/mock/backend.go b/internal/mock/backend.go index 05fe1dc6e..655499b15 100644 --- a/internal/mock/backend.go +++ b/internal/mock/backend.go @@ -11,18 +11,19 @@ import ( // Backend implements a mock backend. type Backend struct { - CloseFn func() error - IsNotExistFn func(err error) bool - SaveFn func(ctx context.Context, h restic.Handle, rd restic.RewindReader) error - OpenReaderFn func(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) - StatFn func(ctx context.Context, h restic.Handle) (restic.FileInfo, error) - ListFn func(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error - RemoveFn func(ctx context.Context, h restic.Handle) error - TestFn func(ctx context.Context, h restic.Handle) (bool, error) - DeleteFn func(ctx context.Context) error - ConnectionsFn func() uint - LocationFn func() string - HasherFn func() hash.Hash + CloseFn func() error + IsNotExistFn func(err error) bool + SaveFn func(ctx context.Context, h restic.Handle, rd restic.RewindReader) error + OpenReaderFn func(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) + StatFn func(ctx context.Context, h restic.Handle) (restic.FileInfo, error) + ListFn func(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error + RemoveFn func(ctx context.Context, h restic.Handle) error + TestFn func(ctx context.Context, h restic.Handle) (bool, error) + DeleteFn func(ctx context.Context) error + ConnectionsFn func() uint + LocationFn func() string + HasherFn func() hash.Hash + HasAtomicReplaceFn func() bool } // NewBackend returns new mock Backend instance @@ -66,6 +67,14 @@ func (m *Backend) Hasher() hash.Hash { return m.HasherFn() } +// HasAtomicReplace returns whether Save() can atomically replace files +func (m *Backend) HasAtomicReplace() bool { + if m.HasAtomicReplaceFn == nil { + return false + } + return m.HasAtomicReplaceFn() +} + // IsNotExist returns true if the error is caused by a missing file. 
func (m *Backend) IsNotExist(err error) bool { if m.IsNotExistFn == nil { diff --git a/internal/restic/backend.go b/internal/restic/backend.go index 1203bf3d3..6ec10e685 100644 --- a/internal/restic/backend.go +++ b/internal/restic/backend.go @@ -24,6 +24,9 @@ type Backend interface { // Hasher may return a hash function for calculating a content hash for the backend Hasher() hash.Hash + // HasAtomicReplace returns whether Save() can atomically replace files + HasAtomicReplace() bool + // Test a boolean value whether a File with the name and type exists. Test(ctx context.Context, h Handle) (bool, error) From 4faff0debe08a77fa166b9b7d44c2519b88d7989 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sat, 7 May 2022 20:13:52 +0200 Subject: [PATCH 11/15] doc: Describe repository upgrade process --- doc/045_working_with_repos.rst | 23 +++++++++++++++++++++++ doc/060_forget.rst | 2 ++ 2 files changed, 25 insertions(+) diff --git a/doc/045_working_with_repos.rst b/doc/045_working_with_repos.rst index 8ba154f30..d34b09c08 100644 --- a/doc/045_working_with_repos.rst +++ b/doc/045_working_with_repos.rst @@ -298,3 +298,26 @@ a file size value the following command may be used: $ restic -r /srv/restic-repo check --read-data-subset=50M $ restic -r /srv/restic-repo check --read-data-subset=10G + + +Upgrading the repository format version +======================================= + +Repositories created using earlier restic versions use an older repository +format version and have to be upgraded to allow using all new features. +Upgrading must be done explicitly as a newer repository version increases the +minimum restic version required to access the repository. For example the +repository format version 2 is only readable using restic 0.14.0 or newer. + +Upgrading to repo version 2 is a two step process: first run +``migrate upgrade_repo_v2`` which will check the repository integrity and +then upgrade the repository version. Repository problems must be corrected +before the migration will be possible. After the migration is complete, run +``prune`` to compress the repository metadata. To limit the amount of data +rewritten in at once, you can use the ``prune --max-repack-size size`` +parameter, see :ref:`customize-pruning` for more details. + +File contents stored in the repository will not be rewritten, data from new +backups will be compressed. Over time more and more of the repository will +be compressed. To speed up this process and compress all not yet compressed +data, you can run ``prune --repack-uncompressed``. diff --git a/doc/060_forget.rst b/doc/060_forget.rst index ab67368aa..df9491c5e 100644 --- a/doc/060_forget.rst +++ b/doc/060_forget.rst @@ -388,6 +388,8 @@ the specified duration: if ``forget --keep-within 7d`` is run 8 days after the last good snapshot, then the attacker can still use that opportunity to remove all legitimate snapshots. +.. 
_customize-pruning: + Customize pruning ***************** From 5815f727ee4442873c00463daab56bbfd6f702f6 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 1 May 2022 20:08:02 +0200 Subject: [PATCH 12/15] checker: convert error type to use pointer-receivers --- cmd/restic/cmd_check.go | 4 ++-- internal/checker/checker.go | 12 ++++++------ internal/checker/checker_test.go | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index e7edff39e..1993b3304 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -222,7 +222,7 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error { dupFound := false for _, hint := range hints { Printf("%v\n", hint) - if _, ok := hint.(checker.ErrDuplicatePacks); ok { + if _, ok := hint.(*checker.ErrDuplicatePacks); ok { dupFound = true } } @@ -273,7 +273,7 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error { for err := range errChan { errorsFound = true - if e, ok := err.(checker.TreeError); ok { + if e, ok := err.(*checker.TreeError); ok { Warnf("error for tree %v:\n", e.ID.Str()) for _, treeErr := range e.Errors { Warnf(" %v\n", treeErr) diff --git a/internal/checker/checker.go b/internal/checker/checker.go index 8e49209f5..2ecd1469c 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -63,7 +63,7 @@ type ErrDuplicatePacks struct { Indexes restic.IDSet } -func (e ErrDuplicatePacks) Error() string { +func (e *ErrDuplicatePacks) Error() string { return fmt.Sprintf("pack %v contained in several indexes: %v", e.PackID.Str(), e.Indexes) } @@ -73,7 +73,7 @@ type ErrOldIndexFormat struct { restic.ID } -func (err ErrOldIndexFormat) Error() string { +func (err *ErrOldIndexFormat) Error() string { return fmt.Sprintf("index %v has old format", err.ID.Str()) } @@ -93,7 +93,7 @@ func (c *Checker) LoadIndex(ctx context.Context) (hints []error, errs []error) { if oldFormat { debug.Log("index %v has old format", id.Str()) - hints = append(hints, ErrOldIndexFormat{id}) + hints = append(hints, &ErrOldIndexFormat{id}) } err = errors.Wrapf(err, "error loading index %v", id.Str()) @@ -137,7 +137,7 @@ func (c *Checker) LoadIndex(ctx context.Context) (hints []error, errs []error) { for packID := range c.packs { debug.Log(" check pack %v: contained in %d indexes", packID, len(packToIndex[packID])) if len(packToIndex[packID]) > 1 { - hints = append(hints, ErrDuplicatePacks{ + hints = append(hints, &ErrDuplicatePacks{ PackID: packID, Indexes: packToIndex[packID], }) @@ -257,7 +257,7 @@ type TreeError struct { Errors []error } -func (e TreeError) Error() string { +func (e *TreeError) Error() string { return fmt.Sprintf("tree %v: %v", e.ID.Str(), e.Errors) } @@ -276,7 +276,7 @@ func (c *Checker) checkTreeWorker(ctx context.Context, trees <-chan restic.TreeI if len(errs) == 0 { continue } - treeError := TreeError{ID: job.ID, Errors: errs} + treeError := &TreeError{ID: job.ID, Errors: errs} select { case <-ctx.Done(): return diff --git a/internal/checker/checker_test.go b/internal/checker/checker_test.go index 2a4384b15..f2ee0c732 100644 --- a/internal/checker/checker_test.go +++ b/internal/checker/checker_test.go @@ -289,7 +289,7 @@ func TestDuplicatePacksInIndex(t *testing.T) { found := false for _, hint := range hints { - if _, ok := hint.(checker.ErrDuplicatePacks); ok { + if _, ok := hint.(*checker.ErrDuplicatePacks); ok { found = true } else { t.Errorf("got unexpected hint: %v", hint) From 
59eb132dcd6bc052ca54a043b204ecd6e47551ff Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 9 May 2022 22:25:36 +0200 Subject: [PATCH 13/15] check: Better differentiate between warnings and errors --- cmd/restic/cmd_check.go | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index 1993b3304..1bc9da687 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -219,15 +219,20 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error { Verbosef("load indexes\n") hints, errs := chkr.LoadIndex(gopts.ctx) - dupFound := false + errorsFound := false + suggestIndexRebuild := false for _, hint := range hints { - Printf("%v\n", hint) - if _, ok := hint.(*checker.ErrDuplicatePacks); ok { - dupFound = true + switch hint.(type) { + case *checker.ErrDuplicatePacks, *checker.ErrOldIndexFormat: + Printf("%v\n", hint) + suggestIndexRebuild = true + default: + Warnf("error: %v\n", hint) + errorsFound = true } } - if dupFound { + if suggestIndexRebuild { Printf("This is non-critical, you can run `restic rebuild-index' to correct this\n") } @@ -238,7 +243,6 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error { return errors.Fatal("LoadIndex returned errors") } - errorsFound := false orphanedPacks := 0 errChan := make(chan error) @@ -252,11 +256,11 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error { continue } errorsFound = true - Warnf("%v\n", err) + Warnf("error: %v\n", err) } if orphanedPacks > 0 { - Verbosef("%d additional files were found in the repo, which likely contain duplicate data.\nYou can run `restic prune` to correct this.\n", orphanedPacks) + Verbosef("%d additional files were found in the repo, which likely contain duplicate data.\nThis is non-critical, you can run `restic prune` to correct this.\n", orphanedPacks) } Verbosef("check snapshots, trees and blobs\n") From c1bbbcd0dcc7aa0fe7a181c67b5e08306184611e Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Sun, 1 May 2022 20:16:49 +0200 Subject: [PATCH 14/15] migrate: Allow migrations to request a check run This is currently only used by upgrade_repo_v2. 
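
For context, after this change the Migration interface looks roughly like the
sketch below. This is a reconstruction for readability rather than a verbatim
copy of internal/migrations/interface.go: the comments are paraphrased, and
Name/Desc are the methods the existing migrations already implement.

    package migrations

    import (
    	"context"

    	"github.com/restic/restic/internal/restic"
    )

    // RepositoryCheckOptions carries the options for a requested check run.
    // It is currently empty; a non-nil pointer simply means "run check first".
    type RepositoryCheckOptions struct{}

    // Migration implements a data migration.
    type Migration interface {
    	// Check returns true if the migration can be applied to a repo.
    	Check(context.Context, restic.Repository) (bool, error)

    	// RepoCheckOptions returns nil if no check run is needed, or the
    	// options for the check that must succeed before Apply is called.
    	RepoCheckOptions() *RepositoryCheckOptions

    	// Apply runs the migration.
    	Apply(context.Context, restic.Repository) error

    	// Name returns a short name of the migration.
    	Name() string

    	// Desc returns a short description of what the migration does.
    	Desc() string
    }

As the diff below shows, cmd_migrate reuses runCheck for this, with NoLock set
because migrate already holds the repository lock.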
--- cmd/restic/cmd_migrate.go | 14 ++++++++++++++ internal/migrations/interface.go | 5 +++++ internal/migrations/s3_layout.go | 4 ++++ internal/migrations/upgrade_repo_v2.go | 3 +++ 4 files changed, 26 insertions(+) diff --git a/cmd/restic/cmd_migrate.go b/cmd/restic/cmd_migrate.go index f82439715..10d78b0ca 100644 --- a/cmd/restic/cmd_migrate.go +++ b/cmd/restic/cmd_migrate.go @@ -84,6 +84,20 @@ func applyMigrations(opts MigrateOptions, gopts GlobalOptions, repo restic.Repos Warnf("check for migration %v failed, continuing anyway\n", m.Name()) } + repoCheckOpts := m.RepoCheckOptions() + if repoCheckOpts != nil { + Printf("checking repository integrity...\n") + + checkOptions := CheckOptions{} + checkGopts := gopts + // the repository is already locked + checkGopts.NoLock = true + err = runCheck(checkOptions, checkGopts, []string{}) + if err != nil { + return err + } + } + Printf("applying migration %v...\n", m.Name()) if err = m.Apply(ctx, repo); err != nil { Warnf("migration %v failed: %v\n", m.Name(), err) diff --git a/internal/migrations/interface.go b/internal/migrations/interface.go index 9d9eedba1..eb0a8e60c 100644 --- a/internal/migrations/interface.go +++ b/internal/migrations/interface.go @@ -6,11 +6,16 @@ import ( "github.com/restic/restic/internal/restic" ) +type RepositoryCheckOptions struct { +} + // Migration implements a data migration. type Migration interface { // Check returns true if the migration can be applied to a repo. Check(context.Context, restic.Repository) (bool, error) + RepoCheckOptions() *RepositoryCheckOptions + // Apply runs the migration. Apply(context.Context, restic.Repository) error diff --git a/internal/migrations/s3_layout.go b/internal/migrations/s3_layout.go index 877b44c84..b64c3b073 100644 --- a/internal/migrations/s3_layout.go +++ b/internal/migrations/s3_layout.go @@ -37,6 +37,10 @@ func (m *S3Layout) Check(ctx context.Context, repo restic.Repository) (bool, err return true, nil } +func (m *S3Layout) RepoCheckOptions() *RepositoryCheckOptions { + return nil +} + func retry(max int, fail func(err error), f func() error) error { var err error for i := 0; i < max; i++ { diff --git a/internal/migrations/upgrade_repo_v2.go b/internal/migrations/upgrade_repo_v2.go index 02806e468..86abeaeff 100644 --- a/internal/migrations/upgrade_repo_v2.go +++ b/internal/migrations/upgrade_repo_v2.go @@ -50,6 +50,9 @@ func (*UpgradeRepoV2) Check(ctx context.Context, repo restic.Repository) (bool, return isV1, nil } +func (*UpgradeRepoV2) RepoCheckOptions() *RepositoryCheckOptions { + return &RepositoryCheckOptions{} +} func (*UpgradeRepoV2) upgrade(ctx context.Context, repo restic.Repository) error { h := restic.Handle{Type: restic.ConfigFile} From 5c6db534d473e45d57c768de143332dfd2dff908 Mon Sep 17 00:00:00 2001 From: Michael Eischer Date: Mon, 9 May 2022 22:39:02 +0200 Subject: [PATCH 15/15] Add compression migration support to changelog --- changelog/unreleased/issue-21 | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/changelog/unreleased/issue-21 b/changelog/unreleased/issue-21 index 0a3040add..b15c51d71 100644 --- a/changelog/unreleased/issue-21 +++ b/changelog/unreleased/issue-21 @@ -13,7 +13,12 @@ The new format version has not received much testing yet. Do not rely on it as your only backup copy! Please run `check` in regular intervals to detect any problems. -Upgrading in place is not yet supported. As a workaround, first create a new +To upgrade in place run `migrate upgrade_repo_v2` followed by `prune`. 
See the
+documentation for more details. The migration checks the repository integrity
+and upgrades the repository format but will not change any data. Afterwards,
+prune will rewrite the metadata to make use of compression.
+
+As an alternative, you can use the `copy` command to migrate snapshots: first create a new
 repository using `init --repository-version 2 --copy-chunker-params --repo2
 path/to/old/repo`. Then use the `copy` command to copy all snapshots to the new
 repository.
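
In practice, the in-place upgrade described above amounts to the following
sequence. The repository path is a placeholder taken from the documentation
examples, and the last command is optional: it additionally compresses data
that was stored before the upgrade.

    $ restic -r /srv/restic-repo migrate upgrade_repo_v2
    $ restic -r /srv/restic-repo prune
    $ restic -r /srv/restic-repo prune --repack-uncompressed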