diff --git a/changelog/unreleased/issue-21 b/changelog/unreleased/issue-21 index 0a3040add..b15c51d71 100644 --- a/changelog/unreleased/issue-21 +++ b/changelog/unreleased/issue-21 @@ -13,7 +13,12 @@ The new format version has not received much testing yet. Do not rely on it as your only backup copy! Please run `check` in regular intervals to detect any problems. -Upgrading in place is not yet supported. As a workaround, first create a new +To upgrade in place run `migrate upgrade_repo_v2` followed by `prune`. See the +documentation for more details. The migration checks the repository integrity +and upgrades the repository format but will not change any data. Afterwards, +prune will rewrite the metadata to make use of compression. + +As an alternative you can use the `copy` command to migrate snapshots: first create a new repository using `init --repository-version 2 --copy-chunker-params --repo2 path/to/old/repo`. Then use the `copy` command to copy all snapshots to the new repository. 
diff --git a/cmd/restic/cmd_check.go b/cmd/restic/cmd_check.go index e7edff39e..1bc9da687 100644 --- a/cmd/restic/cmd_check.go +++ b/cmd/restic/cmd_check.go @@ -219,15 +219,20 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error { Verbosef("load indexes\n") hints, errs := chkr.LoadIndex(gopts.ctx) - dupFound := false + errorsFound := false + suggestIndexRebuild := false for _, hint := range hints { - Printf("%v\n", hint) - if _, ok := hint.(checker.ErrDuplicatePacks); ok { - dupFound = true + switch hint.(type) { + case *checker.ErrDuplicatePacks, *checker.ErrOldIndexFormat: + Printf("%v\n", hint) + suggestIndexRebuild = true + default: + Warnf("error: %v\n", hint) + errorsFound = true } } - if dupFound { + if suggestIndexRebuild { Printf("This is non-critical, you can run `restic rebuild-index' to correct this\n") } @@ -238,7 +243,6 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error { return errors.Fatal("LoadIndex returned errors") } - errorsFound := false orphanedPacks := 0 errChan := make(chan error) @@ -252,11 +256,11 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error { continue } errorsFound = true - Warnf("%v\n", err) + Warnf("error: %v\n", err) } if orphanedPacks > 0 { - Verbosef("%d additional files were found in the repo, which likely contain duplicate data.\nYou can run `restic prune` to correct this.\n", orphanedPacks) + Verbosef("%d additional files were found in the repo, which likely contain duplicate data.\nThis is non-critical, you can run `restic prune` to correct this.\n", orphanedPacks) } Verbosef("check snapshots, trees and blobs\n") @@ -273,7 +277,7 @@ func runCheck(opts CheckOptions, gopts GlobalOptions, args []string) error { for err := range errChan { errorsFound = true - if e, ok := err.(checker.TreeError); ok { + if e, ok := err.(*checker.TreeError); ok { Warnf("error for tree %v:\n", e.ID.Str()) for _, treeErr := range e.Errors { Warnf(" %v\n", treeErr) diff 
--git a/cmd/restic/cmd_migrate.go b/cmd/restic/cmd_migrate.go index 4af98005e..10d78b0ca 100644 --- a/cmd/restic/cmd_migrate.go +++ b/cmd/restic/cmd_migrate.go @@ -8,11 +8,12 @@ import ( ) var cmdMigrate = &cobra.Command{ - Use: "migrate [flags] [name]", + Use: "migrate [flags] [migration name] [...]", Short: "Apply migrations", Long: ` -The "migrate" command applies migrations to a repository. When no migration -name is explicitly given, a list of migrations that can be applied is printed. +The "migrate" command checks which migrations can be applied for a repository +and prints a list with available migration names. If one or more migration +names are specified, these migrations are applied. EXIT STATUS =========== @@ -41,6 +42,8 @@ func init() { func checkMigrations(opts MigrateOptions, gopts GlobalOptions, repo restic.Repository) error { ctx := gopts.ctx Printf("available migrations:\n") + found := false + for _, m := range migrations.All { ok, err := m.Check(ctx, repo) if err != nil { @@ -48,10 +51,15 @@ func checkMigrations(opts MigrateOptions, gopts GlobalOptions, repo restic.Repos } if ok { - Printf(" %v: %v\n", m.Name(), m.Desc()) + Printf(" %v\t%v\n", m.Name(), m.Desc()) + found = true } } + if !found { + Printf("no migrations found\n") + } + return nil } @@ -76,6 +84,20 @@ func applyMigrations(opts MigrateOptions, gopts GlobalOptions, repo restic.Repos Warnf("check for migration %v failed, continuing anyway\n", m.Name()) } + repoCheckOpts := m.RepoCheckOptions() + if repoCheckOpts != nil { + Printf("checking repository integrity...\n") + + checkOptions := CheckOptions{} + checkGopts := gopts + // the repository is already locked + checkGopts.NoLock = true + err = runCheck(checkOptions, checkGopts, []string{}) + if err != nil { + return err + } + } + Printf("applying migration %v...\n", m.Name()) if err = m.Apply(ctx, repo); err != nil { Warnf("migration %v failed: %v\n", m.Name(), err) diff --git a/cmd/restic/cmd_prune.go b/cmd/restic/cmd_prune.go index 
a6a8d0bde..3bc904bfc 100644 --- a/cmd/restic/cmd_prune.go +++ b/cmd/restic/cmd_prune.go @@ -51,6 +51,7 @@ type PruneOptions struct { MaxRepackBytes uint64 RepackCachableOnly bool + RepackUncompressed bool } var pruneOptions PruneOptions @@ -68,6 +69,7 @@ func addPruneOptions(c *cobra.Command) { f.StringVar(&pruneOptions.MaxUnused, "max-unused", "5%", "tolerate given `limit` of unused data (absolute value in bytes with suffixes k/K, m/M, g/G, t/T, a value in % or the word 'unlimited')") f.StringVar(&pruneOptions.MaxRepackSize, "max-repack-size", "", "maximum `size` to repack (allowed suffixes: k/K, m/M, g/G, t/T)") f.BoolVar(&pruneOptions.RepackCachableOnly, "repack-cacheable-only", false, "only repack packs which are cacheable") + f.BoolVar(&pruneOptions.RepackUncompressed, "repack-uncompressed", false, "repack all uncompressed data") } func verifyPruneOptions(opts *PruneOptions) error { @@ -135,6 +137,10 @@ func runPrune(opts PruneOptions, gopts GlobalOptions) error { return err } + if opts.RepackUncompressed && gopts.Compression == repository.CompressionOff { + return errors.Fatal("disabled compression and `--repack-uncompressed` are mutually exclusive") + } + repo, err := OpenRepository(gopts) if err != nil { return err @@ -144,6 +150,10 @@ func runPrune(opts PruneOptions, gopts GlobalOptions) error { return errors.Fatal("prune requires a backend connection limit of at least two") } + if repo.Config().Version < 2 && opts.RepackUncompressed { + return errors.Fatal("compression requires at least repository format version 2") + } + if opts.UnsafeNoSpaceRecovery != "" { repoID := repo.Config().ID if opts.UnsafeNoSpaceRecovery != repoID { @@ -191,6 +201,7 @@ type packInfo struct { usedSize uint64 unusedSize uint64 tpe restic.BlobType + uncompressed bool } type packInfoWithID struct { @@ -299,6 +310,9 @@ func prune(opts PruneOptions, gopts GlobalOptions, repo restic.Repository, usedB ip.unusedSize += size ip.unusedBlobs++ } + if !blob.IsCompressed() { + 
ip.uncompressed = true + } // update indexPack indexPack[blob.PackID] = ip } @@ -318,6 +332,8 @@ func prune(opts PruneOptions, gopts GlobalOptions, repo restic.Repository, usedB } } + repoVersion := repo.Config().Version + // loop over all packs and decide what to do bar := newProgressMax(!gopts.Quiet, uint64(len(indexPack)), "packs processed") err := repo.List(ctx, restic.PackFile, func(id restic.ID, packSize int64) error { @@ -350,6 +366,15 @@ func prune(opts PruneOptions, gopts GlobalOptions, repo restic.Repository, usedB stats.packs.partlyUsed++ } + mustCompress := false + if repoVersion >= 2 { + // repo v2: always repack tree blobs if uncompressed + // compress data blobs if requested + mustCompress = (p.tpe == restic.TreeBlob || opts.RepackUncompressed) && p.uncompressed + } + // use a flag that pack must be compressed + p.uncompressed = mustCompress + // decide what to do switch { case p.usedBlobs == 0 && p.duplicateBlobs == 0: @@ -362,7 +387,7 @@ func prune(opts PruneOptions, gopts GlobalOptions, repo restic.Repository, usedB // if this is a data pack and --repack-cacheable-only is set => keep pack! keep(p) - case p.unusedBlobs == 0 && p.duplicateBlobs == 0 && p.tpe != restic.InvalidBlob: + case p.unusedBlobs == 0 && p.duplicateBlobs == 0 && p.tpe != restic.InvalidBlob && !mustCompress: // All blobs in pack are used and not duplicates/mixed => keep pack! 
keep(p) @@ -447,8 +472,8 @@ func prune(opts PruneOptions, gopts GlobalOptions, repo restic.Repository, usedB case reachedRepackSize: keep(p.packInfo) - case p.duplicateBlobs > 0, p.tpe != restic.DataBlob: - // repacking duplicates/non-data is only limited by repackSize + case p.duplicateBlobs > 0, p.tpe != restic.DataBlob, p.uncompressed: + // repacking duplicates/non-data/uncompressed-trees is only limited by repackSize repack(p.ID, p.packInfo) case reachedUnusedSizeAfter: diff --git a/doc/030_preparing_a_new_repo.rst b/doc/030_preparing_a_new_repo.rst index 8e7a024ca..13c0347ad 100644 --- a/doc/030_preparing_a_new_repo.rst +++ b/doc/030_preparing_a_new_repo.rst @@ -40,7 +40,17 @@ options exist: be used to explicitely set the version for the new repository. By default, the current stable version is used. Have a look at the `design documentation `__ for - details. + details. The alias ``latest`` will always point to the latest repository version. + The below table shows which restic version is required to use a certain + repository version and shows new features introduced by the repository format. 
+ ++--------------------+------------------------+---------------------+ +| Repository version | Minimum restic version | Major new features | ++====================+========================+=====================+ +| ``1`` | any version | | ++--------------------+------------------------+---------------------+ +| ``2`` | >= 0.14.0 | Compression support | ++--------------------+------------------------+---------------------+ Local diff --git a/doc/045_working_with_repos.rst b/doc/045_working_with_repos.rst index 8ba154f30..d34b09c08 100644 --- a/doc/045_working_with_repos.rst +++ b/doc/045_working_with_repos.rst @@ -298,3 +298,26 @@ a file size value the following command may be used: $ restic -r /srv/restic-repo check --read-data-subset=50M $ restic -r /srv/restic-repo check --read-data-subset=10G + + +Upgrading the repository format version +======================================= + +Repositories created using earlier restic versions use an older repository +format version and have to be upgraded to allow using all new features. +Upgrading must be done explicitly as a newer repository version increases the +minimum restic version required to access the repository. For example the +repository format version 2 is only readable using restic 0.14.0 or newer. + +Upgrading to repo version 2 is a two step process: first run +``migrate upgrade_repo_v2`` which will check the repository integrity and +then upgrade the repository version. Repository problems must be corrected +before the migration will be possible. After the migration is complete, run +``prune`` to compress the repository metadata. To limit the amount of data +rewritten at once, you can use the ``prune --max-repack-size size`` +parameter, see :ref:`customize-pruning` for more details. + +File contents stored in the repository will not be rewritten, data from new +backups will be compressed. Over time more and more of the repository will +be compressed. 
To speed up this process and compress all not yet compressed +data, you can run ``prune --repack-uncompressed``. diff --git a/doc/060_forget.rst b/doc/060_forget.rst index ab67368aa..df9491c5e 100644 --- a/doc/060_forget.rst +++ b/doc/060_forget.rst @@ -388,6 +388,8 @@ the specified duration: if ``forget --keep-within 7d`` is run 8 days after the last good snapshot, then the attacker can still use that opportunity to remove all legitimate snapshots. +.. _customize-pruning: + Customize pruning ***************** diff --git a/internal/backend/azure/azure.go b/internal/backend/azure/azure.go index 243a1eaef..ff89a6b01 100644 --- a/internal/backend/azure/azure.go +++ b/internal/backend/azure/azure.go @@ -125,6 +125,11 @@ func (be *Backend) Hasher() hash.Hash { return md5.New() } +// HasAtomicReplace returns whether Save() can atomically replace files +func (be *Backend) HasAtomicReplace() bool { + return true +} + // Path returns the path in the bucket that is used for this backend. func (be *Backend) Path() string { return be.prefix diff --git a/internal/backend/b2/b2.go b/internal/backend/b2/b2.go index 6108aaf5c..7f8019a74 100644 --- a/internal/backend/b2/b2.go +++ b/internal/backend/b2/b2.go @@ -147,6 +147,11 @@ func (be *b2Backend) Hasher() hash.Hash { return nil } +// HasAtomicReplace returns whether Save() can atomically replace files +func (be *b2Backend) HasAtomicReplace() bool { + return true +} + // IsNotExist returns true if the error is caused by a non-existing file. 
func (be *b2Backend) IsNotExist(err error) bool { return b2.IsNotExist(errors.Cause(err)) diff --git a/internal/backend/dryrun/dry_backend.go b/internal/backend/dryrun/dry_backend.go index 44eee9a45..31012df43 100644 --- a/internal/backend/dryrun/dry_backend.go +++ b/internal/backend/dryrun/dry_backend.go @@ -67,6 +67,10 @@ func (be *Backend) Hasher() hash.Hash { return be.b.Hasher() } +func (be *Backend) HasAtomicReplace() bool { + return be.b.HasAtomicReplace() +} + func (be *Backend) IsNotExist(err error) bool { return be.b.IsNotExist(err) } diff --git a/internal/backend/gs/gs.go b/internal/backend/gs/gs.go index c87211be3..92de75887 100644 --- a/internal/backend/gs/gs.go +++ b/internal/backend/gs/gs.go @@ -201,6 +201,11 @@ func (be *Backend) Hasher() hash.Hash { return md5.New() } +// HasAtomicReplace returns whether Save() can atomically replace files +func (be *Backend) HasAtomicReplace() bool { + return true +} + // Path returns the path in the bucket that is used for this backend. func (be *Backend) Path() string { return be.prefix diff --git a/internal/backend/local/local.go b/internal/backend/local/local.go index 77bc026ad..22fb8c8e5 100644 --- a/internal/backend/local/local.go +++ b/internal/backend/local/local.go @@ -102,6 +102,11 @@ func (b *Local) Hasher() hash.Hash { return nil } +// HasAtomicReplace returns whether Save() can atomically replace files +func (b *Local) HasAtomicReplace() bool { + return true +} + // IsNotExist returns true if the error is caused by a non existing file. 
func (b *Local) IsNotExist(err error) bool { return errors.Is(err, os.ErrNotExist) diff --git a/internal/backend/mem/mem_backend.go b/internal/backend/mem/mem_backend.go index 69476b693..b14149d52 100644 --- a/internal/backend/mem/mem_backend.go +++ b/internal/backend/mem/mem_backend.go @@ -268,6 +268,11 @@ func (be *MemoryBackend) Hasher() hash.Hash { return md5.New() } +// HasAtomicReplace returns whether Save() can atomically replace files +func (be *MemoryBackend) HasAtomicReplace() bool { + return false +} + // Delete removes all data in the backend. func (be *MemoryBackend) Delete(ctx context.Context) error { be.m.Lock() diff --git a/internal/backend/rest/rest.go b/internal/backend/rest/rest.go index 1e372229a..b9824bb53 100644 --- a/internal/backend/rest/rest.go +++ b/internal/backend/rest/rest.go @@ -121,6 +121,12 @@ func (b *Backend) Hasher() hash.Hash { return nil } +// HasAtomicReplace returns whether Save() can atomically replace files +func (b *Backend) HasAtomicReplace() bool { + // rest-server prevents overwriting + return false +} + // Save stores data in the backend at the handle. func (b *Backend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error { if err := h.Valid(); err != nil { diff --git a/internal/backend/s3/s3.go b/internal/backend/s3/s3.go index 1bdf2d795..ac1a1d5ce 100644 --- a/internal/backend/s3/s3.go +++ b/internal/backend/s3/s3.go @@ -269,6 +269,11 @@ func (be *Backend) Hasher() hash.Hash { return nil } +// HasAtomicReplace returns whether Save() can atomically replace files +func (be *Backend) HasAtomicReplace() bool { + return true +} + // Path returns the path in the bucket that is used for this backend. 
func (be *Backend) Path() string { return be.cfg.Prefix diff --git a/internal/backend/sftp/sftp.go b/internal/backend/sftp/sftp.go index a8a2a185d..ebbaaddad 100644 --- a/internal/backend/sftp/sftp.go +++ b/internal/backend/sftp/sftp.go @@ -267,6 +267,12 @@ func (r *SFTP) Hasher() hash.Hash { return nil } +// HasAtomicReplace returns whether Save() can atomically replace files +func (r *SFTP) HasAtomicReplace() bool { + // we use sftp's 'Rename()' in 'Save()' which does not allow overwriting + return false +} + // Join joins the given paths and cleans them afterwards. This always uses // forward slashes, which is required by sftp. func Join(parts ...string) string { diff --git a/internal/backend/swift/swift.go b/internal/backend/swift/swift.go index 6157002b5..b127cb832 100644 --- a/internal/backend/swift/swift.go +++ b/internal/backend/swift/swift.go @@ -129,6 +129,11 @@ func (be *beSwift) Hasher() hash.Hash { return md5.New() } +// HasAtomicReplace returns whether Save() can atomically replace files +func (be *beSwift) HasAtomicReplace() bool { + return true +} + // Load runs fn with a reader that yields the contents of the file at h at the // given offset. 
func (be *beSwift) Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error { diff --git a/internal/checker/checker.go b/internal/checker/checker.go index 8e49209f5..2ecd1469c 100644 --- a/internal/checker/checker.go +++ b/internal/checker/checker.go @@ -63,7 +63,7 @@ type ErrDuplicatePacks struct { Indexes restic.IDSet } -func (e ErrDuplicatePacks) Error() string { +func (e *ErrDuplicatePacks) Error() string { return fmt.Sprintf("pack %v contained in several indexes: %v", e.PackID.Str(), e.Indexes) } @@ -73,7 +73,7 @@ type ErrOldIndexFormat struct { restic.ID } -func (err ErrOldIndexFormat) Error() string { +func (err *ErrOldIndexFormat) Error() string { return fmt.Sprintf("index %v has old format", err.ID.Str()) } @@ -93,7 +93,7 @@ func (c *Checker) LoadIndex(ctx context.Context) (hints []error, errs []error) { if oldFormat { debug.Log("index %v has old format", id.Str()) - hints = append(hints, ErrOldIndexFormat{id}) + hints = append(hints, &ErrOldIndexFormat{id}) } err = errors.Wrapf(err, "error loading index %v", id.Str()) @@ -137,7 +137,7 @@ func (c *Checker) LoadIndex(ctx context.Context) (hints []error, errs []error) { for packID := range c.packs { debug.Log(" check pack %v: contained in %d indexes", packID, len(packToIndex[packID])) if len(packToIndex[packID]) > 1 { - hints = append(hints, ErrDuplicatePacks{ + hints = append(hints, &ErrDuplicatePacks{ PackID: packID, Indexes: packToIndex[packID], }) @@ -257,7 +257,7 @@ type TreeError struct { Errors []error } -func (e TreeError) Error() string { +func (e *TreeError) Error() string { return fmt.Sprintf("tree %v: %v", e.ID.Str(), e.Errors) } @@ -276,7 +276,7 @@ func (c *Checker) checkTreeWorker(ctx context.Context, trees <-chan restic.TreeI if len(errs) == 0 { continue } - treeError := TreeError{ID: job.ID, Errors: errs} + treeError := &TreeError{ID: job.ID, Errors: errs} select { case <-ctx.Done(): return diff --git a/internal/checker/checker_test.go 
b/internal/checker/checker_test.go index 2a4384b15..f2ee0c732 100644 --- a/internal/checker/checker_test.go +++ b/internal/checker/checker_test.go @@ -289,7 +289,7 @@ func TestDuplicatePacksInIndex(t *testing.T) { found := false for _, hint := range hints { - if _, ok := hint.(checker.ErrDuplicatePacks); ok { + if _, ok := hint.(*checker.ErrDuplicatePacks); ok { found = true } else { t.Errorf("got unexpected hint: %v", hint) diff --git a/internal/migrations/interface.go b/internal/migrations/interface.go index 9d9eedba1..eb0a8e60c 100644 --- a/internal/migrations/interface.go +++ b/internal/migrations/interface.go @@ -6,11 +6,16 @@ import ( "github.com/restic/restic/internal/restic" ) +type RepositoryCheckOptions struct { +} + // Migration implements a data migration. type Migration interface { // Check returns true if the migration can be applied to a repo. Check(context.Context, restic.Repository) (bool, error) + RepoCheckOptions() *RepositoryCheckOptions + // Apply runs the migration. 
Apply(context.Context, restic.Repository) error diff --git a/internal/migrations/s3_layout.go b/internal/migrations/s3_layout.go index 877b44c84..b64c3b073 100644 --- a/internal/migrations/s3_layout.go +++ b/internal/migrations/s3_layout.go @@ -37,6 +37,10 @@ func (m *S3Layout) Check(ctx context.Context, repo restic.Repository) (bool, err return true, nil } +func (m *S3Layout) RepoCheckOptions() *RepositoryCheckOptions { + return nil +} + func retry(max int, fail func(err error), f func() error) error { var err error for i := 0; i < max; i++ { diff --git a/internal/migrations/upgrade_repo_v2.go b/internal/migrations/upgrade_repo_v2.go new file mode 100644 index 000000000..86abeaeff --- /dev/null +++ b/internal/migrations/upgrade_repo_v2.go @@ -0,0 +1,126 @@ +package migrations + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/restic/restic/internal/restic" +) + +func init() { + register(&UpgradeRepoV2{}) +} + +type UpgradeRepoV2Error struct { + UploadNewConfigError error + ReuploadOldConfigError error + + BackupFilePath string +} + +func (err *UpgradeRepoV2Error) Error() string { + if err.ReuploadOldConfigError != nil { + return fmt.Sprintf("error uploading config (%v), re-uploading old config failed as well (%v), but there is a backup of the config file in %v", err.UploadNewConfigError, err.ReuploadOldConfigError, err.BackupFilePath) + } + + return fmt.Sprintf("error uploading config (%v), re-uploaded old config was successful, there is a backup of the config file in %v", err.UploadNewConfigError, err.BackupFilePath) + } + +func (err *UpgradeRepoV2Error) Unwrap() error { + // consider the original upload error as the primary cause + return err.UploadNewConfigError +} + +type UpgradeRepoV2 struct{} + +func (*UpgradeRepoV2) Name() string { + return "upgrade_repo_v2" +} + +func (*UpgradeRepoV2) Desc() string { + return "upgrade a repository to version 2" +} + +func (*UpgradeRepoV2) Check(ctx context.Context, repo 
restic.Repository) (bool, error) { + isV1 := repo.Config().Version == 1 + return isV1, nil +} + +func (*UpgradeRepoV2) RepoCheckOptions() *RepositoryCheckOptions { + return &RepositoryCheckOptions{} +} +func (*UpgradeRepoV2) upgrade(ctx context.Context, repo restic.Repository) error { + h := restic.Handle{Type: restic.ConfigFile} + + if !repo.Backend().HasAtomicReplace() { + // remove the original file for backends which do not support atomic overwriting + err := repo.Backend().Remove(ctx, h) + if err != nil { + return fmt.Errorf("remove config failed: %w", err) + } + } + + // upgrade config + cfg := repo.Config() + cfg.Version = 2 + + _, err := repo.SaveJSONUnpacked(ctx, restic.ConfigFile, cfg) + if err != nil { + return fmt.Errorf("save new config file failed: %w", err) + } + + return nil +} + +func (m *UpgradeRepoV2) Apply(ctx context.Context, repo restic.Repository) error { + tempdir, err := ioutil.TempDir("", "restic-migrate-upgrade-repo-v2-") + if err != nil { + return fmt.Errorf("create temp dir failed: %w", err) + } + + h := restic.Handle{Type: restic.ConfigFile} + + // read raw config file and save it to a temp dir, just in case + var rawConfigFile []byte + err = repo.Backend().Load(ctx, h, 0, 0, func(rd io.Reader) (err error) { + rawConfigFile, err = ioutil.ReadAll(rd) + return err + }) + if err != nil { + return fmt.Errorf("load config file failed: %w", err) + } + + backupFileName := filepath.Join(tempdir, "config") + err = ioutil.WriteFile(backupFileName, rawConfigFile, 0600) + if err != nil { + return fmt.Errorf("write config file backup to %v failed: %w", tempdir, err) + } + + // run the upgrade + err = m.upgrade(ctx, repo) + if err != nil { + + // build an error we can return to the caller + repoError := &UpgradeRepoV2Error{ + UploadNewConfigError: err, + BackupFilePath: backupFileName, + } + + // try contingency methods, reupload the original file + _ = repo.Backend().Remove(ctx, h) + err = repo.Backend().Save(ctx, h, 
restic.NewByteReader(rawConfigFile, nil)) + if err != nil { + repoError.ReuploadOldConfigError = err + } + + return repoError + } + + _ = os.Remove(backupFileName) + _ = os.Remove(tempdir) + return nil +} diff --git a/internal/migrations/upgrade_repo_v2_test.go b/internal/migrations/upgrade_repo_v2_test.go new file mode 100644 index 000000000..0d86d265c --- /dev/null +++ b/internal/migrations/upgrade_repo_v2_test.go @@ -0,0 +1,112 @@ +package migrations + +import ( + "context" + "os" + "path/filepath" + "sync" + "testing" + + "github.com/restic/restic/internal/errors" + "github.com/restic/restic/internal/repository" + "github.com/restic/restic/internal/restic" + "github.com/restic/restic/internal/test" +) + +func TestUpgradeRepoV2(t *testing.T) { + repo, cleanup := repository.TestRepositoryWithVersion(t, 1) + defer cleanup() + + if repo.Config().Version != 1 { + t.Fatal("test repo has wrong version") + } + + m := &UpgradeRepoV2{} + + ok, err := m.Check(context.Background(), repo) + if err != nil { + t.Fatal(err) + } + + if !ok { + t.Fatal("migration check returned false") + } + + err = m.Apply(context.Background(), repo) + if err != nil { + t.Fatal(err) + } +} + +type failBackend struct { + restic.Backend + + mu sync.Mutex + ConfigFileSavesUntilError uint +} + +func (be *failBackend) Save(ctx context.Context, h restic.Handle, rd restic.RewindReader) error { + if h.Type != restic.ConfigFile { + return be.Backend.Save(ctx, h, rd) + } + + be.mu.Lock() + if be.ConfigFileSavesUntilError == 0 { + be.mu.Unlock() + return errors.New("failure induced for testing") + } + + be.ConfigFileSavesUntilError-- + be.mu.Unlock() + + return be.Backend.Save(ctx, h, rd) +} + +func TestUpgradeRepoV2Failure(t *testing.T) { + be, cleanup := repository.TestBackend(t) + defer cleanup() + + // wrap backend so that it fails upgrading the config after the initial write + be = &failBackend{ + ConfigFileSavesUntilError: 1, + Backend: be, + } + + repo, cleanup := 
repository.TestRepositoryWithBackend(t, be, 1) + defer cleanup() + + if repo.Config().Version != 1 { + t.Fatal("test repo has wrong version") + } + + m := &UpgradeRepoV2{} + + ok, err := m.Check(context.Background(), repo) + if err != nil { + t.Fatal(err) + } + + if !ok { + t.Fatal("migration check returned false") + } + + err = m.Apply(context.Background(), repo) + if err == nil { + t.Fatal("expected error returned from Apply(), got nil") + } + + upgradeErr := err.(*UpgradeRepoV2Error) + if upgradeErr.UploadNewConfigError == nil { + t.Fatal("expected upload error, got nil") + } + + if upgradeErr.ReuploadOldConfigError == nil { + t.Fatal("expected reupload error, got nil") + } + + if upgradeErr.BackupFilePath == "" { + t.Fatal("no backup file path found") + } + test.OK(t, os.Remove(upgradeErr.BackupFilePath)) + test.OK(t, os.Remove(filepath.Dir(upgradeErr.BackupFilePath))) +} diff --git a/internal/mock/backend.go b/internal/mock/backend.go index 05fe1dc6e..655499b15 100644 --- a/internal/mock/backend.go +++ b/internal/mock/backend.go @@ -11,18 +11,19 @@ import ( // Backend implements a mock backend. 
type Backend struct { - CloseFn func() error - IsNotExistFn func(err error) bool - SaveFn func(ctx context.Context, h restic.Handle, rd restic.RewindReader) error - OpenReaderFn func(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) - StatFn func(ctx context.Context, h restic.Handle) (restic.FileInfo, error) - ListFn func(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error - RemoveFn func(ctx context.Context, h restic.Handle) error - TestFn func(ctx context.Context, h restic.Handle) (bool, error) - DeleteFn func(ctx context.Context) error - ConnectionsFn func() uint - LocationFn func() string - HasherFn func() hash.Hash + CloseFn func() error + IsNotExistFn func(err error) bool + SaveFn func(ctx context.Context, h restic.Handle, rd restic.RewindReader) error + OpenReaderFn func(ctx context.Context, h restic.Handle, length int, offset int64) (io.ReadCloser, error) + StatFn func(ctx context.Context, h restic.Handle) (restic.FileInfo, error) + ListFn func(ctx context.Context, t restic.FileType, fn func(restic.FileInfo) error) error + RemoveFn func(ctx context.Context, h restic.Handle) error + TestFn func(ctx context.Context, h restic.Handle) (bool, error) + DeleteFn func(ctx context.Context) error + ConnectionsFn func() uint + LocationFn func() string + HasherFn func() hash.Hash + HasAtomicReplaceFn func() bool } // NewBackend returns new mock Backend instance @@ -66,6 +67,14 @@ func (m *Backend) Hasher() hash.Hash { return m.HasherFn() } +// HasAtomicReplace returns whether Save() can atomically replace files +func (m *Backend) HasAtomicReplace() bool { + if m.HasAtomicReplaceFn == nil { + return false + } + return m.HasAtomicReplaceFn() +} + // IsNotExist returns true if the error is caused by a missing file. 
func (m *Backend) IsNotExist(err error) bool { if m.IsNotExistFn == nil { diff --git a/internal/restic/backend.go b/internal/restic/backend.go index 1203bf3d3..6ec10e685 100644 --- a/internal/restic/backend.go +++ b/internal/restic/backend.go @@ -24,6 +24,9 @@ type Backend interface { // Hasher may return a hash function for calculating a content hash for the backend Hasher() hash.Hash + // HasAtomicReplace returns whether Save() can atomically replace files + HasAtomicReplace() bool + // Test a boolean value whether a File with the name and type exists. Test(ctx context.Context, h Handle) (bool, error) diff --git a/internal/restic/config.go b/internal/restic/config.go index 6df32e2ef..ae4be0aa3 100644 --- a/internal/restic/config.go +++ b/internal/restic/config.go @@ -23,7 +23,7 @@ const MaxRepoVersion = 2 // StableRepoVersion is the version that is written to the config when a repository // is newly created with Init(). -const StableRepoVersion = 1 +const StableRepoVersion = 2 // JSONUnpackedLoader loads unpacked JSON. type JSONUnpackedLoader interface {