
Merge pull request #4681 from MichaelEischer/verify-integrity-on-upload

backup: verify blobs before upload
Michael Eischer 2024-02-04 18:04:27 +00:00 committed by GitHub
commit d5e662315a
GPG Key ID: B5690EEEBB952194
14 changed files with 310 additions and 34 deletions

View File

@@ -0,0 +1,18 @@
Enhancement: Add extra verification of data integrity before upload
Hardware issues, or a bug in restic or its dependencies, could previously cause
corruption in the files restic created and stored in the repository. Detecting
such corruption previously required explicitly running the `check --read-data`
or `check --read-data-subset` commands.
To further ensure data integrity, even in the case of hardware issues or
software bugs, restic now performs additional verification of the files about
to be uploaded to the repository.
These extra checks will increase CPU usage during backups. They can therefore,
if absolutely necessary, be disabled using the `--no-extra-verify` global
option. Please note that this should be combined with more active checking
using the previously mentioned check commands.
https://github.com/restic/restic/issues/4529
https://github.com/restic/restic/pull/4681
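As a rough illustration of the round-trip check this entry describes, here is a minimal, self-contained Go sketch. It is not restic's implementation (restic uses its own `crypto.Key` and zstd compression, as the diffs below show); the helper names `sealBlob` and `verifyBeforeUpload` are invented for the example, and standard-library AES-GCM and SHA-256 stand in for restic's primitives.

```go
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"crypto/sha256"
	"errors"
	"fmt"
)

// sealBlob encrypts plaintext with AES-GCM and prepends the nonce,
// loosely mirroring the nonce||ciphertext layout used in this commit.
func sealBlob(aead cipher.AEAD, plaintext []byte) ([]byte, error) {
	nonce := make([]byte, aead.NonceSize())
	if _, err := rand.Read(nonce); err != nil {
		return nil, err
	}
	return aead.Seal(nonce, nonce, plaintext, nil), nil
}

// verifyBeforeUpload decrypts the sealed blob again and checks that the
// plaintext still hashes to the expected ID; only then would the blob be
// handed to the uploader.
func verifyBeforeUpload(aead cipher.AEAD, sealed []byte, want [32]byte) error {
	nonce, ciphertext := sealed[:aead.NonceSize()], sealed[aead.NonceSize():]
	plaintext, err := aead.Open(nil, nonce, ciphertext, nil)
	if err != nil {
		return fmt.Errorf("decryption failed: %w", err)
	}
	if sha256.Sum256(plaintext) != want {
		return errors.New("hash mismatch")
	}
	return nil
}

func main() {
	key := make([]byte, 32) // all-zero demo key; never do this in real code
	block, _ := aes.NewCipher(key)
	aead, _ := cipher.NewGCM(block)

	blob := []byte("a chunk of data about to be backed up")
	id := sha256.Sum256(blob)

	sealed, err := sealBlob(aead, blob)
	if err != nil {
		panic(err)
	}
	if err := verifyBeforeUpload(aead, sealed, id); err != nil {
		panic(err) // corrupted data would be caught here, before upload
	}
	fmt.Println("blob verified, safe to upload")
}
```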

View File

@@ -67,6 +67,7 @@ type GlobalOptions struct {
CleanupCache bool
Compression repository.CompressionMode
PackSize uint
+NoExtraVerify bool

backend.TransportOptions
limiter.Limits
@@ -139,6 +140,7 @@ func init() {
f.BoolVar(&globalOptions.InsecureTLS, "insecure-tls", false, "skip TLS certificate verification when connecting to the repository (insecure)")
f.BoolVar(&globalOptions.CleanupCache, "cleanup-cache", false, "auto remove old cache directories")
f.Var(&globalOptions.Compression, "compression", "compression mode (only available for repository format version 2), one of (auto|off|max) (default: $RESTIC_COMPRESSION)")
+f.BoolVar(&globalOptions.NoExtraVerify, "no-extra-verify", false, "skip additional verification of data before upload (see documentation)")
f.IntVar(&globalOptions.Limits.UploadKb, "limit-upload", 0, "limits uploads to a maximum `rate` in KiB/s. (default: unlimited)")
f.IntVar(&globalOptions.Limits.DownloadKb, "limit-download", 0, "limits downloads to a maximum `rate` in KiB/s. (default: unlimited)")
f.UintVar(&globalOptions.PackSize, "pack-size", 0, "set target pack `size` in MiB, created pack files may be larger (default: $RESTIC_PACK_SIZE)")
@@ -453,8 +455,9 @@ func OpenRepository(ctx context.Context, opts GlobalOptions) (*repository.Reposi
}

s, err := repository.New(be, repository.Options{
Compression: opts.Compression,
PackSize: opts.PackSize * 1024 * 1024,
+NoExtraVerify: opts.NoExtraVerify,
})
if err != nil {
return nil, errors.Fatal(err.Error())

View File

@@ -60,6 +60,20 @@ only applied for the single run of restic. The option can also be set via the en
variable ``RESTIC_COMPRESSION``.
Data Verification
=================
To prevent the upload of corrupted data to the repository, which can happen due
to hardware issues or software bugs, restic verifies that generated files can
be decoded and contain the correct data beforehand. This increases the CPU usage
during backups. If necessary, you can disable this verification using the
``--no-extra-verify`` option of the ``backup`` command. However, in this case
you should verify the repository integrity more actively using
``restic check --read-data`` (or the similar ``--read-data-subset`` option).
Otherwise, data corruption due to hardware issues or software bugs might go
unnoticed.
File Read Concurrency
=====================
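To make concrete what the Data Verification section above catches (and what you give up with `--no-extra-verify` unless you compensate with `restic check --read-data` or `--read-data-subset`), the hedged sketch below complements the one shown after the changelog entry: again standard-library AES-GCM and SHA-256 stand in for restic's own crypto, and the `verify` helper is invented for the example. A flipped bit in the encrypted data fails the AEAD authentication check, and a blob whose plaintext no longer matches its ID fails the hash comparison, much like the "ciphertext verification failed" and "hash mismatch" cases exercised by the tests added in this commit.

```go
package main

import (
	"bytes"
	"crypto/aes"
	"crypto/cipher"
	"crypto/sha256"
	"errors"
	"fmt"
)

// verify re-opens a sealed blob (nonce || ciphertext) and checks the
// plaintext hash against the expected ID, as a pre-upload sanity check.
func verify(aead cipher.AEAD, sealed []byte, want [32]byte) error {
	nonce, ct := sealed[:aead.NonceSize()], sealed[aead.NonceSize():]
	pt, err := aead.Open(nil, nonce, ct, nil)
	if err != nil {
		return fmt.Errorf("ciphertext verification failed: %w", err)
	}
	if sha256.Sum256(pt) != want {
		return errors.New("hash mismatch")
	}
	return nil
}

func main() {
	key := make([]byte, 32) // demo key only
	block, _ := aes.NewCipher(key)
	aead, _ := cipher.NewGCM(block)
	nonce := bytes.Repeat([]byte{1}, aead.NonceSize()) // fixed nonce for the demo; never reuse nonces in real code

	blob := []byte("chunk of backed-up data")
	id := sha256.Sum256(blob)
	sealed := aead.Seal(append([]byte{}, nonce...), nonce, blob, nil)

	// A bit flip in the stored ciphertext (e.g. bad RAM) fails authentication.
	corrupted := append([]byte{}, sealed...)
	corrupted[len(corrupted)-1] ^= 0x42
	fmt.Println("flipped ciphertext bit:", verify(aead, corrupted, id))

	// Data corrupted before encryption no longer matches its ID.
	wrong := append([]byte{}, blob...)
	wrong[0] ^= 0x42
	sealedWrong := aead.Seal(append([]byte{}, nonce...), nonce, wrong, nil)
	fmt.Println("corrupted plaintext:", verify(aead, sealedWrong, id))

	// Intact data passes and would be uploaded.
	fmt.Println("intact blob:", verify(aead, sealed, id))
}
```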

View File

@@ -1880,7 +1880,7 @@ func TestArchiverContextCanceled(t *testing.T) {
})

// Ensure that the archiver itself reports the canceled context and not just the backend
-repo := repository.TestRepositoryWithBackend(t, &noCancelBackend{mem.New()}, 0)
+repo := repository.TestRepositoryWithBackend(t, &noCancelBackend{mem.New()}, 0, repository.Options{})

back := restictest.Chdir(t, tempdir)
defer back()

View File

@@ -69,7 +69,7 @@ func TestUpgradeRepoV2Failure(t *testing.T) {
Backend: be,
}

-repo := repository.TestRepositoryWithBackend(t, be, 1)
+repo := repository.TestRepositoryWithBackend(t, be, 1, repository.Options{})
if repo.Config().Version != 1 {
t.Fatal("test repo has wrong version")
}

View File

@@ -1,6 +1,7 @@
package pack

import (
+"bytes"
"context"
"encoding/binary"
"fmt"
@@ -74,7 +75,7 @@ func (p *Packer) Finalize() error {
p.m.Lock()
defer p.m.Unlock()

-header, err := p.makeHeader()
+header, err := makeHeader(p.blobs)
if err != nil {
return err
}
@@ -83,6 +84,12 @@ func (p *Packer) Finalize() error {
nonce := crypto.NewRandomNonce()
encryptedHeader = append(encryptedHeader, nonce...)
encryptedHeader = p.k.Seal(encryptedHeader, nonce, header, nil)
+encryptedHeader = binary.LittleEndian.AppendUint32(encryptedHeader, uint32(len(encryptedHeader)))
+
+if err := verifyHeader(p.k, encryptedHeader, p.blobs); err != nil {
+//nolint:revive // ignore linter warnings about error message spelling
+return fmt.Errorf("Detected data corruption while writing pack-file header: %w\nCorrupted data is either caused by hardware issues or software bugs. Please open an issue at https://github.com/restic/restic/issues/new/choose for further troubleshooting.", err)
+}

// append the header
n, err := p.wr.Write(encryptedHeader)
@@ -90,18 +97,33 @@ func (p *Packer) Finalize() error {
return errors.Wrap(err, "Write")
}

-hdrBytes := len(encryptedHeader)
-if n != hdrBytes {
+if n != len(encryptedHeader) {
return errors.New("wrong number of bytes written")
}
+p.bytes += uint(len(encryptedHeader))

-// write length
-err = binary.Write(p.wr, binary.LittleEndian, uint32(hdrBytes))
-if err != nil {
-return errors.Wrap(err, "binary.Write")
-}
-p.bytes += uint(hdrBytes + binary.Size(uint32(0)))
-
return nil
}

+func verifyHeader(k *crypto.Key, header []byte, expected []restic.Blob) error {
+// do not offer a way to skip the pack header verification, as pack headers are usually small enough
+// to not result in a significant performance impact
+decoded, hdrSize, err := List(k, bytes.NewReader(header), int64(len(header)))
+if err != nil {
+return fmt.Errorf("header decoding failed: %w", err)
+}
+if hdrSize != uint32(len(header)) {
+return fmt.Errorf("unexpected header size %v instead of %v", hdrSize, len(header))
+}
+if len(decoded) != len(expected) {
+return fmt.Errorf("pack header size mismatch")
+}
+for i := 0; i < len(decoded); i++ {
+if decoded[i] != expected[i] {
+return fmt.Errorf("pack header entry mismatch got %v instead of %v", decoded[i], expected[i])
+}
+}
+return nil
+}
@@ -111,10 +133,10 @@ func (p *Packer) HeaderOverhead() int {
}

// makeHeader constructs the header for p.
-func (p *Packer) makeHeader() ([]byte, error) {
-buf := make([]byte, 0, len(p.blobs)*int(entrySize))
-for _, b := range p.blobs {
+func makeHeader(blobs []restic.Blob) ([]byte, error) {
+buf := make([]byte, 0, len(blobs)*int(entrySize))
+for _, b := range blobs {
switch {
case b.Type == restic.DataBlob && b.UncompressedLength == 0:
buf = append(buf, 0)

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"encoding/binary"
"io"
+"strings"
"testing"

"github.com/restic/restic/internal/crypto"
@@ -177,3 +178,60 @@ func TestReadRecords(t *testing.T) {
}
}
}
func TestUnpackedVerification(t *testing.T) {
// create random keys
k := crypto.NewRandomKey()
blobs := []restic.Blob{
{
BlobHandle: restic.NewRandomBlobHandle(),
Length: 42,
Offset: 0,
UncompressedLength: 2 * 42,
},
}
type DamageType string
const (
damageData DamageType = "data"
damageCiphertext DamageType = "ciphertext"
damageLength DamageType = "length"
)
for _, test := range []struct {
damage DamageType
msg string
}{
{"", ""},
{damageData, "pack header entry mismatch"},
{damageCiphertext, "ciphertext verification failed"},
{damageLength, "header decoding failed"},
} {
header, err := makeHeader(blobs)
rtest.OK(t, err)
if test.damage == damageData {
header[8] ^= 0x42
}
encryptedHeader := make([]byte, 0, crypto.CiphertextLength(len(header)))
nonce := crypto.NewRandomNonce()
encryptedHeader = append(encryptedHeader, nonce...)
encryptedHeader = k.Seal(encryptedHeader, nonce, header, nil)
encryptedHeader = binary.LittleEndian.AppendUint32(encryptedHeader, uint32(len(encryptedHeader)))
if test.damage == damageCiphertext {
encryptedHeader[8] ^= 0x42
}
if test.damage == damageLength {
encryptedHeader[len(encryptedHeader)-1] ^= 0x42
}
err = verifyHeader(k, encryptedHeader, blobs)
if test.msg == "" {
rtest.Assert(t, err == nil, "expected no error, got %v", err)
} else {
rtest.Assert(t, strings.Contains(err.Error(), test.msg), "expected error to contain %q, got %q", test.msg, err)
}
}
}

View File

@@ -4,7 +4,6 @@ import (
"context"
"testing"

-"github.com/restic/restic/internal/backend/mem"
"github.com/restic/restic/internal/restic"
"golang.org/x/sync/errgroup"
)
@@ -19,7 +18,7 @@ func FuzzSaveLoadBlob(f *testing.F) {
}
id := restic.Hash(blob)

-repo := TestRepositoryWithBackend(t, mem.New(), 2)
+repo := TestRepositoryWithVersion(t, 2)
var wg errgroup.Group
repo.StartPackUploader(context.TODO(), &wg)

View File

@@ -336,7 +336,8 @@ func TestRepackWrongBlob(t *testing.T) {
}

func testRepackWrongBlob(t *testing.T, version uint) {
-repo := repository.TestRepositoryWithVersion(t, version)
+// disable verification to allow adding corrupted blobs to the repository
+repo := repository.TestRepositoryWithBackend(t, nil, version, repository.Options{NoExtraVerify: true})

seed := time.Now().UnixNano()
rand.Seed(seed)
@@ -361,7 +362,8 @@ func TestRepackBlobFallback(t *testing.T) {
}

func testRepackBlobFallback(t *testing.T, version uint) {
-repo := repository.TestRepositoryWithVersion(t, version)
+// disable verification to allow adding corrupted blobs to the repository
+repo := repository.TestRepositoryWithBackend(t, nil, version, repository.Options{NoExtraVerify: true})

seed := time.Now().UnixNano()
rand.Seed(seed)

View File

@@ -38,17 +38,17 @@ func TestRepairBrokenPack(t *testing.T) {
func testRepairBrokenPack(t *testing.T, version uint) {
tests := []struct {
name string
-damage func(repo restic.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet)
+damage func(t *testing.T, repo restic.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet)
}{
{
"valid pack",
-func(repo restic.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) {
+func(t *testing.T, repo restic.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) {
return packsBefore, restic.NewBlobSet()
},
},
{
"broken pack",
-func(repo restic.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) {
+func(t *testing.T, repo restic.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) {
wrongBlob := createRandomWrongBlob(t, repo)
damagedPacks := findPacksForBlobs(t, repo, restic.NewBlobSet(wrongBlob))
return damagedPacks, restic.NewBlobSet(wrongBlob)
@@ -56,7 +56,7 @@ func testRepairBrokenPack(t *testing.T, version uint) {
},
{
"partially broken pack",
-func(repo restic.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) {
+func(t *testing.T, repo restic.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) {
// damage one of the pack files
damagedID := packsBefore.List()[0]
replaceFile(t, repo, backend.Handle{Type: backend.PackFile, Name: damagedID.String()},
@@ -79,7 +79,7 @@ func testRepairBrokenPack(t *testing.T, version uint) {
},
}, {
"truncated pack",
-func(repo restic.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) {
+func(t *testing.T, repo restic.Repository, packsBefore restic.IDSet) (restic.IDSet, restic.BlobSet) {
// damage one of the pack files
damagedID := packsBefore.List()[0]
replaceFile(t, repo, backend.Handle{Type: backend.PackFile, Name: damagedID.String()},
@@ -102,7 +102,8 @@ func testRepairBrokenPack(t *testing.T, version uint) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
-repo := repository.TestRepositoryWithVersion(t, version)
+// disable verification to allow adding corrupted blobs to the repository
+repo := repository.TestRepositoryWithBackend(t, nil, version, repository.Options{NoExtraVerify: true})

seed := time.Now().UnixNano()
rand.Seed(seed)
@@ -112,7 +113,7 @@ func testRepairBrokenPack(t *testing.T, version uint) {
packsBefore := listPacks(t, repo)
blobsBefore := listBlobs(repo)

-toRepair, damagedBlobs := test.damage(repo, packsBefore)
+toRepair, damagedBlobs := test.damage(t, repo, packsBefore)

rtest.OK(t, repository.RepairPacks(context.TODO(), repo, toRepair, &progress.NoopPrinter{}))

// reload index

View File

@@ -59,8 +59,9 @@ type Repository struct {
}

type Options struct {
Compression CompressionMode
PackSize uint
+NoExtraVerify bool
}

// CompressionMode configures if data should be compressed.
@@ -423,6 +424,11 @@ func (r *Repository) saveAndEncrypt(ctx context.Context, t restic.BlobType, data
// encrypt blob
ciphertext = r.key.Seal(ciphertext, nonce, data, nil)

+if err := r.verifyCiphertext(ciphertext, uncompressedLength, id); err != nil {
+//nolint:revive // ignore linter warnings about error message spelling
+return 0, fmt.Errorf("Detected data corruption while saving blob %v: %w\nCorrupted blobs are either caused by hardware issues or software bugs. Please open an issue at https://github.com/restic/restic/issues/new/choose for further troubleshooting.", id, err)
+}
+
// find suitable packer and add blob
var pm *packerManager
@@ -438,6 +444,31 @@ func (r *Repository) saveAndEncrypt(ctx context.Context, t restic.BlobType, data
return pm.SaveBlob(ctx, t, id, ciphertext, uncompressedLength)
}

func (r *Repository) verifyCiphertext(buf []byte, uncompressedLength int, id restic.ID) error {
if r.opts.NoExtraVerify {
return nil
}
nonce, ciphertext := buf[:r.key.NonceSize()], buf[r.key.NonceSize():]
plaintext, err := r.key.Open(nil, nonce, ciphertext, nil)
if err != nil {
return fmt.Errorf("decryption failed: %w", err)
}
if uncompressedLength != 0 {
// DecodeAll will allocate a slice if it is not large enough since it
// knows the decompressed size (because we're using EncodeAll)
plaintext, err = r.getZstdDecoder().DecodeAll(plaintext, nil)
if err != nil {
return fmt.Errorf("decompression failed: %w", err)
}
}
if !restic.Hash(plaintext).Equal(id) {
return errors.New("hash mismatch")
}
return nil
}
func (r *Repository) compressUnpacked(p []byte) ([]byte, error) {
// compression is only available starting from version 2
if r.cfg.Version < 2 {
@@ -474,7 +505,8 @@ func (r *Repository) decompressUnpacked(p []byte) ([]byte, error) {
// SaveUnpacked encrypts data and stores it in the backend. Returned is the
// storage hash.
-func (r *Repository) SaveUnpacked(ctx context.Context, t restic.FileType, p []byte) (id restic.ID, err error) {
+func (r *Repository) SaveUnpacked(ctx context.Context, t restic.FileType, buf []byte) (id restic.ID, err error) {
+p := buf
if t != restic.ConfigFile {
p, err = r.compressUnpacked(p)
if err != nil {
@@ -489,6 +521,11 @@ func (r *Repository) SaveUnpacked(ctx context.Context, t restic.FileType, p []by
ciphertext = r.key.Seal(ciphertext, nonce, p, nil)

+if err := r.verifyUnpacked(ciphertext, t, buf); err != nil {
+//nolint:revive // ignore linter warnings about error message spelling
+return restic.ID{}, fmt.Errorf("Detected data corruption while saving file of type %v: %w\nCorrupted data is either caused by hardware issues or software bugs. Please open an issue at https://github.com/restic/restic/issues/new/choose for further troubleshooting.", t, err)
+}
+
if t == restic.ConfigFile {
id = restic.ID{}
} else {
@@ -506,6 +543,29 @@ func (r *Repository) SaveUnpacked(ctx context.Context, t restic.FileType, p []by
return id, nil
}

func (r *Repository) verifyUnpacked(buf []byte, t restic.FileType, expected []byte) error {
if r.opts.NoExtraVerify {
return nil
}
nonce, ciphertext := buf[:r.key.NonceSize()], buf[r.key.NonceSize():]
plaintext, err := r.key.Open(nil, nonce, ciphertext, nil)
if err != nil {
return fmt.Errorf("decryption failed: %w", err)
}
if t != restic.ConfigFile {
plaintext, err = r.decompressUnpacked(plaintext)
if err != nil {
return fmt.Errorf("decompression failed: %w", err)
}
}
if !bytes.Equal(plaintext, expected) {
return errors.New("data mismatch")
}
return nil
}
// Flush saves all remaining packs and the index
func (r *Repository) Flush(ctx context.Context) error {
if err := r.flushPacks(ctx); err != nil {

View File

@@ -351,3 +351,101 @@ func testStreamPack(t *testing.T, version uint) {
}
})
}
func TestBlobVerification(t *testing.T) {
repo := TestRepository(t).(*Repository)
type DamageType string
const (
damageData DamageType = "data"
damageCompressed DamageType = "compressed"
damageCiphertext DamageType = "ciphertext"
)
for _, test := range []struct {
damage DamageType
msg string
}{
{"", ""},
{damageData, "hash mismatch"},
{damageCompressed, "decompression failed"},
{damageCiphertext, "ciphertext verification failed"},
} {
plaintext := rtest.Random(800, 1234)
id := restic.Hash(plaintext)
if test.damage == damageData {
plaintext[42] ^= 0x42
}
uncompressedLength := uint(len(plaintext))
plaintext = repo.getZstdEncoder().EncodeAll(plaintext, nil)
if test.damage == damageCompressed {
plaintext = plaintext[:len(plaintext)-8]
}
nonce := crypto.NewRandomNonce()
ciphertext := append([]byte{}, nonce...)
ciphertext = repo.Key().Seal(ciphertext, nonce, plaintext, nil)
if test.damage == damageCiphertext {
ciphertext[42] ^= 0x42
}
err := repo.verifyCiphertext(ciphertext, int(uncompressedLength), id)
if test.msg == "" {
rtest.Assert(t, err == nil, "expected no error, got %v", err)
} else {
rtest.Assert(t, strings.Contains(err.Error(), test.msg), "expected error to contain %q, got %q", test.msg, err)
}
}
}
func TestUnpackedVerification(t *testing.T) {
repo := TestRepository(t).(*Repository)
type DamageType string
const (
damageData DamageType = "data"
damageCompressed DamageType = "compressed"
damageCiphertext DamageType = "ciphertext"
)
for _, test := range []struct {
damage DamageType
msg string
}{
{"", ""},
{damageData, "data mismatch"},
{damageCompressed, "decompression failed"},
{damageCiphertext, "ciphertext verification failed"},
} {
plaintext := rtest.Random(800, 1234)
orig := append([]byte{}, plaintext...)
if test.damage == damageData {
plaintext[42] ^= 0x42
}
compressed := []byte{2}
compressed = repo.getZstdEncoder().EncodeAll(plaintext, compressed)
if test.damage == damageCompressed {
compressed = compressed[:len(compressed)-8]
}
nonce := crypto.NewRandomNonce()
ciphertext := append([]byte{}, nonce...)
ciphertext = repo.Key().Seal(ciphertext, nonce, compressed, nil)
if test.damage == damageCiphertext {
ciphertext[42] ^= 0x42
}
err := repo.verifyUnpacked(ciphertext, restic.IndexFile, orig)
if test.msg == "" {
rtest.Assert(t, err == nil, "expected no error, got %v", err)
} else {
rtest.Assert(t, strings.Contains(err.Error(), test.msg), "expected error to contain %q, got %q", test.msg, err)
}
}
}

View File

@@ -44,7 +44,7 @@ const TestChunkerPol = chunker.Pol(0x3DA3358B4DC173)
// TestRepositoryWithBackend returns a repository initialized with a test
// password. If be is nil, an in-memory backend is used. A constant polynomial
// is used for the chunker and low-security test parameters.
-func TestRepositoryWithBackend(t testing.TB, be backend.Backend, version uint) restic.Repository {
+func TestRepositoryWithBackend(t testing.TB, be backend.Backend, version uint, opts Options) restic.Repository {
t.Helper()
TestUseLowSecurityKDFParameters(t)
restic.TestDisableCheckPolynomial(t)
@@ -53,7 +53,7 @@ func TestRepositoryWithBackend(t testing.TB, be backend.Backend, version uint) r
be = TestBackend(t)
}

-repo, err := New(be, Options{})
+repo, err := New(be, opts)
if err != nil {
t.Fatalf("TestRepository(): new repo failed: %v", err)
}
@@ -79,6 +79,7 @@ func TestRepository(t testing.TB) restic.Repository {
func TestRepositoryWithVersion(t testing.TB, version uint) restic.Repository {
t.Helper()
dir := os.Getenv("RESTIC_TEST_REPO")
+opts := Options{}
if dir != "" {
_, err := os.Stat(dir)
if err != nil {
@@ -86,7 +87,7 @@ func TestRepositoryWithVersion(t testing.TB, version uint) restic.Repository {
if err != nil {
t.Fatalf("error creating local backend at %v: %v", dir, err)
}
-return TestRepositoryWithBackend(t, be, version)
+return TestRepositoryWithBackend(t, be, version, opts)
}

if err == nil {
@@ -94,7 +95,7 @@ func TestRepositoryWithVersion(t testing.TB, version uint) restic.Repository {
}
}

-return TestRepositoryWithBackend(t, nil, version)
+return TestRepositoryWithBackend(t, nil, version, opts)
}

// TestOpenLocal opens a local repository.

View File

@@ -66,7 +66,7 @@ func (be *failLockLoadingBackend) Load(ctx context.Context, h backend.Handle, le
func TestMultipleLockFailure(t *testing.T) {
be := &failLockLoadingBackend{Backend: mem.New()}

-repo := repository.TestRepositoryWithBackend(t, be, 0)
+repo := repository.TestRepositoryWithBackend(t, be, 0, repository.Options{})
restic.TestSetLockTimeout(t, 5*time.Millisecond)

lock1, err := restic.NewLock(context.TODO(), repo)