package repository_test

import (
"bytes"
"context"
"crypto/sha256"
"fmt"
"io"
"math/rand"
"os"
"path/filepath"
"sync"
"testing"
"time"

"github.com/restic/restic/internal/backend"
"github.com/restic/restic/internal/backend/local"
"github.com/restic/restic/internal/backend/mem"
"github.com/restic/restic/internal/cache"
"github.com/restic/restic/internal/crypto"
"github.com/restic/restic/internal/errors"
"github.com/restic/restic/internal/index"
"github.com/restic/restic/internal/repository"
"github.com/restic/restic/internal/restic"
"github.com/restic/restic/internal/test"
rtest "github.com/restic/restic/internal/test"
"golang.org/x/sync/errgroup"
)
var testSizes = []int{5, 23, 2<<18 + 23, 1 << 20}
var rnd = rand.New(rand.NewSource(time.Now().UnixNano()))
func TestSave(t *testing.T) {
repository.TestAllVersions(t, testSavePassID)
repository.TestAllVersions(t, testSaveCalculateID)
}
func testSavePassID(t *testing.T, version uint) {
testSave(t, version, false)
}
func testSaveCalculateID(t *testing.T, version uint) {
testSave(t, version, true)
}
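// testSave stores random blobs of various sizes and reads them back. With
// calculateID set, SaveBlob is passed a zero ID and has to compute the blob ID
// itself; otherwise the precomputed hash is handed in. In both cases the
// returned ID must match the SHA-256 of the plaintext.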
func testSave(t *testing.T, version uint, calculateID bool) {
repo := repository.TestRepositoryWithVersion(t, version)
for _, size := range testSizes {
data := make([]byte, size)
_, err := io.ReadFull(rnd, data)
rtest.OK(t, err)
id := restic.Hash(data)
var wg errgroup.Group
repo.StartPackUploader(context.TODO(), &wg)
// save
inputID := restic.ID{}
if !calculateID {
inputID = id
}
sid, _, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, data, inputID, false)
rtest.OK(t, err)
rtest.Equals(t, id, sid)
rtest.OK(t, repo.Flush(context.Background()))
// read back
buf, err := repo.LoadBlob(context.TODO(), restic.DataBlob, id, nil)
rtest.OK(t, err)
rtest.Equals(t, size, len(buf))
rtest.Assert(t, len(buf) == len(data),
"number of bytes read back does not match: expected %d, got %d",
len(data), len(buf))
rtest.Assert(t, bytes.Equal(buf, data),
"data does not match: expected %02x, got %02x",
data, buf)
}
}
func BenchmarkSaveAndEncrypt(t *testing.B) {
repository.BenchmarkAllVersions(t, benchmarkSaveAndEncrypt)
}
func benchmarkSaveAndEncrypt(t *testing.B, version uint) {
repo := repository.TestRepositoryWithVersion(t, version)
size := 4 << 20 // 4MiB
data := make([]byte, size)
_, err := io.ReadFull(rnd, data)
rtest.OK(t, err)
id := restic.ID(sha256.Sum256(data))
var wg errgroup.Group
repo.StartPackUploader(context.Background(), &wg)
t.ReportAllocs()
t.ResetTimer()
t.SetBytes(int64(size))
for i := 0; i < t.N; i++ {
_, _, _, err = repo.SaveBlob(context.TODO(), restic.DataBlob, data, id, true)
rtest.OK(t, err)
}
}
func TestLoadBlob(t *testing.T) {
repository.TestAllVersions(t, testLoadBlob)
}
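// testLoadBlob saves a single data blob and then loads it back into buffers of
// various capacities, checking that LoadBlob always returns the full plaintext.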
func testLoadBlob(t *testing.T, version uint) {
repo := repository.TestRepositoryWithVersion(t, version)
length := 1000000
buf := crypto.NewBlobBuffer(length)
_, err := io.ReadFull(rnd, buf)
rtest.OK(t, err)
var wg errgroup.Group
repo.StartPackUploader(context.TODO(), &wg)
id, _, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, buf, restic.ID{}, false)
rtest.OK(t, err)
rtest.OK(t, repo.Flush(context.Background()))
base := crypto.CiphertextLength(length)
for _, testlength := range []int{0, base - 20, base - 1, base, base + 7, base + 15, base + 1000} {
buf = make([]byte, 0, testlength)
buf, err := repo.LoadBlob(context.TODO(), restic.DataBlob, id, buf)
if err != nil {
t.Errorf("LoadBlob() returned an error for buffer size %v: %v", testlength, err)
continue
}
if len(buf) != length {
t.Errorf("LoadBlob() returned the wrong number of bytes: want %v, got %v", length, len(buf))
continue
}
}
}
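// TestLoadBlobBroken saves a tree blob through a backend that corrupts the
// first read of each file. LoadBlob must detect the corruption, retry, and
// still return the correct data, and the intact pack must end up in the cache.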
func TestLoadBlobBroken(t *testing.T) {
be := mem.New()
repo := repository.TestRepositoryWithBackend(t, &damageOnceBackend{Backend: be}, restic.StableRepoVersion, repository.Options{}).(*repository.Repository)
buf := test.Random(42, 1000)
var wg errgroup.Group
repo.StartPackUploader(context.TODO(), &wg)
id, _, _, err := repo.SaveBlob(context.TODO(), restic.TreeBlob, buf, restic.ID{}, false)
rtest.OK(t, err)
rtest.OK(t, repo.Flush(context.Background()))
// set up the cache only after saving the blob, so that damageOnceBackend damages the data that ends up in the cache
c := cache.TestNewCache(t)
repo.UseCache(c)
data, err := repo.LoadBlob(context.TODO(), restic.TreeBlob, id, nil)
rtest.OK(t, err)
rtest.Assert(t, bytes.Equal(buf, data), "data mismatch")
pack := repo.Index().Lookup(restic.BlobHandle{Type: restic.TreeBlob, ID: id})[0].PackID
rtest.Assert(t, c.Has(backend.Handle{Type: restic.PackFile, Name: pack.String()}), "expected tree pack to be cached")
}
func BenchmarkLoadBlob(b *testing.B) {
repository.BenchmarkAllVersions(b, benchmarkLoadBlob)
}
func benchmarkLoadBlob(b *testing.B, version uint) {
repo := repository.TestRepositoryWithVersion(b, version)
length := 1000000
buf := crypto.NewBlobBuffer(length)
_, err := io.ReadFull(rnd, buf)
rtest.OK(b, err)
var wg errgroup.Group
repo.StartPackUploader(context.TODO(), &wg)
id, _, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, buf, restic.ID{}, false)
rtest.OK(b, err)
rtest.OK(b, repo.Flush(context.Background()))
b.ResetTimer()
b.SetBytes(int64(length))
for i := 0; i < b.N; i++ {
var err error
buf, err = repo.LoadBlob(context.TODO(), restic.DataBlob, id, buf)
// Checking the SHA-256 with restic.Hash can make up 38% of the time
// spent in this loop, so pause the timer.
b.StopTimer()
rtest.OK(b, err)
if len(buf) != length {
b.Errorf("wanted %d bytes, got %d", length, len(buf))
}
id2 := restic.Hash(buf)
if !id.Equal(id2) {
b.Errorf("wrong data returned, wanted %v, got %v", id.Str(), id2.Str())
}
b.StartTimer()
}
}
func BenchmarkLoadUnpacked(b *testing.B) {
repository.BenchmarkAllVersions(b, benchmarkLoadUnpacked)
}
func benchmarkLoadUnpacked(b *testing.B, version uint) {
repo := repository.TestRepositoryWithVersion(b, version)
length := 1000000
buf := crypto.NewBlobBuffer(length)
_, err := io.ReadFull(rnd, buf)
rtest.OK(b, err)
dataID := restic.Hash(buf)
storageID, err := repo.SaveUnpacked(context.TODO(), restic.PackFile, buf)
rtest.OK(b, err)
// rtest.OK(b, repo.Flush())
b.ResetTimer()
b.SetBytes(int64(length))
for i := 0; i < b.N; i++ {
data, err := repo.LoadUnpacked(context.TODO(), restic.PackFile, storageID)
rtest.OK(b, err)
// See comment in BenchmarkLoadBlob.
b.StopTimer()
if len(data) != length {
b.Errorf("wanted %d bytes, got %d", length, len(data))
}
id2 := restic.Hash(data)
if !dataID.Equal(id2) {
b.Errorf("wrong data returned, wanted %v, got %v", storageID.Str(), id2.Str())
}
b.StartTimer()
}
}
var repoFixture = filepath.Join("testdata", "test-repo.tar.gz")
func TestRepositoryLoadIndex(t *testing.T) {
repo, cleanup := repository.TestFromFixture(t, repoFixture)
defer cleanup()
rtest.OK(t, repo.LoadIndex(context.TODO(), nil))
2015-07-04 14:52:17 +00:00
}
// loadIndex loads the index with the given ID from the backend, decodes it, and returns it.
func loadIndex(ctx context.Context, repo restic.LoaderUnpacked, id restic.ID) (*index.Index, error) {
buf, err := repo.LoadUnpacked(ctx, restic.IndexFile, id)
if err != nil {
return nil, err
}
idx, oldFormat, err := index.DecodeIndex(buf, id)
if oldFormat {
fmt.Fprintf(os.Stderr, "index %v has old format\n", id.Str())
}
return idx, err
}
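// TestRepositoryLoadUnpackedBroken writes a corrupted index file directly to
// the backend and checks that LoadUnpacked reports restic.ErrInvalidData.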
func TestRepositoryLoadUnpackedBroken(t *testing.T) {
repo := repository.TestRepository(t)
data := rtest.Random(23, 12345)
id := restic.Hash(data)
h := backend.Handle{Type: restic.IndexFile, Name: id.String()}
// damage buffer
data[0] ^= 0xff
// store broken file
err := repo.Backend().Save(context.TODO(), h, backend.NewByteReader(data, repo.Backend().Hasher()))
rtest.OK(t, err)
_, err = repo.LoadUnpacked(context.TODO(), restic.IndexFile, id)
rtest.Assert(t, errors.Is(err, restic.ErrInvalidData), "unexpected error: %v", err)
}
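// damageOnceBackend corrupts the first load of each file by shifting the read
// offset by one byte; all subsequent loads return the data unmodified. Config
// files are never damaged, as loading them cannot be retried.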
type damageOnceBackend struct {
backend.Backend
m sync.Map
}
func (be *damageOnceBackend) Load(ctx context.Context, h backend.Handle, length int, offset int64, fn func(rd io.Reader) error) error {
// don't break the config file as we can't retry it
if h.Type == restic.ConfigFile {
return be.Backend.Load(ctx, h, length, offset, fn)
}
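// clear IsMetadata so that all loads of the same file share one entry in the
// map, no matter how the caller filled in the handle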
h.IsMetadata = false
_, retry := be.m.Swap(h, true)
if !retry {
// return broken data on the first try
offset++
}
return be.Backend.Load(ctx, h, length, offset, fn)
}
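// TestRepositoryLoadUnpackedRetryBroken wraps the fixture repository in a
// damageOnceBackend and checks that loading the index still succeeds, as the
// broken first read is retried.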
func TestRepositoryLoadUnpackedRetryBroken(t *testing.T) {
repodir, cleanup := rtest.Env(t, repoFixture)
defer cleanup()
be, err := local.Open(context.TODO(), local.Config{Path: repodir, Connections: 2})
rtest.OK(t, err)
repo := repository.TestOpenBackend(t, &damageOnceBackend{Backend: be})
rtest.OK(t, repo.LoadIndex(context.TODO(), nil))
}
func BenchmarkLoadIndex(b *testing.B) {
repository.BenchmarkAllVersions(b, benchmarkLoadIndex)
}
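// benchmarkLoadIndex builds an index referencing 5000 packs with one blob
// each, saves it, and measures how long loading and decoding it takes.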
func benchmarkLoadIndex(b *testing.B, version uint) {
repository.TestUseLowSecurityKDFParameters(b)
repo := repository.TestRepositoryWithVersion(b, version)
idx := index.NewIndex()
for i := 0; i < 5000; i++ {
idx.StorePack(restic.NewRandomID(), []restic.Blob{
{
BlobHandle: restic.NewRandomBlobHandle(),
Length: 1234,
Offset: 1235,
},
})
}
idx.Finalize()
id, err := index.SaveIndex(context.TODO(), repo, idx)
rtest.OK(b, err)
b.Logf("index saved as %v", id.Str())
fi, err := repo.Backend().Stat(context.TODO(), backend.Handle{Type: restic.IndexFile, Name: id.String()})
rtest.OK(b, err)
b.Logf("filesize is %v", fi.Size)
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := loadIndex(context.TODO(), repo, id)
2017-10-02 13:06:39 +00:00
rtest.OK(b, err)
2016-09-04 12:29:04 +00:00
}
2015-07-04 14:52:17 +00:00
}
// saveRandomDataBlobs generates random data blobs and saves them to the repository.
func saveRandomDataBlobs(t testing.TB, repo restic.Repository, num int, sizeMax int) {
var wg errgroup.Group
repo.StartPackUploader(context.TODO(), &wg)
for i := 0; i < num; i++ {
size := rand.Int() % sizeMax
buf := make([]byte, size)
_, err := io.ReadFull(rnd, buf)
rtest.OK(t, err)
_, _, _, err = repo.SaveBlob(context.TODO(), restic.DataBlob, buf, restic.ID{}, false)
rtest.OK(t, err)
}
}
func TestRepositoryIncrementalIndex(t *testing.T) {
repository.TestAllVersions(t, testRepositoryIncrementalIndex)
}
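// testRepositoryIncrementalIndex flushes several batches of packs, writing an
// intermediate index after each batch, and then verifies that no pack is
// listed in more than one index.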
func testRepositoryIncrementalIndex(t *testing.T, version uint) {
repo := repository.TestRepositoryWithVersion(t, version).(*repository.Repository)
index.IndexFull = func(*index.Index, bool) bool { return true }
// add a few rounds of packs
for j := 0; j < 5; j++ {
// add some packs, write intermediate index
saveRandomDataBlobs(t, repo, 20, 1<<15)
rtest.OK(t, repo.Flush(context.TODO()))
}
// save final index
rtest.OK(t, repo.Flush(context.TODO()))
packEntries := make(map[restic.ID]map[restic.ID]struct{})
err := repo.List(context.TODO(), restic.IndexFile, func(id restic.ID, size int64) error {
idx, err := loadIndex(context.TODO(), repo, id)
rtest.OK(t, err)
rtest.OK(t, idx.Each(context.TODO(), func(pb restic.PackedBlob) {
if _, ok := packEntries[pb.PackID]; !ok {
packEntries[pb.PackID] = make(map[restic.ID]struct{})
}
packEntries[pb.PackID][id] = struct{}{}
}))
return nil
})
if err != nil {
t.Fatal(err)
}
for packID, ids := range packEntries {
if len(ids) > 1 {
t.Errorf("pack %v listed in %d indexes\n", packID, len(ids))
}
}
}
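// TestInvalidCompression checks that an unknown compression mode is rejected
// both when parsing the option value and when constructing a repository.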
func TestInvalidCompression(t *testing.T) {
var comp repository.CompressionMode
err := comp.Set("nope")
rtest.Assert(t, err != nil, "missing error")
_, err = repository.New(nil, repository.Options{Compression: comp})
rtest.Assert(t, err != nil, "missing error")
}
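// TestListPack caches a tree pack (whose first read is corrupted by
// damageOnceBackend) and then checks that ListPack returns the correct blob
// list and that the cached copy is dropped, as ListPack loads the pack without
// setting IsMetadata on the backend.Handle.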
func TestListPack(t *testing.T) {
be := mem.New()
repo := repository.TestRepositoryWithBackend(t, &damageOnceBackend{Backend: be}, restic.StableRepoVersion, repository.Options{}).(*repository.Repository)
buf := test.Random(42, 1000)
var wg errgroup.Group
repo.StartPackUploader(context.TODO(), &wg)
id, _, _, err := repo.SaveBlob(context.TODO(), restic.TreeBlob, buf, restic.ID{}, false)
rtest.OK(t, err)
rtest.OK(t, repo.Flush(context.Background()))
// set up the cache only after saving the blob, so that damageOnceBackend damages the data that ends up in the cache
c := cache.TestNewCache(t)
repo.UseCache(c)
// Forcibly cache pack file
packID := repo.Index().Lookup(restic.BlobHandle{Type: restic.TreeBlob, ID: id})[0].PackID
rtest.OK(t, repo.Backend().Load(context.TODO(), backend.Handle{Type: restic.PackFile, IsMetadata: true, Name: packID.String()}, 0, 0, func(rd io.Reader) error { return nil }))
// Get size to list pack
var size int64
rtest.OK(t, repo.List(context.TODO(), restic.PackFile, func(id restic.ID, sz int64) error {
if id == packID {
size = sz
}
return nil
}))
blobs, _, err := repo.ListPack(context.TODO(), packID, size)
rtest.OK(t, err)
rtest.Assert(t, len(blobs) == 1 && blobs[0].ID == id, "unexpected blobs in pack: %v", blobs)
rtest.Assert(t, !c.Has(backend.Handle{Type: restic.PackFile, Name: packID.String()}), "tree pack should no longer be cached as ListPack does not set IsMetadata in the backend.Handle")
}