restic/internal/repository/repository_test.go

package repository_test

import (
	"bytes"
	"context"
	"crypto/sha256"
	"io"
	"math/rand"
	"path/filepath"
	"testing"
	"time"

	"github.com/restic/restic/internal"
	"github.com/restic/restic/internal/archiver"
	"github.com/restic/restic/internal/repository"
	. "github.com/restic/restic/internal/test"
)

var testSizes = []int{5, 23, 2<<18 + 23, 1 << 20}

var rnd = rand.New(rand.NewSource(time.Now().UnixNano()))
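
// TestSave stores random blobs of various sizes without passing a precomputed
// ID and checks that the data can be read back intact.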
func TestSave(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	for _, size := range testSizes {
		data := make([]byte, size)
		_, err := io.ReadFull(rnd, data)
		OK(t, err)

		id := restic.Hash(data)

		// save
		sid, err := repo.SaveBlob(context.TODO(), restic.DataBlob, data, restic.ID{})
		OK(t, err)
		Equals(t, id, sid)

		OK(t, repo.Flush())
		// OK(t, repo.SaveIndex())

		// read back
		buf := restic.NewBlobBuffer(size)
		n, err := repo.LoadBlob(context.TODO(), restic.DataBlob, id, buf)
		OK(t, err)
		Equals(t, len(buf), n)

		Assert(t, len(buf) == len(data),
			"number of bytes read back does not match: expected %d, got %d",
			len(data), len(buf))

		Assert(t, bytes.Equal(buf, data),
			"data does not match: expected %02x, got %02x",
			data, buf)
	}
}
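
// TestSaveFrom stores random blobs with a caller-supplied ID and checks that
// the data can be read back intact.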
func TestSaveFrom(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	for _, size := range testSizes {
		data := make([]byte, size)
		_, err := io.ReadFull(rnd, data)
		OK(t, err)

		id := restic.Hash(data)

		// save
		id2, err := repo.SaveBlob(context.TODO(), restic.DataBlob, data, id)
		OK(t, err)
		Equals(t, id, id2)

		OK(t, repo.Flush())

		// read back
		buf := restic.NewBlobBuffer(size)
		n, err := repo.LoadBlob(context.TODO(), restic.DataBlob, id, buf)
		OK(t, err)
		Equals(t, len(buf), n)

		Assert(t, len(buf) == len(data),
			"number of bytes read back does not match: expected %d, got %d",
			len(data), len(buf))

		Assert(t, bytes.Equal(buf, data),
			"data does not match: expected %02x, got %02x",
			data, buf)
	}
}
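
// BenchmarkSaveAndEncrypt measures saving (and thereby encrypting) a 4 MiB
// blob of random data.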
func BenchmarkSaveAndEncrypt(t *testing.B) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	size := 4 << 20 // 4MiB

	data := make([]byte, size)
	_, err := io.ReadFull(rnd, data)
	OK(t, err)

	id := restic.ID(sha256.Sum256(data))

	t.ResetTimer()
	t.SetBytes(int64(size))

	for i := 0; i < t.N; i++ {
		// save
		_, err = repo.SaveBlob(context.TODO(), restic.DataBlob, data, id)
		OK(t, err)
	}
}
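
// TestLoadTree archives BenchArchiveDirectory and checks that the snapshot's
// root tree can be loaded from the repository.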
func TestLoadTree(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	if BenchArchiveDirectory == "" {
		t.Skip("benchdir not set, skipping")
	}

	// archive a few files
	sn := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, nil)
	OK(t, repo.Flush())

	_, err := repo.LoadTree(context.TODO(), *sn.Tree)
	OK(t, err)
}
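
// BenchmarkLoadTree measures loading the root tree of a snapshot of
// BenchArchiveDirectory.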
func BenchmarkLoadTree(t *testing.B) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	if BenchArchiveDirectory == "" {
		t.Skip("benchdir not set, skipping")
	}

	// archive a few files
	sn := archiver.TestSnapshot(t, repo, BenchArchiveDirectory, nil)
	OK(t, repo.Flush())

	t.ResetTimer()

	for i := 0; i < t.N; i++ {
		_, err := repo.LoadTree(context.TODO(), *sn.Tree)
		OK(t, err)
	}
}
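
// TestLoadBlob checks that LoadBlob fails for buffers that are too small to
// hold the ciphertext and succeeds for buffers that are large enough.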
func TestLoadBlob(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	length := 1000000
	buf := restic.NewBlobBuffer(length)
	_, err := io.ReadFull(rnd, buf)
	OK(t, err)

	id, err := repo.SaveBlob(context.TODO(), restic.DataBlob, buf, restic.ID{})
	OK(t, err)
	OK(t, repo.Flush())

	// first, test with buffers that are too small
	for _, testlength := range []int{length - 20, length, restic.CiphertextLength(length) - 1} {
		buf = make([]byte, 0, testlength)
		n, err := repo.LoadBlob(context.TODO(), restic.DataBlob, id, buf)
		if err == nil {
			t.Errorf("LoadBlob() did not return an error for a buffer that is too small to hold the blob")
			continue
		}

		if n != 0 {
			t.Errorf("LoadBlob() returned an error and n > 0")
			continue
		}
	}

	// then use buffers that are large enough
	base := restic.CiphertextLength(length)
	for _, testlength := range []int{base, base + 7, base + 15, base + 1000} {
		buf = make([]byte, 0, testlength)
		n, err := repo.LoadBlob(context.TODO(), restic.DataBlob, id, buf)
		if err != nil {
			t.Errorf("LoadBlob() returned an error for buffer size %v: %v", testlength, err)
			continue
		}

		if n != length {
			t.Errorf("LoadBlob() returned the wrong number of bytes: want %v, got %v", length, n)
			continue
		}
	}
}
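
// BenchmarkLoadBlob measures loading a 1000000-byte data blob from the
// repository and verifies the returned content hash.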
func BenchmarkLoadBlob(b *testing.B) {
	repo, cleanup := repository.TestRepository(b)
	defer cleanup()

	length := 1000000
	buf := restic.NewBlobBuffer(length)
	_, err := io.ReadFull(rnd, buf)
	OK(b, err)

	id, err := repo.SaveBlob(context.TODO(), restic.DataBlob, buf, restic.ID{})
	OK(b, err)
	OK(b, repo.Flush())

	b.ResetTimer()
	b.SetBytes(int64(length))

	for i := 0; i < b.N; i++ {
		n, err := repo.LoadBlob(context.TODO(), restic.DataBlob, id, buf)
		OK(b, err)
		if n != length {
			b.Errorf("wanted %d bytes, got %d", length, n)
		}

		id2 := restic.Hash(buf[:n])
		if !id.Equal(id2) {
			b.Errorf("wrong data returned, wanted %v, got %v", id.Str(), id2.Str())
		}
	}
}
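
// BenchmarkLoadAndDecrypt measures loading and decrypting an unpacked file of
// random data and verifies its content hash.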
func BenchmarkLoadAndDecrypt(b *testing.B) {
	repo, cleanup := repository.TestRepository(b)
	defer cleanup()

	length := 1000000
	buf := restic.NewBlobBuffer(length)
	_, err := io.ReadFull(rnd, buf)
	OK(b, err)

	dataID := restic.Hash(buf)

	storageID, err := repo.SaveUnpacked(context.TODO(), restic.DataFile, buf)
	OK(b, err)
	// OK(b, repo.Flush())

	b.ResetTimer()
	b.SetBytes(int64(length))

	for i := 0; i < b.N; i++ {
		data, err := repo.LoadAndDecrypt(context.TODO(), restic.DataFile, storageID)
		OK(b, err)
		if len(data) != length {
			b.Errorf("wanted %d bytes, got %d", length, len(data))
		}

		id2 := restic.Hash(data)
		if !dataID.Equal(id2) {
			b.Errorf("wrong data returned, wanted %v, got %v", dataID.Str(), id2.Str())
		}
	}
}
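
// TestLoadJSONUnpacked saves a snapshot as an unpacked JSON file and checks
// that it can be loaded back with the same fields.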
func TestLoadJSONUnpacked(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	if BenchArchiveDirectory == "" {
		t.Skip("benchdir not set, skipping")
	}

	// archive a snapshot
	sn := restic.Snapshot{}
	sn.Hostname = "foobar"
	sn.Username = "test!"

	id, err := repo.SaveJSONUnpacked(context.TODO(), restic.SnapshotFile, &sn)
	OK(t, err)

	var sn2 restic.Snapshot

	// restore
	err = repo.LoadJSONUnpacked(context.TODO(), restic.SnapshotFile, id, &sn2)
	OK(t, err)

	Equals(t, sn.Hostname, sn2.Hostname)
	Equals(t, sn.Username, sn2.Username)
}

var repoFixture = filepath.Join("testdata", "test-repo.tar.gz")
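
// TestRepositoryLoadIndex opens the repository from the test fixture and
// loads its index.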
func TestRepositoryLoadIndex(t *testing.T) {
	repodir, cleanup := Env(t, repoFixture)
	defer cleanup()

	repo := repository.TestOpenLocal(t, repodir)
	OK(t, repo.LoadIndex(context.TODO()))
}
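
// BenchmarkLoadIndex measures loading an index with 5000 entries from the
// repository.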
func BenchmarkLoadIndex(b *testing.B) {
	repository.TestUseLowSecurityKDFParameters(b)

	repo, cleanup := repository.TestRepository(b)
	defer cleanup()

	idx := repository.NewIndex()

	for i := 0; i < 5000; i++ {
		idx.Store(restic.PackedBlob{
			Blob: restic.Blob{
				Type:   restic.DataBlob,
				Length: 1234,
				ID:     restic.NewRandomID(),
				Offset: 1235,
			},
			PackID: restic.NewRandomID(),
		})
	}

	id, err := repository.SaveIndex(context.TODO(), repo, idx)
	OK(b, err)

	b.Logf("index saved as %v (%v entries)", id.Str(), idx.Count(restic.DataBlob))
	fi, err := repo.Backend().Stat(context.TODO(), restic.Handle{Type: restic.IndexFile, Name: id.String()})
	OK(b, err)
	b.Logf("filesize is %v", fi.Size)

	b.ResetTimer()

	for i := 0; i < b.N; i++ {
		_, err := repository.LoadIndex(context.TODO(), repo, id)
		OK(b, err)
	}
}

// saveRandomDataBlobs generates random data blobs and saves them to the repository.
func saveRandomDataBlobs(t testing.TB, repo restic.Repository, num int, sizeMax int) {
	for i := 0; i < num; i++ {
		size := rand.Int() % sizeMax
		buf := make([]byte, size)

		_, err := io.ReadFull(rnd, buf)
		OK(t, err)

		_, err = repo.SaveBlob(context.TODO(), restic.DataBlob, buf, restic.ID{})
		OK(t, err)
	}
}
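
// TestRepositoryIncrementalIndex writes intermediate and full indexes while
// adding packs and checks that each pack ends up in exactly one index.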
func TestRepositoryIncrementalIndex(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	repository.IndexFull = func(*repository.Index) bool { return true }

	// add 15 packs
	for j := 0; j < 5; j++ {
		// add 3 packs, write intermediate index
		for i := 0; i < 3; i++ {
			saveRandomDataBlobs(t, repo, 5, 1<<15)
			OK(t, repo.Flush())
		}

		OK(t, repo.SaveFullIndex(context.TODO()))
	}

	// add another 5 packs
	for i := 0; i < 5; i++ {
		saveRandomDataBlobs(t, repo, 5, 1<<15)
		OK(t, repo.Flush())
	}

	// save final index
	OK(t, repo.SaveIndex(context.TODO()))

	packEntries := make(map[restic.ID]map[restic.ID]struct{})

	for id := range repo.List(context.TODO(), restic.IndexFile) {
		idx, err := repository.LoadIndex(context.TODO(), repo, id)
		OK(t, err)

		for pb := range idx.Each(context.TODO()) {
			if _, ok := packEntries[pb.PackID]; !ok {
				packEntries[pb.PackID] = make(map[restic.ID]struct{})
			}

			packEntries[pb.PackID][id] = struct{}{}
		}
	}

	for packID, ids := range packEntries {
		if len(ids) > 1 {
			t.Errorf("pack %v listed in %d indexes\n", packID, len(ids))
		}
	}
}