2
2
mirror of https://github.com/octoleo/restic.git synced 2024-11-16 01:57:10 +00:00
restic/internal/archiver/archiver_test.go

279 lines
6.9 KiB
Go
Raw Normal View History

2016-08-31 20:39:36 +00:00
package archiver_test
2014-11-16 20:41:05 +00:00
import (
"bytes"
2017-06-05 21:56:59 +00:00
"context"
2014-11-16 20:41:05 +00:00
"io"
2017-09-04 19:39:21 +00:00
"io/ioutil"
"os"
"path/filepath"
2014-11-16 20:41:05 +00:00
"testing"
"time"
2014-11-16 20:41:05 +00:00
2017-07-23 12:21:03 +00:00
"github.com/restic/restic/internal/archiver"
"github.com/restic/restic/internal/crypto"
"github.com/restic/restic/internal/repository"
2017-07-24 15:42:25 +00:00
"github.com/restic/restic/internal/restic"
2017-10-02 13:06:39 +00:00
rtest "github.com/restic/restic/internal/test"
2017-07-23 12:21:03 +00:00
"github.com/restic/restic/internal/errors"
"github.com/restic/chunker"
2014-11-16 20:41:05 +00:00
)
// testPol is a fixed chunker polynomial so chunk boundaries are reproducible across runs.
var testPol = chunker.Pol(0x3DA3358B4DC173)
2015-02-17 19:02:43 +00:00
// Rdr combines the reader capabilities the chunk/encrypt benchmarks need:
// random access plus seekable sequential reads.
type Rdr interface {
	io.ReaderAt
	io.ReadSeeker
}
func benchmarkChunkEncrypt(b testing.TB, buf, buf2 []byte, rd Rdr, key *crypto.Key) {
rd.Seek(0, 0)
ch := chunker.New(rd, testPol)
2017-10-29 10:33:57 +00:00
nonce := crypto.NewRandomNonce()
for {
chunk, err := ch.Next(buf)
if errors.Cause(err) == io.EOF {
break
}
2017-10-02 13:06:39 +00:00
rtest.OK(b, err)
2017-10-02 13:06:39 +00:00
rtest.Assert(b, uint(len(chunk.Data)) == chunk.Length,
"invalid length: got %d, expected %d", len(chunk.Data), chunk.Length)
2017-10-29 10:33:57 +00:00
_ = key.Seal(buf2[:0], nonce, chunk.Data, nil)
}
}
2014-11-16 20:41:05 +00:00
func BenchmarkChunkEncrypt(b *testing.B) {
2016-09-04 11:24:51 +00:00
repo, cleanup := repository.TestRepository(b)
2016-09-04 10:52:43 +00:00
defer cleanup()
2017-10-02 13:06:39 +00:00
data := rtest.Random(23, 10<<20) // 10MiB
rd := bytes.NewReader(data)
2014-11-16 20:41:05 +00:00
2015-05-04 22:14:07 +00:00
buf := make([]byte, chunker.MaxSize)
buf2 := make([]byte, chunker.MaxSize)
2015-02-17 19:02:43 +00:00
2014-11-16 20:41:05 +00:00
b.ResetTimer()
b.SetBytes(int64(len(data)))
for i := 0; i < b.N; i++ {
benchmarkChunkEncrypt(b, buf, buf2, rd, repo.Key())
}
2015-02-17 19:02:43 +00:00
}
func benchmarkChunkEncryptP(b *testing.PB, buf []byte, rd Rdr, key *crypto.Key) {
ch := chunker.New(rd, testPol)
2017-10-29 10:33:57 +00:00
nonce := crypto.NewRandomNonce()
2014-11-16 20:41:05 +00:00
for {
chunk, err := ch.Next(buf)
if errors.Cause(err) == io.EOF {
break
}
2014-11-16 20:41:05 +00:00
2017-10-29 10:33:57 +00:00
_ = key.Seal(chunk.Data[:0], nonce, chunk.Data, nil)
}
}
func BenchmarkChunkEncryptParallel(b *testing.B) {
2016-09-04 11:24:51 +00:00
repo, cleanup := repository.TestRepository(b)
2016-09-04 10:52:43 +00:00
defer cleanup()
2017-10-02 13:06:39 +00:00
data := rtest.Random(23, 10<<20) // 10MiB
2015-05-04 22:14:07 +00:00
buf := make([]byte, chunker.MaxSize)
2015-02-17 19:02:43 +00:00
b.ResetTimer()
b.SetBytes(int64(len(data)))
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
rd := bytes.NewReader(data)
benchmarkChunkEncryptP(pb, buf, rd, repo.Key())
2014-11-16 20:41:05 +00:00
}
})
2014-11-16 20:41:05 +00:00
}
func archiveDirectory(b testing.TB) {
2016-09-04 11:24:51 +00:00
repo, cleanup := repository.TestRepository(b)
2016-09-04 10:52:43 +00:00
defer cleanup()
2016-08-31 21:07:50 +00:00
arch := archiver.New(repo)
2017-10-02 13:06:39 +00:00
_, id, err := arch.Snapshot(context.TODO(), nil, []string{rtest.BenchArchiveDirectory}, nil, "localhost", nil, time.Now())
rtest.OK(b, err)
b.Logf("snapshot archived as %v", id)
}
2015-02-17 21:39:44 +00:00
func TestArchiveDirectory(t *testing.T) {
2017-10-02 13:06:39 +00:00
if rtest.BenchArchiveDirectory == "" {
t.Skip("benchdir not set, skipping TestArchiveDirectory")
}
archiveDirectory(t)
}
func BenchmarkArchiveDirectory(b *testing.B) {
2017-10-02 13:06:39 +00:00
if rtest.BenchArchiveDirectory == "" {
b.Skip("benchdir not set, skipping BenchmarkArchiveDirectory")
}
2015-05-01 20:58:50 +00:00
for i := 0; i < b.N; i++ {
archiveDirectory(b)
}
2015-02-17 21:39:44 +00:00
}
// countPacks returns the number of files of type tpe stored in the
// repository backend, failing the test on any listing error.
func countPacks(t testing.TB, repo restic.Repository, tpe restic.FileType) (n uint) {
	var count uint

	err := repo.Backend().List(context.TODO(), tpe, func(restic.FileInfo) error {
		count++
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	return count
}
func archiveWithDedup(t testing.TB) {
2016-09-04 11:24:51 +00:00
repo, cleanup := repository.TestRepository(t)
2016-09-04 10:52:43 +00:00
defer cleanup()
2017-10-02 13:06:39 +00:00
if rtest.BenchArchiveDirectory == "" {
t.Skip("benchdir not set, skipping TestArchiverDedup")
2015-02-17 22:40:37 +00:00
}
var cnt struct {
before, after, after2 struct {
packs, dataBlobs, treeBlobs uint
}
}
2015-02-17 21:39:44 +00:00
// archive a few files
2017-10-02 13:06:39 +00:00
sn := archiver.TestSnapshot(t, repo, rtest.BenchArchiveDirectory, nil)
2015-03-09 21:58:17 +00:00
t.Logf("archived snapshot %v", sn.ID().Str())
2015-02-17 21:39:44 +00:00
// get archive stats
cnt.before.packs = countPacks(t, repo, restic.DataFile)
2016-08-31 20:39:36 +00:00
cnt.before.dataBlobs = repo.Index().Count(restic.DataBlob)
cnt.before.treeBlobs = repo.Index().Count(restic.TreeBlob)
t.Logf("packs %v, data blobs %v, tree blobs %v",
cnt.before.packs, cnt.before.dataBlobs, cnt.before.treeBlobs)
2015-02-17 21:39:44 +00:00
2015-03-09 21:58:17 +00:00
// archive the same files again, without parent snapshot
2017-10-02 13:06:39 +00:00
sn2 := archiver.TestSnapshot(t, repo, rtest.BenchArchiveDirectory, nil)
2015-03-09 21:58:17 +00:00
t.Logf("archived snapshot %v", sn2.ID().Str())
2015-02-17 21:39:44 +00:00
// get archive stats again
cnt.after.packs = countPacks(t, repo, restic.DataFile)
2016-08-31 20:39:36 +00:00
cnt.after.dataBlobs = repo.Index().Count(restic.DataBlob)
cnt.after.treeBlobs = repo.Index().Count(restic.TreeBlob)
t.Logf("packs %v, data blobs %v, tree blobs %v",
cnt.after.packs, cnt.after.dataBlobs, cnt.after.treeBlobs)
// if there are more data blobs, something is wrong
if cnt.after.dataBlobs > cnt.before.dataBlobs {
t.Fatalf("TestArchiverDedup: too many data blobs in repository: before %d, after %d",
cnt.before.dataBlobs, cnt.after.dataBlobs)
}
2015-03-09 21:58:17 +00:00
// archive the same files again, with a parent snapshot
2017-10-02 13:06:39 +00:00
sn3 := archiver.TestSnapshot(t, repo, rtest.BenchArchiveDirectory, sn2.ID())
2015-03-09 21:58:17 +00:00
t.Logf("archived snapshot %v, parent %v", sn3.ID().Str(), sn2.ID().Str())
// get archive stats again
cnt.after2.packs = countPacks(t, repo, restic.DataFile)
2016-08-31 20:39:36 +00:00
cnt.after2.dataBlobs = repo.Index().Count(restic.DataBlob)
cnt.after2.treeBlobs = repo.Index().Count(restic.TreeBlob)
t.Logf("packs %v, data blobs %v, tree blobs %v",
cnt.after2.packs, cnt.after2.dataBlobs, cnt.after2.treeBlobs)
// if there are more data blobs, something is wrong
if cnt.after2.dataBlobs > cnt.before.dataBlobs {
t.Fatalf("TestArchiverDedup: too many data blobs in repository: before %d, after %d",
cnt.before.dataBlobs, cnt.after2.dataBlobs)
}
2015-02-17 21:39:44 +00:00
}
2015-02-21 23:09:57 +00:00
// TestArchiveDedup checks that archiving the same directory repeatedly does
// not add new data blobs to the repository.
func TestArchiveDedup(t *testing.T) {
	archiveWithDedup(t)
}
2017-03-07 10:17:15 +00:00
func TestArchiveEmptySnapshot(t *testing.T) {
repo, cleanup := repository.TestRepository(t)
defer cleanup()
arch := archiver.New(repo)
2017-09-02 17:28:09 +00:00
sn, id, err := arch.Snapshot(context.TODO(), nil, []string{"file-does-not-exist-123123213123", "file2-does-not-exist-too-123123123"}, nil, "localhost", nil, time.Now())
2017-03-07 10:17:15 +00:00
if err == nil {
t.Errorf("expected error for empty snapshot, got nil")
}
if !id.IsNull() {
t.Errorf("expected null ID for empty snapshot, got %v", id.Str())
}
if sn != nil {
t.Errorf("expected null snapshot for empty snapshot, got %v", sn)
}
}
2017-09-04 19:39:21 +00:00
// chdir changes the working directory to target and returns a function that
// restores the previous working directory. Any error aborts the test.
func chdir(t testing.TB, target string) (cleanup func()) {
	prev, err := os.Getwd()
	if err != nil {
		t.Fatal(err)
	}

	t.Logf("chdir to %v", target)
	if err := os.Chdir(target); err != nil {
		t.Fatal(err)
	}

	return func() {
		t.Logf("chdir back to %v", prev)
		if err := os.Chdir(prev); err != nil {
			t.Fatal(err)
		}
	}
}
func TestArchiveNameCollision(t *testing.T) {
repo, cleanup := repository.TestRepository(t)
defer cleanup()
2017-10-02 13:06:39 +00:00
dir, cleanup := rtest.TempDir(t)
2017-09-04 19:39:21 +00:00
defer cleanup()
root := filepath.Join(dir, "root")
2017-10-02 13:06:39 +00:00
rtest.OK(t, os.MkdirAll(root, 0755))
2017-09-04 19:39:21 +00:00
2017-10-02 13:06:39 +00:00
rtest.OK(t, ioutil.WriteFile(filepath.Join(dir, "testfile"), []byte("testfile1"), 0644))
rtest.OK(t, ioutil.WriteFile(filepath.Join(dir, "root", "testfile"), []byte("testfile2"), 0644))
2017-09-04 19:39:21 +00:00
defer chdir(t, root)()
arch := archiver.New(repo)
2017-09-09 12:58:07 +00:00
sn, id, err := arch.Snapshot(context.TODO(), nil, []string{"testfile", filepath.Join("..", "testfile")}, nil, "localhost", nil, time.Now())
2017-10-02 13:06:39 +00:00
rtest.OK(t, err)
2017-09-04 19:39:21 +00:00
t.Logf("snapshot archived as %v", id)
tree, err := repo.LoadTree(context.TODO(), *sn.Tree)
2017-10-02 13:06:39 +00:00
rtest.OK(t, err)
2017-09-04 19:39:21 +00:00
if len(tree.Nodes) != 2 {
t.Fatalf("tree has %d nodes, wanted 2: %v", len(tree.Nodes), tree.Nodes)
}
}