2014-12-05 20:45:49 +00:00
|
|
|
package restic_test
|
2014-11-16 20:41:05 +00:00
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
2015-02-10 20:59:18 +00:00
|
|
|
"flag"
|
2014-11-16 20:41:05 +00:00
|
|
|
"io"
|
|
|
|
"testing"
|
|
|
|
|
2014-12-05 20:45:49 +00:00
|
|
|
"github.com/restic/restic"
|
2015-02-17 21:39:44 +00:00
|
|
|
"github.com/restic/restic/backend"
|
2014-12-05 20:45:49 +00:00
|
|
|
"github.com/restic/restic/chunker"
|
2015-04-09 19:15:48 +00:00
|
|
|
. "github.com/restic/restic/test"
|
2014-11-16 20:41:05 +00:00
|
|
|
)
|
|
|
|
|
2015-03-10 14:58:23 +00:00
|
|
|
// benchArchiveDirectory is the directory archived by the benchmarks and the
// preload tests in this file; override with -test.benchdir.
var benchArchiveDirectory = flag.String("test.benchdir", ".", "benchmark archiving a real directory (default: .)")

// testPol is a fixed chunker polynomial so that chunk boundaries (and thus
// benchmark results) are reproducible across runs.
var testPol = chunker.Pol(0x3DA3358B4DC173)

// bufSize is one mebibyte.
// NOTE(review): bufSize is not referenced anywhere in this file — confirm it
// is used elsewhere or remove it.
const bufSize = chunker.MiB
|
|
|
|
|
2015-02-17 19:02:43 +00:00
|
|
|
// Rdr combines the reader capabilities the chunk benchmarks need: seeking
// back to the start between iterations (io.ReadSeeker) and random access for
// chunk.Reader (io.ReaderAt).
type Rdr interface {
	io.ReadSeeker

	io.ReaderAt
}
|
2015-02-09 22:39:16 +00:00
|
|
|
|
2015-02-17 19:02:43 +00:00
|
|
|
func benchmarkChunkEncrypt(b testing.TB, buf []byte, rd Rdr, key *restic.Key) {
|
2015-02-09 22:39:16 +00:00
|
|
|
ch := restic.GetChunker("BenchmarkChunkEncrypt")
|
|
|
|
rd.Seek(0, 0)
|
2015-04-05 22:22:19 +00:00
|
|
|
ch.Reset(rd, testPol)
|
2015-02-09 22:39:16 +00:00
|
|
|
|
|
|
|
for {
|
|
|
|
chunk, err := ch.Next()
|
|
|
|
|
|
|
|
if err == io.EOF {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
2015-04-09 19:15:48 +00:00
|
|
|
OK(b, err)
|
2015-02-09 22:39:16 +00:00
|
|
|
|
2015-02-17 19:02:43 +00:00
|
|
|
// reduce length of buf
|
|
|
|
buf = buf[:chunk.Length]
|
|
|
|
n, err := io.ReadFull(chunk.Reader(rd), buf)
|
2015-04-09 19:15:48 +00:00
|
|
|
OK(b, err)
|
|
|
|
Assert(b, uint(n) == chunk.Length, "invalid length: got %d, expected %d", n, chunk.Length)
|
2015-02-09 22:39:16 +00:00
|
|
|
|
2015-02-17 19:02:43 +00:00
|
|
|
_, err = key.Encrypt(buf, buf)
|
2015-04-09 19:15:48 +00:00
|
|
|
OK(b, err)
|
2015-02-09 22:39:16 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
restic.FreeChunker("BenchmarkChunkEncrypt", ch)
|
|
|
|
}
|
|
|
|
|
2014-11-16 20:41:05 +00:00
|
|
|
func BenchmarkChunkEncrypt(b *testing.B) {
|
2015-04-12 07:36:14 +00:00
|
|
|
data := Random(23, 10<<20) // 10MiB
|
2015-02-08 21:54:45 +00:00
|
|
|
rd := bytes.NewReader(data)
|
2014-11-16 20:41:05 +00:00
|
|
|
|
|
|
|
be := setupBackend(b)
|
|
|
|
defer teardownBackend(b, be)
|
|
|
|
key := setupKey(b, be, "geheim")
|
|
|
|
|
2015-02-17 19:02:43 +00:00
|
|
|
buf := restic.GetChunkBuf("BenchmarkChunkEncrypt")
|
|
|
|
|
2014-11-16 20:41:05 +00:00
|
|
|
b.ResetTimer()
|
|
|
|
b.SetBytes(int64(len(data)))
|
|
|
|
|
|
|
|
for i := 0; i < b.N; i++ {
|
2015-02-17 19:02:43 +00:00
|
|
|
benchmarkChunkEncrypt(b, buf, rd, key)
|
2015-02-09 22:39:16 +00:00
|
|
|
}
|
2014-11-16 20:41:05 +00:00
|
|
|
|
2015-02-17 19:02:43 +00:00
|
|
|
restic.FreeChunkBuf("BenchmarkChunkEncrypt", buf)
|
|
|
|
}
|
2015-02-09 22:39:16 +00:00
|
|
|
|
2015-02-17 19:02:43 +00:00
|
|
|
func benchmarkChunkEncryptP(b *testing.PB, buf []byte, rd Rdr, key *restic.Key) {
|
2015-02-09 22:39:16 +00:00
|
|
|
ch := restic.GetChunker("BenchmarkChunkEncryptP")
|
|
|
|
rd.Seek(0, 0)
|
2015-04-05 22:22:19 +00:00
|
|
|
ch.Reset(rd, testPol)
|
2014-11-16 20:41:05 +00:00
|
|
|
|
2015-02-09 22:39:16 +00:00
|
|
|
for {
|
|
|
|
chunk, err := ch.Next()
|
|
|
|
if err == io.EOF {
|
|
|
|
break
|
|
|
|
}
|
2014-11-16 20:41:05 +00:00
|
|
|
|
2015-02-09 22:39:16 +00:00
|
|
|
// reduce length of chunkBuf
|
2015-02-17 19:02:43 +00:00
|
|
|
buf = buf[:chunk.Length]
|
|
|
|
io.ReadFull(chunk.Reader(rd), buf)
|
|
|
|
key.Encrypt(buf, buf)
|
2015-02-09 22:39:16 +00:00
|
|
|
}
|
2014-11-16 20:41:05 +00:00
|
|
|
|
2015-02-09 22:39:16 +00:00
|
|
|
restic.FreeChunker("BenchmarkChunkEncryptP", ch)
|
|
|
|
}
|
2015-02-08 21:54:45 +00:00
|
|
|
|
2015-02-09 22:39:16 +00:00
|
|
|
func BenchmarkChunkEncryptParallel(b *testing.B) {
|
|
|
|
be := setupBackend(b)
|
|
|
|
defer teardownBackend(b, be)
|
|
|
|
key := setupKey(b, be, "geheim")
|
|
|
|
|
2015-04-12 07:36:14 +00:00
|
|
|
data := Random(23, 10<<20) // 10MiB
|
2015-02-09 22:39:16 +00:00
|
|
|
|
2015-02-17 19:02:43 +00:00
|
|
|
buf := restic.GetChunkBuf("BenchmarkChunkEncryptParallel")
|
|
|
|
|
2015-02-09 22:39:16 +00:00
|
|
|
b.ResetTimer()
|
|
|
|
b.SetBytes(int64(len(data)))
|
|
|
|
|
|
|
|
b.RunParallel(func(pb *testing.PB) {
|
|
|
|
for pb.Next() {
|
|
|
|
rd := bytes.NewReader(data)
|
2015-02-17 19:02:43 +00:00
|
|
|
benchmarkChunkEncryptP(pb, buf, rd, key)
|
2014-11-16 20:41:05 +00:00
|
|
|
}
|
2015-02-09 22:39:16 +00:00
|
|
|
})
|
2015-02-17 19:02:43 +00:00
|
|
|
|
|
|
|
restic.FreeChunkBuf("BenchmarkChunkEncryptParallel", buf)
|
2014-11-16 20:41:05 +00:00
|
|
|
}
|
2015-02-10 20:59:18 +00:00
|
|
|
|
|
|
|
func BenchmarkArchiveDirectory(b *testing.B) {
|
|
|
|
if *benchArchiveDirectory == "" {
|
|
|
|
b.Skip("benchdir not set, skipping BenchmarkArchiveDirectory")
|
|
|
|
}
|
|
|
|
|
2015-03-28 14:07:08 +00:00
|
|
|
server := setupBackend(b)
|
|
|
|
defer teardownBackend(b, server)
|
|
|
|
key := setupKey(b, server, "geheim")
|
|
|
|
server.SetKey(key)
|
2015-02-10 20:59:18 +00:00
|
|
|
|
2015-02-21 13:23:49 +00:00
|
|
|
arch, err := restic.NewArchiver(server)
|
2015-04-09 19:15:48 +00:00
|
|
|
OK(b, err)
|
2015-02-10 20:59:18 +00:00
|
|
|
|
2015-03-02 13:48:47 +00:00
|
|
|
_, id, err := arch.Snapshot(nil, []string{*benchArchiveDirectory}, nil)
|
2015-02-10 20:59:18 +00:00
|
|
|
|
|
|
|
b.Logf("snapshot archived as %v", id)
|
|
|
|
}
|
2015-02-17 21:39:44 +00:00
|
|
|
|
2015-03-09 21:58:17 +00:00
|
|
|
func snapshot(t testing.TB, server restic.Server, path string, parent backend.ID) *restic.Snapshot {
|
2015-02-21 13:23:49 +00:00
|
|
|
arch, err := restic.NewArchiver(server)
|
2015-04-09 19:15:48 +00:00
|
|
|
OK(t, err)
|
|
|
|
OK(t, arch.Preload())
|
2015-03-09 21:58:17 +00:00
|
|
|
sn, _, err := arch.Snapshot(nil, []string{path}, parent)
|
2015-04-09 19:15:48 +00:00
|
|
|
OK(t, err)
|
2015-02-17 21:39:44 +00:00
|
|
|
return sn
|
|
|
|
}
|
|
|
|
|
2015-03-10 14:58:23 +00:00
|
|
|
func countBlobs(t testing.TB, server restic.Server) (trees int, data int) {
|
2015-03-28 10:50:23 +00:00
|
|
|
return server.Count(backend.Tree), server.Count(backend.Data)
|
2015-02-17 21:39:44 +00:00
|
|
|
}
|
|
|
|
|
2015-02-18 21:46:09 +00:00
|
|
|
func archiveWithPreload(t testing.TB) {
|
2015-02-17 22:40:37 +00:00
|
|
|
if *benchArchiveDirectory == "" {
|
|
|
|
t.Skip("benchdir not set, skipping TestArchiverPreload")
|
|
|
|
}
|
|
|
|
|
2015-03-28 14:07:08 +00:00
|
|
|
server := setupBackend(t)
|
|
|
|
defer teardownBackend(t, server)
|
|
|
|
key := setupKey(t, server, "geheim")
|
|
|
|
server.SetKey(key)
|
2015-02-17 21:39:44 +00:00
|
|
|
|
|
|
|
// archive a few files
|
2015-03-09 21:58:17 +00:00
|
|
|
sn := snapshot(t, server, *benchArchiveDirectory, nil)
|
|
|
|
t.Logf("archived snapshot %v", sn.ID().Str())
|
2015-02-17 21:39:44 +00:00
|
|
|
|
|
|
|
// get archive stats
|
2015-03-10 14:58:23 +00:00
|
|
|
beforeTrees, beforeData := countBlobs(t, server)
|
|
|
|
t.Logf("found %v trees, %v data blobs", beforeTrees, beforeData)
|
2015-02-17 21:39:44 +00:00
|
|
|
|
2015-03-09 21:58:17 +00:00
|
|
|
// archive the same files again, without parent snapshot
|
|
|
|
sn2 := snapshot(t, server, *benchArchiveDirectory, nil)
|
|
|
|
t.Logf("archived snapshot %v", sn2.ID().Str())
|
2015-02-17 21:39:44 +00:00
|
|
|
|
|
|
|
// get archive stats
|
2015-03-10 14:58:23 +00:00
|
|
|
afterTrees2, afterData2 := countBlobs(t, server)
|
|
|
|
t.Logf("found %v trees, %v data blobs", afterTrees2, afterData2)
|
2015-02-17 21:39:44 +00:00
|
|
|
|
2015-03-10 14:58:23 +00:00
|
|
|
// if there are more blobs, something is wrong
|
|
|
|
if afterData2 > beforeData {
|
|
|
|
t.Fatalf("TestArchiverPreload: too many data blobs in repository: before %d, after %d",
|
|
|
|
beforeData, afterData2)
|
2015-03-09 21:58:17 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// archive the same files again, with a parent snapshot
|
|
|
|
sn3 := snapshot(t, server, *benchArchiveDirectory, sn2.ID())
|
|
|
|
t.Logf("archived snapshot %v, parent %v", sn3.ID().Str(), sn2.ID().Str())
|
|
|
|
|
|
|
|
// get archive stats
|
2015-03-10 14:58:23 +00:00
|
|
|
afterTrees3, afterData3 := countBlobs(t, server)
|
|
|
|
t.Logf("found %v trees, %v data blobs", afterTrees3, afterData3)
|
2015-03-09 21:58:17 +00:00
|
|
|
|
2015-03-10 14:58:23 +00:00
|
|
|
// if there are more blobs, something is wrong
|
|
|
|
if afterData3 > beforeData {
|
|
|
|
t.Fatalf("TestArchiverPreload: too many data blobs in repository: before %d, after %d",
|
|
|
|
beforeData, afterData3)
|
2015-02-18 21:46:09 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestArchivePreload runs the shared archiveWithPreload check as a regular
// test (BenchmarkPreload exercises the same code path as a benchmark).
func TestArchivePreload(t *testing.T) {
	archiveWithPreload(t)
}
|
|
|
|
|
|
|
|
func BenchmarkPreload(t *testing.B) {
|
|
|
|
if *benchArchiveDirectory == "" {
|
|
|
|
t.Skip("benchdir not set, skipping TestArchiverPreload")
|
|
|
|
}
|
|
|
|
|
2015-03-28 14:07:08 +00:00
|
|
|
server := setupBackend(t)
|
|
|
|
defer teardownBackend(t, server)
|
|
|
|
key := setupKey(t, server, "geheim")
|
|
|
|
server.SetKey(key)
|
2015-02-18 21:46:09 +00:00
|
|
|
|
|
|
|
// archive a few files
|
2015-02-21 13:23:49 +00:00
|
|
|
arch, err := restic.NewArchiver(server)
|
2015-04-09 19:15:48 +00:00
|
|
|
OK(t, err)
|
2015-03-02 13:48:47 +00:00
|
|
|
sn, _, err := arch.Snapshot(nil, []string{*benchArchiveDirectory}, nil)
|
2015-04-09 19:15:48 +00:00
|
|
|
OK(t, err)
|
2015-02-18 21:46:09 +00:00
|
|
|
t.Logf("archived snapshot %v", sn.ID())
|
|
|
|
|
|
|
|
// start benchmark
|
|
|
|
t.ResetTimer()
|
|
|
|
|
|
|
|
for i := 0; i < t.N; i++ {
|
|
|
|
// create new archiver and preload
|
2015-02-21 13:23:49 +00:00
|
|
|
arch2, err := restic.NewArchiver(server)
|
2015-04-09 19:15:48 +00:00
|
|
|
OK(t, err)
|
|
|
|
OK(t, arch2.Preload())
|
2015-02-17 21:39:44 +00:00
|
|
|
}
|
|
|
|
}
|
2015-02-21 23:09:57 +00:00
|
|
|
|
|
|
|
func BenchmarkLoadTree(t *testing.B) {
|
|
|
|
if *benchArchiveDirectory == "" {
|
|
|
|
t.Skip("benchdir not set, skipping TestArchiverPreload")
|
|
|
|
}
|
|
|
|
|
2015-03-28 14:07:08 +00:00
|
|
|
server := setupBackend(t)
|
|
|
|
defer teardownBackend(t, server)
|
|
|
|
key := setupKey(t, server, "geheim")
|
|
|
|
server.SetKey(key)
|
2015-02-21 23:09:57 +00:00
|
|
|
|
|
|
|
// archive a few files
|
|
|
|
arch, err := restic.NewArchiver(server)
|
2015-04-09 19:15:48 +00:00
|
|
|
OK(t, err)
|
2015-03-02 13:48:47 +00:00
|
|
|
sn, _, err := arch.Snapshot(nil, []string{*benchArchiveDirectory}, nil)
|
2015-04-09 19:15:48 +00:00
|
|
|
OK(t, err)
|
2015-02-21 23:09:57 +00:00
|
|
|
t.Logf("archived snapshot %v", sn.ID())
|
|
|
|
|
2015-03-28 10:50:23 +00:00
|
|
|
list := make([]backend.ID, 0, 10)
|
|
|
|
done := make(chan struct{})
|
|
|
|
|
|
|
|
for name := range server.List(backend.Tree, done) {
|
|
|
|
id, err := backend.ParseID(name)
|
|
|
|
if err != nil {
|
|
|
|
t.Logf("invalid id for tree %v", name)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
list = append(list, id)
|
|
|
|
if len(list) == cap(list) {
|
|
|
|
close(done)
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-02-21 23:09:57 +00:00
|
|
|
// start benchmark
|
|
|
|
t.ResetTimer()
|
|
|
|
|
|
|
|
for i := 0; i < t.N; i++ {
|
2015-03-28 14:07:08 +00:00
|
|
|
for _, id := range list {
|
|
|
|
_, err := restic.LoadTree(server, restic.Blob{Storage: id})
|
2015-04-09 19:15:48 +00:00
|
|
|
OK(t, err)
|
2015-03-28 10:50:23 +00:00
|
|
|
}
|
2015-02-21 23:09:57 +00:00
|
|
|
}
|
|
|
|
}
|