2016-04-10 14:52:15 +00:00
|
|
|
package restic
|
|
|
|
|
|
|
|
import (
|
2016-08-01 19:30:46 +00:00
|
|
|
"encoding/json"
|
2016-04-10 14:52:15 +00:00
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"math/rand"
|
|
|
|
"restic/backend"
|
|
|
|
"restic/pack"
|
|
|
|
"restic/repository"
|
|
|
|
"testing"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/restic/chunker"
|
|
|
|
)
|
|
|
|
|
|
|
|
// fakeFile returns a reader which yields deterministic pseudo-random data.
|
2016-08-02 20:11:55 +00:00
|
|
|
func fakeFile(t testing.TB, seed, size int64) io.Reader {
|
2016-07-31 08:29:53 +00:00
|
|
|
return io.LimitReader(repository.NewRandReader(rand.New(rand.NewSource(seed))), size)
|
2016-04-10 14:52:15 +00:00
|
|
|
}
|
|
|
|
|
2016-08-02 20:07:06 +00:00
|
|
|
// fakeFileSystem generates deterministic fake files and trees and stores
// the resulting blobs in a repository.
type fakeFileSystem struct {
	t testing.TB // test handle, used to abort via Fatalf on save errors
	repo *repository.Repository // target repository for the generated blobs
	knownBlobs backend.IDSet // IDs already saved during this run, used to skip duplicates
	duplication float32 // probability that a known blob is treated as new and saved again
}
|
|
|
|
|
2016-04-10 14:52:15 +00:00
|
|
|
// saveFile reads from rd and saves the blobs in the repository. The list of
|
|
|
|
// IDs is returned.
|
2016-08-02 20:07:06 +00:00
|
|
|
func (fs fakeFileSystem) saveFile(rd io.Reader) (blobs backend.IDs) {
|
2016-07-31 09:04:04 +00:00
|
|
|
blobs = backend.IDs{}
|
2016-08-02 20:07:06 +00:00
|
|
|
ch := chunker.New(rd, fs.repo.Config.ChunkerPolynomial)
|
2016-04-10 14:52:15 +00:00
|
|
|
|
|
|
|
for {
|
|
|
|
chunk, err := ch.Next(getBuf())
|
|
|
|
if err == io.EOF {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
if err != nil {
|
2016-08-02 20:07:06 +00:00
|
|
|
fs.t.Fatalf("unable to save chunk in repo: %v", err)
|
2016-04-10 14:52:15 +00:00
|
|
|
}
|
|
|
|
|
2016-08-01 19:30:46 +00:00
|
|
|
id := backend.Hash(chunk.Data)
|
2016-08-03 20:38:05 +00:00
|
|
|
if !fs.blobIsKnown(id, pack.Data) {
|
2016-08-02 20:07:06 +00:00
|
|
|
_, err := fs.repo.SaveAndEncrypt(pack.Data, chunk.Data, &id)
|
2016-08-01 19:30:46 +00:00
|
|
|
if err != nil {
|
2016-08-02 20:07:06 +00:00
|
|
|
fs.t.Fatalf("error saving chunk: %v", err)
|
2016-08-01 19:30:46 +00:00
|
|
|
}
|
2016-08-02 20:07:06 +00:00
|
|
|
|
|
|
|
fs.knownBlobs.Insert(id)
|
2016-04-10 14:52:15 +00:00
|
|
|
}
|
2016-08-15 19:37:11 +00:00
|
|
|
freeBuf(chunk.Data)
|
2016-08-01 19:30:46 +00:00
|
|
|
|
2016-04-10 14:52:15 +00:00
|
|
|
blobs = append(blobs, id)
|
|
|
|
}
|
|
|
|
|
|
|
|
return blobs
|
|
|
|
}
|
|
|
|
|
2016-07-31 08:58:09 +00:00
|
|
|
// Parameters bounding the generated fake data.
const (
	maxFileSize = 1500000 // upper bound (bytes) for a generated fake file
	maxSeed = 32 // seeds are drawn modulo this, so only maxSeed distinct files/subtrees exist
	maxNodes = 32 // maximum number of entries in a generated tree
)
|
2016-04-10 15:25:32 +00:00
|
|
|
|
2016-08-02 20:07:06 +00:00
|
|
|
func (fs fakeFileSystem) treeIsKnown(tree *Tree) (bool, backend.ID) {
|
2016-08-01 19:30:46 +00:00
|
|
|
data, err := json.Marshal(tree)
|
|
|
|
if err != nil {
|
2016-08-02 20:07:06 +00:00
|
|
|
fs.t.Fatalf("json.Marshal(tree) returned error: %v", err)
|
2016-08-01 19:30:46 +00:00
|
|
|
return false, backend.ID{}
|
|
|
|
}
|
|
|
|
data = append(data, '\n')
|
|
|
|
|
|
|
|
id := backend.Hash(data)
|
2016-08-03 20:38:05 +00:00
|
|
|
return fs.blobIsKnown(id, pack.Tree), id
|
2016-08-02 20:07:06 +00:00
|
|
|
|
|
|
|
}
|
|
|
|
|
2016-08-03 20:38:05 +00:00
|
|
|
func (fs fakeFileSystem) blobIsKnown(id backend.ID, t pack.BlobType) bool {
|
2016-08-07 19:56:06 +00:00
|
|
|
if rand.Float32() < fs.duplication {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2016-08-02 20:07:06 +00:00
|
|
|
if fs.knownBlobs.Has(id) {
|
|
|
|
return true
|
2016-08-01 19:30:46 +00:00
|
|
|
}
|
|
|
|
|
2016-08-03 20:38:05 +00:00
|
|
|
if fs.repo.Index().Has(id, t) {
|
2016-08-02 20:07:06 +00:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
fs.knownBlobs.Insert(id)
|
|
|
|
return false
|
2016-08-01 19:30:46 +00:00
|
|
|
}
|
|
|
|
|
2016-08-02 20:11:55 +00:00
|
|
|
// saveTree saves a tree of fake files in the repo and returns the ID.
// The tree is generated deterministically from seed; subtrees recurse
// down to at most depth levels. Note: the order of rnd.Int63() draws is
// significant — changing it changes which trees/files are generated.
func (fs fakeFileSystem) saveTree(seed int64, depth int) backend.ID {
	rnd := rand.NewSource(seed)
	numNodes := int(rnd.Int63() % maxNodes)

	var tree Tree
	for i := 0; i < numNodes; i++ {

		// randomly select the type of the node, either tree (p = 1/4) or file (p = 3/4).
		if depth > 1 && rnd.Int63()%4 == 0 {
			treeSeed := rnd.Int63() % maxSeed
			// recurse with reduced depth so the tree terminates
			id := fs.saveTree(treeSeed, depth-1)

			node := &Node{
				Name: fmt.Sprintf("dir-%v", treeSeed),
				Type: "dir",
				Mode: 0755,
				Subtree: &id,
			}

			tree.Nodes = append(tree.Nodes, node)
			continue
		}

		fileSeed := rnd.Int63() % maxSeed
		// file size is a deterministic function of the seed, so equal seeds
		// yield byte-identical files (and therefore identical blobs)
		fileSize := (maxFileSize / maxSeed) * fileSeed

		node := &Node{
			Name: fmt.Sprintf("file-%v", fileSeed),
			Type: "file",
			Mode: 0644,
			Size: uint64(fileSize),
		}

		node.Content = fs.saveFile(fakeFile(fs.t, fileSeed, fileSize))
		tree.Nodes = append(tree.Nodes, node)
	}

	// if an identical tree was saved before, reuse its ID instead of saving again
	if known, id := fs.treeIsKnown(&tree); known {
		return id
	}

	id, err := fs.repo.SaveJSON(pack.Tree, tree)
	if err != nil {
		fs.t.Fatal(err)
	}

	return id
}
|
|
|
|
|
2016-05-08 20:38:38 +00:00
|
|
|
// TestCreateSnapshot creates a snapshot filled with fake data. The
// fake data is generated deterministically from the timestamp `at`, which is
// also used as the snapshot's timestamp. The tree's depth can be specified
// with the parameter depth. The parameter duplication is a probability that
// the same blob will be saved again.
func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time, depth int, duplication float32) *Snapshot {
	// the Unix timestamp doubles as the RNG seed, making snapshots reproducible
	seed := at.Unix()
	t.Logf("create fake snapshot at %s with seed %d", at, seed)

	fakedir := fmt.Sprintf("fakedir-at-%v", at.Format("2006-01-02 15:04:05"))
	snapshot, err := NewSnapshot([]string{fakedir})
	if err != nil {
		t.Fatal(err)
	}
	snapshot.Time = at

	fs := fakeFileSystem{
		t: t,
		repo: repo,
		knownBlobs: backend.NewIDSet(),
		duplication: duplication,
	}

	treeID := fs.saveTree(seed, depth)
	snapshot.Tree = &treeID

	id, err := repo.SaveJSONUnpacked(backend.Snapshot, snapshot)
	if err != nil {
		t.Fatal(err)
	}

	snapshot.id = &id

	t.Logf("saved snapshot %v", id.Str())

	// write all pending packs to the backend
	err = repo.Flush()
	if err != nil {
		t.Fatal(err)
	}

	// persist the index so the saved blobs can be found again
	err = repo.SaveIndex()
	if err != nil {
		t.Fatal(err)
	}

	return snapshot
}
|
2016-08-06 15:29:08 +00:00
|
|
|
|
|
|
|
// TestResetRepository removes all packs and indexes from the repository.
|
|
|
|
func TestResetRepository(t testing.TB, repo *repository.Repository) {
|
|
|
|
done := make(chan struct{})
|
|
|
|
defer close(done)
|
|
|
|
|
|
|
|
for _, tpe := range []backend.Type{backend.Snapshot, backend.Index, backend.Data} {
|
|
|
|
for id := range repo.Backend().List(tpe, done) {
|
|
|
|
err := repo.Backend().Remove(tpe, id)
|
|
|
|
if err != nil {
|
|
|
|
t.Errorf("removing %v (%v) failed: %v", id[0:12], tpe, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
repo.SetIndex(repository.NewMasterIndex())
|
|
|
|
}
|