2016-04-10 14:52:15 +00:00
|
|
|
package restic
|
|
|
|
|
|
|
|
import (
|
2017-06-04 09:16:55 +00:00
|
|
|
"context"
|
2016-04-10 14:52:15 +00:00
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"math/rand"
|
|
|
|
"testing"
|
|
|
|
"time"
|
|
|
|
|
2016-09-03 11:34:04 +00:00
|
|
|
"github.com/restic/chunker"
|
2021-08-07 20:52:05 +00:00
|
|
|
"golang.org/x/sync/errgroup"
|
2016-04-10 14:52:15 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// fakeFile returns a reader which yields deterministic pseudo-random data.
// The same seed always produces the same byte stream, truncated at size bytes.
func fakeFile(seed, size int64) io.Reader {
	src := rand.New(rand.NewSource(seed))
	return io.LimitReader(src, size)
}
|
|
|
|
|
2016-08-02 20:07:06 +00:00
|
|
|
// fakeFileSystem stores deterministically generated fake file content in a
// repository. The scratch buffer and the chunker are kept on the struct so
// they can be reused across files.
type fakeFileSystem struct {
	t       testing.TB       // test handle; errors are reported via Fatalf
	repo    Repository       // destination repository for blobs and trees
	buf     []byte           // scratch buffer for chunking, allocated lazily in saveFile
	chunker *chunker.Chunker // reused between files via Reset
	rand    *rand.Rand       // NOTE(review): not referenced in this file — confirm usage before removing
}
|
|
|
|
|
2016-04-10 14:52:15 +00:00
|
|
|
// saveFile reads from rd and saves the blobs in the repository. The list of
|
|
|
|
// IDs is returned.
|
2017-06-04 09:16:55 +00:00
|
|
|
func (fs *fakeFileSystem) saveFile(ctx context.Context, rd io.Reader) (blobs IDs) {
|
2017-01-17 11:56:20 +00:00
|
|
|
if fs.buf == nil {
|
|
|
|
fs.buf = make([]byte, chunker.MaxSize)
|
|
|
|
}
|
2016-04-10 14:52:15 +00:00
|
|
|
|
2017-01-17 11:56:20 +00:00
|
|
|
if fs.chunker == nil {
|
|
|
|
fs.chunker = chunker.New(rd, fs.repo.Config().ChunkerPolynomial)
|
|
|
|
} else {
|
|
|
|
fs.chunker.Reset(rd, fs.repo.Config().ChunkerPolynomial)
|
|
|
|
}
|
|
|
|
|
|
|
|
blobs = IDs{}
|
2016-04-10 14:52:15 +00:00
|
|
|
for {
|
2017-01-17 11:56:20 +00:00
|
|
|
chunk, err := fs.chunker.Next(fs.buf)
|
2022-06-13 18:35:37 +00:00
|
|
|
if err == io.EOF {
|
2016-04-10 14:52:15 +00:00
|
|
|
break
|
|
|
|
}
|
|
|
|
|
|
|
|
if err != nil {
|
2016-08-02 20:07:06 +00:00
|
|
|
fs.t.Fatalf("unable to save chunk in repo: %v", err)
|
2016-04-10 14:52:15 +00:00
|
|
|
}
|
|
|
|
|
2023-07-16 13:55:05 +00:00
|
|
|
id, _, _, err := fs.repo.SaveBlob(ctx, DataBlob, chunk.Data, ID{}, false)
|
|
|
|
if err != nil {
|
|
|
|
fs.t.Fatalf("error saving chunk: %v", err)
|
2016-04-10 14:52:15 +00:00
|
|
|
}
|
2016-08-01 19:30:46 +00:00
|
|
|
|
2016-04-10 14:52:15 +00:00
|
|
|
blobs = append(blobs, id)
|
|
|
|
}
|
|
|
|
|
|
|
|
return blobs
|
|
|
|
}
|
|
|
|
|
2016-07-31 08:58:09 +00:00
|
|
|
// Limits for the generated fake data.
const (
	maxFileSize = 20000 // upper bound on the size of a generated fake file
	maxSeed     = 32    // seeds for files and subtrees are drawn from [0, maxSeed)
	maxNodes    = 15    // at most this many nodes per tree level
)
|
2016-04-10 15:25:32 +00:00
|
|
|
|
2016-08-02 20:11:55 +00:00
|
|
|
// saveTree saves a tree of fake files in the repo and returns the ID.
|
2017-06-04 09:16:55 +00:00
|
|
|
func (fs *fakeFileSystem) saveTree(ctx context.Context, seed int64, depth int) ID {
|
2016-04-10 14:52:15 +00:00
|
|
|
rnd := rand.NewSource(seed)
|
2016-07-31 08:58:09 +00:00
|
|
|
numNodes := int(rnd.Int63() % maxNodes)
|
2016-04-10 14:52:15 +00:00
|
|
|
|
|
|
|
var tree Tree
|
|
|
|
for i := 0; i < numNodes; i++ {
|
2016-07-31 08:58:09 +00:00
|
|
|
|
|
|
|
// randomly select the type of the node, either tree (p = 1/4) or file (p = 3/4).
|
|
|
|
if depth > 1 && rnd.Int63()%4 == 0 {
|
|
|
|
treeSeed := rnd.Int63() % maxSeed
|
2017-06-04 09:16:55 +00:00
|
|
|
id := fs.saveTree(ctx, treeSeed, depth-1)
|
2016-07-31 08:58:09 +00:00
|
|
|
|
|
|
|
node := &Node{
|
2016-09-01 19:20:03 +00:00
|
|
|
Name: fmt.Sprintf("dir-%v", treeSeed),
|
|
|
|
Type: "dir",
|
|
|
|
Mode: 0755,
|
|
|
|
Subtree: &id,
|
2016-07-31 08:58:09 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
tree.Nodes = append(tree.Nodes, node)
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
fileSeed := rnd.Int63() % maxSeed
|
2016-07-31 09:04:04 +00:00
|
|
|
fileSize := (maxFileSize / maxSeed) * fileSeed
|
2016-04-10 15:25:32 +00:00
|
|
|
|
|
|
|
node := &Node{
|
2016-09-01 19:20:03 +00:00
|
|
|
Name: fmt.Sprintf("file-%v", fileSeed),
|
|
|
|
Type: "file",
|
|
|
|
Mode: 0644,
|
|
|
|
Size: uint64(fileSize),
|
2016-04-10 15:25:32 +00:00
|
|
|
}
|
2016-04-10 14:52:15 +00:00
|
|
|
|
2020-03-09 08:57:51 +00:00
|
|
|
node.Content = fs.saveFile(ctx, fakeFile(fileSeed, fileSize))
|
2016-04-10 14:52:15 +00:00
|
|
|
tree.Nodes = append(tree.Nodes, node)
|
|
|
|
}
|
|
|
|
|
2023-07-16 13:55:05 +00:00
|
|
|
tree.Sort()
|
2016-08-01 19:30:46 +00:00
|
|
|
|
2023-07-16 13:55:05 +00:00
|
|
|
id, err := SaveTree(ctx, fs.repo, &tree)
|
2016-04-10 14:52:15 +00:00
|
|
|
if err != nil {
|
2023-07-16 13:55:05 +00:00
|
|
|
fs.t.Fatalf("SaveTree returned error: %v", err)
|
2016-04-10 14:52:15 +00:00
|
|
|
}
|
|
|
|
return id
|
|
|
|
}
|
|
|
|
|
2016-05-08 20:38:38 +00:00
|
|
|
// TestCreateSnapshot creates a snapshot filled with fake data. The
|
2016-04-10 14:52:15 +00:00
|
|
|
// fake data is generated deterministically from the timestamp `at`, which is
|
2016-07-31 08:58:09 +00:00
|
|
|
// also used as the snapshot's timestamp. The tree's depth can be specified
|
2016-08-07 19:56:06 +00:00
|
|
|
// with the parameter depth. The parameter duplication is a probability that
|
|
|
|
// the same blob will saved again.
|
2023-07-16 13:55:05 +00:00
|
|
|
func TestCreateSnapshot(t testing.TB, repo Repository, at time.Time, depth int) *Snapshot {
|
2016-07-31 08:58:09 +00:00
|
|
|
seed := at.Unix()
|
|
|
|
t.Logf("create fake snapshot at %s with seed %d", at, seed)
|
|
|
|
|
2016-04-10 14:52:15 +00:00
|
|
|
fakedir := fmt.Sprintf("fakedir-at-%v", at.Format("2006-01-02 15:04:05"))
|
2023-07-16 13:55:05 +00:00
|
|
|
snapshot, err := NewSnapshot([]string{fakedir}, []string{"test"}, "foo", at)
|
2016-04-10 14:52:15 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
2016-08-02 20:07:06 +00:00
|
|
|
fs := fakeFileSystem{
|
2023-07-16 13:55:05 +00:00
|
|
|
t: t,
|
|
|
|
repo: repo,
|
|
|
|
rand: rand.New(rand.NewSource(seed)),
|
2016-08-02 20:07:06 +00:00
|
|
|
}
|
|
|
|
|
2021-08-07 20:52:05 +00:00
|
|
|
var wg errgroup.Group
|
|
|
|
repo.StartPackUploader(context.TODO(), &wg)
|
|
|
|
|
2017-06-04 09:16:55 +00:00
|
|
|
treeID := fs.saveTree(context.TODO(), seed, depth)
|
2016-04-10 14:52:15 +00:00
|
|
|
snapshot.Tree = &treeID
|
|
|
|
|
2021-08-07 20:52:05 +00:00
|
|
|
err = repo.Flush(context.Background())
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
2022-06-12 12:38:19 +00:00
|
|
|
id, err := SaveSnapshot(context.TODO(), repo, snapshot)
|
2016-04-10 14:52:15 +00:00
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
2016-07-31 14:12:19 +00:00
|
|
|
snapshot.id = &id
|
|
|
|
|
2016-04-10 14:52:15 +00:00
|
|
|
t.Logf("saved snapshot %v", id.Str())
|
|
|
|
|
2016-07-31 14:12:19 +00:00
|
|
|
return snapshot
|
2016-04-10 14:52:15 +00:00
|
|
|
}
|
2016-08-06 15:29:08 +00:00
|
|
|
|
2016-08-31 18:29:54 +00:00
|
|
|
// TestParseID parses s as a ID and panics if that fails.
|
|
|
|
func TestParseID(s string) ID {
|
|
|
|
id, err := ParseID(s)
|
|
|
|
if err != nil {
|
2017-05-28 07:43:28 +00:00
|
|
|
panic(fmt.Sprintf("unable to parse string %q as ID: %v", s, err))
|
2016-08-06 15:29:08 +00:00
|
|
|
}
|
|
|
|
|
2016-08-31 18:29:54 +00:00
|
|
|
return id
|
2016-08-06 15:29:08 +00:00
|
|
|
}
|
2020-04-18 17:46:33 +00:00
|
|
|
|
|
|
|
// TestParseHandle parses s as a ID, panics if that fails and creates a BlobHandle with t.
|
|
|
|
func TestParseHandle(s string, t BlobType) BlobHandle {
|
|
|
|
return BlobHandle{ID: TestParseID(s), Type: t}
|
|
|
|
}
|
2022-07-24 09:22:57 +00:00
|
|
|
|
|
|
|
// TestSetSnapshotID sets the snapshot's ID.
|
2023-05-18 17:23:32 +00:00
|
|
|
func TestSetSnapshotID(_ testing.TB, sn *Snapshot, id ID) {
|
2022-07-24 09:22:57 +00:00
|
|
|
sn.id = &id
|
|
|
|
}
|
2023-03-15 02:16:24 +00:00
|
|
|
|
2023-04-09 18:47:10 +00:00
|
|
|
// ParseDurationOrPanic parses a duration from a string or panics if string is invalid.
|
2023-03-15 02:16:24 +00:00
|
|
|
// The format is `6y5m234d37h`.
|
|
|
|
func ParseDurationOrPanic(s string) Duration {
|
|
|
|
d, err := ParseDuration(s)
|
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return d
|
|
|
|
}
|
2023-12-24 14:04:05 +00:00
|
|
|
|
|
|
|
// TestLoadAllSnapshots returns a list of all snapshots in the repo.
|
|
|
|
// If a snapshot ID is in excludeIDs, it will not be included in the result.
|
|
|
|
func TestLoadAllSnapshots(ctx context.Context, repo Repository, excludeIDs IDSet) (snapshots Snapshots, err error) {
|
|
|
|
err = ForAllSnapshots(ctx, repo, repo, excludeIDs, func(id ID, sn *Snapshot, err error) error {
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
snapshots = append(snapshots, sn)
|
|
|
|
return nil
|
|
|
|
})
|
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return snapshots, nil
|
|
|
|
}
|