// +build !openbsd
// +build !windows

package fuse
import (
	"bytes"
	"math/rand"
	"testing"
	"time"

	"golang.org/x/net/context"

	"restic/repository"

	"bazil.org/fuse"
	"bazil.org/fuse/fs"

	"restic"
	. "restic/test"
)
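
// testRead builds a FUSE read request for the given offset and length, with
// data as the response buffer, and fails the test if f.Read returns an error.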
func testRead(t testing.TB, f *file, offset, length int, data []byte) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	req := &fuse.ReadRequest{
		Offset: int64(offset),
		Size:   length,
	}
	resp := &fuse.ReadResponse{
		Data: data,
	}
	OK(t, f.Read(ctx, req, resp))
}
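
// firstSnapshotID returns the ID of the first snapshot file listed in repo.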
func firstSnapshotID(t testing.TB, repo restic.Repository) (first restic.ID) {
	for id := range repo.List(context.TODO(), restic.SnapshotFile) {
		if first.IsNull() {
			first = id
		}
	}

	return first
}
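
// loadFirstSnapshot loads the snapshot returned by firstSnapshotID from repo,
// failing the test on error.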
func loadFirstSnapshot(t testing.TB, repo restic.Repository) *restic.Snapshot {
	id := firstSnapshotID(t, repo)
	sn, err := restic.LoadSnapshot(context.TODO(), repo, id)
	OK(t, err)
	return sn
}
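
// loadTree loads the tree with the given ID from repo, failing the test on error.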
func loadTree(t testing.TB, repo restic.Repository, id restic.ID) *restic.Tree {
	tree, err := repo.LoadTree(context.TODO(), id)
	OK(t, err)
	return tree
}
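
// TestFuseFile creates a snapshot in a test repository, assembles a single
// file node from all data blobs referenced by the snapshot's tree, and checks
// that reads through the FUSE file match an in-memory copy of the data.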
func TestFuseFile(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	timestamp, err := time.Parse(time.RFC3339, "2017-01-24T10:42:56+01:00")
	OK(t, err)
	restic.TestCreateSnapshot(t, repo, timestamp, 2, 0.1)
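
	// Load the first snapshot and collect the IDs of all data blobs
	// referenced by its tree.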
	sn := loadFirstSnapshot(t, repo)
	tree := loadTree(t, repo, *sn.Tree)

	var content restic.IDs
	for _, node := range tree.Nodes {
		content = append(content, node.Content...)
	}
	t.Logf("tree loaded, content: %v", content)
	var (
		filesize uint64
		memfile  []byte
	)
	for _, id := range content {
		size, err := repo.LookupBlobSize(id, restic.DataBlob)
		OK(t, err)
		filesize += uint64(size)

		buf := restic.NewBlobBuffer(int(size))
		n, err := repo.LoadBlob(context.TODO(), restic.DataBlob, id, buf)
		OK(t, err)

		if uint(n) != size {
			t.Fatalf("not enough bytes read for id %v: want %v, got %v", id.Str(), size, n)
		}

		if uint(len(buf)) != size {
			t.Fatalf("buffer has wrong length for id %v: want %v, got %v", id.Str(), size, len(buf))
		}

		memfile = append(memfile, buf...)
	}

	t.Logf("filesize is %v, memfile has size %v", filesize, len(memfile))
	node := &restic.Node{
		Name:    "foo",
		Inode:   23,
		Mode:    0742,
		Size:    filesize,
		Content: content,
	}
	root := &Root{
		blobSizeCache: NewBlobSizeCache(context.TODO(), repo.Index()),
		repo:          repo,
	}

	t.Logf("blob cache has %d entries", len(root.blobSizeCache.m))

	inode := fs.GenerateDynamicInode(1, "foo")
	f, err := newFile(context.TODO(), root, inode, node)
	OK(t, err)
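
	// The attributes reported for the file must match the node.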
	attr := fuse.Attr{}
	OK(t, f.Attr(ctx, &attr))

	Equals(t, inode, attr.Inode)
	Equals(t, node.Mode, attr.Mode)
	Equals(t, node.Size, attr.Size)
	Equals(t, (node.Size/uint64(attr.BlockSize))+1, attr.Blocks)
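
	// Read random ranges through the fuse file and compare them with the
	// in-memory copy.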
	for i := 0; i < 200; i++ {
		offset := rand.Intn(int(filesize))
		length := rand.Intn(int(filesize)-offset) + 100
		if offset+length > int(filesize) {
			// keep the read within the file so the memfile slice below
			// cannot go out of range
			length = int(filesize) - offset
		}

		b := memfile[offset : offset+length]

		buf := make([]byte, length)

		testRead(t, f, offset, length, buf)
		if !bytes.Equal(b, buf) {
			t.Errorf("test %d failed, wrong data returned (offset %v, length %v)", i, offset, length)
		}
	}

	OK(t, f.Release(ctx, nil))
}