fuse: refactor and add tests for fuse.file

Alexander Neumann, 2015-07-26 16:43:42 +02:00
commit bd746a0425 (parent 55ddd5317d)
3 changed files with 179 additions and 9 deletions

@@ -4,8 +4,8 @@ import (
 	"sync"
 
 	"github.com/restic/restic"
-	"github.com/restic/restic/repository"
+	"github.com/restic/restic/backend"
+	"github.com/restic/restic/pack"
 
 	"bazil.org/fuse"
 	"bazil.org/fuse/fs"
@@ -16,11 +16,18 @@ import (
 var _ = fs.HandleReader(&file{})
 var _ = fs.HandleReleaser(&file{})
 
+// BlobLoader is an abstracted repository with a reduced set of methods used
+// for fuse operations.
+type BlobLoader interface {
+	LookupBlobSize(backend.ID) (uint, error)
+	LoadBlob(pack.BlobType, backend.ID, []byte) ([]byte, error)
+}
+
 type file struct {
-	repo  *repository.Repository
+	repo  BlobLoader
 	node  *restic.Node
 
-	sizes []uint32
+	sizes []uint
 	blobs [][]byte
 }
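The new interface decouples the fuse code from the concrete repository type: anything that can report blob sizes and load blobs can back a file. A minimal sketch of a compile-time assertion that *repository.Repository still satisfies it (not part of the commit, and it would reintroduce the repository import into this package):

	// Sketch only: assert at compile time that the concrete repository
	// implements the narrow interface the fuse code now depends on.
	var _ BlobLoader = (*repository.Repository)(nil)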
@@ -32,14 +39,15 @@ var blobPool = sync.Pool{
 	},
 }
 
-func newFile(repo *repository.Repository, node *restic.Node) (*file, error) {
-	sizes := make([]uint32, len(node.Content))
-	for i, blobID := range node.Content {
-		length, err := repo.Index().LookupSize(blobID)
+func newFile(repo BlobLoader, node *restic.Node) (*file, error) {
+	sizes := make([]uint, len(node.Content))
+	for i, id := range node.Content {
+		size, err := repo.LookupBlobSize(id)
 		if err != nil {
 			return nil, err
 		}
-		sizes[i] = uint32(length)
+		sizes[i] = size
 	}
 
 	return &file{
@@ -65,7 +73,7 @@ func (f *file) getBlobAt(i int) (blob []byte, err error) {
 	buf := blobPool.Get().([]byte)
 	buf = buf[:cap(buf)]
 
-	if uint32(len(buf)) < f.sizes[i] {
+	if uint(len(buf)) < f.sizes[i] {
 		if len(buf) > defaultBlobSize {
 			blobPool.Put(buf)
 		}
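getBlobAt draws its buffers from the package's sync.Pool and falls back to a fresh allocation when the pooled buffer is too small for f.sizes[i]. A self-contained sketch of that reuse pattern (defaultBlobSize and the put-back policy are simplified assumptions here, not the exact code above):

	package main

	import "sync"

	// defaultBlobSize is assumed for this sketch; in the real package it is
	// a constant sized to hold a typical blob.
	const defaultBlobSize = 128 * 1024

	var blobPool = sync.Pool{
		New: func() interface{} {
			return make([]byte, defaultBlobSize)
		},
	}

	// getBuf returns a buffer that can hold size bytes, reusing pooled
	// buffers where possible and allocating when the pooled one is too small.
	func getBuf(size uint) []byte {
		buf := blobPool.Get().([]byte)
		buf = buf[:cap(buf)]

		if uint(len(buf)) < size {
			// The pooled buffer cannot hold this blob: hand it back and
			// allocate one of the required size instead.
			blobPool.Put(buf)
			buf = make([]byte, size)
		}

		return buf[:size]
	}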

@@ -0,0 +1,157 @@
package fuse

import (
	"bytes"
	"errors"
	"fmt"
	"math/rand"
	"testing"
	"time"

	"bazil.org/fuse"

	"github.com/restic/restic"
	"github.com/restic/restic/backend"
	"github.com/restic/restic/pack"
	. "github.com/restic/restic/test"
)

// MockRepo is a BlobLoader backed by an in-memory map, used in place of a
// real repository in the tests below.
type MockRepo struct {
	blobs map[backend.ID][]byte
}

func NewMockRepo(content map[backend.ID][]byte) *MockRepo {
	return &MockRepo{blobs: content}
}

func (m *MockRepo) LookupBlobSize(id backend.ID) (uint, error) {
	buf, ok := m.blobs[id]
	if !ok {
		return 0, errors.New("blob not found")
	}

	return uint(len(buf)), nil
}

func (m *MockRepo) LoadBlob(t pack.BlobType, id backend.ID, buf []byte) ([]byte, error) {
	size, err := m.LookupBlobSize(id)
	if err != nil {
		return nil, err
	}

	if uint(cap(buf)) < size {
		return nil, errors.New("buffer too small")
	}

	buf = buf[:size]
	copy(buf, m.blobs[id])
	return buf, nil
}
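Since MockRepo provides exactly the two methods of BlobLoader, it can be passed straight to newFile. A one-line compile-time check (a sketch, not part of the commit) would pin that down:

	// Sketch: ensure the mock keeps satisfying the fuse package's BlobLoader.
	var _ BlobLoader = &MockRepo{}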
// MockContext is a no-op stand-in for the context passed to fuse handlers.
type MockContext struct{}

func (m MockContext) Deadline() (time.Time, bool)       { return time.Now(), false }
func (m MockContext) Done() <-chan struct{}             { return nil }
func (m MockContext) Err() error                        { return nil }
func (m MockContext) Value(key interface{}) interface{} { return nil }
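MockContext mirrors the context interface that bazil.org/fuse handlers received at the time (golang.org/x/net/context); a compile-time assertion, assuming that import, would make the intent explicit. A sketch, not part of the commit:

	// Sketch: MockContext must satisfy the context interface used by
	// f.Read and f.Attr (golang.org/x/net/context at the time).
	var _ context.Context = MockContext{}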
var testContent = genTestContent()

// testContentLengths are the sizes of the blobs the test file is built from.
var testContentLengths = []uint{
	4646 * 1024,
	655 * 1024,
	378 * 1024,
	8108 * 1024,
	558 * 1024,
}

// testMaxFileSize is the sum of all blob lengths, filled in by genTestContent.
var testMaxFileSize uint

func genTestContent() map[backend.ID][]byte {
	m := make(map[backend.ID][]byte)

	for _, length := range testContentLengths {
		buf := Random(int(length), int(length))
		id := backend.Hash(buf)
		m[id] = buf
		testMaxFileSize += length
	}

	return m
}

const maxBufSize = 20 * 1024 * 1024

func testRead(t *testing.T, f *file, offset, length int, data []byte) []byte {
	ctx := MockContext{}

	req := &fuse.ReadRequest{
		Offset: int64(offset),
		Size:   length,
	}
	resp := &fuse.ReadResponse{
		Data: make([]byte, length),
	}
	OK(t, f.Read(ctx, req, resp))

	return resp.Data
}
var offsetReadsTests = []struct {
	offset, length int
}{
	{0, 5 * 1024 * 1024},
	{4000 * 1024, 1000 * 1024},
}

func TestFuseFile(t *testing.T) {
	repo := NewMockRepo(testContent)
	ctx := MockContext{}

	memfile := make([]byte, 0, maxBufSize)

	var ids backend.IDs
	for id, buf := range repo.blobs {
		ids = append(ids, id)
		memfile = append(memfile, buf...)
	}

	node := &restic.Node{
		Name:    "foo",
		Inode:   23,
		Mode:    0742,
		Size:    42,
		Content: ids,
	}
	f, err := newFile(repo, node)
	OK(t, err)

	attr := fuse.Attr{}
	OK(t, f.Attr(ctx, &attr))

	Equals(t, node.Inode, attr.Inode)
	Equals(t, node.Mode, attr.Mode)
	Equals(t, node.Size, attr.Size)

	for i, test := range offsetReadsTests {
		b := memfile[test.offset : test.offset+test.length]
		res := testRead(t, f, test.offset, test.length, b)
		if !bytes.Equal(b, res) {
			t.Errorf("test %d failed, wrong data returned", i)
		}
	}

	for i := 0; i < 200; i++ {
		length := rand.Intn(int(testMaxFileSize) / 2)
		offset := rand.Intn(int(testMaxFileSize))

		// clamp the read so it does not run past the end of the file
		if length+offset > int(testMaxFileSize) {
			diff := length + offset - int(testMaxFileSize)
			length -= diff
		}

		b := memfile[offset : offset+length]

		fmt.Printf("test offset %d, length %d\n", offset, length)

		res := testRead(t, f, offset, length, b)
		if !bytes.Equal(b, res) {
			t.Errorf("test %d failed (offset %d, length %d), wrong data returned", i, offset, length)
		}
	}
}

@@ -201,6 +201,11 @@ func (r *Repository) LoadJSONPack(t pack.BlobType, id backend.ID, item interface{}) error {
 	return nil
 }
 
+// LookupBlobSize returns the size of blob id.
+func (r *Repository) LookupBlobSize(id backend.ID) (uint, error) {
+	return r.Index().LookupSize(id)
+}
+
 const minPackSize = 4 * chunker.MiB
 const maxPackSize = 16 * chunker.MiB
 const maxPackers = 200
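The helper simply forwards to the in-memory index, so fuse code can size a file without touching pack files. A hypothetical caller, going through the BlobLoader interface only (illustrative, not in the commit):

	// fileSize sums the sizes of a node's content blobs via BlobLoader.
	// (Hypothetical helper for illustration.)
	func fileSize(repo BlobLoader, node *restic.Node) (uint, error) {
		var total uint
		for _, id := range node.Content {
			size, err := repo.LookupBlobSize(id)
			if err != nil {
				return 0, err
			}
			total += size
		}
		return total, nil
	}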