2015-08-16 20:27:07 +00:00
|
|
|
// +build !openbsd
|
2015-08-17 17:40:34 +00:00
|
|
|
// +build !windows
|
2015-08-16 20:27:07 +00:00
|
|
|
|
2015-07-19 12:28:11 +00:00
|
|
|
package fuse
|
|
|
|
|
|
|
|
import (
|
2015-07-26 12:25:01 +00:00
|
|
|
"sync"
|
|
|
|
|
2016-02-14 14:29:28 +00:00
|
|
|
"restic"
|
|
|
|
"restic/backend"
|
2016-05-08 20:20:46 +00:00
|
|
|
"restic/debug"
|
2016-02-14 14:29:28 +00:00
|
|
|
"restic/pack"
|
2015-07-19 12:28:11 +00:00
|
|
|
|
|
|
|
"bazil.org/fuse"
|
|
|
|
"bazil.org/fuse/fs"
|
|
|
|
"golang.org/x/net/context"
|
|
|
|
)
|
|
|
|
|
2016-02-14 20:24:02 +00:00
|
|
|
// The default block size to report in stat (fuse.Attr.BlockSize); also the
// unit used to compute fuse.Attr.Blocks in file.Attr.
const blockSize = 512
|
|
|
|
|
2015-07-19 12:28:11 +00:00
|
|
|
// Statically ensure that *file implements the given interface
|
|
|
|
var _ = fs.HandleReader(&file{})
|
2015-07-26 12:25:01 +00:00
|
|
|
var _ = fs.HandleReleaser(&file{})
|
2015-07-19 12:28:11 +00:00
|
|
|
|
2015-07-26 14:43:42 +00:00
|
|
|
// BlobLoader is an abstracted repository with a reduced set of methods used
// for fuse operations.
type BlobLoader interface {
	// LookupBlobSize returns the size of the blob with the given ID.
	LookupBlobSize(backend.ID) (uint, error)
	// LoadBlob loads the blob with the given type and ID, using buf as
	// scratch space, and returns the blob data.
	LoadBlob(pack.BlobType, backend.ID, []byte) ([]byte, error)
}
|
|
|
|
|
2015-07-19 12:28:11 +00:00
|
|
|
// file is a read-only fuse file backed by the content blobs of a restic node.
type file struct {
	repo        BlobLoader   // repository the content blobs are loaded from
	node        *restic.Node // node metadata (size, mode, times, content IDs)
	ownerIsRoot bool         // when true, Attr leaves Uid/Gid at 0 (root) instead of the node's values

	sizes []uint   // size of each content blob, indexed like node.Content
	blobs [][]byte // lazily loaded blob data; nil until first accessed via getBlobAt
}
|
|
|
|
|
2015-07-26 12:25:01 +00:00
|
|
|
// defaultBlobSize is the initial length of buffers handed out by blobPool.
const defaultBlobSize = 128 * 1024

// blobPool recycles the byte slices used to load blob data, so repeated
// reads do not allocate a fresh buffer per blob. Buffers are returned to
// the pool in file.Release.
var blobPool = sync.Pool{
	New: func() interface{} {
		return make([]byte, defaultBlobSize)
	},
}
|
|
|
|
|
2015-07-26 18:41:29 +00:00
|
|
|
func newFile(repo BlobLoader, node *restic.Node, ownerIsRoot bool) (*file, error) {
|
2016-05-08 20:20:46 +00:00
|
|
|
debug.Log("newFile", "create new file for %v with %d blobs", node.Name, len(node.Content))
|
2015-07-26 14:43:42 +00:00
|
|
|
sizes := make([]uint, len(node.Content))
|
|
|
|
for i, id := range node.Content {
|
|
|
|
size, err := repo.LookupBlobSize(id)
|
2015-07-19 12:28:11 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2015-07-26 14:43:42 +00:00
|
|
|
|
|
|
|
sizes[i] = size
|
2015-07-19 12:28:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return &file{
|
2015-07-26 18:41:29 +00:00
|
|
|
repo: repo,
|
|
|
|
node: node,
|
|
|
|
sizes: sizes,
|
|
|
|
blobs: make([][]byte, len(node.Content)),
|
|
|
|
ownerIsRoot: ownerIsRoot,
|
2015-07-19 12:28:11 +00:00
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (f *file) Attr(ctx context.Context, a *fuse.Attr) error {
|
2016-05-08 20:20:46 +00:00
|
|
|
debug.Log("file.Attr", "Attr(%v)", f.node.Name)
|
2015-07-19 12:28:11 +00:00
|
|
|
a.Inode = f.node.Inode
|
|
|
|
a.Mode = f.node.Mode
|
|
|
|
a.Size = f.node.Size
|
2016-02-14 20:24:02 +00:00
|
|
|
a.Blocks = (f.node.Size / blockSize) + 1
|
|
|
|
a.BlockSize = blockSize
|
2015-07-26 18:41:29 +00:00
|
|
|
|
|
|
|
if !f.ownerIsRoot {
|
|
|
|
a.Uid = f.node.UID
|
|
|
|
a.Gid = f.node.GID
|
|
|
|
}
|
2015-07-21 20:11:30 +00:00
|
|
|
a.Atime = f.node.AccessTime
|
|
|
|
a.Ctime = f.node.ChangeTime
|
|
|
|
a.Mtime = f.node.ModTime
|
2015-07-19 12:28:11 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (f *file) getBlobAt(i int) (blob []byte, err error) {
|
2016-05-08 20:20:46 +00:00
|
|
|
debug.Log("file.getBlobAt", "getBlobAt(%v, %v)", f.node.Name, i)
|
2015-07-19 12:28:11 +00:00
|
|
|
if f.blobs[i] != nil {
|
2015-07-26 12:25:01 +00:00
|
|
|
return f.blobs[i], nil
|
|
|
|
}
|
|
|
|
|
|
|
|
buf := blobPool.Get().([]byte)
|
|
|
|
buf = buf[:cap(buf)]
|
|
|
|
|
2015-07-26 14:43:42 +00:00
|
|
|
if uint(len(buf)) < f.sizes[i] {
|
2015-07-26 12:25:01 +00:00
|
|
|
if len(buf) > defaultBlobSize {
|
|
|
|
blobPool.Put(buf)
|
2015-07-19 12:28:11 +00:00
|
|
|
}
|
2015-07-26 12:25:01 +00:00
|
|
|
buf = make([]byte, f.sizes[i])
|
2015-07-19 12:28:11 +00:00
|
|
|
}
|
|
|
|
|
2015-07-26 12:25:01 +00:00
|
|
|
blob, err = f.repo.LoadBlob(pack.Data, f.node.Content[i], buf)
|
|
|
|
if err != nil {
|
2016-05-08 20:20:46 +00:00
|
|
|
debug.Log("file.getBlobAt", "LoadBlob(%v, %v) failed: %v", f.node.Name, f.node.Content[i], err)
|
2015-07-26 12:25:01 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
f.blobs[i] = blob
|
|
|
|
|
2015-07-19 12:28:11 +00:00
|
|
|
return blob, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (f *file) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
|
2016-05-08 20:20:46 +00:00
|
|
|
debug.Log("file.Read", "Read(%v), file size %v", req.Size, f.node.Size)
|
2015-07-26 12:25:01 +00:00
|
|
|
offset := req.Offset
|
2015-07-19 12:28:11 +00:00
|
|
|
|
|
|
|
// Skip blobs before the offset
|
|
|
|
startContent := 0
|
2015-07-26 12:25:01 +00:00
|
|
|
for offset > int64(f.sizes[startContent]) {
|
|
|
|
offset -= int64(f.sizes[startContent])
|
2015-07-19 12:28:11 +00:00
|
|
|
startContent++
|
|
|
|
}
|
|
|
|
|
2015-07-26 12:25:01 +00:00
|
|
|
dst := resp.Data[0:req.Size]
|
|
|
|
readBytes := 0
|
|
|
|
remainingBytes := req.Size
|
|
|
|
for i := startContent; remainingBytes > 0 && i < len(f.sizes); i++ {
|
2015-07-19 12:28:11 +00:00
|
|
|
blob, err := f.getBlobAt(i)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2015-07-26 12:25:01 +00:00
|
|
|
if offset > 0 {
|
|
|
|
blob = blob[offset:len(blob)]
|
|
|
|
offset = 0
|
2015-07-19 12:28:11 +00:00
|
|
|
}
|
2015-07-26 12:25:01 +00:00
|
|
|
|
|
|
|
copied := copy(dst, blob)
|
|
|
|
remainingBytes -= copied
|
|
|
|
readBytes += copied
|
|
|
|
|
|
|
|
dst = dst[copied:]
|
|
|
|
}
|
|
|
|
resp.Data = resp.Data[:readBytes]
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (f *file) Release(ctx context.Context, req *fuse.ReleaseRequest) error {
|
|
|
|
for i := range f.blobs {
|
|
|
|
if f.blobs[i] != nil {
|
|
|
|
blobPool.Put(f.blobs[i])
|
|
|
|
f.blobs[i] = nil
|
2015-07-19 12:28:11 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|