restic/internal/fuse/file.go

// +build !openbsd
// +build !windows

package fuse

import (
	"bazil.org/fuse"
	"bazil.org/fuse/fs"
	"golang.org/x/net/context"

	"github.com/restic/restic/internal/debug"
	"github.com/restic/restic/internal/errors"
	"github.com/restic/restic/internal/restic"
)

// The default block size to report in stat
const blockSize = 512
// Statically ensure that *file implements the given interfaces
var _ = fs.HandleReader(&file{})
var _ = fs.HandleReleaser(&file{})
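
// file exposes a single snapshot node as a FUSE file, loading its data
// blobs from the repository on demand.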
type file struct {
root *Root
node *restic.Node
inode uint64
sizes []int
blobs [][]byte
}
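
// newFile builds a file handle for node, resolving the size of each content
// blob (using the blob size cache where possible) and correcting node.Size
// if it disagrees with the sum of the blob sizes.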
func newFile(ctx context.Context, root *Root, inode uint64, node *restic.Node) (fusefile *file, err error) {
debug.Log("create new file for %v with %d blobs", node.Name, len(node.Content))
var bytes uint64
sizes := make([]int, len(node.Content))
for i, id := range node.Content {
size, ok := root.blobSizeCache.Lookup(id)
if !ok {
size, err = root.repo.LookupBlobSize(id, restic.DataBlob)
if err != nil {
return nil, err
}
}
sizes[i] = int(size)
bytes += uint64(size)
}
if bytes != node.Size {
debug.Log("sizes do not match: node.Size %v != size %v, using real size", node.Size, bytes)
node.Size = bytes
}
return &file{
inode: inode,
root: root,
node: node,
sizes: sizes,
blobs: make([][]byte, len(node.Content)),
}, nil
}
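
// Attr fills a with the file attributes (size, mode, ownership, timestamps)
// recorded in the snapshot node.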
func (f *file) Attr(ctx context.Context, a *fuse.Attr) error {
debug.Log("Attr(%v)", f.node.Name)
a.Inode = f.inode
a.Mode = f.node.Mode
a.Size = f.node.Size
a.Blocks = (f.node.Size / blockSize) + 1
a.BlockSize = blockSize
a.Nlink = uint32(f.node.Links)
if !f.root.cfg.OwnerIsRoot {
a.Uid = f.node.UID
a.Gid = f.node.GID
}
a.Atime = f.node.AccessTime
a.Ctime = f.node.ChangeTime
a.Mtime = f.node.ModTime
return nil
}
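
// getBlobAt returns the i-th data blob of the file, loading it from the
// repository if it is not cached and releasing all blobs before index i.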
func (f *file) getBlobAt(ctx context.Context, i int) (blob []byte, err error) {
debug.Log("getBlobAt(%v, %v)", f.node.Name, i)
if f.blobs[i] != nil {
return f.blobs[i], nil
}
// release earlier blobs
for j := 0; j < i; j++ {
f.blobs[j] = nil
}
buf := restic.NewBlobBuffer(f.sizes[i])
n, err := f.root.repo.LoadBlob(ctx, restic.DataBlob, f.node.Content[i], buf)
if err != nil {
debug.Log("LoadBlob(%v, %v) failed: %v", f.node.Name, f.node.Content[i], err)
return nil, err
}
f.blobs[i] = buf[:n]
return buf[:n], nil
}
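
// Read satisfies a FUSE read request by copying the requested byte range
// out of the file's data blobs into resp.Data.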
func (f *file) Read(ctx context.Context, req *fuse.ReadRequest, resp *fuse.ReadResponse) error {
debug.Log("Read(%v, %v, %v), file size %v", f.node.Name, req.Size, req.Offset, f.node.Size)
offset := req.Offset
if uint64(offset) > f.node.Size {
debug.Log("Read(%v): offset is greater than file size: %v > %v",
f.node.Name, req.Offset, f.node.Size)
		return errors.New("offset greater than file size")
}
// handle special case: file is empty
if f.node.Size == 0 {
resp.Data = resp.Data[:0]
return nil
}
// Skip blobs before the offset
startContent := 0
for offset > int64(f.sizes[startContent]) {
offset -= int64(f.sizes[startContent])
startContent++
}
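
	// copy the requested range out of the blobs, trimming the remaining
	// offset from the first relevant blob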
dst := resp.Data[0:req.Size]
readBytes := 0
remainingBytes := req.Size
for i := startContent; remainingBytes > 0 && i < len(f.sizes); i++ {
blob, err := f.getBlobAt(ctx, i)
if err != nil {
return err
}
if offset > 0 {
blob = blob[offset:]
offset = 0
}
copied := copy(dst, blob)
remainingBytes -= copied
readBytes += copied
dst = dst[copied:]
}
resp.Data = resp.Data[:readBytes]
return nil
}
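
// Release drops all cached blob data when the file handle is closed.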
func (f *file) Release(ctx context.Context, req *fuse.ReleaseRequest) error {
for i := range f.blobs {
f.blobs[i] = nil
}
return nil
}
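
// Listxattr reports the names of the node's extended attributes.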
func (f *file) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp *fuse.ListxattrResponse) error {
debug.Log("Listxattr(%v, %v)", f.node.Name, req.Size)
for _, attr := range f.node.ExtendedAttributes {
resp.Append(attr.Name)
}
return nil
}
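
// Getxattr returns the value of the named extended attribute, or
// fuse.ErrNoXattr if the node does not have it.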
func (f *file) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *fuse.GetxattrResponse) error {
debug.Log("Getxattr(%v, %v, %v)", f.node.Name, req.Name, req.Size)
attrval := f.node.GetExtendedAttribute(req.Name)
if attrval != nil {
resp.Xattr = attrval
return nil
}
return fuse.ErrNoXattr
}