2018-04-08 12:02:30 +00:00
|
|
|
package restorer
|
|
|
|
|
|
|
|
import (
|
2024-06-01 19:56:56 +00:00
|
|
|
"fmt"
|
2018-04-08 12:02:30 +00:00
|
|
|
"os"
|
|
|
|
"sync"
|
2024-06-01 19:56:56 +00:00
|
|
|
"syscall"
|
2018-04-08 12:02:30 +00:00
|
|
|
|
2020-10-05 13:38:39 +00:00
|
|
|
"github.com/cespare/xxhash/v2"
|
2020-08-15 15:45:05 +00:00
|
|
|
"github.com/restic/restic/internal/debug"
|
2024-06-01 19:56:56 +00:00
|
|
|
"github.com/restic/restic/internal/errors"
|
2021-02-02 14:43:40 +00:00
|
|
|
"github.com/restic/restic/internal/fs"
|
2018-04-08 12:02:30 +00:00
|
|
|
)
|
|
|
|
|
2019-11-27 12:22:38 +00:00
|
|
|
// filesWriter writes blobs to target files.
//
// Multiple files can be written to concurrently.
// Multiple blobs can be concurrently written to the same file.
// TODO I am not 100% convinced this is necessary, i.e. it may be okay
// to use multiple os.File to write to the same target file
type filesWriter struct {
	// buckets shards the map of open files by path hash to reduce lock contention.
	buckets []filesWriterBucket
	// allowRecursiveDelete selects fs.RemoveAll over fs.Remove when an
	// unexpected directory tree at a target path has to be replaced.
	allowRecursiveDelete bool
}
|
|
|
|
|
2019-11-27 12:22:38 +00:00
|
|
|
// filesWriterBucket guards one shard of the currently open target files.
type filesWriterBucket struct {
	lock sync.Mutex
	// files maps a target path to its open, reference-counted file handle.
	files map[string]*partialFile
}
|
|
|
|
|
|
|
|
// partialFile is an open target file shared by concurrent blob writes;
// it is closed once its reference count drops to zero.
type partialFile struct {
	*os.File
	users int // Reference count.
	// sparse records whether the file was created in sparse mode.
	sparse bool
}
|
|
|
|
|
2024-06-29 18:23:28 +00:00
|
|
|
func newFilesWriter(count int, allowRecursiveDelete bool) *filesWriter {
|
2019-11-27 12:22:38 +00:00
|
|
|
buckets := make([]filesWriterBucket, count)
|
|
|
|
for b := 0; b < count; b++ {
|
2020-02-26 20:48:05 +00:00
|
|
|
buckets[b].files = make(map[string]*partialFile)
|
2019-11-27 12:22:38 +00:00
|
|
|
}
|
2019-02-25 05:50:40 +00:00
|
|
|
return &filesWriter{
|
2024-06-29 18:23:28 +00:00
|
|
|
buckets: buckets,
|
|
|
|
allowRecursiveDelete: allowRecursiveDelete,
|
2019-02-25 05:50:40 +00:00
|
|
|
}
|
2018-04-08 12:02:30 +00:00
|
|
|
}
|
|
|
|
|
2024-06-01 19:56:56 +00:00
|
|
|
func openFile(path string) (*os.File, error) {
|
2024-06-13 20:40:35 +00:00
|
|
|
f, err := fs.OpenFile(path, fs.O_WRONLY|fs.O_NOFOLLOW, 0600)
|
2024-05-31 15:06:08 +00:00
|
|
|
if err != nil {
|
2024-06-01 19:56:56 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
fi, err := f.Stat()
|
|
|
|
if err != nil {
|
|
|
|
_ = f.Close()
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if !fi.Mode().IsRegular() {
|
|
|
|
_ = f.Close()
|
|
|
|
return nil, fmt.Errorf("unexpected file type %v at %q", fi.Mode().Type(), path)
|
|
|
|
}
|
|
|
|
return f, nil
|
|
|
|
}
|
2024-05-30 20:58:44 +00:00
|
|
|
|
2024-06-29 18:23:28 +00:00
|
|
|
// createFile opens (or creates) the target file at path for writing and
// brings it to createSize bytes via ensureSize. It deals with several kinds
// of pre-existing entries at path: readonly files (permissions are reset and
// the open retried), symlinks and directories (removed and replaced), and
// hardlinked files (replaced so other link targets stay untouched).
// allowRecursiveDelete permits removing a whole directory tree at path.
func createFile(path string, createSize int64, sparse bool, allowRecursiveDelete bool) (*os.File, error) {
	f, err := fs.OpenFile(path, fs.O_CREATE|fs.O_WRONLY|fs.O_NOFOLLOW, 0600)
	if err != nil && fs.IsAccessDenied(err) {
		// If file is readonly, clear the readonly flag by resetting the
		// permissions of the file and try again
		// as the metadata will be set again in the second pass and the
		// readonly flag will be applied again if needed.
		if err = fs.ResetPermissions(path); err != nil {
			return nil, err
		}
		if f, err = fs.OpenFile(path, fs.O_WRONLY|fs.O_NOFOLLOW, 0600); err != nil {
			return nil, err
		}
	} else if err != nil && (errors.Is(err, syscall.ELOOP) || errors.Is(err, syscall.EISDIR)) {
		// symlink or directory, try to remove it later on
		f = nil
	} else if err != nil {
		return nil, err
	}

	var fi os.FileInfo
	if f != nil {
		// stat to check that we've opened a regular file
		fi, err = f.Stat()
		if err != nil {
			_ = f.Close()
			return nil, err
		}
	}

	// replace the entry if we could not open it as a file, if it is not a
	// regular file, or if it is hardlinked to other files
	mustReplace := f == nil || !fi.Mode().IsRegular()
	if !mustReplace {
		ex := fs.ExtendedStat(fi)
		if ex.Links > 1 {
			// there is no efficient way to find out which other files might be linked to this file
			// thus nuke the existing file and start with a fresh one
			mustReplace = true
		}
	}

	if mustReplace {
		// close handle if we still have it
		if f != nil {
			if err := f.Close(); err != nil {
				return nil, err
			}
		}

		// not what we expected, try to get rid of it
		if allowRecursiveDelete {
			if err := fs.RemoveAll(path); err != nil {
				return nil, err
			}
		} else {
			if err := fs.Remove(path); err != nil {
				return nil, err
			}
		}
		// create a new file, pass O_EXCL to make sure there are no surprises
		f, err = fs.OpenFile(path, fs.O_CREATE|fs.O_WRONLY|fs.O_EXCL|fs.O_NOFOLLOW, 0600)
		if err != nil {
			return nil, err
		}
		fi, err = f.Stat()
		if err != nil {
			_ = f.Close()
			return nil, err
		}
	}

	return ensureSize(f, fi, createSize, sparse)
}
|
|
|
|
|
2024-07-10 20:28:48 +00:00
|
|
|
func ensureSize(f *os.File, fi os.FileInfo, createSize int64, sparse bool) (*os.File, error) {
|
2024-05-31 15:06:08 +00:00
|
|
|
if sparse {
|
2024-06-01 19:56:56 +00:00
|
|
|
err := truncateSparse(f, createSize)
|
2024-05-31 15:06:08 +00:00
|
|
|
if err != nil {
|
|
|
|
_ = f.Close()
|
|
|
|
return nil, err
|
|
|
|
}
|
2024-06-01 19:56:56 +00:00
|
|
|
} else if fi.Size() > createSize {
|
|
|
|
// file is too long must shorten it
|
|
|
|
err := f.Truncate(createSize)
|
2024-05-31 15:06:08 +00:00
|
|
|
if err != nil {
|
|
|
|
_ = f.Close()
|
|
|
|
return nil, err
|
|
|
|
}
|
2024-06-01 19:56:56 +00:00
|
|
|
} else if createSize > 0 {
|
|
|
|
err := fs.PreallocateFile(f, createSize)
|
|
|
|
if err != nil {
|
|
|
|
// Just log the preallocate error but don't let it cause the restore process to fail.
|
|
|
|
// Preallocate might return an error if the filesystem (implementation) does not
|
|
|
|
// support preallocation or our parameters combination to the preallocate call
|
|
|
|
// This should yield a syscall.ENOTSUP error, but some other errors might also
|
|
|
|
// show up.
|
|
|
|
debug.Log("Failed to preallocate %v with size %v: %v", f.Name(), createSize, err)
|
2024-05-30 20:58:44 +00:00
|
|
|
}
|
|
|
|
}
|
2024-05-31 15:06:08 +00:00
|
|
|
return f, nil
|
2024-05-30 20:58:44 +00:00
|
|
|
}
|
|
|
|
|
2022-08-07 15:26:46 +00:00
|
|
|
func (w *filesWriter) writeToFile(path string, blob []byte, offset int64, createSize int64, sparse bool) error {
|
2019-11-27 12:22:38 +00:00
|
|
|
bucket := &w.buckets[uint(xxhash.Sum64String(path))%uint(len(w.buckets))]
|
|
|
|
|
2020-02-26 20:48:05 +00:00
|
|
|
acquireWriter := func() (*partialFile, error) {
|
2019-11-27 12:22:38 +00:00
|
|
|
bucket.lock.Lock()
|
|
|
|
defer bucket.lock.Unlock()
|
|
|
|
|
|
|
|
if wr, ok := bucket.files[path]; ok {
|
2020-02-26 20:48:05 +00:00
|
|
|
bucket.files[path].users++
|
2019-02-25 05:50:40 +00:00
|
|
|
return wr, nil
|
2018-04-08 12:02:30 +00:00
|
|
|
}
|
2024-02-23 00:31:20 +00:00
|
|
|
var f *os.File
|
|
|
|
var err error
|
2020-08-15 15:45:05 +00:00
|
|
|
if createSize >= 0 {
|
2024-06-29 18:23:28 +00:00
|
|
|
f, err = createFile(path, createSize, sparse, w.allowRecursiveDelete)
|
2024-05-30 20:58:44 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2024-02-23 00:31:20 +00:00
|
|
|
}
|
2024-06-01 19:56:56 +00:00
|
|
|
} else if f, err = openFile(path); err != nil {
|
2018-04-08 12:02:30 +00:00
|
|
|
return nil, err
|
|
|
|
}
|
2019-11-27 12:22:38 +00:00
|
|
|
|
2022-08-07 15:26:46 +00:00
|
|
|
wr := &partialFile{File: f, users: 1, sparse: sparse}
|
2019-11-27 12:22:38 +00:00
|
|
|
bucket.files[path] = wr
|
|
|
|
|
2018-04-08 12:02:30 +00:00
|
|
|
return wr, nil
|
|
|
|
}
|
2019-11-27 12:22:38 +00:00
|
|
|
|
2020-02-26 20:48:05 +00:00
|
|
|
releaseWriter := func(wr *partialFile) error {
|
2019-11-27 12:22:38 +00:00
|
|
|
bucket.lock.Lock()
|
|
|
|
defer bucket.lock.Unlock()
|
|
|
|
|
2020-02-26 20:48:05 +00:00
|
|
|
if bucket.files[path].users == 1 {
|
2019-11-27 12:22:38 +00:00
|
|
|
delete(bucket.files, path)
|
|
|
|
return wr.Close()
|
2019-02-25 05:50:40 +00:00
|
|
|
}
|
2020-02-26 20:48:05 +00:00
|
|
|
bucket.files[path].users--
|
2019-11-27 12:22:38 +00:00
|
|
|
return nil
|
2019-02-25 05:50:40 +00:00
|
|
|
}
|
2018-04-08 12:02:30 +00:00
|
|
|
|
|
|
|
wr, err := acquireWriter()
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2019-11-27 12:22:38 +00:00
|
|
|
|
|
|
|
_, err = wr.WriteAt(blob, offset)
|
|
|
|
|
2018-04-08 12:02:30 +00:00
|
|
|
if err != nil {
|
2021-01-30 18:35:46 +00:00
|
|
|
// ignore subsequent errors
|
|
|
|
_ = releaseWriter(wr)
|
2018-04-08 12:02:30 +00:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-11-27 12:22:38 +00:00
|
|
|
return releaseWriter(wr)
|
2018-04-08 12:02:30 +00:00
|
|
|
}
|