// file_saver.go — mirror of https://github.com/octoleo/restic.git

package archiver

import (
	"context"
	"fmt"
	"io"
	"os"
	"sync"

	"github.com/restic/chunker"
	"golang.org/x/sync/errgroup"

	"github.com/restic/restic/internal/debug"
	"github.com/restic/restic/internal/errors"
	"github.com/restic/restic/internal/fs"
	"github.com/restic/restic/internal/restic"
)
// SaveBlobFn saves a blob to a repo. The final callback is invoked with the
// SaveBlobResponse once the blob has been stored (asynchronously).
type SaveBlobFn func(context.Context, restic.BlobType, *Buffer, string, func(res SaveBlobResponse))
// FileSaver concurrently saves incoming files to the repo.
type FileSaver struct {
	// saveFilePool provides reusable chunk buffers, shared between the
	// file workers and the blob save callbacks.
	saveFilePool *BufferPool
	// saveBlob stores a single data blob; supplied by the caller of NewFileSaver.
	saveBlob SaveBlobFn

	pol chunker.Pol        // chunker polynomial, reused for every file
	ch  chan<- saveFileJob // jobs submitted via Save, consumed by the workers

	// CompleteBlob is called once per chunk read from a file, with the
	// number of bytes in that chunk.
	CompleteBlob func(bytes uint64)

	// NodeFromFileInfo builds a restic.Node for the file being saved.
	NodeFromFileInfo func(snPath, filename string, fi os.FileInfo, ignoreXattrListError bool) (*restic.Node, error)
}
// NewFileSaver returns a new file saver. A worker pool with fileWorkers is
2018-03-30 20:43:18 +00:00
// started, it is stopped when ctx is cancelled.
2022-05-27 17:08:50 +00:00
func NewFileSaver(ctx context.Context, wg *errgroup.Group, save SaveBlobFn, pol chunker.Pol, fileWorkers, blobWorkers uint) *FileSaver {
2018-04-29 13:34:41 +00:00
ch := make(chan saveFileJob)
2018-04-29 13:34:41 +00:00
debug.Log("new file saver with %v file workers and %v blob workers", fileWorkers, blobWorkers)
poolSize := fileWorkers + blobWorkers
2018-03-30 20:43:18 +00:00
s := &FileSaver{
saveBlob: save,
saveFilePool: NewBufferPool(int(poolSize), chunker.MaxSize),
2018-03-30 20:43:18 +00:00
pol: pol,
ch: ch,
CompleteBlob: func(uint64) {},
2018-03-30 20:43:18 +00:00
}
for i := uint(0); i < fileWorkers; i++ {
2022-05-27 17:08:50 +00:00
wg.Go(func() error {
s.worker(ctx, ch)
return nil
})
2018-03-30 20:43:18 +00:00
}
return s
}
2022-05-27 17:08:50 +00:00
// TriggerShutdown closes the job channel; the worker goroutines return once
// the channel is closed. No further calls to Save may be made afterwards.
func (s *FileSaver) TriggerShutdown() {
	close(s.ch)
}
// CompleteFunc is called when the file has been saved, with the resulting
// node and the accumulated item statistics.
type CompleteFunc func(*restic.Node, ItemStats)
// Save stores the file f and returns the data once it has been completed. The
// file is closed by Save. completeReading is only called if the file was read
// successfully. complete is always called. If completeReading is called, then
// this will always happen before calling complete.
func (s *FileSaver) Save(ctx context.Context, snPath string, target string, file fs.File, fi os.FileInfo, start func(), completeReading func(), complete CompleteFunc) FutureNode {
fn, ch := newFutureNode()
job := saveFileJob{
snPath: snPath,
target: target,
file: file,
fi: fi,
ch: ch,
start: start,
completeReading: completeReading,
complete: complete,
2018-03-30 20:43:18 +00:00
}
select {
case s.ch <- job:
case <-ctx.Done():
debug.Log("not sending job, context is cancelled: %v", ctx.Err())
_ = file.Close()
close(ch)
}
return fn
2018-03-30 20:43:18 +00:00
}
// saveFileJob bundles everything a worker needs to chunk and save one file.
type saveFileJob struct {
	snPath string // passed through to NodeFromFileInfo and the result
	target string // used in error messages and handed to saveBlob
	file   fs.File
	fi     os.FileInfo
	ch     chan<- futureNodeResult // receives the final result; closed afterwards

	start           func()       // invoked when processing of the file begins
	completeReading func()       // invoked after a successful read (may be nil)
	complete        CompleteFunc // invoked with node and stats (may be nil)
}
// saveFile stores the file f in the repo, then closes it.
//
// The file is read and chunked here, but the blobs are stored asynchronously
// via s.saveBlob. Completion is tracked with a reference count (remaining):
// one count per stored blob, plus one extra that is only released at the very
// end of this function, so finish() cannot fire before the whole file has
// been read. On any error, completeError delivers the failure exactly once.
func (s *FileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPath string, target string, f fs.File, fi os.FileInfo, start func(), finishReading func(), finish func(res futureNodeResult)) {
	start()

	fnr := futureNodeResult{
		snPath: snPath,
		target: target,
	}
	var lock sync.Mutex
	remaining := 0
	isCompleted := false

	// completeBlob is called once per saveBlob callback and once more at the
	// end of this function. Note that remaining may temporarily go negative
	// while blobs complete before the final `remaining += idx + 1` below;
	// the == 0 test therefore only succeeds after that increment, which is
	// exactly the intended ordering.
	completeBlob := func() {
		lock.Lock()
		defer lock.Unlock()

		remaining--
		if remaining == 0 && fnr.err == nil {
			if isCompleted {
				panic("completed twice")
			}
			// sanity check: every Content slot must have been filled in by
			// its saveBlob callback by now
			for _, id := range fnr.node.Content {
				if id.IsNull() {
					panic("completed file with null ID")
				}
			}
			isCompleted = true
			finish(fnr)
		}
	}
	// completeError delivers err via finish() exactly once; once fnr.err is
	// set, later completeBlob calls can no longer trigger finish().
	completeError := func(err error) {
		lock.Lock()
		defer lock.Unlock()

		if fnr.err == nil {
			if isCompleted {
				panic("completed twice")
			}
			isCompleted = true
			fnr.err = fmt.Errorf("failed to save %v: %w", target, err)
			fnr.node = nil
			fnr.stats = ItemStats{}
			finish(fnr)
		}
	}

	debug.Log("%v", snPath)

	node, err := s.NodeFromFileInfo(snPath, f.Name(), fi, false)
	if err != nil {
		_ = f.Close()
		completeError(err)
		return
	}
	if node.Type != "file" {
		_ = f.Close()
		completeError(errors.Errorf("node type %q is wrong", node.Type))
		return
	}

	// reuse the chunker
	chnker.Reset(f, s.pol)

	node.Content = []restic.ID{}
	node.Size = 0
	var idx int
	for {
		buf := s.saveFilePool.Get()
		chunk, err := chnker.Next(buf.Data)
		if err == io.EOF {
			buf.Release()
			break
		}

		buf.Data = chunk.Data
		node.Size += uint64(chunk.Length)

		if err != nil {
			_ = f.Close()
			completeError(err)
			return
		}
		// test if the context has been cancelled, return the error
		if ctx.Err() != nil {
			_ = f.Close()
			completeError(ctx.Err())
			return
		}

		// add a place to store the saveBlob result
		pos := idx
		// node.Content is also read/written by the saveBlob callbacks below,
		// so the append must happen under the lock
		lock.Lock()
		node.Content = append(node.Content, restic.ID{})
		lock.Unlock()

		s.saveBlob(ctx, restic.DataBlob, buf, target, func(sbr SaveBlobResponse) {
			lock.Lock()
			if !sbr.known {
				fnr.stats.DataBlobs++
				fnr.stats.DataSize += uint64(sbr.length)
				fnr.stats.DataSizeInRepo += uint64(sbr.sizeInRepo)
			}
			node.Content[pos] = sbr.id
			lock.Unlock()

			completeBlob()
		})
		idx++

		// test if the context has been cancelled, return the error
		if ctx.Err() != nil {
			_ = f.Close()
			completeError(ctx.Err())
			return
		}

		s.CompleteBlob(uint64(len(chunk.Data)))
	}

	err = f.Close()
	if err != nil {
		completeError(err)
		return
	}

	fnr.node = node
	lock.Lock()
	// require one additional completeFuture() call to ensure that the future only completes
	// after reaching the end of this method
	remaining += idx + 1
	lock.Unlock()
	finishReading()
	completeBlob()
}
func (s *FileSaver) worker(ctx context.Context, jobs <-chan saveFileJob) {
2018-03-30 20:43:18 +00:00
// a worker has one chunker which is reused for each file (because it contains a rather large buffer)
chnker := chunker.New(nil, s.pol)
for {
var job saveFileJob
2022-05-27 17:08:50 +00:00
var ok bool
2018-03-30 20:43:18 +00:00
select {
case <-ctx.Done():
return
2022-05-27 17:08:50 +00:00
case job, ok = <-jobs:
if !ok {
return
}
2018-03-30 20:43:18 +00:00
}
s.saveFile(ctx, chnker, job.snPath, job.target, job.file, job.fi, job.start, func() {
if job.completeReading != nil {
job.completeReading()
}
}, func(res futureNodeResult) {
if job.complete != nil {
job.complete(res.node, res.stats)
}
job.ch <- res
close(job.ch)
})
2018-03-30 20:43:18 +00:00
}
}