
restore: split downloadPack into smaller methods

Michael Eischer 2024-01-07 12:00:32 +01:00
parent e4a7eb09ef
commit 9328f34d43


@@ -197,12 +197,13 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error {
 	return wg.Wait()
 }
 
-func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) error {
+type blobToFileOffsetsMapping map[restic.ID]struct {
+	files map[*fileInfo][]int64 // file -> offsets (plural!) of the blob in the file
+}
 
+func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) error {
 	// calculate blob->[]files->[]offsets mappings
-	blobs := make(map[restic.ID]struct {
-		files map[*fileInfo][]int64 // file -> offsets (plural!) of the blob in the file
-	})
+	blobs := make(blobToFileOffsetsMapping)
 	var blobList []restic.Blob
 	for file := range pack.files {
 		addBlob := func(blob restic.Blob, fileOffset int64) {
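The new blobToFileOffsetsMapping type records, for every blob in the pack, which files need it and at which byte offsets; the offsets are a slice because a deduplicated blob can occur several times within a single file. Below is a minimal, self-contained sketch of that shape (blobID and fileInfo are stand-ins for restic.ID and restic's internal fileInfo, not the real types):

package main

import "fmt"

// Stand-ins for restic.ID and restic's fileInfo, just for this sketch.
type blobID string
type fileInfo struct{ location string }

// Same shape as blobToFileOffsetsMapping in the diff above: one blob may be
// needed by several files, and at several offsets within the same file.
type blobToFileOffsets map[blobID]struct {
	files map[*fileInfo][]int64
}

func main() {
	blobs := make(blobToFileOffsets)
	file := &fileInfo{location: "dir/a.txt"}

	addBlob := func(id blobID, file *fileInfo, offset int64) {
		entry, ok := blobs[id]
		if !ok {
			entry.files = make(map[*fileInfo][]int64)
			blobs[id] = entry
		}
		entry.files[file] = append(entry.files[file], offset)
	}

	// a deduplicated blob that occurs twice in the same file
	addBlob("blob-1", file, 0)
	addBlob("blob-1", file, 4096)

	for f, offsets := range blobs["blob-1"].files {
		fmt.Println(f.location, offsets) // dir/a.txt [0 4096]
	}
}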
@@ -239,60 +240,9 @@ func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) error {
 		}
 	}
 
-	sanitizeError := func(file *fileInfo, err error) error {
-		if err != nil {
-			err = r.Error(file.location, err)
-		}
-		return err
-	}
-
 	// track already processed blobs for precise error reporting
 	processedBlobs := restic.NewBlobSet()
-	err := repository.StreamPack(ctx, r.packLoader, r.key, pack.id, blobList, func(h restic.BlobHandle, blobData []byte, err error) error {
-		processedBlobs.Insert(h)
-		blob := blobs[h.ID]
-		if err != nil {
-			for file := range blob.files {
-				if errFile := sanitizeError(file, err); errFile != nil {
-					return errFile
-				}
-			}
-			return nil
-		}
-		for file, offsets := range blob.files {
-			for _, offset := range offsets {
-				writeToFile := func() error {
-					// this looks overly complicated and needs explanation
-					// two competing requirements:
-					// - must create the file once and only once
-					// - should allow concurrent writes to the file
-					// so write the first blob while holding file lock
-					// write other blobs after releasing the lock
-					createSize := int64(-1)
-					file.lock.Lock()
-					if file.inProgress {
-						file.lock.Unlock()
-					} else {
-						defer file.lock.Unlock()
-						file.inProgress = true
-						createSize = file.size
-					}
-					writeErr := r.filesWriter.writeToFile(r.targetPath(file.location), blobData, offset, createSize, file.sparse)
-					if r.progress != nil {
-						r.progress.AddProgress(file.location, uint64(len(blobData)), uint64(file.size))
-					}
-					return writeErr
-				}
-				err := sanitizeError(file, writeToFile())
-				if err != nil {
-					return err
-				}
-			}
-		}
-		return nil
-	})
+	err := r.downloadBlobs(ctx, pack.id, blobList, blobs, processedBlobs)
 	if err != nil {
 		// only report error for not yet processed blobs
@@ -308,7 +258,7 @@ func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) error {
 		}
 
 		for file := range affectedFiles {
-			if errFile := sanitizeError(file, err); errFile != nil {
+			if errFile := r.sanitizeError(file, err); errFile != nil {
 				return errFile
 			}
 		}
@@ -316,3 +266,61 @@ func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) error {
 
 	return nil
 }
+
+func (r *fileRestorer) sanitizeError(file *fileInfo, err error) error {
+	if err != nil {
+		err = r.Error(file.location, err)
+	}
+	return err
+}
+
+func (r *fileRestorer) downloadBlobs(ctx context.Context, packID restic.ID, blobList []restic.Blob,
+	blobs blobToFileOffsetsMapping, processedBlobs restic.BlobSet) error {
+
+	return repository.StreamPack(ctx, r.packLoader, r.key, packID, blobList,
+		func(h restic.BlobHandle, blobData []byte, err error) error {
+			processedBlobs.Insert(h)
+			blob := blobs[h.ID]
+			if err != nil {
+				for file := range blob.files {
+					if errFile := r.sanitizeError(file, err); errFile != nil {
+						return errFile
+					}
+				}
+				return nil
+			}
+			for file, offsets := range blob.files {
+				for _, offset := range offsets {
+					writeToFile := func() error {
+						// this looks overly complicated and needs explanation
+						// two competing requirements:
+						// - must create the file once and only once
+						// - should allow concurrent writes to the file
+						// so write the first blob while holding file lock
+						// write other blobs after releasing the lock
+						createSize := int64(-1)
+						file.lock.Lock()
+						if file.inProgress {
+							file.lock.Unlock()
+						} else {
+							defer file.lock.Unlock()
+							file.inProgress = true
+							createSize = file.size
+						}
+						writeErr := r.filesWriter.writeToFile(r.targetPath(file.location), blobData, offset, createSize, file.sparse)
+						if r.progress != nil {
+							r.progress.AddProgress(file.location, uint64(len(blobData)), uint64(file.size))
+						}
+						return writeErr
+					}
+					err := r.sanitizeError(file, writeToFile())
+					if err != nil {
+						return err
+					}
+				}
+			}
+			return nil
+		})
+}
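
The writeToFile closure moved into downloadBlobs balances the two requirements spelled out in its comment: the target file must be created exactly once, yet later blobs should be written concurrently. The first writer keeps the per-file lock for its entire write, so the file exists before any other goroutine touches it; subsequent writers take the lock only long enough to see that creation already happened. A minimal sketch of that pattern under assumed names (fileState and writeAt are hypothetical, not restic's API):

package main

import (
	"fmt"
	"os"
	"sync"
)

// fileState is a hypothetical stand-in for the per-file bookkeeping that
// restic keeps in fileInfo (lock, inProgress, size).
type fileState struct {
	lock       sync.Mutex
	inProgress bool  // set once some goroutine has created the file
	size       int64 // final size, used when creating the file
}

// writeAt writes data at offset, creating the file exactly once: the first
// writer holds the lock for its entire write, so the file exists before any
// other goroutine touches it; later writers release the lock immediately
// and write concurrently.
func writeAt(path string, st *fileState, data []byte, offset int64) error {
	createSize := int64(-1)
	st.lock.Lock()
	if st.inProgress {
		// file already created; write without holding the lock
		st.lock.Unlock()
	} else {
		// first blob: keep the lock until this write completes
		defer st.lock.Unlock()
		st.inProgress = true
		createSize = st.size
	}

	flags := os.O_WRONLY
	if createSize >= 0 {
		flags |= os.O_CREATE
	}
	f, err := os.OpenFile(path, flags, 0o600)
	if err != nil {
		return err
	}
	defer f.Close()
	if createSize >= 0 {
		if err := f.Truncate(createSize); err != nil {
			return err
		}
	}
	_, err = f.WriteAt(data, offset)
	return err
}

func main() {
	st := &fileState{size: 8}
	var wg sync.WaitGroup
	for i, blob := range [][]byte{[]byte("rest"), []byte("ored")} {
		wg.Add(1)
		go func(off int64, b []byte) {
			defer wg.Done()
			if err := writeAt("out.bin", st, b, off); err != nil {
				fmt.Println("write failed:", err)
			}
		}(int64(i*4), blob)
	}
	wg.Wait()
}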