
restore: Don't save (part of) pack in memory

Alexander Weiss 2020-11-18 12:36:06 +01:00
parent 8b84c96d9d
commit 3e0acf1395
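
The change below stops buffering the downloaded pack range in a single in-memory slice. Instead, the wanted blobs are sorted by their offset in the pack, the download stream is wrapped in a bufio.Reader (capped at 4 MiB), the gaps between blobs are skipped with Discard, and each blob is read into its own exactly-sized buffer. A minimal standalone sketch of that pattern follows (illustrative only: blobInfo, streamBlobs and the test data are made up here, and the real code additionally decrypts each blob and writes it to the target files):

package main

import (
	"bufio"
	"fmt"
	"io"
	"sort"
	"strings"
)

// blobInfo is a simplified stand-in for the per-blob metadata in the diff:
// where the blob starts inside the pack and how many bytes it occupies.
type blobInfo struct {
	offset int64
	length int
}

const maxBufferSize = 4 * 1024 * 1024 // cap for the bufio buffer, mirroring the diff

// streamBlobs reads the given blobs sequentially from rd, which is assumed to
// deliver the pack bytes of the range [start, end). Only one blob is held in
// memory at a time; the bytes between blobs are skipped with Discard.
func streamBlobs(rd io.Reader, start, end int64, blobs []blobInfo) ([][]byte, error) {
	// Sort by offset so the stream can be consumed strictly forward.
	sort.Slice(blobs, func(i, j int) bool { return blobs[i].offset < blobs[j].offset })

	bufferSize := int(end - start)
	if bufferSize > maxBufferSize {
		bufferSize = maxBufferSize
	}
	bufRd := bufio.NewReaderSize(rd, bufferSize)

	out := make([][]byte, 0, len(blobs))
	currentBlobEnd := start
	for _, blob := range blobs {
		// Skip the gap between the previous blob and this one.
		if _, err := bufRd.Discard(int(blob.offset - currentBlobEnd)); err != nil {
			return nil, err
		}
		buf := make([]byte, blob.length)
		if _, err := io.ReadFull(bufRd, buf); err != nil {
			return nil, err
		}
		out = append(out, buf)
		currentBlobEnd = blob.offset + int64(blob.length)
	}
	return out, nil
}

func main() {
	// Fake "pack" range [0, 12) containing two blobs: "AAAA" at offset 2 and "BB" at offset 8.
	pack := strings.NewReader("xxAAAAxxBBxx")
	blobs := []blobInfo{{offset: 8, length: 2}, {offset: 2, length: 4}}
	data, err := streamBlobs(pack, 0, 12, blobs)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", data) // ["AAAA" "BB"]
}

With this layout the memory needed per pack download is bounded by the largest blob plus the (capped) bufio buffer, rather than by the full byte range spanned by the wanted blobs.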


@@ -1,11 +1,12 @@
package restorer
import (
"bytes"
"bufio"
"context"
"io"
"math"
"path/filepath"
"sort"
"sync"
"github.com/restic/restic/internal/crypto"
@@ -179,6 +180,8 @@ func (r *fileRestorer) restoreFiles(ctx context.Context) error {
return nil
}
const maxBufferSize = 4 * 1024 * 1024
func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) {
// calculate pack byte range and blob->[]files->[]offsets mappings
@@ -226,18 +229,12 @@ func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) {
}
}
packData := make([]byte, int(end-start))
h := restic.Handle{Type: restic.PackFile, Name: pack.id.String()}
err := r.packLoader(ctx, h, int(end-start), start, func(rd io.Reader) error {
l, err := io.ReadFull(rd, packData)
if err != nil {
return err
sortedBlobs := make([]restic.ID, 0, len(blobs))
for blobID := range blobs {
sortedBlobs = append(sortedBlobs, blobID)
}
if l != len(packData) {
return errors.Errorf("unexpected pack size: expected %d but got %d", len(packData), l)
}
return nil
sort.Slice(sortedBlobs, func(i, j int) bool {
return blobs[sortedBlobs[i]].offset < blobs[sortedBlobs[j]].offset
})
markFileError := func(file *fileInfo, err error) {
@@ -248,23 +245,28 @@ func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) {
}
}
h := restic.Handle{Type: restic.PackFile, Name: pack.id.String()}
err := r.packLoader(ctx, h, int(end-start), start, func(rd io.Reader) error {
bufferSize := int(end - start)
if bufferSize > maxBufferSize {
bufferSize = maxBufferSize
}
BufRd := bufio.NewReaderSize(rd, bufferSize)
currentBlobEnd := start
for _, blobID := range sortedBlobs {
blob := blobs[blobID]
_, err := BufRd.Discard(int(blob.offset - currentBlobEnd))
if err != nil {
for file := range pack.files {
markFileError(file, err)
return err
}
return
}
rd := bytes.NewReader(packData)
for blobID, blob := range blobs {
blobData, err := r.loadBlob(rd, blobID, blob.offset-start, blob.length)
blobData, err := r.loadBlob(BufRd, blobID, blob.length)
if err != nil {
for file := range blob.files {
markFileError(file, err)
}
continue
}
currentBlobEnd = blob.offset + int64(blob.length)
for file, offsets := range blob.files {
for _, offset := range offsets {
writeToFile := func() error {
@@ -294,14 +296,25 @@ func (r *fileRestorer) downloadPack(ctx context.Context, pack *packInfo) {
}
}
}
return nil
})
if err != nil {
for file := range pack.files {
markFileError(file, err)
}
return
}
func (r *fileRestorer) loadBlob(rd io.ReaderAt, blobID restic.ID, offset int64, length int) ([]byte, error) {
}
func (r *fileRestorer) loadBlob(rd io.Reader, blobID restic.ID, length int) ([]byte, error) {
// TODO reconcile with Repository#loadBlob implementation
buf := make([]byte, length)
n, err := rd.ReadAt(buf, offset)
n, err := rd.Read(buf)
if err != nil {
return nil, err
}
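
A side note on the new io.Reader-based loadBlob signature: a single Read call may return fewer than len(buf) bytes even when more data is still coming, so code that needs a completely filled buffer usually wraps the read in io.ReadFull. A standalone sketch of that idiom (not restic code):

package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
)

// readExactly returns exactly length bytes from rd or an error.
// io.ReadFull keeps reading until the buffer is full and reports
// io.ErrUnexpectedEOF if the stream ends early.
func readExactly(rd io.Reader, length int) ([]byte, error) {
	buf := make([]byte, length)
	if _, err := io.ReadFull(rd, buf); err != nil {
		return nil, err
	}
	return buf, nil
}

func main() {
	rd := bufio.NewReader(strings.NewReader("hello world"))
	blob, err := readExactly(rd, 5)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", blob) // "hello"
}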