2
2
mirror of https://github.com/octoleo/restic.git synced 2024-06-01 08:30:49 +00:00
restic/internal/repository/repack.go

193 lines
4.5 KiB
Go
Raw Normal View History

package repository

import (
	"context"
	"os"
	"sync"

	"github.com/restic/restic/internal/debug"
	"github.com/restic/restic/internal/errors"
	"github.com/restic/restic/internal/fs"
	"github.com/restic/restic/internal/pack"
	"github.com/restic/restic/internal/restic"

	"golang.org/x/sync/errgroup"
)

// numRepackWorkers is the number of goroutines used for each stage of the
// repack pipeline (downloading packs and re-saving blobs).
const numRepackWorkers = 8
// Repack takes a list of packs together with a list of blobs contained in
// these packs. Each pack is loaded and the blobs listed in keepBlobs is saved
// into a new pack. Returned is the list of obsolete packs which can then
// be removed.
func Repack(ctx context.Context, repo restic.Repository, packs restic.IDSet, keepBlobs restic.BlobSet, p *restic.Progress) (obsoletePacks restic.IDSet, err error) {
if p != nil {
p.Start()
defer p.Done()
}
2016-09-27 20:35:08 +00:00
debug.Log("repacking %d packs while keeping %d blobs", len(packs), len(keepBlobs))
2020-09-19 22:45:11 +00:00
wg, ctx := errgroup.WithContext(ctx)
2020-09-19 22:45:11 +00:00
downloadQueue := make(chan restic.ID)
wg.Go(func() error {
defer close(downloadQueue)
for packID := range packs {
select {
case downloadQueue <- packID:
case <-ctx.Done():
return ctx.Err()
}
}
2020-09-19 22:45:11 +00:00
return nil
})
2020-09-19 22:45:11 +00:00
type repackJob struct {
tempfile *os.File
hash restic.ID
packLength int64
}
processQueue := make(chan repackJob)
// used to close processQueue once all downloaders have finished
var downloadWG sync.WaitGroup
2020-09-19 22:45:11 +00:00
downloader := func() error {
defer downloadWG.Done()
for packID := range downloadQueue {
// load the complete pack into a temp file
h := restic.Handle{Type: restic.PackFile, Name: packID.String()}
2020-09-19 22:45:11 +00:00
tempfile, hash, packLength, err := DownloadAndHash(ctx, repo.Backend(), h)
if err != nil {
return errors.Wrap(err, "Repack")
}
2020-09-19 22:45:11 +00:00
debug.Log("pack %v loaded (%d bytes), hash %v", packID, packLength, hash)
2020-09-19 22:45:11 +00:00
if !packID.Equal(hash) {
return errors.Errorf("hash does not match id: want %v, got %v", packID, hash)
}
2020-09-19 22:45:11 +00:00
select {
case processQueue <- repackJob{tempfile, hash, packLength}:
case <-ctx.Done():
return ctx.Err()
}
2020-09-19 22:45:11 +00:00
}
return nil
}
2020-09-19 22:45:11 +00:00
downloadWG.Add(numRepackWorkers)
for i := 0; i < numRepackWorkers; i++ {
wg.Go(downloader)
}
wg.Go(func() error {
downloadWG.Wait()
close(processQueue)
return nil
})
var keepMutex sync.Mutex
worker := func() error {
for job := range processQueue {
tempfile, packID, packLength := job.tempfile, job.hash, job.packLength
blobs, err := pack.List(repo.Key(), tempfile, packLength)
if err != nil {
2020-09-19 22:45:11 +00:00
return err
}
2020-09-19 22:45:11 +00:00
debug.Log("processing pack %v, blobs: %v", packID, len(blobs))
var buf []byte
for _, entry := range blobs {
h := restic.BlobHandle{ID: entry.ID, Type: entry.Type}
keepMutex.Lock()
shouldKeep := keepBlobs.Has(h)
keepMutex.Unlock()
if !shouldKeep {
continue
}
debug.Log(" process blob %v", h)
if uint(cap(buf)) < entry.Length {
buf = make([]byte, entry.Length)
}
buf = buf[:entry.Length]
n, err := tempfile.ReadAt(buf, int64(entry.Offset))
if err != nil {
return errors.Wrap(err, "ReadAt")
}
if n != len(buf) {
return errors.Errorf("read blob %v from %v: not enough bytes read, want %v, got %v",
h, tempfile.Name(), len(buf), n)
}
nonce, ciphertext := buf[:repo.Key().NonceSize()], buf[repo.Key().NonceSize():]
plaintext, err := repo.Key().Open(ciphertext[:0], nonce, ciphertext, nil)
if err != nil {
return err
}
id := restic.Hash(plaintext)
if !id.Equal(entry.ID) {
debug.Log("read blob %v/%v from %v: wrong data returned, hash is %v",
h.Type, h.ID, tempfile.Name(), id)
return errors.Errorf("read blob %v from %v: wrong data returned, hash is %v",
h, tempfile.Name(), id)
}
keepMutex.Lock()
// recheck whether some other worker was faster
shouldKeep = keepBlobs.Has(h)
if shouldKeep {
keepBlobs.Delete(h)
}
keepMutex.Unlock()
if !shouldKeep {
continue
}
// We do want to save already saved blobs!
_, _, err = repo.SaveBlob(ctx, entry.Type, plaintext, entry.ID, true)
if err != nil {
return err
}
debug.Log(" saved blob %v", entry.ID)
}
2020-09-19 22:45:11 +00:00
if err = tempfile.Close(); err != nil {
return errors.Wrap(err, "Close")
}
2020-09-19 22:45:11 +00:00
if err = fs.RemoveIfExists(tempfile.Name()); err != nil {
return errors.Wrap(err, "Remove")
}
if p != nil {
p.Report(restic.Stat{Blobs: 1})
}
}
2020-09-19 22:45:11 +00:00
return nil
}
2020-09-19 22:45:11 +00:00
for i := 0; i < numRepackWorkers; i++ {
wg.Go(worker)
}
2020-09-19 22:45:11 +00:00
if err := wg.Wait(); err != nil {
return nil, err
}
if err := repo.Flush(ctx); err != nil {
return nil, err
}
return packs, nil
}