restic/internal/repository/packer_manager.go

package repository

import (
	"context"
	"crypto/sha256"
	"os"
	"sync"

	"github.com/restic/restic/internal/errors"
	"github.com/restic/restic/internal/hashing"
	"github.com/restic/restic/internal/restic"

	"github.com/restic/restic/internal/crypto"
	"github.com/restic/restic/internal/debug"
	"github.com/restic/restic/internal/fs"
	"github.com/restic/restic/internal/pack"
)

// Saver implements saving data in a backend.
type Saver interface {
	Save(context.Context, restic.Handle, restic.RewindReader) error
}

// Packer holds a pack.Packer together with a hash writer.
type Packer struct {
	*pack.Packer
	hw      *hashing.Writer // computes the SHA-256 of the pack data as it is written
	tmpfile *os.File        // on-disk buffer for the pack until it has been uploaded
}

// packerManager keeps a list of open packs and creates new ones on demand.
type packerManager struct {
	be      Saver
	key     *crypto.Key
	pm      sync.Mutex // guards packers
	packers []*Packer  // open (unfinished) packs
}

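// minPackSize is the size at which an open pack is considered full enough to
// be uploaded; smaller packs are handed back to the packerManager so more
// blobs can be added. The check itself lives in the calling code, not here.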
const minPackSize = 4 * 1024 * 1024

// newPackerManager returns a new packer manager which writes temporary files
// to a temporary directory.
func newPackerManager(be Saver, key *crypto.Key) *packerManager {
	return &packerManager{
		be:  be,
		key: key,
	}
}

// findPacker returns a packer to store a new blob. Either a new one is
// created or one is returned that already has some blobs.
func (r *packerManager) findPacker() (packer *Packer, err error) {
	r.pm.Lock()
	defer r.pm.Unlock()

	// search for a suitable packer
	if len(r.packers) > 0 {
		p := r.packers[0]
		r.packers = r.packers[1:]
		return p, nil
	}

	// no suitable packer found, return new
	debug.Log("create new pack")
	tmpfile, err := fs.TempFile("", "restic-temp-pack-")
	if err != nil {
		return nil, errors.Wrap(err, "fs.TempFile")
	}

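	// All writes to the temp file go through a hashing.Writer, so the SHA-256
	// of the pack's content is computed on the fly; savePacker later reads it
	// from hw to derive the pack ID.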
	hw := hashing.NewWriter(tmpfile, sha256.New())
	p := pack.NewPacker(r.key, hw)
	packer = &Packer{
		Packer:  p,
		hw:      hw,
		tmpfile: tmpfile,
	}

	return packer, nil
}

// insertPacker appends p to r.packers.
func (r *packerManager) insertPacker(p *Packer) {
	r.pm.Lock()
	defer r.pm.Unlock()

	r.packers = append(r.packers, p)
	debug.Log("%d packers\n", len(r.packers))
}

// savePacker stores p in the backend.
func (r *Repository) savePacker(ctx context.Context, t restic.BlobType, p *Packer) error {
	debug.Log("save packer for %v with %d blobs (%d bytes)\n", t, p.Packer.Count(), p.Packer.Size())
	_, err := p.Packer.Finalize()
	if err != nil {
		return err
	}

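	// The pack file is named after the SHA-256 of its content, which the
	// hashing writer accumulated while the pack was being written.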
	id := restic.IDFromHash(p.hw.Sum(nil))
	h := restic.Handle{Type: restic.DataFile, Name: id.String()}

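	// Wrap the temp file in a rewindable reader, so the backend can re-read
	// the data from the start, e.g. when an upload has to be retried.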
	rd, err := restic.NewFileReader(p.tmpfile)
	if err != nil {
		return err
	}

	err = r.be.Save(ctx, h, rd)
	if err != nil {
		debug.Log("Save(%v) error: %v", h, err)
		return err
	}

	debug.Log("saved as %v", h)

	if t == restic.TreeBlob && r.Cache != nil {
		debug.Log("saving tree pack file in cache")

		// rewind the temp file so the cache reads it from the beginning
		_, err = p.tmpfile.Seek(0, 0)
		if err != nil {
			return errors.Wrap(err, "Seek")
		}

		err := r.Cache.Save(h, p.tmpfile)
		if err != nil {
			return err
		}
	}

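	// The pack is now stored in the backend (and possibly the cache), so the
	// temporary file can be closed and removed.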
	err = p.tmpfile.Close()
	if err != nil {
		return errors.Wrap(err, "close tempfile")
	}

	err = fs.RemoveIfExists(p.tmpfile.Name())
	if err != nil {
		return errors.Wrap(err, "Remove")
	}

	// Update the in-memory index: record each blob's type, offset and length
	// within the new pack, keyed by the pack ID computed above.
	for _, b := range p.Packer.Blobs() {
		debug.Log(" updating blob %v to pack %v", b.ID, id)
		r.idx.Store(restic.PackedBlob{
			Blob: restic.Blob{
				Type:   b.Type,
				ID:     b.ID,
				Offset: b.Offset,
				Length: uint(b.Length),
			},
			PackID: id,
		})
	}

	return nil
}

// countPacker returns the number of open (unfinished) packers.
func (r *packerManager) countPacker() int {
	r.pm.Lock()
	defer r.pm.Unlock()

	return len(r.packers)
}
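
// Sketch of how these pieces fit together, assuming a caller roughly like
// restic's blob-saving path (the caller itself is not part of this file;
// ctx, t and the blob-adding step are placeholders):
//
//	packer, err := r.findPacker()              // reuse an open pack or start a new temp file
//	// ... add the encrypted blob via the embedded pack.Packer ...
//	if packer.Size() < minPackSize {
//		r.insertPacker(packer)             // not full yet: keep it open for more blobs
//	} else {
//		err = r.savePacker(ctx, t, packer) // full enough: finalize, upload, index, clean up
//	}
//
// Here r is assumed to be a *Repository that also exposes the packerManager's
// methods; savePacker itself is defined on *Repository above.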