package repository

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"os"

	"github.com/restic/restic/internal/cache"
	"github.com/restic/restic/internal/crypto"
	"github.com/restic/restic/internal/debug"
	"github.com/restic/restic/internal/errors"
	"github.com/restic/restic/internal/fs"
	"github.com/restic/restic/internal/hashing"
	"github.com/restic/restic/internal/pack"
	"github.com/restic/restic/internal/restic"

	"github.com/minio/sha256-simd"
	"golang.org/x/sync/errgroup"
)

// Repository is used to access a repository in a backend.
type Repository struct {
	be      restic.Backend
	cfg     restic.Config
	key     *crypto.Key
	keyName string
	idx     *MasterIndex
	restic.Cache

	noAutoIndexUpdate bool

	treePM *packerManager
	dataPM *packerManager
}

// New returns a new repository with backend be.
func New(be restic.Backend) *Repository {
	repo := &Repository{
		be:     be,
		idx:    NewMasterIndex(),
		dataPM: newPackerManager(be, nil),
		treePM: newPackerManager(be, nil),
	}

	return repo
}

// DisableAutoIndexUpdate deactivates automatic index updates for this
// repository.
func (r *Repository) DisableAutoIndexUpdate() {
	r.noAutoIndexUpdate = true
}

// Config returns the repository configuration.
func (r *Repository) Config() restic.Config {
	return r.cfg
}

// UseCache replaces the backend with the wrapped cache.
func (r *Repository) UseCache(c restic.Cache) {
	if c == nil {
		return
	}

	debug.Log("using cache")
	r.Cache = c
	r.be = c.Wrap(r.be)
}

// PrefixLength returns the number of bytes required so that all prefixes of
// all IDs of type t are unique.
func (r *Repository) PrefixLength(t restic.FileType) (int, error) {
	return restic.PrefixLength(r.be, t)
}

// LoadAndDecrypt loads and decrypts the file with the given type and ID, using
// the supplied buffer (which must be empty). If the buffer is nil, a new
// buffer will be allocated and returned.
func (r *Repository) LoadAndDecrypt(ctx context.Context, buf []byte, t restic.FileType, id restic.ID) ([]byte, error) {
	if len(buf) != 0 {
		panic("buf is not empty")
	}

	debug.Log("load %v with id %v", t, id)

	h := restic.Handle{Type: t, Name: id.String()}
	err := r.be.Load(ctx, h, 0, 0, func(rd io.Reader) error {
		// make sure this call is idempotent, in case an error occurs
		wr := bytes.NewBuffer(buf[:0])
		_, cerr := io.Copy(wr, rd)
		if cerr != nil {
			return cerr
		}
		buf = wr.Bytes()
		return nil
	})

	if err != nil {
		return nil, err
	}

	if t != restic.ConfigFile && !restic.Hash(buf).Equal(id) {
		return nil, errors.Errorf("load %v: invalid data returned", h)
	}

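	// an unpacked file is stored as the random nonce followed by the sealed
	// ciphertext, so split the buffer accordingly before opening it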
	nonce, ciphertext := buf[:r.key.NonceSize()], buf[r.key.NonceSize():]
	plaintext, err := r.key.Open(ciphertext[:0], nonce, ciphertext, nil)
	if err != nil {
		return nil, err
	}

	return plaintext, nil
}

type haver interface {
	Has(restic.Handle) bool
}

// sortCachedPacksFirst moves all cached pack files to the front of blobs.
func sortCachedPacksFirst(cache haver, blobs []restic.PackedBlob) {
	if cache == nil {
		return
	}

	// no need to sort a list with one element
	if len(blobs) == 1 {
		return
	}

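	// cached aliases the underlying array of blobs: cached entries are written
	// back in place at the front, the remaining entries are collected
	// separately and copied behind them afterwards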
	cached := blobs[:0]
	noncached := make([]restic.PackedBlob, 0, len(blobs)/2)

	for _, blob := range blobs {
		if cache.Has(restic.Handle{Type: restic.PackFile, Name: blob.PackID.String()}) {
			cached = append(cached, blob)
			continue
		}
		noncached = append(noncached, blob)
	}

	copy(blobs[len(cached):], noncached)
}

// LoadBlob loads a blob of type t from the repository.
// It may use all of buf[:cap(buf)] as scratch space.
func (r *Repository) LoadBlob(ctx context.Context, t restic.BlobType, id restic.ID, buf []byte) ([]byte, error) {
	debug.Log("load %v with id %v (buf len %v, cap %d)", t, id, len(buf), cap(buf))

	// lookup packs
	blobs := r.idx.Lookup(id, t)
	if len(blobs) == 0 {
		debug.Log("id %v not found in index", id)
		return nil, errors.Errorf("id %v not found in repository", id)
	}

	// try cached pack files first
	sortCachedPacksFirst(r.Cache, blobs)

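	// try every pack that contains a copy of the blob; remember the last error
	// and return it only if no copy can be loaded and decrypted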
	var lastError error
	for _, blob := range blobs {
		debug.Log("blob %v/%v found: %v", t, id, blob)

		if blob.Type != t {
			debug.Log("blob %v has wrong block type, want %v", blob, t)
		}

		// load blob from pack
		h := restic.Handle{Type: restic.PackFile, Name: blob.PackID.String()}

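		// make sure buf is exactly blob.Length bytes long, reusing the
		// caller's buffer when its capacity is large enough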
		switch {
		case cap(buf) < int(blob.Length):
			buf = make([]byte, blob.Length)
		case len(buf) != int(blob.Length):
			buf = buf[:blob.Length]
		}

		n, err := restic.ReadAt(ctx, r.be, h, int64(blob.Offset), buf)
		if err != nil {
			debug.Log("error loading blob %v: %v", blob, err)
			lastError = err
			continue
		}

		if uint(n) != blob.Length {
			lastError = errors.Errorf("error loading blob %v: wrong length returned, want %d, got %d",
				id.Str(), blob.Length, uint(n))
			debug.Log("lastError: %v", lastError)
			continue
		}

		// decrypt
		nonce, ciphertext := buf[:r.key.NonceSize()], buf[r.key.NonceSize():]
		plaintext, err := r.key.Open(ciphertext[:0], nonce, ciphertext, nil)
		if err != nil {
			lastError = errors.Errorf("decrypting blob %v failed: %v", id, err)
			continue
		}

		// check hash
		if !restic.Hash(plaintext).Equal(id) {
			lastError = errors.Errorf("blob %v returned invalid hash", id)
			continue
		}

		// move decrypted data to the start of the buffer
		copy(buf, plaintext)
		return buf[:len(plaintext)], nil
	}

	if lastError != nil {
		return nil, lastError
	}

	return nil, errors.Errorf("loading blob %v from %v packs failed", id.Str(), len(blobs))
}

// LoadJSONUnpacked decrypts the data and afterwards calls json.Unmarshal on
// the item.
func (r *Repository) LoadJSONUnpacked(ctx context.Context, t restic.FileType, id restic.ID, item interface{}) (err error) {
	buf, err := r.LoadAndDecrypt(ctx, nil, t, id)
	if err != nil {
		return err
	}

	return json.Unmarshal(buf, item)
}

// LookupBlobSize returns the size of blob id and whether it is present in the
// index.
func (r *Repository) LookupBlobSize(id restic.ID, tpe restic.BlobType) (uint, bool) {
	return r.idx.LookupSize(id, tpe)
}

// SaveAndEncrypt encrypts data and stores it to the backend as type t. If data
// is small enough, it will be packed together with other small blobs.
// The caller must ensure that the id matches the data.
func (r *Repository) SaveAndEncrypt(ctx context.Context, t restic.BlobType, data []byte, id restic.ID) error {
	debug.Log("save id %v (%v, %d bytes)", id, t, len(data))

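	// the stored blob consists of the random nonce followed by the sealed
	// data; LoadBlob splits the buffer the same way when reading it back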
	nonce := crypto.NewRandomNonce()

	ciphertext := make([]byte, 0, restic.CiphertextLength(len(data)))
	ciphertext = append(ciphertext, nonce...)

	// encrypt blob
	ciphertext = r.key.Seal(ciphertext, nonce, data, nil)

	// find suitable packer and add blob
	var pm *packerManager

	switch t {
	case restic.TreeBlob:
		pm = r.treePM
	case restic.DataBlob:
		pm = r.dataPM
	default:
		panic(fmt.Sprintf("invalid type: %v", t))
	}

	packer, err := pm.findPacker()
	if err != nil {
		return err
	}

	// save ciphertext
	_, err = packer.Add(t, id, ciphertext)
	if err != nil {
		return err
	}

	// if the pack is not full enough, put back to the list
	if packer.Size() < minPackSize {
		debug.Log("pack is not full enough (%d bytes)", packer.Size())
		pm.insertPacker(packer)
		return nil
	}

	// else write the pack to the backend
	return r.savePacker(ctx, t, packer)
}

// SaveJSONUnpacked serialises item as JSON and encrypts and saves it in the
// backend as type t, without a pack. It returns the storage hash.
func (r *Repository) SaveJSONUnpacked(ctx context.Context, t restic.FileType, item interface{}) (restic.ID, error) {
	debug.Log("save new blob %v", t)
	plaintext, err := json.Marshal(item)
	if err != nil {
		return restic.ID{}, errors.Wrap(err, "json.Marshal")
	}

	return r.SaveUnpacked(ctx, t, plaintext)
}

// SaveUnpacked encrypts data and stores it in the backend. Returned is the
// storage hash.
func (r *Repository) SaveUnpacked(ctx context.Context, t restic.FileType, p []byte) (id restic.ID, err error) {
	ciphertext := restic.NewBlobBuffer(len(p))
	ciphertext = ciphertext[:0]
	nonce := crypto.NewRandomNonce()
	ciphertext = append(ciphertext, nonce...)

	ciphertext = r.key.Seal(ciphertext, nonce, p, nil)

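	// unpacked files are addressed by the hash of their encrypted content,
	// unlike blobs, which are addressed by the hash of the plaintext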
	id = restic.Hash(ciphertext)
	h := restic.Handle{Type: t, Name: id.String()}

	err = r.be.Save(ctx, h, restic.NewByteReader(ciphertext))
	if err != nil {
		debug.Log("error saving blob %v: %v", h, err)
		return restic.ID{}, err
	}

	debug.Log("blob %v saved", h)
	return id, nil
}

// Flush saves all remaining packs and the index.
func (r *Repository) Flush(ctx context.Context) error {
	if err := r.FlushPacks(ctx); err != nil {
		return err
	}

	// Save index after flushing
	return r.SaveIndex(ctx)
}

// FlushPacks saves all remaining packs.
func (r *Repository) FlushPacks(ctx context.Context) error {
	pms := []struct {
		t  restic.BlobType
		pm *packerManager
	}{
		{restic.DataBlob, r.dataPM},
		{restic.TreeBlob, r.treePM},
	}

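	// write out the pending packs of both packer managers, holding the
	// respective packerManager lock while doing so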
	for _, p := range pms {
		p.pm.pm.Lock()

		debug.Log("manually flushing %d packs", len(p.pm.packers))
		for _, packer := range p.pm.packers {
			err := r.savePacker(ctx, p.t, packer)
			if err != nil {
				p.pm.pm.Unlock()
				return err
			}
		}
		p.pm.packers = p.pm.packers[:0]
		p.pm.pm.Unlock()
	}
	return nil
}

// Backend returns the backend for the repository.
func (r *Repository) Backend() restic.Backend {
	return r.be
}

// Index returns the currently used MasterIndex.
func (r *Repository) Index() restic.MasterIndex {
	return r.idx
}

// SetIndex instructs the repository to use the given index.
func (r *Repository) SetIndex(i restic.MasterIndex) error {
	r.idx = i.(*MasterIndex)

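	// collect the IDs of all index files known to the new master index so
	// that PrepareCache can drop cached index files that are no longer present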
	ids := restic.NewIDSet()
	for _, idx := range r.idx.All() {
		indexIDs, err := idx.IDs()
		if err != nil {
			debug.Log("not using index, ID() returned error %v", err)
			continue
		}
		for _, id := range indexIDs {
			ids.Insert(id)
		}
	}

	return r.PrepareCache(ids)
}

// SaveIndex saves an index in the repository.
func SaveIndex(ctx context.Context, repo restic.Repository, index *Index) (restic.ID, error) {
	buf := bytes.NewBuffer(nil)

	err := index.Encode(buf)
	if err != nil {
		return restic.ID{}, err
	}

	return repo.SaveUnpacked(ctx, restic.IndexFile, buf.Bytes())
}

// saveIndex saves all indexes in the backend.
func (r *Repository) saveIndex(ctx context.Context, indexes ...*Index) error {
	for i, idx := range indexes {
		debug.Log("Saving index %d", i)

		sid, err := SaveIndex(ctx, r, idx)
		if err != nil {
			return err
		}

		debug.Log("Saved index %d as %v", i, sid)
	}
	r.idx.MergeFinalIndexes()

	return nil
}

// SaveIndex saves all new indexes in the backend.
func (r *Repository) SaveIndex(ctx context.Context) error {
	return r.saveIndex(ctx, r.idx.FinalizeNotFinalIndexes()...)
}

// SaveFullIndex saves all full indexes in the backend.
func (r *Repository) SaveFullIndex(ctx context.Context) error {
	return r.saveIndex(ctx, r.idx.FinalizeFullIndexes()...)
}

const loadIndexParallelism = 4

// LoadIndex loads all index files from the backend in parallel and stores them
// in the master index. The first error that occurred is returned.
func (r *Repository) LoadIndex(ctx context.Context) error {
	debug.Log("Loading index")

	// track spawned goroutines using wg, create a new context which is
	// cancelled as soon as an error occurs.
	wg, ctx := errgroup.WithContext(ctx)

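	// the index files are processed as a small pipeline: one goroutine lists
	// the files, loadIndexParallelism workers decode them, and a final
	// goroutine inserts the decoded indexes into the master index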
	type FileInfo struct {
		restic.ID
		Size int64
	}
	ch := make(chan FileInfo)
	indexCh := make(chan *Index)

	// send list of index files through ch, which is closed afterwards
	wg.Go(func() error {
		defer close(ch)
		return r.List(ctx, restic.IndexFile, func(id restic.ID, size int64) error {
			select {
			case <-ctx.Done():
				return nil
			case ch <- FileInfo{id, size}:
			}
			return nil
		})
	})

	// a worker receives an index ID from ch, loads the index, and sends it to indexCh
	worker := func() error {
		var buf []byte
		for fi := range ch {
			var err error
			var idx *Index
			idx, buf, err = LoadIndexWithDecoder(ctx, r, buf[:0], fi.ID, DecodeIndex)
			if err != nil && errors.Cause(err) == ErrOldIndexFormat {
				idx, buf, err = LoadIndexWithDecoder(ctx, r, buf[:0], fi.ID, DecodeOldIndex)
			}

			if err != nil {
				return errors.Wrap(err, fmt.Sprintf("unable to load index %v", fi.ID.Str()))
			}

			select {
			case indexCh <- idx:
			case <-ctx.Done():
			}
		}

		return nil
	}

	// final closes indexCh after all workers have terminated
	final := func() {
		close(indexCh)
	}

	// run workers on ch
	wg.Go(func() error {
		return RunWorkers(ctx, loadIndexParallelism, worker, final)
	})

	// receive decoded indexes
	validIndex := restic.NewIDSet()
	wg.Go(func() error {
		for idx := range indexCh {
			ids, err := idx.IDs()
			if err == nil {
				for _, id := range ids {
					validIndex.Insert(id)
				}
			}

			r.idx.Insert(idx)
		}
		r.idx.MergeFinalIndexes()
		return nil
	})

	err := wg.Wait()
	if err != nil {
		return errors.Fatal(err.Error())
	}

	// remove index files from the cache which have been removed in the repo
	err = r.PrepareCache(validIndex)
	if err != nil {
		return err
	}

	return nil
}

// PrepareCache initializes the local cache. indexIDs is the list of IDs of
// index files still present in the repo.
func (r *Repository) PrepareCache(indexIDs restic.IDSet) error {
	if r.Cache == nil {
		return nil
	}

	debug.Log("prepare cache with %d index files", len(indexIDs))

	// clear old index files
	err := r.Cache.Clear(restic.IndexFile, indexIDs)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error clearing index files in cache: %v\n", err)
	}

	packs := restic.NewIDSet()
	for _, idx := range r.idx.All() {
		for id := range idx.Packs() {
			packs.Insert(id)
		}
	}

	// clear old packs
	err = r.Cache.Clear(restic.PackFile, packs)
	if err != nil {
		fmt.Fprintf(os.Stderr, "error clearing pack files in cache: %v\n", err)
	}

	treePacks := restic.NewIDSet()
	for _, idx := range r.idx.All() {
		for _, id := range idx.TreePacks() {
			treePacks.Insert(id)
		}
	}

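	// readahead is only enabled for pack files that contain tree blobs; data
	// pack files are fetched on demand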
	// use readahead
	debug.Log("using readahead")
	cache := r.Cache.(*cache.Cache)
	cache.PerformReadahead = func(h restic.Handle) bool {
		if h.Type != restic.PackFile {
			debug.Log("no readahead for %v, is not a pack file", h)
			return false
		}

		id, err := restic.ParseID(h.Name)
		if err != nil {
			debug.Log("no readahead for %v, invalid ID", h)
			return false
		}

		if treePacks.Has(id) {
			debug.Log("perform readahead for %v", h)
			return true
		}
		debug.Log("no readahead for %v, not tree file", h)
		return false
	}

	return nil
}

// LoadIndex loads the index id from backend and returns it.
func LoadIndex(ctx context.Context, repo restic.Repository, id restic.ID) (*Index, error) {
	idx, _, err := LoadIndexWithDecoder(ctx, repo, nil, id, DecodeIndex)
	if err == nil {
		return idx, nil
	}

	if errors.Cause(err) == ErrOldIndexFormat {
		fmt.Fprintf(os.Stderr, "index %v has old format\n", id.Str())
		idx, _, err := LoadIndexWithDecoder(ctx, repo, nil, id, DecodeOldIndex)
		return idx, err
	}

	return nil, err
}

// SearchKey finds a key with the supplied password; afterwards the config is
// read and parsed. It tries at most maxKeys key files in the repo.
func (r *Repository) SearchKey(ctx context.Context, password string, maxKeys int, keyHint string) error {
	key, err := SearchKey(ctx, r, password, maxKeys, keyHint)
	if err != nil {
		return err
	}

	r.key = key.master
	r.dataPM.key = key.master
	r.treePM.key = key.master
	r.keyName = key.Name()
	r.cfg, err = restic.LoadConfig(ctx, r)
	if err != nil {
		return errors.Fatalf("config cannot be loaded: %v", err)
	}
	return nil
}

// Init creates a new master key with the supplied password, initializes and
// saves the repository config.
func (r *Repository) Init(ctx context.Context, password string) error {
	has, err := r.be.Test(ctx, restic.Handle{Type: restic.ConfigFile})
	if err != nil {
		return err
	}
	if has {
		return errors.New("repository master key and config already initialized")
	}

	cfg, err := restic.CreateConfig()
	if err != nil {
		return err
	}

	return r.init(ctx, password, cfg)
}

// init creates a new master key with the supplied password and uses it to save
// the config into the repo.
func (r *Repository) init(ctx context.Context, password string, cfg restic.Config) error {
	key, err := createMasterKey(r, password)
	if err != nil {
		return err
	}

	r.key = key.master
	r.dataPM.key = key.master
	r.treePM.key = key.master
	r.keyName = key.Name()
	r.cfg = cfg
	_, err = r.SaveJSONUnpacked(ctx, restic.ConfigFile, cfg)
	return err
}

// Key returns the current master key.
func (r *Repository) Key() *crypto.Key {
	return r.key
}

// KeyName returns the name of the current key in the backend.
func (r *Repository) KeyName() string {
	return r.keyName
}

// List runs fn for all files of type t in the repo.
func (r *Repository) List(ctx context.Context, t restic.FileType, fn func(restic.ID, int64) error) error {
	return r.be.List(ctx, t, func(fi restic.FileInfo) error {
		id, err := restic.ParseID(fi.Name)
		if err != nil {
			debug.Log("unable to parse %v as an ID", fi.Name)
			return nil
		}
		return fn(id, fi.Size)
	})
}

// ListPack returns the list of blobs saved in the pack id and the length of
// the file as stored in the backend.
func (r *Repository) ListPack(ctx context.Context, id restic.ID, size int64) ([]restic.Blob, int64, error) {
	h := restic.Handle{Type: restic.PackFile, Name: id.String()}

	blobs, err := pack.List(r.Key(), restic.ReaderAt(r.Backend(), h), size)
	if err != nil {
		return nil, 0, err
	}

	return blobs, size, nil
}

// Delete calls backend.Delete() if implemented, and returns an error
// otherwise.
func (r *Repository) Delete(ctx context.Context) error {
	return r.be.Delete(ctx)
}

// Close closes the repository by closing the backend.
func (r *Repository) Close() error {
	return r.be.Close()
}

// SaveBlob saves a blob of type t into the repository.
// It takes care that no duplicates are saved; this can be overridden by
// setting storeDuplicate to true.
// If id is the null id, it will be computed and returned.
// It also returns whether the blob was already known before.
func (r *Repository) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (newID restic.ID, known bool, err error) {

	// compute plaintext hash if not already set
	if id.IsNull() {
		newID = restic.Hash(buf)
	} else {
		newID = id
	}

	// first try to add to pending blobs; if not successful, this blob is already known
	known = !r.idx.addPending(newID, t)

	// only save when needed or explicitly told
	if !known || storeDuplicate {
		err = r.SaveAndEncrypt(ctx, t, buf, newID)
	}

	return newID, known, err
}

// LoadTree loads a tree from the repository.
func (r *Repository) LoadTree(ctx context.Context, id restic.ID) (*restic.Tree, error) {
	debug.Log("load tree %v", id)

	buf, err := r.LoadBlob(ctx, restic.TreeBlob, id, nil)
	if err != nil {
		return nil, err
	}

	t := &restic.Tree{}
	err = json.Unmarshal(buf, t)
	if err != nil {
		return nil, err
	}

	return t, nil
}

// SaveTree stores a tree into the repository and returns the ID. The ID is
// checked against the index. The tree is only stored when the index does not
// contain the ID.
func (r *Repository) SaveTree(ctx context.Context, t *restic.Tree) (restic.ID, error) {
	buf, err := json.Marshal(t)
	if err != nil {
		return restic.ID{}, errors.Wrap(err, "MarshalJSON")
	}

	// append a newline so that the data is always consistent (json.Encoder
	// adds a newline after each object)
	buf = append(buf, '\n')

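	// passing the null ID lets SaveBlob compute the ID of the serialised tree
	// and skip the upload when the tree blob is already known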
	id, _, err := r.SaveBlob(ctx, restic.TreeBlob, buf, restic.ID{}, false)
	return id, err
}

// Loader allows loading data from a backend.
type Loader interface {
	Load(ctx context.Context, h restic.Handle, length int, offset int64, fn func(rd io.Reader) error) error
}

// DownloadAndHash is an all-in-one helper to download the content of the file at h
// to a temporary filesystem location and calculate the ID of the contents.
// The returned (temporary) file is positioned at the beginning of the file;
// it is the responsibility of the caller to close and delete the file.
func DownloadAndHash(ctx context.Context, be Loader, h restic.Handle) (tmpfile *os.File, hash restic.ID, size int64, err error) {
	tmpfile, err = fs.TempFile("", "restic-temp-")
	if err != nil {
		return nil, restic.ID{}, -1, errors.Wrap(err, "TempFile")
	}

	err = be.Load(ctx, h, 0, 0, func(rd io.Reader) (ierr error) {
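		// rewind and truncate the temp file first, so that a repeated
		// invocation of this callback does not leave stale data behind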
		_, ierr = tmpfile.Seek(0, io.SeekStart)
		if ierr == nil {
			ierr = tmpfile.Truncate(0)
		}
		if ierr != nil {
			return ierr
		}
		hrd := hashing.NewReader(rd, sha256.New())
		size, ierr = io.Copy(tmpfile, hrd)
		hash = restic.IDFromHash(hrd.Sum(nil))
		return ierr
	})
	if err != nil {
		tmpfile.Close()
		os.Remove(tmpfile.Name())
		return nil, restic.ID{}, -1, errors.Wrap(err, "Load")
	}

	_, err = tmpfile.Seek(0, io.SeekStart)
	if err != nil {
		tmpfile.Close()
		os.Remove(tmpfile.Name())
		return nil, restic.ID{}, -1, errors.Wrap(err, "Seek")
	}

	return tmpfile, hash, size, err
}