
Merge pull request #213 from restic/refactor

Minor refactor
Alexander Neumann 2015-07-05 18:13:08 +02:00
commit de5a530d2a
12 changed files with 626 additions and 203 deletions


@@ -10,6 +10,7 @@ import (
"os/exec"
"path/filepath"
"sort"
"syscall"
"github.com/juju/errors"
"github.com/pkg/sftp"
@@ -35,6 +36,9 @@ func startClient(program string, args ...string) (*SFTP, error) {
// send errors from ssh to stderr
cmd.Stderr = os.Stderr
// ignore signals sent to the parent (e.g. SIGINT)
cmd.SysProcAttr = &syscall.SysProcAttr{Setsid: true}
// get stdin and stdout
wr, err := cmd.StdinPipe()
if err != nil {
@@ -452,6 +456,10 @@ func (s *SFTP) Close() error {
}
s.c.Close()
// TODO: add timeout after which the process is killed
if err := s.cmd.Process.Kill(); err != nil {
return err
}
return s.cmd.Wait()
}
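The TODO above could later be implemented by giving the ssh process a grace period and only killing it when it does not exit in time. A minimal sketch of such a helper (hypothetical, not part of this commit; it assumes "os/exec" and "time" are imported):

// waitOrKill waits for cmd to exit on its own and kills it after timeout.
// Hypothetical sketch for the TODO above, not part of this commit.
func waitOrKill(cmd *exec.Cmd, timeout time.Duration) error {
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()

	select {
	case err := <-done:
		// the process exited within the grace period
		return err
	case <-time.After(timeout):
		if err := cmd.Process.Kill(); err != nil {
			return err
		}
		// collect the result of Wait so the process is reaped
		return <-done
	}
}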


@@ -79,7 +79,7 @@ func (cmd CmdCat) Execute(args []string) error {
fmt.Println(string(buf))
return nil
case "index":
buf, err := repo.Load(backend.Index, id)
buf, err := repo.LoadAndDecrypt(backend.Index, id)
if err != nil {
return err
}


@@ -49,7 +49,9 @@ func lockRepository(repo *repository.Repository, exclusive bool) (*restic.Lock,
}
func unlockRepo(lock *restic.Lock) error {
debug.Log("unlockRepo", "unlocking repository")
if err := lock.Unlock(); err != nil {
debug.Log("unlockRepo", "error while unlocking: %v", err)
return err
}
@@ -67,8 +69,10 @@ func unlockAll() error {
debug.Log("unlockAll", "unlocking %d locks", len(globalLocks))
for _, lock := range globalLocks {
if err := lock.Unlock(); err != nil {
debug.Log("unlockAll", "error while unlocking: %v", err)
return err
}
debug.Log("unlockAll", "successfully removed lock")
}
return nil


@@ -162,32 +162,44 @@ used to reconstruct the index. The files are encrypted and authenticated like
Data and Tree Blobs, so the outer structure is `IV || Ciphertext || MAC` again.
The plaintext consists of a JSON document like the following:
[ {
"id": "73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c",
"blobs": [
{
"obsolete": [
"ed54ae36197f4745ebc4b54d10e0f623eaaaedd03013eb7ae90df881b7781452"
],
"packs": [
{
"id": "3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce",
"type": "data",
"offset": 0,
"length": 25
},{
"id": "9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae",
"type": "tree",
"offset": 38,
"length": 100
},
{
"id": "d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66",
"type": "data",
"offset": 150,
"length": 123
}
"id": "73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c",
"blobs": [
{
"id": "3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce",
"type": "data",
"offset": 0,
"length": 25
},{
"id": "9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae",
"type": "tree",
"offset": 38,
"length": 100
},
{
"id": "d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66",
"type": "data",
"offset": 150,
"length": 123
}
]
}, [...]
]
} ]
}
This JSON document lists Blobs with contents. In this example, the Pack
`73d04e61` contains two data Blobs and one Tree blob; the plaintext hashes are
listed afterwards.
This JSON document lists Packs and the blobs contained therein. In this
example, the Pack `73d04e61` contains two data Blobs and one Tree blob; the
plaintext hashes are listed afterwards.
The field `obsolete` lists the storage IDs of index files that have been
replaced with the current index file. This happens when index files are
repacked, for example when old snapshots are removed and Packs are
recombined.
There may be an arbitrary number of index files, containing information on
non-disjoint sets of Packs. The number of packs described in a single file is
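For illustration, the index document shown above can be decoded with encoding/json alone. The following is a minimal, self-contained sketch; the type names (indexFile, packEntry, blobEntry) and the shortened IDs are made up for the example and are not restic's actual decoder:

package main

import (
	"encoding/json"
	"fmt"
)

// blobEntry describes a single blob inside a pack.
type blobEntry struct {
	ID     string `json:"id"`
	Type   string `json:"type"` // "data" or "tree"
	Offset uint   `json:"offset"`
	Length uint   `json:"length"`
}

// packEntry describes a pack and the blobs it contains.
type packEntry struct {
	ID    string      `json:"id"`
	Blobs []blobEntry `json:"blobs"`
}

// indexFile is the top-level structure of one decrypted index file.
type indexFile struct {
	Obsolete []string    `json:"obsolete"`
	Packs    []packEntry `json:"packs"`
}

func main() {
	doc := `{"obsolete":["ed54ae36"],"packs":[{"id":"73d04e61","blobs":
		[{"id":"3ec79977","type":"data","offset":0,"length":25}]}]}`

	var idx indexFile
	if err := json.Unmarshal([]byte(doc), &idx); err != nil {
		panic(err)
	}
	fmt.Printf("index obsoletes %d file(s) and describes %d pack(s)\n",
		len(idx.Obsolete), len(idx.Packs))
}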

repository/config.go (new file, 87 lines)

@@ -0,0 +1,87 @@
package repository
import (
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"errors"
"io"
"github.com/restic/restic/backend"
"github.com/restic/restic/chunker"
"github.com/restic/restic/debug"
)
// Config contains the configuration for a repository.
type Config struct {
Version uint `json:"version"`
ID string `json:"id"`
ChunkerPolynomial chunker.Pol `json:"chunker_polynomial"`
}
// repositoryIDSize is the length of the ID chosen at random for a new repository.
const repositoryIDSize = sha256.Size
// RepoVersion is the version that is written to the config when a repository
// is newly created with Init().
const RepoVersion = 1
// JSONUnpackedSaver saves unpacked JSON.
type JSONUnpackedSaver interface {
SaveJSONUnpacked(backend.Type, interface{}) (backend.ID, error)
}
// JSONUnpackedLoader loads unpacked JSON.
type JSONUnpackedLoader interface {
LoadJSONUnpacked(backend.Type, backend.ID, interface{}) error
}
// CreateConfig creates a config file with a randomly selected polynomial and
// ID and saves the config in the repository.
func CreateConfig(r JSONUnpackedSaver) (Config, error) {
var (
err error
cfg Config
)
cfg.ChunkerPolynomial, err = chunker.RandomPolynomial()
if err != nil {
return Config{}, err
}
newID := make([]byte, repositoryIDSize)
_, err = io.ReadFull(rand.Reader, newID)
if err != nil {
return Config{}, err
}
cfg.ID = hex.EncodeToString(newID)
cfg.Version = RepoVersion
debug.Log("Repo.CreateConfig", "New config: %#v", cfg)
_, err = r.SaveJSONUnpacked(backend.Config, cfg)
return cfg, err
}
// LoadConfig loads, checks and returns the config for a repository.
func LoadConfig(r JSONUnpackedLoader) (Config, error) {
var (
cfg Config
)
err := r.LoadJSONUnpacked(backend.Config, nil, &cfg)
if err != nil {
return Config{}, err
}
if cfg.Version != RepoVersion {
return Config{}, errors.New("unsupported repository version")
}
if !cfg.ChunkerPolynomial.Irreducible() {
return Config{}, errors.New("invalid chunker polynomial")
}
return cfg, nil
}

repository/config_test.go (new file, 53 lines)

@@ -0,0 +1,53 @@
package repository_test
import (
"testing"
"github.com/restic/restic/backend"
"github.com/restic/restic/repository"
. "github.com/restic/restic/test"
)
type saver func(backend.Type, interface{}) (backend.ID, error)
func (s saver) SaveJSONUnpacked(t backend.Type, arg interface{}) (backend.ID, error) {
return s(t, arg)
}
type loader func(backend.Type, backend.ID, interface{}) error
func (l loader) LoadJSONUnpacked(t backend.Type, id backend.ID, arg interface{}) error {
return l(t, id, arg)
}
func TestConfig(t *testing.T) {
resultConfig := repository.Config{}
save := func(tpe backend.Type, arg interface{}) (backend.ID, error) {
Assert(t, tpe == backend.Config,
"wrong backend type: got %v, wanted %v",
tpe, backend.Config)
cfg := arg.(repository.Config)
resultConfig = cfg
return backend.ID{}, nil
}
cfg1, err := repository.CreateConfig(saver(save))
OK(t, err)
load := func(tpe backend.Type, id backend.ID, arg interface{}) error {
Assert(t, tpe == backend.Config,
"wrong backend type: got %v, wanted %v",
tpe, backend.Config)
cfg := arg.(*repository.Config)
*cfg = resultConfig
return nil
}
cfg2, err := repository.LoadConfig(loader(load))
OK(t, err)
Assert(t, cfg1 == cfg2,
"configs aren't equal: %v != %v", cfg1, cfg2)
}

repository/parallel.go (new file, 71 lines)

@@ -0,0 +1,71 @@
package repository
import (
"sync"
"github.com/restic/restic/backend"
)
func closeIfOpen(ch chan struct{}) {
// only close ch when it is not already closed: receiving from a closed channel
// succeeds immediately, so the first case returns instead of closing again.
select {
case <-ch:
return
default:
close(ch)
}
}
// ParallelWorkFunc gets one file ID to work on. If an error is returned,
// processing stops. If done is closed, the function should return.
type ParallelWorkFunc func(id string, done <-chan struct{}) error
// FilesInParallel runs n workers of f in parallel, on the IDs that
// repo.List(t) yields. If f returns an error, the process is aborted and the
// first error is returned.
func FilesInParallel(repo backend.Lister, t backend.Type, n uint, f ParallelWorkFunc) error {
done := make(chan struct{})
defer closeIfOpen(done)
wg := &sync.WaitGroup{}
ch := repo.List(t, done)
errors := make(chan error, n)
for i := 0; uint(i) < n; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for {
select {
case id, ok := <-ch:
if !ok {
return
}
err := f(id, done)
if err != nil {
closeIfOpen(done)
errors <- err
return
}
case <-done:
return
}
}
}()
}
wg.Wait()
select {
case err := <-errors:
return err
default:
break
}
return nil
}
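For illustration, a caller outside the package might use FilesInParallel as follows. This is a hypothetical snippet: it assumes an already opened *repository.Repository named repo, imports of fmt and log, and that backend.Snapshot names the snapshot file type; the repository's backend is passed as the backend.Lister.

// Hypothetical usage sketch: visit all snapshot files with 8 workers.
worker := func(id string, done <-chan struct{}) error {
	fmt.Println("snapshot file:", id)
	return nil
}

if err := repository.FilesInParallel(repo.Backend(), backend.Snapshot, 8, worker); err != nil {
	log.Fatal(err)
}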

repository/parallel_test.go (new file, 125 lines)

@@ -0,0 +1,125 @@
package repository_test
import (
"errors"
"math/rand"
"testing"
"time"
"github.com/restic/restic/backend"
"github.com/restic/restic/repository"
. "github.com/restic/restic/test"
)
type testIDs []string
var lister = testIDs{
"40bb581cd36de952985c97a3ff6b21df41ee897d4db2040354caa36a17ff5268",
"2e15811a4d14ffac66d36a9ff456019d8de4c10c949d45b643f8477d17e92ff3",
"70c11b3ed521ad6b76d905c002ca98b361fca06aca060a063432c7311155a4da",
"8056a33e75dccdda701b6c989c7ed0cb71bbb6da13c6427fe5986f0896cc91c0",
"79d8776200596aa0237b10d470f7b850b86f8a1a80988ef5c8bee2874ce992e2",
"f9f1f29791c6b79b90b35efd083f17a3b163bbbafb1a2fdf43d46d56cffda289",
"3834178d05d0f6dd07f872ee0262ff1ace0f0f375768227d3c902b0b66591369",
"66d5cc68c9186414806f366ae5493ce7f229212993750a4992be4030f6af28c5",
"ebca5af4f397944f68cd215e3dfa2b197a7ba0f7c17d65d9f7390d0a15cde296",
"d4511ce6ff732d106275a57e40745c599e987c0da44c42cddbef592aac102437",
"f366202f0bfeefaedd7b49e2f21a90d3cbddb97d257a74d788dd34e19a684dae",
"a5c17728ab2433cd50636dd5c6c7068c7a44f2999d09c46e8f528466da8a059d",
"bae0f9492b9b208233029b87692a1a55cbd7fbe1cf3f6d7bc693ac266a6d6f0e",
"9d500187913c7510d71d1902703d312c7aaa56f1e98351385b9535fdabae595e",
"ffbddd8a4c1e54d258bb3e16d3929b546b61af63cb560b3e3061a8bef5b24552",
"201bb3abf655e7ef71e79ed4fb1079b0502b5acb4d9fad5e72a0de690c50a386",
"08eb57bbd559758ea96e99f9b7688c30e7b3bcf0c4562ff4535e2d8edeffaeed",
"e50b7223b04985ff38d9e11d1cba333896ef4264f82bd5d0653a028bce70e542",
"65a9421cd59cc7b7a71dcd9076136621af607fb4701d2e5c2af23b6396cf2f37",
"995a655b3521c19b4d0c266222266d89c8fc62889597d61f45f336091e646d57",
"51ec6f0bce77ed97df2dd7ae849338c3a8155a057da927eedd66e3d61be769ad",
"7b3923a0c0666431efecdbf6cb171295ec1710b6595eebcba3b576b49d13e214",
"2cedcc3d14698bea7e4b0546f7d5d48951dd90add59e6f2d44b693fd8913717d",
"fd6770cbd54858fdbd3d7b4239b985e5599180064d93ca873f27e86e8407d011",
"9edc51d8e6e04d05c9757848c1bfbfdc8e86b6330982294632488922e59fdb1b",
"1a6c4fbb24ad724c968b2020417c3d057e6c89e49bdfb11d91006def65eab6a0",
"cb3b29808cd0adfa2dca1f3a04f98114fbccf4eb487cdd4022f49bd70eeb049b",
"f55edcb40c619e29a20e432f8aaddc83a649be2c2d1941ccdc474cd2af03d490",
"e8ccc1763a92de23566b95c3ad1414a098016ece69a885fc8a72782a7517d17c",
"0fe2e3db8c5a12ad7101a63a0fffee901be54319cfe146bead7aec851722f82d",
"36be45a6ae7c95ad97cee1b33023be324bce7a7b4b7036e24125679dd9ff5b44",
"1685ed1a57c37859fbef1f7efb7509f20b84ec17a765605de43104d2fa37884b",
"9d83629a6a004c505b100a0b5d0b246833b63aa067aa9b59e3abd6b74bc4d3a8",
"be49a66b60175c5e2ee273b42165f86ef11bb6518c1c79950bcd3f4c196c98bd",
"0fd89885d821761b4a890782908e75793028747d15ace3c6cbf0ad56582b4fa5",
"94a767519a4e352a88796604943841fea21429f3358b4d5d55596dbda7d15dce",
"8dd07994afe6e572ddc9698fb0d13a0d4c26a38b7992818a71a99d1e0ac2b034",
"f7380a6f795ed31fbeb2945c72c5fd1d45044e5ab152311e75e007fa530f5847",
"5ca1ce01458e484393d7e9c8af42b0ff37a73a2fee0f18e14cff0fb180e33014",
"8f44178be3fe0a2bd41f922576fb7a9b19d589754504be746f56c759df328fda",
"12d33847c2be711c989f37360dd7aa8537fd14972262a4530634a08fdf32a767",
"31e077f5080f78846a00093caff2b6b839519cc47516142eeba9c41d4072a605",
"14f01db8a0054e70222b76d2555d70114b4bf8a0f02084324af2df226f14a795",
"7f5dbbaf31b4551828e8e76cef408375db9fbcdcdb6b5949f2d1b0c4b8632132",
"42a5d9b9bb7e4a16f23ba916bcf87f38c1aa1f2de2ab79736f725850a8ff6a1b",
"e06f8f901ea708beba8712a11b6e2d0be7c4b018d0254204ef269bcdf5e8c6cc",
"d9ba75785bf45b0c4fd3b2365c968099242483f2f0d0c7c20306dac11fae96e9",
"428debbb280873907cef2ec099efe1566e42a59775d6ec74ded0c4048d5a6515",
"3b51049d4dae701098e55a69536fa31ad2be1adc17b631a695a40e8a294fe9c0",
"168f88aa4b105e9811f5f79439cc1a689be4eec77f3361d42f22fe8f7ddc74a9",
"0baa0ab2249b33d64449a899cb7bd8eae5231f0d4ff70f09830dc1faa2e4abee",
"0c3896d346b580306a49de29f3a78913a41e14b8461b124628c33a64636241f2",
"b18313f1651c15e100e7179aa3eb8ffa62c3581159eaf7f83156468d19781e42",
"996361f7d988e48267ccc7e930fed4637be35fe7562b8601dceb7a32313a14c8",
"dfb4e6268437d53048d22b811048cd045df15693fc6789affd002a0fc80a6e60",
"34dd044c228727f2226a0c9c06a3e5ceb5e30e31cb7854f8fa1cde846b395a58",
}
func (tests testIDs) List(t backend.Type, done <-chan struct{}) <-chan string {
ch := make(chan string)
go func() {
defer close(ch)
for i := 0; i < 500; i++ {
for _, id := range tests {
select {
case ch <- id:
case <-done:
return
}
}
}
}()
return ch
}
func TestFilesInParallel(t *testing.T) {
f := func(id string, done <-chan struct{}) error {
time.Sleep(1 * time.Millisecond)
return nil
}
for n := uint(1); n < 5; n++ {
err := repository.FilesInParallel(lister, backend.Data, n*100, f)
OK(t, err)
}
}
var errTest = errors.New("test error")
func TestFilesInParallelWithError(t *testing.T) {
f := func(id string, done <-chan struct{}) error {
time.Sleep(1 * time.Millisecond)
if rand.Float32() < 0.01 {
return errTest
}
return nil
}
for n := uint(1); n < 5; n++ {
err := repository.FilesInParallel(lister, backend.Data, n*100, f)
Equals(t, errTest, err)
}
}


@@ -2,9 +2,7 @@ package repository
import (
"bytes"
"crypto/rand"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
@@ -19,13 +17,6 @@ import (
"github.com/restic/restic/pack"
)
// Config contains the configuration for a repository.
type Config struct {
Version uint `json:"version"`
ID string `json:"id"`
ChunkerPolynomial chunker.Pol `json:"chunker_polynomial"`
}
// Repository is used to access a repository in a backend.
type Repository struct {
be backend.Backend
@@ -38,6 +29,7 @@ type Repository struct {
packs []*pack.Packer
}
// New returns a new repository with backend be.
func New(be backend.Backend) *Repository {
return &Repository{
be: be,
@@ -48,23 +40,23 @@ func New(be backend.Backend) *Repository {
// Find loads the list of all blobs of type t and searches for names which start
// with prefix. If none is found, nil and ErrNoIDPrefixFound is returned. If
// more than one is found, nil and ErrMultipleIDMatches is returned.
func (s *Repository) Find(t backend.Type, prefix string) (string, error) {
return backend.Find(s.be, t, prefix)
func (r *Repository) Find(t backend.Type, prefix string) (string, error) {
return backend.Find(r.be, t, prefix)
}
// PrefixLength returns the number of bytes required so that all prefixes of
// all IDs of type t are unique.
func (s *Repository) PrefixLength(t backend.Type) (int, error) {
return backend.PrefixLength(s.be, t)
func (r *Repository) PrefixLength(t backend.Type) (int, error) {
return backend.PrefixLength(r.be, t)
}
// Load tries to load and decrypt content identified by t and id from the
// LoadAndDecrypt loads and decrypts data identified by t and id from the
// backend.
func (s *Repository) Load(t backend.Type, id backend.ID) ([]byte, error) {
func (r *Repository) LoadAndDecrypt(t backend.Type, id backend.ID) ([]byte, error) {
debug.Log("Repo.Load", "load %v with id %v", t, id.Str())
// load blob from pack
rd, err := s.be.Get(t, id.String())
rd, err := r.be.Get(t, id.String())
if err != nil {
debug.Log("Repo.Load", "error loading %v: %v", id.Str(), err)
return nil, err
@@ -86,7 +78,7 @@ func (s *Repository) Load(t backend.Type, id backend.ID) ([]byte, error) {
}
// decrypt
plain, err := s.Decrypt(buf)
plain, err := r.Decrypt(buf)
if err != nil {
return nil, err
}
@@ -96,10 +88,10 @@ func (s *Repository) Load(t backend.Type, id backend.ID) ([]byte, error) {
// LoadBlob tries to load and decrypt content identified by t and id from a
// pack from the backend.
func (s *Repository) LoadBlob(t pack.BlobType, id backend.ID) ([]byte, error) {
func (r *Repository) LoadBlob(t pack.BlobType, id backend.ID) ([]byte, error) {
debug.Log("Repo.LoadBlob", "load %v with id %v", t, id.Str())
// lookup pack
packID, tpe, offset, length, err := s.idx.Lookup(id)
packID, tpe, offset, length, err := r.idx.Lookup(id)
if err != nil {
debug.Log("Repo.LoadBlob", "id %v not found in index: %v", id.Str(), err)
return nil, err
@@ -113,7 +105,7 @@ func (s *Repository) LoadBlob(t pack.BlobType, id backend.ID) ([]byte, error) {
debug.Log("Repo.LoadBlob", "id %v found in pack %v at offset %v (length %d)", id.Str(), packID.Str(), offset, length)
// load blob from pack
rd, err := s.be.GetReader(backend.Data, packID.String(), offset, length)
rd, err := r.be.GetReader(backend.Data, packID.String(), offset, length)
if err != nil {
debug.Log("Repo.LoadBlob", "error loading pack %v for %v: %v", packID.Str(), id.Str(), err)
return nil, err
@@ -130,7 +122,7 @@ func (s *Repository) LoadBlob(t pack.BlobType, id backend.ID) ([]byte, error) {
}
// decrypt
plain, err := s.Decrypt(buf)
plain, err := r.Decrypt(buf)
if err != nil {
return nil, err
}
@@ -145,16 +137,16 @@ func (s *Repository) LoadBlob(t pack.BlobType, id backend.ID) ([]byte, error) {
// LoadJSONUnpacked decrypts the data and afterwards calls json.Unmarshal on
// the item.
func (s *Repository) LoadJSONUnpacked(t backend.Type, id backend.ID, item interface{}) error {
func (r *Repository) LoadJSONUnpacked(t backend.Type, id backend.ID, item interface{}) error {
// load blob from backend
rd, err := s.be.Get(t, id.String())
rd, err := r.be.Get(t, id.String())
if err != nil {
return err
}
defer rd.Close()
// decrypt
decryptRd, err := crypto.DecryptFrom(s.key, rd)
decryptRd, err := crypto.DecryptFrom(r.key, rd)
defer decryptRd.Close()
if err != nil {
return err
@@ -172,22 +164,22 @@ func (s *Repository) LoadJSONUnpacked(t backend.Type, id backend.ID, item interf
// LoadJSONPack calls LoadBlob() to load a blob from the backend, decrypt the
// data and afterwards call json.Unmarshal on the item.
func (s *Repository) LoadJSONPack(t pack.BlobType, id backend.ID, item interface{}) error {
func (r *Repository) LoadJSONPack(t pack.BlobType, id backend.ID, item interface{}) error {
// lookup pack
packID, _, offset, length, err := s.idx.Lookup(id)
packID, _, offset, length, err := r.idx.Lookup(id)
if err != nil {
return err
}
// load blob from pack
rd, err := s.be.GetReader(backend.Data, packID.String(), offset, length)
rd, err := r.be.GetReader(backend.Data, packID.String(), offset, length)
if err != nil {
return err
}
defer rd.Close()
// decrypt
decryptRd, err := crypto.DecryptFrom(s.key, rd)
decryptRd, err := crypto.DecryptFrom(r.key, rd)
defer decryptRd.Close()
if err != nil {
return err
@@ -209,43 +201,43 @@ const maxPackers = 200
// findPacker returns a packer for a new blob of size bytes. Either a new one is
// created or one is returned that already has some blobs.
func (s *Repository) findPacker(size uint) (*pack.Packer, error) {
s.pm.Lock()
defer s.pm.Unlock()
func (r *Repository) findPacker(size uint) (*pack.Packer, error) {
r.pm.Lock()
defer r.pm.Unlock()
// search for a suitable packer
if len(s.packs) > 0 {
if len(r.packs) > 0 {
debug.Log("Repo.findPacker", "searching packer for %d bytes\n", size)
for i, p := range s.packs {
for i, p := range r.packs {
if p.Size()+size < maxPackSize {
debug.Log("Repo.findPacker", "found packer %v", p)
// remove from list
s.packs = append(s.packs[:i], s.packs[i+1:]...)
r.packs = append(r.packs[:i], r.packs[i+1:]...)
return p, nil
}
}
}
// no suitable packer found, return new
blob, err := s.be.Create()
blob, err := r.be.Create()
if err != nil {
return nil, err
}
debug.Log("Repo.findPacker", "create new pack %p", blob)
return pack.NewPacker(s.key, blob), nil
return pack.NewPacker(r.key, blob), nil
}
// insertPacker appends p to s.packs.
func (s *Repository) insertPacker(p *pack.Packer) {
s.pm.Lock()
defer s.pm.Unlock()
func (r *Repository) insertPacker(p *pack.Packer) {
r.pm.Lock()
defer r.pm.Unlock()
s.packs = append(s.packs, p)
debug.Log("Repo.insertPacker", "%d packers\n", len(s.packs))
r.packs = append(r.packs, p)
debug.Log("Repo.insertPacker", "%d packers\n", len(r.packs))
}
// savePacker stores p in the backend.
func (s *Repository) savePacker(p *pack.Packer) error {
func (r *Repository) savePacker(p *pack.Packer) error {
debug.Log("Repo.savePacker", "save packer with %d blobs\n", p.Count())
_, err := p.Finalize()
if err != nil {
@@ -265,23 +257,23 @@ func (s *Repository) savePacker(p *pack.Packer) error {
// update blobs in the index
for _, b := range p.Blobs() {
debug.Log("Repo.savePacker", " updating blob %v to pack %v", b.ID.Str(), sid.Str())
s.idx.Store(b.Type, b.ID, sid, b.Offset, uint(b.Length))
r.idx.Store(b.Type, b.ID, sid, b.Offset, uint(b.Length))
}
return nil
}
// countPacker returns the number of open (unfinished) packers.
func (s *Repository) countPacker() int {
s.pm.Lock()
defer s.pm.Unlock()
func (r *Repository) countPacker() int {
r.pm.Lock()
defer r.pm.Unlock()
return len(s.packs)
return len(r.packs)
}
// Save encrypts data and stores it to the backend as type t. If data is small
// SaveAndEncrypt encrypts data and stores it to the backend as type t. If data is small
// enough, it will be packed together with other small blobs.
func (s *Repository) Save(t pack.BlobType, data []byte, id backend.ID) (backend.ID, error) {
func (r *Repository) SaveAndEncrypt(t pack.BlobType, data []byte, id backend.ID) (backend.ID, error) {
if id == nil {
// compute plaintext hash
id = backend.Hash(data)
@@ -294,13 +286,13 @@ func (s *Repository) Save(t pack.BlobType, data []byte, id backend.ID) (backend.
defer freeBuf(ciphertext)
// encrypt blob
ciphertext, err := s.Encrypt(ciphertext, data)
ciphertext, err := r.Encrypt(ciphertext, data)
if err != nil {
return nil, err
}
// find suitable packer and add blob
packer, err := s.findPacker(uint(len(ciphertext)))
packer, err := r.findPacker(uint(len(ciphertext)))
if err != nil {
return nil, err
}
@@ -310,23 +302,23 @@ func (s *Repository) Save(t pack.BlobType, data []byte, id backend.ID) (backend.
// add this id to the index, although we don't know yet in which pack it
// will be saved, the entry will be updated when the pack is written.
s.idx.Store(t, id, nil, 0, 0)
r.idx.Store(t, id, nil, 0, 0)
debug.Log("Repo.Save", "saving stub for %v (%v) in index", id.Str, t)
// if the pack is not full enough and there are less than maxPackers
// packers, put back to the list
if packer.Size() < minPackSize && s.countPacker() < maxPackers {
if packer.Size() < minPackSize && r.countPacker() < maxPackers {
debug.Log("Repo.Save", "pack is not full enough (%d bytes)", packer.Size())
s.insertPacker(packer)
r.insertPacker(packer)
return id, nil
}
// else write the pack to the backend
return id, s.savePacker(packer)
return id, r.savePacker(packer)
}
// SaveFrom encrypts data read from rd and stores it in a pack in the backend as type t.
func (s *Repository) SaveFrom(t pack.BlobType, id backend.ID, length uint, rd io.Reader) error {
func (r *Repository) SaveFrom(t pack.BlobType, id backend.ID, length uint, rd io.Reader) error {
debug.Log("Repo.SaveFrom", "save id %v (%v, %d bytes)", id.Str(), t, length)
if id == nil {
return errors.New("id is nil")
@@ -337,7 +329,7 @@ func (s *Repository) SaveFrom(t pack.BlobType, id backend.ID, length uint, rd io
return err
}
_, err = s.Save(t, buf, id)
_, err = r.SaveAndEncrypt(t, buf, id)
if err != nil {
return err
}
@@ -347,7 +339,7 @@ func (s *Repository) SaveFrom(t pack.BlobType, id backend.ID, length uint, rd io
// SaveJSON serialises item as JSON and encrypts and saves it in a pack in the
// backend as type t.
func (s *Repository) SaveJSON(t pack.BlobType, item interface{}) (backend.ID, error) {
func (r *Repository) SaveJSON(t pack.BlobType, item interface{}) (backend.ID, error) {
debug.Log("Repo.SaveJSON", "save %v blob", t)
buf := getBuf()[:0]
defer freeBuf(buf)
@@ -361,14 +353,14 @@ func (s *Repository) SaveJSON(t pack.BlobType, item interface{}) (backend.ID, er
}
buf = wr.Bytes()
return s.Save(t, buf, nil)
return r.SaveAndEncrypt(t, buf, nil)
}
// SaveJSONUnpacked serialises item as JSON and encrypts and saves it in the
// backend as type t, without a pack. It returns the storage hash.
func (s *Repository) SaveJSONUnpacked(t backend.Type, item interface{}) (backend.ID, error) {
func (r *Repository) SaveJSONUnpacked(t backend.Type, item interface{}) (backend.ID, error) {
// create file
blob, err := s.be.Create()
blob, err := r.be.Create()
if err != nil {
return nil, err
}
@@ -378,7 +370,7 @@ func (s *Repository) SaveJSONUnpacked(t backend.Type, item interface{}) (backend
hw := backend.NewHashingWriter(blob, sha256.New())
// encrypt blob
ewr := crypto.EncryptTo(s.key, hw)
ewr := crypto.EncryptTo(r.key, hw)
enc := json.NewEncoder(ewr)
err = enc.Encode(item)
@@ -406,43 +398,45 @@ func (s *Repository) SaveJSONUnpacked(t backend.Type, item interface{}) (backend
}
// Flush saves all remaining packs.
func (s *Repository) Flush() error {
s.pm.Lock()
defer s.pm.Unlock()
func (r *Repository) Flush() error {
r.pm.Lock()
defer r.pm.Unlock()
debug.Log("Repo.Flush", "manually flushing %d packs", len(s.packs))
debug.Log("Repo.Flush", "manually flushing %d packs", len(r.packs))
for _, p := range s.packs {
err := s.savePacker(p)
for _, p := range r.packs {
err := r.savePacker(p)
if err != nil {
return err
}
}
s.packs = s.packs[:0]
r.packs = r.packs[:0]
return nil
}
func (s *Repository) Backend() backend.Backend {
return s.be
// Backend returns the backend for the repository.
func (r *Repository) Backend() backend.Backend {
return r.be
}
func (s *Repository) Index() *Index {
return s.idx
// Index returns the currently loaded Index.
func (r *Repository) Index() *Index {
return r.idx
}
// SetIndex instructs the repository to use the given index.
func (s *Repository) SetIndex(i *Index) {
s.idx = i
func (r *Repository) SetIndex(i *Index) {
r.idx = i
}
// SaveIndex saves all new packs in the index in the backend, returned is the
// storage ID.
func (s *Repository) SaveIndex() (backend.ID, error) {
func (r *Repository) SaveIndex() (backend.ID, error) {
debug.Log("Repo.SaveIndex", "Saving index")
// create blob
blob, err := s.be.Create()
blob, err := r.be.Create()
if err != nil {
return nil, err
}
@@ -453,9 +447,9 @@ func (s *Repository) SaveIndex() (backend.ID, error) {
hw := backend.NewHashingWriter(blob, sha256.New())
// encrypt blob
ewr := crypto.EncryptTo(s.key, hw)
ewr := crypto.EncryptTo(r.key, hw)
err = s.idx.Encode(ewr)
err = r.idx.Encode(ewr)
if err != nil {
return nil, err
}
@@ -478,112 +472,90 @@ func (s *Repository) SaveIndex() (backend.ID, error) {
return sid, nil
}
// LoadIndex loads all index files from the backend and merges them with the
// current index.
func (s *Repository) LoadIndex() error {
debug.Log("Repo.LoadIndex", "Loading index")
done := make(chan struct{})
defer close(done)
const loadIndexParallelism = 20
for id := range s.be.List(backend.Index, done) {
err := s.loadIndex(id)
// LoadIndex loads all index files from the backend in parallel and merges them
// with the current index. The first error that occurred is returned.
func (r *Repository) LoadIndex() error {
debug.Log("Repo.LoadIndex", "Loading index")
errCh := make(chan error, 1)
indexes := make(chan *Index)
worker := func(id string, done <-chan struct{}) error {
idx, err := LoadIndex(r, id)
if err != nil {
return err
}
select {
case indexes <- idx:
case <-done:
}
return nil
}
go func() {
defer close(indexes)
errCh <- FilesInParallel(r.be, backend.Index, loadIndexParallelism, worker)
}()
for idx := range indexes {
r.idx.Merge(idx)
}
if err := <-errCh; err != nil {
return err
}
return nil
}
// loadIndex loads the index id and merges it with the currently used index.
func (s *Repository) loadIndex(id string) error {
debug.Log("Repo.loadIndex", "Loading index %v", id[:8])
before := len(s.idx.pack)
// LoadIndex loads the index with the given id from the backend and returns it.
func LoadIndex(repo *Repository, id string) (*Index, error) {
debug.Log("LoadIndex", "Loading index %v", id[:8])
rd, err := s.be.Get(backend.Index, id)
rd, err := repo.be.Get(backend.Index, id)
defer rd.Close()
if err != nil {
return err
return nil, err
}
// decrypt
decryptRd, err := crypto.DecryptFrom(s.key, rd)
decryptRd, err := crypto.DecryptFrom(repo.key, rd)
defer decryptRd.Close()
if err != nil {
return err
return nil, err
}
idx, err := DecodeIndex(decryptRd)
if err != nil {
debug.Log("Repo.loadIndex", "error while decoding index %v: %v", id, err)
return err
debug.Log("LoadIndex", "error while decoding index %v: %v", id, err)
return nil, err
}
s.idx.Merge(idx)
after := len(s.idx.pack)
debug.Log("Repo.loadIndex", "Loaded index %v, added %v blobs", id[:8], after-before)
return nil
}
const repositoryIDSize = sha256.Size
const RepoVersion = 1
func createConfig(s *Repository) (err error) {
s.Config.ChunkerPolynomial, err = chunker.RandomPolynomial()
if err != nil {
return err
}
newID := make([]byte, repositoryIDSize)
_, err = io.ReadFull(rand.Reader, newID)
if err != nil {
return err
}
s.Config.ID = hex.EncodeToString(newID)
s.Config.Version = RepoVersion
debug.Log("Repo.createConfig", "New config: %#v", s.Config)
_, err = s.SaveJSONUnpacked(backend.Config, s.Config)
return err
}
func (s *Repository) loadConfig(cfg *Config) error {
err := s.LoadJSONUnpacked(backend.Config, nil, cfg)
if err != nil {
return err
}
if cfg.Version != RepoVersion {
return errors.New("unsupported repository version")
}
if !cfg.ChunkerPolynomial.Irreducible() {
return errors.New("invalid chunker polynomial")
}
return nil
return idx, nil
}
// SearchKey finds a key with the supplied password; afterwards the config is
// read and parsed.
func (s *Repository) SearchKey(password string) error {
key, err := SearchKey(s, password)
func (r *Repository) SearchKey(password string) error {
key, err := SearchKey(r, password)
if err != nil {
return err
}
s.key = key.master
s.keyName = key.Name()
return s.loadConfig(&s.Config)
r.key = key.master
r.keyName = key.Name()
r.Config, err = LoadConfig(r)
return err
}
// Init creates a new master key with the supplied password and initializes the
// repository config.
func (s *Repository) Init(password string) error {
has, err := s.be.Test(backend.Config, "")
// Init creates a new master key with the supplied password, initializes and
// saves the repository config.
func (r *Repository) Init(password string) error {
has, err := r.be.Test(backend.Config, "")
if err != nil {
return err
}
@@ -591,52 +563,58 @@ func (s *Repository) Init(password string) error {
return errors.New("repository master key and config already initialized")
}
key, err := createMasterKey(s, password)
key, err := createMasterKey(r, password)
if err != nil {
return err
}
s.key = key.master
s.keyName = key.Name()
return createConfig(s)
r.key = key.master
r.keyName = key.Name()
r.Config, err = CreateConfig(r)
return err
}
func (s *Repository) Decrypt(ciphertext []byte) ([]byte, error) {
if s.key == nil {
// Decrypt authenticates and decrypts ciphertext and returns the plaintext.
func (r *Repository) Decrypt(ciphertext []byte) ([]byte, error) {
if r.key == nil {
return nil, errors.New("key for repository not set")
}
return crypto.Decrypt(s.key, nil, ciphertext)
return crypto.Decrypt(r.key, nil, ciphertext)
}
func (s *Repository) Encrypt(ciphertext, plaintext []byte) ([]byte, error) {
if s.key == nil {
// Encrypt encrypts and authenticates the plaintext and saves the result in
// ciphertext.
func (r *Repository) Encrypt(ciphertext, plaintext []byte) ([]byte, error) {
if r.key == nil {
return nil, errors.New("key for repository not set")
}
return crypto.Encrypt(s.key, ciphertext, plaintext)
return crypto.Encrypt(r.key, ciphertext, plaintext)
}
func (s *Repository) Key() *crypto.Key {
return s.key
// Key returns the current master key.
func (r *Repository) Key() *crypto.Key {
return r.key
}
func (s *Repository) KeyName() string {
return s.keyName
// KeyName returns the name of the current key in the backend.
func (r *Repository) KeyName() string {
return r.keyName
}
// Count returns the number of blobs of a given type in the backend.
func (s *Repository) Count(t backend.Type) (n uint) {
for _ = range s.be.List(t, nil) {
func (r *Repository) Count(t backend.Type) (n uint) {
for _ = range r.be.List(t, nil) {
n++
}
return
}
func (s *Repository) list(t backend.Type, done <-chan struct{}, out chan<- backend.ID) {
func (r *Repository) list(t backend.Type, done <-chan struct{}, out chan<- backend.ID) {
defer close(out)
in := s.be.List(t, done)
in := r.be.List(t, done)
var (
// disable sending on the outCh until we received a job
@@ -671,22 +649,26 @@ func (s *Repository) list(t backend.Type, done <-chan struct{}, out chan<- backe
}
}
func (s *Repository) List(t backend.Type, done <-chan struct{}) <-chan backend.ID {
// List returns a channel that yields all IDs of type t in the backend.
func (r *Repository) List(t backend.Type, done <-chan struct{}) <-chan backend.ID {
outCh := make(chan backend.ID)
go s.list(t, done, outCh)
go r.list(t, done, outCh)
return outCh
}
func (s *Repository) Delete() error {
if b, ok := s.be.(backend.Deleter); ok {
// Delete calls backend.Delete() if implemented, and returns an error
// otherwise.
func (r *Repository) Delete() error {
if b, ok := r.be.(backend.Deleter); ok {
return b.Delete()
}
return errors.New("Delete() called for backend that does not implement this method")
}
func (s *Repository) Close() error {
return s.be.Close()
// Close closes the repository by closing the backend.
func (r *Repository) Close() error {
return r.be.Close()
}


@@ -6,11 +6,13 @@ import (
"crypto/sha256"
"encoding/json"
"io"
"path/filepath"
"testing"
"github.com/restic/restic"
"github.com/restic/restic/backend"
"github.com/restic/restic/pack"
"github.com/restic/restic/repository"
. "github.com/restic/restic/test"
)
@@ -80,7 +82,7 @@ func TestSave(t *testing.T) {
id := backend.Hash(data)
// save
sid, err := repo.Save(pack.Data, data, nil)
sid, err := repo.SaveAndEncrypt(pack.Data, data, nil)
OK(t, err)
Equals(t, id, sid)
@@ -194,3 +196,24 @@ func TestLoadJSONUnpacked(t *testing.T) {
Equals(t, sn.Hostname, sn2.Hostname)
Equals(t, sn.Username, sn2.Username)
}
var repoFixture = filepath.Join("testdata", "test-repo.tar.gz")
func TestLoadIndex(t *testing.T) {
WithTestEnvironment(t, repoFixture, func(repodir string) {
repo := OpenLocalRepo(t, repodir)
OK(t, repo.LoadIndex())
})
}
func BenchmarkLoadIndex(b *testing.B) {
WithTestEnvironment(b, repoFixture, func(repodir string) {
repo := OpenLocalRepo(b, repodir)
b.ResetTimer()
for i := 0; i < b.N; i++ {
repo.SetIndex(repository.NewIndex())
OK(b, repo.LoadIndex())
}
})
}

repository/testdata/test-repo.tar.gz (new binary file, not shown)


@@ -3,13 +3,18 @@ package test_helper
import (
"bytes"
"fmt"
"io/ioutil"
"math/rand"
"os"
"os/exec"
"path/filepath"
"reflect"
"runtime"
"testing"
"github.com/restic/restic/backend"
"github.com/restic/restic/backend/local"
"github.com/restic/restic/repository"
)
// Assert fails the test if the condition is false.
@@ -65,3 +70,56 @@ func Random(seed, count int) []byte {
func RandomReader(seed, size int) *bytes.Reader {
return bytes.NewReader(Random(seed, size))
}
// SetupTarTestFixture extracts the tarFile to outputDir.
func SetupTarTestFixture(t testing.TB, outputDir, tarFile string) {
err := System("sh", "-c", `(cd "$1" && tar xz) < "$2"`,
"sh", outputDir, tarFile)
OK(t, err)
}
// System runs the command and returns an error if it fails. Output is passed
// on to stdout/stderr.
func System(command string, args ...string) error {
cmd := exec.Command(command, args...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()
}
// WithTestEnvironment creates a test environment, extracts the repository
// fixture and calls f with the repository dir.
func WithTestEnvironment(t testing.TB, repoFixture string, f func(repodir string)) {
tempdir, err := ioutil.TempDir(TestTempDir, "restic-test-")
OK(t, err)
fd, err := os.Open(repoFixture)
if err != nil {
panic(err)
}
OK(t, fd.Close())
SetupTarTestFixture(t, tempdir, repoFixture)
f(filepath.Join(tempdir, "repo"))
if !TestCleanup {
t.Logf("leaving temporary directory %v used for test", tempdir)
return
}
OK(t, os.RemoveAll(tempdir))
}
// OpenLocalRepo opens the local repository located at dir.
func OpenLocalRepo(t testing.TB, dir string) *repository.Repository {
be, err := local.Open(dir)
OK(t, err)
repo := repository.New(be)
err = repo.SearchKey(TestPassword)
OK(t, err)
return repo
}