wip

parent 90da66261a
commit f0600c1d5f
@@ -3,10 +3,8 @@ package restic
 import (
 	"encoding/json"
 	"io"
-	"restic/backend"
 	"restic/debug"
 	"restic/pack"
-	"restic/repository"
 	"time"

 	"github.com/pkg/errors"
@@ -14,15 +12,15 @@ import (
 )

 // saveTreeJSON stores a tree in the repository.
-func saveTreeJSON(repo *repository.Repository, item interface{}) (backend.ID, error) {
+func saveTreeJSON(repo Repository, item interface{}) (ID, error) {
 	data, err := json.Marshal(item)
 	if err != nil {
-		return backend.ID{}, errors.Wrap(err, "")
+		return ID{}, errors.Wrap(err, "")
 	}
 	data = append(data, '\n')

 	// check if tree has been saved before
-	id := backend.Hash(data)
+	id := Hash(data)
 	if repo.Index().Has(id, pack.Tree) {
 		return id, nil
 	}
@@ -32,19 +30,19 @@ func saveTreeJSON(repo *repository.Repository, item interface{}) (backend.ID, er

 // ArchiveReader reads from the reader and archives the data. Returned is the
 // resulting snapshot and its ID.
-func ArchiveReader(repo *repository.Repository, p *Progress, rd io.Reader, name string) (*Snapshot, backend.ID, error) {
+func ArchiveReader(repo Repository, p *Progress, rd io.Reader, name string) (*Snapshot, ID, error) {
 	debug.Log("ArchiveReader", "start archiving %s", name)
 	sn, err := NewSnapshot([]string{name})
 	if err != nil {
-		return nil, backend.ID{}, err
+		return nil, ID{}, err
 	}

 	p.Start()
 	defer p.Done()

-	chnker := chunker.New(rd, repo.Config.ChunkerPolynomial)
+	chnker := chunker.New(rd, repo.Config().ChunkerPolynomial())

-	var ids backend.IDs
+	var ids IDs
 	var fileSize uint64

 	for {
@@ -54,15 +52,15 @@ func ArchiveReader(repo *repository.Repository, p *Progress, rd io.Reader, name
 		}

 		if err != nil {
-			return nil, backend.ID{}, errors.Wrap(err, "chunker.Next()")
+			return nil, ID{}, errors.Wrap(err, "chunker.Next()")
 		}

-		id := backend.Hash(chunk.Data)
+		id := Hash(chunk.Data)

 		if !repo.Index().Has(id, pack.Data) {
 			_, err := repo.SaveAndEncrypt(pack.Data, chunk.Data, nil)
 			if err != nil {
-				return nil, backend.ID{}, err
+				return nil, ID{}, err
 			}
 			debug.Log("ArchiveReader", "saved blob %v (%d bytes)\n", id.Str(), chunk.Length)
 		} else {
@@ -96,14 +94,14 @@ func ArchiveReader(repo *repository.Repository, p *Progress, rd io.Reader, name

 	treeID, err := saveTreeJSON(repo, tree)
 	if err != nil {
-		return nil, backend.ID{}, err
+		return nil, ID{}, err
 	}
 	sn.Tree = &treeID
 	debug.Log("ArchiveReader", "tree saved as %v", treeID.Str())

-	id, err := repo.SaveJSONUnpacked(backend.Snapshot, sn)
+	id, err := repo.SaveJSONUnpacked(SnapshotFile, sn)
 	if err != nil {
-		return nil, backend.ID{}, err
+		return nil, ID{}, err
 	}

 	sn.id = &id
@@ -111,12 +109,12 @@ func ArchiveReader(repo *repository.Repository, p *Progress, rd io.Reader, name

 	err = repo.Flush()
 	if err != nil {
-		return nil, backend.ID{}, err
+		return nil, ID{}, err
 	}

 	err = repo.SaveIndex()
 	if err != nil {
-		return nil, backend.ID{}, err
+		return nil, ID{}, err
 	}

 	return sn, id, nil
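For orientation: ArchiveReader is now driven through the new Repository interface instead of a concrete *repository.Repository. A minimal sketch of a caller follows; the helper name is hypothetical, opening the repository is not part of this commit, and passing nil for the *Progress assumes its methods are nil-safe, as they are elsewhere in this codebase.

package example

import (
	"fmt"
	"os"

	"restic"
)

// archiveStdin is a hypothetical helper (not part of this commit) showing the
// call shape of the refactored ArchiveReader against the Repository interface.
func archiveStdin(repo restic.Repository) error {
	// Assumption: a nil *Progress disables progress reporting.
	sn, id, err := restic.ArchiveReader(repo, nil, os.Stdin, "stdin")
	if err != nil {
		return err
	}

	fmt.Printf("saved snapshot %v (tree %v)\n", id.Str(), sn.Tree.Str())
	return nil
}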
@@ -12,12 +12,10 @@ import (

 	"github.com/pkg/errors"

-	"restic/backend"
 	"restic/debug"
 	"restic/fs"
 	"restic/pack"
 	"restic/pipe"
-	"restic/repository"

 	"github.com/restic/chunker"
 )
@@ -32,9 +30,9 @@ var archiverAllowAllFiles = func(string, os.FileInfo) bool { return true }

 // Archiver is used to backup a set of directories.
 type Archiver struct {
-	repo       *repository.Repository
+	repo       Repository
 	knownBlobs struct {
-		backend.IDSet
+		IDSet
 		sync.Mutex
 	}

@@ -46,15 +44,15 @@ type Archiver struct {
 }

 // NewArchiver returns a new archiver.
-func NewArchiver(repo *repository.Repository) *Archiver {
+func NewArchiver(repo Repository) *Archiver {
 	arch := &Archiver{
 		repo:      repo,
 		blobToken: make(chan struct{}, maxConcurrentBlobs),
 		knownBlobs: struct {
-			backend.IDSet
+			IDSet
 			sync.Mutex
 		}{
-			IDSet: backend.NewIDSet(),
+			IDSet: NewIDSet(),
 		},
 	}

@@ -72,7 +70,7 @@ func NewArchiver(repo *repository.Repository) *Archiver {
 // When the blob is not known, false is returned and the blob is added to the
 // list. This means that the caller false is returned to is responsible to save
 // the blob to the backend.
-func (arch *Archiver) isKnownBlob(id backend.ID, t pack.BlobType) bool {
+func (arch *Archiver) isKnownBlob(id ID, t pack.BlobType) bool {
 	arch.knownBlobs.Lock()
 	defer arch.knownBlobs.Unlock()

@@ -91,7 +89,7 @@ func (arch *Archiver) isKnownBlob(id backend.ID, t pack.BlobType) bool {
 }

 // Save stores a blob read from rd in the repository.
-func (arch *Archiver) Save(t pack.BlobType, data []byte, id backend.ID) error {
+func (arch *Archiver) Save(t pack.BlobType, data []byte, id ID) error {
 	debug.Log("Archiver.Save", "Save(%v, %v)\n", t, id.Str())

 	if arch.isKnownBlob(id, pack.Data) {
@@ -110,15 +108,15 @@ func (arch *Archiver) Save(t pack.BlobType, data []byte, id backend.ID) error {
 }

 // SaveTreeJSON stores a tree in the repository.
-func (arch *Archiver) SaveTreeJSON(item interface{}) (backend.ID, error) {
+func (arch *Archiver) SaveTreeJSON(item interface{}) (ID, error) {
 	data, err := json.Marshal(item)
 	if err != nil {
-		return backend.ID{}, errors.Wrap(err, "Marshal")
+		return ID{}, errors.Wrap(err, "Marshal")
 	}
 	data = append(data, '\n')

 	// check if tree has been saved before
-	id := backend.Hash(data)
+	id := Hash(data)
 	if arch.isKnownBlob(id, pack.Tree) {
 		return id, nil
 	}
@@ -151,14 +149,14 @@ func (arch *Archiver) reloadFileIfChanged(node *Node, file fs.File) (*Node, erro
 }

 type saveResult struct {
-	id    backend.ID
+	id    ID
 	bytes uint64
 }

 func (arch *Archiver) saveChunk(chunk chunker.Chunk, p *Progress, token struct{}, file fs.File, resultChannel chan<- saveResult) {
 	defer freeBuf(chunk.Data)

-	id := backend.Hash(chunk.Data)
+	id := Hash(chunk.Data)
 	err := arch.Save(pack.Data, chunk.Data, id)
 	// TODO handle error
 	if err != nil {
@@ -188,7 +186,7 @@ func updateNodeContent(node *Node, results []saveResult) error {
 	debug.Log("Archiver.Save", "checking size for file %s", node.path)

 	var bytes uint64
-	node.Content = make([]backend.ID, len(results))
+	node.Content = make([]ID, len(results))

 	for i, b := range results {
 		node.Content[i] = b.id
@@ -220,7 +218,7 @@ func (arch *Archiver) SaveFile(p *Progress, node *Node) error {
 		return err
 	}

-	chnker := chunker.New(file, arch.repo.Config.ChunkerPolynomial)
+	chnker := chunker.New(file, arch.repo.Config().ChunkerPolynomial())
 	resultChannels := [](<-chan saveResult){}

 	for {
@@ -290,7 +288,7 @@ func (arch *Archiver) fileWorker(wg *sync.WaitGroup, p *Progress, done <-chan st
 			// check if all content is still available in the repository
 			contentMissing := false
 			for _, blob := range oldNode.blobs {
-				if ok, err := arch.repo.Backend().Test(backend.Data, blob.Storage.String()); !ok || err != nil {
+				if ok, err := arch.repo.Backend().Test(DataFile, blob.Storage.String()); !ok || err != nil {
 					debug.Log("Archiver.fileWorker", " %v not using old data, %v (%v) is missing", e.Path(), blob.ID.Str(), blob.Storage.Str())
 					contentMissing = true
 					break
@@ -635,7 +633,7 @@ func (p baseNameSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
 // Snapshot creates a snapshot of the given paths. If parentID is set, this is
 // used to compare the files to the ones archived at the time this snapshot was
 // taken.
-func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID) (*Snapshot, backend.ID, error) {
+func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *ID) (*Snapshot, ID, error) {
 	paths = unique(paths)
 	sort.Sort(baseNameSlice(paths))

@@ -653,7 +651,7 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID
 	// create new snapshot
 	sn, err := NewSnapshot(paths)
 	if err != nil {
-		return nil, backend.ID{}, err
+		return nil, ID{}, err
 	}
 	sn.Excludes = arch.Excludes

@@ -666,7 +664,7 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID
 		// load parent snapshot
 		parent, err := LoadSnapshot(arch.repo, *parentID)
 		if err != nil {
-			return nil, backend.ID{}, err
+			return nil, ID{}, err
 		}

 		// start walker on old tree
@@ -735,9 +733,9 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID
 	sn.Tree = root.Subtree

 	// save snapshot
-	id, err := arch.repo.SaveJSONUnpacked(backend.Snapshot, sn)
+	id, err := arch.repo.SaveJSONUnpacked(SnapshotFile, sn)
 	if err != nil {
-		return nil, backend.ID{}, err
+		return nil, ID{}, err
 	}

 	// store ID in snapshot struct
@@ -747,14 +745,14 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, parentID *backend.ID
 	// flush repository
 	err = arch.repo.Flush()
 	if err != nil {
-		return nil, backend.ID{}, err
+		return nil, ID{}, err
 	}

 	// save index
 	err = arch.repo.SaveIndex()
 	if err != nil {
 		debug.Log("Archiver.Snapshot", "error saving index: %v", err)
-		return nil, backend.ID{}, err
+		return nil, ID{}, err
 	}

 	debug.Log("Archiver.Snapshot", "saved indexes")
@@ -11,7 +11,6 @@ import (
 	"restic/checker"
 	"restic/crypto"
 	"restic/pack"
-	"restic/repository"
 	. "restic/test"

 	"github.com/pkg/errors"
@@ -302,7 +301,7 @@ func getRandomData(seed int, size int) []chunker.Chunk {
 	return chunks
 }

-func createAndInitChecker(t *testing.T, repo *repository.Repository) *checker.Checker {
+func createAndInitChecker(t *testing.T, repo Repository) *checker.Checker {
 	chkr := checker.New(repo)

 	hints, errs := chkr.LoadIndex()
@@ -1,28 +1,15 @@
 package restic

-// FileType is the type of a file in the backend.
-type FileType string
-
-// These are the different data types a backend can store.
-const (
-	DataFile     FileType = "data"
-	KeyFile               = "key"
-	LockFile              = "lock"
-	SnapshotFile          = "snapshot"
-	IndexFile             = "index"
-	ConfigFile            = "config"
-)
-
 // Backend is used to store and access data.
 type Backend interface {
 	// Location returns a string that describes the type and location of the
 	// repository.
 	Location() string

-	// Test a boolean value whether a Blob with the name and type exists.
+	// Test a boolean value whether a File with the name and type exists.
 	Test(t FileType, name string) (bool, error)

-	// Remove removes a Blob with type t and name.
+	// Remove removes a File with type t and name.
 	Remove(t FileType, name string) error

 	// Close the backend
@@ -37,10 +24,10 @@ type Backend interface {
 	// Save stores the data in the backend under the given handle.
 	Save(h Handle, p []byte) error

-	// Stat returns information about the blob identified by h.
-	Stat(h Handle) (BlobInfo, error)
+	// Stat returns information about the File identified by h.
+	Stat(h Handle) (FileInfo, error)

-	// List returns a channel that yields all names of blobs of type t in an
+	// List returns a channel that yields all names of files of type t in an
 	// arbitrary order. A goroutine is started for this. If the channel done is
 	// closed, sending stops.
 	List(t FileType, done <-chan struct{}) <-chan string
@@ -49,7 +36,6 @@ type Backend interface {
 	Delete() error
 }

-// BlobInfo is returned by Stat() and contains information about a stored blob.
-type BlobInfo struct {
-	Size int64
-}
+// FileInfo is returned by Stat() and contains information about a file in the
+// backend.
+type FileInfo struct{ Size int64 }
@@ -1,17 +0,0 @@
-package backend
-
-import (
-	"crypto/rand"
-	"io"
-)
-
-// RandomID retuns a randomly generated ID. This is mainly used for testing.
-// When reading from rand fails, the function panics.
-func RandomID() ID {
-	id := ID{}
-	_, err := io.ReadFull(rand.Reader, id[:])
-	if err != nil {
-		panic(err)
-	}
-	return id
-}
src/restic/blob.go (new file, 103 lines)
@@ -0,0 +1,103 @@
+package restic
+
+import (
+	"errors"
+	"fmt"
+)
+
+type Blob struct {
+	ID          *ID    `json:"id,omitempty"`
+	Size        uint64 `json:"size,omitempty"`
+	Storage     *ID    `json:"sid,omitempty"`   // encrypted ID
+	StorageSize uint64 `json:"ssize,omitempty"` // encrypted Size
+}
+
+type Blobs []Blob
+
+func (b Blob) Valid() bool {
+	if b.ID == nil || b.Storage == nil || b.StorageSize == 0 {
+		return false
+	}
+
+	return true
+}
+
+func (b Blob) String() string {
+	return fmt.Sprintf("Blob<%s (%d) -> %s (%d)>",
+		b.ID.Str(), b.Size,
+		b.Storage.Str(), b.StorageSize)
+}
+
+// Compare compares two blobs by comparing the ID and the size. It returns -1,
+// 0, or 1.
+func (b Blob) Compare(other Blob) int {
+	if res := b.ID.Compare(*other.ID); res != 0 {
+		return res
+	}
+
+	if b.Size < other.Size {
+		return -1
+	}
+	if b.Size > other.Size {
+		return 1
+	}
+
+	return 0
+}
+
+// BlobHandle identifies a blob of a given type.
+type BlobHandle struct {
+	ID   ID
+	Type BlobType
+}
+
+func (h BlobHandle) String() string {
+	return fmt.Sprintf("<%s/%s>", h.Type, h.ID.Str())
+}
+
+// BlobType specifies what a blob stored in a pack is.
+type BlobType uint8
+
+// These are the blob types that can be stored in a pack.
+const (
+	InvalidBlob BlobType = iota
+	DataBlob
+	TreeBlob
+)
+
+func (t BlobType) String() string {
+	switch t {
+	case DataBlob:
+		return "data"
+	case TreeBlob:
+		return "tree"
+	}
+
+	return fmt.Sprintf("<BlobType %d>", t)
+}
+
+// MarshalJSON encodes the BlobType into JSON.
+func (t BlobType) MarshalJSON() ([]byte, error) {
+	switch t {
+	case DataBlob:
+		return []byte(`"data"`), nil
+	case TreeBlob:
+		return []byte(`"tree"`), nil
+	}
+
+	return nil, errors.New("unknown blob type")
+}
+
+// UnmarshalJSON decodes the BlobType from JSON.
+func (t *BlobType) UnmarshalJSON(buf []byte) error {
+	switch string(buf) {
+	case `"data"`:
+		*t = DataBlob
+	case `"tree"`:
+		*t = TreeBlob
+	default:
+		return errors.New("unknown blob type")
+	}
+
+	return nil
+}
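Blob.Compare defines a total order (first by ID, then by plaintext size), which makes Blobs sortable with the standard library. A small illustrative sketch; the wrapper type and function below are hypothetical and not part of this commit:

package example

import (
	"sort"

	"restic"
)

// byIDAndSize is a hypothetical sort.Interface wrapper over restic.Blobs
// built on Blob.Compare.
type byIDAndSize restic.Blobs

func (s byIDAndSize) Len() int           { return len(s) }
func (s byIDAndSize) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s byIDAndSize) Less(i, j int) bool { return s[i].Compare(s[j]) < 0 }

// sortBlobs sorts blobs by ID, breaking ties by size.
func sortBlobs(blobs restic.Blobs) {
	sort.Sort(byIDAndSize(blobs))
}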
src/restic/blob_test.go (new file, 41 lines)
@@ -0,0 +1,41 @@
+package restic
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+var blobTypeJSON = []struct {
+	t   BlobType
+	res string
+}{
+	{DataBlob, `"data"`},
+	{TreeBlob, `"tree"`},
+}
+
+func TestBlobTypeJSON(t *testing.T) {
+	for _, test := range blobTypeJSON {
+		// test serialize
+		buf, err := json.Marshal(test.t)
+		if err != nil {
+			t.Error(err)
+			continue
+		}
+		if test.res != string(buf) {
+			t.Errorf("want %q, got %q", test.res, string(buf))
+			continue
+		}
+
+		// test unserialize
+		var v BlobType
+		err = json.Unmarshal([]byte(test.res), &v)
+		if err != nil {
+			t.Error(err)
+			continue
+		}
+		if test.t != v {
+			t.Errorf("want %v, got %v", test.t, v)
+			continue
+		}
+	}
+}
@@ -6,6 +6,19 @@ import (
 	"github.com/pkg/errors"
 )

+// FileType is the type of a file in the backend.
+type FileType string
+
+// These are the different data types a backend can store.
+const (
+	DataFile     FileType = "data"
+	KeyFile               = "key"
+	LockFile              = "lock"
+	SnapshotFile          = "snapshot"
+	IndexFile             = "index"
+	ConfigFile            = "config"
+)
+
 // Handle is used to store and access data in a backend.
 type Handle struct {
 	FileType FileType
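Moving FileType and its constants next to Handle keeps all backend file naming in the core package. A hedged sketch of building a Handle for a snapshot file; it assumes Handle also carries a Name string field, which is not visible in this hunk:

package example

import "restic"

// snapshotHandle is a hypothetical helper: it addresses the snapshot file
// with the given ID (assumes Handle has a Name string field).
func snapshotHandle(id restic.ID) restic.Handle {
	return restic.Handle{FileType: restic.SnapshotFile, Name: id.String()}
}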
@@ -1,15 +1,11 @@
 package restic

-import (
-	"restic/backend"
-	"restic/pack"
-	"restic/repository"
-)
+import "restic/pack"

 // FindUsedBlobs traverses the tree ID and adds all seen blobs (trees and data
 // blobs) to the set blobs. The tree blobs in the `seen` BlobSet will not be visited
 // again.
-func FindUsedBlobs(repo *repository.Repository, treeID backend.ID, blobs pack.BlobSet, seen pack.BlobSet) error {
+func FindUsedBlobs(repo Repository, treeID ID, blobs pack.BlobSet, seen pack.BlobSet) error {
 	blobs.Insert(pack.Handle{ID: treeID, Type: pack.Tree})

 	tree, err := LoadTree(repo, treeID)
@@ -1,4 +1,4 @@
-package restic
+package restic_test

 import (
 	"bufio"
@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"os"
 	"path/filepath"
+	"restic"
 	"sort"
 	"testing"
 	"time"
@@ -92,7 +93,7 @@ func TestFindUsedBlobs(t *testing.T) {

 	for i, sn := range snapshots {
 		usedBlobs := pack.NewBlobSet()
-		err := FindUsedBlobs(repo, *sn.Tree, usedBlobs, pack.NewBlobSet())
+		err := restic.FindUsedBlobs(repo, *sn.Tree, usedBlobs, pack.NewBlobSet())
 		if err != nil {
 			t.Errorf("FindUsedBlobs returned error: %v", err)
 			continue
@@ -128,7 +129,7 @@ func BenchmarkFindUsedBlobs(b *testing.B) {
 	for i := 0; i < b.N; i++ {
 		seen := pack.NewBlobSet()
 		blobs := pack.NewBlobSet()
-		err := FindUsedBlobs(repo, *sn.Tree, blobs, seen)
+		err := restic.FindUsedBlobs(repo, *sn.Tree, blobs, seen)
 		if err != nil {
 			b.Error(err)
 		}
@@ -11,9 +11,7 @@ import (

 	"github.com/pkg/errors"

-	"restic/backend"
 	"restic/debug"
-	"restic/repository"
 )

 // Lock represents a process locking the repository for an operation.
@@ -33,8 +31,8 @@ type Lock struct {
 	UID uint32 `json:"uid,omitempty"`
 	GID uint32 `json:"gid,omitempty"`

-	repo   *repository.Repository
-	lockID *backend.ID
+	repo   Repository
+	lockID *ID
 }

 // ErrAlreadyLocked is returned when NewLock or NewExclusiveLock are unable to
@@ -59,20 +57,20 @@ func IsAlreadyLocked(err error) bool {
 // NewLock returns a new, non-exclusive lock for the repository. If an
 // exclusive lock is already held by another process, ErrAlreadyLocked is
 // returned.
-func NewLock(repo *repository.Repository) (*Lock, error) {
+func NewLock(repo Repository) (*Lock, error) {
 	return newLock(repo, false)
 }

 // NewExclusiveLock returns a new, exclusive lock for the repository. If
 // another lock (normal and exclusive) is already held by another process,
 // ErrAlreadyLocked is returned.
-func NewExclusiveLock(repo *repository.Repository) (*Lock, error) {
+func NewExclusiveLock(repo Repository) (*Lock, error) {
 	return newLock(repo, true)
 }

 const waitBeforeLockCheck = 200 * time.Millisecond

-func newLock(repo *repository.Repository, excl bool) (*Lock, error) {
+func newLock(repo Repository, excl bool) (*Lock, error) {
 	lock := &Lock{
 		Time: time.Now(),
 		PID:  os.Getpid(),
@@ -128,7 +126,7 @@ func (l *Lock) fillUserInfo() error {
 // non-exclusive lock is to be created, an error is only returned when an
 // exclusive lock is found.
 func (l *Lock) checkForOtherLocks() error {
-	return eachLock(l.repo, func(id backend.ID, lock *Lock, err error) error {
+	return eachLock(l.repo, func(id ID, lock *Lock, err error) error {
 		if l.lockID != nil && id.Equal(*l.lockID) {
 			return nil
 		}
@@ -150,11 +148,11 @@ func (l *Lock) checkForOtherLocks() error {
 	})
 }

-func eachLock(repo *repository.Repository, f func(backend.ID, *Lock, error) error) error {
+func eachLock(repo Repository, f func(ID, *Lock, error) error) error {
 	done := make(chan struct{})
 	defer close(done)

-	for id := range repo.List(backend.Lock, done) {
+	for id := range repo.List(LockFile, done) {
 		lock, err := LoadLock(repo, id)
 		err = f(id, lock, err)
 		if err != nil {
@@ -166,10 +164,10 @@ func eachLock(repo *repository.Repository, f func(backend.ID, *Lock, error) erro
 }

 // createLock acquires the lock by creating a file in the repository.
-func (l *Lock) createLock() (backend.ID, error) {
-	id, err := l.repo.SaveJSONUnpacked(backend.Lock, l)
+func (l *Lock) createLock() (ID, error) {
+	id, err := l.repo.SaveJSONUnpacked(LockFile, l)
 	if err != nil {
-		return backend.ID{}, err
+		return ID{}, err
 	}

 	return id, nil
@@ -181,7 +179,7 @@ func (l *Lock) Unlock() error {
 		return nil
 	}

-	return l.repo.Backend().Remove(backend.Lock, l.lockID.String())
+	return l.repo.Backend().Remove(LockFile, l.lockID.String())
 }

 var staleTimeout = 30 * time.Minute
@@ -229,7 +227,7 @@ func (l *Lock) Refresh() error {
 		return err
 	}

-	err = l.repo.Backend().Remove(backend.Lock, l.lockID.String())
+	err = l.repo.Backend().Remove(LockFile, l.lockID.String())
 	if err != nil {
 		return err
 	}
@@ -269,9 +267,9 @@ func init() {
 }

 // LoadLock loads and unserializes a lock from a repository.
-func LoadLock(repo *repository.Repository, id backend.ID) (*Lock, error) {
+func LoadLock(repo Repository, id ID) (*Lock, error) {
 	lock := &Lock{}
-	if err := repo.LoadJSONUnpacked(backend.Lock, id, lock); err != nil {
+	if err := repo.LoadJSONUnpacked(LockFile, id, lock); err != nil {
 		return nil, err
 	}
 	lock.lockID = &id
@@ -280,15 +278,15 @@ func LoadLock(repo *repository.Repository, id backend.ID) (*Lock, error) {
 }

 // RemoveStaleLocks deletes all locks detected as stale from the repository.
-func RemoveStaleLocks(repo *repository.Repository) error {
-	return eachLock(repo, func(id backend.ID, lock *Lock, err error) error {
+func RemoveStaleLocks(repo Repository) error {
+	return eachLock(repo, func(id ID, lock *Lock, err error) error {
 		// ignore locks that cannot be loaded
 		if err != nil {
 			return nil
 		}

 		if lock.Stale() {
-			return repo.Backend().Remove(backend.Lock, id.String())
+			return repo.Backend().Remove(LockFile, id.String())
 		}

 		return nil
@@ -296,8 +294,8 @@ func RemoveStaleLocks(repo *repository.Repository) error {
 }

 // RemoveAllLocks removes all locks forcefully.
-func RemoveAllLocks(repo *repository.Repository) error {
-	return eachLock(repo, func(id backend.ID, lock *Lock, err error) error {
-		return repo.Backend().Remove(backend.Lock, id.String())
+func RemoveAllLocks(repo Repository) error {
+	return eachLock(repo, func(id ID, lock *Lock, err error) error {
+		return repo.Backend().Remove(LockFile, id.String())
 	})
 }
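The lock API now accepts any Repository implementation. A sketch of the typical lifecycle, taking a non-exclusive lock for the duration of an operation; the wrapper below is illustrative and not part of this commit:

package example

import "restic"

// withLock is a hypothetical wrapper: it acquires a non-exclusive lock, runs
// fn, and releases the lock afterwards. The error from Unlock is deliberately
// dropped here; a real caller may want to log it.
func withLock(repo restic.Repository, fn func() error) error {
	lock, err := restic.NewLock(repo)
	if err != nil {
		return err // may be ErrAlreadyLocked; test with restic.IsAlreadyLocked
	}
	defer lock.Unlock()

	return fn()
}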
@@ -6,8 +6,6 @@ import (
 	"time"

 	"restic"
-	"restic/backend"
-	"restic/repository"
 	. "restic/test"
 )

@@ -92,18 +90,18 @@ func TestExclusiveLockOnLockedRepo(t *testing.T) {
 	OK(t, elock.Unlock())
 }

-func createFakeLock(repo *repository.Repository, t time.Time, pid int) (backend.ID, error) {
+func createFakeLock(repo restic.Repository, t time.Time, pid int) (restic.ID, error) {
 	hostname, err := os.Hostname()
 	if err != nil {
-		return backend.ID{}, err
+		return restic.ID{}, err
 	}

 	newLock := &restic.Lock{Time: t, PID: pid, Hostname: hostname}
-	return repo.SaveJSONUnpacked(backend.Lock, &newLock)
+	return repo.SaveJSONUnpacked(restic.LockFile, &newLock)
 }

-func removeLock(repo *repository.Repository, id backend.ID) error {
-	return repo.Backend().Remove(backend.Lock, id.String())
+func removeLock(repo restic.Repository, id restic.ID) error {
+	return repo.Backend().Remove(restic.LockFile, id.String())
 }

 var staleLockTests = []struct {
@@ -162,8 +160,8 @@ func TestLockStale(t *testing.T) {
 	}
 }

-func lockExists(repo *repository.Repository, t testing.TB, id backend.ID) bool {
-	exists, err := repo.Backend().Test(backend.Lock, id.String())
+func lockExists(repo restic.Repository, t testing.TB, id restic.ID) bool {
+	exists, err := repo.Backend().Test(restic.LockFile, id.String())
 	OK(t, err)

 	return exists
@@ -224,8 +222,8 @@ func TestLockRefresh(t *testing.T) {
 	lock, err := restic.NewLock(repo)
 	OK(t, err)

-	var lockID *backend.ID
-	for id := range repo.List(backend.Lock, nil) {
+	var lockID *restic.ID
+	for id := range repo.List(restic.LockFile, nil) {
 		if lockID != nil {
 			t.Error("more than one lock found")
 		}
@@ -234,8 +232,8 @@ func TestLockRefresh(t *testing.T) {

 	OK(t, lock.Refresh())

-	var lockID2 *backend.ID
-	for id := range repo.List(backend.Lock, nil) {
+	var lockID2 *restic.ID
+	for id := range repo.List(restic.LockFile, nil) {
 		if lockID2 != nil {
 			t.Error("more than one lock found")
 		}
@@ -14,32 +14,30 @@ import (

 	"runtime"

-	"restic/backend"
 	"restic/debug"
 	"restic/fs"
 	"restic/pack"
-	"restic/repository"
 )

 // Node is a file, directory or other item in a backup.
 type Node struct {
 	Name       string       `json:"name"`
 	FileType   string       `json:"type"`
 	Mode       os.FileMode  `json:"mode,omitempty"`
 	ModTime    time.Time    `json:"mtime,omitempty"`
 	AccessTime time.Time    `json:"atime,omitempty"`
 	ChangeTime time.Time    `json:"ctime,omitempty"`
 	UID        uint32       `json:"uid"`
 	GID        uint32       `json:"gid"`
 	User       string       `json:"user,omitempty"`
 	Group      string       `json:"group,omitempty"`
 	Inode      uint64       `json:"inode,omitempty"`
 	Size       uint64       `json:"size,omitempty"`
 	Links      uint64       `json:"links,omitempty"`
 	LinkTarget string       `json:"linktarget,omitempty"`
 	Device     uint64       `json:"device,omitempty"`
-	Content    []backend.ID `json:"content"`
-	Subtree    *backend.ID  `json:"subtree,omitempty"`
+	Content    IDs          `json:"content"`
+	Subtree    *ID          `json:"subtree,omitempty"`

 	Error string `json:"error,omitempty"`

@@ -47,7 +45,7 @@ type Node struct {

 	path  string
 	err   error
-	blobs repository.Blobs
+	blobs Blobs
 }

 func (node Node) String() string {
@@ -108,7 +106,7 @@ func nodeTypeFromFileInfo(fi os.FileInfo) string {
 }

 // CreateAt creates the node at the given path and restores all the meta data.
-func (node *Node) CreateAt(path string, repo *repository.Repository) error {
+func (node *Node) CreateAt(path string, repo Repository) error {
 	debug.Log("Node.CreateAt", "create node %v at %v", node.Name, path)

 	switch node.FileType {
@@ -202,7 +200,7 @@ func (node Node) createDirAt(path string) error {
 	return nil
 }

-func (node Node) createFileAt(path string, repo *repository.Repository) error {
+func (node Node) createFileAt(path string, repo Repository) error {
 	f, err := fs.OpenFile(path, os.O_CREATE|os.O_WRONLY, 0600)
 	defer f.Close()

@@ -1,4 +1,4 @@
-package repository
+package restic

 import (
 	"io"
@@ -1,6 +1,10 @@
 package restic

-import "restic/repository"
+import (
+	"restic/pack"
+
+	"github.com/restic/chunker"
+)

 // Repository stores data in a backend. It provides high-level functions and
 // transparently encrypts/decrypts data.
@@ -9,5 +13,43 @@ type Repository interface {
 	// Backend returns the backend used by the repository
 	Backend() Backend

-	SetIndex(*repository.MasterIndex)
+	SetIndex(interface{})
+
+	Index() Index
+	SaveFullIndex() error
+
+	SaveJSON(pack.BlobType, interface{}) (ID, error)
+
+	Config() Config
+
+	SaveAndEncrypt(pack.BlobType, []byte, *ID) (ID, error)
+	SaveJSONUnpacked(FileType, interface{}) (ID, error)
+	SaveIndex() error
+
+	LoadJSONPack(pack.BlobType, ID, interface{}) error
+	LoadJSONUnpacked(FileType, ID, interface{}) error
+	LoadBlob(ID, pack.BlobType, []byte) ([]byte, error)
+
+	LookupBlobSize(ID, pack.BlobType) (uint, error)
+
+	List(FileType, <-chan struct{}) <-chan ID
+
+	Flush() error
+}
+
+type Index interface {
+	Has(ID, pack.BlobType) bool
+	Lookup(ID, pack.BlobType) ([]PackedBlob, error)
+}
+
+type Config interface {
+	ChunkerPolynomial() chunker.Pol
+}
+
+type PackedBlob interface {
+	Type() pack.BlobType
+	Length() uint
+	ID() ID
+	Offset() uint
+	PackID() ID
 }
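These narrow interfaces are what let the core package compile against Repository, Index, and Config instead of the concrete types from package repository, which also makes stubbing easy in tests. A hedged sketch of a minimal Index stub (illustrative only, not part of this commit):

package example

import (
	"errors"

	"restic"
	"restic/pack"
)

// emptyIndex is a hypothetical stub satisfying restic.Index: it reports
// every blob as unknown.
type emptyIndex struct{}

func (emptyIndex) Has(id restic.ID, t pack.BlobType) bool { return false }

func (emptyIndex) Lookup(id restic.ID, t pack.BlobType) ([]restic.PackedBlob, error) {
	return nil, errors.New("blob not found in index")
}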
@@ -1,47 +0,0 @@
-package repository
-
-import (
-	"fmt"
-
-	"restic/backend"
-)
-
-type Blob struct {
-	ID          *backend.ID `json:"id,omitempty"`
-	Size        uint64      `json:"size,omitempty"`
-	Storage     *backend.ID `json:"sid,omitempty"`   // encrypted ID
-	StorageSize uint64      `json:"ssize,omitempty"` // encrypted Size
-}
-
-type Blobs []Blob
-
-func (b Blob) Valid() bool {
-	if b.ID == nil || b.Storage == nil || b.StorageSize == 0 {
-		return false
-	}
-
-	return true
-}
-
-func (b Blob) String() string {
-	return fmt.Sprintf("Blob<%s (%d) -> %s (%d)>",
-		b.ID.Str(), b.Size,
-		b.Storage.Str(), b.StorageSize)
-}
-
-// Compare compares two blobs by comparing the ID and the size. It returns -1,
-// 0, or 1.
-func (b Blob) Compare(other Blob) int {
-	if res := b.ID.Compare(*other.ID); res != 0 {
-		return res
-	}
-
-	if b.Size < other.Size {
-		return -1
-	}
-	if b.Size > other.Size {
-		return 1
-	}
-
-	return 0
-}
@@ -5,11 +5,11 @@ import (
 	"crypto/sha256"
 	"encoding/hex"
 	"io"
+	"restic"
 	"testing"

 	"github.com/pkg/errors"

-	"restic/backend"
 	"restic/debug"

 	"github.com/restic/chunker"
@@ -31,12 +31,12 @@ const RepoVersion = 1

 // JSONUnpackedSaver saves unpacked JSON.
 type JSONUnpackedSaver interface {
-	SaveJSONUnpacked(backend.Type, interface{}) (backend.ID, error)
+	SaveJSONUnpacked(restic.FileType, interface{}) (restic.ID, error)
 }

 // JSONUnpackedLoader loads unpacked JSON.
 type JSONUnpackedLoader interface {
-	LoadJSONUnpacked(backend.Type, backend.ID, interface{}) error
+	LoadJSONUnpacked(restic.FileType, restic.ID, interface{}) error
 }

 // CreateConfig creates a config file with a randomly selected polynomial and
@@ -87,7 +87,7 @@ func LoadConfig(r JSONUnpackedLoader) (Config, error) {
 		cfg Config
 	)

-	err := r.LoadJSONUnpacked(backend.Config, backend.ID{}, &cfg)
+	err := r.LoadJSONUnpacked(restic.ConfigFile, restic.ID{}, &cfg)
 	if err != nil {
 		return Config{}, err
 	}
@@ -1,46 +1,46 @@
 package repository_test

 import (
+	"restic"
 	"testing"

-	"restic/backend"
 	"restic/repository"
 	. "restic/test"
 )

-type saver func(backend.Type, interface{}) (backend.ID, error)
+type saver func(restic.FileType, interface{}) (restic.ID, error)

-func (s saver) SaveJSONUnpacked(t backend.Type, arg interface{}) (backend.ID, error) {
+func (s saver) SaveJSONUnpacked(t restic.FileType, arg interface{}) (restic.ID, error) {
 	return s(t, arg)
 }

-type loader func(backend.Type, backend.ID, interface{}) error
+type loader func(restic.FileType, restic.ID, interface{}) error

-func (l loader) LoadJSONUnpacked(t backend.Type, id backend.ID, arg interface{}) error {
+func (l loader) LoadJSONUnpacked(t restic.FileType, id restic.ID, arg interface{}) error {
 	return l(t, id, arg)
 }

 func TestConfig(t *testing.T) {
 	resultConfig := repository.Config{}
-	save := func(tpe backend.Type, arg interface{}) (backend.ID, error) {
-		Assert(t, tpe == backend.Config,
+	save := func(tpe restic.FileType, arg interface{}) (restic.ID, error) {
+		Assert(t, tpe == restic.ConfigFile,
 			"wrong backend type: got %v, wanted %v",
-			tpe, backend.Config)
+			tpe, restic.ConfigFile)

 		cfg := arg.(repository.Config)
 		resultConfig = cfg
-		return backend.ID{}, nil
+		return restic.ID{}, nil
 	}

 	cfg1, err := repository.CreateConfig()
 	OK(t, err)

-	_, err = saver(save).SaveJSONUnpacked(backend.Config, cfg1)
+	_, err = saver(save).SaveJSONUnpacked(restic.ConfigFile, cfg1)

-	load := func(tpe backend.Type, id backend.ID, arg interface{}) error {
-		Assert(t, tpe == backend.Config,
+	load := func(tpe restic.FileType, id restic.ID, arg interface{}) error {
+		Assert(t, tpe == restic.ConfigFile,
 			"wrong backend type: got %v, wanted %v",
-			tpe, backend.Config)
+			tpe, restic.ConfigFile)

 		cfg := arg.(*repository.Config)
 		*cfg = resultConfig
@ -5,12 +5,12 @@ import (
|
|||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"restic"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
|
||||||
"restic/backend"
|
|
||||||
"restic/crypto"
|
"restic/crypto"
|
||||||
"restic/debug"
|
"restic/debug"
|
||||||
"restic/pack"
|
"restic/pack"
|
||||||
@ -21,14 +21,14 @@ type Index struct {
|
|||||||
m sync.Mutex
|
m sync.Mutex
|
||||||
pack map[pack.Handle][]indexEntry
|
pack map[pack.Handle][]indexEntry
|
||||||
|
|
||||||
final bool // set to true for all indexes read from the backend ("finalized")
|
final bool // set to true for all indexes read from the backend ("finalized")
|
||||||
id backend.ID // set to the ID of the index when it's finalized
|
id restic.ID // set to the ID of the index when it's finalized
|
||||||
supersedes backend.IDs
|
supersedes restic.IDs
|
||||||
created time.Time
|
created time.Time
|
||||||
}
|
}
|
||||||
|
|
||||||
type indexEntry struct {
|
type indexEntry struct {
|
||||||
packID backend.ID
|
packID restic.ID
|
||||||
offset uint
|
offset uint
|
||||||
length uint
|
length uint
|
||||||
}
|
}
|
||||||
@ -112,7 +112,7 @@ func (idx *Index) Store(blob PackedBlob) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Lookup queries the index for the blob ID and returns a PackedBlob.
|
// Lookup queries the index for the blob ID and returns a PackedBlob.
|
||||||
func (idx *Index) Lookup(id backend.ID, tpe pack.BlobType) (blobs []PackedBlob, err error) {
|
func (idx *Index) Lookup(id restic.ID, tpe pack.BlobType) (blobs []PackedBlob, err error) {
|
||||||
idx.m.Lock()
|
idx.m.Lock()
|
||||||
defer idx.m.Unlock()
|
defer idx.m.Unlock()
|
||||||
|
|
||||||
@ -144,7 +144,7 @@ func (idx *Index) Lookup(id backend.ID, tpe pack.BlobType) (blobs []PackedBlob,
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ListPack returns a list of blobs contained in a pack.
|
// ListPack returns a list of blobs contained in a pack.
|
||||||
func (idx *Index) ListPack(id backend.ID) (list []PackedBlob) {
|
func (idx *Index) ListPack(id restic.ID) (list []PackedBlob) {
|
||||||
idx.m.Lock()
|
idx.m.Lock()
|
||||||
defer idx.m.Unlock()
|
defer idx.m.Unlock()
|
||||||
|
|
||||||
@ -166,7 +166,7 @@ func (idx *Index) ListPack(id backend.ID) (list []PackedBlob) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Has returns true iff the id is listed in the index.
|
// Has returns true iff the id is listed in the index.
|
||||||
func (idx *Index) Has(id backend.ID, tpe pack.BlobType) bool {
|
func (idx *Index) Has(id restic.ID, tpe pack.BlobType) bool {
|
||||||
_, err := idx.Lookup(id, tpe)
|
_, err := idx.Lookup(id, tpe)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
return true
|
return true
|
||||||
@ -177,7 +177,7 @@ func (idx *Index) Has(id backend.ID, tpe pack.BlobType) bool {
|
|||||||
|
|
||||||
// LookupSize returns the length of the cleartext content behind the
|
// LookupSize returns the length of the cleartext content behind the
|
||||||
// given id
|
// given id
|
||||||
func (idx *Index) LookupSize(id backend.ID, tpe pack.BlobType) (cleartextLength uint, err error) {
|
func (idx *Index) LookupSize(id restic.ID, tpe pack.BlobType) (cleartextLength uint, err error) {
|
||||||
blobs, err := idx.Lookup(id, tpe)
|
blobs, err := idx.Lookup(id, tpe)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return 0, err
|
return 0, err
|
||||||
@ -187,13 +187,13 @@ func (idx *Index) LookupSize(id backend.ID, tpe pack.BlobType) (cleartextLength
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Supersedes returns the list of indexes this index supersedes, if any.
|
// Supersedes returns the list of indexes this index supersedes, if any.
|
||||||
func (idx *Index) Supersedes() backend.IDs {
|
func (idx *Index) Supersedes() restic.IDs {
|
||||||
return idx.supersedes
|
return idx.supersedes
|
||||||
}
|
}
|
||||||
|
|
||||||
// AddToSupersedes adds the ids to the list of indexes superseded by this
|
// AddToSupersedes adds the ids to the list of indexes superseded by this
|
||||||
// index. If the index has already been finalized, an error is returned.
|
// index. If the index has already been finalized, an error is returned.
|
||||||
func (idx *Index) AddToSupersedes(ids ...backend.ID) error {
|
func (idx *Index) AddToSupersedes(ids ...restic.ID) error {
|
||||||
idx.m.Lock()
|
idx.m.Lock()
|
||||||
defer idx.m.Unlock()
|
defer idx.m.Unlock()
|
||||||
|
|
||||||
@ -209,9 +209,9 @@ func (idx *Index) AddToSupersedes(ids ...backend.ID) error {
|
|||||||
type PackedBlob struct {
|
type PackedBlob struct {
|
||||||
Type pack.BlobType
|
Type pack.BlobType
|
||||||
Length uint
|
Length uint
|
||||||
ID backend.ID
|
ID restic.ID
|
||||||
Offset uint
|
Offset uint
|
||||||
PackID backend.ID
|
PackID restic.ID
|
||||||
}
|
}
|
||||||
|
|
||||||
func (pb PackedBlob) String() string {
|
func (pb PackedBlob) String() string {
|
||||||
@ -259,11 +259,11 @@ func (idx *Index) Each(done chan struct{}) <-chan PackedBlob {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Packs returns all packs in this index
|
// Packs returns all packs in this index
|
||||||
-func (idx *Index) Packs() backend.IDSet {
+func (idx *Index) Packs() restic.IDSet {
	idx.m.Lock()
	defer idx.m.Unlock()

-	packs := backend.NewIDSet()
+	packs := restic.NewIDSet()
	for _, list := range idx.pack {
		for _, entry := range list {
			packs.Insert(entry.packID)

@@ -300,12 +300,12 @@ func (idx *Index) Length() uint {
}

type packJSON struct {
-	ID backend.ID `json:"id"`
+	ID restic.ID `json:"id"`
	Blobs []blobJSON `json:"blobs"`
}

type blobJSON struct {
-	ID backend.ID `json:"id"`
+	ID restic.ID `json:"id"`
	Type pack.BlobType `json:"type"`
	Offset uint `json:"offset"`
	Length uint `json:"length"`

@@ -314,7 +314,7 @@ type blobJSON struct {
// generatePackList returns a list of packs.
func (idx *Index) generatePackList() ([]*packJSON, error) {
	list := []*packJSON{}
-	packs := make(map[backend.ID]*packJSON)
+	packs := make(map[restic.ID]*packJSON)

	for h, packedBlobs := range idx.pack {
		for _, blob := range packedBlobs {

@@ -357,7 +357,7 @@ func (idx *Index) generatePackList() ([]*packJSON, error) {
}

type jsonIndex struct {
-	Supersedes backend.IDs `json:"supersedes,omitempty"`
+	Supersedes restic.IDs `json:"supersedes,omitempty"`
	Packs []*packJSON `json:"packs"`
}

@@ -402,12 +402,12 @@ func (idx *Index) Finalize(w io.Writer) error {

// ID returns the ID of the index, if available. If the index is not yet
// finalized, an error is returned.
-func (idx *Index) ID() (backend.ID, error) {
+func (idx *Index) ID() (restic.ID, error) {
	idx.m.Lock()
	defer idx.m.Unlock()

	if !idx.final {
-		return backend.ID{}, errors.New("index not finalized")
+		return restic.ID{}, errors.New("index not finalized")
	}

	return idx.id, nil

@@ -415,7 +415,7 @@ func (idx *Index) ID() (backend.ID, error) {

// SetID sets the ID the index has been written to. This requires that
// Finalize() has been called before, otherwise an error is returned.
-func (idx *Index) SetID(id backend.ID) error {
+func (idx *Index) SetID(id restic.ID) error {
	idx.m.Lock()
	defer idx.m.Unlock()

@@ -545,10 +545,10 @@ func DecodeOldIndex(rd io.Reader) (idx *Index, err error) {
}

// LoadIndexWithDecoder loads the index and decodes it with fn.
-func LoadIndexWithDecoder(repo *Repository, id backend.ID, fn func(io.Reader) (*Index, error)) (idx *Index, err error) {
+func LoadIndexWithDecoder(repo *Repository, id restic.ID, fn func(io.Reader) (*Index, error)) (idx *Index, err error) {
	debug.Log("LoadIndexWithDecoder", "Loading index %v", id[:8])

-	buf, err := repo.LoadAndDecrypt(backend.Index, id)
+	buf, err := repo.LoadAndDecrypt(restic.IndexFile, id)
	if err != nil {
		return nil, err
	}

@@ -568,7 +568,7 @@ func LoadIndexWithDecoder(repo *Repository, id backend.ID, fn func(io.Reader) (*
// format (if necessary). When the conversion is succcessful, the old index
// is removed. Returned is either the old id (if no conversion was needed) or
// the new id.
-func ConvertIndex(repo *Repository, id backend.ID) (backend.ID, error) {
+func ConvertIndex(repo *Repository, id restic.ID) (restic.ID, error) {
	debug.Log("ConvertIndex", "checking index %v", id.Str())

	idx, err := LoadIndexWithDecoder(repo, id, DecodeOldIndex)

@@ -578,7 +578,7 @@ func ConvertIndex(repo *Repository, id backend.ID) (backend.ID, error) {
	}

	buf := bytes.NewBuffer(nil)
-	idx.supersedes = backend.IDs{id}
+	idx.supersedes = restic.IDs{id}

	err = idx.Encode(buf)
	if err != nil {

@@ -586,5 +586,5 @@ func ConvertIndex(repo *Repository, id backend.ID) (backend.ID, error) {
		return id, err
	}

-	return repo.SaveUnpacked(backend.Index, buf.Bytes())
+	return repo.SaveUnpacked(restic.IndexFile, buf.Bytes())
}
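ConvertIndex above loads an index with the old decoder, records the old file in the new index's supersedes list, re-encodes it, and saves the result unpacked as a new index file. For illustration, a minimal driver that converts every index in a repository might look like the following sketch; the function name and error handling are hypothetical, only List and ConvertIndex come from the code above.

func convertAllIndexes(repo *repository.Repository) {
	done := make(chan struct{})
	defer close(done)

	// walk all index files and convert them one by one
	for id := range repo.List(restic.IndexFile, done) {
		newID, err := repository.ConvertIndex(repo, id)
		if err != nil {
			fmt.Fprintf(os.Stderr, "converting index %v failed: %v\n", id.Str(), err)
			continue
		}
		fmt.Printf("index %v converted to %v\n", id.Str(), newID.Str())
	}
}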
@@ -3,7 +3,7 @@ package repository
import (
	"fmt"
	"os"
-	"restic/backend"
+	"restic"
	"restic/debug"
	"restic/list"
	"restic/worker"

@@ -23,7 +23,7 @@ func RebuildIndex(repo *Repository) error {

	idx := NewIndex()
	for job := range ch {
-		id := job.Data.(backend.ID)
+		id := job.Data.(restic.ID)

		if job.Error != nil {
			fmt.Fprintf(os.Stderr, "error for pack %v: %v\n", id, job.Error)

@@ -44,8 +44,8 @@ func RebuildIndex(repo *Repository) error {
		}
	}

-	oldIndexes := backend.NewIDSet()
-	for id := range repo.List(backend.Index, done) {
+	oldIndexes := restic.NewIDSet()
+	for id := range repo.List(restic.IndexFile, done) {
		idx.AddToSupersedes(id)
		oldIndexes.Insert(id)
	}

@@ -58,7 +58,7 @@ func RebuildIndex(repo *Repository) error {
	debug.Log("RebuildIndex.RebuildIndex", "new index saved as %v", id.Str())

	for indexID := range oldIndexes {
-		err := repo.Backend().Remove(backend.Index, indexID.String())
+		err := repo.Backend().Remove(restic.IndexFile, indexID.String())
		if err != nil {
			fmt.Fprintf(os.Stderr, "unable to remove index %v: %v\n", indexID.Str(), err)
		}
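Note the ordering in RebuildIndex: every existing index file is first recorded in the new index's supersedes list, the combined index is saved, and only then are the old files removed, so an interruption in between leaves redundant but consistent index data. From a caller's point of view the whole operation is a single call, roughly as in this sketch:

// Sketch: rebuild the repository index; RebuildIndex is the function shown above.
if err := repository.RebuildIndex(repo); err != nil {
	fmt.Fprintf(os.Stderr, "rebuilding index failed: %v\n", err)
}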
@@ -2,6 +2,7 @@ package repository_test

import (
	"bytes"
+	"restic"
	"testing"

	"restic/backend"

@@ -12,8 +13,8 @@ import (

func TestIndexSerialize(t *testing.T) {
	type testEntry struct {
-		id backend.ID
-		pack backend.ID
+		id restic.ID
+		pack restic.ID
		tpe pack.BlobType
		offset, length uint
	}

@@ -249,7 +250,7 @@ var docOldExample = []byte(`
`)

var exampleTests = []struct {
-	id, packID backend.ID
+	id, packID restic.ID
	tpe pack.BlobType
	offset, length uint
}{

@@ -269,11 +270,11 @@ var exampleTests = []struct {
}

var exampleLookupTest = struct {
-	packID backend.ID
-	blobs map[backend.ID]pack.BlobType
+	packID restic.ID
+	blobs map[restic.ID]pack.BlobType
}{
	ParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"),
-	map[backend.ID]pack.BlobType{
+	map[restic.ID]pack.BlobType{
		ParseID("3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce"): pack.Data,
		ParseID("9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae"): pack.Tree,
		ParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66"): pack.Data,

@@ -281,7 +282,7 @@ var exampleLookupTest = struct {
}

func TestIndexUnserialize(t *testing.T) {
-	oldIdx := backend.IDs{ParseID("ed54ae36197f4745ebc4b54d10e0f623eaaaedd03013eb7ae90df881b7781452")}
+	oldIdx := restic.IDs{ParseID("ed54ae36197f4745ebc4b54d10e0f623eaaaedd03013eb7ae90df881b7781452")}

	idx, err := repository.DecodeIndex(bytes.NewReader(docExample))
	OK(t, err)

@@ -345,7 +346,7 @@ func TestIndexUnserializeOld(t *testing.T) {

func TestIndexPacks(t *testing.T) {
	idx := repository.NewIndex()
-	packs := backend.NewIDSet()
+	packs := restic.NewIDSet()

	for i := 0; i < 20; i++ {
		packID := backend.RandomID()
@@ -5,6 +5,7 @@ import (
	"fmt"
	"os"
	"os/user"
+	"restic"
	"time"

	"github.com/pkg/errors"

@@ -142,7 +143,7 @@ func SearchKey(s *Repository, password string, maxKeys int) (*Key, error) {

// LoadKey loads a key from the backend.
func LoadKey(s *Repository, name string) (k *Key, err error) {
-	h := backend.Handle{Type: backend.Key, Name: name}
+	h := restic.Handle{Type: backend.Key, Name: name}
	data, err := backend.LoadAll(s.be, h, nil)
	if err != nil {
		return nil, err

@@ -224,9 +225,9 @@ func AddKey(s *Repository, password string, template *crypto.Key) (*Key, error)
	}

	// store in repository and return
-	h := backend.Handle{
+	h := restic.Handle{
		Type: backend.Key,
-		Name: backend.Hash(buf).String(),
+		Name: restic.Hash(buf).String(),
	}

	err = s.be.Save(h, buf)
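Key files follow the same content-addressed convention as the rest of the repository: AddKey stores the encrypted key blob under the hex form of its SHA-256 hash. That makes the file name verifiable on load; a hedged sketch of such a check (not part of this change, the helper name is hypothetical):

// Sketch: confirm a key file's contents match its content-addressed name.
func verifyKeyName(name string, data []byte) error {
	id, err := restic.ParseID(name)
	if err != nil {
		return err
	}
	if !restic.Hash(data).Equal(id) {
		return errors.New("key file content does not match its name")
	}
	return nil
}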
@@ -1,11 +1,11 @@
package repository

import (
+	"restic"
	"sync"

	"github.com/pkg/errors"

-	"restic/backend"
	"restic/debug"
	"restic/pack"
)

@@ -22,7 +22,7 @@ func NewMasterIndex() *MasterIndex {
}

// Lookup queries all known Indexes for the ID and returns the first match.
-func (mi *MasterIndex) Lookup(id backend.ID, tpe pack.BlobType) (blobs []PackedBlob, err error) {
+func (mi *MasterIndex) Lookup(id restic.ID, tpe pack.BlobType) (blobs []PackedBlob, err error) {
	mi.idxMutex.RLock()
	defer mi.idxMutex.RUnlock()

@@ -42,7 +42,7 @@ func (mi *MasterIndex) Lookup(id backend.ID, tpe pack.BlobType) (blobs []PackedB
}

// LookupSize queries all known Indexes for the ID and returns the first match.
-func (mi *MasterIndex) LookupSize(id backend.ID, tpe pack.BlobType) (uint, error) {
+func (mi *MasterIndex) LookupSize(id restic.ID, tpe pack.BlobType) (uint, error) {
	mi.idxMutex.RLock()
	defer mi.idxMutex.RUnlock()

@@ -58,7 +58,7 @@ func (mi *MasterIndex) LookupSize(id backend.ID, tpe pack.BlobType) (uint, error

// ListPack returns the list of blobs in a pack. The first matching index is
// returned, or nil if no index contains information about the pack id.
-func (mi *MasterIndex) ListPack(id backend.ID) (list []PackedBlob) {
+func (mi *MasterIndex) ListPack(id restic.ID) (list []PackedBlob) {
	mi.idxMutex.RLock()
	defer mi.idxMutex.RUnlock()

@@ -73,7 +73,7 @@ func (mi *MasterIndex) ListPack(id backend.ID) (list []PackedBlob) {
}

// Has queries all known Indexes for the ID and returns the first match.
-func (mi *MasterIndex) Has(id backend.ID, tpe pack.BlobType) bool {
+func (mi *MasterIndex) Has(id restic.ID, tpe pack.BlobType) bool {
	mi.idxMutex.RLock()
	defer mi.idxMutex.RUnlock()

@@ -197,7 +197,7 @@ func (mi *MasterIndex) All() []*Index {
// RebuildIndex combines all known indexes to a new index, leaving out any
// packs whose ID is contained in packBlacklist. The new index contains the IDs
// of all known indexes in the "supersedes" field.
-func (mi *MasterIndex) RebuildIndex(packBlacklist backend.IDSet) (*Index, error) {
+func (mi *MasterIndex) RebuildIndex(packBlacklist restic.IDSet) (*Index, error) {
	mi.idxMutex.Lock()
	defer mi.idxMutex.Unlock()
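Each MasterIndex query takes a read lock and returns the first index that knows the blob, so the same blob may legitimately appear in both an in-flight and a finalized index. Resolving a blob to its pack location then reads like this sketch (variable names hypothetical; the PackedBlob fields are the ones used elsewhere in this change):

// Sketch: look up where a data blob is stored.
blobs, err := mi.Lookup(id, pack.Data)
if err != nil {
	return err // no index knows this blob
}
for _, pb := range blobs {
	fmt.Printf("blob %v: pack %v, offset %d, length %d\n",
		id.Str(), pb.PackID.Str(), pb.Offset, pb.Length)
}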
@@ -4,11 +4,11 @@ import (
	"io"
	"io/ioutil"
	"os"
+	"restic"
	"sync"

	"github.com/pkg/errors"

-	"restic/backend"
	"restic/crypto"
	"restic/debug"
	"restic/fs"

@@ -17,7 +17,7 @@ import (

// Saver implements saving data in a backend.
type Saver interface {
-	Save(h backend.Handle, jp []byte) error
+	Save(h restic.Handle, jp []byte) error
}

// packerManager keeps a list of open packs and creates new on demand.

@@ -114,8 +114,8 @@ func (r *Repository) savePacker(p *pack.Packer) error {
		return errors.Wrap(err, "Close")
	}

-	id := backend.Hash(data)
-	h := backend.Handle{Type: backend.Data, Name: id.String()}
+	id := restic.Hash(data)
+	h := restic.Handle{Type: restic.DataFile, Name: id.String()}

	err = r.be.Save(h, data)
	if err != nil {
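The Saver interface is the minimal surface savePacker needs from a backend: any type with a matching Save method will do. A hedged in-memory example (assuming restic.Handle stays a comparable struct of Type and Name, as its use above suggests):

// Sketch: a minimal in-memory Saver for tests.
type memSaver struct {
	files map[restic.Handle][]byte
}

func (s *memSaver) Save(h restic.Handle, jp []byte) error {
	if s.files == nil {
		s.files = make(map[restic.Handle][]byte)
	}
	s.files[h] = append([]byte(nil), jp...) // copy, the caller may reuse jp
	return nil
}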
@@ -4,7 +4,7 @@ import (
	"io"
	"math/rand"
	"os"
-	"restic/backend"
+	"restic"
	"restic/backend/mem"
	"restic/crypto"
	"restic/pack"

@@ -36,8 +36,8 @@ func (r *randReader) Read(p []byte) (n int, err error) {
	return len(p), nil
}

-func randomID(rd io.Reader) backend.ID {
-	id := backend.ID{}
+func randomID(rd io.Reader) restic.ID {
+	id := restic.ID{}
	_, err := io.ReadFull(rd, id[:])
	if err != nil {
		panic(err)

@@ -64,7 +64,7 @@ func saveFile(t testing.TB, be Saver, filename string, n int) {
		t.Fatal(err)
	}

-	h := backend.Handle{Type: backend.Data, Name: backend.Hash(data).String()}
+	h := restic.Handle{Type: restic.DataFile, Name: restic.Hash(data).String()}

	err = be.Save(h, data)
	if err != nil {

@@ -137,7 +137,7 @@ func flushRemainingPacks(t testing.TB, rnd *randReader, be Saver, pm *packerMana

type fakeBackend struct{}

-func (f *fakeBackend) Save(h backend.Handle, data []byte) error {
+func (f *fakeBackend) Save(h restic.Handle, data []byte) error {
	return nil
}
@@ -1,6 +1,7 @@
package repository

import (
+	"restic"
	"sync"

	"restic/backend"

@@ -23,12 +24,12 @@ type ParallelWorkFunc func(id string, done <-chan struct{}) error

// ParallelIDWorkFunc gets one backend.ID to work on. If an error is returned,
// processing stops. If done is closed, the function should return.
-type ParallelIDWorkFunc func(id backend.ID, done <-chan struct{}) error
+type ParallelIDWorkFunc func(id restic.ID, done <-chan struct{}) error

// FilesInParallel runs n workers of f in parallel, on the IDs that
// repo.List(t) yield. If f returns an error, the process is aborted and the
// first error is returned.
-func FilesInParallel(repo backend.Lister, t backend.Type, n uint, f ParallelWorkFunc) error {
+func FilesInParallel(repo backend.Lister, t restic.FileType, n uint, f ParallelWorkFunc) error {
	done := make(chan struct{})
	defer closeIfOpen(done)
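FilesInParallel consumes plain string IDs from a backend.Lister; ParallelIDWorkFunc is the typed variant, adapted via ParallelWorkFuncParseID where needed (LoadIndex further down in this commit does exactly that, passing the repository's backend directly). A hedged usage sketch:

// Sketch: process all snapshot files with four workers.
worker := func(id string, done <-chan struct{}) error {
	fmt.Println("processing", id)
	return nil // a non-nil error aborts the remaining workers
}
err := repository.FilesInParallel(repo.Backend(), restic.SnapshotFile, 4, worker)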
@@ -2,12 +2,12 @@ package repository_test

import (
	"math/rand"
+	"restic"
	"testing"
	"time"

	"github.com/pkg/errors"

-	"restic/backend"
	"restic/repository"
	. "restic/test"
)

@@ -73,7 +73,7 @@ var lister = testIDs{
	"34dd044c228727f2226a0c9c06a3e5ceb5e30e31cb7854f8fa1cde846b395a58",
}

-func (tests testIDs) List(t backend.Type, done <-chan struct{}) <-chan string {
+func (tests testIDs) List(t restic.FileType, done <-chan struct{}) <-chan string {
	ch := make(chan string)

	go func() {

@@ -100,7 +100,7 @@ func TestFilesInParallel(t *testing.T) {
	}

	for n := uint(1); n < 5; n++ {
-		err := repository.FilesInParallel(lister, backend.Data, n*100, f)
+		err := repository.FilesInParallel(lister, restic.DataFile, n*100, f)
		OK(t, err)
	}
}

@@ -120,7 +120,7 @@ func TestFilesInParallelWithError(t *testing.T) {
	}

	for n := uint(1); n < 5; n++ {
-		err := repository.FilesInParallel(lister, backend.Data, n*100, f)
+		err := repository.FilesInParallel(lister, restic.DataFile, n*100, f)
		Equals(t, errTest, err)
	}
}
@@ -3,7 +3,7 @@ package repository
import (
	"bytes"
	"io"
-	"restic/backend"
+	"restic"
	"restic/crypto"
	"restic/debug"
	"restic/pack"

@@ -15,13 +15,13 @@ import (
// these packs. Each pack is loaded and the blobs listed in keepBlobs is saved
// into a new pack. Afterwards, the packs are removed. This operation requires
// an exclusive lock on the repo.
-func Repack(repo *Repository, packs backend.IDSet, keepBlobs pack.BlobSet) (err error) {
+func Repack(repo *Repository, packs restic.IDSet, keepBlobs pack.BlobSet) (err error) {
	debug.Log("Repack", "repacking %d packs while keeping %d blobs", len(packs), len(keepBlobs))

	buf := make([]byte, 0, maxPackSize)
	for packID := range packs {
		// load the complete pack
-		h := backend.Handle{Type: backend.Data, Name: packID.String()}
+		h := restic.Handle{Type: restic.DataFile, Name: packID.String()}

		l, err := repo.Backend().Load(h, buf[:cap(buf)], 0)
		if errors.Cause(err) == io.ErrUnexpectedEOF {

@@ -75,7 +75,7 @@ func Repack(repo *Repository, packs backend.IDSet, keepBlobs pack.BlobSet) (err
	}

	for packID := range packs {
-		err := repo.Backend().Remove(backend.Data, packID.String())
+		err := repo.Backend().Remove(restic.DataFile, packID.String())
		if err != nil {
			debug.Log("Repack", "error removing pack %v: %v", packID.Str(), err)
			return err
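Repack is handed two sets: the packs to rewrite and the blobs worth keeping; blobs in those packs that are not listed are dropped, and the source packs are removed afterwards. Since existing indexes still reference the removed packs, a caller would typically rebuild the index next, roughly as in this sketch (packs is a restic.IDSet, keepBlobs a pack.BlobSet, as in the signature above):

// Sketch: repack, then bring the index back in sync.
if err := repository.Repack(repo, packs, keepBlobs); err != nil {
	return err
}
if err := repository.RebuildIndex(repo); err != nil {
	return err
}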
@@ -3,7 +3,7 @@ package repository_test
import (
	"io"
	"math/rand"
-	"restic/backend"
+	"restic"
	"restic/pack"
	"restic/repository"
	"testing"

@@ -14,7 +14,7 @@ func randomSize(min, max int) int {
}

func random(t testing.TB, length int) []byte {
-	rd := repository.NewRandReader(rand.New(rand.NewSource(int64(length))))
+	rd := restic.NewRandReader(rand.New(rand.NewSource(int64(length))))
	buf := make([]byte, length)
	_, err := io.ReadFull(rd, buf)
	if err != nil {

@@ -40,7 +40,7 @@ func createRandomBlobs(t testing.TB, repo *repository.Repository, blobs int, pDa
	}

	buf := random(t, length)
-	id := backend.Hash(buf)
+	id := restic.Hash(buf)

	if repo.Index().Has(id, pack.Data) {
		t.Errorf("duplicate blob %v/%v ignored", id, pack.Data)

@@ -75,7 +75,7 @@ func selectBlobs(t *testing.T, repo *repository.Repository, p float32) (list1, l

	blobs := pack.NewBlobSet()

-	for id := range repo.List(backend.Data, done) {
+	for id := range repo.List(restic.DataFile, done) {
		entries, _, err := repo.ListPack(id)
		if err != nil {
			t.Fatalf("error listing pack %v: %v", id, err)

@@ -101,20 +101,20 @@ func selectBlobs(t *testing.T, repo *repository.Repository, p float32) (list1, l
	return list1, list2
}

-func listPacks(t *testing.T, repo *repository.Repository) backend.IDSet {
+func listPacks(t *testing.T, repo *repository.Repository) restic.IDSet {
	done := make(chan struct{})
	defer close(done)

-	list := backend.NewIDSet()
-	for id := range repo.List(backend.Data, done) {
+	list := restic.NewIDSet()
+	for id := range repo.List(restic.DataFile, done) {
		list.Insert(id)
	}

	return list
}

-func findPacksForBlobs(t *testing.T, repo *repository.Repository, blobs pack.BlobSet) backend.IDSet {
-	packs := backend.NewIDSet()
+func findPacksForBlobs(t *testing.T, repo *repository.Repository, blobs pack.BlobSet) restic.IDSet {
+	packs := restic.NewIDSet()

	idx := repo.Index()
	for h := range blobs {

@@ -131,7 +131,7 @@ func findPacksForBlobs(t *testing.T, repo *repository.Repository, blobs pack.Blo
	return packs
}

-func repack(t *testing.T, repo *repository.Repository, packs backend.IDSet, blobs pack.BlobSet) {
+func repack(t *testing.T, repo *repository.Repository, packs restic.IDSet, blobs pack.BlobSet) {
	err := repository.Repack(repo, packs, blobs)
	if err != nil {
		t.Fatal(err)
@@ -6,6 +6,7 @@ import (
	"fmt"
	"io"
	"os"
+	"restic"

	"github.com/pkg/errors"

@@ -17,7 +18,7 @@ import (

// Repository is used to access a repository in a backend.
type Repository struct {
-	be backend.Backend
+	be restic.Backend
	Config Config
	key *crypto.Key
	keyName string

@@ -27,7 +28,7 @@ type Repository struct {
}

// New returns a new repository with backend be.
-func New(be backend.Backend) *Repository {
+func New(be restic.Backend) *Repository {
	repo := &Repository{
		be: be,
		idx: NewMasterIndex(),

@@ -40,29 +41,29 @@ func New(be backend.Backend) *Repository {
// Find loads the list of all blobs of type t and searches for names which start
// with prefix. If none is found, nil and ErrNoIDPrefixFound is returned. If
// more than one is found, nil and ErrMultipleIDMatches is returned.
-func (r *Repository) Find(t backend.Type, prefix string) (string, error) {
+func (r *Repository) Find(t restic.FileType, prefix string) (string, error) {
	return backend.Find(r.be, t, prefix)
}

// PrefixLength returns the number of bytes required so that all prefixes of
// all IDs of type t are unique.
-func (r *Repository) PrefixLength(t backend.Type) (int, error) {
+func (r *Repository) PrefixLength(t restic.FileType) (int, error) {
	return backend.PrefixLength(r.be, t)
}

// LoadAndDecrypt loads and decrypts data identified by t and id from the
// backend.
-func (r *Repository) LoadAndDecrypt(t backend.Type, id backend.ID) ([]byte, error) {
+func (r *Repository) LoadAndDecrypt(t restic.FileType, id restic.ID) ([]byte, error) {
	debug.Log("Repo.Load", "load %v with id %v", t, id.Str())

-	h := backend.Handle{Type: t, Name: id.String()}
+	h := restic.Handle{Type: t, Name: id.String()}
	buf, err := backend.LoadAll(r.be, h, nil)
	if err != nil {
		debug.Log("Repo.Load", "error loading %v: %v", id.Str(), err)
		return nil, err
	}

-	if t != backend.Config && !backend.Hash(buf).Equal(id) {
+	if t != restic.ConfigFile && !restic.Hash(buf).Equal(id) {
		return nil, errors.New("invalid data returned")
	}
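Everything except the config file is content-addressed, which is why LoadAndDecrypt can detect a corrupted or swapped file simply by re-hashing what the backend returned; the config lives under a fixed name and is exempt. The same check as a standalone helper (a sketch, the function name is hypothetical):

// Sketch: content-address verification for non-config files.
func verifyContent(t restic.FileType, id restic.ID, buf []byte) error {
	if t == restic.ConfigFile {
		return nil // the config file is not named after its hash
	}
	if !restic.Hash(buf).Equal(id) {
		return errors.Errorf("file %v/%v: content does not match its ID", t, id.Str())
	}
	return nil
}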
@@ -78,7 +79,7 @@ func (r *Repository) LoadAndDecrypt(t backend.Type, id backend.ID) ([]byte, erro
// LoadBlob tries to load and decrypt content identified by t and id from a
// pack from the backend, the result is stored in plaintextBuf, which must be
// large enough to hold the complete blob.
-func (r *Repository) LoadBlob(id backend.ID, t pack.BlobType, plaintextBuf []byte) ([]byte, error) {
+func (r *Repository) LoadBlob(id restic.ID, t pack.BlobType, plaintextBuf []byte) ([]byte, error) {
	debug.Log("Repo.LoadBlob", "load %v with id %v", t, id.Str())

	// lookup plaintext size of blob

@@ -111,7 +112,7 @@ func (r *Repository) LoadBlob(id backend.ID, t pack.BlobType, plaintextBuf []byt
	}

	// load blob from pack
-	h := backend.Handle{Type: backend.Data, Name: blob.PackID.String()}
+	h := restic.Handle{Type: restic.DataFile, Name: blob.PackID.String()}
	ciphertextBuf := make([]byte, blob.Length)
	n, err := r.be.Load(h, ciphertextBuf, int64(blob.Offset))
	if err != nil {

@@ -135,7 +136,7 @@ func (r *Repository) LoadBlob(id backend.ID, t pack.BlobType, plaintextBuf []byt
		}

		// check hash
-		if !backend.Hash(plaintextBuf).Equal(id) {
+		if !restic.Hash(plaintextBuf).Equal(id) {
			lastError = errors.Errorf("blob %v returned invalid hash", id)
			continue
		}

@@ -162,7 +163,7 @@ func closeOrErr(cl io.Closer, err *error) {

// LoadJSONUnpacked decrypts the data and afterwards calls json.Unmarshal on
// the item.
-func (r *Repository) LoadJSONUnpacked(t backend.Type, id backend.ID, item interface{}) (err error) {
+func (r *Repository) LoadJSONUnpacked(t restic.FileType, id restic.ID, item interface{}) (err error) {
	buf, err := r.LoadAndDecrypt(t, id)
	if err != nil {
		return err

@@ -173,7 +174,7 @@ func (r *Repository) LoadJSONUnpacked(t backend.Type, id backend.ID, item interf

// LoadJSONPack calls LoadBlob() to load a blob from the backend, decrypt the
// data and afterwards call json.Unmarshal on the item.
-func (r *Repository) LoadJSONPack(t pack.BlobType, id backend.ID, item interface{}) (err error) {
+func (r *Repository) LoadJSONPack(t pack.BlobType, id restic.ID, item interface{}) (err error) {
	buf, err := r.LoadBlob(id, t, nil)
	if err != nil {
		return err

@@ -183,16 +184,16 @@ func (r *Repository) LoadJSONPack(t pack.BlobType, id backend.ID, item interface
}

// LookupBlobSize returns the size of blob id.
-func (r *Repository) LookupBlobSize(id backend.ID, tpe pack.BlobType) (uint, error) {
+func (r *Repository) LookupBlobSize(id restic.ID, tpe pack.BlobType) (uint, error) {
	return r.idx.LookupSize(id, tpe)
}

// SaveAndEncrypt encrypts data and stores it to the backend as type t. If data
// is small enough, it will be packed together with other small blobs.
-func (r *Repository) SaveAndEncrypt(t pack.BlobType, data []byte, id *backend.ID) (backend.ID, error) {
+func (r *Repository) SaveAndEncrypt(t pack.BlobType, data []byte, id *restic.ID) (restic.ID, error) {
	if id == nil {
		// compute plaintext hash
-		hashedID := backend.Hash(data)
+		hashedID := restic.Hash(data)
		id = &hashedID
	}

@@ -205,19 +206,19 @@ func (r *Repository) SaveAndEncrypt(t pack.BlobType, data []byte, id *backend.ID
	// encrypt blob
	ciphertext, err := r.Encrypt(ciphertext, data)
	if err != nil {
-		return backend.ID{}, err
+		return restic.ID{}, err
	}

	// find suitable packer and add blob
	packer, err := r.findPacker(uint(len(ciphertext)))
	if err != nil {
-		return backend.ID{}, err
+		return restic.ID{}, err
	}

	// save ciphertext
	_, err = packer.Add(t, *id, ciphertext)
	if err != nil {
-		return backend.ID{}, err
+		return restic.ID{}, err
	}

	// if the pack is not full enough and there are less than maxPackers
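When the caller passes id == nil, SaveAndEncrypt hashes the plaintext itself; callers that may see the same data twice usually pair it with an index lookup so duplicate blobs are never encrypted or uploaded again, the same pattern the archiver in this commit uses:

// Sketch: deduplicated save of one chunk.
id := restic.Hash(chunk)
if !repo.Index().Has(id, pack.Data) { // blob already known? skip the write
	if _, err := repo.SaveAndEncrypt(pack.Data, chunk, &id); err != nil {
		return err
	}
}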
@@ -234,7 +235,7 @@ func (r *Repository) SaveAndEncrypt(t pack.BlobType, data []byte, id *backend.ID

// SaveJSON serialises item as JSON and encrypts and saves it in a pack in the
// backend as type t.
-func (r *Repository) SaveJSON(t pack.BlobType, item interface{}) (backend.ID, error) {
+func (r *Repository) SaveJSON(t pack.BlobType, item interface{}) (restic.ID, error) {
	debug.Log("Repo.SaveJSON", "save %v blob", t)
	buf := getBuf()[:0]
	defer freeBuf(buf)

@@ -244,7 +245,7 @@ func (r *Repository) SaveJSON(t pack.BlobType, item interface{}) (backend.ID, er
	enc := json.NewEncoder(wr)
	err := enc.Encode(item)
	if err != nil {
-		return backend.ID{}, errors.Errorf("json.Encode: %v", err)
+		return restic.ID{}, errors.Errorf("json.Encode: %v", err)
	}

	buf = wr.Bytes()

@@ -253,11 +254,11 @@ func (r *Repository) SaveJSON(t pack.BlobType, item interface{}) (backend.ID, er

// SaveJSONUnpacked serialises item as JSON and encrypts and saves it in the
// backend as type t, without a pack. It returns the storage hash.
-func (r *Repository) SaveJSONUnpacked(t backend.Type, item interface{}) (backend.ID, error) {
+func (r *Repository) SaveJSONUnpacked(t restic.FileType, item interface{}) (restic.ID, error) {
	debug.Log("Repo.SaveJSONUnpacked", "save new blob %v", t)
	plaintext, err := json.Marshal(item)
	if err != nil {
-		return backend.ID{}, errors.Wrap(err, "json.Marshal")
+		return restic.ID{}, errors.Wrap(err, "json.Marshal")
	}

	return r.SaveUnpacked(t, plaintext)

@@ -265,20 +266,20 @@ func (r *Repository) SaveJSONUnpacked(t backend.Type, item interface{}) (backend

// SaveUnpacked encrypts data and stores it in the backend. Returned is the
// storage hash.
-func (r *Repository) SaveUnpacked(t backend.Type, p []byte) (id backend.ID, err error) {
+func (r *Repository) SaveUnpacked(t restic.FileType, p []byte) (id restic.ID, err error) {
	ciphertext := make([]byte, len(p)+crypto.Extension)
	ciphertext, err = r.Encrypt(ciphertext, p)
	if err != nil {
-		return backend.ID{}, err
+		return restic.ID{}, err
	}

-	id = backend.Hash(ciphertext)
-	h := backend.Handle{Type: t, Name: id.String()}
+	id = restic.Hash(ciphertext)
+	h := restic.Handle{Type: t, Name: id.String()}

	err = r.be.Save(h, ciphertext)
	if err != nil {
		debug.Log("Repo.SaveJSONUnpacked", "error saving blob %v: %v", h, err)
-		return backend.ID{}, err
+		return restic.ID{}, err
	}

	debug.Log("Repo.SaveJSONUnpacked", "blob %v saved", h)

@@ -303,7 +304,7 @@ func (r *Repository) Flush() error {
}

// Backend returns the backend for the repository.
-func (r *Repository) Backend() backend.Backend {
+func (r *Repository) Backend() restic.Backend {
	return r.be
}

@@ -318,15 +319,15 @@ func (r *Repository) SetIndex(i *MasterIndex) {
}

// SaveIndex saves an index in the repository.
-func SaveIndex(repo *Repository, index *Index) (backend.ID, error) {
+func SaveIndex(repo *Repository, index *Index) (restic.ID, error) {
	buf := bytes.NewBuffer(nil)

	err := index.Finalize(buf)
	if err != nil {
-		return backend.ID{}, err
+		return restic.ID{}, err
	}

-	return repo.SaveUnpacked(backend.Index, buf.Bytes())
+	return repo.SaveUnpacked(restic.IndexFile, buf.Bytes())
}

// saveIndex saves all indexes in the backend.

@@ -365,7 +366,7 @@ func (r *Repository) LoadIndex() error {
	errCh := make(chan error, 1)
	indexes := make(chan *Index)

-	worker := func(id backend.ID, done <-chan struct{}) error {
+	worker := func(id restic.ID, done <-chan struct{}) error {
		idx, err := LoadIndex(r, id)
		if err != nil {
			return err

@@ -381,7 +382,7 @@ func (r *Repository) LoadIndex() error {

	go func() {
		defer close(indexes)
-		errCh <- FilesInParallel(r.be, backend.Index, loadIndexParallelism,
+		errCh <- FilesInParallel(r.be, restic.IndexFile, loadIndexParallelism,
			ParallelWorkFuncParseID(worker))
	}()

@@ -397,7 +398,7 @@ func (r *Repository) LoadIndex() error {
}

// LoadIndex loads the index id from backend and returns it.
-func LoadIndex(repo *Repository, id backend.ID) (*Index, error) {
+func LoadIndex(repo *Repository, id restic.ID) (*Index, error) {
	idx, err := LoadIndexWithDecoder(repo, id, DecodeIndex)
	if err == nil {
		return idx, nil

@@ -429,7 +430,7 @@ func (r *Repository) SearchKey(password string, maxKeys int) error {
// Init creates a new master key with the supplied password, initializes and
// saves the repository config.
func (r *Repository) Init(password string) error {
-	has, err := r.be.Test(backend.Config, "")
+	has, err := r.be.Test(restic.ConfigFile, "")
	if err != nil {
		return err
	}

@@ -457,7 +458,7 @@ func (r *Repository) init(password string, cfg Config) error {
	r.packerManager.key = key.master
	r.keyName = key.Name()
	r.Config = cfg
-	_, err = r.SaveJSONUnpacked(backend.Config, cfg)
+	_, err = r.SaveJSONUnpacked(restic.ConfigFile, cfg)
	return err
}

@@ -497,7 +498,7 @@ func (r *Repository) KeyName() string {
}

// Count returns the number of blobs of a given type in the backend.
-func (r *Repository) Count(t backend.Type) (n uint) {
+func (r *Repository) Count(t restic.FileType) (n uint) {
	for _ = range r.be.List(t, nil) {
		n++
	}

@@ -505,16 +506,16 @@ func (r *Repository) Count(t backend.Type) (n uint) {
	return
}

-func (r *Repository) list(t backend.Type, done <-chan struct{}, out chan<- backend.ID) {
+func (r *Repository) list(t restic.FileType, done <-chan struct{}, out chan<- restic.ID) {
	defer close(out)
	in := r.be.List(t, done)

	var (
		// disable sending on the outCh until we received a job
-		outCh chan<- backend.ID
+		outCh chan<- restic.ID
		// enable receiving from in
		inCh = in
-		id backend.ID
+		id restic.ID
		err error
	)

@@ -543,8 +544,8 @@ func (r *Repository) list(t backend.Type, done <-chan struct{}, out chan<- backe
}

// List returns a channel that yields all IDs of type t in the backend.
-func (r *Repository) List(t backend.Type, done <-chan struct{}) <-chan backend.ID {
-	outCh := make(chan backend.ID)
+func (r *Repository) List(t restic.FileType, done <-chan struct{}) <-chan restic.ID {
+	outCh := make(chan restic.ID)

	go r.list(t, done, outCh)

@@ -553,8 +554,8 @@ func (r *Repository) List(t backend.Type, done <-chan struct{}) <-chan backend.I

// ListPack returns the list of blobs saved in the pack id and the length of
// the file as stored in the backend.
-func (r *Repository) ListPack(id backend.ID) ([]pack.Blob, int64, error) {
+func (r *Repository) ListPack(id restic.ID) ([]pack.Blob, int64, error) {
-	h := backend.Handle{Type: backend.Data, Name: id.String()}
+	h := restic.Handle{Type: restic.DataFile, Name: id.String()}

	blobInfo, err := r.Backend().Stat(h)
	if err != nil {
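The list helper above implements the forwarding loop with a nil-able send channel so it blocks neither on a slow consumer nor on a closed done channel; List just wraps it in a goroutine. Consumers can therefore stop early, as in this sketch:

// Sketch: stop listing data files after the first 100.
done := make(chan struct{})
defer close(done) // releases the listing goroutine on early return

n := 0
for range repo.List(restic.DataFile, done) {
	if n++; n == 100 {
		break
	}
}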
@@ -11,7 +11,6 @@ import (
	"testing"

	"restic"
-	"restic/backend"
	"restic/pack"
	"restic/repository"
	. "restic/test"

@@ -80,7 +79,7 @@ func TestSave(t *testing.T) {
	_, err := io.ReadFull(rand.Reader, data)
	OK(t, err)

-	id := backend.Hash(data)
+	id := restic.Hash(data)

	// save
	sid, err := repo.SaveAndEncrypt(pack.Data, data, nil)

@@ -114,7 +113,7 @@ func TestSaveFrom(t *testing.T) {
	_, err := io.ReadFull(rand.Reader, data)
	OK(t, err)

-	id := backend.Hash(data)
+	id := restic.Hash(data)

	// save
	id2, err := repo.SaveAndEncrypt(pack.Data, data, &id)

@@ -147,7 +146,7 @@ func BenchmarkSaveAndEncrypt(t *testing.B) {
	_, err := io.ReadFull(rand.Reader, data)
	OK(t, err)

-	id := backend.ID(sha256.Sum256(data))
+	id := restic.ID(sha256.Sum256(data))

	t.ResetTimer()
	t.SetBytes(int64(size))

@@ -211,13 +210,13 @@ func TestLoadJSONUnpacked(t *testing.T) {
	sn.Hostname = "foobar"
	sn.Username = "test!"

-	id, err := repo.SaveJSONUnpacked(backend.Snapshot, &sn)
+	id, err := repo.SaveJSONUnpacked(restic.SnapshotFile, &sn)
	OK(t, err)

	var sn2 restic.Snapshot

	// restore
-	err = repo.LoadJSONUnpacked(backend.Snapshot, id, &sn2)
+	err = repo.LoadJSONUnpacked(restic.SnapshotFile, id, &sn2)
	OK(t, err)

	Equals(t, sn.Hostname, sn2.Hostname)

@@ -286,19 +285,19 @@ func TestRepositoryIncrementalIndex(t *testing.T) {
	OK(t, repo.SaveIndex())

	type packEntry struct {
-		id backend.ID
+		id restic.ID
		indexes []*repository.Index
	}

-	packEntries := make(map[backend.ID]map[backend.ID]struct{})
+	packEntries := make(map[restic.ID]map[restic.ID]struct{})

-	for id := range repo.List(backend.Index, nil) {
+	for id := range repo.List(restic.IndexFile, nil) {
		idx, err := repository.LoadIndex(repo, id)
		OK(t, err)

		for pb := range idx.Each(nil) {
			if _, ok := packEntries[pb.PackID]; !ok {
-				packEntries[pb.PackID] = make(map[backend.ID]struct{})
+				packEntries[pb.PackID] = make(map[restic.ID]struct{})
			}

			packEntries[pb.PackID][id] = struct{}{}
@@ -2,7 +2,7 @@ package repository

import (
	"os"
-	"restic/backend"
+	"restic"
	"restic/backend/local"
	"restic/backend/mem"
	"restic/crypto"

@@ -25,7 +25,7 @@ func TestUseLowSecurityKDFParameters(t testing.TB) {
}

// TestBackend returns a fully configured in-memory backend.
-func TestBackend(t testing.TB) (be backend.Backend, cleanup func()) {
+func TestBackend(t testing.TB) (be restic.Backend, cleanup func()) {
	return mem.New(), func() {}
}

@@ -37,7 +37,7 @@ const testChunkerPol = chunker.Pol(0x3DA3358B4DC173)
// TestRepositoryWithBackend returns a repository initialized with a test
// password. If be is nil, an in-memory backend is used. A constant polynomial
// is used for the chunker and low-security test parameters.
-func TestRepositoryWithBackend(t testing.TB, be backend.Backend) (r *Repository, cleanup func()) {
+func TestRepositoryWithBackend(t testing.TB, be restic.Backend) (r *Repository, cleanup func()) {
	TestUseLowSecurityKDFParameters(t)

	var beCleanup func()
@@ -6,15 +6,13 @@ import (

	"github.com/pkg/errors"

-	"restic/backend"
	"restic/debug"
	"restic/fs"
-	"restic/repository"
)

// Restorer is used to restore a snapshot to a directory.
type Restorer struct {
-	repo *repository.Repository
+	repo Repository
	sn *Snapshot

	Error func(dir string, node *Node, err error) error

@@ -24,7 +22,7 @@ type Restorer struct {
var restorerAbortOnAllErrors = func(str string, node *Node, err error) error { return err }

// NewRestorer creates a restorer preloaded with the content from the snapshot id.
-func NewRestorer(repo *repository.Repository, id backend.ID) (*Restorer, error) {
+func NewRestorer(repo Repository, id ID) (*Restorer, error) {
	r := &Restorer{
		repo: repo, Error: restorerAbortOnAllErrors,
		SelectFilter: func(string, string, *Node) bool { return true },

@@ -40,7 +38,7 @@ func NewRestorer(repo *repository.Repository, id backend.ID) (*Restorer, error)
	return r, nil
}

-func (res *Restorer) restoreTo(dst string, dir string, treeID backend.ID) error {
+func (res *Restorer) restoreTo(dst string, dir string, treeID ID) error {
	tree, err := LoadTree(res.repo, treeID)
	if err != nil {
		return res.Error(dir, nil, err)
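With the repo field typed as the package-local Repository interface, the restorer no longer depends on the concrete repository implementation. A usage sketch from inside package restic; RestoreTo is assumed to be the exported entry point wrapping restoreTo above:

// Sketch: restore a snapshot into a target directory.
res, err := NewRestorer(repo, snapshotID)
if err != nil {
	return err
}
res.SelectFilter = func(item, dstpath string, node *Node) bool {
	return true // restore everything
}
return res.RestoreTo("/tmp/restore") // assumed exported wrapper around restoreTo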
@ -10,22 +10,21 @@ import (
|
|||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
|
||||||
"restic/backend"
|
"restic/backend"
|
||||||
"restic/repository"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// Snapshot is the state of a resource at one point in time.
|
// Snapshot is the state of a resource at one point in time.
|
||||||
type Snapshot struct {
|
type Snapshot struct {
|
||||||
Time time.Time `json:"time"`
|
Time time.Time `json:"time"`
|
||||||
Parent *backend.ID `json:"parent,omitempty"`
|
Parent *ID `json:"parent,omitempty"`
|
||||||
Tree *backend.ID `json:"tree"`
|
Tree *ID `json:"tree"`
|
||||||
Paths []string `json:"paths"`
|
Paths []string `json:"paths"`
|
||||||
Hostname string `json:"hostname,omitempty"`
|
Hostname string `json:"hostname,omitempty"`
|
||||||
Username string `json:"username,omitempty"`
|
Username string `json:"username,omitempty"`
|
||||||
UID uint32 `json:"uid,omitempty"`
|
UID uint32 `json:"uid,omitempty"`
|
||||||
GID uint32 `json:"gid,omitempty"`
|
GID uint32 `json:"gid,omitempty"`
|
||||||
Excludes []string `json:"excludes,omitempty"`
|
Excludes []string `json:"excludes,omitempty"`
|
||||||
|
|
||||||
id *backend.ID // plaintext ID, used during restore
|
id *ID // plaintext ID, used during restore
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewSnapshot returns an initialized snapshot struct for the current user and
|
// NewSnapshot returns an initialized snapshot struct for the current user and
|
||||||
@ -56,9 +55,9 @@ func NewSnapshot(paths []string) (*Snapshot, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// LoadSnapshot loads the snapshot with the id and returns it.
|
// LoadSnapshot loads the snapshot with the id and returns it.
|
||||||
func LoadSnapshot(repo *repository.Repository, id backend.ID) (*Snapshot, error) {
|
func LoadSnapshot(repo Repository, id ID) (*Snapshot, error) {
|
||||||
sn := &Snapshot{id: &id}
|
sn := &Snapshot{id: &id}
|
||||||
err := repo.LoadJSONUnpacked(backend.Snapshot, id, sn)
|
err := repo.LoadJSONUnpacked(SnapshotFile, id, sn)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -67,11 +66,11 @@ func LoadSnapshot(repo *repository.Repository, id backend.ID) (*Snapshot, error)
|
|||||||
}
|
}
|
||||||
|
|
||||||
// LoadAllSnapshots returns a list of all snapshots in the repo.
|
// LoadAllSnapshots returns a list of all snapshots in the repo.
|
||||||
func LoadAllSnapshots(repo *repository.Repository) (snapshots []*Snapshot, err error) {
|
func LoadAllSnapshots(repo Repository) (snapshots []*Snapshot, err error) {
|
||||||
done := make(chan struct{})
|
done := make(chan struct{})
|
||||||
defer close(done)
|
defer close(done)
|
||||||
|
|
||||||
for id := range repo.List(backend.Snapshot, done) {
|
for id := range repo.List(SnapshotFile, done) {
|
||||||
sn, err := LoadSnapshot(repo, id)
|
sn, err := LoadSnapshot(repo, id)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -89,7 +88,7 @@ func (sn Snapshot) String() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ID retuns the snapshot's ID.
|
// ID retuns the snapshot's ID.
|
||||||
func (sn Snapshot) ID() *backend.ID {
|
func (sn Snapshot) ID() *ID {
|
||||||
return sn.id
|
return sn.id
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -131,17 +130,17 @@ func SamePaths(expected, actual []string) bool {
|
|||||||
var ErrNoSnapshotFound = errors.New("no snapshot found")
|
var ErrNoSnapshotFound = errors.New("no snapshot found")
|
||||||
|
|
||||||
// FindLatestSnapshot finds latest snapshot with optional target/directory and source filters
|
// FindLatestSnapshot finds latest snapshot with optional target/directory and source filters
|
||||||
func FindLatestSnapshot(repo *repository.Repository, targets []string, source string) (backend.ID, error) {
|
func FindLatestSnapshot(repo Repository, targets []string, source string) (ID, error) {
|
||||||
var (
|
var (
|
||||||
latest time.Time
|
latest time.Time
|
||||||
latestID backend.ID
|
latestID ID
|
||||||
found bool
|
found bool
|
||||||
)
|
)
|
||||||
|
|
||||||
for snapshotID := range repo.List(backend.Snapshot, make(chan struct{})) {
|
for snapshotID := range repo.List(SnapshotFile, make(chan struct{})) {
|
||||||
snapshot, err := LoadSnapshot(repo, snapshotID)
|
snapshot, err := LoadSnapshot(repo, snapshotID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return backend.ID{}, errors.Errorf("Error listing snapshot: %v", err)
|
return ID{}, errors.Errorf("Error listing snapshot: %v", err)
|
||||||
}
|
}
|
||||||
if snapshot.Time.After(latest) && SamePaths(snapshot.Paths, targets) && (source == "" || source == snapshot.Hostname) {
|
if snapshot.Time.After(latest) && SamePaths(snapshot.Paths, targets) && (source == "" || source == snapshot.Hostname) {
|
||||||
latest = snapshot.Time
|
latest = snapshot.Time
|
||||||
@ -151,7 +150,7 @@ func FindLatestSnapshot(repo *repository.Repository, targets []string, source st
|
|||||||
}
|
}
|
||||||
|
|
||||||
if !found {
|
if !found {
|
||||||
return backend.ID{}, ErrNoSnapshotFound
|
return ID{}, ErrNoSnapshotFound
|
||||||
}
|
}
|
||||||
|
|
||||||
return latestID, nil
|
return latestID, nil
|
||||||
@@ -159,13 +158,13 @@ func FindLatestSnapshot(repo *repository.Repository, targets []string, source st

 // FindSnapshot takes a string and tries to find a snapshot whose ID matches
 // the string as closely as possible.
-func FindSnapshot(repo *repository.Repository, s string) (backend.ID, error) {
+func FindSnapshot(repo Repository, s string) (ID, error) {

 	// find snapshot id with prefix
-	name, err := backend.Find(repo.Backend(), backend.Snapshot, s)
+	name, err := backend.Find(repo.Backend(), SnapshotFile, s)
 	if err != nil {
-		return backend.ID{}, err
+		return ID{}, err
 	}

-	return backend.ParseID(name)
+	return ParseID(name)
 }
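FindSnapshot resolves a user-supplied prefix to a full snapshot ID, which is how CLI commands typically accept abbreviated IDs. An illustrative wrapper; resolveSnapshot is not part of this commit:

package main

import (
	"fmt"

	"restic"
)

// resolveSnapshot resolves a (possibly abbreviated) snapshot ID string and
// reports the full ID it matched.
func resolveSnapshot(repo restic.Repository, prefix string) error {
	id, err := restic.FindSnapshot(repo, prefix)
	if err != nil {
		return fmt.Errorf("no snapshot matches %q: %v", prefix, err)
	}
	fmt.Printf("resolved %q to snapshot %v\n", prefix, id.Str())
	return nil
}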

@@ -1,13 +1,12 @@
 package restic

 import (
+	crand "crypto/rand"
 	"encoding/json"
 	"fmt"
 	"io"
 	"math/rand"
-	"restic/backend"
 	"restic/pack"
-	"restic/repository"
 	"testing"
 	"time"

@@ -17,21 +16,21 @@ import (

 // fakeFile returns a reader which yields deterministic pseudo-random data.
 func fakeFile(t testing.TB, seed, size int64) io.Reader {
-	return io.LimitReader(repository.NewRandReader(rand.New(rand.NewSource(seed))), size)
+	return io.LimitReader(NewRandReader(rand.New(rand.NewSource(seed))), size)
 }

 type fakeFileSystem struct {
 	t           testing.TB
-	repo        *repository.Repository
-	knownBlobs  backend.IDSet
+	repo        Repository
+	knownBlobs  IDSet
 	duplication float32
 }

 // saveFile reads from rd and saves the blobs in the repository. The list of
 // IDs is returned.
-func (fs fakeFileSystem) saveFile(rd io.Reader) (blobs backend.IDs) {
-	blobs = backend.IDs{}
-	ch := chunker.New(rd, fs.repo.Config.ChunkerPolynomial)
+func (fs fakeFileSystem) saveFile(rd io.Reader) (blobs IDs) {
+	blobs = IDs{}
+	ch := chunker.New(rd, fs.repo.Config().ChunkerPolynomial())

 	for {
 		chunk, err := ch.Next(getBuf())
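For context, a sketch of the content-defined chunking loop that saveFile is built around, using the chunker API visible in this diff (New, Next) and the new Config().ChunkerPolynomial() accessor. The chunker import path, the buffer size, and the plain io.EOF check are assumptions; the real code reuses buffers through an internal getBuf() helper:

package main

import (
	"io"

	"github.com/restic/chunker"

	"restic"
)

// chunkIDs splits rd into content-defined chunks with the repository's
// chunker polynomial and returns the IDs the chunks would be stored under.
func chunkIDs(repo restic.Repository, rd io.Reader) (restic.IDs, error) {
	ch := chunker.New(rd, repo.Config().ChunkerPolynomial())

	var ids restic.IDs
	for {
		chunk, err := ch.Next(make([]byte, 0, 8*1024*1024))
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, err
		}
		// Hash computes the same content hash used for deduplication lookups.
		ids = append(ids, restic.Hash(chunk.Data))
	}
	return ids, nil
}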
@@ -43,7 +42,7 @@ func (fs fakeFileSystem) saveFile(rd io.Reader) (blobs backend.IDs) {
 			fs.t.Fatalf("unable to save chunk in repo: %v", err)
 		}

-		id := backend.Hash(chunk.Data)
+		id := Hash(chunk.Data)
 		if !fs.blobIsKnown(id, pack.Data) {
 			_, err := fs.repo.SaveAndEncrypt(pack.Data, chunk.Data, &id)
 			if err != nil {
@@ -66,20 +65,20 @@ const (
 	maxNodes = 32
 )

-func (fs fakeFileSystem) treeIsKnown(tree *Tree) (bool, backend.ID) {
+func (fs fakeFileSystem) treeIsKnown(tree *Tree) (bool, ID) {
 	data, err := json.Marshal(tree)
 	if err != nil {
 		fs.t.Fatalf("json.Marshal(tree) returned error: %v", err)
-		return false, backend.ID{}
+		return false, ID{}
 	}
 	data = append(data, '\n')

-	id := backend.Hash(data)
+	id := Hash(data)
 	return fs.blobIsKnown(id, pack.Tree), id

 }

-func (fs fakeFileSystem) blobIsKnown(id backend.ID, t pack.BlobType) bool {
+func (fs fakeFileSystem) blobIsKnown(id ID, t pack.BlobType) bool {
 	if rand.Float32() < fs.duplication {
 		return false
 	}
@@ -97,7 +96,7 @@ func (fs fakeFileSystem) blobIsKnown(id backend.ID, t pack.BlobType) bool {
 }

 // saveTree saves a tree of fake files in the repo and returns the ID.
-func (fs fakeFileSystem) saveTree(seed int64, depth int) backend.ID {
+func (fs fakeFileSystem) saveTree(seed int64, depth int) ID {
 	rnd := rand.NewSource(seed)
 	numNodes := int(rnd.Int63() % maxNodes)

@@ -151,7 +150,7 @@ func (fs fakeFileSystem) saveTree(seed int64, depth int) backend.ID {
 // also used as the snapshot's timestamp. The tree's depth can be specified
 // with the parameter depth. The parameter duplication is a probability that
 // the same blob will be saved again.
-func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time, depth int, duplication float32) *Snapshot {
+func TestCreateSnapshot(t testing.TB, repo Repository, at time.Time, depth int, duplication float32) *Snapshot {
 	seed := at.Unix()
 	t.Logf("create fake snapshot at %s with seed %d", at, seed)

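A hypothetical test using the refactored helper; it relies on repository.TestRepository, the same constructor used by the benchmark this commit removes further down, whose return value satisfies the new Repository interface:

package restic_test

import (
	"testing"
	"time"

	"restic"
	"restic/repository"
)

// TestFakeSnapshot creates a deterministic fake snapshot in a test repository.
func TestFakeSnapshot(t *testing.T) {
	repo, cleanup := repository.TestRepository(t)
	defer cleanup()

	// depth 2; duplication 0.1 means a known blob is saved again with
	// probability 10% (see the comment fixed above).
	sn := restic.TestCreateSnapshot(t, repo, time.Now(), 2, 0.1)
	t.Logf("created snapshot %v", sn.ID().Str())
}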
@@ -165,14 +164,14 @@ func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time,
 	fs := fakeFileSystem{
 		t:           t,
 		repo:        repo,
-		knownBlobs:  backend.NewIDSet(),
+		knownBlobs:  NewIDSet(),
 		duplication: duplication,
 	}

 	treeID := fs.saveTree(seed, depth)
 	snapshot.Tree = &treeID

-	id, err := repo.SaveJSONUnpacked(backend.Snapshot, snapshot)
+	id, err := repo.SaveJSONUnpacked(SnapshotFile, snapshot)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -194,24 +193,7 @@ func TestCreateSnapshot(t testing.TB, repo *repository.Repository, at time.Time,
 	return snapshot
 }

-// TestResetRepository removes all packs and indexes from the repository.
-func TestResetRepository(t testing.TB, repo Repository) {
-	done := make(chan struct{})
-	defer close(done)
-
-	for _, tpe := range []FileType{SnapshotFile, IndexFile, DataFile} {
-		for id := range repo.Backend().List(tpe, done) {
-			err := repo.Backend().Remove(tpe, id)
-			if err != nil {
-				t.Errorf("removing %v (%v) failed: %v", id[0:12], tpe, err)
-			}
-		}
-	}
-
-	repo.SetIndex(repository.NewMasterIndex())
-}
-
-// TestParseID parses s as a backend.ID and panics if that fails.
+// TestParseID parses s as an ID and panics if that fails.
 func TestParseID(s string) ID {
 	id, err := ParseID(s)
 	if err != nil {
@@ -220,3 +202,14 @@ func TestParseID(s string) ID {

 	return id
 }
+
+// TestRandomID returns a randomly generated ID. When reading from rand fails,
+// the function panics.
+func TestRandomID() ID {
+	id := ID{}
+	_, err := io.ReadFull(crand.Reader, id[:])
+	if err != nil {
+		panic(err)
+	}
+	return id
+}
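A minimal sketch of how the new TestRandomID pairs with TestParseID in a test; the fixed hex string is illustrative only (64 hex digits for a 32-byte ID, assuming ID stays a SHA-256-sized byte array as the id[:] read above suggests):

package restic_test

import (
	"testing"

	"restic"
)

func TestIDHelpers(t *testing.T) {
	// No error handling needed: TestRandomID panics if crypto/rand fails.
	id := restic.TestRandomID()
	t.Logf("random ID: %v", id.Str())

	// TestParseID panics on malformed input, so tests can call it inline.
	known := restic.TestParseID("0101010101010101010101010101010101010101010101010101010101010101")

	// ID is an array type, so values compare directly with ==.
	if id == known {
		t.Fatal("random ID unexpectedly equals the fixed test ID")
	}
}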

@@ -47,15 +47,3 @@ func TestCreateSnapshot(t *testing.T) {

 	checker.TestCheckRepo(t, repo)
 }
-
-func BenchmarkCreateSnapshot(b *testing.B) {
-	repo, cleanup := repository.TestRepository(b)
-	defer cleanup()
-
-	b.ResetTimer()
-
-	for i := 0; i < b.N; i++ {
-		restic.TestCreateSnapshot(b, repo, testSnapshotTime, testDepth, 0)
-		restic.TestResetRepository(b, repo)
-	}
-}

@@ -6,7 +6,6 @@ import (

 	"github.com/pkg/errors"

-	"restic/backend"
 	"restic/debug"
 	"restic/pack"
 )
@@ -31,10 +30,10 @@ func (t Tree) String() string {
 }

 type TreeLoader interface {
-	LoadJSONPack(pack.BlobType, backend.ID, interface{}) error
+	LoadJSONPack(pack.BlobType, ID, interface{}) error
 }

-func LoadTree(repo TreeLoader, id backend.ID) (*Tree, error) {
+func LoadTree(repo TreeLoader, id ID) (*Tree, error) {
 	tree := &Tree{}
 	err := repo.LoadJSONPack(pack.Tree, id, tree)
 	if err != nil {
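Since TreeLoader is a one-method interface, a stand-in is easy to write, which is the usual payoff of narrowing signatures like this. A sketch of a hypothetical in-memory implementation, e.g. for unit tests; the map-backed type and its error message are assumptions:

package treetest

import (
	"encoding/json"

	"github.com/pkg/errors"

	"restic"
	"restic/pack"
)

// mapTreeLoader satisfies restic.TreeLoader: trees are held as serialized
// JSON keyed by ID, and LoadJSONPack simply unmarshals the stored bytes.
type mapTreeLoader struct {
	trees map[restic.ID][]byte
}

func (l mapTreeLoader) LoadJSONPack(t pack.BlobType, id restic.ID, item interface{}) error {
	buf, ok := l.trees[id]
	if !ok {
		return errors.Errorf("blob %v not found", id.Str())
	}
	return json.Unmarshal(buf, item)
}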
@@ -95,7 +94,7 @@ func (t Tree) Find(name string) (*Node, error) {
 }

 // Subtrees returns a slice of all subtree IDs of the tree.
-func (t Tree) Subtrees() (trees backend.IDs) {
+func (t Tree) Subtrees() (trees IDs) {
 	for _, node := range t.Nodes {
 		if node.FileType == "dir" && node.Subtree != nil {
 			trees = append(trees, *node.Subtree)

@@ -6,7 +6,6 @@ import (
 	"path/filepath"
 	"sync"

-	"restic/backend"
 	"restic/debug"
 	"restic/pack"
 )
@@ -35,7 +34,7 @@ func NewTreeWalker(ch chan<- loadTreeJob, out chan<- WalkTreeJob) *TreeWalker {

 // Walk starts walking the tree given by id. When the channel done is closed,
 // processing stops.
-func (tw *TreeWalker) Walk(path string, id backend.ID, done chan struct{}) {
+func (tw *TreeWalker) Walk(path string, id ID, done chan struct{}) {
 	debug.Log("TreeWalker.Walk", "starting on tree %v for %v", id.Str(), path)
 	defer debug.Log("TreeWalker.Walk", "done walking tree %v for %v", id.Str(), path)

@@ -119,11 +118,11 @@ type loadTreeResult struct {
 }

 type loadTreeJob struct {
-	id  backend.ID
+	id  ID
 	res chan<- loadTreeResult
 }

-type treeLoader func(backend.ID) (*Tree, error)
+type treeLoader func(ID) (*Tree, error)

 func loadTreeWorker(wg *sync.WaitGroup, in <-chan loadTreeJob, load treeLoader, done <-chan struct{}) {
 	debug.Log("loadTreeWorker", "start")
@@ -162,10 +161,10 @@ const loadTreeWorkers = 10
 // WalkTree walks the tree specified by id recursively and sends a job for each
 // file and directory it finds. When the channel done is closed, processing
 // stops.
-func WalkTree(repo TreeLoader, id backend.ID, done chan struct{}, jobCh chan<- WalkTreeJob) {
+func WalkTree(repo TreeLoader, id ID, done chan struct{}, jobCh chan<- WalkTreeJob) {
 	debug.Log("WalkTree", "start on %v, start workers", id.Str())

-	load := func(id backend.ID) (*Tree, error) {
+	load := func(id ID) (*Tree, error) {
 		tree := &Tree{}
 		err := repo.LoadJSONPack(pack.Tree, id, tree)
 		if err != nil {
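A sketch of driving the exported WalkTree with its new signature. It assumes WalkTree closes jobCh once the walk finishes (the worker shutdown is outside the visible part of this diff), and WalkTreeJob's fields are not shown here, so the jobs are only counted:

package main

import (
	"restic"
)

// countJobs walks the tree rooted at root and returns how many file and
// directory jobs the walker produced.
func countJobs(repo restic.TreeLoader, root restic.ID) int {
	done := make(chan struct{})
	defer close(done)

	jobs := make(chan restic.WalkTreeJob)
	go restic.WalkTree(repo, root, done, jobs)

	n := 0
	for range jobs {
		n++
	}
	return n
}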