Commit 51d8e6aa28 (parent f0600c1d5f)
Alexander Neumann, 2016-08-31 20:58:57 +02:00
23 changed files with 143 additions and 264 deletions

View File

@@ -5,44 +5,18 @@ import (
"fmt"
)
// Blob is one part of a file or a tree.
type Blob struct {
ID *ID `json:"id,omitempty"`
Size uint64 `json:"size,omitempty"`
Storage *ID `json:"sid,omitempty"` // encrypted ID
StorageSize uint64 `json:"ssize,omitempty"` // encrypted Size
Type BlobType
Length uint
ID ID
Offset uint
}
type Blobs []Blob
func (b Blob) Valid() bool {
if b.ID == nil || b.Storage == nil || b.StorageSize == 0 {
return false
}
return true
}
func (b Blob) String() string {
return fmt.Sprintf("Blob<%s (%d) -> %s (%d)>",
b.ID.Str(), b.Size,
b.Storage.Str(), b.StorageSize)
}
// Compare compares two blobs by comparing the ID and the size. It returns -1,
// 0, or 1.
func (b Blob) Compare(other Blob) int {
if res := b.ID.Compare(*other.ID); res != 0 {
return res
}
if b.Size < other.Size {
return -1
}
if b.Size > other.Size {
return 1
}
return 0
// PackedBlob is a blob stored within a file.
type PackedBlob struct {
Blob
PackID ID
}
// BlobHandle identifies a blob of a given type.
@@ -101,3 +75,38 @@ func (t *BlobType) UnmarshalJSON(buf []byte) error {
return nil
}
// BlobHandles is an ordered list of BlobHandles that implements sort.Interface.
type BlobHandles []BlobHandle
func (h BlobHandles) Len() int {
return len(h)
}
func (h BlobHandles) Less(i, j int) bool {
for k, b := range h[i].ID {
if b == h[j].ID[k] {
continue
}
if b < h[j].ID[k] {
return true
}
return false
}
return h[i].Type < h[j].Type
}
func (h BlobHandles) Swap(i, j int) {
h[i], h[j] = h[j], h[i]
}
func (h BlobHandles) String() string {
elements := make([]string, 0, len(h))
for _, e := range h {
elements = append(elements, e.String())
}
return fmt.Sprintf("%v", elements)
}
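The new BlobHandles type sorts by the raw ID bytes first and falls back to the blob type. A minimal sketch (not part of the commit) of using it from outside the package, under the same GOPATH-style import path ("restic") this commit uses; the Hash inputs are arbitrary, just to get two distinct IDs:

package main

import (
	"fmt"
	"sort"

	"restic"
)

func main() {
	// Hash is restic's SHA-256 helper, used elsewhere in this commit.
	handles := restic.BlobHandles{
		{ID: restic.Hash([]byte("foo")), Type: restic.TreeBlob},
		{ID: restic.Hash([]byte("bar")), Type: restic.DataBlob},
	}

	// Less compares ID bytes, then Type, so this orders the handles
	// by ID and prints them via BlobHandles.String.
	sort.Sort(handles)
	fmt.Println(handles)
}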

View File

@@ -1,12 +1,12 @@
package pack
package restic
import "sort"
// BlobSet is a set of blobs.
type BlobSet map[Handle]struct{}
type BlobSet map[BlobHandle]struct{}
// NewBlobSet returns a new BlobSet, populated with ids.
func NewBlobSet(handles ...Handle) BlobSet {
func NewBlobSet(handles ...BlobHandle) BlobSet {
m := make(BlobSet)
for _, h := range handles {
m[h] = struct{}{}
@@ -16,18 +16,18 @@ func NewBlobSet(handles ...Handle) BlobSet {
}
// Has returns true iff id is contained in the set.
func (s BlobSet) Has(h Handle) bool {
func (s BlobSet) Has(h BlobHandle) bool {
_, ok := s[h]
return ok
}
// Insert adds id to the set.
func (s BlobSet) Insert(h Handle) {
func (s BlobSet) Insert(h BlobHandle) {
s[h] = struct{}{}
}
// Delete removes id from the set.
func (s BlobSet) Delete(h Handle) {
func (s BlobSet) Delete(h BlobHandle) {
delete(s, h)
}
@@ -87,9 +87,9 @@ func (s BlobSet) Sub(other BlobSet) (result BlobSet) {
return result
}
// List returns a slice of all Handles in the set.
func (s BlobSet) List() Handles {
list := make(Handles, 0, len(s))
// List returns a sorted slice of all BlobHandles in the set.
func (s BlobSet) List() BlobHandles {
list := make(BlobHandles, 0, len(s))
for h := range s {
list = append(list, h)
}
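A usage sketch, not part of the diff: the set semantics are unchanged by the rename, only the key type moved from pack.Handle to restic.BlobHandle.

package main

import (
	"fmt"

	"restic"
)

func main() {
	h1 := restic.BlobHandle{ID: restic.Hash([]byte("a")), Type: restic.DataBlob}
	h2 := restic.BlobHandle{ID: restic.Hash([]byte("b")), Type: restic.TreeBlob}

	s := restic.NewBlobSet(h1) // seed at construction
	s.Insert(h2)               // or add later

	fmt.Println(s.Has(h1)) // true
	s.Delete(h1)
	fmt.Println(s.Has(h1)) // false

	fmt.Println(s.List()) // remaining handles as sorted BlobHandles
}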

View File

@@ -1,12 +1,10 @@
package restic
import "restic/pack"
// FindUsedBlobs traverses the tree ID and adds all seen blobs (trees and data
// blobs) to the set blobs. The tree blobs in the `seen` BlobSet will not be visited
// again.
func FindUsedBlobs(repo Repository, treeID ID, blobs pack.BlobSet, seen pack.BlobSet) error {
blobs.Insert(pack.Handle{ID: treeID, Type: pack.Tree})
func FindUsedBlobs(repo Repository, treeID ID, blobs BlobSet, seen BlobSet) error {
blobs.Insert(BlobHandle{ID: treeID, Type: TreeBlob})
tree, err := LoadTree(repo, treeID)
if err != nil {
@@ -17,11 +15,11 @@ func FindUsedBlobs(repo Repository, treeID ID, blobs pack.BlobSet, seen pack.Blo
switch node.FileType {
case "file":
for _, blob := range node.Content {
blobs.Insert(pack.Handle{ID: blob, Type: pack.Data})
blobs.Insert(BlobHandle{ID: blob, Type: DataBlob})
}
case "dir":
subtreeID := *node.Subtree
h := pack.Handle{ID: subtreeID, Type: pack.Tree}
h := BlobHandle{ID: subtreeID, Type: TreeBlob}
if seen.Has(h) {
continue
}
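The call-site pattern matches the test further down: collect every blob reachable from a snapshot's root tree, passing a fresh `seen` set so shared subtrees are visited only once. A sketch under those assumptions:

package example

import "restic"

// countUsedBlobs is a sketch, not part of the commit; repo is any
// restic.Repository and treeID a snapshot's root tree ID.
func countUsedBlobs(repo restic.Repository, treeID restic.ID) (int, error) {
	blobs := restic.NewBlobSet()
	seen := restic.NewBlobSet()
	if err := restic.FindUsedBlobs(repo, treeID, blobs, seen); err != nil {
		return 0, err
	}
	return len(blobs), nil
}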

View File

@@ -12,22 +12,21 @@ import (
"testing"
"time"
"restic/pack"
"restic/repository"
)
func loadIDSet(t testing.TB, filename string) pack.BlobSet {
func loadIDSet(t testing.TB, filename string) BlobSet {
f, err := os.Open(filename)
if err != nil {
t.Logf("unable to open golden file %v: %v", filename, err)
return pack.NewBlobSet()
return NewBlobSet()
}
sc := bufio.NewScanner(f)
blobs := pack.NewBlobSet()
blobs := NewBlobSet()
for sc.Scan() {
var h pack.Handle
var h Handle
err := json.Unmarshal([]byte(sc.Text()), &h)
if err != nil {
t.Errorf("file %v contained invalid blob: %#v", filename, err)
@@ -44,14 +43,14 @@ func loadIDSet(t testing.TB, filename string) pack.BlobSet {
return blobs
}
func saveIDSet(t testing.TB, filename string, s pack.BlobSet) {
func saveIDSet(t testing.TB, filename string, s BlobSet) {
f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE, 0644)
if err != nil {
t.Fatalf("unable to update golden file %v: %v", filename, err)
return
}
var hs pack.Handles
var hs Handles
for h := range s {
hs = append(hs, h)
}
@@ -92,8 +91,8 @@ func TestFindUsedBlobs(t *testing.T) {
}
for i, sn := range snapshots {
usedBlobs := pack.NewBlobSet()
err := restic.FindUsedBlobs(repo, *sn.Tree, usedBlobs, pack.NewBlobSet())
usedBlobs := NewBlobSet()
err := restic.FindUsedBlobs(repo, *sn.Tree, usedBlobs, NewBlobSet())
if err != nil {
t.Errorf("FindUsedBlobs returned error: %v", err)
continue
@@ -127,8 +126,8 @@ func BenchmarkFindUsedBlobs(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
seen := pack.NewBlobSet()
blobs := pack.NewBlobSet()
seen := NewBlobSet()
blobs := NewBlobSet()
err := restic.FindUsedBlobs(repo, *sn.Tree, blobs, seen)
if err != nil {
b.Error(err)

View File

@@ -1,8 +1,7 @@
package list
import (
"restic/backend"
"restic/pack"
"restic"
"restic/worker"
)
@@ -10,19 +9,19 @@ const listPackWorkers = 10
// Lister lists packs in a repo and blobs in a pack.
type Lister interface {
List(backend.Type, <-chan struct{}) <-chan backend.ID
ListPack(backend.ID) ([]pack.Blob, int64, error)
List(restic.FileType, <-chan struct{}) <-chan restic.ID
ListPack(restic.ID) ([]restic.Blob, int64, error)
}
// Result is returned in the channel from AllPacks.
type Result struct {
packID backend.ID
packID restic.ID
size int64
entries []pack.Blob
entries []restic.Blob
}
// PackID returns the pack ID of this result.
func (l Result) PackID() backend.ID {
func (l Result) PackID() restic.ID {
return l.packID
}
@@ -32,14 +31,14 @@ func (l Result) Size() int64 {
}
// Entries returns a list of all blobs saved in the pack.
func (l Result) Entries() []pack.Blob {
func (l Result) Entries() []restic.Blob {
return l.entries
}
// AllPacks sends the contents of all packs to ch.
func AllPacks(repo Lister, ch chan<- worker.Job, done <-chan struct{}) {
f := func(job worker.Job, done <-chan struct{}) (interface{}, error) {
packID := job.Data.(backend.ID)
packID := job.Data.(restic.ID)
entries, size, err := repo.ListPack(packID)
return Result{
@@ -54,7 +53,7 @@ func AllPacks(repo Lister, ch chan<- worker.Job, done <-chan struct{}) {
go func() {
defer close(jobCh)
for id := range repo.List(backend.Data, done) {
for id := range repo.List(restic.DataFile, done) {
select {
case jobCh <- worker.Job{Data: id}:
case <-done:
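On the consumer side, AllPacks feeds one worker.Job per pack into ch. A hedged sketch of draining it, assuming worker.Job exposes Result and Error fields as in this era of restic/worker:

package example

import (
	"fmt"

	"restic/list"
	"restic/worker"
)

// printPacks is a sketch, not part of the commit.
func printPacks(repo list.Lister) {
	done := make(chan struct{})
	defer close(done)

	ch := make(chan worker.Job)
	go list.AllPacks(repo, ch, done)

	for job := range ch {
		if job.Error != nil { // assumed field, see above
			fmt.Println("list pack:", job.Error)
			continue
		}
		res := job.Result.(list.Result) // assumed field, see above
		fmt.Printf("pack %v: %d blobs, %d bytes\n",
			res.PackID().Str(), len(res.Entries()), res.Size())
	}
}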

View File

@@ -16,7 +16,6 @@ import (
"restic/debug"
"restic/fs"
"restic/pack"
)
// Node is a file, directory or other item in a backup.
@@ -43,9 +42,8 @@ type Node struct {
tree *Tree
path string
err error
blobs Blobs
path string
err error
}
func (node Node) String() string {
@@ -210,7 +208,7 @@ func (node Node) createFileAt(path string, repo Repository) error {
var buf []byte
for _, id := range node.Content {
size, err := repo.LookupBlobSize(id, pack.Data)
size, err := repo.LookupBlobSize(id, DataBlob)
if err != nil {
return err
}
@@ -220,7 +218,7 @@ func (node Node) createFileAt(path string, repo Repository) error {
buf = make([]byte, size)
}
buf, err := repo.LoadBlob(id, pack.Data, buf)
buf, err := repo.LoadBlob(id, DataBlob, buf)
if err != nil {
return err
}

View File

@@ -1,51 +0,0 @@
package pack
import (
"fmt"
"restic/backend"
)
// Handle identifies a blob of a given type.
type Handle struct {
ID backend.ID
Type BlobType
}
func (h Handle) String() string {
return fmt.Sprintf("<%s/%s>", h.Type, h.ID.Str())
}
// Handles is an ordered list of Handles that implements sort.Interface.
type Handles []Handle
func (h Handles) Len() int {
return len(h)
}
func (h Handles) Less(i, j int) bool {
for k, b := range h[i].ID {
if b == h[j].ID[k] {
continue
}
if b < h[j].ID[k] {
return true
}
return false
}
return h[i].Type < h[j].Type
}
func (h Handles) Swap(i, j int) {
h[i], h[j] = h[j], h[i]
}
func (h Handles) String() string {
elements := make([]string, 0, len(h))
for _, e := range h {
elements = append(elements, e.String())
}
return fmt.Sprintf("%v", elements)
}

View File

@@ -5,6 +5,7 @@ import (
"encoding/binary"
"fmt"
"io"
"restic"
"sync"
"github.com/pkg/errors"
@@ -13,58 +14,11 @@ import (
"restic/crypto"
)
// BlobType specifies what a blob stored in a pack is.
type BlobType uint8
// These are the blob types that can be stored in a pack.
const (
Invalid BlobType = iota
Data
Tree
)
func (t BlobType) String() string {
switch t {
case Data:
return "data"
case Tree:
return "tree"
}
return fmt.Sprintf("<BlobType %d>", t)
}
// MarshalJSON encodes the BlobType into JSON.
func (t BlobType) MarshalJSON() ([]byte, error) {
switch t {
case Data:
return []byte(`"data"`), nil
case Tree:
return []byte(`"tree"`), nil
}
return nil, errors.New("unknown blob type")
}
// UnmarshalJSON decodes the BlobType from JSON.
func (t *BlobType) UnmarshalJSON(buf []byte) error {
switch string(buf) {
case `"data"`:
*t = Data
case `"tree"`:
*t = Tree
default:
return errors.New("unknown blob type")
}
return nil
}
// Blob is a blob within a pack.
type Blob struct {
Type BlobType
Type restic.BlobType
Length uint
ID backend.ID
ID restic.ID
Offset uint
}
@@ -95,7 +49,7 @@ func NewPacker(k *crypto.Key, wr io.Writer) *Packer {
// Add saves the data read from rd as a new blob to the packer. Returned is the
// number of bytes written to the pack.
func (p *Packer) Add(t BlobType, id backend.ID, data []byte) (int, error) {
func (p *Packer) Add(t restic.BlobType, id restic.ID, data []byte) (int, error) {
p.m.Lock()
defer p.m.Unlock()
@@ -110,7 +64,7 @@ func (p *Packer) Add(t BlobType, id backend.ID, data []byte) (int, error) {
return n, errors.Wrap(err, "Write")
}
var entrySize = uint(binary.Size(BlobType(0)) + binary.Size(uint32(0)) + backend.IDSize)
var entrySize = uint(binary.Size(restic.BlobType(0)) + binary.Size(uint32(0)) + backend.IDSize)
// headerEntry is used with encoding/binary to read and write header entries
type headerEntry struct {
@@ -177,9 +131,9 @@ func (p *Packer) writeHeader(wr io.Writer) (bytesWritten uint, err error) {
}
switch b.Type {
case Data:
case restic.DataBlob:
entry.Type = 0
case Tree:
case restic.TreeBlob:
entry.Type = 1
default:
return 0, errors.Errorf("invalid blob type %v", b.Type)
@@ -312,9 +266,9 @@ func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []Blob, err error)
switch e.Type {
case 0:
entry.Type = Data
entry.Type = restic.DataBlob
case 1:
entry.Type = Tree
entry.Type = restic.TreeBlob
default:
return nil, errors.Errorf("invalid type %d", e.Type)
}
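The entrySize expression pins a header entry at 37 bytes: one byte for the type (BlobType is a uint8), four for the uint32 length, and backend.IDSize = 32 for the SHA-256 ID, with DataBlob and TreeBlob encoded as 0 and 1 in writeHeader and List. A sketch of that arithmetic:

package main

import (
	"encoding/binary"
	"fmt"

	"restic"
	"restic/backend"
)

func main() {
	// 1 (uint8 BlobType) + 4 (uint32 length) + 32 (SHA-256 ID) = 37.
	size := binary.Size(restic.BlobType(0)) + binary.Size(uint32(0)) + backend.IDSize
	fmt.Println(size) // 37
}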

View File

@@ -1,10 +1,6 @@
package restic
import (
"restic/pack"
"github.com/restic/chunker"
)
import "github.com/restic/chunker"
// Repository stores data in a backend. It provides high-level functions and
// transparently encrypts/decrypts data.
@@ -18,38 +14,33 @@ type Repository interface {
Index() Index
SaveFullIndex() error
SaveJSON(pack.BlobType, interface{}) (ID, error)
SaveJSON(BlobType, interface{}) (ID, error)
Config() Config
SaveAndEncrypt(pack.BlobType, []byte, *ID) (ID, error)
SaveAndEncrypt(BlobType, []byte, *ID) (ID, error)
SaveJSONUnpacked(FileType, interface{}) (ID, error)
SaveIndex() error
LoadJSONPack(pack.BlobType, ID, interface{}) error
LoadJSONPack(BlobType, ID, interface{}) error
LoadJSONUnpacked(FileType, ID, interface{}) error
LoadBlob(ID, pack.BlobType, []byte) ([]byte, error)
LoadBlob(ID, BlobType, []byte) ([]byte, error)
LookupBlobSize(ID, pack.BlobType) (uint, error)
LookupBlobSize(ID, BlobType) (uint, error)
List(FileType, <-chan struct{}) <-chan ID
ListPack(ID) ([]Blob, int64, error)
Flush() error
}
// Index keeps track of the blobs that are stored within files.
type Index interface {
Has(ID, pack.BlobType) bool
Lookup(ID, pack.BlobType) ([]PackedBlob, error)
Has(ID, BlobType) bool
Lookup(ID, BlobType) ([]PackedBlob, error)
}
// Config stores information about the repository.
type Config interface {
ChunkerPolynomial() chunker.Pol
}
type PackedBlob interface {
Type() pack.BlobType
Length() uint
ID() ID
Offset() uint
PackID() ID
}
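After this change the Repository and Index interfaces speak restic.BlobType directly, so helpers can be written without importing restic/pack at all. A minimal sketch against just the interfaces above:

package example

import "restic"

// blobIsIndexed is a sketch, not part of the commit; it relies only on
// the Repository and Index interfaces declared above.
func blobIsIndexed(repo restic.Repository, id restic.ID) bool {
	return repo.Index().Has(id, restic.DataBlob) ||
		repo.Index().Has(id, restic.TreeBlob)
}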

View File

@@ -13,13 +13,12 @@ import (
"restic/crypto"
"restic/debug"
"restic/pack"
)
// Index holds a lookup table for id -> pack.
type Index struct {
m sync.Mutex
pack map[pack.Handle][]indexEntry
pack map[restic.BlobHandle][]indexEntry
final bool // set to true for all indexes read from the backend ("finalized")
id restic.ID // set to the ID of the index when it's finalized
@@ -36,7 +35,7 @@ type indexEntry struct {
// NewIndex returns a new index.
func NewIndex() *Index {
return &Index{
pack: make(map[pack.Handle][]indexEntry),
pack: make(map[restic.BlobHandle][]indexEntry),
created: time.Now(),
}
}
@@ -47,7 +46,7 @@ func (idx *Index) store(blob PackedBlob) {
offset: blob.Offset,
length: blob.Length,
}
h := pack.Handle{ID: blob.ID, Type: blob.Type}
h := restic.BlobHandle{ID: blob.ID, Type: blob.Type}
idx.pack[h] = append(idx.pack[h], newEntry)
}
@@ -112,11 +111,11 @@ func (idx *Index) Store(blob PackedBlob) {
}
// Lookup queries the index for the blob ID and returns a PackedBlob.
func (idx *Index) Lookup(id restic.ID, tpe pack.BlobType) (blobs []PackedBlob, err error) {
func (idx *Index) Lookup(id restic.ID, tpe restic.BlobType) (blobs []PackedBlob, err error) {
idx.m.Lock()
defer idx.m.Unlock()
h := pack.Handle{ID: id, Type: tpe}
h := restic.BlobHandle{ID: id, Type: tpe}
if packs, ok := idx.pack[h]; ok {
blobs = make([]PackedBlob, 0, len(packs))
@@ -166,7 +165,7 @@ func (idx *Index) ListPack(id restic.ID) (list []PackedBlob) {
}
// Has returns true iff the id is listed in the index.
func (idx *Index) Has(id restic.ID, tpe pack.BlobType) bool {
func (idx *Index) Has(id restic.ID, tpe restic.BlobType) bool {
_, err := idx.Lookup(id, tpe)
if err == nil {
return true
@@ -177,7 +176,7 @@ func (idx *Index) Has(id restic.ID, tpe pack.BlobType) bool {
// LookupSize returns the length of the cleartext content behind the
// given id
func (idx *Index) LookupSize(id restic.ID, tpe pack.BlobType) (cleartextLength uint, err error) {
func (idx *Index) LookupSize(id restic.ID, tpe restic.BlobType) (cleartextLength uint, err error) {
blobs, err := idx.Lookup(id, tpe)
if err != nil {
return 0, err
@@ -207,7 +206,7 @@ func (idx *Index) AddToSupersedes(ids ...restic.ID) error {
// PackedBlob is a blob already saved within a pack.
type PackedBlob struct {
Type pack.BlobType
Type restic.BlobType
Length uint
ID restic.ID
Offset uint
@@ -274,7 +273,7 @@ func (idx *Index) Packs() restic.IDSet {
}
// Count returns the number of blobs of type t in the index.
func (idx *Index) Count(t pack.BlobType) (n uint) {
func (idx *Index) Count(t restic.BlobType) (n uint) {
debug.Log("Index.Count", "counting blobs of type %v", t)
idx.m.Lock()
defer idx.m.Unlock()
@@ -305,10 +304,10 @@ type packJSON struct {
}
type blobJSON struct {
ID restic.ID `json:"id"`
Type pack.BlobType `json:"type"`
Offset uint `json:"offset"`
Length uint `json:"length"`
ID restic.ID `json:"id"`
Type restic.BlobType `json:"type"`
Offset uint `json:"offset"`
Length uint `json:"length"`
}
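Given the struct tags above and BlobType's MarshalJSON ("data"/"tree"), one entry in the serialized pack list takes a shape like this illustrative example (the ID is borrowed from the test data below; offset and length are made up):

{
  "id": "3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce",
  "type": "data",
  "offset": 0,
  "length": 25
}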
// generatePackList returns a list of packs.

View File

@@ -12,7 +12,7 @@ import (
// RebuildIndex lists all packs in the repo, writes a new index and removes all
// old indexes. This operation should only be done with an exclusive lock in
// place.
func RebuildIndex(repo *Repository) error {
func RebuildIndex(repo restic.Repository) error {
debug.Log("RebuildIndex", "start rebuilding index")
done := make(chan struct{})

View File

@@ -15,7 +15,7 @@ func TestIndexSerialize(t *testing.T) {
type testEntry struct {
id restic.ID
pack restic.ID
tpe pack.BlobType
tpe restic.BlobType
offset, length uint
}
tests := []testEntry{}
@@ -251,7 +251,7 @@ var docOldExample = []byte(`
var exampleTests = []struct {
id, packID restic.ID
tpe pack.BlobType
tpe restic.BlobType
offset, length uint
}{
{
@@ -271,10 +271,10 @@ var exampleTests = []struct {
var exampleLookupTest = struct {
packID restic.ID
blobs map[restic.ID]pack.BlobType
blobs map[restic.ID]restic.BlobType
}{
ParseID("73d04e6125cf3c28a299cc2f3cca3b78ceac396e4fcf9575e34536b26782413c"),
map[restic.ID]pack.BlobType{
map[restic.ID]restic.BlobType{
ParseID("3ec79977ef0cf5de7b08cd12b874cd0f62bbaf7f07f3497a5b1bbcc8cb39b1ce"): pack.Data,
ParseID("9ccb846e60d90d4eb915848add7aa7ea1e4bbabfc60e573db9f7bfb2789afbae"): pack.Tree,
ParseID("d3dc577b4ffd38cc4b32122cabf8655a0223ed22edfd93b353dc0c3f2b0fdf66"): pack.Data,

View File

@@ -143,7 +143,7 @@ func SearchKey(s *Repository, password string, maxKeys int) (*Key, error) {
// LoadKey loads a key from the backend.
func LoadKey(s *Repository, name string) (k *Key, err error) {
h := restic.Handle{Type: backend.Key, Name: name}
h := restic.Handle{FileType: restic.KeyFile, Name: name}
data, err := backend.LoadAll(s.be, h, nil)
if err != nil {
return nil, err

View File

@@ -22,7 +22,7 @@ func NewMasterIndex() *MasterIndex {
}
// Lookup queries all known Indexes for the ID and returns the first match.
func (mi *MasterIndex) Lookup(id restic.ID, tpe pack.BlobType) (blobs []PackedBlob, err error) {
func (mi *MasterIndex) Lookup(id restic.ID, tpe restic.BlobType) (blobs []PackedBlob, err error) {
mi.idxMutex.RLock()
defer mi.idxMutex.RUnlock()
@@ -42,7 +42,7 @@ func (mi *MasterIndex) Lookup(id restic.ID, tpe pack.BlobType) (blobs []PackedBl
}
// LookupSize queries all known Indexes for the ID and returns the first match.
func (mi *MasterIndex) LookupSize(id restic.ID, tpe pack.BlobType) (uint, error) {
func (mi *MasterIndex) LookupSize(id restic.ID, tpe restic.BlobType) (uint, error) {
mi.idxMutex.RLock()
defer mi.idxMutex.RUnlock()
@@ -73,7 +73,7 @@ func (mi *MasterIndex) ListPack(id restic.ID) (list []PackedBlob) {
}
// Has queries all known Indexes for the ID and returns the first match.
func (mi *MasterIndex) Has(id restic.ID, tpe pack.BlobType) bool {
func (mi *MasterIndex) Has(id restic.ID, tpe restic.BlobType) bool {
mi.idxMutex.RLock()
defer mi.idxMutex.RUnlock()
@@ -87,7 +87,7 @@ func (mi *MasterIndex) Has(id restic.ID, tpe pack.BlobType) bool {
}
// Count returns the number of blobs of type t in the index.
func (mi *MasterIndex) Count(t pack.BlobType) (n uint) {
func (mi *MasterIndex) Count(t restic.BlobType) (n uint) {
mi.idxMutex.RLock()
defer mi.idxMutex.RUnlock()

View File

@@ -27,7 +27,7 @@ func random(t testing.TB, length int) []byte {
func createRandomBlobs(t testing.TB, repo *repository.Repository, blobs int, pData float32) {
for i := 0; i < blobs; i++ {
var (
tpe pack.BlobType
tpe restic.BlobType
length int
)

View File

@@ -79,7 +79,7 @@ func (r *Repository) LoadAndDecrypt(t restic.FileType, id restic.ID) ([]byte, er
// LoadBlob tries to load and decrypt content identified by t and id from a
// pack from the backend, the result is stored in plaintextBuf, which must be
// large enough to hold the complete blob.
func (r *Repository) LoadBlob(id restic.ID, t pack.BlobType, plaintextBuf []byte) ([]byte, error) {
func (r *Repository) LoadBlob(id restic.ID, t restic.BlobType, plaintextBuf []byte) ([]byte, error) {
debug.Log("Repo.LoadBlob", "load %v with id %v", t, id.Str())
// lookup plaintext size of blob
@@ -174,7 +174,7 @@ func (r *Repository) LoadJSONUnpacked(t restic.FileType, id restic.ID, item inte
// LoadJSONPack calls LoadBlob() to load a blob from the backend, decrypt the
// data and afterwards call json.Unmarshal on the item.
func (r *Repository) LoadJSONPack(t pack.BlobType, id restic.ID, item interface{}) (err error) {
func (r *Repository) LoadJSONPack(t restic.BlobType, id restic.ID, item interface{}) (err error) {
buf, err := r.LoadBlob(id, t, nil)
if err != nil {
return err
@@ -184,13 +184,13 @@ func (r *Repository) LoadJSONPack(t pack.BlobType, id restic.ID, item interface{
}
// LookupBlobSize returns the size of blob id.
func (r *Repository) LookupBlobSize(id restic.ID, tpe pack.BlobType) (uint, error) {
func (r *Repository) LookupBlobSize(id restic.ID, tpe restic.BlobType) (uint, error) {
return r.idx.LookupSize(id, tpe)
}
// SaveAndEncrypt encrypts data and stores it to the backend as type t. If data
// is small enough, it will be packed together with other small blobs.
func (r *Repository) SaveAndEncrypt(t pack.BlobType, data []byte, id *restic.ID) (restic.ID, error) {
func (r *Repository) SaveAndEncrypt(t restic.BlobType, data []byte, id *restic.ID) (restic.ID, error) {
if id == nil {
// compute plaintext hash
hashedID := restic.Hash(data)
@@ -235,7 +235,7 @@ func (r *Repository) SaveAndEncrypt(t pack.BlobType, data []byte, id *restic.ID)
// SaveJSON serialises item as JSON and encrypts and saves it in a pack in the
// backend as type t.
func (r *Repository) SaveJSON(t pack.BlobType, item interface{}) (restic.ID, error) {
func (r *Repository) SaveJSON(t restic.BlobType, item interface{}) (restic.ID, error) {
debug.Log("Repo.SaveJSON", "save %v blob", t)
buf := getBuf()[:0]
defer freeBuf(buf)
@@ -319,7 +319,7 @@ func (r *Repository) SetIndex(i *MasterIndex) {
}
// SaveIndex saves an index in the repository.
func SaveIndex(repo *Repository, index *Index) (restic.ID, error) {
func SaveIndex(repo restic.Repository, index *Index) (restic.ID, error) {
buf := bytes.NewBuffer(nil)
err := index.Finalize(buf)

View File

@@ -8,8 +8,6 @@ import (
"time"
"github.com/pkg/errors"
"restic/backend"
)
// Snapshot is the state of a resource at one point in time.
@@ -155,16 +153,3 @@ func FindLatestSnapshot(repo Repository, targets []string, source string) (ID, e
return latestID, nil
}
// FindSnapshot takes a string and tries to find a snapshot whose ID matches
// the string as closely as possible.
func FindSnapshot(repo Repository, s string) (ID, error) {
// find snapshot id with prefix
name, err := backend.Find(repo.Backend(), SnapshotFile, s)
if err != nil {
return ID{}, err
}
return ParseID(name)
}

View File

@@ -2,6 +2,7 @@ package restic
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"path/filepath"
@@ -11,6 +12,8 @@ import (
"time"
)
var updateGoldenFiles = flag.Bool("update", false, "update golden files in testdata/")
func parseTime(s string) time.Time {
t, err := time.Parse("2006-01-02 15:04:05", s)
if err != nil {
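The new -update flag enables the usual golden-file workflow: run go test with -update once to regenerate testdata, then run it plainly to compare against it. A sketch of the guard a test in this file might use; computeResult, the golden path, and the bytes import are hypothetical:

func TestSomething(t *testing.T) {
	golden := filepath.Join("testdata", "something.golden")
	actual := computeResult() // hypothetical helper

	if *updateGoldenFiles {
		if err := ioutil.WriteFile(golden, actual, 0644); err != nil {
			t.Fatalf("update golden file %v: %v", golden, err)
		}
	}

	expected, err := ioutil.ReadFile(golden)
	if err != nil {
		t.Fatalf("read golden file %v: %v", golden, err)
	}
	if !bytes.Equal(expected, actual) {
		t.Errorf("result does not match golden file %v", golden)
	}
}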

View File

@@ -6,7 +6,6 @@ import (
"fmt"
"io"
"math/rand"
"restic/pack"
"testing"
"time"
@@ -43,8 +42,8 @@ func (fs fakeFileSystem) saveFile(rd io.Reader) (blobs IDs) {
}
id := Hash(chunk.Data)
if !fs.blobIsKnown(id, pack.Data) {
_, err := fs.repo.SaveAndEncrypt(pack.Data, chunk.Data, &id)
if !fs.blobIsKnown(id, DataBlob) {
_, err := fs.repo.SaveAndEncrypt(DataBlob, chunk.Data, &id)
if err != nil {
fs.t.Fatalf("error saving chunk: %v", err)
}
@@ -74,11 +73,11 @@ func (fs fakeFileSystem) treeIsKnown(tree *Tree) (bool, ID) {
data = append(data, '\n')
id := Hash(data)
return fs.blobIsKnown(id, pack.Tree), id
return fs.blobIsKnown(id, TreeBlob), id
}
func (fs fakeFileSystem) blobIsKnown(id ID, t pack.BlobType) bool {
func (fs fakeFileSystem) blobIsKnown(id ID, t BlobType) bool {
if rand.Float32() < fs.duplication {
return false
}
@@ -137,7 +136,7 @@ func (fs fakeFileSystem) saveTree(seed int64, depth int) ID {
return id
}
id, err := fs.repo.SaveJSON(pack.Tree, tree)
id, err := fs.repo.SaveJSON(TreeBlob, tree)
if err != nil {
fs.t.Fatal(err)
}

View File

@@ -7,7 +7,6 @@ import (
"github.com/pkg/errors"
"restic/debug"
"restic/pack"
)
type Tree struct {
@@ -30,12 +29,12 @@ func (t Tree) String() string {
}
type TreeLoader interface {
LoadJSONPack(pack.BlobType, ID, interface{}) error
LoadJSONPack(BlobType, ID, interface{}) error
}
func LoadTree(repo TreeLoader, id ID) (*Tree, error) {
tree := &Tree{}
err := repo.LoadJSONPack(pack.Tree, id, tree)
err := repo.LoadJSONPack(TreeBlob, id, tree)
if err != nil {
return nil, err
}
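Since TreeLoader is satisfied by anything declaring LoadJSONPack, including the Repository interface, loading and printing a tree needs only this much. A sketch, not part of the commit:

package example

import (
	"fmt"

	"restic"
)

func printTree(repo restic.TreeLoader, id restic.ID) error {
	tree, err := restic.LoadTree(repo, id)
	if err != nil {
		return err
	}
	fmt.Println(tree) // Tree has a String method, shown above
	return nil
}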

View File

@@ -8,7 +8,6 @@ import (
"testing"
"restic"
"restic/pack"
. "restic/test"
)
@@ -98,7 +97,7 @@ func TestLoadTree(t *testing.T) {
// save tree
tree := restic.NewTree()
id, err := repo.SaveJSON(pack.Tree, tree)
id, err := repo.SaveJSON(TreeBlob, tree)
OK(t, err)
// save packs

View File

@@ -7,7 +7,6 @@ import (
"sync"
"restic/debug"
"restic/pack"
)
// WalkTreeJob is a job sent from the tree walker.
@@ -166,7 +165,7 @@ func WalkTree(repo TreeLoader, id ID, done chan struct{}, jobCh chan<- WalkTreeJ
load := func(id ID) (*Tree, error) {
tree := &Tree{}
err := repo.LoadJSONPack(pack.Tree, id, tree)
err := repo.LoadJSONPack(TreeBlob, id, tree)
if err != nil {
return nil, err
}
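A hedged sketch of the receiving end of WalkTree; the Path and Error fields on WalkTreeJob are assumptions based on this era of walk.go, not shown in the diff:

// Fragment in package restic style; repo and treeID are assumed.
done := make(chan struct{})
jobs := make(chan WalkTreeJob)
go WalkTree(repo, treeID, done, jobs)

for job := range jobs {
	if job.Error != nil { // assumed field
		fmt.Println("walk:", job.Error)
		continue
	}
	fmt.Println(job.Path) // assumed field
}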

View File

@@ -9,7 +9,6 @@ import (
"restic"
"restic/backend"
"restic/pack"
"restic/pipe"
"restic/repository"
. "restic/test"
@@ -95,7 +94,7 @@ type delayRepo struct {
delay time.Duration
}
func (d delayRepo) LoadJSONPack(t pack.BlobType, id backend.ID, dst interface{}) error {
func (d delayRepo) LoadJSONPack(t BlobType, id backend.ID, dst interface{}) error {
time.Sleep(d.delay)
return d.repo.LoadJSONPack(t, id, dst)
}