Add checks for Server.Load(), use Blob for load
parent 5e69788eac
commit f157f775da
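For orientation, here is a minimal caller-side sketch (not part of the commit; the helper name is illustrative) of the pattern the diff below moves to: tree loads go through restic.Blob, and a bare storage ID is wrapped as Blob{Storage: id}.

package example

import (
    "github.com/restic/restic"
    "github.com/restic/restic/backend"
)

// loadTreeByName sketches the new call pattern: a name from the backend
// listing is parsed into an ID and wrapped in a Blob before LoadTree is
// called, so Server.Load() can apply its (optional) checks.
func loadTreeByName(s restic.Server, name string) (*restic.Tree, error) {
    id, err := backend.ParseID(name)
    if err != nil {
        return nil, err
    }

    // Only Storage is set, so the plaintext ID and size checks are skipped.
    return restic.LoadTree(s, restic.Blob{Storage: id})
}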
@@ -736,7 +736,7 @@ func (arch *Archiver) Snapshot(p *Progress, paths []string, pid backend.ID) (*Sn

 // start walker on old tree
 ch := make(chan WalkTreeJob)
-go WalkTree(arch.s, parent.Tree.Storage, done, ch)
+go WalkTree(arch.s, parent.Tree, done, ch)
 jobs.Old = ch
 } else {
 // use closed channel
@@ -130,10 +130,10 @@ func BenchmarkArchiveDirectory(b *testing.B) {
 b.Skip("benchdir not set, skipping BenchmarkArchiveDirectory")
 }

-be := setupBackend(b)
-defer teardownBackend(b, be)
-key := setupKey(b, be, "geheim")
-server := restic.NewServerWithKey(be, key)
+server := setupBackend(b)
+defer teardownBackend(b, server)
+key := setupKey(b, server, "geheim")
+server.SetKey(key)

 arch, err := restic.NewArchiver(server)
 ok(b, err)
@@ -161,10 +161,10 @@ func archiveWithPreload(t testing.TB) {
 t.Skip("benchdir not set, skipping TestArchiverPreload")
 }

-be := setupBackend(t)
-defer teardownBackend(t, be)
-key := setupKey(t, be, "geheim")
-server := restic.NewServerWithKey(be, key)
+server := setupBackend(t)
+defer teardownBackend(t, server)
+key := setupKey(t, server, "geheim")
+server.SetKey(key)

 // archive a few files
 sn := snapshot(t, server, *benchArchiveDirectory, nil)
@@ -212,10 +212,10 @@ func BenchmarkPreload(t *testing.B) {
 t.Skip("benchdir not set, skipping TestArchiverPreload")
 }

-be := setupBackend(t)
-defer teardownBackend(t, be)
-key := setupKey(t, be, "geheim")
-server := restic.NewServerWithKey(be, key)
+server := setupBackend(t)
+defer teardownBackend(t, server)
+key := setupKey(t, server, "geheim")
+server.SetKey(key)

 // archive a few files
 arch, err := restic.NewArchiver(server)
@@ -240,10 +240,10 @@ func BenchmarkLoadTree(t *testing.B) {
 t.Skip("benchdir not set, skipping TestArchiverPreload")
 }

-be := setupBackend(t)
-defer teardownBackend(t, be)
-key := setupKey(t, be, "geheim")
-server := restic.NewServerWithKey(be, key)
+server := setupBackend(t)
+defer teardownBackend(t, server)
+key := setupKey(t, server, "geheim")
+server.SetKey(key)

 // archive a few files
 arch, err := restic.NewArchiver(server)
@@ -273,8 +273,8 @@ func BenchmarkLoadTree(t *testing.B) {
 t.ResetTimer()

 for i := 0; i < t.N; i++ {
-for _, name := range list {
-_, err := restic.LoadTree(server, name)
+for _, id := range list {
+_, err := restic.LoadTree(server, restic.Blob{Storage: id})
 ok(t, err)
 }
 }
cache.go (4 changed lines)
@@ -106,7 +106,7 @@ func (c *Cache) Purge(t backend.Type, subtype string, id backend.ID) error {
 return err
 }

-func (c *Cache) Clear(s backend.Backend) error {
+func (c *Cache) Clear(s Server) error {
 list, err := c.List(backend.Snapshot)
 if err != nil {
 return err
@@ -296,7 +296,7 @@ func cacheSnapshotBlobs(p *Progress, s Server, c *Cache, id backend.ID) (*Map, e

 wg.Add(1)
 go func() {
-WalkTree(s, sn.Tree.Storage, nil, ch)
+WalkTree(s, sn.Tree, nil, ch)
 wg.Done()
 }()

@@ -9,10 +9,10 @@ import (
 )

 func TestCache(t *testing.T) {
-be := setupBackend(t)
-defer teardownBackend(t, be)
-key := setupKey(t, be, "geheim")
-server := restic.NewServerWithKey(be, key)
+server := setupBackend(t)
+defer teardownBackend(t, server)
+key := setupKey(t, server, "geheim")
+server.SetKey(key)

 cache, err := restic.NewCache(server)
 ok(t, err)
@@ -76,7 +76,7 @@ func (cmd CmdCat) Execute(args []string) error {
 case "tree":
 // try storage id
 tree := &restic.Tree{}
-err := s.LoadJSONID(backend.Tree, id.String(), tree)
+err := s.LoadJSONID(backend.Tree, id, tree)
 if err != nil {
 return err
 }
@@ -91,7 +91,7 @@ func (cmd CmdCat) Execute(args []string) error {
 return nil
 case "snapshot":
 sn := &restic.Snapshot{}
-err = s.LoadJSONID(backend.Snapshot, id.String(), sn)
+err = s.LoadJSONID(backend.Snapshot, id, sn)
 if err != nil {
 return err
 }
@@ -105,7 +105,7 @@ func (cmd CmdCat) Execute(args []string) error {

 return nil
 case "key":
-rd, err := s.Get(backend.Key, id.String())
+rd, err := s.Backend().Get(backend.Key, id.String())
 if err != nil {
 return err
 }
@@ -66,7 +66,7 @@ func parseTime(str string) (time.Time, error) {

 func (c CmdFind) findInTree(s restic.Server, blob restic.Blob, path string) ([]findResult, error) {
 debug.Log("restic.find", "checking tree %v\n", blob)
-tree, err := restic.LoadTree(s, blob.Storage)
+tree, err := restic.LoadTree(s, blob)
 if err != nil {
 return nil, err
 }
@@ -77,7 +77,7 @@ func fsckFile(opts CmdFsck, s restic.Server, m *restic.Map, IDs []backend.ID) (u
 func fsckTree(opts CmdFsck, s restic.Server, blob restic.Blob) error {
 debug.Log("restic.fsckTree", "checking tree %v", blob)

-tree, err := restic.LoadTree(s, blob.Storage)
+tree, err := restic.LoadTree(s, blob)
 if err != nil {
 return err
 }
@@ -38,7 +38,7 @@ func print_node(prefix string, n *restic.Node) string {
 }

 func print_tree(prefix string, s restic.Server, blob restic.Blob) error {
-tree, err := restic.LoadTree(s, blob.Storage)
+tree, err := restic.LoadTree(s, blob)
 if err != nil {
 return err
 }
key.go (4 changed lines)
@@ -131,7 +131,7 @@ func SearchKey(s Server, password string) (*Key, error) {
 // LoadKey loads a key from the backend.
 func LoadKey(s Server, name string) (*Key, error) {
 // extract data from repo
-rd, err := s.Get(backend.Key, name)
+rd, err := s.be.Get(backend.Key, name)
 if err != nil {
 return nil, err
 }
@@ -207,7 +207,7 @@ func AddKey(s Server, password string, template *Key) (*Key, error) {
 }

 // store in repository and return
-blob, err := s.Create()
+blob, err := s.be.Create()
 if err != nil {
 return nil, err
 }
@@ -37,7 +37,7 @@ func NewRestorer(s Server, snid backend.ID) (*Restorer, error) {
 }

 func (res *Restorer) to(dst string, dir string, treeBlob Blob) error {
-tree, err := LoadTree(res.s, treeBlob.Storage)
+tree, err := LoadTree(res.s, treeBlob)
 if err != nil {
 return res.Error(dir, nil, arrar.Annotate(err, "LoadTree"))
 }
server.go (81 changed lines)
@@ -22,8 +22,8 @@ func NewServer(be backend.Backend) Server {
 return Server{be: be}
 }

-func NewServerWithKey(be backend.Backend, key *Key) Server {
-return Server{be: be, key: key}
+func (s *Server) SetKey(k *Key) {
+s.key = k
 }

 // Find loads the list of all blobs of type t and searches for names which start
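Since NewServerWithKey is removed in favour of SetKey, server construction now happens in two steps. A small sketch of the pattern the tests in this commit switch to (the helper name is illustrative):

package example

import (
    "github.com/restic/restic"
    "github.com/restic/restic/backend"
)

// newServer builds a Server from a backend and attaches the key afterwards,
// replacing the removed restic.NewServerWithKey(be, key).
func newServer(be backend.Backend, key *restic.Key) restic.Server {
    s := restic.NewServer(be)
    s.SetKey(key)
    return s
}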
@@ -45,10 +45,13 @@ func (s Server) PrefixLength(t backend.Type) (int, error) {
 return backend.PrefixLength(s.be, t)
 }

-// Load tries to load and decrypt content identified by t and blob from the backend.
+// Load tries to load and decrypt content identified by t and blob from the
+// backend. If the blob specifies an ID, the decrypted plaintext is checked
+// against this ID. The same goes for blob.Size and blob.StorageSize: If they
+// are set to a value > 0, this value is checked.
 func (s Server) Load(t backend.Type, blob Blob) ([]byte, error) {
 // load data
-rd, err := s.Get(t, blob.Storage.String())
+rd, err := s.be.Get(t, blob.Storage.String())
 if err != nil {
 return nil, err
 }
@@ -64,7 +67,7 @@ func (s Server) Load(t backend.Type, blob Blob) ([]byte, error) {
 }

 // check length
-if len(buf) != int(blob.StorageSize) {
+if blob.StorageSize > 0 && len(buf) != int(blob.StorageSize) {
 return nil, errors.New("Invalid storage length")
 }

@@ -75,14 +78,16 @@ func (s Server) Load(t backend.Type, blob Blob) ([]byte, error) {
 }

 // check length
-if len(buf) != int(blob.Size) {
+if blob.Size > 0 && len(buf) != int(blob.Size) {
 return nil, errors.New("Invalid length")
 }

 // check SHA256 sum
-id := backend.Hash(buf)
-if !blob.ID.Equal(id) {
-return nil, fmt.Errorf("load %v: expected plaintext hash %v, got %v", blob.Storage, blob.ID, id)
+if blob.ID != nil {
+id := backend.Hash(buf)
+if !blob.ID.Equal(id) {
+return nil, fmt.Errorf("load %v: expected plaintext hash %v, got %v", blob.Storage, blob.ID, id)
+}
 }

 return buf, nil
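To make the new, conditional behaviour concrete: only the Blob fields that are actually set get verified. A hedged caller-side sketch (function and variable names are illustrative):

package example

import (
    "github.com/restic/restic"
    "github.com/restic/restic/backend"
)

// loadWithChecks contrasts the two ends of the new Load() behaviour: a fully
// populated Blob has its plaintext hash and both lengths verified, while a
// Blob carrying only the storage ID is just fetched and decrypted.
func loadWithChecks(s restic.Server, full restic.Blob, storageID backend.ID) error {
    // ID, Size and StorageSize are set: all checks run.
    if _, err := s.Load(backend.Tree, full); err != nil {
        return err
    }

    // Only Storage is set: the checks are skipped.
    _, err := s.Load(backend.Tree, restic.Blob{Storage: storageID})
    return err
}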
@@ -90,37 +95,25 @@ func (s Server) Load(t backend.Type, blob Blob) ([]byte, error) {

 // Load tries to load and decrypt content identified by t and id from the backend.
 func (s Server) LoadID(t backend.Type, storageID backend.ID) ([]byte, error) {
-// load data
-rd, err := s.Get(t, storageID.String())
-if err != nil {
-return nil, err
-}
-
-buf, err := ioutil.ReadAll(rd)
-if err != nil {
-return nil, err
-}
-
-// decrypt
-buf, err = s.Decrypt(buf)
-if err != nil {
-return nil, err
-}
-
-return buf, nil
+return s.Load(t, Blob{Storage: storageID})
 }

 // LoadJSON calls Load() to get content from the backend and afterwards calls
 // json.Unmarshal on the item.
 func (s Server) LoadJSON(t backend.Type, blob Blob, item interface{}) error {
-return s.LoadJSONID(t, blob.Storage.String(), item)
+buf, err := s.Load(t, blob)
+if err != nil {
+return err
+}
+
+return json.Unmarshal(buf, item)
 }

 // LoadJSONID calls Load() to get content from the backend and afterwards calls
 // json.Unmarshal on the item.
-func (s Server) LoadJSONID(t backend.Type, name string, item interface{}) error {
+func (s Server) LoadJSONID(t backend.Type, id backend.ID, item interface{}) error {
 // read
-rd, err := s.Get(t, name)
+rd, err := s.be.Get(t, id.String())
 if err != nil {
 return err
 }
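The resulting layering from the hunk above: LoadID and LoadJSON both funnel through Load() (and therefore through the new checks), while LoadJSONID keeps a raw backend.ID for objects addressed purely by storage ID. A usage sketch matching the cmd_cat change (helper name is illustrative):

package example

import (
    "github.com/restic/restic"
    "github.com/restic/restic/backend"
)

// loadSnapshot loads a snapshot the way cmd_cat now does: the parsed
// backend.ID is passed to LoadJSONID directly instead of id.String().
func loadSnapshot(s restic.Server, id backend.ID) (*restic.Snapshot, error) {
    sn := &restic.Snapshot{}
    if err := s.LoadJSONID(backend.Snapshot, id, sn); err != nil {
        return nil, err
    }
    return sn, nil
}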
@@ -183,7 +176,7 @@ func (s Server) Save(t backend.Type, data []byte, id backend.ID) (Blob, error) {
 sid := backend.Hash(ciphertext)

 // save blob
-backendBlob, err := s.Create()
+backendBlob, err := s.be.Create()
 if err != nil {
 return Blob{}, err
 }
@@ -210,7 +203,7 @@ func (s Server) SaveFrom(t backend.Type, id backend.ID, length uint, rd io.Reade
 return Blob{}, errors.New("id is nil")
 }

-backendBlob, err := s.Create()
+backendBlob, err := s.be.Create()
 if err != nil {
 return Blob{}, err
 }
@@ -247,7 +240,7 @@ func (s Server) SaveFrom(t backend.Type, id backend.ID, length uint, rd io.Reade
 // SaveJSON serialises item as JSON and encrypts and saves it in the backend as
 // type t.
 func (s Server) SaveJSON(t backend.Type, item interface{}) (Blob, error) {
-backendBlob, err := s.Create()
+backendBlob, err := s.be.Create()
 if err != nil {
 return Blob{}, fmt.Errorf("Create: %v", err)
 }
@@ -331,9 +324,9 @@ func (s Server) Stats() (ServerStats, error) {
 blobs := backend.NewIDSet()

 // load all trees, in parallel
-worker := func(wg *sync.WaitGroup, c <-chan backend.ID) {
-for id := range c {
-tree, err := LoadTree(s, id)
+worker := func(wg *sync.WaitGroup, b <-chan Blob) {
+for blob := range b {
+tree, err := LoadTree(s, blob)
 // ignore error and advance to next tree
 if err != nil {
 return
@@ -346,13 +339,13 @@ func (s Server) Stats() (ServerStats, error) {
 wg.Done()
 }

-idCh := make(chan backend.ID)
+blobCh := make(chan Blob)

 // start workers
 var wg sync.WaitGroup
 for i := 0; i < maxConcurrency; i++ {
 wg.Add(1)
-go worker(&wg, idCh)
+go worker(&wg, blobCh)
 }

 // list ids
@@ -366,10 +359,10 @@ func (s Server) Stats() (ServerStats, error) {
 debug.Log("Server.Stats", "unable to parse name %v as id: %v", name, err)
 continue
 }
-idCh <- id
+blobCh <- Blob{Storage: id}
 }

-close(idCh)
+close(blobCh)

 // wait for workers
 wg.Wait()
@@ -392,14 +385,6 @@ func (s Server) List(t backend.Type, done <-chan struct{}) <-chan string {
 return s.be.List(t, done)
 }

-func (s Server) Get(t backend.Type, name string) (io.ReadCloser, error) {
-return s.be.Get(t, name)
-}
-
-func (s Server) Create() (backend.Blob, error) {
-return s.be.Create()
-}
-
 func (s Server) Test(t backend.Type, name string) (bool, error) {
 return s.be.Test(t, name)
 }
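With Get() and Create() removed from Server, code outside the package that still needs raw backend access goes through the backend handle, as the cmd_cat hunk above already does. A sketch under that assumption (the Backend() accessor is used by cmd_cat in this commit but its definition is not shown in this diff):

package example

import (
    "io"

    "github.com/restic/restic"
    "github.com/restic/restic/backend"
)

// rawKey fetches an undecrypted key file via the Server's backend handle,
// the replacement for the removed Server.Get().
func rawKey(s restic.Server, id backend.ID) (io.ReadCloser, error) {
    return s.Backend().Get(backend.Key, id.String())
}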
@@ -23,10 +23,10 @@ var serverTests = []testJSONStruct{
 }

 func TestSaveJSON(t *testing.T) {
-be := setupBackend(t)
-defer teardownBackend(t, be)
-key := setupKey(t, be, "geheim")
-server := restic.NewServerWithKey(be, key)
+server := setupBackend(t)
+defer teardownBackend(t, server)
+key := setupKey(t, server, "geheim")
+server.SetKey(key)

 for _, obj := range serverTests {
 data, err := json.Marshal(obj)
@@ -44,10 +44,10 @@ func TestSaveJSON(t *testing.T) {
 }

 func BenchmarkSaveJSON(t *testing.B) {
-be := setupBackend(t)
-defer teardownBackend(t, be)
-key := setupKey(t, be, "geheim")
-server := restic.NewServerWithKey(be, key)
+server := setupBackend(t)
+defer teardownBackend(t, server)
+key := setupKey(t, server, "geheim")
+server.SetKey(key)

 obj := serverTests[0]

@ -71,10 +71,10 @@ func BenchmarkSaveJSON(t *testing.B) {
 var testSizes = []int{5, 23, 2<<18 + 23, 1 << 20}

 func TestSaveFrom(t *testing.T) {
-be := setupBackend(t)
-defer teardownBackend(t, be)
-key := setupKey(t, be, "geheim")
-server := restic.NewServerWithKey(be, key)
+server := setupBackend(t)
+defer teardownBackend(t, server)
+key := setupKey(t, server, "geheim")
+server.SetKey(key)

 for _, size := range testSizes {
 data := make([]byte, size)
@@ -101,10 +101,10 @@ func TestSaveFrom(t *testing.T) {
 }

 func BenchmarkSaveFrom(t *testing.B) {
-be := setupBackend(t)
-defer teardownBackend(t, be)
-key := setupKey(t, be, "geheim")
-server := restic.NewServerWithKey(be, key)
+server := setupBackend(t)
+defer teardownBackend(t, server)
+key := setupKey(t, server, "geheim")
+server.SetKey(key)

 size := 4 << 20 // 4MiB

@@ -129,10 +129,10 @@ func TestServerStats(t *testing.T) {
 t.Skip("benchdir not set, skipping TestServerStats")
 }

-be := setupBackend(t)
-defer teardownBackend(t, be)
-key := setupKey(t, be, "geheim")
-server := restic.NewServerWithKey(be, key)
+server := setupBackend(t)
+defer teardownBackend(t, server)
+key := setupKey(t, server, "geheim")
+server.SetKey(key)

 // archive a few files
 sn := snapshot(t, server, *benchArchiveDirectory, nil)
@@ -148,10 +148,10 @@ func TestLoadJSONID(t *testing.T) {
 t.Skip("benchdir not set, skipping TestServerStats")
 }

-be := setupBackend(t)
-defer teardownBackend(t, be)
-key := setupKey(t, be, "geheim")
-server := restic.NewServerWithKey(be, key)
+server := setupBackend(t)
+defer teardownBackend(t, server)
+key := setupKey(t, server, "geheim")
+server.SetKey(key)

 // archive a few files
 sn := snapshot(t, server, *benchArchiveDirectory, nil)
@@ -163,8 +163,11 @@ func TestLoadJSONID(t *testing.T) {
 assert(t, found, "no Trees in repository found")
 close(done)

+id, err := backend.ParseID(first)
+ok(t, err)
+
 tree := restic.NewTree()
-err := server.LoadJSONID(backend.Tree, first, &tree)
+err = server.LoadJSONID(backend.Tree, id, &tree)
 ok(t, err)
 }

@@ -173,10 +176,10 @@ func BenchmarkLoadJSONID(t *testing.B) {
 t.Skip("benchdir not set, skipping TestServerStats")
 }

-be := setupBackend(t)
-defer teardownBackend(t, be)
-key := setupKey(t, be, "geheim")
-server := restic.NewServerWithKey(be, key)
+server := setupBackend(t)
+defer teardownBackend(t, server)
+key := setupKey(t, server, "geheim")
+server.SetKey(key)

 // archive a few files
 sn := snapshot(t, server, *benchArchiveDirectory, nil)
@@ -186,8 +189,10 @@ func BenchmarkLoadJSONID(t *testing.B) {

 tree := restic.NewTree()
 for i := 0; i < t.N; i++ {
-for treeID := range be.List(backend.Tree, nil) {
-ok(t, server.LoadJSONID(backend.Tree, treeID, &tree))
+for name := range server.List(backend.Tree, nil) {
+id, err := backend.ParseID(name)
+ok(t, err)
+ok(t, server.LoadJSONID(backend.Tree, id, &tree))
 }
 }
 }
@@ -51,7 +51,7 @@ func NewSnapshot(paths []string) (*Snapshot, error) {

 func LoadSnapshot(s Server, id backend.ID) (*Snapshot, error) {
 sn := &Snapshot{id: id}
-err := s.LoadJSONID(backend.Snapshot, id.String(), sn)
+err := s.LoadJSONID(backend.Snapshot, id, sn)
 if err != nil {
 return nil, err
 }
tree.go (4 changed lines)
@@ -30,9 +30,9 @@ func (t Tree) String() string {
 return fmt.Sprintf("Tree<%d nodes, %d blobs>", len(t.Nodes), len(t.Map.list))
 }

-func LoadTree(s Server, id backend.ID) (*Tree, error) {
+func LoadTree(s Server, blob Blob) (*Tree, error) {
 tree := &Tree{}
-err := s.LoadJSONID(backend.Tree, id.String(), tree)
+err := s.LoadJSON(backend.Tree, blob, tree)
 if err != nil {
 return nil, err
 }
walk.go (19 changed lines)
@@ -3,7 +3,6 @@ package restic
 import (
 "path/filepath"

-"github.com/restic/restic/backend"
 "github.com/restic/restic/debug"
 )

@@ -15,10 +14,10 @@ type WalkTreeJob struct {
 Tree *Tree
 }

-func walkTree(s Server, path string, id backend.ID, done chan struct{}, jobCh chan<- WalkTreeJob) {
-debug.Log("walkTree", "start on %q (%v)", path, id.Str())
+func walkTree(s Server, path string, treeBlob Blob, done chan struct{}, jobCh chan<- WalkTreeJob) {
+debug.Log("walkTree", "start on %q (%v)", path, treeBlob)
 // load tree
-t, err := LoadTree(s, id)
+t, err := LoadTree(s, treeBlob)
 if err != nil {
 jobCh <- WalkTreeJob{Path: path, Error: err}
 return
@@ -32,27 +31,27 @@ func walkTree(s Server, path string, id backend.ID, done chan struct{}, jobCh ch
 jobCh <- WalkTreeJob{Path: p, Error: err}
 continue
 }
-walkTree(s, p, blob.Storage, done, jobCh)
+walkTree(s, p, blob, done, jobCh)
 } else {
 // load old blobs
 node.blobs, err = t.Map.Select(node.Content)
 if err != nil {
-debug.Log("walkTree", "unable to load bobs for %q (%v): %v", path, id.Str(), err)
+debug.Log("walkTree", "unable to load bobs for %q (%v): %v", path, treeBlob, err)
 }
 jobCh <- WalkTreeJob{Path: p, Node: node, Error: err}
 }
 }

 jobCh <- WalkTreeJob{Path: filepath.Join(path), Tree: t}
-debug.Log("walkTree", "done for %q (%v)", path, id.Str())
+debug.Log("walkTree", "done for %q (%v)", path, treeBlob)
 }

 // WalkTree walks the tree specified by ID recursively and sends a job for each
 // file and directory it finds. When the channel done is closed, processing
 // stops.
-func WalkTree(server Server, id backend.ID, done chan struct{}, jobCh chan<- WalkTreeJob) {
-debug.Log("WalkTree", "start on %v", id.Str())
-walkTree(server, "", id, done, jobCh)
+func WalkTree(server Server, blob Blob, done chan struct{}, jobCh chan<- WalkTreeJob) {
+debug.Log("WalkTree", "start on %v", blob)
+walkTree(server, "", blob, done, jobCh)
 close(jobCh)
 debug.Log("WalkTree", "done")
 }
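Since WalkTree now takes the tree's Blob rather than a bare ID, a consumer looks roughly like the sketch below (job handling is illustrative; the archiver and walk_test.go use the same start pattern):

package example

import (
    "github.com/restic/restic"
)

// walkSnapshotTree starts a walker on a snapshot's tree Blob and drains the
// resulting jobs, signalling the walker via the done channel on early return.
func walkSnapshotTree(s restic.Server, sn *restic.Snapshot) error {
    done := make(chan struct{})
    defer close(done)

    jobs := make(chan restic.WalkTreeJob)
    go restic.WalkTree(s, sn.Tree, done, jobs)

    for job := range jobs {
        if job.Error != nil {
            return job.Error
        }
        // job.Node (file entries) or job.Tree (directories) carry the walked item.
    }
    return nil
}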
walk_test.go (10 changed lines)
@@ -15,10 +15,10 @@ func TestWalkTree(t *testing.T) {
 dirs, err := filepath.Glob(*testWalkDirectory)
 ok(t, err)

-be := setupBackend(t)
-defer teardownBackend(t, be)
-key := setupKey(t, be, "geheim")
-server := restic.NewServerWithKey(be, key)
+server := setupBackend(t)
+defer teardownBackend(t, server)
+key := setupKey(t, server, "geheim")
+server.SetKey(key)

 // archive a few files
 arch, err := restic.NewArchiver(server)
@@ -35,7 +35,7 @@ func TestWalkTree(t *testing.T) {

 // start tree walker
 treeJobs := make(chan restic.WalkTreeJob)
-go restic.WalkTree(server, sn.Tree.Storage, done, treeJobs)
+go restic.WalkTree(server, sn.Tree, done, treeJobs)

 // start filesystem walker
 fsJobs := make(chan pipe.Job)