Switch order of parameters to repo.LoadBlob()

commit 17e1872544
parent 246302375d
Author: Alexander Neumann
Date:   2016-08-03 22:44:45 +02:00

7 changed files with 14 additions and 14 deletions
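In short: the blob ID now comes first, the blob type second, and the destination buffer stays last. A minimal sketch of an updated call site, assuming the in-tree restic/... import paths of this period; the helper name is made up for illustration:

package example

import (
    "restic/backend"
    "restic/pack"
    "restic/repository"
)

// loadData is a hypothetical helper showing the new argument order.
// Before this commit the same call read repo.LoadBlob(pack.Data, id, buf).
func loadData(repo *repository.Repository, id backend.ID, buf []byte) ([]byte, error) {
    return repo.LoadBlob(id, pack.Data, buf)
}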

---- changed file 1 of 7 ----

@@ -13,7 +13,7 @@ import (
 )

 func loadBlob(t *testing.T, repo *repository.Repository, id backend.ID, buf []byte) []byte {
-    buf, err := repo.LoadBlob(pack.Data, id, buf)
+    buf, err := repo.LoadBlob(id, pack.Data, buf)
     if err != nil {
         t.Fatalf("LoadBlob(%v) returned error %v", id, err)
     }
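A hypothetical caller of this helper, assuming the same test package (so loadBlob and its imports are in scope) and that the test recorded the plaintext size of each blob when storing it:

// checkBlobs is a sketch, not part of the commit: it reads every blob back
// through loadBlob and compares the returned length against the recorded size.
// loadBlob itself already fails the test if a load returns an error.
func checkBlobs(t *testing.T, repo *repository.Repository, ids []backend.ID, sizes []uint) {
    for i, id := range ids {
        plaintext := loadBlob(t, repo, id, make([]byte, sizes[i]))
        if uint(len(plaintext)) != sizes[i] {
            t.Errorf("blob %v: want %d bytes, got %d", id, sizes[i], len(plaintext))
        }
    }
}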

---- changed file 2 of 7 ----

@@ -101,7 +101,7 @@ func testArchiverDuplication(t *testing.T) {
         id := randomID()
-        if repo.Index().Has(id) {
+        if repo.Index().Has(id, pack.Data) {
             continue
         }
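Index().Has now takes the blob type as well, presumably because the in-memory index tracks blobs per (ID, type) pair. A small sketch of the same guard as a standalone helper, assuming the package context of the test above:

// dataBlobKnown is hypothetical: it asks the in-memory index whether a
// *data* blob with this ID is already present, mirroring the guard above.
func dataBlobKnown(repo *repository.Repository, id backend.ID) bool {
    return repo.Index().Has(id, pack.Data)
}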

---- changed file 3 of 7 ----

@@ -27,8 +27,8 @@ var _ = fs.HandleReleaser(&file{})
 // BlobLoader is an abstracted repository with a reduced set of methods used
 // for fuse operations.
 type BlobLoader interface {
-    LookupBlobSize(backend.ID) (uint, error)
-    LoadBlob(pack.BlobType, backend.ID, []byte) ([]byte, error)
+    LookupBlobSize(backend.ID, pack.BlobType) (uint, error)
+    LoadBlob(backend.ID, pack.BlobType, []byte) ([]byte, error)
 }

 type file struct {
@@ -53,7 +53,7 @@ func newFile(repo BlobLoader, node *restic.Node, ownerIsRoot bool) (*file, error
     var bytes uint64
     sizes := make([]uint, len(node.Content))
     for i, id := range node.Content {
-        size, err := repo.LookupBlobSize(id)
+        size, err := repo.LookupBlobSize(id, pack.Data)
         if err != nil {
             return nil, err
         }
@@ -110,7 +110,7 @@ func (f *file) getBlobAt(i int) (blob []byte, err error) {
         buf = make([]byte, f.sizes[i])
     }

-    blob, err = f.repo.LoadBlob(pack.Data, f.node.Content[i], buf)
+    blob, err = f.repo.LoadBlob(f.node.Content[i], pack.Data, buf)
     if err != nil {
         debug.Log("file.getBlobAt", "LoadBlob(%v, %v) failed: %v", f.node.Name, f.node.Content[i], err)
         return nil, err
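Because BlobLoader is the only contract the fuse file code relies on, the signature change propagates to every implementation: the real repository and the test mock below. A hypothetical compile-time check for the former, assuming the fuse package (or one of its test files) may import restic/repository:

// Hypothetical: assert at compile time that the full repository still
// satisfies the reduced interface after the parameter reordering.
var _ BlobLoader = (*repository.Repository)(nil)

Placing such a check in a test file is a common way to keep an interface and its implementations from drifting apart across refactorings like this one.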

---- changed file 4 of 7 ----

@@ -26,7 +26,7 @@ func NewMockRepo(content map[backend.ID][]byte) *MockRepo {
     return &MockRepo{blobs: content}
 }

-func (m *MockRepo) LookupBlobSize(id backend.ID) (uint, error) {
+func (m *MockRepo) LookupBlobSize(id backend.ID, t pack.BlobType) (uint, error) {
     buf, ok := m.blobs[id]
     if !ok {
         return 0, errors.New("blob not found")
@@ -35,8 +35,8 @@ func (m *MockRepo) LookupBlobSize(id backend.ID) (uint, error) {
     return uint(len(buf)), nil
 }

-func (m *MockRepo) LoadBlob(t pack.BlobType, id backend.ID, buf []byte) ([]byte, error) {
-    size, err := m.LookupBlobSize(id)
+func (m *MockRepo) LoadBlob(id backend.ID, t pack.BlobType, buf []byte) ([]byte, error) {
+    size, err := m.LookupBlobSize(id, t)
     if err != nil {
         return nil, err
     }
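A sketch of exercising the updated mock from a test in the same package; the content map and the ID are whatever the surrounding test sets up, and the helper name is made up:

// checkMock is hypothetical: both mock methods now take the blob type as
// their second parameter, matching the BlobLoader interface above.
func checkMock(t *testing.T, content map[backend.ID][]byte, id backend.ID) {
    repo := NewMockRepo(content)

    size, err := repo.LookupBlobSize(id, pack.Data)
    if err != nil {
        t.Fatal(err)
    }

    // Size the buffer from the lookup so LoadBlob has enough room.
    if _, err := repo.LoadBlob(id, pack.Data, make([]byte, size)); err != nil {
        t.Fatal(err)
    }
}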

---- changed file 5 of 7 ----

@@ -226,7 +226,7 @@ func (node Node) createFileAt(path string, repo *repository.Repository) error {
         buf = make([]byte, size)
     }

-    buf, err := repo.LoadBlob(pack.Data, id, buf)
+    buf, err := repo.LoadBlob(id, pack.Data, buf)
     if err != nil {
         return errors.Annotate(err, "Load")
     }
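The restore path follows the same pattern: look up the plaintext size, reuse one buffer across blobs, and load with the new argument order. A condensed sketch, not the actual createFileAt; it assumes *repository.Repository provides LookupBlobSize with the new signature (as the fuse BlobLoader interface requires) and that the usual restic/... imports plus "io" are available:

// writeNodeContent is a hypothetical sketch of restoring a node's data to an
// io.Writer with a single reusable buffer.
func writeNodeContent(wr io.Writer, repo *repository.Repository, node *restic.Node) error {
    var buf []byte
    for _, id := range node.Content {
        size, err := repo.LookupBlobSize(id, pack.Data)
        if err != nil {
            return err
        }
        // Grow the scratch buffer only when the next blob does not fit.
        if uint(len(buf)) < size {
            buf = make([]byte, size)
        }

        blob, err := repo.LoadBlob(id, pack.Data, buf)
        if err != nil {
            return err
        }
        if _, err := wr.Write(blob); err != nil {
            return err
        }
    }
    return nil
}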

---- changed file 6 of 7 ----

@@ -77,7 +77,7 @@ func (r *Repository) LoadAndDecrypt(t backend.Type, id backend.ID) ([]byte, erro
 // LoadBlob tries to load and decrypt content identified by t and id from a
 // pack from the backend, the result is stored in plaintextBuf, which must be
 // large enough to hold the complete blob.
-func (r *Repository) LoadBlob(t pack.BlobType, id backend.ID, plaintextBuf []byte) ([]byte, error) {
+func (r *Repository) LoadBlob(id backend.ID, t pack.BlobType, plaintextBuf []byte) ([]byte, error) {
     debug.Log("Repo.LoadBlob", "load %v with id %v", t, id.Str())

     // lookup plaintext size of blob
@@ -167,7 +167,7 @@ func (r *Repository) LoadJSONUnpacked(t backend.Type, id backend.ID, item interf
 // LoadJSONPack calls LoadBlob() to load a blob from the backend, decrypt the
 // data and afterwards call json.Unmarshal on the item.
 func (r *Repository) LoadJSONPack(t pack.BlobType, id backend.ID, item interface{}) (err error) {
-    buf, err := r.LoadBlob(t, id, nil)
+    buf, err := r.LoadBlob(id, t, nil)
     if err != nil {
         return err
     }
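Two details are visible in this hunk: LoadJSONPack passes nil for the buffer, which suggests LoadBlob can provide its own when none is supplied, and LoadJSONPack itself keeps the old type-first parameter order, only the inner LoadBlob call changes. A hypothetical pair of wrappers, assuming the usual restic/... imports:

// loadRaw loads and decrypts one blob, passing nil for the buffer the same
// way LoadJSONPack does above.
func loadRaw(repo *repository.Repository, id backend.ID, t pack.BlobType) ([]byte, error) {
    return repo.LoadBlob(id, t, nil)
}

// loadJSON decodes a JSON-encoded blob into item; note the unchanged
// type-first order of LoadJSONPack's own parameters.
func loadJSON(repo *repository.Repository, id backend.ID, t pack.BlobType, item interface{}) error {
    return repo.LoadJSONPack(t, id, item)
}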

---- changed file 7 of 7 ----

@@ -92,7 +92,7 @@ func TestSave(t *testing.T) {
     // OK(t, repo.SaveIndex())

     // read back
-    buf, err := repo.LoadBlob(pack.Data, id, make([]byte, size))
+    buf, err := repo.LoadBlob(id, pack.Data, make([]byte, size))
     OK(t, err)

     Assert(t, len(buf) == len(data),
@@ -124,7 +124,7 @@ func TestSaveFrom(t *testing.T) {
     OK(t, repo.Flush())

     // read back
-    buf, err := repo.LoadBlob(pack.Data, id, make([]byte, size))
+    buf, err := repo.LoadBlob(id, pack.Data, make([]byte, size))
     OK(t, err)

     Assert(t, len(buf) == len(data),
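Both tests repeat the same read-back check; a hypothetical helper in the same test package (OK and Assert are the test helpers already used above) could factor it out:

// readBack is a sketch: load the blob into a buffer of the expected size
// using the new argument order and compare the returned length.
func readBack(t *testing.T, repo *repository.Repository, id backend.ID, data []byte) {
    buf, err := repo.LoadBlob(id, pack.Data, make([]byte, len(data)))
    OK(t, err)

    Assert(t, len(buf) == len(data),
        "wrong number of bytes read back, want %d, got %d", len(data), len(buf))
}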