
Return real size from SaveBlob

Alexander Neumann 2022-05-01 14:26:57 +02:00 committed by Michael Eischer
parent fdc53a9d32
commit 99634c0936
13 changed files with 50 additions and 40 deletions
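The change below threads a new size return value through SaveBlob: every implementation now reports how many bytes the stored blob occupies in the repository (compressed or not, including encryption overhead). A minimal sketch of a caller migrating to the new signature follows; it is not part of the commit, assumes it lives inside the restic module (the internal/restic package cannot be imported from elsewhere), and abbreviates setup.

package example

import (
    "context"
    "fmt"

    "github.com/restic/restic/internal/restic"
)

func storeChunk(ctx context.Context, repo restic.Repository, buf []byte) error {
    // before this commit: id, known, err := repo.SaveBlob(ctx, restic.DataBlob, buf, restic.ID{}, false)
    id, known, size, err := repo.SaveBlob(ctx, restic.DataBlob, buf, restic.ID{}, false)
    if err != nil {
        return err
    }
    // size is the number of bytes the blob occupies in the repo
    // (compressed or not, including encryption overhead).
    fmt.Printf("blob %v: known=%v, stored %d bytes\n", id, known, size)
    return nil
}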


@@ -386,7 +386,7 @@ func loadBlobs(ctx context.Context, repo restic.Repository, pack restic.ID, list
         }
     }
     if reuploadBlobs {
-        _, _, err := repo.SaveBlob(ctx, blob.Type, plaintext, id, true)
+        _, _, _, err := repo.SaveBlob(ctx, blob.Type, plaintext, id, true)
         if err != nil {
             return err
         }


@@ -415,16 +415,16 @@ type blobCountingRepo struct {
     saved map[restic.BlobHandle]uint
 }

-func (repo *blobCountingRepo) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (restic.ID, bool, error) {
-    id, exists, err := repo.Repository.SaveBlob(ctx, t, buf, id, false)
+func (repo *blobCountingRepo) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (restic.ID, bool, int, error) {
+    id, exists, size, err := repo.Repository.SaveBlob(ctx, t, buf, id, false)
     if exists {
-        return id, exists, err
+        return id, exists, size, err
     }
     h := restic.BlobHandle{ID: id, Type: t}
     repo.m.Lock()
     repo.saved[h]++
     repo.m.Unlock()
-    return id, exists, err
+    return id, exists, size, err
 }

 func (repo *blobCountingRepo) SaveTree(ctx context.Context, t *restic.Tree) (restic.ID, error) {

@@ -1944,10 +1944,10 @@ type failSaveRepo struct {
     err error
 }

-func (f *failSaveRepo) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (restic.ID, bool, error) {
+func (f *failSaveRepo) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (restic.ID, bool, int, error) {
     val := atomic.AddInt32(&f.cnt, 1)
     if val >= f.failAfter {
-        return restic.ID{}, false, f.err
+        return restic.ID{}, false, 0, f.err
     }

     return f.Repository.SaveBlob(ctx, t, buf, id, storeDuplicate)


@@ -10,7 +10,7 @@ import (
 // Saver allows saving a blob.
 type Saver interface {
-    SaveBlob(ctx context.Context, t restic.BlobType, data []byte, id restic.ID, storeDuplicate bool) (restic.ID, bool, error)
+    SaveBlob(ctx context.Context, t restic.BlobType, data []byte, id restic.ID, storeDuplicate bool) (restic.ID, bool, int, error)
     Index() restic.MasterIndex
 }

@@ -100,10 +100,11 @@ type saveBlobJob struct {
 type saveBlobResponse struct {
     id    restic.ID
     known bool
+    size  int
 }

 func (s *BlobSaver) saveBlob(ctx context.Context, t restic.BlobType, buf []byte) (saveBlobResponse, error) {
-    id, known, err := s.repo.SaveBlob(ctx, t, buf, restic.ID{}, false)
+    id, known, size, err := s.repo.SaveBlob(ctx, t, buf, restic.ID{}, false)
     if err != nil {
         return saveBlobResponse{}, err

@@ -112,6 +113,7 @@ func (s *BlobSaver) saveBlob(ctx context.Context, t restic.BlobType, buf []byte)
     return saveBlobResponse{
         id:    id,
         known: known,
+        size:  size,
     }, nil
 }


@@ -21,13 +21,13 @@ type saveFail struct {
     failAt int32
 }

-func (b *saveFail) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicates bool) (restic.ID, bool, error) {
+func (b *saveFail) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicates bool) (restic.ID, bool, int, error) {
     val := atomic.AddInt32(&b.cnt, 1)
     if val == b.failAt {
-        return restic.ID{}, false, errTest
+        return restic.ID{}, false, 0, errTest
     }

-    return id, false, nil
+    return id, false, 0, nil
 }

 func (b *saveFail) Index() restic.MasterIndex {


@@ -483,7 +483,7 @@ func TestCheckerBlobTypeConfusion(t *testing.T) {
     buf, err := repo.LoadBlob(ctx, restic.TreeBlob, id, nil)
     test.OK(t, err)

-    _, _, err = repo.SaveBlob(ctx, restic.DataBlob, buf, id, false)
+    _, _, _, err = repo.SaveBlob(ctx, restic.DataBlob, buf, id, false)
     test.OK(t, err)

     malNode := &restic.Node{


@@ -23,7 +23,7 @@ func FuzzSaveLoadBlob(f *testing.F) {
     id := restic.Hash(blob)
     repo, _ := TestRepositoryWithBackend(t, mem.New(), 2)

-    _, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, blob, id, false)
+    _, _, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, blob, id, false)
     if err != nil {
         t.Fatal(err)
     }


@@ -75,7 +75,7 @@ func Repack(ctx context.Context, repo restic.Repository, dstRepo restic.Reposito
     }

     // We do want to save already saved blobs!
-    _, _, err = dstRepo.SaveBlob(wgCtx, blob.Type, buf, blob.ID, true)
+    _, _, _, err = dstRepo.SaveBlob(wgCtx, blob.Type, buf, blob.ID, true)
     if err != nil {
         return err
     }


@@ -32,7 +32,7 @@ func createRandomBlobs(t testing.TB, repo restic.Repository, blobs int, pData fl
     buf := make([]byte, length)
     rand.Read(buf)

-    id, exists, err := repo.SaveBlob(context.TODO(), tpe, buf, restic.ID{}, false)
+    id, exists, _, err := repo.SaveBlob(context.TODO(), tpe, buf, restic.ID{}, false)
     if err != nil {
         t.Fatalf("SaveFrom() error %v", err)
     }

@@ -62,7 +62,7 @@ func createRandomWrongBlob(t testing.TB, repo restic.Repository) {
     // invert first data byte
     buf[0] ^= 0xff

-    _, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, buf, id, false)
+    _, _, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, buf, id, false)
     if err != nil {
         t.Fatalf("SaveFrom() error %v", err)
     }


@@ -378,9 +378,10 @@ func (r *Repository) getZstdDecoder() *zstd.Decoder {
 }

 // saveAndEncrypt encrypts data and stores it to the backend as type t. If data
-// is small enough, it will be packed together with other small blobs.
-// The caller must ensure that the id matches the data.
-func (r *Repository) saveAndEncrypt(ctx context.Context, t restic.BlobType, data []byte, id restic.ID) error {
+// is small enough, it will be packed together with other small blobs. The
+// caller must ensure that the id matches the data. Returned is the size data
+// occupies in the repo (compressed or not, including the encryption overhead).
+func (r *Repository) saveAndEncrypt(ctx context.Context, t restic.BlobType, data []byte, id restic.ID) (size int, err error) {
     debug.Log("save id %v (%v, %d bytes)", id, t, len(data))

     uncompressedLength := 0

@@ -417,24 +418,29 @@ func (r *Repository) saveAndEncrypt(ctx context.Context, t restic.BlobType, data
     packer, err := pm.findPacker()
     if err != nil {
-        return err
+        return 0, err
     }

     // save ciphertext
-    _, err = packer.Add(t, id, ciphertext, uncompressedLength)
+    size, err = packer.Add(t, id, ciphertext, uncompressedLength)
     if err != nil {
-        return err
+        return 0, err
     }

     // if the pack is not full enough, put back to the list
     if packer.Size() < minPackSize {
         debug.Log("pack is not full enough (%d bytes)", packer.Size())
         pm.insertPacker(packer)
-        return nil
+        return size, nil
     }

     // else write the pack to the backend
-    return r.savePacker(ctx, t, packer)
+    err = r.savePacker(ctx, t, packer)
+    if err != nil {
+        return 0, err
+    }
+    return size, nil
 }

 // SaveJSONUnpacked serialises item as JSON and encrypts and saves it in the

@@ -815,8 +821,10 @@ func (r *Repository) Close() error {
 // It takes care that no duplicates are saved; this can be overwritten
 // by setting storeDuplicate to true.
 // If id is the null id, it will be computed and returned.
-// Also returns if the blob was already known before
-func (r *Repository) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (newID restic.ID, known bool, err error) {
+// Also returns if the blob was already known before.
+// If the blob was not known before, it returns the number of bytes the blob
+// occupies in the repo (compressed or not, including encryption overhead).
+func (r *Repository) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (newID restic.ID, known bool, size int, err error) {

     // compute plaintext hash if not already set
     if id.IsNull() {

@@ -830,10 +838,10 @@ func (r *Repository) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte
     // only save when needed or explicitly told
     if !known || storeDuplicate {
-        err = r.saveAndEncrypt(ctx, t, buf, newID)
+        size, err = r.saveAndEncrypt(ctx, t, buf, newID)
     }

-    return newID, known, err
+    return newID, known, size, err
 }

 // LoadTree loads a tree from the repository.

@@ -867,7 +875,7 @@ func (r *Repository) SaveTree(ctx context.Context, t *restic.Tree) (restic.ID, e
     // adds a newline after each object)
     buf = append(buf, '\n')

-    id, _, err := r.SaveBlob(ctx, restic.TreeBlob, buf, restic.ID{}, false)
+    id, _, _, err := r.SaveBlob(ctx, restic.TreeBlob, buf, restic.ID{}, false)
     return id, err
 }
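Per the updated comments above, saveAndEncrypt now returns the number of bytes the blob occupies in the repo, and SaveBlob forwards that value only when the blob was actually written; for an already-known blob that is not re-stored, the returned size stays 0. A hedged sketch of using the new return value for accounting follows; the uploadStats type and addBlob helper are illustrative, not part of the commit.

package example

import (
    "context"

    "github.com/restic/restic/internal/restic"
)

// uploadStats is an assumed helper type for this sketch.
type uploadStats struct {
    newBlobs   int
    bytesAdded uint64
}

// addBlob stores buf and attributes the real on-disk growth to stats.
func addBlob(ctx context.Context, repo restic.Repository, stats *uploadStats, t restic.BlobType, buf []byte) error {
    _, known, size, err := repo.SaveBlob(ctx, t, buf, restic.ID{}, false)
    if err != nil {
        return err
    }
    if !known {
        // Only newly stored blobs add bytes; for duplicates SaveBlob skips
        // saveAndEncrypt and the returned size is 0.
        stats.newBlobs++
        stats.bytesAdded += uint64(size)
    }
    return nil
}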


@@ -44,7 +44,7 @@ func testSave(t *testing.T, version uint) {
     id := restic.Hash(data)

     // save
-    sid, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, data, restic.ID{}, false)
+    sid, _, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, data, restic.ID{}, false)
     rtest.OK(t, err)

     rtest.Equals(t, id, sid)

@@ -83,7 +83,7 @@ func testSaveFrom(t *testing.T, version uint) {
     id := restic.Hash(data)

     // save
-    id2, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, data, id, false)
+    id2, _, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, data, id, false)
     rtest.OK(t, err)

     rtest.Equals(t, id, id2)

@@ -125,7 +125,7 @@ func benchmarkSaveAndEncrypt(t *testing.B, version uint) {
     t.SetBytes(int64(size))

     for i := 0; i < t.N; i++ {
-        _, _, err = repo.SaveBlob(context.TODO(), restic.DataBlob, data, id, true)
+        _, _, _, err = repo.SaveBlob(context.TODO(), restic.DataBlob, data, id, true)
         rtest.OK(t, err)
     }
 }

@@ -187,7 +187,7 @@ func testLoadBlob(t *testing.T, version uint) {
     _, err := io.ReadFull(rnd, buf)
     rtest.OK(t, err)

-    id, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, buf, restic.ID{}, false)
+    id, _, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, buf, restic.ID{}, false)
     rtest.OK(t, err)
     rtest.OK(t, repo.Flush(context.Background()))

@@ -220,7 +220,7 @@ func benchmarkLoadBlob(b *testing.B, version uint) {
     _, err := io.ReadFull(rnd, buf)
     rtest.OK(b, err)

-    id, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, buf, restic.ID{}, false)
+    id, _, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, buf, restic.ID{}, false)
     rtest.OK(b, err)
     rtest.OK(b, repo.Flush(context.Background()))

@@ -396,7 +396,7 @@ func saveRandomDataBlobs(t testing.TB, repo restic.Repository, num int, sizeMax
     _, err := io.ReadFull(rnd, buf)
     rtest.OK(t, err)

-    _, _, err = repo.SaveBlob(context.TODO(), restic.DataBlob, buf, restic.ID{}, false)
+    _, _, _, err = repo.SaveBlob(context.TODO(), restic.DataBlob, buf, restic.ID{}, false)
     rtest.OK(t, err)
     }
 }


@@ -46,7 +46,7 @@ type Repository interface {
     LoadUnpacked(ctx context.Context, buf []byte, t FileType, id ID) (data []byte, err error)

     LoadBlob(context.Context, BlobType, ID, []byte) ([]byte, error)
-    SaveBlob(context.Context, BlobType, []byte, ID, bool) (ID, bool, error)
+    SaveBlob(context.Context, BlobType, []byte, ID, bool) (ID, bool, int, error)

     LoadTree(context.Context, ID) (*Tree, error)
     SaveTree(context.Context, *Tree) (ID, error)


@@ -52,7 +52,7 @@ func (fs *fakeFileSystem) saveFile(ctx context.Context, rd io.Reader) (blobs IDs
     id := Hash(chunk.Data)
     if !fs.blobIsKnown(BlobHandle{ID: id, Type: DataBlob}) {
-        _, _, err := fs.repo.SaveBlob(ctx, DataBlob, chunk.Data, id, true)
+        _, _, _, err := fs.repo.SaveBlob(ctx, DataBlob, chunk.Data, id, true)
         if err != nil {
             fs.t.Fatalf("error saving chunk: %v", err)
         }

@@ -138,7 +138,7 @@ func (fs *fakeFileSystem) saveTree(ctx context.Context, seed int64, depth int) I
         return id
     }

-    _, _, err := fs.repo.SaveBlob(ctx, TreeBlob, buf, id, false)
+    _, _, _, err := fs.repo.SaveBlob(ctx, TreeBlob, buf, id, false)
     if err != nil {
         fs.t.Fatal(err)
     }


@@ -41,7 +41,7 @@ func saveFile(t testing.TB, repo restic.Repository, node File) restic.ID {
     ctx, cancel := context.WithCancel(context.Background())
     defer cancel()

-    id, _, err := repo.SaveBlob(ctx, restic.DataBlob, []byte(node.Data), restic.ID{}, false)
+    id, _, _, err := repo.SaveBlob(ctx, restic.DataBlob, []byte(node.Data), restic.ID{}, false)
     if err != nil {
         t.Fatal(err)
     }