Merge pull request #3733 from restic/improve-stats

Improve stats
MichaelEischer 2022-07-02 19:07:31 +02:00 committed by GitHub
commit bb5f196b09
22 changed files with 140 additions and 110 deletions

View File

@@ -23,4 +23,7 @@ repository using `init --repository-version 2 --copy-chunker-params --repo2 path
 Then use the `copy` command to copy all snapshots to the new repository.
 
 https://github.com/restic/restic/issues/21
+https://github.com/restic/restic/issues/3779
 https://github.com/restic/restic/pull/3666
+https://github.com/restic/restic/pull/3704
+https://github.com/restic/restic/pull/3733

View File

@@ -386,7 +386,7 @@ func loadBlobs(ctx context.Context, repo restic.Repository, pack restic.ID, list
 			}
 		}
 		if reuploadBlobs {
-			_, _, err := repo.SaveBlob(ctx, blob.Type, plaintext, id, true)
+			_, _, _, err := repo.SaveBlob(ctx, blob.Type, plaintext, id, true)
 			if err != nil {
 				return err
 			}

View File

@@ -130,11 +130,11 @@ func runStats(gopts GlobalOptions, args []string) error {
 	if statsOptions.countMode == countModeRawData {
 		// the blob handles have been collected, but not yet counted
 		for blobHandle := range stats.blobs {
-			blobSize, found := repo.LookupBlobSize(blobHandle.ID, blobHandle.Type)
-			if !found {
+			pbs := repo.Index().Lookup(blobHandle)
+			if len(pbs) == 0 {
 				return fmt.Errorf("blob %v not found", blobHandle)
 			}
-			stats.TotalSize += uint64(blobSize)
+			stats.TotalSize += uint64(pbs[0].Length)
 			stats.TotalBlobCount++
 		}
 	}
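
In raw-data mode the stats command now asks the index for the packed blob entries instead of only the plaintext size, so the total reflects what the blobs occupy in the repository; the first entry is used because duplicate pack entries hold the same content. A minimal sketch of that lookup pattern, using simplified stand-in types rather than restic's real index API:

	package main

	import "fmt"

	// Simplified stand-ins for restic's index types (assumptions, not the real API).
	type BlobHandle struct{ ID, Type string }
	type PackedBlob struct{ Length uint }

	// lookup mimics repo.Index().Lookup: all pack entries for a blob handle.
	func lookup(idx map[BlobHandle][]PackedBlob, h BlobHandle) []PackedBlob {
		return idx[h]
	}

	func main() {
		idx := map[BlobHandle][]PackedBlob{
			{ID: "abc123", Type: "data"}: {{Length: 2048}},
		}

		var totalSize uint64
		for h := range idx {
			pbs := lookup(idx, h)
			if len(pbs) == 0 {
				panic(fmt.Sprintf("blob %v not found", h))
			}
			// Use the first pack entry: duplicates store the same content.
			totalSize += uint64(pbs[0].Length)
		}
		fmt.Println("total in-repo size:", totalSize)
	}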

View File

@@ -31,18 +31,22 @@ type ErrorFunc func(file string, fi os.FileInfo, err error) error
 // ItemStats collects some statistics about a particular file or directory.
 type ItemStats struct {
-	DataBlobs int    // number of new data blobs added for this item
-	DataSize  uint64 // sum of the sizes of all new data blobs
-	TreeBlobs int    // number of new tree blobs added for this item
-	TreeSize  uint64 // sum of the sizes of all new tree blobs
+	DataBlobs      int    // number of new data blobs added for this item
+	DataSize       uint64 // sum of the sizes of all new data blobs
+	DataSizeInRepo uint64 // sum of the bytes added to the repo (including compression and crypto overhead)
+	TreeBlobs      int    // number of new tree blobs added for this item
+	TreeSize       uint64 // sum of the sizes of all new tree blobs
+	TreeSizeInRepo uint64 // sum of the bytes added to the repo (including compression and crypto overhead)
 }

 // Add adds other to the current ItemStats.
 func (s *ItemStats) Add(other ItemStats) {
 	s.DataBlobs += other.DataBlobs
 	s.DataSize += other.DataSize
+	s.DataSizeInRepo += other.DataSizeInRepo
 	s.TreeBlobs += other.TreeBlobs
 	s.TreeSize += other.TreeSize
+	s.TreeSizeInRepo += other.TreeSizeInRepo
 }

@@ -183,7 +187,8 @@ func (arch *Archiver) saveTree(ctx context.Context, t *restic.Tree) (restic.ID,
 	res.Wait(ctx)
 	if !res.Known() {
 		s.TreeBlobs++
-		s.TreeSize += uint64(len(buf))
+		s.TreeSize += uint64(res.Length())
+		s.TreeSizeInRepo += uint64(res.SizeInRepo())
 	}
 	// The context was canceled in the meantime, res.ID() might be invalid
 	if ctx.Err() != nil {
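
The core of the change is visible here: ItemStats now tracks both the plaintext size and the bytes actually written to the repository, and Add merges them field by field. A self-contained sketch mirroring the struct above (not importing restic; the numbers echo the test expectations below, 32 bytes of per-blob crypto overhead on a 6-byte file and 0x154/0x16a for a tree blob, and are otherwise illustrative):

	package main

	import "fmt"

	// Mirrors the ItemStats fields added above (sketch, not the restic package).
	type ItemStats struct {
		DataBlobs      int
		DataSize       uint64 // plaintext bytes of new data blobs
		DataSizeInRepo uint64 // bytes actually added to the repo
		TreeBlobs      int
		TreeSize       uint64
		TreeSizeInRepo uint64
	}

	func (s *ItemStats) Add(other ItemStats) {
		s.DataBlobs += other.DataBlobs
		s.DataSize += other.DataSize
		s.DataSizeInRepo += other.DataSizeInRepo
		s.TreeBlobs += other.TreeBlobs
		s.TreeSize += other.TreeSize
		s.TreeSizeInRepo += other.TreeSizeInRepo
	}

	func main() {
		var total ItemStats
		total.Add(ItemStats{DataBlobs: 1, DataSize: 6, DataSizeInRepo: 32 + 6}) // 6 bytes plus 32 bytes crypto overhead
		total.Add(ItemStats{TreeBlobs: 1, TreeSize: 0x154, TreeSizeInRepo: 0x16a})
		fmt.Printf("added %d bytes (%d stored)\n",
			total.DataSize+total.TreeSize, total.DataSizeInRepo+total.TreeSizeInRepo)
	}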

View File

@@ -415,16 +415,16 @@ type blobCountingRepo struct {
 	saved map[restic.BlobHandle]uint
 }

-func (repo *blobCountingRepo) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (restic.ID, bool, error) {
-	id, exists, err := repo.Repository.SaveBlob(ctx, t, buf, id, false)
+func (repo *blobCountingRepo) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (restic.ID, bool, int, error) {
+	id, exists, size, err := repo.Repository.SaveBlob(ctx, t, buf, id, false)
 	if exists {
-		return id, exists, err
+		return id, exists, size, err
 	}
 	h := restic.BlobHandle{ID: id, Type: t}
 	repo.m.Lock()
 	repo.saved[h]++
 	repo.m.Unlock()
-	return id, exists, err
+	return id, exists, size, err
 }

 func (repo *blobCountingRepo) SaveTree(ctx context.Context, t *restic.Tree) (restic.ID, error) {
@@ -1019,7 +1019,7 @@ func TestArchiverSaveTree(t *testing.T) {
 			want: TestDir{
 				"targetfile": TestFile{Content: string("foobar")},
 			},
-			stat: ItemStats{1, 6, 0, 0},
+			stat: ItemStats{1, 6, 32 + 6, 0, 0, 0},
 		},
 		{
 			src: TestDir{
@@ -1031,7 +1031,7 @@ func TestArchiverSaveTree(t *testing.T) {
 				"targetfile":  TestFile{Content: string("foobar")},
 				"filesymlink": TestSymlink{Target: "targetfile"},
 			},
-			stat: ItemStats{1, 6, 0, 0},
+			stat: ItemStats{1, 6, 32 + 6, 0, 0, 0},
 		},
 		{
 			src: TestDir{
@@ -1051,7 +1051,7 @@ func TestArchiverSaveTree(t *testing.T) {
 					"symlink": TestSymlink{Target: "subdir"},
 				},
 			},
-			stat: ItemStats{0, 0, 1, 0x154},
+			stat: ItemStats{0, 0, 0, 1, 0x154, 0x16a},
 		},
 		{
 			src: TestDir{
@@ -1075,7 +1075,7 @@ func TestArchiverSaveTree(t *testing.T) {
 				},
 			},
 		},
-		stat: ItemStats{1, 6, 3, 0x47f},
+		stat: ItemStats{1, 6, 32 + 6, 3, 0x47f, 0x4c1},
 	},
 }
@@ -1140,7 +1140,8 @@ func TestArchiverSaveTree(t *testing.T) {
 			bothZeroOrNeither(t, uint64(test.stat.DataBlobs), uint64(stat.DataBlobs))
 			bothZeroOrNeither(t, uint64(test.stat.TreeBlobs), uint64(stat.TreeBlobs))
 			bothZeroOrNeither(t, test.stat.DataSize, stat.DataSize)
-			bothZeroOrNeither(t, test.stat.TreeSize, stat.TreeSize)
+			bothZeroOrNeither(t, test.stat.DataSizeInRepo, stat.DataSizeInRepo)
+			bothZeroOrNeither(t, test.stat.TreeSizeInRepo, stat.TreeSizeInRepo)
 		})
 	}
 }
@@ -1944,10 +1945,10 @@ type failSaveRepo struct {
 	err error
 }

-func (f *failSaveRepo) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (restic.ID, bool, error) {
+func (f *failSaveRepo) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (restic.ID, bool, int, error) {
 	val := atomic.AddInt32(&f.cnt, 1)
 	if val >= f.failAfter {
-		return restic.ID{}, false, f.err
+		return restic.ID{}, false, 0, f.err
 	}

 	return f.Repository.SaveBlob(ctx, t, buf, id, storeDuplicate)

View File

@@ -10,7 +10,7 @@ import (
 // Saver allows saving a blob.
 type Saver interface {
-	SaveBlob(ctx context.Context, t restic.BlobType, data []byte, id restic.ID, storeDuplicate bool) (restic.ID, bool, error)
+	SaveBlob(ctx context.Context, t restic.BlobType, data []byte, id restic.ID, storeDuplicate bool) (restic.ID, bool, int, error)
 	Index() restic.MasterIndex
 }
@@ -86,11 +86,17 @@ func (s *FutureBlob) Known() bool {
 	return s.res.known
 }

-// Length returns the length of the blob.
+// Length returns the raw length of the blob.
 func (s *FutureBlob) Length() int {
 	return s.length
 }

+// SizeInRepo returns the number of bytes added to the repo (including
+// compression and crypto overhead).
+func (s *FutureBlob) SizeInRepo() int {
+	return s.res.size
+}
+
 type saveBlobJob struct {
 	restic.BlobType
 	buf *Buffer
@@ -100,10 +106,11 @@ type saveBlobJob struct {
 type saveBlobResponse struct {
 	id    restic.ID
 	known bool
+	size  int
 }

 func (s *BlobSaver) saveBlob(ctx context.Context, t restic.BlobType, buf []byte) (saveBlobResponse, error) {
-	id, known, err := s.repo.SaveBlob(ctx, t, buf, restic.ID{}, false)
+	id, known, size, err := s.repo.SaveBlob(ctx, t, buf, restic.ID{}, false)
 	if err != nil {
 		return saveBlobResponse{}, err
@@ -112,6 +119,7 @@ func (s *BlobSaver) saveBlob(ctx context.Context, t restic.BlobType, buf []byte)
 	return saveBlobResponse{
 		id:    id,
 		known: known,
+		size:  size,
 	}, nil
 }

View File

@@ -21,13 +21,13 @@ type saveFail struct {
 	failAt int32
 }

-func (b *saveFail) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicates bool) (restic.ID, bool, error) {
+func (b *saveFail) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicates bool) (restic.ID, bool, int, error) {
 	val := atomic.AddInt32(&b.cnt, 1)
 	if val == b.failAt {
-		return restic.ID{}, false, errTest
+		return restic.ID{}, false, 0, errTest
 	}

-	return id, false, nil
+	return id, false, 0, nil
 }

 func (b *saveFail) Index() restic.MasterIndex {

View File

@@ -210,6 +210,7 @@ func (s *FileSaver) saveFile(ctx context.Context, chnker *chunker.Chunker, snPat
 		if !res.Known() {
 			stats.DataBlobs++
 			stats.DataSize += uint64(res.Length())
+			stats.DataSizeInRepo += uint64(res.SizeInRepo())
 		}

 		node.Content = append(node.Content, res.ID())

View File

@@ -483,7 +483,7 @@ func TestCheckerBlobTypeConfusion(t *testing.T) {
 	buf, err := repo.LoadBlob(ctx, restic.TreeBlob, id, nil)
 	test.OK(t, err)

-	_, _, err = repo.SaveBlob(ctx, restic.DataBlob, buf, id, false)
+	_, _, _, err = repo.SaveBlob(ctx, restic.DataBlob, buf, id, false)
 	test.OK(t, err)

 	malNode := &restic.Node{

View File

@@ -31,7 +31,7 @@ func NewPacker(k *crypto.Key, wr io.Writer) *Packer {
 }

 // Add saves the data read from rd as a new blob to the packer. Returned is the
-// number of bytes written to the pack.
+// number of bytes written to the pack plus the pack header entry size.
 func (p *Packer) Add(t restic.BlobType, id restic.ID, data []byte, uncompressedLength int) (int, error) {
 	p.m.Lock()
 	defer p.m.Unlock()
@@ -44,6 +44,7 @@ func (p *Packer) Add(t restic.BlobType, id restic.ID, data []byte, uncompressedL
 	c.UncompressedLength = uint(uncompressedLength)
 	p.bytes += uint(n)
 	p.blobs = append(p.blobs, c)
+	n += CalculateEntrySize(c)

 	return n, errors.Wrap(err, "Write")
 }
@@ -69,13 +70,11 @@ type compressedHeaderEntry struct {
 }

 // Finalize writes the header for all added blobs and finalizes the pack.
-// Returned are the number of bytes written, including the header.
-func (p *Packer) Finalize() (uint, error) {
+// Returned are the number of bytes written, not yet reported by Add.
+func (p *Packer) Finalize() (int, error) {
 	p.m.Lock()
 	defer p.m.Unlock()

-	bytesWritten := p.bytes
-
 	header, err := p.makeHeader()
 	if err != nil {
 		return 0, err
@@ -97,17 +96,14 @@ func (p *Packer) Finalize() (int, error) {
 		return 0, errors.New("wrong number of bytes written")
 	}

-	bytesWritten += uint(hdrBytes)
-
 	// write length
 	err = binary.Write(p.wr, binary.LittleEndian, uint32(hdrBytes))
 	if err != nil {
 		return 0, errors.Wrap(err, "binary.Write")
 	}

-	bytesWritten += uint(binary.Size(uint32(0)))
-	p.bytes = uint(bytesWritten)
-
-	return bytesWritten, nil
+	p.bytes += uint(hdrBytes + binary.Size(uint32(0)))
+
+	return restic.CiphertextLength(0) + binary.Size(uint32(0)), nil
 }

 // makeHeader constructs the header for p.
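
The accounting invariant behind this split: each Add now reports its blob bytes plus the header entry those bytes will need, so Finalize only has to report the fixed remainder (the crypto overhead of the encrypted header and the 4-byte length field). A worked sketch of that arithmetic under assumed constants (37 bytes per uncompressed header entry and 32 bytes of crypto overhead match the l+37 expectation in the packer manager test below; treat them as assumptions here, not the pack format spec):

	package main

	import "fmt"

	// Assumed constants for an uncompressed-entry pack (not authoritative):
	const (
		entrySize      = 37 // per-blob header entry: type + length + ID
		cryptoOverhead = 32 // nonce + MAC for the encrypted header
		lengthField    = 4  // trailing uint32 header length
	)

	func main() {
		blobLens := []int{100, 2000, 345} // ciphertext lengths written via Add

		reported := 0
		packBytes := 0
		for _, l := range blobLens {
			packBytes += l
			reported += l + entrySize // what Add returns per blob
		}

		// Finalize writes the encrypted header and the length field, but only
		// reports the part not already counted by Add.
		header := len(blobLens)*entrySize + cryptoOverhead
		packBytes += header + lengthField
		reported += cryptoOverhead + lengthField // Finalize's return value

		fmt.Println(packBytes == reported) // true: nothing counted twice or missed
	}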

View File

@@ -23,7 +23,7 @@ func FuzzSaveLoadBlob(f *testing.F) {
 		id := restic.Hash(blob)
 		repo, _ := TestRepositoryWithBackend(t, mem.New(), 2)

-		_, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, blob, id, false)
+		_, _, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, blob, id, false)
 		if err != nil {
 			t.Fatal(err)
 		}

View File

@@ -106,11 +106,11 @@ func (r *packerManager) insertPacker(p *Packer) {
 }

 // savePacker stores p in the backend.
-func (r *Repository) savePacker(ctx context.Context, t restic.BlobType, p *Packer) error {
+func (r *Repository) savePacker(ctx context.Context, t restic.BlobType, p *Packer) (int, error) {
 	debug.Log("save packer for %v with %d blobs (%d bytes)\n", t, p.Packer.Count(), p.Packer.Size())
-	_, err := p.Packer.Finalize()
+	hdrOverhead, err := p.Packer.Finalize()
 	if err != nil {
-		return err
+		return 0, err
 	}

 	id := restic.IDFromHash(p.hw.Sum(nil))
@@ -122,27 +122,27 @@ func (r *Repository) savePacker(ctx context.Context, t restic.BlobType, p *Packe
 	}

 	rd, err := restic.NewFileReader(p.tmpfile, beHash)
 	if err != nil {
-		return err
+		return 0, err
 	}

 	err = r.be.Save(ctx, h, rd)
 	if err != nil {
 		debug.Log("Save(%v) error: %v", h, err)
-		return err
+		return 0, err
 	}

 	debug.Log("saved as %v", h)

 	err = p.tmpfile.Close()
 	if err != nil {
-		return errors.Wrap(err, "close tempfile")
+		return 0, errors.Wrap(err, "close tempfile")
 	}

 	// on windows the tempfile is automatically deleted on close
 	if runtime.GOOS != "windows" {
 		err = fs.RemoveIfExists(p.tmpfile.Name())
 		if err != nil {
-			return errors.Wrap(err, "Remove")
+			return 0, errors.Wrap(err, "Remove")
 		}
 	}
@@ -152,9 +152,9 @@ func (r *Repository) savePacker(ctx context.Context, t restic.BlobType, p *Packe
 	// Save index if full
 	if r.noAutoIndexUpdate {
-		return nil
+		return hdrOverhead, nil
 	}

-	return r.idx.SaveFullIndex(ctx, r)
+	return hdrOverhead, r.idx.SaveFullIndex(ctx, r)
 }

 // countPacker returns the number of open (unfinished) packers.

View File

@@ -74,8 +74,8 @@ func fillPacks(t testing.TB, rnd *rand.Rand, be Saver, pm *packerManager, buf []
 		if err != nil {
 			t.Fatal(err)
 		}
-		if n != l {
-			t.Errorf("Add() returned invalid number of bytes: want %v, got %v", n, l)
+		if n != l+37 {
+			t.Errorf("Add() returned invalid number of bytes: want %v, got %v", l, n)
 		}
 		bytes += l
@@ -107,7 +107,7 @@ func flushRemainingPacks(t testing.TB, be Saver, pm *packerManager) (bytes int)
 			if err != nil {
 				t.Fatal(err)
 			}
-			bytes += int(n)
+			bytes += n

 			packID := restic.IDFromHash(packer.hw.Sum(nil))
 			var beHash []byte

View File

@@ -75,7 +75,7 @@ func Repack(ctx context.Context, repo restic.Repository, dstRepo restic.Reposito
 			}

 			// We do want to save already saved blobs!
-			_, _, err = dstRepo.SaveBlob(wgCtx, blob.Type, buf, blob.ID, true)
+			_, _, _, err = dstRepo.SaveBlob(wgCtx, blob.Type, buf, blob.ID, true)
 			if err != nil {
 				return err
 			}

View File

@@ -32,7 +32,7 @@ func createRandomBlobs(t testing.TB, repo restic.Repository, blobs int, pData fl
 		buf := make([]byte, length)
 		rand.Read(buf)

-		id, exists, err := repo.SaveBlob(context.TODO(), tpe, buf, restic.ID{}, false)
+		id, exists, _, err := repo.SaveBlob(context.TODO(), tpe, buf, restic.ID{}, false)
 		if err != nil {
 			t.Fatalf("SaveFrom() error %v", err)
 		}
@@ -62,7 +62,7 @@ func createRandomWrongBlob(t testing.TB, repo restic.Repository) {
 	// invert first data byte
 	buf[0] ^= 0xff

-	_, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, buf, id, false)
+	_, _, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, buf, id, false)
 	if err != nil {
 		t.Fatalf("SaveFrom() error %v", err)
 	}

View File

@@ -378,9 +378,10 @@ func (r *Repository) getZstdDecoder() *zstd.Decoder {
 }

 // saveAndEncrypt encrypts data and stores it to the backend as type t. If data
-// is small enough, it will be packed together with other small blobs.
-// The caller must ensure that the id matches the data.
-func (r *Repository) saveAndEncrypt(ctx context.Context, t restic.BlobType, data []byte, id restic.ID) error {
+// is small enough, it will be packed together with other small blobs. The
+// caller must ensure that the id matches the data. Returned is the size data
+// occupies in the repo (compressed or not, including the encryption overhead).
+func (r *Repository) saveAndEncrypt(ctx context.Context, t restic.BlobType, data []byte, id restic.ID) (size int, err error) {
 	debug.Log("save id %v (%v, %d bytes)", id, t, len(data))

 	uncompressedLength := 0
@@ -417,24 +418,29 @@ func (r *Repository) saveAndEncrypt(ctx context.Context, t restic.BlobType, data
 	packer, err := pm.findPacker()
 	if err != nil {
-		return err
+		return 0, err
 	}

 	// save ciphertext
-	_, err = packer.Add(t, id, ciphertext, uncompressedLength)
+	size, err = packer.Add(t, id, ciphertext, uncompressedLength)
 	if err != nil {
-		return err
+		return 0, err
 	}

 	// if the pack is not full enough, put back to the list
 	if packer.Size() < minPackSize {
 		debug.Log("pack is not full enough (%d bytes)", packer.Size())
 		pm.insertPacker(packer)
-		return nil
+		return size, nil
 	}

 	// else write the pack to the backend
-	return r.savePacker(ctx, t, packer)
+	hdrSize, err := r.savePacker(ctx, t, packer)
+	if err != nil {
+		return 0, err
+	}
+
+	return size + hdrSize, nil
 }

 // SaveJSONUnpacked serialises item as JSON and encrypts and saves it in the
@@ -545,7 +551,7 @@ func (r *Repository) flushPacks(ctx context.Context) error {
 		debug.Log("manually flushing %d packs", len(p.pm.packers))
 		for _, packer := range p.pm.packers {
-			err := r.savePacker(ctx, p.t, packer)
+			_, err := r.savePacker(ctx, p.t, packer)
 			if err != nil {
 				p.pm.pm.Unlock()
 				return err
@@ -815,8 +821,10 @@ func (r *Repository) Close() error {
 // It takes care that no duplicates are saved; this can be overwritten
 // by setting storeDuplicate to true.
 // If id is the null id, it will be computed and returned.
-// Also returns if the blob was already known before
-func (r *Repository) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (newID restic.ID, known bool, err error) {
+// Also returns if the blob was already known before.
+// If the blob was not known before, it returns the number of bytes the blob
+// occupies in the repo (compressed or not, including encryption overhead).
+func (r *Repository) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte, id restic.ID, storeDuplicate bool) (newID restic.ID, known bool, size int, err error) {

 	// compute plaintext hash if not already set
 	if id.IsNull() {
@@ -830,10 +838,10 @@ func (r *Repository) SaveBlob(ctx context.Context, t restic.BlobType, buf []byte
 	// only save when needed or explicitly told
 	if !known || storeDuplicate {
-		err = r.saveAndEncrypt(ctx, t, buf, newID)
+		size, err = r.saveAndEncrypt(ctx, t, buf, newID)
 	}

-	return newID, known, err
+	return newID, known, size, err
 }

 // LoadTree loads a tree from the repository.
@@ -867,7 +875,7 @@ func (r *Repository) SaveTree(ctx context.Context, t *restic.Tree) (restic.ID, e
 	// adds a newline after each object)
 	buf = append(buf, '\n')

-	id, _, err := r.SaveBlob(ctx, restic.TreeBlob, buf, restic.ID{}, false)
+	id, _, _, err := r.SaveBlob(ctx, restic.TreeBlob, buf, restic.ID{}, false)
 	return id, err
 }
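
The contract that callers now rely on: SaveBlob returns a fourth value, the stored size, which is zero whenever the blob was already known and not re-stored. A hedged usage sketch against a stand-in interface (deliberately narrower than restic.Repository; the 32-byte overhead is an assumption for illustration):

	package main

	import (
		"context"
		"fmt"
	)

	// Stand-in for the widened SaveBlob signature introduced here.
	type blobSaver interface {
		SaveBlob(ctx context.Context, data []byte) (known bool, size int, err error)
	}

	type fakeRepo struct{ seen map[string]bool }

	func (r *fakeRepo) SaveBlob(ctx context.Context, data []byte) (bool, int, error) {
		key := string(data)
		if r.seen[key] {
			return true, 0, nil // known blob: nothing added, size is 0
		}
		r.seen[key] = true
		return false, len(data) + 32, nil // assumed 32 bytes crypto overhead
	}

	func main() {
		var repo blobSaver = &fakeRepo{seen: map[string]bool{}}

		addedToRepo := 0
		for _, blob := range [][]byte{[]byte("foo"), []byte("bar"), []byte("foo")} {
			known, size, err := repo.SaveBlob(context.Background(), blob)
			if err != nil {
				panic(err)
			}
			if !known {
				addedToRepo += size
			}
		}
		fmt.Println("bytes added:", addedToRepo) // 35 + 35 = 70; duplicate "foo" adds nothing
	}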

View File

@@ -44,7 +44,7 @@ func testSave(t *testing.T, version uint) {
 	id := restic.Hash(data)

 	// save
-	sid, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, data, restic.ID{}, false)
+	sid, _, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, data, restic.ID{}, false)
 	rtest.OK(t, err)

 	rtest.Equals(t, id, sid)
@@ -83,7 +83,7 @@ func testSaveFrom(t *testing.T, version uint) {
 	id := restic.Hash(data)

 	// save
-	id2, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, data, id, false)
+	id2, _, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, data, id, false)
 	rtest.OK(t, err)

 	rtest.Equals(t, id, id2)
@@ -125,7 +125,7 @@ func benchmarkSaveAndEncrypt(t *testing.B, version uint) {
 	t.SetBytes(int64(size))

 	for i := 0; i < t.N; i++ {
-		_, _, err = repo.SaveBlob(context.TODO(), restic.DataBlob, data, id, true)
+		_, _, _, err = repo.SaveBlob(context.TODO(), restic.DataBlob, data, id, true)
 		rtest.OK(t, err)
 	}
 }
@@ -187,7 +187,7 @@ func testLoadBlob(t *testing.T, version uint) {
 	_, err := io.ReadFull(rnd, buf)
 	rtest.OK(t, err)

-	id, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, buf, restic.ID{}, false)
+	id, _, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, buf, restic.ID{}, false)
 	rtest.OK(t, err)
 	rtest.OK(t, repo.Flush(context.Background()))
@@ -220,7 +220,7 @@ func benchmarkLoadBlob(b *testing.B, version uint) {
 	_, err := io.ReadFull(rnd, buf)
 	rtest.OK(b, err)

-	id, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, buf, restic.ID{}, false)
+	id, _, _, err := repo.SaveBlob(context.TODO(), restic.DataBlob, buf, restic.ID{}, false)
 	rtest.OK(b, err)
 	rtest.OK(b, repo.Flush(context.Background()))
@@ -396,7 +396,7 @@ func saveRandomDataBlobs(t testing.TB, repo restic.Repository, num int, sizeMax
 	_, err := io.ReadFull(rnd, buf)
 	rtest.OK(t, err)

-	_, _, err = repo.SaveBlob(context.TODO(), restic.DataBlob, buf, restic.ID{}, false)
+	_, _, _, err = repo.SaveBlob(context.TODO(), restic.DataBlob, buf, restic.ID{}, false)
 	rtest.OK(t, err)
 }

View File

@@ -46,7 +46,7 @@ type Repository interface {
 	LoadUnpacked(ctx context.Context, buf []byte, t FileType, id ID) (data []byte, err error)
 	LoadBlob(context.Context, BlobType, ID, []byte) ([]byte, error)
-	SaveBlob(context.Context, BlobType, []byte, ID, bool) (ID, bool, error)
+	SaveBlob(context.Context, BlobType, []byte, ID, bool) (ID, bool, int, error)

 	LoadTree(context.Context, ID) (*Tree, error)
 	SaveTree(context.Context, *Tree) (ID, error)

View File

@@ -52,7 +52,7 @@ func (fs *fakeFileSystem) saveFile(ctx context.Context, rd io.Reader) (blobs IDs
 		id := Hash(chunk.Data)
 		if !fs.blobIsKnown(BlobHandle{ID: id, Type: DataBlob}) {
-			_, _, err := fs.repo.SaveBlob(ctx, DataBlob, chunk.Data, id, true)
+			_, _, _, err := fs.repo.SaveBlob(ctx, DataBlob, chunk.Data, id, true)
 			if err != nil {
 				fs.t.Fatalf("error saving chunk: %v", err)
 			}
@@ -138,7 +138,7 @@ func (fs *fakeFileSystem) saveTree(ctx context.Context, seed int64, depth int) I
 		return id
 	}

-	_, _, err := fs.repo.SaveBlob(ctx, TreeBlob, buf, id, false)
+	_, _, _, err := fs.repo.SaveBlob(ctx, TreeBlob, buf, id, false)
 	if err != nil {
 		fs.t.Fatal(err)
 	}

View File

@@ -41,7 +41,7 @@ func saveFile(t testing.TB, repo restic.Repository, node File) restic.ID {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()

-	id, _, err := repo.SaveBlob(ctx, restic.DataBlob, []byte(node.Data), restic.ID{}, false)
+	id, _, _, err := repo.SaveBlob(ctx, restic.DataBlob, []byte(node.Data), restic.ID{}, false)
 	if err != nil {
 		t.Fatal(err)
 	}

View File

@@ -110,12 +110,14 @@ func (b *JSONProgress) CompleteItem(messageType, item string, previous, current
 	switch messageType {
 	case "dir new":
 		b.print(verboseUpdate{
-			MessageType:  "verbose_status",
-			Action:       "new",
-			Item:         item,
-			Duration:     d.Seconds(),
-			DataSize:     s.DataSize,
-			MetadataSize: s.TreeSize,
+			MessageType:        "verbose_status",
+			Action:             "new",
+			Item:               item,
+			Duration:           d.Seconds(),
+			DataSize:           s.DataSize,
+			DataSizeInRepo:     s.DataSizeInRepo,
+			MetadataSize:       s.TreeSize,
+			MetadataSizeInRepo: s.TreeSizeInRepo,
 		})
 	case "dir unchanged":
 		b.print(verboseUpdate{
@@ -125,20 +127,23 @@ func (b *JSONProgress) CompleteItem(messageType, item string, previous, current
 		})
 	case "dir modified":
 		b.print(verboseUpdate{
-			MessageType:  "verbose_status",
-			Action:       "modified",
-			Item:         item,
-			Duration:     d.Seconds(),
-			DataSize:     s.DataSize,
-			MetadataSize: s.TreeSize,
+			MessageType:        "verbose_status",
+			Action:             "modified",
+			Item:               item,
+			Duration:           d.Seconds(),
+			DataSize:           s.DataSize,
+			DataSizeInRepo:     s.DataSizeInRepo,
+			MetadataSize:       s.TreeSize,
+			MetadataSizeInRepo: s.TreeSizeInRepo,
 		})
 	case "file new":
 		b.print(verboseUpdate{
 			MessageType:    "verbose_status",
 			Action:         "new",
 			Item:           item,
 			Duration:       d.Seconds(),
 			DataSize:       s.DataSize,
+			DataSizeInRepo: s.DataSizeInRepo,
 		})
 	case "file unchanged":
 		b.print(verboseUpdate{
@@ -148,11 +153,12 @@ func (b *JSONProgress) CompleteItem(messageType, item string, previous, current
 		})
 	case "file modified":
 		b.print(verboseUpdate{
 			MessageType:    "verbose_status",
 			Action:         "modified",
 			Item:           item,
 			Duration:       d.Seconds(),
 			DataSize:       s.DataSize,
+			DataSizeInRepo: s.DataSizeInRepo,
 		})
 	}
 }
@@ -216,13 +222,15 @@ type errorUpdate struct {
 }

 type verboseUpdate struct {
-	MessageType  string  `json:"message_type"` // "verbose_status"
-	Action       string  `json:"action"`
-	Item         string  `json:"item"`
-	Duration     float64 `json:"duration"` // in seconds
-	DataSize     uint64  `json:"data_size"`
-	MetadataSize uint64  `json:"metadata_size"`
-	TotalFiles   uint    `json:"total_files"`
+	MessageType        string  `json:"message_type"` // "verbose_status"
+	Action             string  `json:"action"`
+	Item               string  `json:"item"`
+	Duration           float64 `json:"duration"` // in seconds
+	DataSize           uint64  `json:"data_size"`
+	DataSizeInRepo     uint64  `json:"data_size_in_repo"`
+	MetadataSize       uint64  `json:"metadata_size"`
+	MetadataSizeInRepo uint64  `json:"metadata_size_in_repo"`
+	TotalFiles         uint    `json:"total_files"`
 }

 type summaryOutput struct {
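
What the new keys look like on the wire: a sketch that marshals a struct mirroring the verboseUpdate tags above (field subset copied from the diff; all values are invented for illustration):

	package main

	import (
		"encoding/json"
		"fmt"
	)

	// Mirrors verboseUpdate's JSON tags above (sketch; values invented).
	type verboseUpdate struct {
		MessageType        string  `json:"message_type"`
		Action             string  `json:"action"`
		Item               string  `json:"item"`
		Duration           float64 `json:"duration"`
		DataSize           uint64  `json:"data_size"`
		DataSizeInRepo     uint64  `json:"data_size_in_repo"`
		MetadataSize       uint64  `json:"metadata_size"`
		MetadataSizeInRepo uint64  `json:"metadata_size_in_repo"`
	}

	func main() {
		out, _ := json.Marshal(verboseUpdate{
			MessageType:        "verbose_status",
			Action:             "new",
			Item:               "/home/user/file",
			Duration:           0.003,
			DataSize:           6,
			DataSizeInRepo:     38,
			MetadataSizeInRepo: 362,
		})
		// Prints one status line with the new *_in_repo fields alongside the old ones.
		fmt.Println(string(out))
	}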

View File

@@ -138,17 +138,17 @@ func formatBytes(c uint64) string {
 func (b *TextProgress) CompleteItem(messageType, item string, previous, current *restic.Node, s archiver.ItemStats, d time.Duration) {
 	switch messageType {
 	case "dir new":
-		b.VV("new %v, saved in %.3fs (%v added, %v metadata)", item, d.Seconds(), formatBytes(s.DataSize), formatBytes(s.TreeSize))
+		b.VV("new %v, saved in %.3fs (%v added, %v stored, %v metadata)", item, d.Seconds(), formatBytes(s.DataSize), formatBytes(s.DataSizeInRepo), formatBytes(s.TreeSizeInRepo))
 	case "dir unchanged":
 		b.VV("unchanged %v", item)
 	case "dir modified":
-		b.VV("modified %v, saved in %.3fs (%v added, %v metadata)", item, d.Seconds(), formatBytes(s.DataSize), formatBytes(s.TreeSize))
+		b.VV("modified %v, saved in %.3fs (%v added, %v stored, %v metadata)", item, d.Seconds(), formatBytes(s.DataSize), formatBytes(s.DataSizeInRepo), formatBytes(s.TreeSizeInRepo))
 	case "file new":
 		b.VV("new %v, saved in %.3fs (%v added)", item, d.Seconds(), formatBytes(s.DataSize))
 	case "file unchanged":
 		b.VV("unchanged %v", item)
 	case "file modified":
-		b.VV("modified %v, saved in %.3fs (%v added)", item, d.Seconds(), formatBytes(s.DataSize))
+		b.VV("modified %v, saved in %.3fs (%v added, %v stored)", item, d.Seconds(), formatBytes(s.DataSize), formatBytes(s.DataSizeInRepo))
 	}
 }

@@ -178,7 +178,7 @@ func (b *TextProgress) Finish(snapshotID restic.ID, start time.Time, summary *Su
 	if dryRun {
 		verb = "Would add"
 	}
-	b.P("%s to the repo: %-5s\n", verb, formatBytes(summary.ItemStats.DataSize+summary.ItemStats.TreeSize))
+	b.P("%s to the repo: %-5s (%-5s stored)\n", verb, formatBytes(summary.ItemStats.DataSize+summary.ItemStats.TreeSize), formatBytes(summary.ItemStats.DataSizeInRepo+summary.ItemStats.TreeSizeInRepo))

 	b.P("\n")
 	b.P("processed %v files, %v in %s",
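
With this change the end-of-backup summary reports both the logical size and the bytes actually written, along these lines (values illustrative, not from a real run):

	Would add to the repo: 1.069 MiB (932.410 KiB stored)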