Add MasterIndex.PackSize()
parent c3ddde9e7d
commit ce5d630681
@@ -46,7 +46,7 @@ func (p *Packer) Add(t restic.BlobType, id restic.ID, data []byte) (int, error)
     return n, errors.Wrap(err, "Write")
 }
 
-var entrySize = uint(binary.Size(restic.BlobType(0)) + binary.Size(uint32(0)) + len(restic.ID{}))
+var EntrySize = uint(binary.Size(restic.BlobType(0)) + headerLengthSize + len(restic.ID{}))
 
 // headerEntry describes the format of header entries. It serves only as
 // documentation.
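For orientation: EntrySize is the on-disk size of one pack header entry, i.e. a one-byte blob type, a four-byte length field and a 32-byte ID. A minimal sketch of that arithmetic, using illustrative stand-in types rather than the actual declarations from internal/pack and internal/restic:

package main

import (
	"encoding/binary"
	"fmt"
)

// Stand-ins for restic.BlobType (a single byte) and restic.ID (a SHA-256 hash).
type blobType uint8
type id [32]byte

func main() {
	// Mirrors the definition above: type byte + uint32 length + raw ID bytes.
	entrySize := binary.Size(blobType(0)) + binary.Size(uint32(0)) + len(id{})
	fmt.Println(entrySize) // 37 bytes per header entry, under these assumptions
}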
@@ -88,7 +88,7 @@ func (p *Packer) Finalize() (uint, error) {
     bytesWritten += uint(hdrBytes)
 
     // write length
-    err = binary.Write(p.wr, binary.LittleEndian, uint32(restic.CiphertextLength(len(p.blobs)*int(entrySize))))
+    err = binary.Write(p.wr, binary.LittleEndian, uint32(restic.CiphertextLength(len(p.blobs)*int(EntrySize))))
     if err != nil {
         return 0, errors.Wrap(err, "binary.Write")
     }
@@ -100,7 +100,7 @@ func (p *Packer) Finalize() (uint, error) {
 
 // makeHeader constructs the header for p.
 func (p *Packer) makeHeader() ([]byte, error) {
-    buf := make([]byte, 0, len(p.blobs)*int(entrySize))
+    buf := make([]byte, 0, len(p.blobs)*int(EntrySize))
 
     for _, b := range p.blobs {
         switch b.Type {
@@ -151,7 +151,7 @@ func (p *Packer) String() string {
 
 var (
     // we require at least one entry in the header, and one blob for a pack file
-    minFileSize = entrySize + crypto.Extension + uint(headerLengthSize)
+    minFileSize = EntrySize + crypto.Extension + uint(headerLengthSize)
 )
 
 const (
@@ -171,7 +171,7 @@ const (
 // the appropriate size.
 func readRecords(rd io.ReaderAt, size int64, max int) ([]byte, int, error) {
     var bufsize int
-    bufsize += max * int(entrySize)
+    bufsize += max * int(EntrySize)
     bufsize += crypto.Extension
     bufsize += headerLengthSize
 
@@ -195,7 +195,7 @@ func readRecords(rd io.ReaderAt, size int64, max int) ([]byte, int, error) {
         err = InvalidFileError{Message: "header length is zero"}
     case hlen < crypto.Extension:
         err = InvalidFileError{Message: "header length is too small"}
-    case (hlen-crypto.Extension)%uint32(entrySize) != 0:
+    case (hlen-crypto.Extension)%uint32(EntrySize) != 0:
         err = InvalidFileError{Message: "header length is invalid"}
     case int64(hlen) > size-int64(headerLengthSize):
         err = InvalidFileError{Message: "header is larger than file"}
@@ -206,7 +206,7 @@ func readRecords(rd io.ReaderAt, size int64, max int) ([]byte, int, error) {
         return nil, 0, errors.Wrap(err, "readHeader")
     }
 
-    total := (int(hlen) - crypto.Extension) / int(entrySize)
+    total := (int(hlen) - crypto.Extension) / int(EntrySize)
     if total < max {
         // truncate to the beginning of the pack header
         b = b[len(b)-int(hlen):]
@@ -272,7 +272,7 @@ func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []restic.Blob, hdr
         return nil, 0, err
     }
 
-    entries = make([]restic.Blob, 0, uint(len(buf))/entrySize)
+    entries = make([]restic.Blob, 0, uint(len(buf))/EntrySize)
 
     pos := uint(0)
     for len(buf) > 0 {
@@ -284,7 +284,7 @@ func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []restic.Blob, hdr
 
         entries = append(entries, entry)
         pos += entry.Length
-        buf = buf[entrySize:]
+        buf = buf[EntrySize:]
     }
 
     return entries, hdrSize, nil
@@ -292,15 +292,15 @@ func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []restic.Blob, hdr
 
 // PackedSizeOfBlob returns the size a blob actually uses when saved in a pack
 func PackedSizeOfBlob(blobLength uint) uint {
-    return blobLength + entrySize
+    return blobLength + EntrySize
 }
 
 func parseHeaderEntry(p []byte) (b restic.Blob, err error) {
-    if uint(len(p)) < entrySize {
+    if uint(len(p)) < EntrySize {
         err = errors.Errorf("parseHeaderEntry: buffer of size %d too short", len(p))
         return b, err
     }
-    p = p[:entrySize]
+    p = p[:EntrySize]
 
     switch p[0] {
     case 0:
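The hunk above stops at the start of the type switch in parseHeaderEntry. As a self-contained sketch of what decoding one entry involves, assuming the field order implied by the EntrySize definition (type byte, little-endian uint32 length, then the raw ID bytes) — illustrative only, not the code from this commit:

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

const entrySize = 1 + 4 + 32 // type + length + ID, as discussed above

type headerEntry struct {
	Type   byte   // 0 and 1 are assumed to mean data and tree blobs
	Length uint32 // length of the encrypted blob inside the pack
	ID     [32]byte
}

// parseEntry decodes a single header entry from p. The real decoder is
// parseHeaderEntry in internal/pack; this is only an illustration.
func parseEntry(p []byte) (headerEntry, error) {
	var e headerEntry
	if len(p) < entrySize {
		return e, errors.New("buffer too short")
	}
	e.Type = p[0]
	e.Length = binary.LittleEndian.Uint32(p[1:5])
	copy(e.ID[:], p[5:entrySize])
	return e, nil
}

func main() {
	buf := make([]byte, entrySize)
	buf[0] = 1
	binary.LittleEndian.PutUint32(buf[1:5], 4096)
	e, err := parseEntry(buf)
	fmt.Println(e.Type, e.Length, err) // 1 4096 <nil>
}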
@@ -41,7 +41,7 @@ func TestParseHeaderEntry(t *testing.T) {
     buf.Reset()
     _ = binary.Write(buf, binary.LittleEndian, &h)
 
-    b, err = parseHeaderEntry(buf.Bytes()[:entrySize-1])
+    b, err = parseHeaderEntry(buf.Bytes()[:EntrySize-1])
     rtest.Assert(t, err != nil, "no error for short input")
 }
 
@@ -58,7 +58,7 @@ func (rd *countingReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
 func TestReadHeaderEagerLoad(t *testing.T) {
 
     testReadHeader := func(dataSize, entryCount, expectedReadInvocationCount int) {
-        expectedHeader := rtest.Random(0, entryCount*int(entrySize)+crypto.Extension)
+        expectedHeader := rtest.Random(0, entryCount*int(EntrySize)+crypto.Extension)
 
         buf := &bytes.Buffer{}
         buf.Write(rtest.Random(0, dataSize)) // pack blobs data
@@ -83,8 +83,8 @@ func TestReadHeaderEagerLoad(t *testing.T) {
     testReadHeader(100, eagerEntries+1, 2)
 
     // file size == eager header load size
-    eagerLoadSize := int((eagerEntries * entrySize) + crypto.Extension)
-    headerSize := int(1*entrySize) + crypto.Extension
+    eagerLoadSize := int((eagerEntries * EntrySize) + crypto.Extension)
+    headerSize := int(1*EntrySize) + crypto.Extension
     dataSize := eagerLoadSize - headerSize - binary.Size(uint32(0))
     testReadHeader(dataSize-1, 1, 1)
     testReadHeader(dataSize, 1, 1)
@@ -96,8 +96,8 @@ func TestReadHeaderEagerLoad(t *testing.T) {
 
 func TestReadRecords(t *testing.T) {
     testReadRecords := func(dataSize, entryCount, totalRecords int) {
-        totalHeader := rtest.Random(0, totalRecords*int(entrySize)+crypto.Extension)
-        off := len(totalHeader) - (entryCount*int(entrySize) + crypto.Extension)
+        totalHeader := rtest.Random(0, totalRecords*int(EntrySize)+crypto.Extension)
+        off := len(totalHeader) - (entryCount*int(EntrySize) + crypto.Extension)
         if off < 0 {
             off = 0
         }
@@ -127,8 +127,8 @@ func TestReadRecords(t *testing.T) {
     testReadRecords(100, eagerEntries, eagerEntries+1)
 
     // file size == eager header load size
-    eagerLoadSize := int((eagerEntries * entrySize) + crypto.Extension)
-    headerSize := int(1*entrySize) + crypto.Extension
+    eagerLoadSize := int((eagerEntries * EntrySize) + crypto.Extension)
+    headerSize := int(1*EntrySize) + crypto.Extension
     dataSize := eagerLoadSize - headerSize - binary.Size(uint32(0))
     testReadRecords(dataSize-1, 1, 1)
     testReadRecords(dataSize, 1, 1)
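The dataSize boundary exercised by these tests follows from the buffer sizing in readRecords above: an eager load covers max entries plus the crypto overhead plus the length field. A small worked version of that arithmetic with illustrative constants (the concrete values of eagerEntries, EntrySize, crypto.Extension and headerLengthSize are assumptions here, not taken from the diff):

package main

import "fmt"

func main() {
	// Illustrative stand-ins for eagerEntries, EntrySize, crypto.Extension
	// and headerLengthSize in the restic source.
	const (
		eagerEntries     = 15
		entrySize        = 37
		cryptoExtension  = 32
		headerLengthSize = 4
	)
	eagerLoadSize := eagerEntries*entrySize + cryptoExtension
	headerSize := 1*entrySize + cryptoExtension
	dataSize := eagerLoadSize - headerSize - headerLengthSize
	fmt.Println(eagerLoadSize, headerSize, dataSize) // 587 69 514
}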
@@ -5,6 +5,7 @@ import (
     "sync"
 
     "github.com/restic/restic/internal/debug"
+    "github.com/restic/restic/internal/pack"
     "github.com/restic/restic/internal/restic"
     "github.com/restic/restic/internal/ui/progress"
     "golang.org/x/sync/errgroup"
@@ -111,6 +112,27 @@ func (mi *MasterIndex) Packs() restic.IDSet {
     return packs
 }
 
+// PackSize returns the size of all packs computed by index information.
+// If onlyHdr is set to true, only the size of the header is returned
+// Note that this function only gives correct sizes, if there are no
+// duplicates in the index.
+func (mi *MasterIndex) PackSize(ctx context.Context, onlyHdr bool) map[restic.ID]int64 {
+    packSize := make(map[restic.ID]int64)
+
+    for blob := range mi.Each(ctx) {
+        size, ok := packSize[blob.PackID]
+        if !ok {
+            size = pack.HeaderSize
+        }
+        if !onlyHdr {
+            size += int64(blob.Length)
+        }
+        packSize[blob.PackID] = size + int64(pack.EntrySize)
+    }
+
+    return packSize
+}
+
 // Count returns the number of blobs of type t in the index.
 func (mi *MasterIndex) Count(t restic.BlobType) (n uint) {
     mi.idxMutex.RLock()
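A sketch of how the new method might be consumed, for example to total the index-derived on-disk size of all packs. The interface and fake index below are illustrative stand-ins; the concrete type in restic is *MasterIndex with restic.ID keys:

package main

import (
	"context"
	"fmt"
)

// packSizer captures just the method added in this commit; [32]byte stands in
// for restic.ID.
type packSizer interface {
	PackSize(ctx context.Context, onlyHdr bool) map[[32]byte]int64
}

// totalPackSize sums the per-pack sizes reported by the index.
func totalPackSize(ctx context.Context, idx packSizer) int64 {
	var total int64
	for _, size := range idx.PackSize(ctx, false) {
		total += size
	}
	return total
}

// fakeIndex lets the sketch run without a real repository.
type fakeIndex map[[32]byte]int64

func (f fakeIndex) PackSize(_ context.Context, _ bool) map[[32]byte]int64 { return f }

func main() {
	idx := fakeIndex{{1}: 4096, {2}: 8192}
	fmt.Println(totalPackSize(context.Background(), idx)) // 12288
}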
@@ -66,6 +66,7 @@ type MasterIndex interface {
     Lookup(ID, BlobType) []PackedBlob
     Count(BlobType) uint
     Packs() IDSet
+    PackSize(ctx context.Context, onlyHdr bool) map[ID]int64
 
     // Each returns a channel that yields all blobs known to the index. When
     // the context is cancelled, the background goroutine terminates. This