
repository: unexport PackBlobIterator

Michael Eischer 2024-05-10 16:29:48 +02:00
parent 94e863885c
commit aa4647f773
2 changed files with 18 additions and 18 deletions
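The change renames the exported identifiers PackBlobIterator, PackBlobValue, NewPackBlobIterator, and ErrPackEOF to the unexported packBlobIterator, packBlobValue, newPackBlobIterator, and errPackEOF, so the pack blob iterator is only reachable from inside the repository package. As a minimal sketch of the resulting in-package usage pattern (not part of the commit; the helper consumePack and its handle callback are hypothetical, while the types and signatures come from the diff below):

// consumePack is a hypothetical in-package helper mirroring the call sites
// updated in this commit: it drives the now-unexported iterator until
// errPackEOF signals that all blobs of the pack have been read.
func consumePack(packID restic.ID, rd discardReader, blobs []restic.Blob,
	key *crypto.Key, dec *zstd.Decoder, handle func(packBlobValue)) error {
	it := newPackBlobIterator(packID, rd, 0, blobs, key, dec)
	for {
		val, err := it.Next()
		if err == errPackEOF {
			return nil // end of pack reached
		} else if err != nil {
			return err
		}
		// val.Handle, val.Plaintext and val.Err describe one decoded blob;
		// val.Err carries per-blob errors (e.g. decryption failures) so the
		// caller can decide whether to skip the blob or abort.
		handle(val)
	}
}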

View File

@@ -88,10 +88,10 @@ func checkPackInner(ctx context.Context, r restic.Repository, id restic.ID, blob
 		hrd := hashing.NewReader(rd, sha256.New())
 		bufRd.Reset(hrd)

-		it := NewPackBlobIterator(id, newBufReader(bufRd), 0, blobs, r.Key(), dec)
+		it := newPackBlobIterator(id, newBufReader(bufRd), 0, blobs, r.Key(), dec)
 		for {
 			val, err := it.Next()
-			if err == ErrPackEOF {
+			if err == errPackEOF {
 				break
 			} else if err != nil {
 				return &partialReadError{err}

View File

@@ -271,7 +271,7 @@ func (r *Repository) loadBlob(ctx context.Context, blobs []restic.PackedBlob, bu
 			continue
 		}

-		it := NewPackBlobIterator(blob.PackID, newByteReader(buf), uint(blob.Offset), []restic.Blob{blob.Blob}, r.key, r.getZstdDecoder())
+		it := newPackBlobIterator(blob.PackID, newByteReader(buf), uint(blob.Offset), []restic.Blob{blob.Blob}, r.key, r.getZstdDecoder())
 		pbv, err := it.Next()

 		if err == nil {
@@ -1029,11 +1029,11 @@ func streamPackPart(ctx context.Context, beLoad backendLoadFn, loadBlobFn loadBl
 		return errors.Wrap(err, "StreamPack")
 	}

-	it := NewPackBlobIterator(packID, newByteReader(data), dataStart, blobs, key, dec)
+	it := newPackBlobIterator(packID, newByteReader(data), dataStart, blobs, key, dec)

 	for {
 		val, err := it.Next()
-		if err == ErrPackEOF {
+		if err == errPackEOF {
 			break
 		} else if err != nil {
 			return err
@@ -1098,7 +1098,7 @@ func (b *byteReader) ReadFull(n int) (buf []byte, err error) {
 	return buf, nil
 }

-type PackBlobIterator struct {
+type packBlobIterator struct {
 	packID        restic.ID
 	rd            discardReader
 	currentOffset uint
@@ -1110,17 +1110,17 @@ type PackBlobIterator struct {
 	decode []byte
 }

-type PackBlobValue struct {
+type packBlobValue struct {
 	Handle    restic.BlobHandle
 	Plaintext []byte
 	Err       error
 }

-var ErrPackEOF = errors.New("reached EOF of pack file")
+var errPackEOF = errors.New("reached EOF of pack file")

-func NewPackBlobIterator(packID restic.ID, rd discardReader, currentOffset uint,
-	blobs []restic.Blob, key *crypto.Key, dec *zstd.Decoder) *PackBlobIterator {
-	return &PackBlobIterator{
+func newPackBlobIterator(packID restic.ID, rd discardReader, currentOffset uint,
+	blobs []restic.Blob, key *crypto.Key, dec *zstd.Decoder) *packBlobIterator {
+	return &packBlobIterator{
 		packID:        packID,
 		rd:            rd,
 		currentOffset: currentOffset,
@@ -1131,9 +1131,9 @@ func NewPackBlobIterator(packID restic.ID, rd discardReader, currentOffset uint,
 }

 // Next returns the next blob, an error or ErrPackEOF if all blobs were read
-func (b *PackBlobIterator) Next() (PackBlobValue, error) {
+func (b *packBlobIterator) Next() (packBlobValue, error) {
 	if len(b.blobs) == 0 {
-		return PackBlobValue{}, ErrPackEOF
+		return packBlobValue{}, errPackEOF
 	}

 	entry := b.blobs[0]
@@ -1141,12 +1141,12 @@ func (b *PackBlobIterator) Next() (PackBlobValue, error) {
 	skipBytes := int(entry.Offset - b.currentOffset)
 	if skipBytes < 0 {
-		return PackBlobValue{}, fmt.Errorf("overlapping blobs in pack %v", b.packID)
+		return packBlobValue{}, fmt.Errorf("overlapping blobs in pack %v", b.packID)
 	}

 	_, err := b.rd.Discard(skipBytes)
 	if err != nil {
-		return PackBlobValue{}, err
+		return packBlobValue{}, err
 	}
 	b.currentOffset = entry.Offset
@@ -1156,14 +1156,14 @@ func (b *PackBlobIterator) Next() (PackBlobValue, error) {
 	buf, err := b.rd.ReadFull(int(entry.Length))
 	if err != nil {
 		debug.Log(" read error %v", err)
-		return PackBlobValue{}, fmt.Errorf("readFull: %w", err)
+		return packBlobValue{}, fmt.Errorf("readFull: %w", err)
 	}

 	b.currentOffset = entry.Offset + entry.Length

 	if int(entry.Length) <= b.key.NonceSize() {
 		debug.Log("%v", b.blobs)
-		return PackBlobValue{}, fmt.Errorf("invalid blob length %v", entry)
+		return packBlobValue{}, fmt.Errorf("invalid blob length %v", entry)
 	}

 	// decryption errors are likely permanent, give the caller a chance to skip them
@@ -1191,7 +1191,7 @@ func (b *PackBlobIterator) Next() (PackBlobValue, error) {
 		}
 	}

-	return PackBlobValue{entry.BlobHandle, plaintext, err}, nil
+	return packBlobValue{entry.BlobHandle, plaintext, err}, nil
 }

 var zeroChunkOnce sync.Once