Decrease allocation rate in internal/pack
internal/repository benchmark results:

name             old time/op    new time/op    delta
PackerManager-8     179ms ± 1%     181ms ± 1%   +0.78%  (p=0.009 n=10+10)

name             old speed      new speed      delta
PackerManager-8   294MB/s ± 1%   292MB/s ± 1%   -0.77%  (p=0.009 n=10+10)

name             old alloc/op   new alloc/op   delta
PackerManager-8    91.3kB ± 0%    72.2kB ± 0%  -20.92%  (p=0.000 n=9+7)

name             old allocs/op  new allocs/op  delta
PackerManager-8     1.38k ± 0%     0.76k ± 0%  -45.20%  (p=0.000 n=10+7)
parent 9a8a2cae4c
commit ab2b7d7f9a
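The numbers above appear to be benchstat output comparing repeated runs of the PackerManager benchmark: time per operation is essentially unchanged (here about 0.8% slower), while allocations drop sharply. The reduction comes from dropping the reflection-based encoding/binary struct round trip (binary.Write into a bytes.Buffer when writing the header, binary.Read when listing it) in favour of appending hand-encoded little-endian fields to a single preallocated byte slice. Below is a minimal standalone sketch of the two styles; the entry type and the encodeReflect/encodeAppend helpers are illustrative stand-ins, not code from this commit.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// entry mimics the pack header entry: a one-byte type, a
// little-endian uint32 length and a 32-byte ID.
type entry struct {
	Type   uint8
	Length uint32
	ID     [32]byte
}

// encodeReflect encodes every entry with encoding/binary, which goes
// through reflection and allocates temporaries on each call.
func encodeReflect(entries []entry) []byte {
	buf := new(bytes.Buffer)
	for _, e := range entries {
		// The error can be ignored: the data is fixed-size and
		// writes to a bytes.Buffer do not fail.
		_ = binary.Write(buf, binary.LittleEndian, e)
	}
	return buf.Bytes()
}

// encodeAppend hand-encodes the same bytes into one preallocated
// slice; the only heap allocation is the initial make.
func encodeAppend(entries []entry) []byte {
	const entrySize = 1 + 4 + 32
	buf := make([]byte, 0, len(entries)*entrySize)
	for _, e := range entries {
		buf = append(buf, e.Type)
		var lenLE [4]byte
		binary.LittleEndian.PutUint32(lenLE[:], e.Length)
		buf = append(buf, lenLE[:]...)
		buf = append(buf, e.ID[:]...)
	}
	return buf
}

func main() {
	entries := []entry{{Type: 0, Length: 42}, {Type: 1, Length: 7}}
	// Both encoders produce identical bytes.
	fmt.Println(bytes.Equal(encodeReflect(entries), encodeAppend(entries))) // true
}
```

Benchmarking the two variants with go test -bench should show the append-based encoder allocating only once for the destination slice, while the binary.Write path allocates on every call, which is consistent with the alloc/op and allocs/op deltas reported above.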
@@ -1,7 +1,6 @@
 package pack
 
 import (
-	"bytes"
 	"encoding/binary"
 	"fmt"
 	"io"
@@ -49,7 +48,8 @@ func (p *Packer) Add(t restic.BlobType, id restic.ID, data []byte) (int, error)
 
 var entrySize = uint(binary.Size(restic.BlobType(0)) + binary.Size(uint32(0)) + len(restic.ID{}))
 
-// headerEntry is used with encoding/binary to read and write header entries
+// headerEntry describes the format of header entries. It serves only as
+// documentation.
 type headerEntry struct {
 	Type   uint8
 	Length uint32
@@ -64,16 +64,15 @@ func (p *Packer) Finalize() (uint, error) {
 
 	bytesWritten := p.bytes
 
-	hdrBuf := bytes.NewBuffer(nil)
-	bytesHeader, err := p.writeHeader(hdrBuf)
+	header, err := p.makeHeader()
 	if err != nil {
 		return 0, err
 	}
 
-	encryptedHeader := make([]byte, 0, hdrBuf.Len()+p.k.Overhead()+p.k.NonceSize())
+	encryptedHeader := make([]byte, 0, len(header)+p.k.Overhead()+p.k.NonceSize())
 	nonce := crypto.NewRandomNonce()
 	encryptedHeader = append(encryptedHeader, nonce...)
-	encryptedHeader = p.k.Seal(encryptedHeader, nonce, hdrBuf.Bytes(), nil)
+	encryptedHeader = p.k.Seal(encryptedHeader, nonce, header, nil)
 
 	// append the header
 	n, err := p.wr.Write(encryptedHeader)
@@ -81,7 +80,7 @@ func (p *Packer) Finalize() (uint, error) {
 		return 0, errors.Wrap(err, "Write")
 	}
 
-	hdrBytes := restic.CiphertextLength(int(bytesHeader))
+	hdrBytes := restic.CiphertextLength(len(header))
 	if n != hdrBytes {
 		return 0, errors.New("wrong number of bytes written")
 	}
@@ -99,32 +98,27 @@ func (p *Packer) Finalize() (uint, error) {
 	return bytesWritten, nil
 }
 
-// writeHeader constructs and writes the header to wr.
-func (p *Packer) writeHeader(wr io.Writer) (bytesWritten uint, err error) {
-	for _, b := range p.blobs {
-		entry := headerEntry{
-			Length: uint32(b.Length),
-			ID:     b.ID,
-		}
+// makeHeader constructs the header for p.
+func (p *Packer) makeHeader() ([]byte, error) {
+	buf := make([]byte, 0, len(p.blobs)*int(entrySize))
 
+	for _, b := range p.blobs {
 		switch b.Type {
 		case restic.DataBlob:
-			entry.Type = 0
+			buf = append(buf, 0)
 		case restic.TreeBlob:
-			entry.Type = 1
+			buf = append(buf, 1)
 		default:
-			return 0, errors.Errorf("invalid blob type %v", b.Type)
+			return nil, errors.Errorf("invalid blob type %v", b.Type)
 		}
 
-		err := binary.Write(wr, binary.LittleEndian, entry)
-		if err != nil {
-			return bytesWritten, errors.Wrap(err, "binary.Write")
-		}
-
-		bytesWritten += entrySize
+		var lenLE [4]byte
+		binary.LittleEndian.PutUint32(lenLE[:], uint32(b.Length))
+		buf = append(buf, lenLE[:]...)
+		buf = append(buf, b.ID[:]...)
 	}
 
-	return
+	return buf, nil
 }
 
 // Size returns the number of bytes written so far.
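For reference, the on-disk entry layout that makeHeader writes and that parseHeaderEntry (further down) reads back is: one type byte (0 for a data blob, 1 for a tree blob), the blob length as a little-endian uint32, then the blob ID. Assuming restic.BlobType is a one-byte type and restic.ID is a 32-byte SHA-256 hash, that gives entrySize = 1 + 4 + 32 = 37 bytes per entry, matching the binary.Size-based definition of entrySize above.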
@@ -275,40 +269,19 @@ func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []restic.Blob, err
 		return nil, err
 	}
 
-	hdrRd := bytes.NewReader(buf)
-
 	entries = make([]restic.Blob, 0, uint(len(buf))/entrySize)
 
 	pos := uint(0)
-	for {
-		e := headerEntry{}
-		err = binary.Read(hdrRd, binary.LittleEndian, &e)
-		if errors.Cause(err) == io.EOF {
-			break
-		}
-
+	for len(buf) > 0 {
+		entry, err := parseHeaderEntry(buf)
 		if err != nil {
-			return nil, errors.Wrap(err, "binary.Read")
+			return nil, err
 		}
-
-		entry := restic.Blob{
-			Length: uint(e.Length),
-			ID:     e.ID,
-			Offset: pos,
-		}
-
-		switch e.Type {
-		case 0:
-			entry.Type = restic.DataBlob
-		case 1:
-			entry.Type = restic.TreeBlob
-		default:
-			return nil, errors.Errorf("invalid type %d", e.Type)
-		}
+		entry.Offset = pos
 
 		entries = append(entries, entry)
-
-		pos += uint(e.Length)
+		pos += entry.Length
+		buf = buf[entrySize:]
 	}
 
 	return entries, nil
@@ -318,3 +291,25 @@ func List(k *crypto.Key, rd io.ReaderAt, size int64) (entries []restic.Blob, err
 func PackedSizeOfBlob(blobLength uint) uint {
 	return blobLength + entrySize
 }
+
+func parseHeaderEntry(p []byte) (b restic.Blob, err error) {
+	if uint(len(p)) < entrySize {
+		err = errors.Errorf("parseHeaderEntry: buffer of size %d too short", len(p))
+		return b, err
+	}
+	p = p[:entrySize]
+
+	switch p[0] {
+	case 0:
+		b.Type = restic.DataBlob
+	case 1:
+		b.Type = restic.TreeBlob
+	default:
+		return b, errors.Errorf("invalid type %d", p[0])
+	}
+
+	b.Length = uint(binary.LittleEndian.Uint32(p[1:5]))
+	copy(b.ID[:], p[5:])
+
+	return b, nil
+}
@@ -7,9 +7,44 @@ import (
 	"testing"
 
 	"github.com/restic/restic/internal/crypto"
+	"github.com/restic/restic/internal/restic"
 	rtest "github.com/restic/restic/internal/test"
 )
 
+func TestParseHeaderEntry(t *testing.T) {
+	h := headerEntry{
+		Type:   0, // Blob.
+		Length: 100,
+	}
+	for i := range h.ID {
+		h.ID[i] = byte(i)
+	}
+
+	buf := new(bytes.Buffer)
+	_ = binary.Write(buf, binary.LittleEndian, &h)
+
+	b, err := parseHeaderEntry(buf.Bytes())
+	rtest.OK(t, err)
+	rtest.Equals(t, restic.DataBlob, b.Type)
+	t.Logf("%v %v", h.ID, b.ID)
+	rtest.Assert(t, bytes.Equal(h.ID[:], b.ID[:]), "id mismatch")
+	rtest.Equals(t, uint(h.Length), b.Length)
+
+	h.Type = 0xae
+	buf.Reset()
+	_ = binary.Write(buf, binary.LittleEndian, &h)
+
+	b, err = parseHeaderEntry(buf.Bytes())
+	rtest.Assert(t, err != nil, "no error for invalid type")
+
+	h.Type = 0
+	buf.Reset()
+	_ = binary.Write(buf, binary.LittleEndian, &h)
+
+	b, err = parseHeaderEntry(buf.Bytes()[:entrySize-1])
+	rtest.Assert(t, err != nil, "no error for short input")
+}
+
 type countingReaderAt struct {
 	delegate        io.ReaderAt
 	invocationCount int
@@ -57,7 +57,10 @@ func (r *packerManager) findPacker() (packer *Packer, err error) {
 	// search for a suitable packer
 	if len(r.packers) > 0 {
 		p := r.packers[0]
-		r.packers = r.packers[1:]
+		last := len(r.packers) - 1
+		r.packers[0] = r.packers[last]
+		r.packers[last] = nil // Allow GC of stale reference.
+		r.packers = r.packers[:last]
 		return p, nil
 	}
 
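The findPacker change above replaces r.packers = r.packers[1:] with the constant-time swap-remove idiom: the first packer is returned, the last element is moved into its slot, the now-unused tail slot is set to nil so the old *Packer can be garbage collected, and the slice is shortened. Element order is not preserved, which appears to be acceptable here since any cached packer will do. A generic sketch of the idiom follows; the removeAt helper is illustrative, not restic code.

```go
package main

import "fmt"

// removeAt deletes the element at index i from s in O(1) by swapping in
// the last element. The freed tail slot is zeroed so that the removed
// pointer does not stay reachable through the backing array. Element
// order is not preserved.
func removeAt(s []*int, i int) []*int {
	last := len(s) - 1
	s[i] = s[last]
	s[last] = nil // allow GC of the stale reference
	return s[:last]
}

func main() {
	a, b, c := 1, 2, 3
	s := []*int{&a, &b, &c}
	s = removeAt(s, 0)
	for _, p := range s {
		fmt.Println(*p) // prints 3 then 2: the last element moved to the front
	}
}
```

Re-slicing off the front, by contrast, keeps the removed pointer reachable through the backing array until the whole array becomes unreachable.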