
index: Optimize generatePackList

name                 old time/op    new time/op    delta
EncodeIndex/100-8      1.56ms ± 2%    1.48ms ± 3%   -5.37%  (p=0.000 n=10+10)
EncodeIndex/1000-8     14.5ms ± 2%    13.1ms ± 2%   -9.49%  (p=0.000 n=9+10)
EncodeIndex/10000-8     120ms ± 2%     116ms ± 2%   -3.58%  (p=0.000 n=10+10)

name                 old alloc/op   new alloc/op   delta
EncodeIndex/100-8       306kB ± 1%     275kB ± 1%  -10.28%  (p=0.000 n=10+10)
EncodeIndex/1000-8     3.69MB ±11%    2.88MB ± 5%  -22.07%  (p=0.000 n=10+9)
EncodeIndex/10000-8    35.9MB ±11%    31.9MB ±10%  -11.13%  (p=0.005 n=10+10)

name                 old allocs/op  new allocs/op  delta
EncodeIndex/100-8       3.39k ± 0%     2.39k ± 0%  -29.61%  (p=0.000 n=10+10)
EncodeIndex/1000-8      32.6k ± 0%     22.9k ± 0%  -29.63%  (p=0.000 n=10+9)
EncodeIndex/10000-8      326k ± 0%      229k ± 0%  -29.71%  (p=0.000 n=10+10)

The bulk of the allocation rate improvement comes from just removing the
debug.Log calls: every one of those copied a restic.ID to the heap.
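
restic.ID is a fixed-size byte array and debug.Log takes variadic ...interface{} arguments, so each logged ID is boxed into an interface value, which copies it to the heap. A minimal standalone sketch, not restic code (the ID type below is a stand-in), that makes the per-call allocation visible:

package main

import (
    "fmt"
    "testing"
)

// ID is a stand-in for restic.ID, a fixed-size byte array.
type ID [32]byte

var sink interface{}

func main() {
    var id ID
    allocs := testing.AllocsPerRun(1000, func() {
        // Converting the array value to interface{}, as passing it to a
        // ...interface{} parameter would, copies it to a fresh heap object.
        sink = id
    })
    fmt.Println("allocations per boxing:", allocs) // prints 1
}

Dropping the log calls removes that boxing for every blob handled, which is consistent with the roughly 30% reduction in allocs/op shown above.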
greatroar 2023-01-14 20:41:07 +01:00
parent c0b5ec55ab
commit 99755c634b
2 changed files with 31 additions and 17 deletions
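
The tables in the commit message are benchstat output. A sketch of how such a before/after comparison is typically produced with the benchmark added in this commit (the package pattern and file names are assumptions, and the benchmark must be present in both trees, e.g. by applying the test change first):

# with the old generatePackList
go test -run '^$' -bench BenchmarkEncodeIndex -count 10 ./internal/... > old.txt
# with the new generatePackList
go test -run '^$' -bench BenchmarkEncodeIndex -count 10 ./internal/... > new.txt
# benchstat is installed from golang.org/x/perf/cmd/benchstat
benchstat old.txt new.txt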


@@ -317,9 +317,9 @@ type blobJSON struct {
 }
 
 // generatePackList returns a list of packs.
-func (idx *Index) generatePackList() ([]*packJSON, error) {
-    list := []*packJSON{}
-    packs := make(map[restic.ID]*packJSON)
+func (idx *Index) generatePackList() ([]packJSON, error) {
+    list := make([]packJSON, 0, len(idx.packs))
+    packs := make(map[restic.ID]int, len(list)) // Maps to index in list.
 
     for typ := range idx.byType {
         m := &idx.byType[typ]
@@ -329,18 +329,13 @@ func (idx *Index) generatePackList() ([]*packJSON, error) {
             if packID.IsNull() {
                 panic("null pack id")
             }
-            debug.Log("handle blob %v", e.id)
-
-            // see if pack is already in map
-            p, ok := packs[packID]
+            i, ok := packs[packID]
             if !ok {
-                // else create new pack
-                p = &packJSON{ID: packID}
-
-                // and append it to the list and map
-                list = append(list, p)
-                packs[p.ID] = p
+                i = len(list)
+                list = append(list, packJSON{ID: packID})
+                packs[packID] = i
             }
+            p := &list[i]
 
             // add blob
             p.Blobs = append(p.Blobs, blobJSON{
@@ -355,14 +350,12 @@ func (idx *Index) generatePackList() ([]*packJSON, error) {
         })
     }
 
-    debug.Log("done")
-
     return list, nil
 }
 
 type jsonIndex struct {
-    Supersedes restic.IDs  `json:"supersedes,omitempty"`
-    Packs      []*packJSON `json:"packs"`
+    Supersedes restic.IDs `json:"supersedes,omitempty"`
+    Packs      []packJSON `json:"packs"`
 }
 
 // Encode writes the JSON serialization of the index to the writer w.
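
The rewrite above swaps a []*packJSON plus map[restic.ID]*packJSON, one heap object per pack reached through pointers, for a slice of values plus a map from pack ID to slice index. A minimal sketch of that pattern with hypothetical types (not restic code):

package main

import "fmt"

// pack and groupBlobs are hypothetical stand-ins for packJSON and generatePackList.
type pack struct {
    ID    string
    Blobs []string
}

// groupBlobs groups blob names by the pack they live in, storing packs by
// value and remembering their position in the slice instead of their address.
func groupBlobs(blobs map[string]string) []pack {
    list := make([]pack, 0, 8)      // pack values live in the slice itself
    byID := make(map[string]int, 8) // pack ID -> index into list

    for blob, packID := range blobs {
        i, ok := byID[packID]
        if !ok {
            i = len(list)
            list = append(list, pack{ID: packID})
            byID[packID] = i
        }
        // Take the address only after any append and use it immediately:
        // a later append may move the slice's backing array.
        p := &list[i]
        p.Blobs = append(p.Blobs, blob)
    }
    return list
}

func main() {
    fmt.Println(groupBlobs(map[string]string{
        "blob1": "packA", "blob2": "packA", "blob3": "packB",
    }))
}

Each pack now occupies a slot in the slice's backing array instead of its own allocation, and the encoder can marshal []packJSON directly, which likely accounts for the remaining alloc/op improvement in the tables.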


@@ -3,6 +3,7 @@ package index_test
 import (
     "bytes"
     "context"
+    "fmt"
     "math/rand"
     "sync"
     "testing"
@@ -405,6 +406,26 @@ func BenchmarkDecodeIndexParallel(b *testing.B) {
     })
 }
 
+func BenchmarkEncodeIndex(b *testing.B) {
+    for _, n := range []int{100, 1000, 10000} {
+        idx, _ := createRandomIndex(rand.New(rand.NewSource(0)), n)
+
+        b.Run(fmt.Sprint(n), func(b *testing.B) {
+            buf := new(bytes.Buffer)
+            err := idx.Encode(buf)
+            rtest.OK(b, err)
+
+            b.ResetTimer()
+            b.ReportAllocs()
+
+            for i := 0; i < b.N; i++ {
+                buf.Reset()
+                _ = idx.Encode(buf)
+            }
+        })
+    }
+}
+
 func TestIndexUnserializeOld(t *testing.T) {
     idx, oldFormat, err := index.DecodeIndex(docOldExample, restic.NewRandomID())
     rtest.OK(t, err)