
index: Remove pointers from within indexentrys

The indexEntry objects are now allocated in a separate array. References
to an indexEntry are now stored as array indices. This has the benefit
of allowing the garbage collector to ignore the indexEntry objects as
these do not contain pointers and are part of a single large allocation.
Michael Eischer 2022-02-05 21:25:23 +01:00
parent 0c1240360d
commit b217f38ee7
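
The diff below converts restic's indexMap from pointer-linked hash chains to chains linked by indices into a single backing slice. As a rough illustration of that pattern (a minimal sketch only; the names arenaMap, entry, blockList and the modulo hash are invented here and are not restic's API), an index-linked chained hash table in Go can look like this:

package main

import "fmt"

// entry links to the next entry in its bucket chain by index, not by pointer.
type entry struct {
	key  uint64
	next uint // index into blockList; 0 marks the end of a chain
}

type arenaMap struct {
	buckets   []uint  // head index of each bucket's chain; 0 = empty bucket
	blockList []entry // all entries live here; index 0 is a reserved dummy
}

func newArenaMap(nbuckets int) *arenaMap {
	return &arenaMap{
		buckets:   make([]uint, nbuckets),
		blockList: make([]entry, 1), // reserve index 0 so it can mean "none"
	}
}

func (m *arenaMap) add(key uint64) {
	h := key % uint64(len(m.buckets))
	// Append the entry to the arena and prepend it to the bucket's chain.
	m.blockList = append(m.blockList, entry{key: key, next: m.buckets[h]})
	m.buckets[h] = uint(len(m.blockList) - 1)
}

// get returns a pointer into blockList; it stays valid only until the next
// add, because append may move the backing array.
func (m *arenaMap) get(key uint64) *entry {
	h := key % uint64(len(m.buckets))
	for ei := m.buckets[h]; ei != 0; ei = m.blockList[ei].next {
		if m.blockList[ei].key == key {
			return &m.blockList[ei]
		}
	}
	return nil
}

func main() {
	m := newArenaMap(64)
	m.add(42)
	m.add(7)
	fmt.Println(m.get(42) != nil, m.get(99) != nil) // true false
}

Reserving index 0 as a dummy entry lets 0 stand in for a nil link, and storing indices instead of pointers keeps the whole structure pointer-free from the garbage collector's point of view; the trade-off is that a *entry obtained from the slice is only valid until the next add, which may reallocate the backing array.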


@@ -17,12 +17,12 @@ import (
 // needs to be resized when the table grows, preventing memory usage spikes.
 type indexMap struct {
 	// The number of buckets is always a power of two and never zero.
-	buckets    []*indexEntry
+	buckets    []uint
 	numentries uint
 
 	mh maphash.Hash
 
-	free *indexEntry // Free list.
+	blockList []indexEntry
 }
 
 const (
@@ -41,7 +41,7 @@ func (m *indexMap) add(id restic.ID, packIdx int, offset, length uint32, uncompr
 	}
 
 	h := m.hash(id)
-	e := m.newEntry()
+	e, idx := m.newEntry()
 	e.id = id
 	e.next = m.buckets[h] // Prepend to existing chain.
 	e.packIndex = packIdx
@@ -49,18 +49,19 @@ func (m *indexMap) add(id restic.ID, packIdx int, offset, length uint32, uncompr
 	e.length = length
 	e.uncompressedLength = uncompressedLength
 
-	m.buckets[h] = e
+	m.buckets[h] = idx
 	m.numentries++
 }
 
 // foreach calls fn for all entries in the map, until fn returns false.
 func (m *indexMap) foreach(fn func(*indexEntry) bool) {
-	for _, e := range m.buckets {
-		for e != nil {
+	for _, ei := range m.buckets {
+		for ei != 0 {
+			e := m.resolve(ei)
 			if !fn(e) {
 				return
 			}
-			e = e.next
+			ei = e.next
 		}
 	}
 }
@@ -72,7 +73,10 @@ func (m *indexMap) foreachWithID(id restic.ID, fn func(*indexEntry)) {
 	}
 
 	h := m.hash(id)
-	for e := m.buckets[h]; e != nil; e = e.next {
+	ei := m.buckets[h]
+	for ei != 0 {
+		e := m.resolve(ei)
+		ei = e.next
 		if e.id != id {
 			continue
 		}
@@ -87,25 +91,29 @@ func (m *indexMap) get(id restic.ID) *indexEntry {
 	}
 
 	h := m.hash(id)
-	for e := m.buckets[h]; e != nil; e = e.next {
+	ei := m.buckets[h]
+	for ei != 0 {
+		e := m.resolve(ei)
 		if e.id == id {
 			return e
 		}
+		ei = e.next
 	}
 	return nil
 }
 
 func (m *indexMap) grow() {
 	old := m.buckets
-	m.buckets = make([]*indexEntry, growthFactor*len(m.buckets))
+	m.buckets = make([]uint, growthFactor*len(m.buckets))
 
-	for _, e := range old {
-		for e != nil {
+	for _, ei := range old {
+		for ei != 0 {
+			e := m.resolve(ei)
 			h := m.hash(e.id)
 			next := e.next
 			e.next = m.buckets[h]
-			m.buckets[h] = e
-			e = next
+			m.buckets[h] = ei
+			ei = next
 		}
 	}
 }
@@ -124,45 +132,29 @@ func (m *indexMap) hash(id restic.ID) uint {
 func (m *indexMap) init() {
 	const initialBuckets = 64
-	m.buckets = make([]*indexEntry, initialBuckets)
+	m.buckets = make([]uint, initialBuckets)
+	// first entry in blockList serves as null byte
+	m.blockList = make([]indexEntry, 1)
 }
 
 func (m *indexMap) len() uint { return m.numentries }
 
-func (m *indexMap) newEntry() *indexEntry {
-	// We keep a free list of objects to speed up allocation and GC.
-	// There's an obvious trade-off here: allocating in larger batches
-	// means we allocate faster and the GC has to keep fewer bits to track
-	// what we have in use, but it means we waste some space.
-	//
-	// Then again, allocating each indexEntry separately also wastes space
-	// on 32-bit platforms, because the Go malloc has no size class for
-	// exactly 52 bytes, so it puts the indexEntry in a 64-byte slot instead.
-	// See src/runtime/sizeclasses.go in the Go source repo.
-	//
-	// The batch size of 4 means we hit the size classes for 4×64=256 bytes
-	// (64-bit) and 4×52=208 bytes (32-bit), wasting nothing in malloc on
-	// 64-bit and relatively little on 32-bit.
-	const entryAllocBatch = 4
-
-	e := m.free
-	if e != nil {
-		m.free = e.next
-	} else {
-		free := new([entryAllocBatch]indexEntry)
-		e = &free[0]
-		for i := 1; i < len(free)-1; i++ {
-			free[i].next = &free[i+1]
-		}
-		m.free = &free[1]
-	}
-
-	return e
+func (m *indexMap) newEntry() (*indexEntry, uint) {
+	m.blockList = append(m.blockList, indexEntry{})
+
+	idx := uint(len(m.blockList) - 1)
+	e := &m.blockList[idx]
+
+	return e, idx
+}
+
+func (m *indexMap) resolve(idx uint) *indexEntry {
+	return &m.blockList[idx]
 }
 
 type indexEntry struct {
 	id                 restic.ID
-	next               *indexEntry
+	next               uint
 	packIndex          int // Position in containing Index's packs field.
 	offset             uint32
 	length             uint32