// Copyright (C) 2016 The Protocol Authors.

package protocol

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// BufferPool is the global pool to get buffers from. It requires BlockSizes
// to be initialized, therefore it is initialized in the same init() as
// BlockSizes.
var BufferPool bufferPool

type bufferPool struct {
	puts   int64
	skips  int64
	misses int64
	pools  []sync.Pool
	hits   []int64 // start of slice allocation is always aligned
}

func newBufferPool() bufferPool {
	return bufferPool{
		pools: make([]sync.Pool, len(BlockSizes)),
		hits:  make([]int64, len(BlockSizes)),
	}
}

func (p *bufferPool) Get(size int) []byte {
	// Too big, isn't pooled
	if size > MaxBlockSize {
		atomic.AddInt64(&p.skips, 1)
		return make([]byte, size)
	}

	// Try the fitting and all bigger pools
	bkt := getBucketForLen(size)
	for j := bkt; j < len(BlockSizes); j++ {
		if intf := p.pools[j].Get(); intf != nil {
			atomic.AddInt64(&p.hits[j], 1)
			bs := *intf.(*[]byte)
			return bs[:size]
		}
	}

	atomic.AddInt64(&p.misses, 1)

	// All pools are empty, must allocate. For very small slices where we
	// didn't have a block to reuse, just allocate a small slice instead of
	// a large one. We won't be able to reuse it, but this avoids some
	// overhead.
	if size < MinBlockSize/64 {
		return make([]byte, size)
	}
	return make([]byte, BlockSizes[bkt])[:size]
}

// Put makes the given byte slice available again in the global pool.
// You must only Put() slices that were returned by Get() or Upgrade().
func (p *bufferPool) Put(bs []byte) {
	// Don't buffer slices outside of our pool range
	if cap(bs) > MaxBlockSize || cap(bs) < MinBlockSize {
		atomic.AddInt64(&p.skips, 1)
		return
	}

	atomic.AddInt64(&p.puts, 1)
	bkt := putBucketForCap(cap(bs))
	p.pools[bkt].Put(&bs)
}
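
// exampleGetPut is an illustrative sketch, not part of the original file: it
// shows the intended Get/Put round trip, where a caller asks the pool for a
// buffer of a given size, uses it as a normal byte slice, and hands it back
// so a later Get can reuse the allocation. The requested size is arbitrary.
func exampleGetPut() {
	buf := BufferPool.Get(MinBlockSize) // len(buf) == MinBlockSize; served from a pool when possible
	defer BufferPool.Put(buf)           // only slices from Get() or Upgrade() may be Put() back

	// Use the buffer like any other []byte.
	for i := range buf {
		buf[i] = byte(i)
	}
}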

// Upgrade grows the buffer to the requested size, while attempting to reuse
// it if possible.
func (p *bufferPool) Upgrade(bs []byte, size int) []byte {
	if cap(bs) >= size {
		// Reslicing is enough, let's go!
		return bs[:size]
	}

	// It was too small. Put it back into the pool and try to get another
	// buffer.
	p.Put(bs)
	return p.Get(size)
}
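
// exampleUpgrade is an illustrative sketch, not part of the original file: it
// shows how Upgrade is meant to be used when a buffer turns out to be too
// small. Note that when Upgrade cannot simply reslice, it recycles the old
// buffer and fetches a fresh one, so existing contents are not preserved.
// The sizes below assume 2*MinBlockSize is still within the pooled range.
func exampleUpgrade() {
	buf := BufferPool.Get(MinBlockSize)
	// ... later it turns out we need more room ...
	buf = BufferPool.Upgrade(buf, 2*MinBlockSize)
	defer BufferPool.Put(buf)

	// buf now has length 2*MinBlockSize, but any data written before the
	// Upgrade call may be gone.
	for i := range buf {
		buf[i] = 0
	}
}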

// getBucketForLen returns the bucket where we should get a slice of a
// certain length. Each bucket is guaranteed to hold slices that are
// precisely the block size for that bucket, so if the block size is larger
// than our size we are good.
func getBucketForLen(len int) int {
	for i, blockSize := range BlockSizes {
		if len <= blockSize {
			return i
		}
	}

	panic(fmt.Sprintf("bug: tried to get impossible block len %d", len))
}

// putBucketForCap returns the bucket where we should put a slice of a
// certain capacity. Each bucket is guaranteed to hold slices that are
// precisely the block size for that bucket, so we just find the matching
// one.
func putBucketForCap(cap int) int {
	for i, blockSize := range BlockSizes {
		if cap == blockSize {
			return i
		}
	}

	panic(fmt.Sprintf("bug: tried to put impossible block cap %d", cap))
}
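
// exampleBuckets is an illustrative sketch, not part of the original file: it
// prints how sizes map onto buckets. A length is served from the first bucket
// whose block size is at least as large, while a capacity must match a block
// size exactly to be accepted back by Put.
func exampleBuckets() {
	for _, blockSize := range BlockSizes {
		fmt.Printf("len %d -> get bucket %d, cap %d -> put bucket %d\n",
			blockSize, getBucketForLen(blockSize),
			blockSize, putBucketForCap(blockSize))
	}
}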