
archiver: Remove cleanup goroutine from BufferPool

The goroutine wasn't doing anything useful: channels are cleaned up by the
GC once the last reference to them disappears, just like all other data
structures. Also inlined BufferPool.Put into Buffer.Release, its only
caller.
greatroar 2022-05-29 17:07:37 +02:00
parent dde8e9e296
commit 0db1d11b2e
2 changed files with 15 additions and 50 deletions
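
To illustrate the garbage-collection argument in the commit message, here is a minimal, self-contained Go sketch (an assumed example, not restic code): once the last reference to a channel-backed pool is dropped, the runtime reclaims the pool, its channel, and any buffers still queued in it, so no explicit cleanup goroutine is required. The pool type and the finalizer hook below are hypothetical and exist only for this demonstration.

// Assumed demonstration code, not part of restic: a channel-backed pool is
// reclaimed by the GC once the last reference to it is dropped.
package main

import (
	"fmt"
	"runtime"
	"time"
)

type pool struct {
	ch chan []byte
}

func main() {
	collected := make(chan struct{})

	p := &pool{ch: make(chan []byte, 4)}
	p.ch <- make([]byte, 1<<20) // the pool currently holds one buffer

	// Ask the runtime to signal when the pool value becomes unreachable;
	// the channel and the buffer it holds go away with it.
	runtime.SetFinalizer(p, func(*pool) { close(collected) })

	p = nil // drop the last reference
	runtime.GC()

	select {
	case <-collected:
		fmt.Println("pool, channel and queued buffer were garbage collected")
	case <-time.After(time.Second):
		fmt.Println("finalizer has not run yet; GC timing is not deterministic")
	}
}

The finalizer only reports that collection happened; since GC timing is nondeterministic, the timeout branch keeps the sketch from hanging.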


@@ -1,52 +1,44 @@
 package archiver
 
-import (
-	"context"
-	"sync"
-)
-
 // Buffer is a reusable buffer. After the buffer has been used, Release should
 // be called so the underlying slice is put back into the pool.
 type Buffer struct {
 	Data []byte
-	Put  func(*Buffer)
+	pool *BufferPool
 }
 
 // Release puts the buffer back into the pool it came from.
 func (b *Buffer) Release() {
-	if b.Put != nil {
-		b.Put(b)
+	pool := b.pool
+	if pool == nil || cap(b.Data) > pool.defaultSize {
+		return
+	}
+
+	select {
+	case pool.ch <- b:
+	default:
 	}
 }
 
 // BufferPool implements a limited set of reusable buffers.
 type BufferPool struct {
 	ch          chan *Buffer
-	chM         sync.Mutex
 	defaultSize int
-	clearOnce   sync.Once
 }
 
-// NewBufferPool initializes a new buffer pool. When the context is cancelled,
-// all buffers are released. The pool stores at most max items. New buffers are
-// created with defaultSize, buffers that are larger are released and not put
-// back.
-func NewBufferPool(ctx context.Context, max int, defaultSize int) *BufferPool {
+// NewBufferPool initializes a new buffer pool. The pool stores at most max
+// items. New buffers are created with defaultSize. Buffers that have grown
+// larger are not put back.
+func NewBufferPool(max int, defaultSize int) *BufferPool {
 	b := &BufferPool{
 		ch:          make(chan *Buffer, max),
 		defaultSize: defaultSize,
 	}
 
-	go func() {
-		<-ctx.Done()
-		b.clear()
-	}()
-
 	return b
 }
 
 // Get returns a new buffer, either from the pool or newly allocated.
 func (pool *BufferPool) Get() *Buffer {
-	pool.chM.Lock()
-	defer pool.chM.Unlock()
 	select {
 	case buf := <-pool.ch:
 		return buf
@@ -54,36 +46,9 @@ func (pool *BufferPool) Get() *Buffer {
 	}
 
 	b := &Buffer{
-		Put:  pool.Put,
 		Data: make([]byte, pool.defaultSize),
+		pool: pool,
 	}
 
 	return b
 }
-
-// Put returns a buffer to the pool for reuse.
-func (pool *BufferPool) Put(b *Buffer) {
-	if cap(b.Data) > pool.defaultSize {
-		return
-	}
-	pool.chM.Lock()
-	defer pool.chM.Unlock()
-	select {
-	case pool.ch <- b:
-	default:
-	}
-}
-
-// clear empties the buffer so that all items can be garbage collected.
-func (pool *BufferPool) clear() {
-	pool.clearOnce.Do(func() {
-		ch := pool.ch
-
-		pool.chM.Lock()
-		pool.ch = nil
-		pool.chM.Unlock()
-
-		close(ch)
-		for range ch {
-		}
-	})
-}
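
A hypothetical regression test (not part of this commit) for the inlined Release logic could sit next to the pool in package archiver; it checks that a default-sized buffer is returned to the pool on Release, while a buffer that has grown past defaultSize is dropped. Test name and placement are assumed; the API calls are the ones shown in the diff above.

package archiver

import "testing"

// TestBufferPoolRelease is a sketch only; the test name and file are assumed.
func TestBufferPoolRelease(t *testing.T) {
	pool := NewBufferPool(1, 16)

	// A buffer of the default size goes back into the pool on Release.
	b := pool.Get()
	b.Release()
	if got := len(pool.ch); got != 1 {
		t.Fatalf("want 1 pooled buffer, got %d", got)
	}

	// A buffer that has grown past defaultSize is dropped instead.
	big := pool.Get() // takes the pooled buffer back out
	big.Data = make([]byte, 32)
	big.Release()
	if got := len(pool.ch); got != 0 {
		t.Fatalf("oversized buffer should be dropped, got %d pooled", got)
	}
}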


@@ -76,7 +76,7 @@ func NewFileSaver(ctx context.Context, t *tomb.Tomb, save SaveBlobFn, pol chunke
 	s := &FileSaver{
 		saveBlob:     save,
-		saveFilePool: NewBufferPool(ctx, int(poolSize), chunker.MaxSize),
+		saveFilePool: NewBufferPool(int(poolSize), chunker.MaxSize),
 		pol:          pol,
 		ch:           ch,