2
2
mirror of https://github.com/octoleo/restic.git synced 2024-11-22 12:55:18 +00:00

Remove pools for nodes and IDs

This removes the allocation pools for nodes and IDs. I feel they aren't
really needed. Benchmarks:

    benchmark                         old ns/op      new ns/op      delta
    BenchmarkChunkEncrypt             197890867      198616293      +0.37%
    BenchmarkChunkEncryptParallel     196127004      198819818      +1.37%
    BenchmarkArchiveDirectory         1098848419     1087237723     -1.06%
    BenchmarkPreload                  30464455       29910239       -1.82%
    BenchmarkLoadTree                 3265092        3088543        -5.41%
    BenchmarkEncryptWriter            37213511       37134683       -0.21%
    BenchmarkEncrypt                  36037879       36166546       +0.36%
    BenchmarkDecryptReader            38165659       38556734       +1.02%
    BenchmarkEncryptDecryptReader     77027044       77194987       +0.22%
    BenchmarkDecrypt                  36017602       35937888       -0.22%
    BenchmarkSaveJSON                 47906          50270          +4.93%
    BenchmarkSaveFrom                 49775973       50520969       +1.50%
    BenchmarkLoadJSONID               105290245      107281849      +1.89%
    BenchmarkChunkerWithSHA256        151501430      148264078      -2.14%
    BenchmarkChunkerWithMD5           93606346       94036392       +0.46%
    BenchmarkChunker                  74285431       75933882       +2.22%
    BenchmarkPipelineWalker           387689         346467         -10.63%

    benchmark                         old MB/s     new MB/s     speedup
    BenchmarkChunkEncrypt             52.99        52.79        1.00x
    BenchmarkChunkEncryptParallel     53.46        52.74        0.99x
    BenchmarkEncryptWriter            225.42       225.90       1.00x
    BenchmarkEncrypt                  232.77       231.94       1.00x
    BenchmarkDecryptReader            219.79       217.57       0.99x
    BenchmarkEncryptDecryptReader     108.90       108.67       1.00x
    BenchmarkDecrypt                  232.90       233.42       1.00x
    BenchmarkSaveFrom                 84.26        83.02        0.99x
    BenchmarkChunkerWithSHA256        69.21        70.72        1.02x
    BenchmarkChunkerWithMD5           112.02       111.51       1.00x
    BenchmarkChunker                  141.15       138.09       0.98x

    benchmark                         old allocs     new allocs     delta
    BenchmarkChunkEncrypt             110            110            +0.00%
    BenchmarkChunkEncryptParallel     100            100            +0.00%
    BenchmarkArchiveDirectory         475591         476635         +0.22%
    BenchmarkPreload                  28059          24182          -13.82%
    BenchmarkLoadTree                 3124           2889           -7.52%
    BenchmarkEncryptWriter            19             19             +0.00%
    BenchmarkEncrypt                  13             13             +0.00%
    BenchmarkDecryptReader            16             15             -6.25%
    BenchmarkEncryptDecryptReader     39             39             +0.00%
    BenchmarkDecrypt                  11             11             +0.00%
    BenchmarkSaveJSON                 74             74             +0.00%
    BenchmarkSaveFrom                 109            112            +2.75%
    BenchmarkLoadJSONID               103630         97849          -5.58%
    BenchmarkChunkerWithSHA256        13             13             +0.00%
    BenchmarkChunkerWithMD5           12             12             +0.00%
    BenchmarkChunker                  6              6              +0.00%
    BenchmarkPipelineWalker           212            165            -22.17%

    benchmark                         old bytes     new bytes     delta
    BenchmarkChunkEncrypt             64697         64697         +0.00%
    BenchmarkChunkEncryptParallel     64681         64681         +0.00%
    BenchmarkArchiveDirectory         193385504     193790864     +0.21%
    BenchmarkPreload                  4064701       3942000       -3.02%
    BenchmarkLoadTree                 344954        325396        -5.67%
    BenchmarkEncryptWriter            12793         12793         +0.00%
    BenchmarkEncrypt                  1950          1950          +0.00%
    BenchmarkDecryptReader            3120          2774          -11.09%
    BenchmarkEncryptDecryptReader     1528036       1528036       +0.00%
    BenchmarkDecrypt                  1919          1919          +0.00%
    BenchmarkSaveJSON                 5524          5524          +0.00%
    BenchmarkSaveFrom                 31353         40804         +30.14%
    BenchmarkLoadJSONID               12872020      16010968      +24.39%
    BenchmarkChunkerWithSHA256        26821         26821         +0.00%
    BenchmarkChunkerWithMD5           13554         13554         +0.00%
    BenchmarkChunker                  13458         13458         +0.00%
    BenchmarkPipelineWalker           58584         55560         -5.16%
This commit is contained in:
Alexander Neumann 2015-03-22 16:38:03 +01:00
parent 702b6cfc1e
commit 24b14e21cc
6 changed files with 9 additions and 47 deletions

View File

@@ -108,7 +108,6 @@ func (arch *Archiver) Save(t backend.Type, id backend.ID, length uint, rd io.Rea
 	blob, err := arch.m.FindID(id)
 	if err == nil {
 		debug.Log("Archiver.Save", "Save(%v, %v): reusing %v\n", t, id.Str(), blob.Storage.Str())
-		id.Free()
 		return blob, nil
 	}

View File

@@ -23,7 +23,7 @@ const hashSize = sha256.Size
 // Hash returns the ID for data.
 func Hash(data []byte) ID {
 	h := hashData(data)
-	id := idPool.Get().(ID)
+	id := make([]byte, IDSize)
 	copy(id, h[:])
 	return id
 }

View File

@@ -5,7 +5,6 @@ import (
 	"encoding/hex"
 	"encoding/json"
 	"errors"
-	"sync"
 )

@@ -14,8 +13,6 @@ const IDSize = hashSize
 // References content within a repository.
 type ID []byte

-var idPool = sync.Pool{New: func() interface{} { return ID(make([]byte, IDSize)) }}
-
 // ParseID converts the given string to an ID.
 func ParseID(s string) (ID, error) {
 	b, err := hex.DecodeString(s)

@@ -75,7 +72,7 @@ func (id *ID) UnmarshalJSON(b []byte) error {
 		return err
 	}

-	*id = idPool.Get().(ID)
+	*id = make([]byte, IDSize)
 	_, err = hex.Decode(*id, []byte(s))
 	if err != nil {
 		return err

@@ -86,16 +83,11 @@ func (id *ID) UnmarshalJSON(b []byte) error {
 func IDFromData(d []byte) ID {
 	hash := hashData(d)

-	id := idPool.Get().(ID)
+	id := make([]byte, IDSize)
 	copy(id, hash[:])
 	return id
 }

-// Free returns the ID byte slice back to the allocation pool.
-func (id ID) Free() {
-	idPool.Put(id)
-}
-
 type IDs []ID

 func (ids IDs) Len() int {

10
blob.go
View File

@@ -16,16 +16,6 @@ type Blob struct {
 type Blobs []Blob

-func (b Blob) Free() {
-	if b.ID != nil {
-		b.ID.Free()
-	}
-
-	if b.Storage != nil {
-		b.Storage.Free()
-	}
-}
-
 func (b Blob) Valid() bool {
 	if b.ID == nil || b.Storage == nil || b.StorageSize == 0 {
 		return false

11
node.go
View File

@@ -58,11 +58,12 @@ func (node Node) Tree() *Tree {
 }

 func NodeFromFileInfo(path string, fi os.FileInfo) (*Node, error) {
-	node := GetNode()
-	node.path = path
-	node.Name = fi.Name()
-	node.Mode = fi.Mode() & os.ModePerm
-	node.ModTime = fi.ModTime()
+	node := &Node{
+		path:    path,
+		Name:    fi.Name(),
+		Mode:    fi.Mode() & os.ModePerm,
+		ModTime: fi.ModTime(),
+	}
 	node.Type = nodeTypeFromFileInfo(path, fi)

 	if node.Type == "file" {

View File

@@ -67,7 +67,6 @@ func newPoolStats() *poolStats {
 var (
 	chunkPool   = sync.Pool{New: newChunkBuf}
-	nodePool    = sync.Pool{New: newNode}
 	chunkerPool = sync.Pool{New: newChunker}

 	chunkStats = newPoolStats()

@@ -84,15 +83,6 @@ func newChunkBuf() interface{} {
 	return make([]byte, maxCiphertextSize)
 }

-func newNode() interface{} {
-	nodeStats.m.Lock()
-	defer nodeStats.m.Unlock()
-	nodeStats.new++
-
-	// create buffer for iv, data and hmac
-	return new(Node)
-}
-
 func newChunker() interface{} {
 	chunkStats.m.Lock()
 	defer chunkStats.m.Unlock()

@@ -112,16 +102,6 @@ func FreeChunkBuf(s string, buf []byte) {
 	chunkPool.Put(buf)
 }

-func GetNode() *Node {
-	nodeStats.Get("")
-	return nodePool.Get().(*Node)
-}
-
-func FreeNode(n *Node) {
-	nodeStats.Put("")
-	nodePool.Put(n)
-}
-
 func GetChunker(s string) *chunker.Chunker {
 	chunkerStats.Get(s)
 	return chunkerPool.Get().(*chunker.Chunker)