Use batches in blockmap, speeds up and reduces memory usage on large Replace and Update ops
benchmark                         old ns/op     new ns/op     delta
BenchmarkReplaceAll-8             2880834572    1868198122    -35.15%
BenchmarkUpdateOneChanged-8       236596        231852        -2.01%
BenchmarkUpdateOneUnchanged-8     227326        230624        +1.45%
BenchmarkNeedHalf-8               105151538     104601744     -0.52%
BenchmarkHave-8                   28827492      29102480      +0.95%
BenchmarkGlobal-8                 150768724     150547687     -0.15%
BenchmarkNeedHalfTruncated-8      104434216     102471355     -1.88%
BenchmarkHaveTruncated-8          27860093      28758368      +3.22%
BenchmarkGlobalTruncated-8        149972888     151192913     +0.81%

benchmark                         old allocs    new allocs    delta
BenchmarkReplaceAll-8             555451        555577        +0.02%
BenchmarkUpdateOneChanged-8       1135          1135          +0.00%
BenchmarkUpdateOneUnchanged-8     1135          1135          +0.00%
BenchmarkNeedHalf-8               374779        374780        +0.00%
BenchmarkHave-8                   151996        151992        -0.00%
BenchmarkGlobal-8                 530066        530033        -0.01%
BenchmarkNeedHalfTruncated-8      374702        374699        -0.00%
BenchmarkHaveTruncated-8          151834        151834        +0.00%
BenchmarkGlobalTruncated-8        530049        530037        -0.00%

benchmark                         old bytes     new bytes     delta
BenchmarkReplaceAll-8             5018351912    1765116216    -64.83%
BenchmarkUpdateOneChanged-8       135085        135085        +0.00%
BenchmarkUpdateOneUnchanged-8     134976        134976        +0.00%
BenchmarkNeedHalf-8               44769400      44758752      -0.02%
BenchmarkHave-8                   11930612      11845052      -0.72%
BenchmarkGlobal-8                 81523668      80431136      -1.34%
BenchmarkNeedHalfTruncated-8      46692342      46526459      -0.36%
BenchmarkHaveTruncated-8          11348357      11348357      +0.00%
BenchmarkGlobalTruncated-8        81843956      80977672      -1.06%
This commit is contained in:
parent 0d9a04c713
commit 918ef4dff8
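The change itself is small: instead of accumulating an entire Replace/Update pass in a single leveldb.Batch, each loop now flushes and resets the batch once batch.Len() exceeds maxBatchSize, so the memory held by pending writes stays bounded even for very large operations. Below is a minimal, self-contained sketch of that flush-and-reset pattern with goleveldb; the database path, the key format, and the 1000-operation threshold are illustrative choices, not values taken from the commit.

// Sketch of the flush-and-reset batching pattern used by this commit.
// Assumed/illustrative: the database path, the generated keys and values,
// and the maxBatch threshold. Only the pattern itself mirrors the diff.
package main

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
)

const maxBatch = 1000 // illustrative; the commit uses maxBatchSize = 256 << 10

func main() {
	db, err := leveldb.OpenFile("/tmp/batch-example-db", nil) // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	batch := new(leveldb.Batch)
	for i := 0; i < 100000; i++ {
		// Flush and reset the batch before it grows too large, so a bulk
		// write never buffers all of its pending operations in memory.
		if batch.Len() > maxBatch {
			if err := db.Write(batch, nil); err != nil {
				log.Fatal(err)
			}
			batch.Reset()
		}
		batch.Put([]byte(fmt.Sprintf("key-%08d", i)), []byte("value"))
	}
	// Commit whatever is left after the loop.
	if err := db.Write(batch, nil); err != nil {
		log.Fatal(err)
	}
}

The same guard appears at the top of each loop body in the hunks below.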
@@ -26,6 +26,8 @@ import (
 
 var blockFinder *BlockFinder
 
+const maxBatchSize = 256 << 10
+
 type BlockMap struct {
 	db     *leveldb.DB
 	folder string
@@ -44,6 +46,13 @@ func (m *BlockMap) Add(files []protocol.FileInfo) error {
 	buf := make([]byte, 4)
 	var key []byte
 	for _, file := range files {
+		if batch.Len() > maxBatchSize {
+			if err := m.db.Write(batch, nil); err != nil {
+				return err
+			}
+			batch.Reset()
+		}
+
 		if file.IsDirectory() || file.IsDeleted() || file.IsInvalid() {
 			continue
 		}
@@ -63,6 +72,13 @@ func (m *BlockMap) Update(files []protocol.FileInfo) error {
 	buf := make([]byte, 4)
 	var key []byte
 	for _, file := range files {
+		if batch.Len() > maxBatchSize {
+			if err := m.db.Write(batch, nil); err != nil {
+				return err
+			}
+			batch.Reset()
+		}
+
 		if file.IsDirectory() {
 			continue
 		}
@@ -89,6 +105,13 @@ func (m *BlockMap) Discard(files []protocol.FileInfo) error {
 	batch := new(leveldb.Batch)
 	var key []byte
 	for _, file := range files {
+		if batch.Len() > maxBatchSize {
+			if err := m.db.Write(batch, nil); err != nil {
+				return err
+			}
+			batch.Reset()
+		}
+
 		for _, block := range file.Blocks {
 			key = m.blockKeyInto(key, block.Hash, file.Name)
 			batch.Delete(key)
@@ -103,6 +126,13 @@ func (m *BlockMap) Drop() error {
 	iter := m.db.NewIterator(util.BytesPrefix(m.blockKeyInto(nil, nil, "")[:1+64]), nil)
 	defer iter.Release()
 	for iter.Next() {
+		if batch.Len() > maxBatchSize {
+			if err := m.db.Write(batch, nil); err != nil {
+				return err
+			}
+			batch.Reset()
+		}
+
 		batch.Delete(iter.Key())
 	}
 	if iter.Error() != nil {
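The Drop() hunk applies the same threshold while iterating every key under the folder's block prefix and queuing deletes. A standalone version of that prefix-delete pattern is sketched below; dropPrefix, the prefix bytes, the database path, and the 1000-operation threshold are illustrative names and values, not part of the commit.

// Standalone sketch of the batched prefix-delete pattern from the Drop() hunk.
// Assumed/illustrative: dropPrefix, the prefix bytes, the path, and maxBatch.
package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/util"
)

const maxBatch = 1000 // illustrative threshold

// dropPrefix deletes every key that starts with prefix, flushing the delete
// batch periodically so a huge key range never builds one enormous batch.
func dropPrefix(db *leveldb.DB, prefix []byte) error {
	batch := new(leveldb.Batch)
	iter := db.NewIterator(util.BytesPrefix(prefix), nil)
	defer iter.Release()

	for iter.Next() {
		if batch.Len() > maxBatch {
			if err := db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
		}
		// Batch.Delete copies the key into the batch's buffer, so it is safe
		// to queue iter.Key() even though the iterator reuses its slices.
		batch.Delete(iter.Key())
	}
	if err := iter.Error(); err != nil {
		return err
	}
	// Commit any remaining deletes.
	return db.Write(batch, nil)
}

func main() {
	db, err := leveldb.OpenFile("/tmp/drop-example-db", nil) // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := dropPrefix(db, []byte("some-prefix/")); err != nil { // hypothetical prefix
		log.Fatal(err)
	}
}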