Reuse blockkey, speeds up large Update and Replace calls
benchmark                        old ns/op     new ns/op     delta
BenchmarkReplaceAll-8            2866418930    2880834572    +0.50%
BenchmarkUpdateOneChanged-8      226635        236596        +4.40%
BenchmarkUpdateOneUnchanged-8    229090        227326        -0.77%
BenchmarkNeedHalf-8              104483393     105151538     +0.64%
BenchmarkHave-8                  29288220      28827492      -1.57%
BenchmarkGlobal-8                159269126     150768724     -5.34%
BenchmarkNeedHalfTruncated-8     108235000     104434216     -3.51%
BenchmarkHaveTruncated-8         28945489      27860093      -3.75%
BenchmarkGlobalTruncated-8       149355833     149972888     +0.41%

benchmark                        old allocs    new allocs    delta
BenchmarkReplaceAll-8            1054944       555451        -47.35%
BenchmarkUpdateOneChanged-8      1135          1135          +0.00%
BenchmarkUpdateOneUnchanged-8    1135          1135          +0.00%
BenchmarkNeedHalf-8              374777        374779        +0.00%
BenchmarkHave-8                  151995        151996        +0.00%
BenchmarkGlobal-8                530063        530066        +0.00%
BenchmarkNeedHalfTruncated-8     374699        374702        +0.00%
BenchmarkHaveTruncated-8         151834        151834        +0.00%
BenchmarkGlobalTruncated-8       530021        530049        +0.01%

benchmark                        old bytes     new bytes     delta
BenchmarkReplaceAll-8            5074297112    5018351912    -1.10%
BenchmarkUpdateOneChanged-8      135097        135085        -0.01%
BenchmarkUpdateOneUnchanged-8    134976        134976        +0.00%
BenchmarkNeedHalf-8              44759436      44769400      +0.02%
BenchmarkHave-8                  11911138      11930612      +0.16%
BenchmarkGlobal-8                81609867      81523668      -0.11%
BenchmarkNeedHalfTruncated-8     46588024      46692342      +0.22%
BenchmarkHaveTruncated-8         11348354      11348357      +0.00%
BenchmarkGlobalTruncated-8       79485168      81843956      +2.97%
This commit is contained in:
parent 0c0c69f0cf
commit 0d9a04c713
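The change itself is mechanical: each batch-building loop declares one reusable key buffer outside the loop and hands it back to an "Into"-style key builder on every iteration, so a fresh slice is only allocated when the buffer is too small for the current key. A minimal sketch of that caller-plus-builder pattern, with illustrative names rather than the repository's:

// makeKeyInto is an illustrative stand-in for blockKeyInto below: grow the
// destination only when its capacity is insufficient, otherwise reslice it.
func makeKeyInto(o, hash []byte, name string) []byte {
    need := 1 + len(hash) + len(name)
    if cap(o) < need {
        o = make([]byte, need)
    } else {
        o = o[:need]
    }
    o[0] = 0x01 // illustrative key-type byte
    copy(o[1:], hash)
    copy(o[1+len(hash):], name)
    return o
}

// buildKeys shows the caller side: key is declared once and reassigned from
// the builder each time round the loop, so only the first call (or one with
// a longer name than any seen so far) allocates.
func buildKeys(names []string, hash []byte) int {
    var key []byte
    total := 0
    for _, name := range names {
        key = makeKeyInto(key, hash, name)
        total += len(key)
    }
    return total
}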
@@ -42,6 +42,7 @@ func NewBlockMap(db *leveldb.DB, folder string) *BlockMap {
 func (m *BlockMap) Add(files []protocol.FileInfo) error {
     batch := new(leveldb.Batch)
     buf := make([]byte, 4)
+    var key []byte
     for _, file := range files {
         if file.IsDirectory() || file.IsDeleted() || file.IsInvalid() {
             continue
@@ -49,7 +50,8 @@ func (m *BlockMap) Add(files []protocol.FileInfo) error {
 
         for i, block := range file.Blocks {
             binary.BigEndian.PutUint32(buf, uint32(i))
-            batch.Put(m.blockKey(block.Hash, file.Name), buf)
+            key = m.blockKeyInto(key, block.Hash, file.Name)
+            batch.Put(key, buf)
         }
     }
     return m.db.Write(batch, nil)
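Reusing key across iterations is only sound if the batch does not hold on to the caller's slice. goleveldb's leveldb.Batch encodes each Put and Delete into an internal buffer rather than retaining the key slice it was given, which is what makes the reuse above safe. A small sketch of that invariant (the literal key contents are made up for illustration):

package example

import "github.com/syndtr/goleveldb/leveldb"

func buildBatch() *leveldb.Batch {
    batch := new(leveldb.Batch)
    key := []byte("block-key-0")
    batch.Put(key, []byte{0, 0, 0, 0})
    // Safe: the batch copied the bytes above, so rewriting the slice for
    // the next entry does not corrupt the one already queued.
    key[len(key)-1] = '1'
    batch.Put(key, []byte{0, 0, 0, 1})
    return batch
}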
@@ -59,6 +61,7 @@ func (m *BlockMap) Add(files []protocol.FileInfo) error {
 func (m *BlockMap) Update(files []protocol.FileInfo) error {
     batch := new(leveldb.Batch)
     buf := make([]byte, 4)
+    var key []byte
     for _, file := range files {
         if file.IsDirectory() {
             continue
@@ -66,14 +69,16 @@ func (m *BlockMap) Update(files []protocol.FileInfo) error {
 
         if file.IsDeleted() || file.IsInvalid() {
             for _, block := range file.Blocks {
-                batch.Delete(m.blockKey(block.Hash, file.Name))
+                key = m.blockKeyInto(key, block.Hash, file.Name)
+                batch.Delete(key)
             }
             continue
         }
 
         for i, block := range file.Blocks {
             binary.BigEndian.PutUint32(buf, uint32(i))
-            batch.Put(m.blockKey(block.Hash, file.Name), buf)
+            key = m.blockKeyInto(key, block.Hash, file.Name)
+            batch.Put(key, buf)
         }
     }
     return m.db.Write(batch, nil)
@@ -82,9 +87,11 @@ func (m *BlockMap) Update(files []protocol.FileInfo) error {
 // Discard block map state, removing the given files
 func (m *BlockMap) Discard(files []protocol.FileInfo) error {
     batch := new(leveldb.Batch)
+    var key []byte
     for _, file := range files {
         for _, block := range file.Blocks {
-            batch.Delete(m.blockKey(block.Hash, file.Name))
+            key = m.blockKeyInto(key, block.Hash, file.Name)
+            batch.Delete(key)
         }
     }
     return m.db.Write(batch, nil)
@@ -93,7 +100,7 @@ func (m *BlockMap) Discard(files []protocol.FileInfo) error {
 // Drop block map, removing all entries related to this block map from the db.
 func (m *BlockMap) Drop() error {
     batch := new(leveldb.Batch)
-    iter := m.db.NewIterator(util.BytesPrefix(m.blockKey(nil, "")[:1+64]), nil)
+    iter := m.db.NewIterator(util.BytesPrefix(m.blockKeyInto(nil, nil, "")[:1+64]), nil)
     defer iter.Release()
     for iter.Next() {
         batch.Delete(iter.Key())
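Drop builds its scan prefix by calling blockKeyInto with a nil buffer, nil hash and an empty file name, then slicing the result down to the 1-byte key type plus the 64-byte folder field, so the iterator visits every block key belonging to this folder. Written out as a standalone sketch against goleveldb (dropPrefix is an illustrative name, not part of the repository):

package example

import (
    "github.com/syndtr/goleveldb/leveldb"
    "github.com/syndtr/goleveldb/leveldb/util"
)

// dropPrefix deletes every key that starts with prefix in one batched write,
// mirroring the shape of Drop above, where the prefix is the key type byte
// followed by the folder field.
func dropPrefix(db *leveldb.DB, prefix []byte) error {
    batch := new(leveldb.Batch)
    iter := db.NewIterator(util.BytesPrefix(prefix), nil)
    defer iter.Release()
    for iter.Next() {
        batch.Delete(iter.Key())
    }
    if err := iter.Error(); err != nil {
        return err
    }
    return db.Write(batch, nil)
}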
@@ -104,8 +111,8 @@ func (m *BlockMap) Drop() error {
     return m.db.Write(batch, nil)
 }
 
-func (m *BlockMap) blockKey(hash []byte, file string) []byte {
-    return toBlockKey(hash, m.folder, file)
+func (m *BlockMap) blockKeyInto(o, hash []byte, file string) []byte {
+    return blockKeyInto(o, hash, m.folder, file)
 }
 
 type BlockFinder struct {
@@ -134,8 +141,9 @@ func (f *BlockFinder) String() string {
 // reason. The iterator finally returns the result, whether or not a
 // satisfying block was eventually found.
 func (f *BlockFinder) Iterate(folders []string, hash []byte, iterFn func(string, string, int32) bool) bool {
+    var key []byte
     for _, folder := range folders {
-        key := toBlockKey(hash, folder, "")
+        key = blockKeyInto(key, hash, folder, "")
         iter := f.db.NewIterator(util.BytesPrefix(key), nil)
         defer iter.Release()
 
@@ -157,8 +165,8 @@ func (f *BlockFinder) Fix(folder, file string, index int32, oldHash, newHash []b
     binary.BigEndian.PutUint32(buf, uint32(index))
 
     batch := new(leveldb.Batch)
-    batch.Delete(toBlockKey(oldHash, folder, file))
-    batch.Put(toBlockKey(newHash, folder, file), buf)
+    batch.Delete(blockKeyInto(nil, oldHash, folder, file))
+    batch.Put(blockKeyInto(nil, newHash, folder, file), buf)
     return f.db.Write(batch, nil)
 }
 
@@ -167,8 +175,13 @@ func (f *BlockFinder) Fix(folder, file string, index int32, oldHash, newHash []b
 // folder (64 bytes)
 // block hash (32 bytes)
 // file name (variable size)
-func toBlockKey(hash []byte, folder, file string) []byte {
-    o := make([]byte, 1+64+32+len(file))
+func blockKeyInto(o, hash []byte, folder, file string) []byte {
+    reqLen := 1 + 64 + 32 + len(file)
+    if cap(o) < reqLen {
+        o = make([]byte, reqLen)
+    } else {
+        o = o[:reqLen]
+    }
     o[0] = KeyTypeBlock
     copy(o[1:], []byte(folder))
     copy(o[1+64:], []byte(hash))
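The allocation win is straightforward to confirm in isolation with a Go benchmark that reports allocs/op: with the buffer reused across iterations, the steady-state figure drops to zero allocations per key once the buffer has grown to fit the longest file name. A hypothetical benchmark sketch, assuming it sits in a _test.go file in the same package as blockKeyInto (it is not one of the benchmarks quoted above):

package db

import "testing"

func BenchmarkBlockKeyInto(b *testing.B) {
    hash := make([]byte, 32)
    var key []byte
    b.ReportAllocs()
    for i := 0; i < b.N; i++ {
        // Same buffer every iteration; only the first call allocates.
        key = blockKeyInto(key, hash, "default", "some/file/name.txt")
    }
    _ = key
}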