Merge pull request #2401 from calmh/blockmap2

Performance tweaks on leveldb code and blockmap

Commit 9f4a0d3216
lib/db/benchmark_test.go (new file, 188 lines)
@@ -0,0 +1,188 @@
// Copyright (C) 2015 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

// +build benchmark

package db_test

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"

	"github.com/syncthing/syncthing/lib/db"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/opt"
)

var files, oneFile, firstHalf, secondHalf []protocol.FileInfo
var fs *db.FileSet

func init() {
	for i := 0; i < 1000; i++ {
		files = append(files, protocol.FileInfo{
			Name:    fmt.Sprintf("file%d", i),
			Version: protocol.Vector{{ID: myID, Value: 1000}},
			Blocks:  genBlocks(i),
		})
	}

	middle := len(files) / 2

	firstHalf = files[:middle]
	secondHalf = files[middle:]
	oneFile = firstHalf[middle-1 : middle]

	ldb, _ := tempDB()
	fs = db.NewFileSet("test", ldb)
	fs.Replace(remoteDevice0, files)
	fs.Replace(protocol.LocalDeviceID, firstHalf)
}

func tempDB() (*leveldb.DB, string) {
	dir, err := ioutil.TempDir("", "syncthing")
	if err != nil {
		panic(err)
	}
	db, err := leveldb.OpenFile(filepath.Join(dir, "db"), &opt.Options{OpenFilesCacheCapacity: 100})
	if err != nil {
		panic(err)
	}
	return db, dir
}

func BenchmarkReplaceAll(b *testing.B) {
	ldb, dir := tempDB()
	defer func() {
		ldb.Close()
		os.RemoveAll(dir)
	}()

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		m := db.NewFileSet("test", ldb)
		m.Replace(protocol.LocalDeviceID, files)
	}

	b.ReportAllocs()
}

func BenchmarkUpdateOneChanged(b *testing.B) {
	changed := make([]protocol.FileInfo, 1)
	changed[0] = oneFile[0]
	changed[0].Version = changed[0].Version.Update(myID)
	changed[0].Blocks = genBlocks(len(changed[0].Blocks))

	for i := 0; i < b.N; i++ {
		if i%1 == 0 {
			fs.Update(protocol.LocalDeviceID, changed)
		} else {
			fs.Update(protocol.LocalDeviceID, oneFile)
		}
	}

	b.ReportAllocs()
}

func BenchmarkUpdateOneUnchanged(b *testing.B) {
	for i := 0; i < b.N; i++ {
		fs.Update(protocol.LocalDeviceID, oneFile)
	}

	b.ReportAllocs()
}

func BenchmarkNeedHalf(b *testing.B) {
	for i := 0; i < b.N; i++ {
		count := 0
		fs.WithNeed(protocol.LocalDeviceID, func(fi db.FileIntf) bool {
			count++
			return true
		})
		if count != len(secondHalf) {
			b.Errorf("wrong length %d != %d", count, len(secondHalf))
		}
	}

	b.ReportAllocs()
}

func BenchmarkHave(b *testing.B) {
	for i := 0; i < b.N; i++ {
		count := 0
		fs.WithHave(protocol.LocalDeviceID, func(fi db.FileIntf) bool {
			count++
			return true
		})
		if count != len(firstHalf) {
			b.Errorf("wrong length %d != %d", count, len(firstHalf))
		}
	}

	b.ReportAllocs()
}

func BenchmarkGlobal(b *testing.B) {
	for i := 0; i < b.N; i++ {
		count := 0
		fs.WithGlobal(func(fi db.FileIntf) bool {
			count++
			return true
		})
		if count != len(files) {
			b.Errorf("wrong length %d != %d", count, len(files))
		}
	}

	b.ReportAllocs()
}

func BenchmarkNeedHalfTruncated(b *testing.B) {
	for i := 0; i < b.N; i++ {
		count := 0
		fs.WithNeedTruncated(protocol.LocalDeviceID, func(fi db.FileIntf) bool {
			count++
			return true
		})
		if count != len(secondHalf) {
			b.Errorf("wrong length %d != %d", count, len(secondHalf))
		}
	}

	b.ReportAllocs()
}

func BenchmarkHaveTruncated(b *testing.B) {
	for i := 0; i < b.N; i++ {
		count := 0
		fs.WithHaveTruncated(protocol.LocalDeviceID, func(fi db.FileIntf) bool {
			count++
			return true
		})
		if count != len(firstHalf) {
			b.Errorf("wrong length %d != %d", count, len(firstHalf))
		}
	}

	b.ReportAllocs()
}

func BenchmarkGlobalTruncated(b *testing.B) {
	for i := 0; i < b.N; i++ {
		count := 0
		fs.WithGlobalTruncated(func(fi db.FileIntf) bool {
			count++
			return true
		})
		if count != len(files) {
			b.Errorf("wrong length %d != %d", count, len(files))
		}
	}

	b.ReportAllocs()
}
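The // +build benchmark constraint keeps this file out of ordinary test runs; the benchmarks compile only when the tag is supplied, for example with go test -tags benchmark -bench . ./lib/db. The file also leans on helpers defined in the package's other tests (myID, remoteDevice0 and genBlocks), which are not part of this diff. As a rough sketch only, assuming protocol.BlockInfo exposes Hash and Size fields, a genBlocks-style helper could look like the following; genBlocksSketch is a made-up name, not the real helper.

// Hypothetical stand-in for the genBlocks test helper referenced above;
// the real helper lives elsewhere in the package's test files.
package db_test

import "github.com/syncthing/syncthing/lib/protocol"

func genBlocksSketch(n int) []protocol.BlockInfo {
	blocks := make([]protocol.BlockInfo, n)
	for i := range blocks {
		h := make([]byte, 32) // dummy 32-byte hash, deterministic per block
		for j := range h {
			h[j] = byte(i + j)
		}
		blocks[i] = protocol.BlockInfo{Size: int32(i), Hash: h}
	}
	return blocks
}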
@@ -26,6 +26,8 @@ import (
 
 var blockFinder *BlockFinder
 
+const maxBatchSize = 256 << 10
+
 type BlockMap struct {
 	db     *leveldb.DB
 	folder string
@@ -42,14 +44,23 @@ func NewBlockMap(db *leveldb.DB, folder string) *BlockMap {
 func (m *BlockMap) Add(files []protocol.FileInfo) error {
 	batch := new(leveldb.Batch)
 	buf := make([]byte, 4)
+	var key []byte
 	for _, file := range files {
+		if batch.Len() > maxBatchSize {
+			if err := m.db.Write(batch, nil); err != nil {
+				return err
+			}
+			batch.Reset()
+		}
+
 		if file.IsDirectory() || file.IsDeleted() || file.IsInvalid() {
 			continue
 		}
 
 		for i, block := range file.Blocks {
 			binary.BigEndian.PutUint32(buf, uint32(i))
-			batch.Put(m.blockKey(block.Hash, file.Name), buf)
+			key = m.blockKeyInto(key, block.Hash, file.Name)
+			batch.Put(key, buf)
 		}
 	}
 	return m.db.Write(batch, nil)
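The new maxBatchSize check in Add (and, below, in Update, Discard and Drop) flushes and resets the write batch once it has grown large, so a call over many files no longer accumulates one unbounded batch in memory; the trade-off is that the whole call is no longer a single atomic write. A self-contained sketch of the same flush-and-reuse idiom against goleveldb follows; writeMany, flushThreshold and the key format are invented for illustration.

// Sketch of batched writes with periodic flushing, not Syncthing code.
package main

import (
	"fmt"
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/storage"
)

const flushThreshold = 100000 // hypothetical cap, analogous to maxBatchSize

func writeMany(db *leveldb.DB, n int) error {
	batch := new(leveldb.Batch)
	for i := 0; i < n; i++ {
		// Batch.Len reports the number of queued records; once the batch
		// is large enough, commit it and reuse it instead of letting it grow.
		if batch.Len() > flushThreshold {
			if err := db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
		}
		batch.Put([]byte(fmt.Sprintf("key%08d", i)), []byte("value"))
	}
	// Commit whatever remains in the final, partial batch.
	return db.Write(batch, nil)
}

func main() {
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := writeMany(db, 250000); err != nil {
		log.Fatal(err)
	}
}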
@@ -59,21 +70,31 @@ func (m *BlockMap) Add(files []protocol.FileInfo) error {
 func (m *BlockMap) Update(files []protocol.FileInfo) error {
 	batch := new(leveldb.Batch)
 	buf := make([]byte, 4)
+	var key []byte
 	for _, file := range files {
+		if batch.Len() > maxBatchSize {
+			if err := m.db.Write(batch, nil); err != nil {
+				return err
+			}
+			batch.Reset()
+		}
+
 		if file.IsDirectory() {
 			continue
 		}
 
 		if file.IsDeleted() || file.IsInvalid() {
 			for _, block := range file.Blocks {
-				batch.Delete(m.blockKey(block.Hash, file.Name))
+				key = m.blockKeyInto(key, block.Hash, file.Name)
+				batch.Delete(key)
 			}
 			continue
 		}
 
 		for i, block := range file.Blocks {
 			binary.BigEndian.PutUint32(buf, uint32(i))
-			batch.Put(m.blockKey(block.Hash, file.Name), buf)
+			key = m.blockKeyInto(key, block.Hash, file.Name)
+			batch.Put(key, buf)
 		}
 	}
 	return m.db.Write(batch, nil)
@@ -82,9 +103,18 @@ func (m *BlockMap) Update(files []protocol.FileInfo) error {
 // Discard block map state, removing the given files
 func (m *BlockMap) Discard(files []protocol.FileInfo) error {
 	batch := new(leveldb.Batch)
+	var key []byte
 	for _, file := range files {
+		if batch.Len() > maxBatchSize {
+			if err := m.db.Write(batch, nil); err != nil {
+				return err
+			}
+			batch.Reset()
+		}
+
 		for _, block := range file.Blocks {
-			batch.Delete(m.blockKey(block.Hash, file.Name))
+			key = m.blockKeyInto(key, block.Hash, file.Name)
+			batch.Delete(key)
 		}
 	}
 	return m.db.Write(batch, nil)
@@ -93,9 +123,16 @@ func (m *BlockMap) Discard(files []protocol.FileInfo) error {
 // Drop block map, removing all entries related to this block map from the db.
 func (m *BlockMap) Drop() error {
 	batch := new(leveldb.Batch)
-	iter := m.db.NewIterator(util.BytesPrefix(m.blockKey(nil, "")[:1+64]), nil)
+	iter := m.db.NewIterator(util.BytesPrefix(m.blockKeyInto(nil, nil, "")[:1+64]), nil)
 	defer iter.Release()
 	for iter.Next() {
+		if batch.Len() > maxBatchSize {
+			if err := m.db.Write(batch, nil); err != nil {
+				return err
+			}
+			batch.Reset()
+		}
+
 		batch.Delete(iter.Key())
 	}
 	if iter.Error() != nil {
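Drop scans every key under the folder's block-key prefix and deletes it using the same capped batches. The prefix scan is the usual goleveldb idiom: util.BytesPrefix converts a prefix into the key range handed to NewIterator. A rough, self-contained sketch of prefix deletion along those lines; deletePrefix and the example keys are made up.

// Sketch of deleting all keys under a prefix with goleveldb, for illustration.
package main

import (
	"log"

	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/storage"
	"github.com/syndtr/goleveldb/leveldb/util"
)

func deletePrefix(db *leveldb.DB, prefix []byte) error {
	batch := new(leveldb.Batch)
	// util.BytesPrefix builds the key range covering everything that
	// starts with prefix.
	iter := db.NewIterator(util.BytesPrefix(prefix), nil)
	defer iter.Release()
	for iter.Next() {
		if batch.Len() > 100000 { // flush periodically, as Drop does above
			if err := db.Write(batch, nil); err != nil {
				return err
			}
			batch.Reset()
		}
		batch.Delete(iter.Key())
	}
	if err := iter.Error(); err != nil {
		return err
	}
	return db.Write(batch, nil)
}

func main() {
	db, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	db.Put([]byte("blk/aaa"), []byte("1"), nil)
	db.Put([]byte("blk/bbb"), []byte("2"), nil)
	if err := deletePrefix(db, []byte("blk/")); err != nil {
		log.Fatal(err)
	}
}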
@@ -104,8 +141,8 @@ func (m *BlockMap) Drop() error {
 	return m.db.Write(batch, nil)
 }
 
-func (m *BlockMap) blockKey(hash []byte, file string) []byte {
-	return toBlockKey(hash, m.folder, file)
+func (m *BlockMap) blockKeyInto(o, hash []byte, file string) []byte {
+	return blockKeyInto(o, hash, m.folder, file)
 }
 
 type BlockFinder struct {
@@ -134,8 +171,9 @@ func (f *BlockFinder) String() string {
 // reason. The iterator finally returns the result, whether or not a
 // satisfying block was eventually found.
 func (f *BlockFinder) Iterate(folders []string, hash []byte, iterFn func(string, string, int32) bool) bool {
+	var key []byte
 	for _, folder := range folders {
-		key := toBlockKey(hash, folder, "")
+		key = blockKeyInto(key, hash, folder, "")
 		iter := f.db.NewIterator(util.BytesPrefix(key), nil)
 		defer iter.Release()
 
@@ -157,8 +195,8 @@ func (f *BlockFinder) Fix(folder, file string, index int32, oldHash, newHash []b
 	binary.BigEndian.PutUint32(buf, uint32(index))
 
 	batch := new(leveldb.Batch)
-	batch.Delete(toBlockKey(oldHash, folder, file))
-	batch.Put(toBlockKey(newHash, folder, file), buf)
+	batch.Delete(blockKeyInto(nil, oldHash, folder, file))
+	batch.Put(blockKeyInto(nil, newHash, folder, file), buf)
 	return f.db.Write(batch, nil)
 }
 
@@ -167,8 +205,13 @@ func (f *BlockFinder) Fix(folder, file string, index int32, oldHash, newHash []b
 // folder (64 bytes)
 // block hash (32 bytes)
 // file name (variable size)
-func toBlockKey(hash []byte, folder, file string) []byte {
-	o := make([]byte, 1+64+32+len(file))
+func blockKeyInto(o, hash []byte, folder, file string) []byte {
+	reqLen := 1 + 64 + 32 + len(file)
+	if cap(o) < reqLen {
+		o = make([]byte, reqLen)
+	} else {
+		o = o[:reqLen]
+	}
 	o[0] = KeyTypeBlock
 	copy(o[1:], []byte(folder))
 	copy(o[1+64:], []byte(hash))
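blockKeyInto is the allocation-saving counterpart to the old toBlockKey: the caller passes in a scratch slice that is resliced when its capacity already fits the key and only reallocated when it does not, and the (possibly new) slice is returned for the next iteration. This is safe because goleveldb's Batch copies the key bytes when Put or Delete is called, so the buffer can be reused immediately. The same grow-or-reslice idiom in isolation, with a deliberately simplified key layout (the real key pads the folder field to 64 bytes and the hash to 32); keyInto is an invented name.

// Illustration of reusing a caller-provided buffer for key construction.
package main

import "fmt"

func keyInto(o []byte, prefix byte, folder, hash []byte, name string) []byte {
	reqLen := 1 + len(folder) + len(hash) + len(name)
	if cap(o) < reqLen {
		o = make([]byte, reqLen) // grow only when the scratch buffer is too small
	} else {
		o = o[:reqLen] // otherwise reuse the existing allocation
	}
	o[0] = prefix
	n := 1
	n += copy(o[n:], folder)
	n += copy(o[n:], hash)
	copy(o[n:], name)
	return o
}

func main() {
	var key []byte // scratch buffer carried across iterations, as in Add/Update/Discard
	for i := 0; i < 3; i++ {
		key = keyInto(key, 'b', []byte("folder"), []byte("hash"), fmt.Sprintf("file%d", i))
		fmt.Printf("%q\n", key)
	}
}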
@@ -12,7 +12,6 @@ package db
 import (
 	"bytes"
 	"fmt"
-	"runtime"
 	"sort"
 
 	"github.com/syncthing/syncthing/lib/protocol"
@@ -170,8 +169,6 @@ func globalKeyFolder(key []byte) []byte {
 type deletionHandler func(db dbReader, batch dbWriter, folder, device, name []byte, dbi iterator.Iterator) int64
 
 func ldbGenericReplace(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo, localSize, globalSize *sizeTracker, deleteFn deletionHandler) int64 {
-	runtime.GC()
-
 	sort.Sort(fileList(fs)) // sort list on name, same as in the database
 
 	start := deviceKey(folder, device, nil) // before all folder/device files
@@ -306,8 +303,6 @@ func ldbReplace(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo, l
 }
 
 func ldbUpdate(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo, localSize, globalSize *sizeTracker) int64 {
-	runtime.GC()
-
 	batch := new(leveldb.Batch)
 	l.Debugf("new batch %p", batch)
 	snap, err := db.GetSnapshot()
@@ -601,8 +596,6 @@ func ldbWithHave(db *leveldb.DB, folder, device []byte, truncate bool, fn Iterat
 }
 
 func ldbWithAllFolderTruncated(db *leveldb.DB, folder []byte, fn func(device []byte, f FileInfoTruncated) bool) {
-	runtime.GC()
-
 	start := deviceKey(folder, nil, nil) // before all folder/device files
 	limit := deviceKey(folder, protocol.LocalDeviceID[:], []byte{0xff, 0xff, 0xff, 0xff}) // after all folder/device files
 	snap, err := db.GetSnapshot()
@@ -706,8 +699,6 @@ func ldbGetGlobal(db *leveldb.DB, folder, file []byte, truncate bool) (FileIntf,
 }
 
 func ldbWithGlobal(db *leveldb.DB, folder, prefix []byte, truncate bool, fn Iterator) {
-	runtime.GC()
-
 	snap, err := db.GetSnapshot()
 	if err != nil {
 		panic(err)
@@ -787,8 +778,6 @@ func ldbAvailability(db *leveldb.DB, folder, file []byte) []protocol.DeviceID {
 }
 
 func ldbWithNeed(db *leveldb.DB, folder, device []byte, truncate bool, fn Iterator) {
-	runtime.GC()
-
 	start := globalKey(folder, nil)
 	limit := globalKey(folder, []byte{0xff, 0xff, 0xff, 0xff})
 	snap, err := db.GetSnapshot()
@@ -887,8 +876,6 @@ nextFile:
 }
 
 func ldbListFolders(db *leveldb.DB) []string {
-	runtime.GC()
-
 	snap, err := db.GetSnapshot()
 	if err != nil {
 		panic(err)
@@ -920,8 +907,6 @@ func ldbListFolders(db *leveldb.DB) []string {
 }
 
 func ldbDropFolder(db *leveldb.DB, folder []byte) {
-	runtime.GC()
-
 	snap, err := db.GetSnapshot()
 	if err != nil {
 		panic(err)
@@ -966,8 +951,6 @@ func unmarshalTrunc(bs []byte, truncate bool) (FileIntf, error) {
 }
 
 func ldbCheckGlobals(db *leveldb.DB, folder []byte, globalSize *sizeTracker) {
-	defer runtime.GC()
-
 	snap, err := db.GetSnapshot()
 	if err != nil {
 		panic(err)
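The leveldb-side change is simpler: the explicit runtime.GC() calls (and the now unused "runtime" import) that prefixed most of the ldb* functions are dropped; runtime.GC() blocks until a full collection completes, which is expensive to pay on every database operation. To get a feel for that cost, a throwaway micro-benchmark like the following can be used; the package and function names here are made up and it is not part of the patch.

// Illustrative only: measures the per-call cost of an explicit runtime.GC().
// Place in a _test.go file and run with: go test -bench ForcedGC
package gcdemo

import (
	"runtime"
	"testing"
)

func BenchmarkForcedGC(b *testing.B) {
	for i := 0; i < b.N; i++ {
		runtime.GC() // blocks until a full garbage collection completes
	}
}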
@@ -417,180 +417,6 @@ func TestInvalidAvailability(t *testing.T)
 		t.Error("Incorrect availability for 'none':", av)
 	}
 }
-
-func Benchmark10kReplace(b *testing.B) {
-	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-	if err != nil {
-		b.Fatal(err)
-	}
-
-	var local []protocol.FileInfo
-	for i := 0; i < 10000; i++ {
-		local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
-	}
-
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		m := db.NewFileSet("test", ldb)
-		m.Replace(protocol.LocalDeviceID, local)
-	}
-}
-
-func Benchmark10kUpdateChg(b *testing.B) {
-	var remote []protocol.FileInfo
-	for i := 0; i < 10000; i++ {
-		remote = append(remote, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
-	}
-
-	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-	if err != nil {
-		b.Fatal(err)
-	}
-
-	m := db.NewFileSet("test", ldb)
-	m.Replace(remoteDevice0, remote)
-
-	var local []protocol.FileInfo
-	for i := 0; i < 10000; i++ {
-		local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
-	}
-
-	m.Replace(protocol.LocalDeviceID, local)
-
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		b.StopTimer()
-		for j := range local {
-			local[j].Version = local[j].Version.Update(myID)
-		}
-		b.StartTimer()
-		m.Update(protocol.LocalDeviceID, local)
-	}
-}
-
-func Benchmark10kUpdateSme(b *testing.B) {
-	var remote []protocol.FileInfo
-	for i := 0; i < 10000; i++ {
-		remote = append(remote, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
-	}
-
-	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-	if err != nil {
-		b.Fatal(err)
-	}
-	m := db.NewFileSet("test", ldb)
-	m.Replace(remoteDevice0, remote)
-
-	var local []protocol.FileInfo
-	for i := 0; i < 10000; i++ {
-		local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
-	}
-
-	m.Replace(protocol.LocalDeviceID, local)
-
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		m.Update(protocol.LocalDeviceID, local)
-	}
-}
-
-func Benchmark10kNeed2k(b *testing.B) {
-	var remote []protocol.FileInfo
-	for i := 0; i < 10000; i++ {
-		remote = append(remote, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
-	}
-
-	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-	if err != nil {
-		b.Fatal(err)
-	}
-
-	m := db.NewFileSet("test", ldb)
-	m.Replace(remoteDevice0, remote)
-
-	var local []protocol.FileInfo
-	for i := 0; i < 8000; i++ {
-		local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
-	}
-	for i := 8000; i < 10000; i++ {
-		local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{1, 980}}})
-	}
-
-	m.Replace(protocol.LocalDeviceID, local)
-
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		fs := needList(m, protocol.LocalDeviceID)
-		if l := len(fs); l != 2000 {
-			b.Errorf("wrong length %d != 2k", l)
-		}
-	}
-}
-
-func Benchmark10kHaveFullList(b *testing.B) {
-	var remote []protocol.FileInfo
-	for i := 0; i < 10000; i++ {
-		remote = append(remote, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
-	}
-
-	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-	if err != nil {
-		b.Fatal(err)
-	}
-
-	m := db.NewFileSet("test", ldb)
-	m.Replace(remoteDevice0, remote)
-
-	var local []protocol.FileInfo
-	for i := 0; i < 2000; i++ {
-		local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
-	}
-	for i := 2000; i < 10000; i++ {
-		local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{1, 980}}})
-	}
-
-	m.Replace(protocol.LocalDeviceID, local)
-
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		fs := haveList(m, protocol.LocalDeviceID)
-		if l := len(fs); l != 10000 {
-			b.Errorf("wrong length %d != 10k", l)
-		}
-	}
-}
-
-func Benchmark10kGlobal(b *testing.B) {
-	var remote []protocol.FileInfo
-	for i := 0; i < 10000; i++ {
-		remote = append(remote, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
-	}
-
-	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
-	if err != nil {
-		b.Fatal(err)
-	}
-
-	m := db.NewFileSet("test", ldb)
-	m.Replace(remoteDevice0, remote)
-
-	var local []protocol.FileInfo
-	for i := 0; i < 2000; i++ {
-		local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
-	}
-	for i := 2000; i < 10000; i++ {
-		local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{1, 980}}})
-	}
-
-	m.Replace(protocol.LocalDeviceID, local)
-
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-		fs := globalList(m)
-		if l := len(fs); l != 10000 {
-			b.Errorf("wrong length %d != 10k", l)
-		}
-	}
-}
-
 func TestGlobalReset(t *testing.T) {
 	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)