syncthing/lib/db/backend/leveldb_backend.go

// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package backend

import (
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/iterator"
	"github.com/syndtr/goleveldb/leveldb/util"
)

const (
	// Never flush transactions smaller than this, even on Checkpoint().
	// This just needs to be large enough to avoid flushing transactions
	// when they are super tiny, thus creating millions of tiny
	// transactions unnecessarily.
	dbFlushBatchMin = 64 << KiB

	// Once a transaction reaches this size, flush it unconditionally. This
	// should be large enough to avoid forcing a flush between Checkpoint()
	// calls in loops where we do those, so in principle just large enough
	// to hold a FileInfo plus corresponding version list and metadata
	// updates or two.
	dbFlushBatchMax = 1 << MiB
)

// leveldbBackend implements Backend on top of a leveldb
type leveldbBackend struct {
	ldb      *leveldb.DB
	closeWG  *closeWaitGroup
	location string
}

func newLeveldbBackend(ldb *leveldb.DB, location string) *leveldbBackend {
	return &leveldbBackend{
		ldb:      ldb,
		closeWG:  &closeWaitGroup{},
		location: location,
	}
}
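
// NewReadTransaction returns a read-only view of the database, backed by a
// leveldb snapshot.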
func (b *leveldbBackend) NewReadTransaction() (ReadTransaction, error) {
	return b.newSnapshot()
}
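
// newSnapshot takes a leveldb snapshot and pairs it with a releaser on the
// backend's close wait group, so the database is not closed while the
// snapshot is still in use.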
func (b *leveldbBackend) newSnapshot() (leveldbSnapshot, error) {
	rel, err := newReleaser(b.closeWG)
	if err != nil {
		return leveldbSnapshot{}, err
	}
	snap, err := b.ldb.GetSnapshot()
	if err != nil {
		rel.Release()
		return leveldbSnapshot{}, wrapLeveldbErr(err)
	}
	return leveldbSnapshot{
		snap: snap,
		rel:  rel,
	}, nil
}
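
// NewWriteTransaction returns a WriteTransaction that reads from a snapshot
// and accumulates writes in a batch. The given commit hooks run before each
// flush of the batch, including the final flush in Commit(). A minimal usage
// sketch, using only the methods defined in this file (key and val are
// hypothetical placeholders):
//
//	tx, err := b.NewWriteTransaction()
//	if err != nil {
//		return err
//	}
//	if err := tx.Put(key, val); err != nil {
//		tx.Release()
//		return err
//	}
//	return tx.Commit()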
func (b *leveldbBackend) NewWriteTransaction(hooks ...CommitHook) (WriteTransaction, error) {
	rel, err := newReleaser(b.closeWG)
	if err != nil {
		return nil, err
	}
	snap, err := b.newSnapshot()
	if err != nil {
		rel.Release()
		return nil, err // already wrapped
	}
	return &leveldbTransaction{
		leveldbSnapshot: snap,
		ldb:             b.ldb,
		batch:           new(leveldb.Batch),
		rel:             rel,
		commitHooks:     hooks,
		inFlush:         false,
	}, nil
}
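
// Close waits for outstanding snapshots and transactions to be released,
// then closes the underlying leveldb database.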
func (b *leveldbBackend) Close() error {
	b.closeWG.CloseWait()
	return wrapLeveldbErr(b.ldb.Close())
}

func (b *leveldbBackend) Get(key []byte) ([]byte, error) {
	val, err := b.ldb.Get(key, nil)
	return val, wrapLeveldbErr(err)
}

func (b *leveldbBackend) NewPrefixIterator(prefix []byte) (Iterator, error) {
	return &leveldbIterator{b.ldb.NewIterator(util.BytesPrefix(prefix), nil)}, nil
}

func (b *leveldbBackend) NewRangeIterator(first, last []byte) (Iterator, error) {
	return &leveldbIterator{b.ldb.NewIterator(&util.Range{Start: first, Limit: last}, nil)}, nil
}

func (b *leveldbBackend) Put(key, val []byte) error {
	return wrapLeveldbErr(b.ldb.Put(key, val, nil))
}

func (b *leveldbBackend) Delete(key []byte) error {
	return wrapLeveldbErr(b.ldb.Delete(key, nil))
}
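
// Compact runs a full compaction of the underlying database. It registers
// with the close wait group first so the database cannot be closed while the
// compaction is ongoing.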
func (b *leveldbBackend) Compact() error {
	// Race is detected during testing when db is closed while compaction
	// is ongoing.
	err := b.closeWG.Add(1)
	if err != nil {
		return err
	}
	defer b.closeWG.Done()
	return wrapLeveldbErr(b.ldb.CompactRange(util.Range{}))
}

func (b *leveldbBackend) Location() string {
	return b.location
}

// leveldbSnapshot implements backend.ReadTransaction
type leveldbSnapshot struct {
	snap *leveldb.Snapshot
	rel  *releaser
}

func (l leveldbSnapshot) Get(key []byte) ([]byte, error) {
	val, err := l.snap.Get(key, nil)
	return val, wrapLeveldbErr(err)
}

func (l leveldbSnapshot) NewPrefixIterator(prefix []byte) (Iterator, error) {
	return l.snap.NewIterator(util.BytesPrefix(prefix), nil), nil
}

func (l leveldbSnapshot) NewRangeIterator(first, last []byte) (Iterator, error) {
	return l.snap.NewIterator(&util.Range{Start: first, Limit: last}, nil), nil
}

func (l leveldbSnapshot) Release() {
	l.snap.Release()
	l.rel.Release()
}

// leveldbTransaction implements backend.WriteTransaction using a batch (not
// an actual leveldb transaction)
type leveldbTransaction struct {
	leveldbSnapshot
	ldb         *leveldb.DB
	batch       *leveldb.Batch
	rel         *releaser
	commitHooks []CommitHook
	inFlush     bool
}

func (t *leveldbTransaction) Delete(key []byte) error {
	t.batch.Delete(key)
	return t.checkFlush(dbFlushBatchMax)
}

func (t *leveldbTransaction) Put(key, val []byte) error {
	t.batch.Put(key, val)
	return t.checkFlush(dbFlushBatchMax)
}
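
// Checkpoint writes out the accumulated batch if it has grown beyond
// dbFlushBatchMin. As noted on leveldbTransaction, this is a batch rather
// than a real transaction, so flushed writes become visible immediately.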
func (t *leveldbTransaction) Checkpoint() error {
	return t.checkFlush(dbFlushBatchMin)
}
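
// Commit flushes any remaining batched writes, running the commit hooks one
// final time, and releases the underlying snapshot.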
func (t *leveldbTransaction) Commit() error {
	err := wrapLeveldbErr(t.flush())
	t.leveldbSnapshot.Release()
	t.rel.Release()
	return err
}

func (t *leveldbTransaction) Release() {
	t.leveldbSnapshot.Release()
	t.rel.Release()
}

// checkFlush flushes and resets the batch if its size exceeds the given size.
func (t *leveldbTransaction) checkFlush(size int) error {
	// Hooks might put values in the database, which triggers a checkFlush,
	// which might trigger a flush, which would trigger the hooks again.
	// Don't recurse.
	if t.inFlush || len(t.batch.Dump()) < size {
		return nil
	}
	return t.flush()
}
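
// flush runs the commit hooks and then writes the accumulated batch to the
// database, resetting the batch afterwards. The inFlush flag guards against
// hooks recursively triggering another flush.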
func (t *leveldbTransaction) flush() error {
	t.inFlush = true
	defer func() { t.inFlush = false }()
	for _, hook := range t.commitHooks {
		if err := hook(t); err != nil {
			return err
		}
	}
	if t.batch.Len() == 0 {
		return nil
	}
	if err := t.ldb.Write(t.batch, nil); err != nil {
		return wrapLeveldbErr(err)
	}
	t.batch.Reset()
	return nil
}
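
// leveldbIterator wraps a leveldb iterator so that its errors are translated
// by wrapLeveldbErr.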
type leveldbIterator struct {
	iterator.Iterator
}

func (it *leveldbIterator) Error() error {
	return wrapLeveldbErr(it.Iterator.Error())
}

// wrapLeveldbErr wraps errors so that the backend package can recognize them
func wrapLeveldbErr(err error) error {
	switch err {
	case leveldb.ErrClosed:
		return errClosed
	case leveldb.ErrNotFound:
		return errNotFound
	}
	return err
}