syncthing/lib/db/schemaupdater.go

// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package db
import (
"bytes"
"fmt"
"sort"
"strings"
"github.com/syncthing/syncthing/lib/db/backend"
"github.com/syncthing/syncthing/lib/protocol"
)
// dbMigrationVersion is for migrations that do not change the schema and thus
// do not put restrictions on downgrades (e.g. for repairs after a bugfix).
const (
dbVersion = 14
dbMigrationVersion = 19
dbMinSyncthingVersion = "v1.9.0"
)
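// To illustrate the distinction (a hedged example, not normative): bumping
// dbVersion forces older Syncthing versions to refuse the database via the
// downgrade check below, whereas repair-only steps such as
// checkRepairMigration only bump dbMigrationVersion and leave dbVersion at 14,
// so a downgrade to any release that understands schema 14 remains possible.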
type migration struct {
schemaVersion int64
migrationVersion int64
minSyncthingVersion string
migration func(prevSchema int) error
}
type databaseDowngradeError struct {
minSyncthingVersion string
}
func (e *databaseDowngradeError) Error() string {
if e.minSyncthingVersion == "" {
return "newer Syncthing required"
}
return fmt.Sprintf("Syncthing %s required", e.minSyncthingVersion)
}
// UpdateSchema updates a possibly outdated database to the current schema and
// also does repairs where necessary.
func UpdateSchema(db *Lowlevel) error {
updater := &schemaUpdater{db}
return updater.updateSchema()
}
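// A minimal sketch of how a caller might distinguish the downgrade case
// (hedged; the errors import and the surrounding call site are assumptions,
// not part of this file):
//
//	if err := UpdateSchema(ll); err != nil {
//		var derr *databaseDowngradeError
//		if errors.As(err, &derr) {
//			// the database was written by a newer Syncthing; refuse to
//			// start rather than risk corrupting it
//		}
//	}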
type schemaUpdater struct {
*Lowlevel
}
func (db *schemaUpdater) updateSchema() error {
// Updating the schema can touch any and all parts of the database. Make
// sure we do not run GC concurrently with schema migrations.
db.gcMut.Lock()
defer db.gcMut.Unlock()
miscDB := NewMiscDataNamespace(db.Lowlevel)
prevVersion, _, err := miscDB.Int64("dbVersion")
if err != nil {
return err
}
if prevVersion > dbVersion {
err := &databaseDowngradeError{}
if minSyncthingVersion, ok, dbErr := miscDB.String("dbMinSyncthingVersion"); dbErr != nil {
return dbErr
} else if ok {
err.minSyncthingVersion = minSyncthingVersion
}
return err
}
prevMigration, _, err := miscDB.Int64("dbMigrationVersion")
if err != nil {
return err
}
// Cover versions before adding `dbMigrationVersion` (== 0) and possible future weirdness.
if prevMigration < prevVersion {
prevMigration = prevVersion
}
if prevVersion == dbVersion && prevMigration >= dbMigrationVersion {
return nil
}
migrations := []migration{
{1, 1, "v0.14.0", db.updateSchema0to1},
{2, 2, "v0.14.46", db.updateSchema1to2},
{3, 3, "v0.14.48", db.updateSchema2to3},
{5, 5, "v0.14.49", db.updateSchemaTo5},
{6, 6, "v0.14.50", db.updateSchema5to6},
{7, 7, "v0.14.53", db.updateSchema6to7},
{9, 9, "v1.4.0", db.updateSchemaTo9},
{10, 10, "v1.6.0", db.updateSchemaTo10},
{11, 11, "v1.6.0", db.updateSchemaTo11},
{13, 13, "v1.7.0", db.updateSchemaTo13},
{14, 14, "v1.9.0", db.updateSchemaTo14},
{14, 16, "v1.9.0", db.checkRepairMigration},
{14, 17, "v1.9.0", db.migration17},
{14, 19, "v1.9.0", db.dropIndexIDsMigration},
}
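// Worked example of the gating below (illustrative numbers only): a database
// reporting dbVersion 13 and dbMigrationVersion 13 skips every entry up to and
// including {13, 13, ...} and runs the migrations with migrationVersion 14,
// 16, 17 and 19; writeVersions persists the counters after each step, so an
// interrupted upgrade resumes from the last completed migration.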
for _, m := range migrations {
if prevMigration < m.migrationVersion {
l.Infof("Running database migration %d...", m.migrationVersion)
if err := m.migration(int(prevVersion)); err != nil {
return fmt.Errorf("failed to do migration %v: %w", m.migrationVersion, err)
}
if err := db.writeVersions(m, miscDB); err != nil {
return fmt.Errorf("failed to write versions after migration %v: %w", m.migrationVersion, err)
}
}
}
if err := db.writeVersions(migration{
schemaVersion: dbVersion,
migrationVersion: dbMigrationVersion,
minSyncthingVersion: dbMinSyncthingVersion,
}, miscDB); err != nil {
return fmt.Errorf("failed to write versions after migrations: %w", err)
}
l.Infoln("Compacting database after migration...")
return db.Compact()
}
func (*schemaUpdater) writeVersions(m migration, miscDB *NamespacedKV) error {
if err := miscDB.PutInt64("dbVersion", m.schemaVersion); err != nil && err == nil {
return err
}
if err := miscDB.PutString("dbMinSyncthingVersion", m.minSyncthingVersion); err != nil && err == nil {
return err
}
if err := miscDB.PutInt64("dbMigrationVersion", m.migrationVersion); err != nil && err == nil {
return err
}
return nil
}
func (db *schemaUpdater) updateSchema0to1(_ int) error {
t, err := db.newReadWriteTransaction()
if err != nil {
return err
}
defer t.close()
dbi, err := t.NewPrefixIterator([]byte{KeyTypeDevice})
if err != nil {
return err
}
defer dbi.Release()
symlinkConv := 0
changedFolders := make(map[string]struct{})
ignAdded := 0
var gk []byte
ro := t.readOnlyTransaction
for dbi.Next() {
folder, ok := db.keyer.FolderFromDeviceFileKey(dbi.Key())
if !ok {
// not having the folder in the index is bad; delete and continue
if err := t.Delete(dbi.Key()); err != nil {
return err
}
continue
}
device, ok := db.keyer.DeviceFromDeviceFileKey(dbi.Key())
if !ok {
// not having the device in the index is bad; delete and continue
if err := t.Delete(dbi.Key()); err != nil {
return err
}
continue
}
name := db.keyer.NameFromDeviceFileKey(dbi.Key())
// Remove files with absolute path (see #4799)
if strings.HasPrefix(string(name), "/") {
if _, ok := changedFolders[string(folder)]; !ok {
changedFolders[string(folder)] = struct{}{}
}
if err := t.Delete(dbi.Key()); err != nil {
return err
}
gk, err = db.keyer.GenerateGlobalVersionKey(gk, folder, name)
if err != nil {
return err
}
fl, err := getGlobalVersionsByKeyBefore11(gk, ro)
if backend.IsNotFound(err) {
// Shouldn't happen, but not critical.
continue
} else if err != nil {
return err
}
_, _ = fl.pop(device)
if len(fl.Versions) == 0 {
err = t.Delete(gk)
} else {
err = t.Put(gk, mustMarshal(&fl))
}
if err != nil {
return err
}
continue
}
// Change SYMLINK_FILE and SYMLINK_DIRECTORY types to the current SYMLINK
// type (previously SYMLINK_UNKNOWN). It does this for all devices, both
// local and remote, and does not reset delta indexes. It shouldn't really
// matter what the symlink type is, but this cleans it up for a possible
// future when SYMLINK_FILE and SYMLINK_DIRECTORY are no longer understood.
var f protocol.FileInfo
if err := f.Unmarshal(dbi.Value()); err != nil {
// probably can't happen
continue
}
if f.Type == protocol.FileInfoTypeSymlinkDirectory || f.Type == protocol.FileInfoTypeSymlinkFile {
f.Type = protocol.FileInfoTypeSymlink
bs, err := f.Marshal()
if err != nil {
panic("can't happen: " + err.Error())
}
if err := t.Put(dbi.Key(), bs); err != nil {
return err
}
symlinkConv++
}
// Add invalid files to global list
if f.IsInvalid() {
gk, err = db.keyer.GenerateGlobalVersionKey(gk, folder, name)
if err != nil {
return err
}
fl, err := getGlobalVersionsByKeyBefore11(gk, ro)
if err != nil && !backend.IsNotFound(err) {
return err
}
i := sort.Search(len(fl.Versions), func(j int) bool {
return fl.Versions[j].Invalid
})
for ; i < len(fl.Versions); i++ {
ordering := fl.Versions[i].Version.Compare(f.Version)
shouldInsert := ordering == protocol.Equal
if !shouldInsert {
shouldInsert, err = shouldInsertBefore(ordering, folder, fl.Versions[i].Device, true, f, ro)
if err != nil {
return err
}
}
if shouldInsert {
nv := FileVersionDeprecated{
Device: device,
Version: f.Version,
Invalid: true,
}
fl.insertAt(i, nv)
if err := t.Put(gk, mustMarshal(&fl)); err != nil {
return err
}
if _, ok := changedFolders[string(folder)]; !ok {
changedFolders[string(folder)] = struct{}{}
}
ignAdded++
break
}
}
}
if err := t.Checkpoint(); err != nil {
return err
}
}
dbi.Release()
if err := dbi.Error(); err != nil {
return err
}
return t.Commit()
}
// updateSchema1to2 introduces a sequenceKey->deviceKey bucket for local items
// to allow iteration in sequence order (simplifies sending indexes).
func (db *schemaUpdater) updateSchema1to2(_ int) error {
t, err := db.newReadWriteTransaction()
if err != nil {
return err
}
defer t.close()
var sk []byte
var dk []byte
for _, folderStr := range db.ListFolders() {
folder := []byte(folderStr)
var putErr error
err := t.withHave(folder, protocol.LocalDeviceID[:], nil, true, func(f protocol.FileIntf) bool {
sk, putErr = db.keyer.GenerateSequenceKey(sk, folder, f.SequenceNo())
if putErr != nil {
return false
}
dk, putErr = db.keyer.GenerateDeviceFileKey(dk, folder, protocol.LocalDeviceID[:], []byte(f.FileName()))
if putErr != nil {
return false
}
putErr = t.Put(sk, dk)
return putErr == nil
})
if putErr != nil {
return putErr
}
if err != nil {
return err
}
}
return t.Commit()
}
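// The net effect of the loop above (hedged summary): every local file now also
// has an entry mapping its sequence key (folder, sequence number) to its
// device file key (folder, LocalDeviceID, name), which is what allows index
// senders to walk local files in sequence order.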
// updateSchema2to3 introduces a needKey->nil bucket for locally needed files.
func (db *schemaUpdater) updateSchema2to3(_ int) error {
t, err := db.newReadWriteTransaction()
if err != nil {
return err
}
defer t.close()
var nk []byte
var dk []byte
for _, folderStr := range db.ListFolders() {
folder := []byte(folderStr)
var putErr error
err := withGlobalBefore11(folder, true, func(f protocol.FileIntf) bool {
name := []byte(f.FileName())
dk, putErr = db.keyer.GenerateDeviceFileKey(dk, folder, protocol.LocalDeviceID[:], name)
if putErr != nil {
return false
}
var v protocol.Vector
haveFile, ok, err := t.getFileTrunc(dk, true)
if err != nil {
putErr = err
return false
}
if ok {
v = haveFile.FileVersion()
}
fv := FileVersionDeprecated{
Version: f.FileVersion(),
Invalid: f.IsInvalid(),
Deleted: f.IsDeleted(),
}
if !needDeprecated(fv, ok, v) {
return true
}
nk, putErr = t.keyer.GenerateNeedFileKey(nk, folder, []byte(f.FileName()))
if putErr != nil {
return false
}
putErr = t.Put(nk, nil)
return putErr == nil
}, t.readOnlyTransaction)
if putErr != nil {
return putErr
}
if err != nil {
return err
}
}
return t.Commit()
}
// updateSchemaTo5 resets the need bucket due to bugs existing in the v0.14.49
// release candidates (dbVersion 3 and 4)
// https://github.com/syncthing/syncthing/issues/5007
// https://github.com/syncthing/syncthing/issues/5053
func (db *schemaUpdater) updateSchemaTo5(prevVersion int) error {
if prevVersion != 3 && prevVersion != 4 {
return nil
}
t, err := db.newReadWriteTransaction()
if err != nil {
return err
}
var nk []byte
for _, folderStr := range db.ListFolders() {
nk, err = db.keyer.GenerateNeedFileKey(nk, []byte(folderStr), nil)
if err != nil {
return err
}
if err := t.deleteKeyPrefix(nk[:keyPrefixLen+keyFolderLen]); err != nil {
return err
}
}
if err := t.Commit(); err != nil {
return err
}
return db.updateSchema2to3(2)
}
func (db *schemaUpdater) updateSchema5to6(_ int) error {
// For every local file with the Invalid bit set, clear the Invalid bit and
// set LocalFlags = FlagLocalIgnored.
t, err := db.newReadWriteTransaction()
if err != nil {
return err
}
defer t.close()
var dk []byte
for _, folderStr := range db.ListFolders() {
folder := []byte(folderStr)
var iterErr error
err := t.withHave(folder, protocol.LocalDeviceID[:], nil, false, func(f protocol.FileIntf) bool {
if !f.IsInvalid() {
return true
}
fi := f.(protocol.FileInfo)
fi.RawInvalid = false
fi.LocalFlags = protocol.FlagLocalIgnored
bs, _ := fi.Marshal()
dk, iterErr = db.keyer.GenerateDeviceFileKey(dk, folder, protocol.LocalDeviceID[:], []byte(fi.Name))
if iterErr != nil {
return false
}
if iterErr = t.Put(dk, bs); iterErr != nil {
return false
}
iterErr = t.Checkpoint()
return iterErr == nil
})
if iterErr != nil {
return iterErr
}
if err != nil {
return err
}
}
return t.Commit()
}
// updateSchema6to7 checks whether all currently locally needed files are really
// needed and removes them if not.
func (db *schemaUpdater) updateSchema6to7(_ int) error {
t, err := db.newReadWriteTransaction()
if err != nil {
return err
}
defer t.close()
var gk []byte
var nk []byte
for _, folderStr := range db.ListFolders() {
folder := []byte(folderStr)
var delErr error
err := withNeedLocalBefore11(folder, false, func(f protocol.FileIntf) bool {
name := []byte(f.FileName())
gk, delErr = db.keyer.GenerateGlobalVersionKey(gk, folder, name)
if delErr != nil {
return false
}
svl, err := t.Get(gk)
if err != nil {
// If there is no global list, we hardly need it.
key, err := t.keyer.GenerateNeedFileKey(nk, folder, name)
if err != nil {
delErr = err
return false
}
delErr = t.Delete(key)
return delErr == nil
}
var fl VersionListDeprecated
err = fl.Unmarshal(svl)
if err != nil {
// This can't happen, but it's ignored everywhere else too,
// so let's not act on it.
return true
}
globalFV := FileVersionDeprecated{
Version: f.FileVersion(),
Invalid: f.IsInvalid(),
Deleted: f.IsDeleted(),
}
if localFV, haveLocalFV := fl.Get(protocol.LocalDeviceID[:]); !needDeprecated(globalFV, haveLocalFV, localFV.Version) {
key, err := t.keyer.GenerateNeedFileKey(nk, folder, name)
if err != nil {
delErr = err
return false
}
delErr = t.Delete(key)
}
return delErr == nil
}, t.readOnlyTransaction)
if delErr != nil {
return delErr
}
if err != nil {
return err
}
if err := t.Checkpoint(); err != nil {
return err
}
}
return t.Commit()
}
func (db *schemaUpdater) updateSchemaTo9(_ int) error {
// Loads and rewrites all files with blocks, to deduplicate block lists.
t, err := db.newReadWriteTransaction()
if err != nil {
return err
}
defer t.close()
if err := rewriteFiles(t); err != nil {
return err
}
db.recordTime(indirectGCTimeKey)
return t.Commit()
}
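
// rewriteFiles walks all device file entries (KeyTypeDevice) and re-puts
// every file that carries a block list using the current putFile logic.
// Entries that fail to unmarshal because parts of them are missing are
// deleted, as getFile would treat them as "not found" anyway.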
func rewriteFiles(t readWriteTransaction) error {
it, err := t.NewPrefixIterator([]byte{KeyTypeDevice})
if err != nil {
return err
}
defer it.Release()
for it.Next() {
intf, err := t.unmarshalTrunc(it.Value(), false)
if backend.IsNotFound(err) {
// Unmarshal error due to missing parts (block list), probably
// due to a bad migration in a previous RC. Drop this key, as
// getFile would anyway return this as a "not found" in the
// normal flow of things.
if err := t.Delete(it.Key()); err != nil {
return err
}
continue
} else if err != nil {
return err
}
fi := intf.(protocol.FileInfo)
if fi.Blocks == nil {
continue
}
if err := t.putFile(it.Key(), fi); err != nil {
return err
}
if err := t.Checkpoint(); err != nil {
return err
}
}
it.Release()
return it.Error()
}
func (db *schemaUpdater) updateSchemaTo10(_ int) error {
// Rewrites global lists to include a Deleted flag.
t, err := db.newReadWriteTransaction()
if err != nil {
return err
}
defer t.close()
var buf []byte
for _, folderStr := range db.ListFolders() {
folder := []byte(folderStr)
buf, err = t.keyer.GenerateGlobalVersionKey(buf, folder, nil)
if err != nil {
return err
}
buf = globalVersionKey(buf).WithoutName()
dbi, err := t.NewPrefixIterator(buf)
if err != nil {
return err
}
defer dbi.Release()
for dbi.Next() {
var vl VersionListDeprecated
if err := vl.Unmarshal(dbi.Value()); err != nil {
return err
}
changed := false
name := t.keyer.NameFromGlobalVersionKey(dbi.Key())
for i, fv := range vl.Versions {
buf, err = t.keyer.GenerateDeviceFileKey(buf, folder, fv.Device, name)
if err != nil {
return err
}
			f, ok, err := t.getFileTrunc(buf, true)
			if err != nil {
				return err
			}
			if !ok {
				return errEntryFromGlobalMissing
			}
if f.IsDeleted() {
vl.Versions[i].Deleted = true
changed = true
}
}
if changed {
if err := t.Put(dbi.Key(), mustMarshal(&vl)); err != nil {
return err
}
if err := t.Checkpoint(); err != nil {
return err
}
}
}
dbi.Release()
}
// Trigger metadata recalc
if err := t.deleteKeyPrefix([]byte{KeyTypeFolderMeta}); err != nil {
return err
}
return t.Commit()
}
func (db *schemaUpdater) updateSchemaTo11(_ int) error {
// Populates block list map for every folder.
t, err := db.newReadWriteTransaction()
if err != nil {
return err
}
defer t.close()
var dk []byte
for _, folderStr := range db.ListFolders() {
folder := []byte(folderStr)
var putErr error
err := t.withHave(folder, protocol.LocalDeviceID[:], nil, true, func(fi protocol.FileIntf) bool {
f := fi.(FileInfoTruncated)
if f.IsDirectory() || f.IsDeleted() || f.IsSymlink() || f.IsInvalid() || f.BlocksHash == nil {
return true
}
name := []byte(f.FileName())
dk, putErr = db.keyer.GenerateBlockListMapKey(dk, folder, f.BlocksHash, name)
if putErr != nil {
return false
}
if putErr = t.Put(dk, nil); putErr != nil {
return false
}
putErr = t.Checkpoint()
return putErr == nil
})
if putErr != nil {
return putErr
}
if err != nil {
return err
}
}
return t.Commit()
}
func (db *schemaUpdater) updateSchemaTo13(prev int) error {
// Loads and rewrites all files, to deduplicate version vectors.
t, err := db.newReadWriteTransaction()
if err != nil {
return err
}
defer t.close()
if prev < 12 {
if err := rewriteFiles(t); err != nil {
return err
}
}
if err := rewriteGlobals(t); err != nil {
return err
}
return t.Commit()
}
func (db *schemaUpdater) updateSchemaTo14(_ int) error {
// Checks for missing blocks and marks those entries as requiring a
// rehash/being invalid. The db is checked/repaired afterwards, i.e.
// no care is taken to get metadata and sequences right.
// If the corresponding files changed on disk compared to the global
// version, this will cause a conflict.
var key, gk []byte
for _, folderStr := range db.ListFolders() {
folder := []byte(folderStr)
meta := newMetadataTracker(db.keyer, db.evLogger)
meta.counts.Created = 0 // Recalculate metadata afterwards
t, err := db.newReadWriteTransaction(meta.CommitHook(folder))
if err != nil {
return err
}
defer t.close()
key, err = t.keyer.GenerateDeviceFileKey(key, folder, protocol.LocalDeviceID[:], nil)
if err != nil {
return err
}
it, err := t.NewPrefixIterator(key)
if err != nil {
return err
}
defer it.Release()
for it.Next() {
var fi protocol.FileInfo
if err := fi.Unmarshal(it.Value()); err != nil {
return err
}
if len(fi.Blocks) > 0 || len(fi.BlocksHash) == 0 {
continue
}
key = t.keyer.GenerateBlockListKey(key, fi.BlocksHash)
_, err := t.Get(key)
if err == nil {
continue
}
fi.SetMustRescan()
if err = t.putFile(it.Key(), fi); err != nil {
return err
}
gk, err = t.keyer.GenerateGlobalVersionKey(gk, folder, []byte(fi.Name))
if err != nil {
return err
}
key, err = t.updateGlobal(gk, key, folder, protocol.LocalDeviceID[:], fi, meta)
if err != nil {
return err
}
}
it.Release()
if err = t.Commit(); err != nil {
return err
}
t.close()
}
return nil
}
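
// checkRepairMigration loads and checks the metadata of every folder. It
// does not change the schema and exists only to trigger a one-time
// check/repair pass (see dbMigrationVersion).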
func (db *schemaUpdater) checkRepairMigration(_ int) error {
for _, folder := range db.ListFolders() {
_, err := db.getMetaAndCheckGCLocked(folder)
if err != nil {
return err
}
}
return nil
}
// migration17 finds all files that were pulled as invalid from an invalid
// global and makes sure they get scanned/pulled again.
func (db *schemaUpdater) migration17(prev int) error {
if prev < 16 {
// Issue was introduced in migration to 16
return nil
}
t, err := db.newReadOnlyTransaction()
if err != nil {
return err
}
defer t.close()
for _, folderStr := range db.ListFolders() {
folder := []byte(folderStr)
meta, err := db.loadMetadataTracker(folderStr)
if err != nil {
return err
}
batch := NewFileInfoBatch(func(fs []protocol.FileInfo) error {
return db.updateLocalFiles(folder, fs, meta)
})
var innerErr error
err = t.withHave(folder, protocol.LocalDeviceID[:], nil, false, func(fi protocol.FileIntf) bool {
if fi.IsInvalid() && fi.FileLocalFlags() == 0 {
f := fi.(protocol.FileInfo)
f.SetMustRescan()
f.Version = protocol.Vector{}
batch.Append(f)
innerErr = batch.FlushIfFull()
return innerErr == nil
}
return true
})
if innerErr != nil {
return innerErr
}
if err != nil {
return err
}
if err := batch.Flush(); err != nil {
return err
}
}
return nil
}
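
// dropIndexIDsMigration drops all index IDs from the database. Like the
// other migrations that keep dbVersion at 14, it does not change the schema.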
func (db *schemaUpdater) dropIndexIDsMigration(_ int) error {
return db.dropIndexIDs()
}
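
// rewriteGlobals walks all global version lists (KeyTypeGlobal) and converts
// each deprecated list to the current VersionList format. Lists that are
// already in the new format are skipped and empty lists are deleted.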
func rewriteGlobals(t readWriteTransaction) error {
it, err := t.NewPrefixIterator([]byte{KeyTypeGlobal})
if err != nil {
return err
}
defer it.Release()
for it.Next() {
var vl VersionListDeprecated
if err := vl.Unmarshal(it.Value()); err != nil {
// If we crashed during an earlier migration, some version
// lists might already be in the new format: Skip those.
var nvl VersionList
if nerr := nvl.Unmarshal(it.Value()); nerr == nil {
continue
}
return err
}
		if len(vl.Versions) == 0 {
			if err := t.Delete(it.Key()); err != nil {
				return err
			}
			// Nothing left to convert, don't write the key back.
			continue
		}
newVl := convertVersionList(vl)
if err := t.Put(it.Key(), mustMarshal(&newVl)); err != nil {
return err
}
if err := t.Checkpoint(); err != nil {
return err
}
}
return it.Error()
}
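
// convertVersionList turns a deprecated version list into the new format.
// Valid versions come first, with devices that share the same version merged
// into a single FileVersion; the remaining invalid versions are then folded
// in, either as InvalidDevices on an equal version or as their own invalid
// FileVersion at the appropriate position.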
func convertVersionList(vl VersionListDeprecated) VersionList {
var newVl VersionList
var newPos, oldPos int
var lastVersion protocol.Vector
for _, fv := range vl.Versions {
if fv.Invalid {
break
}
oldPos++
if len(newVl.RawVersions) > 0 && lastVersion.Equal(fv.Version) {
newVl.RawVersions[newPos].Devices = append(newVl.RawVersions[newPos].Devices, fv.Device)
continue
}
newPos = len(newVl.RawVersions)
newVl.RawVersions = append(newVl.RawVersions, newFileVersion(fv.Device, fv.Version, false, fv.Deleted))
lastVersion = fv.Version
}
if oldPos == len(vl.Versions) {
return newVl
}
if len(newVl.RawVersions) == 0 {
fv := vl.Versions[oldPos]
newVl.RawVersions = []FileVersion{newFileVersion(fv.Device, fv.Version, true, fv.Deleted)}
oldPos++
}
newPos = 0
outer:
for _, fv := range vl.Versions[oldPos:] {
for _, nfv := range newVl.RawVersions[newPos:] {
switch nfv.Version.Compare(fv.Version) {
case protocol.Equal:
newVl.RawVersions[newPos].InvalidDevices = append(newVl.RawVersions[newPos].InvalidDevices, fv.Device)
continue outer
case protocol.Lesser:
newVl.insertAt(newPos, newFileVersion(fv.Device, fv.Version, true, fv.Deleted))
continue outer
case protocol.ConcurrentLesser, protocol.ConcurrentGreater:
				// The version is invalid, i.e. it loses anyway,
// no need to check/get the conflicting file.
}
newPos++
}
// Couldn't insert into any existing versions
newVl.RawVersions = append(newVl.RawVersions, newFileVersion(fv.Device, fv.Version, true, fv.Deleted))
newPos++
}
return newVl
}
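
// getGlobalVersionsByKeyBefore11 reads and unmarshals a version list in the
// pre-schema-11 (deprecated) format for the given global version key.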
func getGlobalVersionsByKeyBefore11(key []byte, t readOnlyTransaction) (VersionListDeprecated, error) {
bs, err := t.Get(key)
if err != nil {
return VersionListDeprecated{}, err
}
var vl VersionListDeprecated
if err := vl.Unmarshal(bs); err != nil {
return VersionListDeprecated{}, err
}
return vl, nil
}
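
// withGlobalBefore11 iterates the global version lists of a folder in the
// pre-schema-11 format, calling fn with the (optionally truncated) file
// belonging to the first, i.e. global, version of each name. Iteration
// stops early when fn returns false.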
func withGlobalBefore11(folder []byte, truncate bool, fn Iterator, t readOnlyTransaction) error {
key, err := t.keyer.GenerateGlobalVersionKey(nil, folder, nil)
if err != nil {
return err
}
dbi, err := t.NewPrefixIterator(key)
if err != nil {
return err
}
defer dbi.Release()
var dk []byte
for dbi.Next() {
name := t.keyer.NameFromGlobalVersionKey(dbi.Key())
var vl VersionListDeprecated
if err := vl.Unmarshal(dbi.Value()); err != nil {
return err
}
dk, err = t.keyer.GenerateDeviceFileKey(dk, folder, vl.Versions[0].Device, name)
if err != nil {
return err
}
f, ok, err := t.getFileTrunc(dk, truncate)
if err != nil {
return err
}
if !ok {
continue
}
if !fn(f) {
return nil
}
}
if err != nil {
return err
}
return dbi.Error()
}
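
// withNeedLocalBefore11 iterates the local need entries of a folder in the
// pre-schema-11 format, resolving each name to its global file via
// getGlobalBefore11 and calling fn with it. Iteration stops early when fn
// returns false.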
func withNeedLocalBefore11(folder []byte, truncate bool, fn Iterator, t readOnlyTransaction) error {
key, err := t.keyer.GenerateNeedFileKey(nil, folder, nil)
if err != nil {
return err
}
dbi, err := t.NewPrefixIterator(key.WithoutName())
if err != nil {
return err
}
defer dbi.Release()
var keyBuf []byte
var f protocol.FileIntf
var ok bool
for dbi.Next() {
keyBuf, f, ok, err = getGlobalBefore11(keyBuf, folder, t.keyer.NameFromGlobalVersionKey(dbi.Key()), truncate, t)
if err != nil {
return err
}
if !ok {
continue
}
if !fn(f) {
return nil
}
}
return dbi.Error()
}
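
// getGlobalBefore11 looks up the pre-schema-11 global version list for file
// and returns the (optionally truncated) file belonging to its first, i.e.
// global, version. The returned key buffer may be reused by the caller.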
func getGlobalBefore11(keyBuf, folder, file []byte, truncate bool, t readOnlyTransaction) ([]byte, protocol.FileIntf, bool, error) {
keyBuf, err := t.keyer.GenerateGlobalVersionKey(keyBuf, folder, file)
if err != nil {
return nil, nil, false, err
}
bs, err := t.Get(keyBuf)
if backend.IsNotFound(err) {
return keyBuf, nil, false, nil
} else if err != nil {
return nil, nil, false, err
}
var vl VersionListDeprecated
if err := vl.Unmarshal(bs); err != nil {
return nil, nil, false, err
}
if len(vl.Versions) == 0 {
return nil, nil, false, nil
}
keyBuf, err = t.keyer.GenerateDeviceFileKey(keyBuf, folder, vl.Versions[0].Device, file)
if err != nil {
return nil, nil, false, err
}
fi, ok, err := t.getFileTrunc(keyBuf, truncate)
if err != nil || !ok {
return keyBuf, nil, false, err
}
return keyBuf, fi, true, nil
}
func (vl *VersionListDeprecated) String() string {
var b bytes.Buffer
var id protocol.DeviceID
b.WriteString("{")
for i, v := range vl.Versions {
if i > 0 {
b.WriteString(", ")
}
copy(id[:], v.Device)
fmt.Fprintf(&b, "{%v, %v}", v.Version, id)
}
b.WriteString("}")
return b.String()
}
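
// pop removes the entry for the given device from the list and returns it
// together with its former index, or a zero value and -1 if not present.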
func (vl *VersionListDeprecated) pop(device []byte) (FileVersionDeprecated, int) {
for i, v := range vl.Versions {
if bytes.Equal(v.Device, device) {
vl.Versions = append(vl.Versions[:i], vl.Versions[i+1:]...)
return v, i
}
}
return FileVersionDeprecated{}, -1
}
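
// Get returns the entry for the given device and whether it was found.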
func (vl *VersionListDeprecated) Get(device []byte) (FileVersionDeprecated, bool) {
for _, v := range vl.Versions {
if bytes.Equal(v.Device, device) {
return v, true
}
}
return FileVersionDeprecated{}, false
}
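
// insertAt inserts v at index i, shifting subsequent entries one position
// towards the end.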
func (vl *VersionListDeprecated) insertAt(i int, v FileVersionDeprecated) {
vl.Versions = append(vl.Versions, FileVersionDeprecated{})
copy(vl.Versions[i+1:], vl.Versions[i:])
vl.Versions[i] = v
}
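
// needDeprecated reports whether the given global version is needed locally,
// based on whether we have the file at all (haveLocal) and, if so, our local
// version vector.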
func needDeprecated(global FileVersionDeprecated, haveLocal bool, localVersion protocol.Vector) bool {
// We never need an invalid file.
if global.Invalid {
return false
}
// We don't need a deleted file if we don't have it.
if global.Deleted && !haveLocal {
return false
}
// We don't need the global file if we already have the same version.
if haveLocal && localVersion.GreaterEqual(global.Version) {
return false
}
return true
}