lib/db: Refactor away the large genericReplace thing

This removes a significant, complex chunk of database code. The
"replace" operation walked the old and new file lists in lockstep and
made whatever changes were needed to bring the database to the new
state. But since the introduction of delta indexes we practically never
need this - we only ever used replace to drop the existing data and
start over.

This makes that explicit and removes the complexity.
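
In practice, a call site that previously did a wholesale Replace() now
drops everything for the device and re-inserts the new set. A minimal
sketch of that pattern, mirroring the replace() test helper added
further down in this commit:

func replace(fs *db.FileSet, device protocol.DeviceID, files []protocol.FileInfo) {
	// Forget everything we currently have for this device, then
	// insert the new file list from scratch.
	fs.Drop(device)
	fs.Update(device, files)
}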

(This is one of those things that would be annoying to make case
insensitive, while the "drop and then insert" that we actually do is
easier.)

This is fairly well unit tested...

The one change to the tests covers the fact that previously a replace
with identical contents didn't bump the per-file sequence numbers,
while removing everything and re-inserting obviously does. This is not
behavior we depend on anywhere.
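
In terms of the updated TestGlobalSet below, the difference looks
roughly like this (local0 carries Sequence 1..5, local1 the same a-d
plus a changed z):

replace(m, protocol.LocalDeviceID, local0)
// a-d stored with Sequence 1..4, z with 5
replace(m, protocol.LocalDeviceID, local1)
// Previously the unchanged a-d kept Sequence 1..4; with drop-and-insert
// everything is rewritten, so a-d become 6..9 and z gets 10.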

GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/4500
LGTM: imsodin, AudriusButkevicius
Author: Jakob Borg
Committer: Audrius Butkevicius
Date: 2017-11-12 20:20:34 +00:00
commit d7d45d8092 (parent 57d5dfa80c)
5 changed files with 157 additions and 145 deletions

@@ -93,93 +93,6 @@ func (db *Instance) Location() string {
return db.location
}
-func (db *Instance) genericReplace(folder, device []byte, fs []protocol.FileInfo, localSize, globalSize *sizeTracker, deleteFn deletionHandler) {
-	sort.Sort(fileList(fs)) // sort list on name, same as in the database
-	t := db.newReadWriteTransaction()
-	defer t.close()
-	dbi := t.NewIterator(util.BytesPrefix(db.deviceKey(folder, device, nil)[:keyPrefixLen+keyFolderLen+keyDeviceLen]), nil)
-	defer dbi.Release()
-	moreDb := dbi.Next()
-	fsi := 0
-	isLocalDevice := bytes.Equal(device, protocol.LocalDeviceID[:])
-	for {
-		var newName, oldName []byte
-		moreFs := fsi < len(fs)
-		if !moreDb && !moreFs {
-			break
-		}
-		if moreFs {
-			newName = []byte(fs[fsi].Name)
-		}
-		if moreDb {
-			oldName = db.deviceKeyName(dbi.Key())
-		}
-		cmp := bytes.Compare(newName, oldName)
-		l.Debugf("generic replace; folder=%q device=%v moreFs=%v moreDb=%v cmp=%d newName=%q oldName=%q", folder, protocol.DeviceIDFromBytes(device), moreFs, moreDb, cmp, newName, oldName)
-		switch {
-		case moreFs && (!moreDb || cmp == -1):
-			l.Debugln("generic replace; missing - insert")
-			// Database is missing this file. Insert it.
-			t.insertFile(folder, device, fs[fsi])
-			if isLocalDevice {
-				localSize.addFile(fs[fsi])
-			}
-			t.updateGlobal(folder, device, fs[fsi], globalSize)
-			fsi++
-		case moreFs && moreDb && cmp == 0:
-			// File exists on both sides - compare versions. We might get an
-			// update with the same version if a device has marked a file as
-			// invalid, so handle that too.
-			l.Debugln("generic replace; exists - compare")
-			var ef FileInfoTruncated
-			ef.Unmarshal(dbi.Value())
-			if !fs[fsi].Version.Equal(ef.Version) || fs[fsi].Invalid != ef.Invalid {
-				l.Debugln("generic replace; differs - insert")
-				t.insertFile(folder, device, fs[fsi])
-				if isLocalDevice {
-					localSize.removeFile(ef)
-					localSize.addFile(fs[fsi])
-				}
-				t.updateGlobal(folder, device, fs[fsi], globalSize)
-			} else {
-				l.Debugln("generic replace; equal - ignore")
-			}
-			fsi++
-			moreDb = dbi.Next()
-		case moreDb && (!moreFs || cmp == 1):
-			l.Debugln("generic replace; exists - remove")
-			deleteFn(t, folder, device, oldName, dbi)
-			moreDb = dbi.Next()
-		}
-		// Write out and reuse the batch every few records, to avoid the batch
-		// growing too large and thus allocating unnecessarily much memory.
-		t.checkFlush()
-	}
-}
-func (db *Instance) replace(folder, device []byte, fs []protocol.FileInfo, localSize, globalSize *sizeTracker) {
-	db.genericReplace(folder, device, fs, localSize, globalSize, func(t readWriteTransaction, folder, device, name []byte, dbi iterator.Iterator) {
-		// Database has a file that we are missing. Remove it.
-		l.Debugf("delete; folder=%q device=%v name=%q", folder, protocol.DeviceIDFromBytes(device), name)
-		t.removeFromGlobal(folder, device, name, globalSize)
-		t.Delete(dbi.Key())
-	})
-}
func (db *Instance) updateFiles(folder, device []byte, fs []protocol.FileInfo, localSize, globalSize *sizeTracker) {
t := db.newReadWriteTransaction()
defer t.close()
@@ -552,6 +465,22 @@ func (db *Instance) dropFolder(folder []byte) {
dbi.Release()
}
+func (db *Instance) dropDeviceFolder(device, folder []byte, globalSize *sizeTracker) {
+	t := db.newReadWriteTransaction()
+	defer t.close()
+	dbi := t.NewIterator(util.BytesPrefix(db.deviceKey(folder, device, nil)), nil)
+	defer dbi.Release()
+	for dbi.Next() {
+		key := dbi.Key()
+		name := db.deviceKeyName(key)
+		t.removeFromGlobal(folder, device, name, globalSize)
+		t.Delete(key)
+		t.checkFlush()
+	}
+}
func (db *Instance) checkGlobals(folder []byte, globalSize *sizeTracker) {
t := db.newReadWriteTransaction()
defer t.close()

@@ -108,6 +108,12 @@ func (s *sizeTracker) removeFile(f FileIntf) {
s.mut.Unlock()
}
+func (s *sizeTracker) reset() {
+	s.mut.Lock()
+	defer s.mut.Unlock()
+	s.Counts = Counts{}
+}
func (s *sizeTracker) Size() Counts {
s.mut.Lock()
defer s.mut.Unlock()
@@ -144,31 +150,27 @@ func NewFileSet(folder string, fs fs.Filesystem, db *Instance) *FileSet {
return &s
}
-func (s *FileSet) Replace(device protocol.DeviceID, fs []protocol.FileInfo) {
-	l.Debugf("%s Replace(%v, [%d])", s.folder, device, len(fs))
-	normalizeFilenames(fs)
+func (s *FileSet) Drop(device protocol.DeviceID) {
+	l.Debugf("%s Drop(%v)", s.folder, device)
s.updateMutex.Lock()
defer s.updateMutex.Unlock()
-	if device == protocol.LocalDeviceID {
-		if len(fs) == 0 {
-			s.sequence = 0
-		} else {
-			// Always overwrite Sequence on updated files to ensure
-			// correct ordering. The caller is supposed to leave it set to
-			// zero anyhow.
-			for i := range fs {
-				fs[i].Sequence = atomic.AddInt64(&s.sequence, 1)
-			}
-		}
-	} else {
-		s.remoteSequence[device] = maxSequence(fs)
-	}
-	s.db.replace([]byte(s.folder), device[:], fs, &s.localSize, &s.globalSize)
+	s.db.dropDeviceFolder(device[:], []byte(s.folder), &s.globalSize)
if device == protocol.LocalDeviceID {
s.blockmap.Drop()
-		s.blockmap.Add(fs)
+		s.localSize.reset()
+		// We deliberately do not reset s.sequence here. Dropping all files
+		// for the local device ID only happens in testing - which expects
+		// the sequence to be retained, like an old Replace() of all files
+		// would do. However, if we ever did it "in production" we would
+		// anyway want to retain the sequence for delta indexes to be happy.
+	} else {
+		// Here, on the other hand, we want to make sure that any file
+		// announced from the remote is newer than our current sequence
+		// number.
+		s.remoteSequence[device] = 0
}
}
@@ -179,6 +181,12 @@ func (s *FileSet) Update(device protocol.DeviceID, fs []protocol.FileInfo) {
s.updateMutex.Lock()
defer s.updateMutex.Unlock()
+	s.updateLocked(device, fs)
+}
+func (s *FileSet) updateLocked(device protocol.DeviceID, fs []protocol.FileInfo) {
+	// names must be normalized and the lock held
if device == protocol.LocalDeviceID {
discards := make([]protocol.FileInfo, 0, len(fs))
updates := make([]protocol.FileInfo, 0, len(fs))
@@ -191,9 +199,13 @@ func (s *FileSet) Update(device protocol.DeviceID, fs []protocol.FileInfo) {
if ok && ef.Version.Equal(nf.Version) && ef.Invalid == nf.Invalid {
continue
}
nf.Sequence = atomic.AddInt64(&s.sequence, 1)
fs = append(fs, nf)
-			discards = append(discards, ef)
+			if ok {
+				discards = append(discards, ef)
+			}
updates = append(updates, nf)
}
s.blockmap.Discard(discards)

@@ -108,17 +108,17 @@ func TestGlobalSet(t *testing.T) {
protocol.FileInfo{Name: "z", Sequence: 5, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(8)},
}
local1 := fileList{
protocol.FileInfo{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(1)},
protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(3)},
protocol.FileInfo{Name: "d", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(4)},
protocol.FileInfo{Name: "z", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}, Deleted: true},
protocol.FileInfo{Name: "a", Sequence: 6, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(1)},
protocol.FileInfo{Name: "b", Sequence: 7, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "c", Sequence: 8, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(3)},
protocol.FileInfo{Name: "d", Sequence: 9, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(4)},
protocol.FileInfo{Name: "z", Sequence: 10, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}, Deleted: true},
}
localTot := fileList{
-		local0[0],
-		local0[1],
-		local0[2],
-		local0[3],
+		local1[0],
+		local1[1],
+		local1[2],
+		local1[3],
protocol.FileInfo{Name: "z", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}, Deleted: true},
}
@@ -157,9 +157,9 @@ func TestGlobalSet(t *testing.T) {
local0[3],
}
-	m.Replace(protocol.LocalDeviceID, local0)
-	m.Replace(protocol.LocalDeviceID, local1)
-	m.Replace(remoteDevice0, remote0)
+	replace(m, protocol.LocalDeviceID, local0)
+	replace(m, protocol.LocalDeviceID, local1)
+	replace(m, remoteDevice0, remote0)
m.Update(remoteDevice0, remote1)
g := fileList(globalList(m))
@@ -335,9 +335,9 @@ func TestNeedWithInvalid(t *testing.T) {
protocol.FileInfo{Name: "d", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1003}}}, Blocks: genBlocks(7)},
}
-	s.Replace(protocol.LocalDeviceID, localHave)
-	s.Replace(remoteDevice0, remote0Have)
-	s.Replace(remoteDevice1, remote1Have)
+	replace(s, protocol.LocalDeviceID, localHave)
+	replace(s, remoteDevice0, remote0Have)
+	replace(s, remoteDevice1, remote1Have)
need := fileList(needList(s, protocol.LocalDeviceID))
sort.Sort(need)
@@ -362,7 +362,7 @@ func TestUpdateToInvalid(t *testing.T) {
protocol.FileInfo{Name: "e", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1003}}}, Invalid: true},
}
-	s.Replace(protocol.LocalDeviceID, localHave)
+	replace(s, protocol.LocalDeviceID, localHave)
have := fileList(haveList(s, protocol.LocalDeviceID))
sort.Sort(have)
@@ -421,8 +421,8 @@ func TestInvalidAvailability(t *testing.T) {
protocol.FileInfo{Name: "none", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1004}}}, Blocks: genBlocks(5), Invalid: true},
}
-	s.Replace(remoteDevice0, remote0Have)
-	s.Replace(remoteDevice1, remote1Have)
+	replace(s, remoteDevice0, remote0Have)
+	replace(s, remoteDevice1, remote1Have)
if av := s.Availability("both"); len(av) != 2 {
t.Error("Incorrect availability for 'both':", av)
@@ -460,7 +460,7 @@ func TestGlobalReset(t *testing.T) {
{Name: "e", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
}
-	m.Replace(protocol.LocalDeviceID, local)
+	replace(m, protocol.LocalDeviceID, local)
g := globalList(m)
sort.Sort(fileList(g))
@@ -468,8 +468,8 @@ func TestGlobalReset(t *testing.T) {
t.Errorf("Global incorrect;\n%v !=\n%v", g, local)
}
-	m.Replace(remoteDevice0, remote)
-	m.Replace(remoteDevice0, nil)
+	replace(m, remoteDevice0, remote)
+	replace(m, remoteDevice0, nil)
g = globalList(m)
sort.Sort(fileList(g))
@@ -504,8 +504,8 @@ func TestNeed(t *testing.T) {
{Name: "e", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
}
-	m.Replace(protocol.LocalDeviceID, local)
-	m.Replace(remoteDevice0, remote)
+	replace(m, protocol.LocalDeviceID, local)
+	replace(m, remoteDevice0, remote)
need := needList(m, protocol.LocalDeviceID)
@@ -537,10 +537,10 @@ func TestSequence(t *testing.T) {
{Name: "e", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
}
-	m.Replace(protocol.LocalDeviceID, local1)
+	replace(m, protocol.LocalDeviceID, local1)
c0 := m.Sequence(protocol.LocalDeviceID)
-	m.Replace(protocol.LocalDeviceID, local2)
+	replace(m, protocol.LocalDeviceID, local2)
c1 := m.Sequence(protocol.LocalDeviceID)
if !(c1 > c0) {
t.Fatal("Local version number should have incremented")
@@ -556,7 +556,7 @@ func TestListDropFolder(t *testing.T) {
{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
{Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
}
-	s0.Replace(protocol.LocalDeviceID, local1)
+	replace(s0, protocol.LocalDeviceID, local1)
s1 := db.NewFileSet("test1", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
local2 := []protocol.FileInfo{
@ -564,7 +564,7 @@ func TestListDropFolder(t *testing.T) {
{Name: "e", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}},
{Name: "f", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}},
}
-	s1.Replace(remoteDevice0, local2)
+	replace(s1, remoteDevice0, local2)
// Check that we have both folders and their data is in the global list
@@ -607,14 +607,14 @@ func TestGlobalNeedWithInvalid(t *testing.T) {
protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}, Invalid: true},
protocol.FileInfo{Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}, Blocks: genBlocks(4)},
}
-	s.Replace(remoteDevice0, rem0)
+	replace(s, remoteDevice0, rem0)
rem1 := fileList{
protocol.FileInfo{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}, Blocks: genBlocks(4)},
protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}, Blocks: genBlocks(4)},
protocol.FileInfo{Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1002}}}, Invalid: true},
}
-	s.Replace(remoteDevice1, rem1)
+	replace(s, remoteDevice1, rem1)
total := fileList{
// There's a valid copy of each file, so it should be merged
@@ -649,7 +649,7 @@ func TestLongPath(t *testing.T) {
{Name: string(name), Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}},
}
-	s.Replace(protocol.LocalDeviceID, local)
+	replace(s, protocol.LocalDeviceID, local)
gf := globalList(s)
if l := len(gf); l != 1 {
@@ -677,7 +677,7 @@ func TestCommitted(t *testing.T) {
c0 := ldb.Committed()
-	s.Replace(protocol.LocalDeviceID, local)
+	replace(s, protocol.LocalDeviceID, local)
c1 := ldb.Committed()
if c1 <= c0 {
@@ -714,7 +714,7 @@ func BenchmarkUpdateOneFile(b *testing.B) {
}()
m := db.NewFileSet("test)", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
-	m.Replace(protocol.LocalDeviceID, local0)
+	replace(m, protocol.LocalDeviceID, local0)
l := local0[4:5]
for i := 0; i < b.N; i++ {
@@ -756,3 +756,71 @@ func TestIndexID(t *testing.T) {
t.Errorf("index ID changed; %d != %d", again, id)
}
}
+func TestDropFiles(t *testing.T) {
+	ldb := db.OpenMemory()
+	m := db.NewFileSet("test)", fs.NewFilesystem(fs.FilesystemTypeBasic, "."), ldb)
+	local0 := fileList{
+		protocol.FileInfo{Name: "a", Sequence: 1, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(1)},
+		protocol.FileInfo{Name: "b", Sequence: 2, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(2)},
+		protocol.FileInfo{Name: "c", Sequence: 3, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(3)},
+		protocol.FileInfo{Name: "d", Sequence: 4, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(4)},
+		protocol.FileInfo{Name: "z", Sequence: 5, Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(8)},
+	}
+	remote0 := fileList{
+		protocol.FileInfo{Name: "a", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(1)},
+		protocol.FileInfo{Name: "b", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(2)},
+		protocol.FileInfo{Name: "c", Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1001}}}, Blocks: genBlocks(5)},
+	}
+	// Insert files
+	m.Update(protocol.LocalDeviceID, local0)
+	m.Update(remoteDevice0, remote0)
+	// Check that they're there
+	h := haveList(m, protocol.LocalDeviceID)
+	if len(h) != len(local0) {
+		t.Errorf("Incorrect number of files after update, %d != %d", len(h), len(local0))
+	}
+	h = haveList(m, remoteDevice0)
+	if len(h) != len(remote0) {
t.Errorf("Incorrect number of files after update, %d != %d", len(h), len(local0))
+	}
+	g := globalList(m)
+	if len(g) != len(local0) {
+		// local0 covers all files
+		t.Errorf("Incorrect global files after update, %d != %d", len(g), len(local0))
+	}
+	// Drop the local files and recheck
+	m.Drop(protocol.LocalDeviceID)
+	h = haveList(m, protocol.LocalDeviceID)
+	if len(h) != 0 {
+		t.Errorf("Incorrect number of files after drop, %d != %d", len(h), 0)
+	}
+	h = haveList(m, remoteDevice0)
+	if len(h) != len(remote0) {
t.Errorf("Incorrect number of files after update, %d != %d", len(h), len(local0))
+	}
+	g = globalList(m)
+	if len(g) != len(remote0) {
+		// the ones in remote0 remain
+		t.Errorf("Incorrect global files after update, %d != %d", len(g), len(remote0))
+	}
+}
+func replace(fs *db.FileSet, device protocol.DeviceID, files []protocol.FileInfo) {
+	fs.Drop(device)
+	fs.Update(device, files)
+}

@@ -206,7 +206,7 @@ func (m *Model) startFolderLocked(folder string) config.FolderType {
for _, available := range fs.ListDevices() {
if _, ok := expected[available]; !ok {
l.Debugln("dropping", folder, "state for", available)
-			fs.Replace(available, nil)
+			fs.Drop(available)
}
}
@@ -814,7 +814,8 @@ func (m *Model) Index(deviceID protocol.DeviceID, folder string, fs []protocol.F
m.deviceDownloads[deviceID].Update(folder, makeForgetUpdate(fs))
m.pmut.RUnlock()
-	files.Replace(deviceID, fs)
+	files.Drop(deviceID)
+	files.Update(deviceID, fs)
events.Default.Log(events.RemoteIndexUpdated, map[string]interface{}{
"device": deviceID.String(),
@@ -980,7 +981,7 @@ func (m *Model) ClusterConfig(deviceID protocol.DeviceID, cm protocol.ClusterCon
// do not support delta indexes and we should clear any
// information we have from them before accepting their
// index, which will presumably be a full index.
-				fs.Replace(deviceID, nil)
+				fs.Drop(deviceID)
} else if dev.IndexID != theirIndexID {
// The index ID we have on file is not what they're
// announcing. They must have reset their database and
@@ -988,7 +989,7 @@ func (m *Model) ClusterConfig(deviceID protocol.DeviceID, cm protocol.ClusterCon
// information we have and remember this new index ID
// instead.
l.Infof("Device %v folder %s has a new index ID (%v)", deviceID, folder.Description(), dev.IndexID)
-				fs.Replace(deviceID, nil)
+				fs.Drop(deviceID)
fs.SetIndexID(deviceID, dev.IndexID)
} else {
// They're sending a recognized index ID and will most

@@ -2084,8 +2084,10 @@ func TestIndexesForUnknownDevicesDropped(t *testing.T) {
dbi := db.OpenMemory()
files := db.NewFileSet("default", defaultFs, dbi)
-	files.Replace(device1, genFiles(1))
-	files.Replace(device2, genFiles(1))
+	files.Drop(device1)
+	files.Update(device1, genFiles(1))
+	files.Drop(device2)
+	files.Update(device2, genFiles(1))
if len(files.ListDevices()) != 2 {
t.Error("expected two devices")