mirror of https://github.com/octoleo/syncthing.git
parent 9c2117f08e
commit 8f5215878b
@@ -7,6 +7,7 @@
 package backend
 
 import (
+	"errors"
 	"os"
 	"strings"
 	"sync"
@@ -157,13 +158,13 @@ type errNotFound struct{}
 func (*errNotFound) Error() string { return "key not found" }
 
 func IsClosed(err error) bool {
-	_, ok := err.(*errClosed)
-	return ok
+	var e *errClosed
+	return errors.As(err, &e)
 }
 
 func IsNotFound(err error) bool {
-	_, ok := err.(*errNotFound)
-	return ok
+	var e *errNotFound
+	return errors.As(err, &e)
 }
 
 // releaser manages counting on top of a waitgroup
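Not part of the commit, but a minimal standalone sketch of why the switch from a type assertion to errors.As matters: a plain assertion only matches the exact top-level error, while errors.As also finds the sentinel when a caller has wrapped it with extra context. The errClosed type below is re-declared locally for illustration; it stands in for the package's unexported type.

package main

import (
	"errors"
	"fmt"
)

type errClosed struct{}

func (*errClosed) Error() string { return "database is closed" }

func main() {
	// A callee wraps the sentinel with extra context.
	wrapped := fmt.Errorf("flushing batch: %w", &errClosed{})

	// The old check only matches the exact top-level type, so it misses
	// the wrapped error.
	_, ok := wrapped.(*errClosed)
	fmt.Println("type assertion:", ok) // false

	// errors.As walks the wrap chain and still finds it.
	var e *errClosed
	fmt.Println("errors.As:", errors.As(wrapped, &e)) // true
}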
@@ -306,6 +306,7 @@ func TestRepairSequence(t *testing.T) {
 		{Name: "missing", Blocks: genBlocks(3)},
 		{Name: "overwriting", Blocks: genBlocks(4)},
 		{Name: "inconsistent", Blocks: genBlocks(5)},
+		{Name: "inconsistentNotIndirected", Blocks: genBlocks(2)},
 	}
 	for i, f := range files {
 		files[i].Version = f.Version.Update(short)
@@ -322,7 +323,7 @@ func TestRepairSequence(t *testing.T) {
 		if err != nil {
 			t.Fatal(err)
 		}
-		if err := trans.putFile(dk, f, false); err != nil {
+		if err := trans.putFile(dk, f); err != nil {
 			t.Fatal(err)
 		}
 		sk, err := trans.keyer.GenerateSequenceKey(nil, folder, seq)
@@ -367,10 +368,13 @@ func TestRepairSequence(t *testing.T) {
 	files[3].Sequence = seq
 	addFile(files[3], seq)
 
-	// Inconistent file
+	// Inconistent files
 	seq++
 	files[4].Sequence = 101
 	addFile(files[4], seq)
+	seq++
+	files[5].Sequence = 102
+	addFile(files[5], seq)
 
 	// And a sequence entry pointing at nothing because why not
 	sk, err = trans.keyer.GenerateSequenceKey(nil, folder, 100001)
@@ -725,6 +729,72 @@ func TestGCIndirect(t *testing.T) {
 	}
 }
 
+func TestUpdateTo14(t *testing.T) {
+	db := NewLowlevel(backend.OpenMemory())
+	defer db.Close()
+
+	folderStr := "default"
+	folder := []byte(folderStr)
+	name := []byte("foo")
+	file := protocol.FileInfo{Name: string(name), Version: protocol.Vector{Counters: []protocol.Counter{{ID: myID, Value: 1000}}}, Blocks: genBlocks(blocksIndirectionCutoff - 1)}
+	file.BlocksHash = protocol.BlocksHash(file.Blocks)
+	fileWOBlocks := file
+	fileWOBlocks.Blocks = nil
+	meta := db.loadMetadataTracker(folderStr)
+
+	// Initally add the correct file the usual way, all good here.
+	if err := db.updateLocalFiles(folder, []protocol.FileInfo{file}, meta); err != nil {
+		t.Fatal(err)
+	}
+
+	// Simulate the previous bug, where .putFile could write a file info without
+	// blocks, even though the file has them (and thus a non-nil BlocksHash).
+	trans, err := db.newReadWriteTransaction()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer trans.close()
+	key, err := db.keyer.GenerateDeviceFileKey(nil, folder, protocol.LocalDeviceID[:], name)
+	if err != nil {
+		t.Fatal(err)
+	}
+	fiBs := mustMarshal(&fileWOBlocks)
+	if err := trans.Put(key, fiBs); err != nil {
+		t.Fatal(err)
+	}
+	if err := trans.Commit(); err != nil {
+		t.Fatal(err)
+	}
+	trans.close()
+
+	// Run migration, pretending were still on schema 13.
+	if err := (&schemaUpdater{db}).updateSchemaTo14(13); err != nil {
+		t.Fatal(err)
+	}
+
+	// checks
+	ro, err := db.newReadOnlyTransaction()
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer ro.close()
+	if f, ok, err := ro.getFileByKey(key); err != nil {
+		t.Fatal(err)
+	} else if !ok {
+		t.Error("file missing")
+	} else if !f.MustRescan() {
+		t.Error("file not marked as MustRescan")
+	}
+
+	if vl, err := ro.getGlobalVersions(nil, folder, name); err != nil {
+		t.Fatal(err)
+	} else if fv, ok := vl.GetGlobal(); !ok {
+		t.Error("missing global")
+	} else if !fv.IsInvalid() {
+		t.Error("global not marked as invalid")
+	}
+}
+
 func numBlockLists(db *Lowlevel) (int, error) {
 	it, err := db.Backend.NewPrefixIterator([]byte{KeyTypeBlockList})
 	if err != nil {
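For readers unfamiliar with the indirection this test exercises: above blocksIndirectionCutoff, a FileInfo keeps only a digest of its block list (BlocksHash) and the blocks themselves live under a separate key derived from that digest. The sketch below illustrates the idea only; it uses a plain map as the store and SHA-256 over the concatenated per-block hashes, whereas the real construction is protocol.BlocksHash plus the DB keyer.

package main

import (
	"crypto/sha256"
	"fmt"
)

// Hypothetical, simplified stand-in for protocol.BlockInfo.
type blockInfo struct {
	Offset int64
	Size   int32
	Hash   []byte
}

// blocksHash derives one digest for a whole block list, akin to
// protocol.BlocksHash in the real code (the exact construction may differ).
func blocksHash(blocks []blockInfo) []byte {
	h := sha256.New()
	for _, b := range blocks {
		h.Write(b.Hash)
	}
	return h.Sum(nil)
}

func main() {
	store := map[string][]byte{} // stand-in for the key-value backend

	blocks := []blockInfo{
		{Offset: 0, Size: 128 << 10, Hash: []byte("hash-of-block-0")},
		{Offset: 128 << 10, Size: 128 << 10, Hash: []byte("hash-of-block-1")},
	}

	// Indirection: the FileInfo record keeps only the digest; the block list
	// itself is stored once under a key derived from that digest.
	digest := blocksHash(blocks)
	store["blocklist/"+string(digest)] = []byte("marshalled block list goes here")

	// A FileInfo with a BlocksHash but no matching block-list entry is the
	// inconsistency that updateSchemaTo14 detects and marks for rescan.
	_, ok := store["blocklist/"+string(digest)]
	fmt.Println("block list present:", ok)
}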
@@ -149,7 +149,7 @@ func (db *Lowlevel) updateRemoteFiles(folder, device []byte, fs []protocol.FileI
 		meta.addFile(devID, f)
 
 		l.Debugf("insert; folder=%q device=%v %v", folder, devID, f)
-		if err := t.putFile(dk, f, false); err != nil {
+		if err := t.putFile(dk, f); err != nil {
 			return err
 		}
 
@@ -240,7 +240,7 @@ func (db *Lowlevel) updateLocalFiles(folder []byte, fs []protocol.FileInfo, meta
 		meta.addFile(protocol.LocalDeviceID, f)
 
 		l.Debugf("insert (local); folder=%q %v", folder, f)
-		if err := t.putFile(dk, f, false); err != nil {
+		if err := t.putFile(dk, f); err != nil {
 			return err
 		}
 
@@ -956,11 +956,11 @@ func (db *Lowlevel) repairSequenceGCLocked(folderStr string, meta *metadataTrack
 
 	var sk sequenceKey
 	for it.Next() {
-		intf, err := t.unmarshalTrunc(it.Value(), true)
+		intf, err := t.unmarshalTrunc(it.Value(), false)
 		if err != nil {
 			return 0, err
 		}
-		fi := intf.(FileInfoTruncated)
+		fi := intf.(protocol.FileInfo)
 		if sk, err = t.keyer.GenerateSequenceKey(sk, folder, fi.Sequence); err != nil {
 			return 0, err
 		}
@@ -979,7 +979,7 @@ func (db *Lowlevel) repairSequenceGCLocked(folderStr string, meta *metadataTrack
 		if err := t.Put(sk, it.Key()); err != nil {
 			return 0, err
 		}
-		if err := t.putFile(it.Key(), fi.copyToFileInfo(), true); err != nil {
+		if err := t.putFile(it.Key(), fi); err != nil {
 			return 0, err
 		}
 	}
@@ -27,9 +27,10 @@ import (
 // 8-9: v1.4.0
 // 10-11: v1.6.0
 // 12-13: v1.7.0
+// 14: v1.9.0
 const (
-	dbVersion             = 13
-	dbMinSyncthingVersion = "v1.7.0"
+	dbVersion             = 14
+	dbMinSyncthingVersion = "v1.9.0"
 )
 
 var errFolderMissing = errors.New("folder present in global list but missing in keyer index")
@@ -95,6 +96,7 @@ func (db *schemaUpdater) updateSchema() error {
 		{10, db.updateSchemaTo10},
 		{11, db.updateSchemaTo11},
 		{13, db.updateSchemaTo13},
+		{14, db.updateSchemaTo14},
 	}
 
 	for _, m := range migrations {
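The table above is applied in order, gated on the schema version already stored on disk. A generic sketch of that pattern follows; the names and the exact gate are assumptions for illustration, not Syncthing's actual updateSchema loop.

package main

import "fmt"

// migration pairs a target schema version with the function that upgrades to it.
type migration struct {
	targetVersion int
	convert       func(prev int) error
}

// applyMigrations runs every migration whose target version is newer than the
// version currently on disk, then reports the resulting version.
func applyMigrations(prevVersion int, migrations []migration) (int, error) {
	for _, m := range migrations {
		if prevVersion >= m.targetVersion {
			continue // already at or past this step
		}
		if err := m.convert(prevVersion); err != nil {
			return prevVersion, err
		}
		prevVersion = m.targetVersion
	}
	return prevVersion, nil
}

func main() {
	migrations := []migration{
		{13, func(prev int) error { fmt.Println("to 13"); return nil }},
		{14, func(prev int) error { fmt.Println("to 14"); return nil }},
	}
	v, err := applyMigrations(13, migrations) // only the step to 14 runs
	fmt.Println(v, err)
}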
@@ -537,7 +539,7 @@ func (db *schemaUpdater) rewriteFiles(t readWriteTransaction) error {
 		if fi.Blocks == nil {
 			continue
 		}
-		if err := t.putFile(it.Key(), fi, false); err != nil {
+		if err := t.putFile(it.Key(), fi); err != nil {
 			return err
 		}
 		if err := t.Checkpoint(); err != nil {
@@ -683,6 +685,70 @@ func (db *schemaUpdater) updateSchemaTo13(prev int) error {
 	return t.Commit()
 }
 
+func (db *schemaUpdater) updateSchemaTo14(_ int) error {
+	// Checks for missing blocks and marks those entries as requiring a
+	// rehash/being invalid. The db is checked/repaired afterwards, i.e.
+	// no care is taken to get metadata and sequences right.
+	// If the corresponding files changed on disk compared to the global
+	// version, this will cause a conflict.
+
+	var key, gk []byte
+	for _, folderStr := range db.ListFolders() {
+		folder := []byte(folderStr)
+		meta := newMetadataTracker(db.keyer)
+		meta.counts.Created = 0 // Recalculate metadata afterwards
+
+		t, err := db.newReadWriteTransaction(meta.CommitHook(folder))
+		if err != nil {
+			return err
+		}
+		defer t.close()
+
+		key, err = t.keyer.GenerateDeviceFileKey(key, folder, protocol.LocalDeviceID[:], nil)
+		it, err := t.NewPrefixIterator(key)
+		if err != nil {
+			return err
+		}
+		defer it.Release()
+		for it.Next() {
+			var fi protocol.FileInfo
+			if err := fi.Unmarshal(it.Value()); err != nil {
+				return err
+			}
+			if len(fi.Blocks) > 0 || len(fi.BlocksHash) == 0 {
+				continue
+			}
+			key = t.keyer.GenerateBlockListKey(key, fi.BlocksHash)
+			_, err := t.Get(key)
+			if err == nil {
+				continue
+			}
+
+			fi.SetMustRescan(protocol.LocalDeviceID.Short())
+			if err = t.putFile(it.Key(), fi); err != nil {
+				return err
+			}
+
+			gk, err = t.keyer.GenerateGlobalVersionKey(gk, folder, []byte(fi.Name))
+			if err != nil {
+				return err
+			}
+			key, _, err = t.updateGlobal(gk, key, folder, protocol.LocalDeviceID[:], fi, meta)
+			if err != nil {
+				return err
+			}
+		}
+		it.Release()
+
+		if err = t.Commit(); err != nil {
+			return err
+		}
+		t.close()
+	}
+
+	return nil
+}
+
 func (db *schemaUpdater) rewriteGlobals(t readWriteTransaction) error {
 	it, err := t.NewPrefixIterator([]byte{KeyTypeGlobal})
 	if err != nil {
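Stripped of the transaction and key plumbing, the per-file check the new migration performs reduces to: no inline blocks, a non-empty block-list hash, and no block list stored under that hash. A hedged sketch of just that predicate; the map lookup is a hypothetical stand-in for the GenerateBlockListKey plus t.Get done above.

package main

import "fmt"

// needsRescan reports whether a file entry is in the inconsistent state the
// schema-14 migration repairs: it claims an indirected block list (non-empty
// BlocksHash) but carries no inline blocks, and the referenced block list is
// missing from the store.
func needsRescan(blocksLen int, blocksHash []byte, blockLists map[string]bool) bool {
	if blocksLen > 0 || len(blocksHash) == 0 {
		return false // blocks are inline, or no indirection is claimed
	}
	return !blockLists[string(blocksHash)]
}

func main() {
	blockLists := map[string]bool{"present-hash": true}
	fmt.Println(needsRescan(0, []byte("present-hash"), blockLists)) // false: list still exists
	fmt.Println(needsRescan(0, []byte("missing-hash"), blockLists)) // true: mark MustRescan
	fmt.Println(needsRescan(3, nil, blockLists))                    // false: blocks are inline
}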
@@ -9,9 +9,10 @@ package db
 import (
 	"bytes"
 	"errors"
-	"github.com/syncthing/syncthing/lib/osutil"
+	"fmt"
 
 	"github.com/syncthing/syncthing/lib/db/backend"
+	"github.com/syncthing/syncthing/lib/osutil"
 	"github.com/syncthing/syncthing/lib/protocol"
 )
 
@@ -109,7 +110,7 @@ func (t readOnlyTransaction) fillFileInfo(fi *protocol.FileInfo) error {
 	key = t.keyer.GenerateBlockListKey(key, fi.BlocksHash)
 	bs, err := t.Get(key)
 	if err != nil {
-		return err
+		return fmt.Errorf("filling Blocks: %w", err)
 	}
 	var bl BlockList
 	if err := bl.Unmarshal(bs); err != nil {
@@ -122,7 +123,7 @@ func (t readOnlyTransaction) fillFileInfo(fi *protocol.FileInfo) error {
 	key = t.keyer.GenerateVersionKey(key, fi.VersionHash)
 	bs, err := t.Get(key)
 	if err != nil {
-		return err
+		return fmt.Errorf("filling Version: %w", err)
 	}
 	var v protocol.Vector
 	if err := v.Unmarshal(bs); err != nil {
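The two hunks above add context to the error without hiding its identity: wrapping with %w keeps the original error in the chain, so checks built on errors.As, like the IsNotFound change at the top of this commit, still match. A standalone sketch, using a locally declared errNotFound in place of the backend package's unexported one.

package main

import (
	"errors"
	"fmt"
)

type errNotFound struct{}

func (*errNotFound) Error() string { return "key not found" }

func isNotFound(err error) bool {
	var e *errNotFound
	return errors.As(err, &e)
}

func main() {
	// fillFileInfo-style wrapping: add context with %w rather than losing the cause.
	err := fmt.Errorf("filling Blocks: %w", &errNotFound{})

	fmt.Println(err)             // filling Blocks: key not found
	fmt.Println(isNotFound(err)) // true: the wrapped cause is still detectable
}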
@@ -543,18 +544,13 @@ func (t readWriteTransaction) close() {
 }
 
 // putFile stores a file in the database, taking care of indirected fields.
-// Set the truncated flag when putting a file that deliberatly can have an
-// empty block list but a non-empty block list hash. This should normally be
-// false.
-func (t readWriteTransaction) putFile(fkey []byte, fi protocol.FileInfo, truncated bool) error {
+func (t readWriteTransaction) putFile(fkey []byte, fi protocol.FileInfo) error {
 	var bkey []byte
 
-	// Always set the blocks hash when there are blocks. Leave the blocks
-	// hash alone when there are no blocks and we might be putting a
-	// "truncated" FileInfo (no blocks, but the hash reference is live).
+	// Always set the blocks hash when there are blocks.
 	if len(fi.Blocks) > 0 {
 		fi.BlocksHash = protocol.BlocksHash(fi.Blocks)
-	} else if !truncated {
+	} else {
 		fi.BlocksHash = nil
 	}
 
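With the truncated flag gone, putFile enforces one invariant: BlocksHash is always derived from Blocks when blocks are present and cleared when they are not, so a "hash without blocks" record can no longer be written. A minimal sketch of that invariant, using a hypothetical hashOf in place of protocol.BlocksHash.

package main

import (
	"crypto/sha256"
	"fmt"
)

type fileInfo struct {
	Blocks     [][]byte
	BlocksHash []byte
}

// hashOf is a hypothetical stand-in for protocol.BlocksHash.
func hashOf(blocks [][]byte) []byte {
	h := sha256.New()
	for _, b := range blocks {
		h.Write(b)
	}
	return h.Sum(nil)
}

// normalize mirrors the simplified putFile logic: the hash is always
// recomputed from the blocks, never trusted from the caller.
func normalize(fi fileInfo) fileInfo {
	if len(fi.Blocks) > 0 {
		fi.BlocksHash = hashOf(fi.Blocks)
	} else {
		fi.BlocksHash = nil
	}
	return fi
}

func main() {
	stale := fileInfo{Blocks: nil, BlocksHash: []byte("left over from a previous write")}
	fmt.Println(len(normalize(stale).BlocksHash)) // 0: the dangling hash is cleared
}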