Improve protocol & leveldb debugging

Jakob Borg 2014-07-23 11:54:15 +02:00
parent 08ce9b09ec
commit c714a12ad7
4 changed files with 30 additions and 10 deletions

View File

@@ -164,7 +164,7 @@ func ldbGenericReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo
 		cmp := bytes.Compare(newName, oldName)
 		if debug {
-			l.Debugf("generic replace; repo=%q node=%x moreFs=%v moreDb=%v cmp=%d newName=%q oldName=%q", repo, node, moreFs, moreDb, cmp, newName, oldName)
+			l.Debugf("generic replace; repo=%q node=%v moreFs=%v moreDb=%v cmp=%d newName=%q oldName=%q", repo, protocol.NodeIDFromBytes(node), moreFs, moreDb, cmp, newName, oldName)
 		}
 		switch {
@@ -213,7 +213,7 @@ func ldbReplace(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint6
 	return ldbGenericReplace(db, repo, node, fs, func(db dbReader, batch dbWriter, repo, node, name []byte, dbi iterator.Iterator) uint64 {
 		// Disk has files that we are missing. Remove it.
 		if debug {
-			l.Debugf("delete; repo=%q node=%x name=%q", repo, node, name)
+			l.Debugf("delete; repo=%q node=%v name=%q", repo, protocol.NodeIDFromBytes(node), name)
 		}
 		ldbRemoveFromGlobal(db, batch, repo, node, name)
 		batch.Delete(dbi.Key())
@@ -230,7 +230,7 @@ func ldbReplaceWithDelete(db *leveldb.DB, repo, node []byte, fs []protocol.FileI
 		}
 		if !protocol.IsDeleted(f.Flags) {
 			if debug {
-				l.Debugf("mark deleted; repo=%q node=%x name=%q", repo, node, name)
+				l.Debugf("mark deleted; repo=%q node=%v name=%q", repo, protocol.NodeIDFromBytes(node), name)
 			}
 			ts := clock(f.LocalVersion)
 			f.Blocks = nil
@@ -289,7 +289,7 @@ func ldbUpdate(db *leveldb.DB, repo, node []byte, fs []protocol.FileInfo) uint64
 func ldbInsert(batch dbWriter, repo, node, name []byte, file protocol.FileInfo) uint64 {
 	if debug {
-		l.Debugf("insert; repo=%q node=%x %v", repo, node, file)
+		l.Debugf("insert; repo=%q node=%v %v", repo, protocol.NodeIDFromBytes(node), file)
 	}
 	if file.LocalVersion == 0 {
@@ -307,7 +307,7 @@ func ldbInsert(batch dbWriter, repo, node, name []byte, file protocol.FileInfo)
 // If the file does not have an entry in the global list, it is created.
 func ldbUpdateGlobal(db dbReader, batch dbWriter, repo, node, file []byte, version uint64) bool {
 	if debug {
-		l.Debugf("update global; repo=%q node=%x file=%q version=%d", repo, node, file, version)
+		l.Debugf("update global; repo=%q node=%v file=%q version=%d", repo, protocol.NodeIDFromBytes(node), file, version)
 	}
 	gk := globalKey(repo, file)
 	svl, err := db.Get(gk, nil)
@@ -361,7 +361,7 @@ done:
 // removed entirely.
 func ldbRemoveFromGlobal(db dbReader, batch dbWriter, repo, node, file []byte) {
 	if debug {
-		l.Debugf("remove from global; repo=%q node=%x file=%q", repo, node, file)
+		l.Debugf("remove from global; repo=%q node=%v file=%q", repo, protocol.NodeIDFromBytes(node), file)
 	}
 	gk := globalKey(repo, file)
@@ -589,8 +589,7 @@ func ldbAvailability(db *leveldb.DB, repo, file []byte) []protocol.NodeID {
 		if v.version != vl.versions[0].version {
 			break
 		}
-		var n protocol.NodeID
-		copy(n[:], v.node)
+		n := protocol.NodeIDFromBytes(v.node)
 		nodes = append(nodes, n)
 	}
@@ -634,7 +633,7 @@ func ldbWithNeed(db *leveldb.DB, repo, node []byte, fn fileIterator) {
 		if need || !have {
 			name := globalKeyName(dbi.Key())
 			if debug {
-				l.Debugf("need repo=%q node=%x name=%q need=%v have=%v haveV=%d globalV=%d", repo, node, name, need, have, haveVersion, vl.versions[0].version)
+				l.Debugf("need repo=%q node=%v name=%q need=%v have=%v haveV=%d globalV=%d", repo, protocol.NodeIDFromBytes(node), name, need, have, haveVersion, vl.versions[0].version)
 			}
 			fk := nodeKey(repo, vl.versions[0].node, name)
 			bs, err := snap.Get(fk, nil)
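
Why the leveldb trace lines switch from %x to %v: %x on the raw []byte key material prints an opaque hex dump, while %v on a value that implements fmt.Stringer prints its String() form. protocol.NodeID has such a String() method (see the nodeid.go hunk below), so wrapping the raw bytes in protocol.NodeIDFromBytes makes the debug output show the canonical node ID. A minimal, runnable illustration with a stand-in Stringer type (not the real protocol.NodeID):

package main

import "fmt"

// id is a stand-in for protocol.NodeID; its String method plays the role of
// NodeID.String (which uses base32 in the real package).
type id [4]byte

func (n id) String() string { return fmt.Sprintf("ID-%X", n[:]) }

func main() {
	raw := []byte{0xde, 0xad, 0xbe, 0xef}
	var n id
	copy(n[:], raw)
	fmt.Printf("node=%x\n", raw) // old style: raw hex dump (deadbeef)
	fmt.Printf("node=%v\n", n)   // new style: %v uses String() (ID-DEADBEEF)
}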

View File

@@ -17,7 +17,7 @@ go build json.go
 start() {
 	echo "Starting..."
 	for i in 1 2 3 ; do
-		STTRACE=files,model,puller,versioner STPROFILER=":909$i" syncthing -home "h$i" > "$i.out" 2>&1 &
+		STTRACE=files,model,puller,versioner,protocol STPROFILER=":909$i" syncthing -home "h$i" > "$i.out" 2>&1 &
 	done
 }
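
The test script now also enables the protocol facility in STTRACE, so the new trace lines in protocol.go (further down) appear in the per-node logs. As a rough, hypothetical sketch of the comma-separated facility convention the script relies on, not the project's actual implementation, a per-facility check could look like:

package main

import (
	"fmt"
	"os"
	"strings"
)

// debugFor is a hypothetical helper: it reports whether tracing is enabled
// for a facility, given a comma-separated STTRACE value such as
// "files,model,puller,versioner,protocol".
func debugFor(facility string) bool {
	for _, f := range strings.Split(os.Getenv("STTRACE"), ",") {
		if f == facility {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println("protocol tracing enabled:", debugFor("protocol"))
}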

View File

@@ -31,6 +31,15 @@ func NodeIDFromString(s string) (NodeID, error) {
 	return n, err
 }
+
+func NodeIDFromBytes(bs []byte) NodeID {
+	var n NodeID
+	if len(bs) != len(n) {
+		panic("incorrect length of byte slice representing node ID")
+	}
+	copy(n[:], bs)
+	return n
+}
 
 // String returns the canonical string representation of the node ID
 func (n NodeID) String() string {
 	id := base32.StdEncoding.EncodeToString(n[:])
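
Usage sketch for the new NodeIDFromBytes helper: the input slice must be exactly len(NodeID) bytes, anything else panics. NodeID is declared locally here (and the function body copied from the hunk above) only to keep the example self-contained; the real definitions live in the protocol package.

package main

import "fmt"

type NodeID [32]byte // local stand-in; the real type lives in the protocol package

func NodeIDFromBytes(bs []byte) NodeID {
	var n NodeID
	if len(bs) != len(n) {
		panic("incorrect length of byte slice representing node ID")
	}
	copy(n[:], bs)
	return n
}

func main() {
	n := NodeIDFromBytes(make([]byte, 32)) // correct length: OK
	fmt.Println("converted", len(n), "bytes")

	defer func() {
		if r := recover(); r != nil {
			fmt.Println("recovered:", r) // wrong length: panics
		}
	}()
	_ = NodeIDFromBytes([]byte{1, 2, 3})
}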

View File

@@ -327,8 +327,14 @@ func (c *rawConnection) indexSerializerLoop() {
 		select {
 		case ii := <-incomingIndexes:
 			if ii.update {
+				if debug {
+					l.Debugf("calling IndexUpdate(%v, %v, %d files)", ii.id, ii.repo, len(ii.files))
+				}
 				c.receiver.IndexUpdate(ii.id, ii.repo, ii.files)
 			} else {
+				if debug {
+					l.Debugf("calling Index(%v, %v, %d files)", ii.id, ii.repo, len(ii.files))
+				}
 				c.receiver.Index(ii.id, ii.repo, ii.files)
 			}
 		case <-c.closed:
@@ -351,6 +357,9 @@ func (c *rawConnection) handleIndex() error {
 		// update and can't receive the large index update from the
 		// other side.
+		if debug {
+			l.Debugf("queueing Index(%v, %v, %d files)", c.id, im.Repository, len(im.Files))
+		}
 		incomingIndexes <- incomingIndex{false, c.id, im.Repository, im.Files}
 	}
 	return nil
@@ -362,6 +371,9 @@ func (c *rawConnection) handleIndexUpdate() error {
 	if err := c.xr.Error(); err != nil {
 		return err
 	} else {
+		if debug {
+			l.Debugf("queueing IndexUpdate(%v, %v, %d files)", c.id, im.Repository, len(im.Files))
+		}
 		incomingIndexes <- incomingIndex{true, c.id, im.Repository, im.Files}
 	}
 	return nil
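
The protocol.go hunks add matched trace lines on both sides of the index hand-off: "queueing ..." when handleIndex and handleIndexUpdate put a message on the incomingIndexes channel, and "calling ..." when indexSerializerLoop hands it to the receiver. A minimal sketch of that queue-then-dispatch pattern, with simplified stand-in types (the real fields are a protocol.NodeID and a []protocol.FileInfo):

package main

import "log"

// incomingIndex is a simplified stand-in for the real struct.
type incomingIndex struct {
	update bool
	id     string
	repo   string
	files  []string
}

var incomingIndexes = make(chan incomingIndex, 100)

// queueIndex plays the role of handleIndex: log "queueing ..." and hand the
// message to the serializer goroutine via the channel.
func queueIndex(ii incomingIndex) {
	log.Printf("queueing Index(%v, %v, %d files)", ii.id, ii.repo, len(ii.files))
	incomingIndexes <- ii
}

// indexSerializerLoop plays the role of the real serializer loop: log
// "calling ..." just before dispatching, so the two trace lines bracket the
// hand-off between goroutines.
func indexSerializerLoop(dispatch func(incomingIndex), done chan<- struct{}) {
	for ii := range incomingIndexes {
		log.Printf("calling Index(%v, %v, %d files)", ii.id, ii.repo, len(ii.files))
		dispatch(ii)
		done <- struct{}{}
	}
}

func main() {
	done := make(chan struct{})
	go indexSerializerLoop(func(incomingIndex) {}, done)
	queueIndex(incomingIndex{id: "node1", repo: "default", files: []string{"a", "b"}})
	<-done
}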