Commit fc0cb704f2 (mirror of https://github.com/octoleo/syncthing.git, synced 2024-12-22 10:58:57 +00:00)

Godeps/Godeps.json | 4 (generated)
@@ -19,7 +19,7 @@
 		},
 		{
 			"ImportPath": "github.com/calmh/xdr",
-			"Rev": "03e63d0b968219dd006b17c337f8a6581332f1ab"
+			"Rev": "bccf335c34c01760bdc89f98c952fcda696e27d2"
 		},
 		{
 			"ImportPath": "github.com/juju/ratelimit",
@@ -31,7 +31,7 @@
 		},
 		{
 			"ImportPath": "github.com/syncthing/protocol",
-			"Rev": "d2ec40bb67846f34d3c1e59714351127a2e869e9"
+			"Rev": "f9132cae85dcda1caba2f4ba78996d348b00ac6c"
 		},
 		{
 			"ImportPath": "github.com/syndtr/goleveldb/leveldb",
Godeps/_workspace/src/github.com/calmh/xdr/cmd/genxdr/main.go | 26 (generated, vendored)
@ -52,7 +52,7 @@ import (
|
||||
var encodeTpl = template.Must(template.New("encoder").Parse(`
|
||||
func (o {{.TypeName}}) EncodeXDR(w io.Writer) (int, error) {
|
||||
var xw = xdr.NewWriter(w)
|
||||
return o.encodeXDR(xw)
|
||||
return o.EncodeXDRInto(xw)
|
||||
}//+n
|
||||
|
||||
func (o {{.TypeName}}) MarshalXDR() ([]byte, error) {
|
||||
@ -70,11 +70,11 @@ func (o {{.TypeName}}) MustMarshalXDR() []byte {
|
||||
func (o {{.TypeName}}) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
_, err := o.EncodeXDRInto(xw)
|
||||
return []byte(aw), err
|
||||
}//+n
|
||||
|
||||
func (o {{.TypeName}}) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
func (o {{.TypeName}}) EncodeXDRInto(xw *xdr.Writer) (int, error) {
|
||||
{{range $fieldInfo := .Fields}}
|
||||
{{if not $fieldInfo.IsSlice}}
|
||||
{{if ne $fieldInfo.Convert ""}}
|
||||
@ -87,7 +87,7 @@ func (o {{.TypeName}}) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
{{end}}
|
||||
xw.Write{{$fieldInfo.Encoder}}(o.{{$fieldInfo.Name}})
|
||||
{{else}}
|
||||
_, err := o.{{$fieldInfo.Name}}.encodeXDR(xw)
|
||||
_, err := o.{{$fieldInfo.Name}}.EncodeXDRInto(xw)
|
||||
if err != nil {
|
||||
return xw.Tot(), err
|
||||
}
|
||||
@ -105,7 +105,7 @@ func (o {{.TypeName}}) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
{{else if $fieldInfo.IsBasic}}
|
||||
xw.Write{{$fieldInfo.Encoder}}(o.{{$fieldInfo.Name}}[i])
|
||||
{{else}}
|
||||
_, err := o.{{$fieldInfo.Name}}[i].encodeXDR(xw)
|
||||
_, err := o.{{$fieldInfo.Name}}[i].EncodeXDRInto(xw)
|
||||
if err != nil {
|
||||
return xw.Tot(), err
|
||||
}
|
||||
@ -118,16 +118,16 @@ func (o {{.TypeName}}) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
|
||||
func (o *{{.TypeName}}) DecodeXDR(r io.Reader) error {
|
||||
xr := xdr.NewReader(r)
|
||||
return o.decodeXDR(xr)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}//+n
|
||||
|
||||
func (o *{{.TypeName}}) UnmarshalXDR(bs []byte) error {
|
||||
var br = bytes.NewReader(bs)
|
||||
var xr = xdr.NewReader(br)
|
||||
return o.decodeXDR(xr)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}//+n
|
||||
|
||||
func (o *{{.TypeName}}) decodeXDR(xr *xdr.Reader) error {
|
||||
func (o *{{.TypeName}}) DecodeXDRFrom(xr *xdr.Reader) error {
|
||||
{{range $fieldInfo := .Fields}}
|
||||
{{if not $fieldInfo.IsSlice}}
|
||||
{{if ne $fieldInfo.Convert ""}}
|
||||
@ -139,7 +139,7 @@ func (o *{{.TypeName}}) decodeXDR(xr *xdr.Reader) error {
|
||||
o.{{$fieldInfo.Name}} = xr.Read{{$fieldInfo.Encoder}}()
|
||||
{{end}}
|
||||
{{else}}
|
||||
(&o.{{$fieldInfo.Name}}).decodeXDR(xr)
|
||||
(&o.{{$fieldInfo.Name}}).DecodeXDRFrom(xr)
|
||||
{{end}}
|
||||
{{else}}
|
||||
_{{$fieldInfo.Name}}Size := int(xr.ReadUint32())
|
||||
@ -155,7 +155,7 @@ func (o *{{.TypeName}}) decodeXDR(xr *xdr.Reader) error {
|
||||
{{else if $fieldInfo.IsBasic}}
|
||||
o.{{$fieldInfo.Name}}[i] = xr.Read{{$fieldInfo.Encoder}}()
|
||||
{{else}}
|
||||
(&o.{{$fieldInfo.Name}}[i]).decodeXDR(xr)
|
||||
(&o.{{$fieldInfo.Name}}[i]).DecodeXDRFrom(xr)
|
||||
{{end}}
|
||||
}
|
||||
{{end}}
|
||||
@ -257,7 +257,6 @@ func handleStruct(t *ast.StructType) []fieldInfo {
|
||||
} else {
|
||||
f = fieldInfo{
|
||||
Name: fn,
|
||||
IsBasic: false,
|
||||
IsSlice: true,
|
||||
FieldType: tn,
|
||||
Max: max,
|
||||
@ -317,10 +316,9 @@ func generateDiagram(output io.Writer, s structInfo) {
|
||||
|
||||
for _, f := range fs {
|
||||
tn := f.FieldType
|
||||
sl := f.IsSlice
|
||||
name := uncamelize(f.Name)
|
||||
|
||||
if sl {
|
||||
if f.IsSlice {
|
||||
fmt.Fprintf(output, "| %s |\n", center("Number of "+name, 61))
|
||||
fmt.Fprintln(output, line)
|
||||
}
|
||||
@ -347,7 +345,7 @@ func generateDiagram(output io.Writer, s structInfo) {
|
||||
fmt.Fprintf(output, "/ %61s /\n", "")
|
||||
fmt.Fprintln(output, line)
|
||||
default:
|
||||
if sl {
|
||||
if f.IsSlice {
|
||||
tn = "Zero or more " + tn + " Structures"
|
||||
fmt.Fprintf(output, "/ %s /\n", center("", 61))
|
||||
fmt.Fprintf(output, "\\ %s \\\n", center(tn, 61))
|
||||
|
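To make the template change above concrete, here is a rough sketch of the code the updated genxdr templates emit for a hypothetical one-field struct (Ping is invented for illustration; the real generated output appears below in message_xdr.go). The worker methods are now the exported EncodeXDRInto/DecodeXDRFrom rather than the unexported encodeXDR/decodeXDR, so hand-written types such as Vector can plug into the same call sites.

package example

import (
	"io"

	"github.com/calmh/xdr"
)

// Ping is a hypothetical example struct; genxdr would emit roughly the
// following methods for it.
type Ping struct{ Time uint64 }

func (o Ping) EncodeXDR(w io.Writer) (int, error) {
	var xw = xdr.NewWriter(w)
	return o.EncodeXDRInto(xw) // previously: o.encodeXDR(xw)
}

func (o Ping) EncodeXDRInto(xw *xdr.Writer) (int, error) {
	xw.WriteUint64(o.Time)
	return xw.Tot(), xw.Error()
}

func (o *Ping) DecodeXDR(r io.Reader) error {
	xr := xdr.NewReader(r)
	return o.DecodeXDRFrom(xr) // previously: o.decodeXDR(xr)
}

func (o *Ping) DecodeXDRFrom(xr *xdr.Reader) error {
	o.Time = xr.ReadUint64()
	return xr.Error()
}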
Godeps/_workspace/src/github.com/syncthing/protocol/deviceid.go | 6 (generated, vendored)

@@ -6,6 +6,7 @@ import (
 	"bytes"
 	"crypto/sha256"
 	"encoding/base32"
+	"encoding/binary"
 	"errors"
 	"fmt"
 	"regexp"
@@ -67,6 +68,11 @@ func (n DeviceID) Equals(other DeviceID) bool {
 	return bytes.Compare(n[:], other[:]) == 0
 }
 
+// Short returns an integer representing bits 0-63 of the device ID.
+func (n DeviceID) Short() uint64 {
+	return binary.BigEndian.Uint64(n[:])
+}
+
 func (n *DeviceID) MarshalText() ([]byte, error) {
 	return []byte(n.String()), nil
 }
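A hedged usage sketch, not part of the commit: Short() gives the first 64 bits of a device ID as a uint64, the form used for version-vector counter IDs and for the new myID argument to model.NewModel. It assumes DeviceID is the 32-byte value that the methods above slice into; the certificate bytes are placeholders.

package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/syncthing/protocol"
)

func main() {
	// A device ID is derived from the device certificate; dummy bytes here.
	hash := sha256.Sum256([]byte("example certificate"))
	var id protocol.DeviceID
	copy(id[:], hash[:])

	// Bits 0-63 of the ID, suitable as a compact per-device identifier.
	fmt.Printf("%x\n", id.Short())
}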
Godeps/_workspace/src/github.com/syncthing/protocol/message.go | 4 (generated, vendored)

@@ -17,13 +17,13 @@ type FileInfo struct {
 	Name         string // max:8192
 	Flags        uint32
 	Modified     int64
-	Version      int64
+	Version      Vector
 	LocalVersion int64
 	Blocks       []BlockInfo
 }
 
 func (f FileInfo) String() string {
-	return fmt.Sprintf("File{Name:%q, Flags:0%o, Modified:%d, Version:%d, Size:%d, Blocks:%v}",
+	return fmt.Sprintf("File{Name:%q, Flags:0%o, Modified:%d, Version:%v, Size:%d, Blocks:%v}",
 		f.Name, f.Flags, f.Modified, f.Version, f.Size(), f.Blocks)
 }
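Since FileInfo.Version is now a Vector rather than an int64, callers that used to compare versions with < or == go through the vector methods instead. A minimal sketch with invented helper names:

package example

import "github.com/syncthing/protocol"

// remoteIsNewer reports whether the remote FileInfo strictly supersedes the
// local one. Previously this would have been remote.Version > local.Version.
func remoteIsNewer(local, remote protocol.FileInfo) bool {
	return remote.Version.GreaterEqual(local.Version) && !remote.Version.Equal(local.Version)
}

// inConflict reports whether the two sides were modified concurrently, so
// that neither version dominates the other.
func inConflict(local, remote protocol.FileInfo) bool {
	return local.Version.Concurrent(remote.Version)
}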
Godeps/_workspace/src/github.com/syncthing/protocol/message_xdr.go | 183 (generated, vendored)
@ -51,7 +51,7 @@ struct IndexMessage {
|
||||
|
||||
func (o IndexMessage) EncodeXDR(w io.Writer) (int, error) {
|
||||
var xw = xdr.NewWriter(w)
|
||||
return o.encodeXDR(xw)
|
||||
return o.EncodeXDRInto(xw)
|
||||
}
|
||||
|
||||
func (o IndexMessage) MarshalXDR() ([]byte, error) {
|
||||
@ -69,15 +69,15 @@ func (o IndexMessage) MustMarshalXDR() []byte {
|
||||
func (o IndexMessage) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
_, err := o.EncodeXDRInto(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o IndexMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
func (o IndexMessage) EncodeXDRInto(xw *xdr.Writer) (int, error) {
|
||||
xw.WriteString(o.Folder)
|
||||
xw.WriteUint32(uint32(len(o.Files)))
|
||||
for i := range o.Files {
|
||||
_, err := o.Files[i].encodeXDR(xw)
|
||||
_, err := o.Files[i].EncodeXDRInto(xw)
|
||||
if err != nil {
|
||||
return xw.Tot(), err
|
||||
}
|
||||
@ -88,7 +88,7 @@ func (o IndexMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
}
|
||||
xw.WriteUint32(uint32(len(o.Options)))
|
||||
for i := range o.Options {
|
||||
_, err := o.Options[i].encodeXDR(xw)
|
||||
_, err := o.Options[i].EncodeXDRInto(xw)
|
||||
if err != nil {
|
||||
return xw.Tot(), err
|
||||
}
|
||||
@ -98,21 +98,21 @@ func (o IndexMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
|
||||
func (o *IndexMessage) DecodeXDR(r io.Reader) error {
|
||||
xr := xdr.NewReader(r)
|
||||
return o.decodeXDR(xr)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}
|
||||
|
||||
func (o *IndexMessage) UnmarshalXDR(bs []byte) error {
|
||||
var br = bytes.NewReader(bs)
|
||||
var xr = xdr.NewReader(br)
|
||||
return o.decodeXDR(xr)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}
|
||||
|
||||
func (o *IndexMessage) decodeXDR(xr *xdr.Reader) error {
|
||||
func (o *IndexMessage) DecodeXDRFrom(xr *xdr.Reader) error {
|
||||
o.Folder = xr.ReadString()
|
||||
_FilesSize := int(xr.ReadUint32())
|
||||
o.Files = make([]FileInfo, _FilesSize)
|
||||
for i := range o.Files {
|
||||
(&o.Files[i]).decodeXDR(xr)
|
||||
(&o.Files[i]).DecodeXDRFrom(xr)
|
||||
}
|
||||
o.Flags = xr.ReadUint32()
|
||||
_OptionsSize := int(xr.ReadUint32())
|
||||
@ -121,7 +121,7 @@ func (o *IndexMessage) decodeXDR(xr *xdr.Reader) error {
|
||||
}
|
||||
o.Options = make([]Option, _OptionsSize)
|
||||
for i := range o.Options {
|
||||
(&o.Options[i]).decodeXDR(xr)
|
||||
(&o.Options[i]).DecodeXDRFrom(xr)
|
||||
}
|
||||
return xr.Error()
|
||||
}
|
||||
@ -145,9 +145,9 @@ FileInfo Structure:
|
||||
+ Modified (64 bits) +
|
||||
| |
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
+ Version (64 bits) +
|
||||
| |
|
||||
/ /
|
||||
\ Vector Structure \
|
||||
/ /
|
||||
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|
||||
| |
|
||||
+ Local Version (64 bits) +
|
||||
@ -165,7 +165,7 @@ struct FileInfo {
|
||||
string Name<8192>;
|
||||
unsigned int Flags;
|
||||
hyper Modified;
|
||||
hyper Version;
|
||||
Vector Version;
|
||||
hyper LocalVersion;
|
||||
BlockInfo Blocks<>;
|
||||
}
|
||||
@ -174,7 +174,7 @@ struct FileInfo {
|
||||
|
||||
func (o FileInfo) EncodeXDR(w io.Writer) (int, error) {
|
||||
var xw = xdr.NewWriter(w)
|
||||
return o.encodeXDR(xw)
|
||||
return o.EncodeXDRInto(xw)
|
||||
}
|
||||
|
||||
func (o FileInfo) MarshalXDR() ([]byte, error) {
|
||||
@ -192,22 +192,25 @@ func (o FileInfo) MustMarshalXDR() []byte {
|
||||
func (o FileInfo) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
_, err := o.EncodeXDRInto(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o FileInfo) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
func (o FileInfo) EncodeXDRInto(xw *xdr.Writer) (int, error) {
|
||||
if l := len(o.Name); l > 8192 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Name", l, 8192)
|
||||
}
|
||||
xw.WriteString(o.Name)
|
||||
xw.WriteUint32(o.Flags)
|
||||
xw.WriteUint64(uint64(o.Modified))
|
||||
xw.WriteUint64(uint64(o.Version))
|
||||
_, err := o.Version.EncodeXDRInto(xw)
|
||||
if err != nil {
|
||||
return xw.Tot(), err
|
||||
}
|
||||
xw.WriteUint64(uint64(o.LocalVersion))
|
||||
xw.WriteUint32(uint32(len(o.Blocks)))
|
||||
for i := range o.Blocks {
|
||||
_, err := o.Blocks[i].encodeXDR(xw)
|
||||
_, err := o.Blocks[i].EncodeXDRInto(xw)
|
||||
if err != nil {
|
||||
return xw.Tot(), err
|
||||
}
|
||||
@ -217,25 +220,25 @@ func (o FileInfo) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
|
||||
func (o *FileInfo) DecodeXDR(r io.Reader) error {
|
||||
xr := xdr.NewReader(r)
|
||||
return o.decodeXDR(xr)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}
|
||||
|
||||
func (o *FileInfo) UnmarshalXDR(bs []byte) error {
|
||||
var br = bytes.NewReader(bs)
|
||||
var xr = xdr.NewReader(br)
|
||||
return o.decodeXDR(xr)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}
|
||||
|
||||
func (o *FileInfo) decodeXDR(xr *xdr.Reader) error {
|
||||
func (o *FileInfo) DecodeXDRFrom(xr *xdr.Reader) error {
|
||||
o.Name = xr.ReadStringMax(8192)
|
||||
o.Flags = xr.ReadUint32()
|
||||
o.Modified = int64(xr.ReadUint64())
|
||||
o.Version = int64(xr.ReadUint64())
|
||||
(&o.Version).DecodeXDRFrom(xr)
|
||||
o.LocalVersion = int64(xr.ReadUint64())
|
||||
_BlocksSize := int(xr.ReadUint32())
|
||||
o.Blocks = make([]BlockInfo, _BlocksSize)
|
||||
for i := range o.Blocks {
|
||||
(&o.Blocks[i]).decodeXDR(xr)
|
||||
(&o.Blocks[i]).DecodeXDRFrom(xr)
|
||||
}
|
||||
return xr.Error()
|
||||
}
|
||||
@ -266,7 +269,7 @@ struct BlockInfo {
|
||||
|
||||
func (o BlockInfo) EncodeXDR(w io.Writer) (int, error) {
|
||||
var xw = xdr.NewWriter(w)
|
||||
return o.encodeXDR(xw)
|
||||
return o.EncodeXDRInto(xw)
|
||||
}
|
||||
|
||||
func (o BlockInfo) MarshalXDR() ([]byte, error) {
|
||||
@ -284,11 +287,11 @@ func (o BlockInfo) MustMarshalXDR() []byte {
|
||||
func (o BlockInfo) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
_, err := o.EncodeXDRInto(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o BlockInfo) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
func (o BlockInfo) EncodeXDRInto(xw *xdr.Writer) (int, error) {
|
||||
xw.WriteUint32(uint32(o.Size))
|
||||
if l := len(o.Hash); l > 64 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Hash", l, 64)
|
||||
@ -299,16 +302,16 @@ func (o BlockInfo) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
|
||||
func (o *BlockInfo) DecodeXDR(r io.Reader) error {
|
||||
xr := xdr.NewReader(r)
|
||||
return o.decodeXDR(xr)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}
|
||||
|
||||
func (o *BlockInfo) UnmarshalXDR(bs []byte) error {
|
||||
var br = bytes.NewReader(bs)
|
||||
var xr = xdr.NewReader(br)
|
||||
return o.decodeXDR(xr)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}
|
||||
|
||||
func (o *BlockInfo) decodeXDR(xr *xdr.Reader) error {
|
||||
func (o *BlockInfo) DecodeXDRFrom(xr *xdr.Reader) error {
|
||||
o.Size = int32(xr.ReadUint32())
|
||||
o.Hash = xr.ReadBytesMax(64)
|
||||
return xr.Error()
|
||||
@ -369,7 +372,7 @@ struct RequestMessage {
|
||||
|
||||
func (o RequestMessage) EncodeXDR(w io.Writer) (int, error) {
|
||||
var xw = xdr.NewWriter(w)
|
||||
return o.encodeXDR(xw)
|
||||
return o.EncodeXDRInto(xw)
|
||||
}
|
||||
|
||||
func (o RequestMessage) MarshalXDR() ([]byte, error) {
|
||||
@ -387,11 +390,11 @@ func (o RequestMessage) MustMarshalXDR() []byte {
|
||||
func (o RequestMessage) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
_, err := o.EncodeXDRInto(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o RequestMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
func (o RequestMessage) EncodeXDRInto(xw *xdr.Writer) (int, error) {
|
||||
if l := len(o.Folder); l > 64 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Folder", l, 64)
|
||||
}
|
||||
@ -412,7 +415,7 @@ func (o RequestMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
}
|
||||
xw.WriteUint32(uint32(len(o.Options)))
|
||||
for i := range o.Options {
|
||||
_, err := o.Options[i].encodeXDR(xw)
|
||||
_, err := o.Options[i].EncodeXDRInto(xw)
|
||||
if err != nil {
|
||||
return xw.Tot(), err
|
||||
}
|
||||
@ -422,16 +425,16 @@ func (o RequestMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
|
||||
func (o *RequestMessage) DecodeXDR(r io.Reader) error {
|
||||
xr := xdr.NewReader(r)
|
||||
return o.decodeXDR(xr)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}
|
||||
|
||||
func (o *RequestMessage) UnmarshalXDR(bs []byte) error {
|
||||
var br = bytes.NewReader(bs)
|
||||
var xr = xdr.NewReader(br)
|
||||
return o.decodeXDR(xr)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}
|
||||
|
||||
func (o *RequestMessage) decodeXDR(xr *xdr.Reader) error {
|
||||
func (o *RequestMessage) DecodeXDRFrom(xr *xdr.Reader) error {
|
||||
o.Folder = xr.ReadStringMax(64)
|
||||
o.Name = xr.ReadStringMax(8192)
|
||||
o.Offset = int64(xr.ReadUint64())
|
||||
@ -444,7 +447,7 @@ func (o *RequestMessage) decodeXDR(xr *xdr.Reader) error {
|
||||
}
|
||||
o.Options = make([]Option, _OptionsSize)
|
||||
for i := range o.Options {
|
||||
(&o.Options[i]).decodeXDR(xr)
|
||||
(&o.Options[i]).DecodeXDRFrom(xr)
|
||||
}
|
||||
return xr.Error()
|
||||
}
|
||||
@ -475,7 +478,7 @@ struct ResponseMessage {
|
||||
|
||||
func (o ResponseMessage) EncodeXDR(w io.Writer) (int, error) {
|
||||
var xw = xdr.NewWriter(w)
|
||||
return o.encodeXDR(xw)
|
||||
return o.EncodeXDRInto(xw)
|
||||
}
|
||||
|
||||
func (o ResponseMessage) MarshalXDR() ([]byte, error) {
|
||||
@ -493,11 +496,11 @@ func (o ResponseMessage) MustMarshalXDR() []byte {
|
||||
func (o ResponseMessage) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
_, err := o.EncodeXDRInto(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o ResponseMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
func (o ResponseMessage) EncodeXDRInto(xw *xdr.Writer) (int, error) {
|
||||
xw.WriteBytes(o.Data)
|
||||
xw.WriteUint32(uint32(o.Error))
|
||||
return xw.Tot(), xw.Error()
|
||||
@ -505,16 +508,16 @@ func (o ResponseMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
|
||||
func (o *ResponseMessage) DecodeXDR(r io.Reader) error {
|
||||
xr := xdr.NewReader(r)
|
||||
return o.decodeXDR(xr)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}
|
||||
|
||||
func (o *ResponseMessage) UnmarshalXDR(bs []byte) error {
|
||||
var br = bytes.NewReader(bs)
|
||||
var xr = xdr.NewReader(br)
|
||||
return o.decodeXDR(xr)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}
|
||||
|
||||
func (o *ResponseMessage) decodeXDR(xr *xdr.Reader) error {
|
||||
func (o *ResponseMessage) DecodeXDRFrom(xr *xdr.Reader) error {
|
||||
o.Data = xr.ReadBytes()
|
||||
o.Error = int32(xr.ReadUint32())
|
||||
return xr.Error()
|
||||
@ -564,7 +567,7 @@ struct ClusterConfigMessage {
|
||||
|
||||
func (o ClusterConfigMessage) EncodeXDR(w io.Writer) (int, error) {
|
||||
var xw = xdr.NewWriter(w)
|
||||
return o.encodeXDR(xw)
|
||||
return o.EncodeXDRInto(xw)
|
||||
}
|
||||
|
||||
func (o ClusterConfigMessage) MarshalXDR() ([]byte, error) {
|
||||
@ -582,11 +585,11 @@ func (o ClusterConfigMessage) MustMarshalXDR() []byte {
|
||||
func (o ClusterConfigMessage) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
_, err := o.EncodeXDRInto(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o ClusterConfigMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
func (o ClusterConfigMessage) EncodeXDRInto(xw *xdr.Writer) (int, error) {
|
||||
if l := len(o.ClientName); l > 64 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("ClientName", l, 64)
|
||||
}
|
||||
@ -597,7 +600,7 @@ func (o ClusterConfigMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
xw.WriteString(o.ClientVersion)
|
||||
xw.WriteUint32(uint32(len(o.Folders)))
|
||||
for i := range o.Folders {
|
||||
_, err := o.Folders[i].encodeXDR(xw)
|
||||
_, err := o.Folders[i].EncodeXDRInto(xw)
|
||||
if err != nil {
|
||||
return xw.Tot(), err
|
||||
}
|
||||
@ -607,7 +610,7 @@ func (o ClusterConfigMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
}
|
||||
xw.WriteUint32(uint32(len(o.Options)))
|
||||
for i := range o.Options {
|
||||
_, err := o.Options[i].encodeXDR(xw)
|
||||
_, err := o.Options[i].EncodeXDRInto(xw)
|
||||
if err != nil {
|
||||
return xw.Tot(), err
|
||||
}
|
||||
@ -617,22 +620,22 @@ func (o ClusterConfigMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
|
||||
func (o *ClusterConfigMessage) DecodeXDR(r io.Reader) error {
|
||||
xr := xdr.NewReader(r)
|
||||
return o.decodeXDR(xr)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}
|
||||
|
||||
func (o *ClusterConfigMessage) UnmarshalXDR(bs []byte) error {
|
||||
var br = bytes.NewReader(bs)
|
||||
var xr = xdr.NewReader(br)
|
||||
return o.decodeXDR(xr)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}
|
||||
|
||||
func (o *ClusterConfigMessage) decodeXDR(xr *xdr.Reader) error {
|
||||
func (o *ClusterConfigMessage) DecodeXDRFrom(xr *xdr.Reader) error {
|
||||
o.ClientName = xr.ReadStringMax(64)
|
||||
o.ClientVersion = xr.ReadStringMax(64)
|
||||
_FoldersSize := int(xr.ReadUint32())
|
||||
o.Folders = make([]Folder, _FoldersSize)
|
||||
for i := range o.Folders {
|
||||
(&o.Folders[i]).decodeXDR(xr)
|
||||
(&o.Folders[i]).DecodeXDRFrom(xr)
|
||||
}
|
||||
_OptionsSize := int(xr.ReadUint32())
|
||||
if _OptionsSize > 64 {
|
||||
@ -640,7 +643,7 @@ func (o *ClusterConfigMessage) decodeXDR(xr *xdr.Reader) error {
|
||||
}
|
||||
o.Options = make([]Option, _OptionsSize)
|
||||
for i := range o.Options {
|
||||
(&o.Options[i]).decodeXDR(xr)
|
||||
(&o.Options[i]).DecodeXDRFrom(xr)
|
||||
}
|
||||
return xr.Error()
|
||||
}
|
||||
@ -685,7 +688,7 @@ struct Folder {
|
||||
|
||||
func (o Folder) EncodeXDR(w io.Writer) (int, error) {
|
||||
var xw = xdr.NewWriter(w)
|
||||
return o.encodeXDR(xw)
|
||||
return o.EncodeXDRInto(xw)
|
||||
}
|
||||
|
||||
func (o Folder) MarshalXDR() ([]byte, error) {
|
||||
@ -703,18 +706,18 @@ func (o Folder) MustMarshalXDR() []byte {
|
||||
func (o Folder) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
_, err := o.EncodeXDRInto(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o Folder) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
func (o Folder) EncodeXDRInto(xw *xdr.Writer) (int, error) {
|
||||
if l := len(o.ID); l > 64 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("ID", l, 64)
|
||||
}
|
||||
xw.WriteString(o.ID)
|
||||
xw.WriteUint32(uint32(len(o.Devices)))
|
||||
for i := range o.Devices {
|
||||
_, err := o.Devices[i].encodeXDR(xw)
|
||||
_, err := o.Devices[i].EncodeXDRInto(xw)
|
||||
if err != nil {
|
||||
return xw.Tot(), err
|
||||
}
|
||||
@ -725,7 +728,7 @@ func (o Folder) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
}
|
||||
xw.WriteUint32(uint32(len(o.Options)))
|
||||
for i := range o.Options {
|
||||
_, err := o.Options[i].encodeXDR(xw)
|
||||
_, err := o.Options[i].EncodeXDRInto(xw)
|
||||
if err != nil {
|
||||
return xw.Tot(), err
|
||||
}
|
||||
@ -735,21 +738,21 @@ func (o Folder) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
|
||||
func (o *Folder) DecodeXDR(r io.Reader) error {
|
||||
xr := xdr.NewReader(r)
|
||||
return o.decodeXDR(xr)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}
|
||||
|
||||
func (o *Folder) UnmarshalXDR(bs []byte) error {
|
||||
var br = bytes.NewReader(bs)
|
||||
var xr = xdr.NewReader(br)
|
||||
return o.decodeXDR(xr)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}
|
||||
|
||||
func (o *Folder) decodeXDR(xr *xdr.Reader) error {
|
||||
func (o *Folder) DecodeXDRFrom(xr *xdr.Reader) error {
|
||||
o.ID = xr.ReadStringMax(64)
|
||||
_DevicesSize := int(xr.ReadUint32())
|
||||
o.Devices = make([]Device, _DevicesSize)
|
||||
for i := range o.Devices {
|
||||
(&o.Devices[i]).decodeXDR(xr)
|
||||
(&o.Devices[i]).DecodeXDRFrom(xr)
|
||||
}
|
||||
o.Flags = xr.ReadUint32()
|
||||
_OptionsSize := int(xr.ReadUint32())
|
||||
@ -758,7 +761,7 @@ func (o *Folder) decodeXDR(xr *xdr.Reader) error {
|
||||
}
|
||||
o.Options = make([]Option, _OptionsSize)
|
||||
for i := range o.Options {
|
||||
(&o.Options[i]).decodeXDR(xr)
|
||||
(&o.Options[i]).DecodeXDRFrom(xr)
|
||||
}
|
||||
return xr.Error()
|
||||
}
|
||||
@ -801,7 +804,7 @@ struct Device {
|
||||
|
||||
func (o Device) EncodeXDR(w io.Writer) (int, error) {
|
||||
var xw = xdr.NewWriter(w)
|
||||
return o.encodeXDR(xw)
|
||||
return o.EncodeXDRInto(xw)
|
||||
}
|
||||
|
||||
func (o Device) MarshalXDR() ([]byte, error) {
|
||||
@ -819,11 +822,11 @@ func (o Device) MustMarshalXDR() []byte {
|
||||
func (o Device) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
_, err := o.EncodeXDRInto(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o Device) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
func (o Device) EncodeXDRInto(xw *xdr.Writer) (int, error) {
|
||||
if l := len(o.ID); l > 32 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("ID", l, 32)
|
||||
}
|
||||
@ -835,7 +838,7 @@ func (o Device) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
}
|
||||
xw.WriteUint32(uint32(len(o.Options)))
|
||||
for i := range o.Options {
|
||||
_, err := o.Options[i].encodeXDR(xw)
|
||||
_, err := o.Options[i].EncodeXDRInto(xw)
|
||||
if err != nil {
|
||||
return xw.Tot(), err
|
||||
}
|
||||
@ -845,16 +848,16 @@ func (o Device) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
|
||||
func (o *Device) DecodeXDR(r io.Reader) error {
|
||||
xr := xdr.NewReader(r)
|
||||
return o.decodeXDR(xr)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}
|
||||
|
||||
func (o *Device) UnmarshalXDR(bs []byte) error {
|
||||
var br = bytes.NewReader(bs)
|
||||
var xr = xdr.NewReader(br)
|
||||
return o.decodeXDR(xr)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}
|
||||
|
||||
func (o *Device) decodeXDR(xr *xdr.Reader) error {
|
||||
func (o *Device) DecodeXDRFrom(xr *xdr.Reader) error {
|
||||
o.ID = xr.ReadBytesMax(32)
|
||||
o.MaxLocalVersion = int64(xr.ReadUint64())
|
||||
o.Flags = xr.ReadUint32()
|
||||
@ -864,7 +867,7 @@ func (o *Device) decodeXDR(xr *xdr.Reader) error {
|
||||
}
|
||||
o.Options = make([]Option, _OptionsSize)
|
||||
for i := range o.Options {
|
||||
(&o.Options[i]).decodeXDR(xr)
|
||||
(&o.Options[i]).DecodeXDRFrom(xr)
|
||||
}
|
||||
return xr.Error()
|
||||
}
|
||||
@ -899,7 +902,7 @@ struct Option {
|
||||
|
||||
func (o Option) EncodeXDR(w io.Writer) (int, error) {
|
||||
var xw = xdr.NewWriter(w)
|
||||
return o.encodeXDR(xw)
|
||||
return o.EncodeXDRInto(xw)
|
||||
}
|
||||
|
||||
func (o Option) MarshalXDR() ([]byte, error) {
|
||||
@ -917,11 +920,11 @@ func (o Option) MustMarshalXDR() []byte {
|
||||
func (o Option) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
_, err := o.EncodeXDRInto(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o Option) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
func (o Option) EncodeXDRInto(xw *xdr.Writer) (int, error) {
|
||||
if l := len(o.Key); l > 64 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Key", l, 64)
|
||||
}
|
||||
@ -935,16 +938,16 @@ func (o Option) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
|
||||
func (o *Option) DecodeXDR(r io.Reader) error {
|
||||
xr := xdr.NewReader(r)
|
||||
return o.decodeXDR(xr)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}
|
||||
|
||||
func (o *Option) UnmarshalXDR(bs []byte) error {
|
||||
var br = bytes.NewReader(bs)
|
||||
var xr = xdr.NewReader(br)
|
||||
return o.decodeXDR(xr)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}
|
||||
|
||||
func (o *Option) decodeXDR(xr *xdr.Reader) error {
|
||||
func (o *Option) DecodeXDRFrom(xr *xdr.Reader) error {
|
||||
o.Key = xr.ReadStringMax(64)
|
||||
o.Value = xr.ReadStringMax(1024)
|
||||
return xr.Error()
|
||||
@ -976,7 +979,7 @@ struct CloseMessage {
|
||||
|
||||
func (o CloseMessage) EncodeXDR(w io.Writer) (int, error) {
|
||||
var xw = xdr.NewWriter(w)
|
||||
return o.encodeXDR(xw)
|
||||
return o.EncodeXDRInto(xw)
|
||||
}
|
||||
|
||||
func (o CloseMessage) MarshalXDR() ([]byte, error) {
|
||||
@ -994,11 +997,11 @@ func (o CloseMessage) MustMarshalXDR() []byte {
|
||||
func (o CloseMessage) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
_, err := o.EncodeXDRInto(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o CloseMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
func (o CloseMessage) EncodeXDRInto(xw *xdr.Writer) (int, error) {
|
||||
if l := len(o.Reason); l > 1024 {
|
||||
return xw.Tot(), xdr.ElementSizeExceeded("Reason", l, 1024)
|
||||
}
|
||||
@ -1009,16 +1012,16 @@ func (o CloseMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
|
||||
func (o *CloseMessage) DecodeXDR(r io.Reader) error {
|
||||
xr := xdr.NewReader(r)
|
||||
return o.decodeXDR(xr)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}
|
||||
|
||||
func (o *CloseMessage) UnmarshalXDR(bs []byte) error {
|
||||
var br = bytes.NewReader(bs)
|
||||
var xr = xdr.NewReader(br)
|
||||
return o.decodeXDR(xr)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}
|
||||
|
||||
func (o *CloseMessage) decodeXDR(xr *xdr.Reader) error {
|
||||
func (o *CloseMessage) DecodeXDRFrom(xr *xdr.Reader) error {
|
||||
o.Reason = xr.ReadStringMax(1024)
|
||||
o.Code = int32(xr.ReadUint32())
|
||||
return xr.Error()
|
||||
@ -1040,7 +1043,7 @@ struct EmptyMessage {
|
||||
|
||||
func (o EmptyMessage) EncodeXDR(w io.Writer) (int, error) {
|
||||
var xw = xdr.NewWriter(w)
|
||||
return o.encodeXDR(xw)
|
||||
return o.EncodeXDRInto(xw)
|
||||
}
|
||||
|
||||
func (o EmptyMessage) MarshalXDR() ([]byte, error) {
|
||||
@ -1058,25 +1061,25 @@ func (o EmptyMessage) MustMarshalXDR() []byte {
|
||||
func (o EmptyMessage) AppendXDR(bs []byte) ([]byte, error) {
|
||||
var aw = xdr.AppendWriter(bs)
|
||||
var xw = xdr.NewWriter(&aw)
|
||||
_, err := o.encodeXDR(xw)
|
||||
_, err := o.EncodeXDRInto(xw)
|
||||
return []byte(aw), err
|
||||
}
|
||||
|
||||
func (o EmptyMessage) encodeXDR(xw *xdr.Writer) (int, error) {
|
||||
func (o EmptyMessage) EncodeXDRInto(xw *xdr.Writer) (int, error) {
|
||||
return xw.Tot(), xw.Error()
|
||||
}
|
||||
|
||||
func (o *EmptyMessage) DecodeXDR(r io.Reader) error {
|
||||
xr := xdr.NewReader(r)
|
||||
return o.decodeXDR(xr)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}
|
||||
|
||||
func (o *EmptyMessage) UnmarshalXDR(bs []byte) error {
|
||||
var br = bytes.NewReader(bs)
|
||||
var xr = xdr.NewReader(br)
|
||||
return o.decodeXDR(xr)
|
||||
return o.DecodeXDRFrom(xr)
|
||||
}
|
||||
|
||||
func (o *EmptyMessage) decodeXDR(xr *xdr.Reader) error {
|
||||
func (o *EmptyMessage) DecodeXDRFrom(xr *xdr.Reader) error {
|
||||
return xr.Error()
|
||||
}
|
||||
|
Godeps/_workspace/src/github.com/syncthing/protocol/vector.go | 105 (generated, vendored, new file)

@@ -0,0 +1,105 @@
// Copyright (C) 2015 The Protocol Authors.

package protocol

// The Vector type represents a version vector. The zero value is a usable
// version vector. The vector has slice semantics and some operations on it
// are "append-like" in that they may return the same vector modified, or a
// new allocated Vector with the modified contents.
type Vector []Counter

// Counter represents a single counter in the version vector.
type Counter struct {
	ID    uint64
	Value uint64
}

// Update returns a Vector with the index for the specific ID incremented by
// one. If it is possible, the vector v is updated and returned. If it is not,
// a copy will be created, updated and returned.
func (v Vector) Update(ID uint64) Vector {
	for i := range v {
		if v[i].ID == ID {
			// Update an existing index
			v[i].Value++
			return v
		} else if v[i].ID > ID {
			// Insert a new index
			nv := make(Vector, len(v)+1)
			copy(nv, v[:i])
			nv[i].ID = ID
			nv[i].Value = 1
			copy(nv[i+1:], v[i:])
			return nv
		}
	}
	// Append a new index
	return append(v, Counter{ID, 1})
}

// Merge returns the vector containing the maximum indexes from a and b. If it
// is possible, the vector a is updated and returned. If it is not, a copy
// will be created, updated and returned.
func (a Vector) Merge(b Vector) Vector {
	var ai, bi int
	for bi < len(b) {
		if ai == len(a) {
			// We've reached the end of a, all that remains are appends
			return append(a, b[bi:]...)
		}

		if a[ai].ID > b[bi].ID {
			// The index from b should be inserted here
			n := make(Vector, len(a)+1)
			copy(n, a[:ai])
			n[ai] = b[bi]
			copy(n[ai+1:], a[ai:])
			a = n
		}

		if a[ai].ID == b[bi].ID {
			if v := b[bi].Value; v > a[ai].Value {
				a[ai].Value = v
			}
		}

		if bi < len(b) && a[ai].ID == b[bi].ID {
			bi++
		}
		ai++
	}

	return a
}

// Copy returns an identical vector that is not shared with v.
func (v Vector) Copy() Vector {
	nv := make(Vector, len(v))
	copy(nv, v)
	return nv
}

// Equal returns true when the two vectors are equivalent.
func (a Vector) Equal(b Vector) bool {
	return a.Compare(b) == Equal
}

// LesserEqual returns true when the two vectors are equivalent or a is Lesser
// than b.
func (a Vector) LesserEqual(b Vector) bool {
	comp := a.Compare(b)
	return comp == Lesser || comp == Equal
}

// GreaterEqual returns true when the two vectors are equivalent or a is
// Greater than b.
func (a Vector) GreaterEqual(b Vector) bool {
	comp := a.Compare(b)
	return comp == Greater || comp == Equal
}

// Concurrent returns true when the two vectors are concurrent.
func (a Vector) Concurrent(b Vector) bool {
	comp := a.Compare(b)
	return comp == ConcurrentGreater || comp == ConcurrentLesser
}
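A small usage sketch of the new Vector type (illustrative only; the counter IDs are arbitrary). Note that Update and Merge may modify the receiver in place, which is why the example takes a Copy before merging.

package main

import (
	"fmt"

	"github.com/syncthing/protocol"
)

func main() {
	// Device 1 modifies a file twice, device 2 modifies it once.
	var a, b protocol.Vector
	a = a.Update(1).Update(1) // {1: 2}
	b = b.Update(2)           // {2: 1}

	// Neither side has seen the other's change, so the vectors are concurrent.
	fmt.Println(a.Concurrent(b)) // true

	// Merging keeps the per-ID maximum; the result dominates both inputs.
	m := a.Copy().Merge(b) // {1: 2, 2: 1}
	fmt.Println(m.GreaterEqual(a), m.GreaterEqual(b)) // true true
}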
Godeps/_workspace/src/github.com/syncthing/protocol/vector_compare.go | 89 (generated, vendored, new file)

@@ -0,0 +1,89 @@
// Copyright (C) 2015 The Protocol Authors.

package protocol

// Ordering represents the relationship between two Vectors.
type Ordering int

const (
	Equal Ordering = iota
	Greater
	Lesser
	ConcurrentLesser
	ConcurrentGreater
)

// There's really no such thing as "concurrent lesser" and "concurrent
// greater" in version vectors, just "concurrent". But it's useful to be able
// to get a strict ordering between versions for stable sorts and so on, so we
// return both variants. The convenience method Concurrent() can be used to
// check for either case.

// Compare returns the Ordering that describes a's relation to b.
func (a Vector) Compare(b Vector) Ordering {
	var ai, bi int     // index into a and b
	var av, bv Counter // value at current index

	result := Equal

	for ai < len(a) || bi < len(b) {
		var aMissing, bMissing bool

		if ai < len(a) {
			av = a[ai]
		} else {
			av = Counter{}
			aMissing = true
		}

		if bi < len(b) {
			bv = b[bi]
		} else {
			bv = Counter{}
			bMissing = true
		}

		switch {
		case av.ID == bv.ID:
			// We have a counter value for each side
			if av.Value > bv.Value {
				if result == Lesser {
					return ConcurrentLesser
				}
				result = Greater
			} else if av.Value < bv.Value {
				if result == Greater {
					return ConcurrentGreater
				}
				result = Lesser
			}

		case !aMissing && av.ID < bv.ID || bMissing:
			// Value is missing on the b side
			if av.Value > 0 {
				if result == Lesser {
					return ConcurrentLesser
				}
				result = Greater
			}

		case !bMissing && bv.ID < av.ID || aMissing:
			// Value is missing on the a side
			if bv.Value > 0 {
				if result == Greater {
					return ConcurrentGreater
				}
				result = Lesser
			}
		}

		if ai < len(a) && (av.ID <= bv.ID || bMissing) {
			ai++
		}
		if bi < len(b) && (bv.ID <= av.ID || aMissing) {
			bi++
		}
	}

	return result
}
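To illustrate the ordering, a short sketch (values are arbitrary): each vector is ahead for one counter ID, so neither dominates, and Compare reports one of the Concurrent variants so that sorting stays deterministic.

package main

import (
	"fmt"

	"github.com/syncthing/protocol"
)

func main() {
	a := protocol.Vector{{ID: 1, Value: 2}, {ID: 2, Value: 1}}
	b := protocol.Vector{{ID: 1, Value: 1}, {ID: 2, Value: 2}}

	// a is ahead for ID 1, b is ahead for ID 2: a concurrent (conflicting) pair.
	fmt.Println(a.Compare(b) == protocol.ConcurrentGreater) // true
	fmt.Println(b.Compare(a) == protocol.ConcurrentLesser)  // true
	fmt.Println(a.Concurrent(b))                            // true
}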
Godeps/_workspace/src/github.com/syncthing/protocol/vector_compare_test.go | 249 (generated, vendored, new file)
@ -0,0 +1,249 @@
|
||||
// Copyright (C) 2015 The Protocol Authors.
|
||||
|
||||
package protocol
|
||||
|
||||
import (
|
||||
"math"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestCompare(t *testing.T) {
|
||||
testcases := []struct {
|
||||
a, b Vector
|
||||
r Ordering
|
||||
}{
|
||||
// Empty vectors are identical
|
||||
{Vector{}, Vector{}, Equal},
|
||||
{Vector{}, nil, Equal},
|
||||
{nil, Vector{}, Equal},
|
||||
{nil, Vector{Counter{42, 0}}, Equal},
|
||||
{Vector{}, Vector{Counter{42, 0}}, Equal},
|
||||
{Vector{Counter{42, 0}}, nil, Equal},
|
||||
{Vector{Counter{42, 0}}, Vector{}, Equal},
|
||||
|
||||
// Zero is the implied value for a missing Counter
|
||||
{
|
||||
Vector{Counter{42, 0}},
|
||||
Vector{Counter{77, 0}},
|
||||
Equal,
|
||||
},
|
||||
|
||||
// Equal vectors are equal
|
||||
{
|
||||
Vector{Counter{42, 33}},
|
||||
Vector{Counter{42, 33}},
|
||||
Equal,
|
||||
},
|
||||
{
|
||||
Vector{Counter{42, 33}, Counter{77, 24}},
|
||||
Vector{Counter{42, 33}, Counter{77, 24}},
|
||||
Equal,
|
||||
},
|
||||
|
||||
// These a-vectors are all greater than the b-vector
|
||||
{
|
||||
Vector{Counter{42, 1}},
|
||||
nil,
|
||||
Greater,
|
||||
},
|
||||
{
|
||||
Vector{Counter{42, 1}},
|
||||
Vector{},
|
||||
Greater,
|
||||
},
|
||||
{
|
||||
Vector{Counter{0, 1}},
|
||||
Vector{Counter{0, 0}},
|
||||
Greater,
|
||||
},
|
||||
{
|
||||
Vector{Counter{42, 1}},
|
||||
Vector{Counter{42, 0}},
|
||||
Greater,
|
||||
},
|
||||
{
|
||||
Vector{Counter{math.MaxUint64, 1}},
|
||||
Vector{Counter{math.MaxUint64, 0}},
|
||||
Greater,
|
||||
},
|
||||
{
|
||||
Vector{Counter{0, math.MaxUint64}},
|
||||
Vector{Counter{0, 0}},
|
||||
Greater,
|
||||
},
|
||||
{
|
||||
Vector{Counter{42, math.MaxUint64}},
|
||||
Vector{Counter{42, 0}},
|
||||
Greater,
|
||||
},
|
||||
{
|
||||
Vector{Counter{math.MaxUint64, math.MaxUint64}},
|
||||
Vector{Counter{math.MaxUint64, 0}},
|
||||
Greater,
|
||||
},
|
||||
{
|
||||
Vector{Counter{0, math.MaxUint64}},
|
||||
Vector{Counter{0, math.MaxUint64 - 1}},
|
||||
Greater,
|
||||
},
|
||||
{
|
||||
Vector{Counter{42, math.MaxUint64}},
|
||||
Vector{Counter{42, math.MaxUint64 - 1}},
|
||||
Greater,
|
||||
},
|
||||
{
|
||||
Vector{Counter{math.MaxUint64, math.MaxUint64}},
|
||||
Vector{Counter{math.MaxUint64, math.MaxUint64 - 1}},
|
||||
Greater,
|
||||
},
|
||||
{
|
||||
Vector{Counter{42, 2}},
|
||||
Vector{Counter{42, 1}},
|
||||
Greater,
|
||||
},
|
||||
{
|
||||
Vector{Counter{22, 22}, Counter{42, 2}},
|
||||
Vector{Counter{22, 22}, Counter{42, 1}},
|
||||
Greater,
|
||||
},
|
||||
{
|
||||
Vector{Counter{42, 2}, Counter{77, 3}},
|
||||
Vector{Counter{42, 1}, Counter{77, 3}},
|
||||
Greater,
|
||||
},
|
||||
{
|
||||
Vector{Counter{22, 22}, Counter{42, 2}, Counter{77, 3}},
|
||||
Vector{Counter{22, 22}, Counter{42, 1}, Counter{77, 3}},
|
||||
Greater,
|
||||
},
|
||||
{
|
||||
Vector{Counter{22, 23}, Counter{42, 2}, Counter{77, 4}},
|
||||
Vector{Counter{22, 22}, Counter{42, 1}, Counter{77, 3}},
|
||||
Greater,
|
||||
},
|
||||
|
||||
// These a-vectors are all lesser than the b-vector
|
||||
{nil, Vector{Counter{42, 1}}, Lesser},
|
||||
{Vector{}, Vector{Counter{42, 1}}, Lesser},
|
||||
{
|
||||
Vector{Counter{42, 0}},
|
||||
Vector{Counter{42, 1}},
|
||||
Lesser,
|
||||
},
|
||||
{
|
||||
Vector{Counter{42, 1}},
|
||||
Vector{Counter{42, 2}},
|
||||
Lesser,
|
||||
},
|
||||
{
|
||||
Vector{Counter{22, 22}, Counter{42, 1}},
|
||||
Vector{Counter{22, 22}, Counter{42, 2}},
|
||||
Lesser,
|
||||
},
|
||||
{
|
||||
Vector{Counter{42, 1}, Counter{77, 3}},
|
||||
Vector{Counter{42, 2}, Counter{77, 3}},
|
||||
Lesser,
|
||||
},
|
||||
{
|
||||
Vector{Counter{22, 22}, Counter{42, 1}, Counter{77, 3}},
|
||||
Vector{Counter{22, 22}, Counter{42, 2}, Counter{77, 3}},
|
||||
Lesser,
|
||||
},
|
||||
{
|
||||
Vector{Counter{22, 22}, Counter{42, 1}, Counter{77, 3}},
|
||||
Vector{Counter{22, 23}, Counter{42, 2}, Counter{77, 4}},
|
||||
Lesser,
|
||||
},
|
||||
|
||||
// These are all in conflict
|
||||
{
|
||||
Vector{Counter{42, 2}},
|
||||
Vector{Counter{43, 1}},
|
||||
ConcurrentGreater,
|
||||
},
|
||||
{
|
||||
Vector{Counter{43, 1}},
|
||||
Vector{Counter{42, 2}},
|
||||
ConcurrentLesser,
|
||||
},
|
||||
{
|
||||
Vector{Counter{22, 23}, Counter{42, 1}},
|
||||
Vector{Counter{22, 22}, Counter{42, 2}},
|
||||
ConcurrentGreater,
|
||||
},
|
||||
{
|
||||
Vector{Counter{22, 21}, Counter{42, 2}},
|
||||
Vector{Counter{22, 22}, Counter{42, 1}},
|
||||
ConcurrentLesser,
|
||||
},
|
||||
{
|
||||
Vector{Counter{22, 21}, Counter{42, 2}, Counter{43, 1}},
|
||||
Vector{Counter{20, 1}, Counter{22, 22}, Counter{42, 1}},
|
||||
ConcurrentLesser,
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testcases {
|
||||
// Test real Compare
|
||||
if r := tc.a.Compare(tc.b); r != tc.r {
|
||||
t.Errorf("%d: %+v.Compare(%+v) == %v (expected %v)", i, tc.a, tc.b, r, tc.r)
|
||||
}
|
||||
|
||||
// Test convenience functions
|
||||
switch tc.r {
|
||||
case Greater:
|
||||
if tc.a.Equal(tc.b) {
|
||||
t.Errorf("%+v == %+v", tc.a, tc.b)
|
||||
}
|
||||
if tc.a.Concurrent(tc.b) {
|
||||
t.Errorf("%+v concurrent %+v", tc.a, tc.b)
|
||||
}
|
||||
if !tc.a.GreaterEqual(tc.b) {
|
||||
t.Errorf("%+v not >= %+v", tc.a, tc.b)
|
||||
}
|
||||
if tc.a.LesserEqual(tc.b) {
|
||||
t.Errorf("%+v <= %+v", tc.a, tc.b)
|
||||
}
|
||||
case Lesser:
|
||||
if tc.a.Concurrent(tc.b) {
|
||||
t.Errorf("%+v concurrent %+v", tc.a, tc.b)
|
||||
}
|
||||
if tc.a.Equal(tc.b) {
|
||||
t.Errorf("%+v == %+v", tc.a, tc.b)
|
||||
}
|
||||
if tc.a.GreaterEqual(tc.b) {
|
||||
t.Errorf("%+v >= %+v", tc.a, tc.b)
|
||||
}
|
||||
if !tc.a.LesserEqual(tc.b) {
|
||||
t.Errorf("%+v not <= %+v", tc.a, tc.b)
|
||||
}
|
||||
case Equal:
|
||||
if tc.a.Concurrent(tc.b) {
|
||||
t.Errorf("%+v concurrent %+v", tc.a, tc.b)
|
||||
}
|
||||
if !tc.a.Equal(tc.b) {
|
||||
t.Errorf("%+v not == %+v", tc.a, tc.b)
|
||||
}
|
||||
if !tc.a.GreaterEqual(tc.b) {
|
||||
t.Errorf("%+v not <= %+v", tc.a, tc.b)
|
||||
}
|
||||
if !tc.a.LesserEqual(tc.b) {
|
||||
t.Errorf("%+v not <= %+v", tc.a, tc.b)
|
||||
}
|
||||
case ConcurrentLesser, ConcurrentGreater:
|
||||
if !tc.a.Concurrent(tc.b) {
|
||||
t.Errorf("%+v not concurrent %+v", tc.a, tc.b)
|
||||
}
|
||||
if tc.a.Equal(tc.b) {
|
||||
t.Errorf("%+v == %+v", tc.a, tc.b)
|
||||
}
|
||||
if tc.a.GreaterEqual(tc.b) {
|
||||
t.Errorf("%+v >= %+v", tc.a, tc.b)
|
||||
}
|
||||
if tc.a.LesserEqual(tc.b) {
|
||||
t.Errorf("%+v <= %+v", tc.a, tc.b)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
Godeps/_workspace/src/github.com/syncthing/protocol/vector_test.go | 122 (generated, vendored, new file)
@ -0,0 +1,122 @@
|
||||
// Copyright (C) 2015 The Protocol Authors.
|
||||
|
||||
package protocol
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestUpdate(t *testing.T) {
|
||||
var v Vector
|
||||
|
||||
// Append
|
||||
|
||||
v = v.Update(42)
|
||||
expected := Vector{Counter{42, 1}}
|
||||
|
||||
if v.Compare(expected) != Equal {
|
||||
t.Errorf("Update error, %+v != %+v", v, expected)
|
||||
}
|
||||
|
||||
// Insert at front
|
||||
|
||||
v = v.Update(36)
|
||||
expected = Vector{Counter{36, 1}, Counter{42, 1}}
|
||||
|
||||
if v.Compare(expected) != Equal {
|
||||
t.Errorf("Update error, %+v != %+v", v, expected)
|
||||
}
|
||||
|
||||
// Insert in middle
|
||||
|
||||
v = v.Update(37)
|
||||
expected = Vector{Counter{36, 1}, Counter{37, 1}, Counter{42, 1}}
|
||||
|
||||
if v.Compare(expected) != Equal {
|
||||
t.Errorf("Update error, %+v != %+v", v, expected)
|
||||
}
|
||||
|
||||
// Update existing
|
||||
|
||||
v = v.Update(37)
|
||||
expected = Vector{Counter{36, 1}, Counter{37, 2}, Counter{42, 1}}
|
||||
|
||||
if v.Compare(expected) != Equal {
|
||||
t.Errorf("Update error, %+v != %+v", v, expected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCopy(t *testing.T) {
|
||||
v0 := Vector{Counter{42, 1}}
|
||||
v1 := v0.Copy()
|
||||
v1.Update(42)
|
||||
if v0.Compare(v1) != Lesser {
|
||||
t.Errorf("Copy error, %+v should be ancestor of %+v", v0, v1)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMerge(t *testing.T) {
|
||||
testcases := []struct {
|
||||
a, b, m Vector
|
||||
}{
|
||||
// No-ops
|
||||
{
|
||||
Vector{},
|
||||
Vector{},
|
||||
Vector{},
|
||||
},
|
||||
{
|
||||
Vector{Counter{22, 1}, Counter{42, 1}},
|
||||
Vector{Counter{22, 1}, Counter{42, 1}},
|
||||
Vector{Counter{22, 1}, Counter{42, 1}},
|
||||
},
|
||||
|
||||
// Appends
|
||||
{
|
||||
Vector{},
|
||||
Vector{Counter{22, 1}, Counter{42, 1}},
|
||||
Vector{Counter{22, 1}, Counter{42, 1}},
|
||||
},
|
||||
{
|
||||
Vector{Counter{22, 1}},
|
||||
Vector{Counter{42, 1}},
|
||||
Vector{Counter{22, 1}, Counter{42, 1}},
|
||||
},
|
||||
{
|
||||
Vector{Counter{22, 1}},
|
||||
Vector{Counter{22, 1}, Counter{42, 1}},
|
||||
Vector{Counter{22, 1}, Counter{42, 1}},
|
||||
},
|
||||
|
||||
// Insert
|
||||
{
|
||||
Vector{Counter{22, 1}, Counter{42, 1}},
|
||||
Vector{Counter{22, 1}, Counter{23, 2}, Counter{42, 1}},
|
||||
Vector{Counter{22, 1}, Counter{23, 2}, Counter{42, 1}},
|
||||
},
|
||||
{
|
||||
Vector{Counter{42, 1}},
|
||||
Vector{Counter{22, 1}},
|
||||
Vector{Counter{22, 1}, Counter{42, 1}},
|
||||
},
|
||||
|
||||
// Update
|
||||
{
|
||||
Vector{Counter{22, 1}, Counter{42, 2}},
|
||||
Vector{Counter{22, 2}, Counter{42, 1}},
|
||||
Vector{Counter{22, 2}, Counter{42, 2}},
|
||||
},
|
||||
|
||||
// All of the above
|
||||
{
|
||||
Vector{Counter{10, 1}, Counter{20, 2}, Counter{30, 1}},
|
||||
Vector{Counter{5, 1}, Counter{10, 2}, Counter{15, 1}, Counter{20, 1}, Counter{25, 1}, Counter{35, 1}},
|
||||
Vector{Counter{5, 1}, Counter{10, 2}, Counter{15, 1}, Counter{20, 2}, Counter{25, 1}, Counter{30, 1}, Counter{35, 1}},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testcases {
|
||||
if m := tc.a.Merge(tc.b); m.Compare(tc.m) != Equal {
|
||||
t.Errorf("%d: %+v.Merge(%+v) == %+v (expected %+v)", i, tc.a, tc.b, m, tc.m)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
Godeps/_workspace/src/github.com/syncthing/protocol/vector_xdr.go | 38 (generated, vendored, new file)

@@ -0,0 +1,38 @@
// Copyright (C) 2015 The Protocol Authors.

package protocol

// This stuff is hacked up manually because genxdr doesn't support 'type
// Vector []Counter' declarations and it was tricky when I tried to add it...

type xdrWriter interface {
	WriteUint32(uint32) (int, error)
	WriteUint64(uint64) (int, error)
}
type xdrReader interface {
	ReadUint32() uint32
	ReadUint64() uint64
}

// EncodeXDRInto encodes the vector as an XDR object into the given XDR
// encoder.
func (v Vector) EncodeXDRInto(w xdrWriter) (int, error) {
	w.WriteUint32(uint32(len(v)))
	for i := range v {
		w.WriteUint64(v[i].ID)
		w.WriteUint64(v[i].Value)
	}
	return 4 + 16*len(v), nil
}

// DecodeXDRFrom decodes the XDR objects from the given reader into itself.
func (v *Vector) DecodeXDRFrom(r xdrReader) error {
	l := int(r.ReadUint32())
	n := make(Vector, l)
	for i := range n {
		n[i].ID = r.ReadUint64()
		n[i].Value = r.ReadUint64()
	}
	*v = n
	return nil
}
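A hedged round-trip sketch: the generated code elsewhere in this commit passes *xdr.Writer and *xdr.Reader from github.com/calmh/xdr into these methods, so the same should work from user code. The example values are arbitrary.

package main

import (
	"bytes"
	"fmt"

	"github.com/calmh/xdr"
	"github.com/syncthing/protocol"
)

func main() {
	v := protocol.Vector{{ID: 1, Value: 2}, {ID: 7, Value: 1}}

	// On the wire: a uint32 counter count, then an (ID, Value) uint64 pair
	// per counter, 4 + 16*len(v) bytes in total.
	var buf bytes.Buffer
	if _, err := v.EncodeXDRInto(xdr.NewWriter(&buf)); err != nil {
		panic(err)
	}

	var w protocol.Vector
	if err := w.DecodeXDRFrom(xdr.NewReader(&buf)); err != nil {
		panic(err)
	}
	fmt.Println(v.Equal(w)) // true
}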
build.sh | 2

@@ -118,7 +118,7 @@ case "${1:-default}" in
 			-e "STTRACE=$STTRACE" \
 			syncthing/build:latest \
 			sh -c './build.sh clean \
-				&& go vet ./cmd/... ./internal/... \
+				&& go tool vet -composites=false cmd/*/*.go internal/*/*.go \
 				&& ( golint ./cmd/... ; golint ./internal/... ) | egrep -v "comment on exported|should have comment" \
 				&& ./build.sh all \
 				&& STTRACE=all ./build.sh test-cov'
@@ -514,7 +514,7 @@ func syncthingMain() {
 		}
 	}
 
-	m := model.NewModel(cfg, myName, "syncthing", Version, ldb)
+	m := model.NewModel(cfg, myID, myName, "syncthing", Version, ldb)
 
 	sanityCheckFolders(cfg, m)
 
@@ -39,7 +39,7 @@ func TestSanityCheck(t *testing.T) {
 
 	// Case 1 - new folder, directory and marker created
 
-	m := model.NewModel(cfg, "device", "syncthing", "dev", ldb)
+	m := model.NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", ldb)
 	sanityCheckFolders(cfg, m)
 
 	if cfg.Folders()["folder"].Invalid != "" {
@@ -66,7 +66,7 @@ func TestSanityCheck(t *testing.T) {
 		Folders: []config.FolderConfiguration{fcfg},
 	})
 
-	m = model.NewModel(cfg, "device", "syncthing", "dev", ldb)
+	m = model.NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", ldb)
 	sanityCheckFolders(cfg, m)
 
 	if cfg.Folders()["folder"].Invalid != "" {
@@ -87,7 +87,7 @@ func TestSanityCheck(t *testing.T) {
 		{Name: "dummyfile"},
 	})
 
-	m = model.NewModel(cfg, "device", "syncthing", "dev", ldb)
+	m = model.NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", ldb)
 	sanityCheckFolders(cfg, m)
 
 	if cfg.Folders()["folder"].Invalid != "folder marker missing" {
@@ -101,7 +101,7 @@ func TestSanityCheck(t *testing.T) {
 		Folders: []config.FolderConfiguration{fcfg},
 	})
 
-	m = model.NewModel(cfg, "device", "syncthing", "dev", ldb)
+	m = model.NewModel(cfg, protocol.LocalDeviceID, "device", "syncthing", "dev", ldb)
 	sanityCheckFolders(cfg, m)
 
 	if cfg.Folders()["folder"].Invalid != "folder path missing" {
@ -17,7 +17,6 @@ import (
|
||||
"sync"
|
||||
|
||||
"github.com/syncthing/protocol"
|
||||
"github.com/syncthing/syncthing/internal/lamport"
|
||||
"github.com/syndtr/goleveldb/leveldb"
|
||||
"github.com/syndtr/goleveldb/leveldb/iterator"
|
||||
"github.com/syndtr/goleveldb/leveldb/opt"
|
||||
@ -49,7 +48,7 @@ const (
|
||||
)
|
||||
|
||||
type fileVersion struct {
|
||||
version int64
|
||||
version protocol.Vector
|
||||
device []byte
|
||||
}
|
||||
|
||||
@ -242,8 +241,7 @@ func ldbGenericReplace(db *leveldb.DB, folder, device []byte, fs []protocol.File
|
||||
}
|
||||
var ef FileInfoTruncated
|
||||
ef.UnmarshalXDR(dbi.Value())
|
||||
if fs[fsi].Version > ef.Version ||
|
||||
(fs[fsi].Version == ef.Version && fs[fsi].Flags != ef.Flags) {
|
||||
if !fs[fsi].Version.Equal(ef.Version) || fs[fsi].Flags != ef.Flags {
|
||||
if debugDB {
|
||||
l.Debugln("generic replace; differs - insert")
|
||||
}
|
||||
@ -315,7 +313,7 @@ func ldbReplace(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo) i
|
||||
})
|
||||
}
|
||||
|
||||
func ldbReplaceWithDelete(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo) int64 {
|
||||
func ldbReplaceWithDelete(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo, myID uint64) int64 {
|
||||
return ldbGenericReplace(db, folder, device, fs, func(db dbReader, batch dbWriter, folder, device, name []byte, dbi iterator.Iterator) int64 {
|
||||
var tf FileInfoTruncated
|
||||
err := tf.UnmarshalXDR(dbi.Value())
|
||||
@ -329,7 +327,7 @@ func ldbReplaceWithDelete(db *leveldb.DB, folder, device []byte, fs []protocol.F
|
||||
ts := clock(tf.LocalVersion)
|
||||
f := protocol.FileInfo{
|
||||
Name: tf.Name,
|
||||
Version: lamport.Default.Tick(tf.Version),
|
||||
Version: tf.Version.Update(myID),
|
||||
LocalVersion: ts,
|
||||
Flags: tf.Flags | protocol.FlagDeleted,
|
||||
Modified: tf.Modified,
|
||||
@ -394,7 +392,7 @@ func ldbUpdate(db *leveldb.DB, folder, device []byte, fs []protocol.FileInfo) in
|
||||
}
|
||||
// Flags might change without the version being bumped when we set the
|
||||
// invalid flag on an existing file.
|
||||
if ef.Version != f.Version || ef.Flags != f.Flags {
|
||||
if !ef.Version.Equal(f.Version) || ef.Flags != f.Flags {
|
||||
if lv := ldbInsert(batch, folder, device, f); lv > maxLocalVer {
|
||||
maxLocalVer = lv
|
||||
}
|
||||
@ -454,7 +452,7 @@ func ldbInsert(batch dbWriter, folder, device []byte, file protocol.FileInfo) in
|
||||
// ldbUpdateGlobal adds this device+version to the version list for the given
|
||||
// file. If the device is already present in the list, the version is updated.
|
||||
// If the file does not have an entry in the global list, it is created.
|
||||
func ldbUpdateGlobal(db dbReader, batch dbWriter, folder, device, file []byte, version int64) bool {
|
||||
func ldbUpdateGlobal(db dbReader, batch dbWriter, folder, device, file []byte, version protocol.Vector) bool {
|
||||
if debugDB {
|
||||
l.Debugf("update global; folder=%q device=%v file=%q version=%d", folder, protocol.DeviceIDFromBytes(device), file, version)
|
||||
}
|
||||
@ -465,10 +463,8 @@ func ldbUpdateGlobal(db dbReader, batch dbWriter, folder, device, file []byte, v
|
||||
}
|
||||
|
||||
var fl versionList
|
||||
nv := fileVersion{
|
||||
device: device,
|
||||
version: version,
|
||||
}
|
||||
|
||||
// Remove the device from the current version list
|
||||
if svl != nil {
|
||||
err = fl.UnmarshalXDR(svl)
|
||||
if err != nil {
|
||||
@ -477,7 +473,7 @@ func ldbUpdateGlobal(db dbReader, batch dbWriter, folder, device, file []byte, v
|
||||
|
||||
for i := range fl.versions {
|
||||
if bytes.Compare(fl.versions[i].device, device) == 0 {
|
||||
if fl.versions[i].version == version {
|
||||
if fl.versions[i].version.Equal(version) {
|
||||
// No need to do anything
|
||||
return false
|
||||
}
|
||||
@ -487,8 +483,15 @@ func ldbUpdateGlobal(db dbReader, batch dbWriter, folder, device, file []byte, v
|
||||
}
|
||||
}
|
||||
|
||||
nv := fileVersion{
|
||||
device: device,
|
||||
version: version,
|
||||
}
|
||||
for i := range fl.versions {
|
||||
if fl.versions[i].version <= version {
|
||||
// We compare against ConcurrentLesser as well here because we need
|
||||
// to enforce a consistent ordering of versions even in the case of
|
||||
// conflicts.
|
||||
if comp := fl.versions[i].version.Compare(version); comp == protocol.Equal || comp == protocol.Lesser || comp == protocol.ConcurrentLesser {
|
||||
t := append(fl.versions, fileVersion{})
|
||||
copy(t[i+1:], t[i:])
|
||||
t[i] = nv
|
||||
@ -776,7 +779,7 @@ func ldbAvailability(db *leveldb.DB, folder, file []byte) []protocol.DeviceID {

var devices []protocol.DeviceID
for _, v := range vl.versions {
if v.version != vl.versions[0].version {
if !v.version.Equal(vl.versions[0].version) {
break
}
n := protocol.DeviceIDFromBytes(v.device)
@ -808,7 +811,7 @@ func ldbWithNeed(db *leveldb.DB, folder, device []byte, truncate bool, fn Iterat
dbi := snap.NewIterator(&util.Range{Start: start, Limit: limit}, nil)
defer dbi.Release()

outer:
nextFile:
for dbi.Next() {
var vl versionList
err := vl.UnmarshalXDR(dbi.Value())
@ -822,12 +825,15 @@ outer:

have := false // If we have the file, any version
need := false // If we have a lower version of the file
var haveVersion int64
var haveVersion protocol.Vector
for _, v := range vl.versions {
if bytes.Compare(v.device, device) == 0 {
have = true
haveVersion = v.version
need = v.version < vl.versions[0].version
// XXX: This marks Concurrent (i.e. conflicting) changes as
// needs. Maybe we should do that, but it needs special
// handling in the puller.
need = !v.version.GreaterEqual(vl.versions[0].version)
break
}
}
@ -835,11 +841,12 @@ outer:
if need || !have {
name := globalKeyName(dbi.Key())
needVersion := vl.versions[0].version
inner:

nextVersion:
for i := range vl.versions {
if vl.versions[i].version != needVersion {
if !vl.versions[i].version.Equal(needVersion) {
// We haven't found a valid copy of the file with the needed version.
continue outer
continue nextFile
}
fk := deviceKey(folder, vl.versions[i].device, name)
if debugDB {
@ -866,12 +873,12 @@ outer:

if gf.IsInvalid() {
// The file is marked invalid for whatever reason, don't use it.
continue inner
continue nextVersion
}

if gf.IsDeleted() && !have {
// We don't need deleted files that we don't have
continue outer
continue nextFile
}

if debugDB {
@ -883,7 +890,7 @@ outer:
}

// This file is handled, no need to look further in the version list
continue outer
continue nextFile
}
}
}
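The need calculation in ldbWithNeed above boils down to "is my copy at least as new as the global one". A rough stand-alone sketch of a GreaterEqual check with that meaning, again using a simplified vector type rather than the real protocol.Vector:

package main

import "fmt"

// Counter and Vector are the same simplified shapes as in the earlier sketch;
// the real type is protocol.Vector.
type Counter struct {
	ID    uint64
	Value uint64
}

type Vector []Counter

func (v Vector) get(id uint64) uint64 {
	for _, c := range v {
		if c.ID == id {
			return c.Value
		}
	}
	return 0
}

// GreaterEqual reports whether v is the same as, or a descendant of, other:
// no counter in other may be ahead of the matching counter in v. Concurrent
// vectors are therefore not greater-equal, which is why the loop above counts
// a conflicting local copy as still needed (the XXX comment in the diff).
func (v Vector) GreaterEqual(other Vector) bool {
	for _, c := range other {
		if v.get(c.ID) < c.Value {
			return false
		}
	}
	return true
}

func main() {
	local := Vector{{ID: 1, Value: 2}}
	global := Vector{{ID: 1, Value: 1}, {ID: 2, Value: 1}}
	need := !local.GreaterEqual(global)
	fmt.Println(need) // true: the global version has changes we lack
}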
@ -18,9 +18,9 @@ fileVersion Structure:
 0                   1                   2                   3
 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                                                               |
+                       version (64 bits)                       +
|                                                               |
/                                                               /
\                        Vector Structure                       \
/                                                               /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                        Length of device                       |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@ -31,7 +31,7 @@ fileVersion Structure:


struct fileVersion {
hyper version;
Vector version;
opaque device<>;
}

@ -39,7 +39,7 @@ struct fileVersion {

func (o fileVersion) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.encodeXDR(xw)
return o.EncodeXDRInto(xw)
}

func (o fileVersion) MarshalXDR() ([]byte, error) {
@ -57,29 +57,32 @@ func (o fileVersion) MustMarshalXDR() []byte {
func (o fileVersion) AppendXDR(bs []byte) ([]byte, error) {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
_, err := o.encodeXDR(xw)
_, err := o.EncodeXDRInto(xw)
return []byte(aw), err
}

func (o fileVersion) encodeXDR(xw *xdr.Writer) (int, error) {
xw.WriteUint64(uint64(o.version))
func (o fileVersion) EncodeXDRInto(xw *xdr.Writer) (int, error) {
_, err := o.version.EncodeXDRInto(xw)
if err != nil {
return xw.Tot(), err
}
xw.WriteBytes(o.device)
return xw.Tot(), xw.Error()
}

func (o *fileVersion) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.decodeXDR(xr)
return o.DecodeXDRFrom(xr)
}

func (o *fileVersion) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.decodeXDR(xr)
return o.DecodeXDRFrom(xr)
}

func (o *fileVersion) decodeXDR(xr *xdr.Reader) error {
o.version = int64(xr.ReadUint64())
func (o *fileVersion) DecodeXDRFrom(xr *xdr.Reader) error {
(&o.version).DecodeXDRFrom(xr)
o.device = xr.ReadBytes()
return xr.Error()
}
@ -107,7 +110,7 @@ struct versionList {

func (o versionList) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.encodeXDR(xw)
return o.EncodeXDRInto(xw)
}

func (o versionList) MarshalXDR() ([]byte, error) {
@ -125,14 +128,14 @@ func (o versionList) MustMarshalXDR() []byte {
func (o versionList) AppendXDR(bs []byte) ([]byte, error) {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
_, err := o.encodeXDR(xw)
_, err := o.EncodeXDRInto(xw)
return []byte(aw), err
}

func (o versionList) encodeXDR(xw *xdr.Writer) (int, error) {
func (o versionList) EncodeXDRInto(xw *xdr.Writer) (int, error) {
xw.WriteUint32(uint32(len(o.versions)))
for i := range o.versions {
_, err := o.versions[i].encodeXDR(xw)
_, err := o.versions[i].EncodeXDRInto(xw)
if err != nil {
return xw.Tot(), err
}
@ -142,20 +145,20 @@ func (o versionList) encodeXDR(xw *xdr.Writer) (int, error) {

func (o *versionList) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.decodeXDR(xr)
return o.DecodeXDRFrom(xr)
}

func (o *versionList) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.decodeXDR(xr)
return o.DecodeXDRFrom(xr)
}

func (o *versionList) decodeXDR(xr *xdr.Reader) error {
func (o *versionList) DecodeXDRFrom(xr *xdr.Reader) error {
_versionsSize := int(xr.ReadUint32())
o.versions = make([]fileVersion, _versionsSize)
for i := range o.versions {
(&o.versions[i]).decodeXDR(xr)
(&o.versions[i]).DecodeXDRFrom(xr)
}
return xr.Error()
}
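Since fileVersion now embeds a Vector instead of a 64-bit integer, the cheapest way to convince yourself the generated encoder and decoder still agree is a round-trip test inside the internal/db package. A sketch of such a test, using only identifiers that appear in this diff (fileVersion, MarshalXDR, UnmarshalXDR, protocol.Vector); it is an illustration, not code from the commit:

package db

import (
	"bytes"
	"testing"

	"github.com/syncthing/protocol"
)

// TestFileVersionRoundTrip encodes a fileVersion carrying a version vector,
// decodes it again, and checks that nothing was lost.
func TestFileVersionRoundTrip(t *testing.T) {
	in := fileVersion{
		version: protocol.Vector{{ID: 42, Value: 1000}},
		device:  []byte{0xde, 0xad, 0xbe, 0xef},
	}

	bs, err := in.MarshalXDR() // wraps EncodeXDRInto
	if err != nil {
		t.Fatal(err)
	}

	var out fileVersion
	if err := out.UnmarshalXDR(bs); err != nil { // wraps DecodeXDRFrom
		t.Fatal(err)
	}

	if !out.version.Equal(in.version) || !bytes.Equal(out.device, in.device) {
		t.Errorf("round trip mismatch: got %v, want %v", out, in)
	}
}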
@ -16,7 +16,6 @@ import (
"sync"

"github.com/syncthing/protocol"
"github.com/syncthing/syncthing/internal/lamport"
"github.com/syncthing/syncthing/internal/osutil"
"github.com/syndtr/goleveldb/leveldb"
)
@ -61,7 +60,6 @@ func NewFileSet(folder string, db *leveldb.DB) *FileSet {
if f.LocalVersion > s.localVersion[deviceID] {
s.localVersion[deviceID] = f.LocalVersion
}
lamport.Default.Tick(f.Version)
return true
})
if debug {
@ -90,14 +88,14 @@ func (s *FileSet) Replace(device protocol.DeviceID, fs []protocol.FileInfo) {
}
}

func (s *FileSet) ReplaceWithDelete(device protocol.DeviceID, fs []protocol.FileInfo) {
func (s *FileSet) ReplaceWithDelete(device protocol.DeviceID, fs []protocol.FileInfo, myID uint64) {
if debug {
l.Debugf("%s ReplaceWithDelete(%v, [%d])", s.folder, device, len(fs))
}
normalizeFilenames(fs)
s.mutex.Lock()
defer s.mutex.Unlock()
if lv := ldbReplaceWithDelete(s.db, []byte(s.folder), device[:], fs); lv > s.localVersion[device] {
if lv := ldbReplaceWithDelete(s.db, []byte(s.folder), device[:], fs, myID); lv > s.localVersion[device] {
s.localVersion[device] = lv
}
if device == protocol.LocalDeviceID {
@ -118,7 +116,7 @@ func (s *FileSet) Update(device protocol.DeviceID, fs []protocol.FileInfo) {
updates := make([]protocol.FileInfo, 0, len(fs))
for _, newFile := range fs {
existingFile, ok := ldbGet(s.db, []byte(s.folder), device[:], []byte(newFile.Name))
if !ok || existingFile.Version <= newFile.Version {
if !ok || !existingFile.Version.Equal(newFile.Version) {
discards = append(discards, existingFile)
updates = append(updates, newFile)
}
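For code outside the db package the visible change in set.go is the extra short-ID argument to ReplaceWithDelete, so that the delete records it synthesizes are attributed to the local device in the version vector. A minimal usage sketch, mirroring the calls made in the tests that follow:

package main

import (
	"log"

	"github.com/syncthing/protocol"
	"github.com/syncthing/syncthing/internal/db"
	"github.com/syndtr/goleveldb/leveldb"
	"github.com/syndtr/goleveldb/leveldb/storage"
)

func main() {
	const myID = 1 // the local device's short ID, as in the tests below

	ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
	if err != nil {
		log.Fatal(err)
	}

	s := db.NewFileSet("example", ldb)
	files := []protocol.FileInfo{
		{Name: "a", Version: protocol.Vector{{ID: myID, Value: 1}}},
	}

	// The extra argument tells ReplaceWithDelete which device's counter to
	// bump when it turns a missing file into a delete record.
	s.ReplaceWithDelete(protocol.LocalDeviceID, files, myID)
}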
@ -15,7 +15,6 @@ import (

"github.com/syncthing/protocol"
"github.com/syncthing/syncthing/internal/db"
"github.com/syncthing/syncthing/internal/lamport"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/storage"
)
@ -27,6 +26,8 @@ func init() {
remoteDevice1, _ = protocol.DeviceIDFromString("I6KAH76-66SLLLB-5PFXSOA-UFJCDZC-YAOMLEK-CP2GB32-BV5RQST-3PSROAU")
}

const myID = 1

func genBlocks(n int) []protocol.BlockInfo {
b := make([]protocol.BlockInfo, n)
for i := range b {
@ -95,7 +96,6 @@ func (l fileList) String() string {
}

func TestGlobalSet(t *testing.T) {
lamport.Default = lamport.Clock{}

ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
if err != nil {
@ -105,34 +105,34 @@ func TestGlobalSet(t *testing.T) {
m := db.NewFileSet("test", ldb)

local0 := fileList{
protocol.FileInfo{Name: "a", Version: 1000, Blocks: genBlocks(1)},
protocol.FileInfo{Name: "b", Version: 1000, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "c", Version: 1000, Blocks: genBlocks(3)},
protocol.FileInfo{Name: "d", Version: 1000, Blocks: genBlocks(4)},
protocol.FileInfo{Name: "z", Version: 1000, Blocks: genBlocks(8)},
protocol.FileInfo{Name: "a", Version: protocol.Vector{{ID: myID, Value: 1000}}, Blocks: genBlocks(1)},
protocol.FileInfo{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1000}}, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1000}}, Blocks: genBlocks(3)},
protocol.FileInfo{Name: "d", Version: protocol.Vector{{ID: myID, Value: 1000}}, Blocks: genBlocks(4)},
protocol.FileInfo{Name: "z", Version: protocol.Vector{{ID: myID, Value: 1000}}, Blocks: genBlocks(8)},
}
local1 := fileList{
protocol.FileInfo{Name: "a", Version: 1000, Blocks: genBlocks(1)},
protocol.FileInfo{Name: "b", Version: 1000, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "c", Version: 1000, Blocks: genBlocks(3)},
protocol.FileInfo{Name: "d", Version: 1000, Blocks: genBlocks(4)},
protocol.FileInfo{Name: "a", Version: protocol.Vector{{ID: myID, Value: 1000}}, Blocks: genBlocks(1)},
protocol.FileInfo{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1000}}, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1000}}, Blocks: genBlocks(3)},
protocol.FileInfo{Name: "d", Version: protocol.Vector{{ID: myID, Value: 1000}}, Blocks: genBlocks(4)},
}
localTot := fileList{
local0[0],
local0[1],
local0[2],
local0[3],
protocol.FileInfo{Name: "z", Version: 1001, Flags: protocol.FlagDeleted},
protocol.FileInfo{Name: "z", Version: protocol.Vector{{ID: myID, Value: 1001}}, Flags: protocol.FlagDeleted},
}

remote0 := fileList{
protocol.FileInfo{Name: "a", Version: 1000, Blocks: genBlocks(1)},
protocol.FileInfo{Name: "b", Version: 1000, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "c", Version: 1002, Blocks: genBlocks(5)},
protocol.FileInfo{Name: "a", Version: protocol.Vector{{ID: myID, Value: 1000}}, Blocks: genBlocks(1)},
protocol.FileInfo{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1000}}, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1001}}, Blocks: genBlocks(5)},
}
remote1 := fileList{
protocol.FileInfo{Name: "b", Version: 1001, Blocks: genBlocks(6)},
protocol.FileInfo{Name: "e", Version: 1000, Blocks: genBlocks(7)},
protocol.FileInfo{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1001}}, Blocks: genBlocks(6)},
protocol.FileInfo{Name: "e", Version: protocol.Vector{{ID: myID, Value: 1000}}, Blocks: genBlocks(7)},
}
remoteTot := fileList{
remote0[0],
@ -160,8 +160,8 @@ func TestGlobalSet(t *testing.T) {
local0[3],
}

m.ReplaceWithDelete(protocol.LocalDeviceID, local0)
m.ReplaceWithDelete(protocol.LocalDeviceID, local1)
m.ReplaceWithDelete(protocol.LocalDeviceID, local0, myID)
m.ReplaceWithDelete(protocol.LocalDeviceID, local1, myID)
m.Replace(remoteDevice0, remote0)
m.Update(remoteDevice0, remote1)

@ -256,8 +256,6 @@ func TestGlobalSet(t *testing.T) {
}

func TestNeedWithInvalid(t *testing.T) {
lamport.Default = lamport.Clock{}

ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
if err != nil {
t.Fatal(err)
@ -266,26 +264,26 @@ func TestNeedWithInvalid(t *testing.T) {
s := db.NewFileSet("test", ldb)

localHave := fileList{
protocol.FileInfo{Name: "a", Version: 1000, Blocks: genBlocks(1)},
protocol.FileInfo{Name: "a", Version: protocol.Vector{{ID: myID, Value: 1000}}, Blocks: genBlocks(1)},
}
remote0Have := fileList{
protocol.FileInfo{Name: "b", Version: 1001, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "c", Version: 1002, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
protocol.FileInfo{Name: "d", Version: 1003, Blocks: genBlocks(7)},
protocol.FileInfo{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1001}}, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1002}}, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
protocol.FileInfo{Name: "d", Version: protocol.Vector{{ID: myID, Value: 1003}}, Blocks: genBlocks(7)},
}
remote1Have := fileList{
protocol.FileInfo{Name: "c", Version: 1002, Blocks: genBlocks(7)},
protocol.FileInfo{Name: "d", Version: 1003, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
protocol.FileInfo{Name: "e", Version: 1004, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
protocol.FileInfo{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1002}}, Blocks: genBlocks(7)},
protocol.FileInfo{Name: "d", Version: protocol.Vector{{ID: myID, Value: 1003}}, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
protocol.FileInfo{Name: "e", Version: protocol.Vector{{ID: myID, Value: 1004}}, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
}

expectedNeed := fileList{
protocol.FileInfo{Name: "b", Version: 1001, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "c", Version: 1002, Blocks: genBlocks(7)},
protocol.FileInfo{Name: "d", Version: 1003, Blocks: genBlocks(7)},
protocol.FileInfo{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1001}}, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1002}}, Blocks: genBlocks(7)},
protocol.FileInfo{Name: "d", Version: protocol.Vector{{ID: myID, Value: 1003}}, Blocks: genBlocks(7)},
}

s.ReplaceWithDelete(protocol.LocalDeviceID, localHave)
s.ReplaceWithDelete(protocol.LocalDeviceID, localHave, myID)
s.Replace(remoteDevice0, remote0Have)
s.Replace(remoteDevice1, remote1Have)

@ -298,8 +296,6 @@ func TestNeedWithInvalid(t *testing.T) {
}

func TestUpdateToInvalid(t *testing.T) {
lamport.Default = lamport.Clock{}

ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
if err != nil {
t.Fatal(err)
@ -308,13 +304,13 @@ func TestUpdateToInvalid(t *testing.T) {
s := db.NewFileSet("test", ldb)

localHave := fileList{
protocol.FileInfo{Name: "a", Version: 1000, Blocks: genBlocks(1)},
protocol.FileInfo{Name: "b", Version: 1001, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "c", Version: 1002, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
protocol.FileInfo{Name: "d", Version: 1003, Blocks: genBlocks(7)},
protocol.FileInfo{Name: "a", Version: protocol.Vector{{ID: myID, Value: 1000}}, Blocks: genBlocks(1)},
protocol.FileInfo{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1001}}, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1002}}, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
protocol.FileInfo{Name: "d", Version: protocol.Vector{{ID: myID, Value: 1003}}, Blocks: genBlocks(7)},
}

s.ReplaceWithDelete(protocol.LocalDeviceID, localHave)
s.ReplaceWithDelete(protocol.LocalDeviceID, localHave, myID)

have := fileList(haveList(s, protocol.LocalDeviceID))
sort.Sort(have)
@ -323,7 +319,7 @@ func TestUpdateToInvalid(t *testing.T) {
t.Errorf("Have incorrect before invalidation;\n A: %v !=\n E: %v", have, localHave)
}

localHave[1] = protocol.FileInfo{Name: "b", Version: 1001, Flags: protocol.FlagInvalid}
localHave[1] = protocol.FileInfo{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1001}}, Flags: protocol.FlagInvalid}
s.Update(protocol.LocalDeviceID, localHave[1:2])

have = fileList(haveList(s, protocol.LocalDeviceID))
@ -335,8 +331,6 @@ func TestUpdateToInvalid(t *testing.T) {
}

func TestInvalidAvailability(t *testing.T) {
lamport.Default = lamport.Clock{}

ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
if err != nil {
t.Fatal(err)
@ -345,16 +339,16 @@ func TestInvalidAvailability(t *testing.T) {
s := db.NewFileSet("test", ldb)

remote0Have := fileList{
protocol.FileInfo{Name: "both", Version: 1001, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "r1only", Version: 1002, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
protocol.FileInfo{Name: "r0only", Version: 1003, Blocks: genBlocks(7)},
protocol.FileInfo{Name: "none", Version: 1004, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
protocol.FileInfo{Name: "both", Version: protocol.Vector{{ID: myID, Value: 1001}}, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "r1only", Version: protocol.Vector{{ID: myID, Value: 1002}}, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
protocol.FileInfo{Name: "r0only", Version: protocol.Vector{{ID: myID, Value: 1003}}, Blocks: genBlocks(7)},
protocol.FileInfo{Name: "none", Version: protocol.Vector{{ID: myID, Value: 1004}}, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
}
remote1Have := fileList{
protocol.FileInfo{Name: "both", Version: 1001, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "r1only", Version: 1002, Blocks: genBlocks(7)},
protocol.FileInfo{Name: "r0only", Version: 1003, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
protocol.FileInfo{Name: "none", Version: 1004, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
protocol.FileInfo{Name: "both", Version: protocol.Vector{{ID: myID, Value: 1001}}, Blocks: genBlocks(2)},
protocol.FileInfo{Name: "r1only", Version: protocol.Vector{{ID: myID, Value: 1002}}, Blocks: genBlocks(7)},
protocol.FileInfo{Name: "r0only", Version: protocol.Vector{{ID: myID, Value: 1003}}, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
protocol.FileInfo{Name: "none", Version: protocol.Vector{{ID: myID, Value: 1004}}, Blocks: genBlocks(5), Flags: protocol.FlagInvalid},
}

s.Replace(remoteDevice0, remote0Have)
@ -383,17 +377,16 @@ func TestLocalDeleted(t *testing.T) {
t.Fatal(err)
}
m := db.NewFileSet("test", ldb)
lamport.Default = lamport.Clock{}

local1 := []protocol.FileInfo{
{Name: "a", Version: 1000},
{Name: "b", Version: 1000},
{Name: "c", Version: 1000},
{Name: "d", Version: 1000},
{Name: "z", Version: 1000, Flags: protocol.FlagDirectory},
{Name: "a", Version: protocol.Vector{{ID: myID, Value: 1000}}},
{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1000}}},
{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1000}}},
{Name: "d", Version: protocol.Vector{{ID: myID, Value: 1000}}},
{Name: "z", Version: protocol.Vector{{ID: myID, Value: 1000}}, Flags: protocol.FlagDirectory},
}

m.ReplaceWithDelete(protocol.LocalDeviceID, local1)
m.ReplaceWithDelete(protocol.LocalDeviceID, local1, myID)

m.ReplaceWithDelete(protocol.LocalDeviceID, []protocol.FileInfo{
local1[0],
@ -401,25 +394,25 @@ func TestLocalDeleted(t *testing.T) {
local1[2],
local1[3],
local1[4],
})
}, myID)
m.ReplaceWithDelete(protocol.LocalDeviceID, []protocol.FileInfo{
local1[0],
local1[2],
// [3] removed
local1[4],
})
}, myID)
m.ReplaceWithDelete(protocol.LocalDeviceID, []protocol.FileInfo{
local1[0],
local1[2],
// [4] removed
})
}, myID)

expectedGlobal1 := []protocol.FileInfo{
local1[0],
{Name: "b", Version: 1001, Flags: protocol.FlagDeleted},
{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1001}}, Flags: protocol.FlagDeleted},
local1[2],
{Name: "d", Version: 1002, Flags: protocol.FlagDeleted},
{Name: "z", Version: 1003, Flags: protocol.FlagDeleted | protocol.FlagDirectory},
{Name: "d", Version: protocol.Vector{{ID: myID, Value: 1001}}, Flags: protocol.FlagDeleted},
{Name: "z", Version: protocol.Vector{{ID: myID, Value: 1001}}, Flags: protocol.FlagDeleted | protocol.FlagDirectory},
}

g := globalList(m)
@ -433,14 +426,14 @@ func TestLocalDeleted(t *testing.T) {
m.ReplaceWithDelete(protocol.LocalDeviceID, []protocol.FileInfo{
local1[0],
// [2] removed
})
}, myID)

expectedGlobal2 := []protocol.FileInfo{
local1[0],
{Name: "b", Version: 1001, Flags: protocol.FlagDeleted},
{Name: "c", Version: 1004, Flags: protocol.FlagDeleted},
{Name: "d", Version: 1002, Flags: protocol.FlagDeleted},
{Name: "z", Version: 1003, Flags: protocol.FlagDeleted | protocol.FlagDirectory},
{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1001}}, Flags: protocol.FlagDeleted},
{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1001}}, Flags: protocol.FlagDeleted},
{Name: "d", Version: protocol.Vector{{ID: myID, Value: 1001}}, Flags: protocol.FlagDeleted},
{Name: "z", Version: protocol.Vector{{ID: myID, Value: 1001}}, Flags: protocol.FlagDeleted | protocol.FlagDirectory},
}

g = globalList(m)
@ -460,20 +453,20 @@ func Benchmark10kReplace(b *testing.B) {

var local []protocol.FileInfo
for i := 0; i < 10000; i++ {
local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 1000})
local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
}

b.ResetTimer()
for i := 0; i < b.N; i++ {
m := db.NewFileSet("test", ldb)
m.ReplaceWithDelete(protocol.LocalDeviceID, local)
m.ReplaceWithDelete(protocol.LocalDeviceID, local, myID)
}
}

func Benchmark10kUpdateChg(b *testing.B) {
var remote []protocol.FileInfo
for i := 0; i < 10000; i++ {
remote = append(remote, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 1000})
remote = append(remote, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
}

ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
@ -486,16 +479,16 @@ func Benchmark10kUpdateChg(b *testing.B) {

var local []protocol.FileInfo
for i := 0; i < 10000; i++ {
local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 1000})
local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
}

m.ReplaceWithDelete(protocol.LocalDeviceID, local)
m.ReplaceWithDelete(protocol.LocalDeviceID, local, myID)

b.ResetTimer()
for i := 0; i < b.N; i++ {
b.StopTimer()
for j := range local {
local[j].Version++
local[j].Version = local[j].Version.Update(myID)
}
b.StartTimer()
m.Update(protocol.LocalDeviceID, local)
@ -505,7 +498,7 @@ func Benchmark10kUpdateChg(b *testing.B) {
func Benchmark10kUpdateSme(b *testing.B) {
var remote []protocol.FileInfo
for i := 0; i < 10000; i++ {
remote = append(remote, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 1000})
remote = append(remote, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
}

ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
@ -517,10 +510,10 @@ func Benchmark10kUpdateSme(b *testing.B) {

var local []protocol.FileInfo
for i := 0; i < 10000; i++ {
local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 1000})
local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
}

m.ReplaceWithDelete(protocol.LocalDeviceID, local)
m.ReplaceWithDelete(protocol.LocalDeviceID, local, myID)

b.ResetTimer()
for i := 0; i < b.N; i++ {
@ -531,7 +524,7 @@ func Benchmark10kUpdateSme(b *testing.B) {
func Benchmark10kNeed2k(b *testing.B) {
var remote []protocol.FileInfo
for i := 0; i < 10000; i++ {
remote = append(remote, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 1000})
remote = append(remote, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
}

ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
@ -544,13 +537,13 @@ func Benchmark10kNeed2k(b *testing.B) {

var local []protocol.FileInfo
for i := 0; i < 8000; i++ {
local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 1000})
local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
}
for i := 8000; i < 10000; i++ {
local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 980})
local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{1, 980}}})
}

m.ReplaceWithDelete(protocol.LocalDeviceID, local)
m.ReplaceWithDelete(protocol.LocalDeviceID, local, myID)

b.ResetTimer()
for i := 0; i < b.N; i++ {
@ -564,7 +557,7 @@ func Benchmark10kNeed2k(b *testing.B) {
func Benchmark10kHaveFullList(b *testing.B) {
var remote []protocol.FileInfo
for i := 0; i < 10000; i++ {
remote = append(remote, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 1000})
remote = append(remote, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
}

ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
@ -577,13 +570,13 @@ func Benchmark10kHaveFullList(b *testing.B) {

var local []protocol.FileInfo
for i := 0; i < 2000; i++ {
local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 1000})
local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
}
for i := 2000; i < 10000; i++ {
local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 980})
local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{1, 980}}})
}

m.ReplaceWithDelete(protocol.LocalDeviceID, local)
m.ReplaceWithDelete(protocol.LocalDeviceID, local, myID)

b.ResetTimer()
for i := 0; i < b.N; i++ {
@ -597,7 +590,7 @@ func Benchmark10kHaveFullList(b *testing.B) {
func Benchmark10kGlobal(b *testing.B) {
var remote []protocol.FileInfo
for i := 0; i < 10000; i++ {
remote = append(remote, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 1000})
remote = append(remote, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
}

ldb, err := leveldb.Open(storage.NewMemStorage(), nil)
@ -610,13 +603,13 @@ func Benchmark10kGlobal(b *testing.B) {

var local []protocol.FileInfo
for i := 0; i < 2000; i++ {
local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 1000})
local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{ID: myID, Value: 1000}}})
}
for i := 2000; i < 10000; i++ {
local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: 980})
local = append(local, protocol.FileInfo{Name: fmt.Sprintf("file%d", i), Version: protocol.Vector{{1, 980}}})
}

m.ReplaceWithDelete(protocol.LocalDeviceID, local)
m.ReplaceWithDelete(protocol.LocalDeviceID, local, myID)

b.ResetTimer()
for i := 0; i < b.N; i++ {
@ -636,20 +629,20 @@ func TestGlobalReset(t *testing.T) {
m := db.NewFileSet("test", ldb)

local := []protocol.FileInfo{
{Name: "a", Version: 1000},
{Name: "b", Version: 1000},
{Name: "c", Version: 1000},
{Name: "d", Version: 1000},
{Name: "a", Version: protocol.Vector{{ID: myID, Value: 1000}}},
{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1000}}},
{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1000}}},
{Name: "d", Version: protocol.Vector{{ID: myID, Value: 1000}}},
}

remote := []protocol.FileInfo{
{Name: "a", Version: 1000},
{Name: "b", Version: 1001},
{Name: "c", Version: 1002},
{Name: "e", Version: 1000},
{Name: "a", Version: protocol.Vector{{ID: myID, Value: 1000}}},
{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1001}}},
{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1002}}},
{Name: "e", Version: protocol.Vector{{ID: myID, Value: 1000}}},
}

m.ReplaceWithDelete(protocol.LocalDeviceID, local)
m.ReplaceWithDelete(protocol.LocalDeviceID, local, myID)
g := globalList(m)
sort.Sort(fileList(g))

@ -677,26 +670,26 @@ func TestNeed(t *testing.T) {
m := db.NewFileSet("test", ldb)

local := []protocol.FileInfo{
{Name: "a", Version: 1000},
{Name: "b", Version: 1000},
{Name: "c", Version: 1000},
{Name: "d", Version: 1000},
{Name: "a", Version: protocol.Vector{{ID: myID, Value: 1000}}},
{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1000}}},
{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1000}}},
{Name: "d", Version: protocol.Vector{{ID: myID, Value: 1000}}},
}

remote := []protocol.FileInfo{
{Name: "a", Version: 1000},
{Name: "b", Version: 1001},
{Name: "c", Version: 1002},
{Name: "e", Version: 1000},
{Name: "a", Version: protocol.Vector{{ID: myID, Value: 1000}}},
{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1001}}},
{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1002}}},
{Name: "e", Version: protocol.Vector{{ID: myID, Value: 1000}}},
}

shouldNeed := []protocol.FileInfo{
{Name: "b", Version: 1001},
{Name: "c", Version: 1002},
{Name: "e", Version: 1000},
{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1001}}},
{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1002}}},
{Name: "e", Version: protocol.Vector{{ID: myID, Value: 1000}}},
}

m.ReplaceWithDelete(protocol.LocalDeviceID, local)
m.ReplaceWithDelete(protocol.LocalDeviceID, local, myID)
m.Replace(remoteDevice0, remote)

need := needList(m, protocol.LocalDeviceID)
@ -718,30 +711,30 @@ func TestLocalVersion(t *testing.T) {
m := db.NewFileSet("test", ldb)

local1 := []protocol.FileInfo{
{Name: "a", Version: 1000},
{Name: "b", Version: 1000},
{Name: "c", Version: 1000},
{Name: "d", Version: 1000},
{Name: "a", Version: protocol.Vector{{ID: myID, Value: 1000}}},
{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1000}}},
{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1000}}},
{Name: "d", Version: protocol.Vector{{ID: myID, Value: 1000}}},
}

local2 := []protocol.FileInfo{
local1[0],
// [1] deleted
local1[2],
{Name: "d", Version: 1002},
{Name: "e", Version: 1000},
{Name: "d", Version: protocol.Vector{{ID: myID, Value: 1002}}},
{Name: "e", Version: protocol.Vector{{ID: myID, Value: 1000}}},
}

m.ReplaceWithDelete(protocol.LocalDeviceID, local1)
m.ReplaceWithDelete(protocol.LocalDeviceID, local1, myID)
c0 := m.LocalVersion(protocol.LocalDeviceID)

m.ReplaceWithDelete(protocol.LocalDeviceID, local2)
m.ReplaceWithDelete(protocol.LocalDeviceID, local2, myID)
c1 := m.LocalVersion(protocol.LocalDeviceID)
if !(c1 > c0) {
t.Fatal("Local version number should have incremented")
}

m.ReplaceWithDelete(protocol.LocalDeviceID, local2)
m.ReplaceWithDelete(protocol.LocalDeviceID, local2, myID)
c2 := m.LocalVersion(protocol.LocalDeviceID)
if c2 != c1 {
t.Fatal("Local version number should be unchanged")
@ -756,17 +749,17 @@ func TestListDropFolder(t *testing.T) {

s0 := db.NewFileSet("test0", ldb)
local1 := []protocol.FileInfo{
{Name: "a", Version: 1000},
{Name: "b", Version: 1000},
{Name: "c", Version: 1000},
{Name: "a", Version: protocol.Vector{{ID: myID, Value: 1000}}},
{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1000}}},
{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1000}}},
}
s0.Replace(protocol.LocalDeviceID, local1)

s1 := db.NewFileSet("test1", ldb)
local2 := []protocol.FileInfo{
{Name: "d", Version: 1002},
{Name: "e", Version: 1002},
{Name: "f", Version: 1002},
{Name: "d", Version: protocol.Vector{{ID: myID, Value: 1002}}},
{Name: "e", Version: protocol.Vector{{ID: myID, Value: 1002}}},
{Name: "f", Version: protocol.Vector{{ID: myID, Value: 1002}}},
}
s1.Replace(remoteDevice0, local2)

@ -808,24 +801,24 @@ func TestGlobalNeedWithInvalid(t *testing.T) {
s := db.NewFileSet("test1", ldb)

rem0 := fileList{
protocol.FileInfo{Name: "a", Version: 1002, Blocks: genBlocks(4)},
protocol.FileInfo{Name: "b", Version: 1002, Flags: protocol.FlagInvalid},
protocol.FileInfo{Name: "c", Version: 1002, Blocks: genBlocks(4)},
protocol.FileInfo{Name: "a", Version: protocol.Vector{{ID: myID, Value: 1002}}, Blocks: genBlocks(4)},
protocol.FileInfo{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1002}}, Flags: protocol.FlagInvalid},
protocol.FileInfo{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1002}}, Blocks: genBlocks(4)},
}
s.Replace(remoteDevice0, rem0)

rem1 := fileList{
protocol.FileInfo{Name: "a", Version: 1002, Blocks: genBlocks(4)},
protocol.FileInfo{Name: "b", Version: 1002, Blocks: genBlocks(4)},
protocol.FileInfo{Name: "c", Version: 1002, Flags: protocol.FlagInvalid},
protocol.FileInfo{Name: "a", Version: protocol.Vector{{ID: myID, Value: 1002}}, Blocks: genBlocks(4)},
protocol.FileInfo{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1002}}, Blocks: genBlocks(4)},
protocol.FileInfo{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1002}}, Flags: protocol.FlagInvalid},
}
s.Replace(remoteDevice1, rem1)

total := fileList{
// There's a valid copy of each file, so it should be merged
protocol.FileInfo{Name: "a", Version: 1002, Blocks: genBlocks(4)},
protocol.FileInfo{Name: "b", Version: 1002, Blocks: genBlocks(4)},
protocol.FileInfo{Name: "c", Version: 1002, Blocks: genBlocks(4)},
protocol.FileInfo{Name: "a", Version: protocol.Vector{{ID: myID, Value: 1002}}, Blocks: genBlocks(4)},
protocol.FileInfo{Name: "b", Version: protocol.Vector{{ID: myID, Value: 1002}}, Blocks: genBlocks(4)},
protocol.FileInfo{Name: "c", Version: protocol.Vector{{ID: myID, Value: 1002}}, Blocks: genBlocks(4)},
}

need := fileList(needList(s, protocol.LocalDeviceID))
@ -854,10 +847,10 @@ func TestLongPath(t *testing.T) {
name := b.String() // 5000 characters

local := []protocol.FileInfo{
{Name: string(name), Version: 1000},
{Name: string(name), Version: protocol.Vector{{ID: myID, Value: 1000}}},
}

s.ReplaceWithDelete(protocol.LocalDeviceID, local)
s.ReplaceWithDelete(protocol.LocalDeviceID, local, myID)

gf := globalList(s)
if l := len(gf); l != 1 {
@ -37,7 +37,7 @@ struct Query {

func (o Query) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.encodeXDR(xw)
return o.EncodeXDRInto(xw)
}

func (o Query) MarshalXDR() ([]byte, error) {
@ -55,11 +55,11 @@ func (o Query) MustMarshalXDR() []byte {
func (o Query) AppendXDR(bs []byte) ([]byte, error) {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
_, err := o.encodeXDR(xw)
_, err := o.EncodeXDRInto(xw)
return []byte(aw), err
}

func (o Query) encodeXDR(xw *xdr.Writer) (int, error) {
func (o Query) EncodeXDRInto(xw *xdr.Writer) (int, error) {
xw.WriteUint32(o.Magic)
if l := len(o.DeviceID); l > 32 {
return xw.Tot(), xdr.ElementSizeExceeded("DeviceID", l, 32)
@ -70,16 +70,16 @@ func (o Query) encodeXDR(xw *xdr.Writer) (int, error) {

func (o *Query) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.decodeXDR(xr)
return o.DecodeXDRFrom(xr)
}

func (o *Query) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.decodeXDR(xr)
return o.DecodeXDRFrom(xr)
}

func (o *Query) decodeXDR(xr *xdr.Reader) error {
func (o *Query) DecodeXDRFrom(xr *xdr.Reader) error {
o.Magic = xr.ReadUint32()
o.DeviceID = xr.ReadBytesMax(32)
return xr.Error()
@ -94,7 +94,9 @@ Announce Structure:
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                             Magic                             |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                            Device                             |
/                                                               /
\                        Device Structure                       \
/                                                               /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|                        Number of Extra                        |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@ -114,7 +116,7 @@ struct Announce {

func (o Announce) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.encodeXDR(xw)
return o.EncodeXDRInto(xw)
}

func (o Announce) MarshalXDR() ([]byte, error) {
@ -132,13 +134,13 @@ func (o Announce) MustMarshalXDR() []byte {
func (o Announce) AppendXDR(bs []byte) ([]byte, error) {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
_, err := o.encodeXDR(xw)
_, err := o.EncodeXDRInto(xw)
return []byte(aw), err
}

func (o Announce) encodeXDR(xw *xdr.Writer) (int, error) {
func (o Announce) EncodeXDRInto(xw *xdr.Writer) (int, error) {
xw.WriteUint32(o.Magic)
_, err := o.This.encodeXDR(xw)
_, err := o.This.EncodeXDRInto(xw)
if err != nil {
return xw.Tot(), err
}
@ -147,7 +149,7 @@ func (o Announce) encodeXDR(xw *xdr.Writer) (int, error) {
}
xw.WriteUint32(uint32(len(o.Extra)))
for i := range o.Extra {
_, err := o.Extra[i].encodeXDR(xw)
_, err := o.Extra[i].EncodeXDRInto(xw)
if err != nil {
return xw.Tot(), err
}
@ -157,25 +159,25 @@ func (o Announce) encodeXDR(xw *xdr.Writer) (int, error) {

func (o *Announce) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.decodeXDR(xr)
return o.DecodeXDRFrom(xr)
}

func (o *Announce) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.decodeXDR(xr)
return o.DecodeXDRFrom(xr)
}

func (o *Announce) decodeXDR(xr *xdr.Reader) error {
func (o *Announce) DecodeXDRFrom(xr *xdr.Reader) error {
o.Magic = xr.ReadUint32()
(&o.This).decodeXDR(xr)
(&o.This).DecodeXDRFrom(xr)
_ExtraSize := int(xr.ReadUint32())
if _ExtraSize > 16 {
return xdr.ElementSizeExceeded("Extra", _ExtraSize, 16)
}
o.Extra = make([]Device, _ExtraSize)
for i := range o.Extra {
(&o.Extra[i]).decodeXDR(xr)
(&o.Extra[i]).DecodeXDRFrom(xr)
}
return xr.Error()
}
@ -210,7 +212,7 @@ struct Device {

func (o Device) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.encodeXDR(xw)
return o.EncodeXDRInto(xw)
}

func (o Device) MarshalXDR() ([]byte, error) {
@ -228,11 +230,11 @@ func (o Device) MustMarshalXDR() []byte {
func (o Device) AppendXDR(bs []byte) ([]byte, error) {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
_, err := o.encodeXDR(xw)
_, err := o.EncodeXDRInto(xw)
return []byte(aw), err
}

func (o Device) encodeXDR(xw *xdr.Writer) (int, error) {
func (o Device) EncodeXDRInto(xw *xdr.Writer) (int, error) {
if l := len(o.ID); l > 32 {
return xw.Tot(), xdr.ElementSizeExceeded("ID", l, 32)
}
@ -242,7 +244,7 @@ func (o Device) encodeXDR(xw *xdr.Writer) (int, error) {
}
xw.WriteUint32(uint32(len(o.Addresses)))
for i := range o.Addresses {
_, err := o.Addresses[i].encodeXDR(xw)
_, err := o.Addresses[i].EncodeXDRInto(xw)
if err != nil {
return xw.Tot(), err
}
@ -252,16 +254,16 @@ func (o Device) encodeXDR(xw *xdr.Writer) (int, error) {

func (o *Device) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.decodeXDR(xr)
return o.DecodeXDRFrom(xr)
}

func (o *Device) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.decodeXDR(xr)
return o.DecodeXDRFrom(xr)
}

func (o *Device) decodeXDR(xr *xdr.Reader) error {
func (o *Device) DecodeXDRFrom(xr *xdr.Reader) error {
o.ID = xr.ReadBytesMax(32)
_AddressesSize := int(xr.ReadUint32())
if _AddressesSize > 16 {
@ -269,7 +271,7 @@ func (o *Device) decodeXDR(xr *xdr.Reader) error {
}
o.Addresses = make([]Address, _AddressesSize)
for i := range o.Addresses {
(&o.Addresses[i]).decodeXDR(xr)
(&o.Addresses[i]).DecodeXDRFrom(xr)
}
return xr.Error()
}
@ -300,7 +302,7 @@ struct Address {

func (o Address) EncodeXDR(w io.Writer) (int, error) {
var xw = xdr.NewWriter(w)
return o.encodeXDR(xw)
return o.EncodeXDRInto(xw)
}

func (o Address) MarshalXDR() ([]byte, error) {
@ -318,11 +320,11 @@ func (o Address) MustMarshalXDR() []byte {
func (o Address) AppendXDR(bs []byte) ([]byte, error) {
var aw = xdr.AppendWriter(bs)
var xw = xdr.NewWriter(&aw)
_, err := o.encodeXDR(xw)
_, err := o.EncodeXDRInto(xw)
return []byte(aw), err
}

func (o Address) encodeXDR(xw *xdr.Writer) (int, error) {
func (o Address) EncodeXDRInto(xw *xdr.Writer) (int, error) {
if l := len(o.IP); l > 16 {
return xw.Tot(), xdr.ElementSizeExceeded("IP", l, 16)
}
@ -333,16 +335,16 @@ func (o Address) encodeXDR(xw *xdr.Writer) (int, error) {

func (o *Address) DecodeXDR(r io.Reader) error {
xr := xdr.NewReader(r)
return o.decodeXDR(xr)
return o.DecodeXDRFrom(xr)
}

func (o *Address) UnmarshalXDR(bs []byte) error {
var br = bytes.NewReader(bs)
var xr = xdr.NewReader(br)
return o.decodeXDR(xr)
return o.DecodeXDRFrom(xr)
}

func (o *Address) decodeXDR(xr *xdr.Reader) error {
func (o *Address) DecodeXDRFrom(xr *xdr.Reader) error {
o.IP = xr.ReadBytesMax(16)
o.Port = xr.ReadUint16()
return xr.Error()
@ -1,31 +0,0 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

// Package lamport implements a simple Lamport Clock for versioning
package lamport

import "sync"

var Default = Clock{}

type Clock struct {
val int64
mut sync.Mutex
}

func (c *Clock) Tick(v int64) int64 {
c.mut.Lock()
if v > c.val {
c.val = v + 1
c.mut.Unlock()
return v + 1
} else {
c.val++
v = c.val
c.mut.Unlock()
return v
}
}
@ -1,24 +0,0 @@
// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.

package lamport

import "testing"

var inputs = []int64{0, 42, 2, 3, 4, 8, 9, 33, 44, 112, 100}

func TestClock(t *testing.T) {
c := Clock{}

var prev int64
for _, input := range inputs {
cur := c.Tick(input)
if cur <= prev || cur <= input {
t.Error("Clock moving backwards")
}
prev = cur
}
}
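The lamport package deleted above existed to keep a single global counter moving forward across all devices. With version vectors each device only advances its own entry, which is why the package can go away; the sketch below shows the Update operation that takes over from Clock.Tick in this commit. The real protocol.Vector.Update may pick counter values differently (the tests above use values like 1000), so treat this as an illustration of the idea rather than the actual implementation.

package main

import "fmt"

// Counter and Vector again mirror the assumed shape of protocol.Vector.
type Counter struct {
	ID    uint64
	Value uint64
}

type Vector []Counter

// Update bumps this device's own counter, adding the entry if it is missing.
// This is what replaces the deleted lamport.Default.Tick(...) calls: no
// shared global clock is needed because each device only advances its own entry.
func (v Vector) Update(id uint64) Vector {
	for i := range v {
		if v[i].ID == id {
			v[i].Value++
			return v
		}
	}
	return append(v, Counter{ID: id, Value: 1})
}

func main() {
	var ver Vector
	ver = ver.Update(1) // first local change
	ver = ver.Update(1) // second local change
	ver = ver.Update(7) // a change recorded for another device
	fmt.Println(ver)    // [{1 2} {7 1}]
}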
@ -27,7 +27,6 @@ import (
"github.com/syncthing/syncthing/internal/db"
"github.com/syncthing/syncthing/internal/events"
"github.com/syncthing/syncthing/internal/ignore"
"github.com/syncthing/syncthing/internal/lamport"
"github.com/syncthing/syncthing/internal/osutil"
"github.com/syncthing/syncthing/internal/scanner"
"github.com/syncthing/syncthing/internal/stats"
@ -59,6 +58,8 @@ type Model struct {
db *leveldb.DB
finder *db.BlockFinder
progressEmitter *ProgressEmitter
id protocol.DeviceID
shortID uint64

deviceName string
clientName string
@ -93,10 +94,14 @@ var (
// NewModel creates and starts a new model. The model starts in read-only mode,
// where it sends index information to connected peers and responds to requests
// for file data without altering the local folder in any way.
func NewModel(cfg *config.Wrapper, deviceName, clientName, clientVersion string, ldb *leveldb.DB) *Model {
func NewModel(cfg *config.Wrapper, id protocol.DeviceID, deviceName, clientName, clientVersion string, ldb *leveldb.DB) *Model {
m := &Model{
cfg: cfg,
db: ldb,
finder: db.NewBlockFinder(ldb, cfg),
progressEmitter: NewProgressEmitter(cfg),
id: id,
shortID: id.Short(),
deviceName: deviceName,
clientName: clientName,
clientVersion: clientVersion,
@ -111,8 +116,6 @@ func NewModel(cfg *config.Wrapper, deviceName, clientName, clientVersion string,
protoConn: make(map[protocol.DeviceID]protocol.Connection),
rawConn: make(map[protocol.DeviceID]io.Closer),
deviceVer: make(map[protocol.DeviceID]string),
finder: db.NewBlockFinder(ldb, cfg),
progressEmitter: NewProgressEmitter(cfg),
}
if cfg.Options().ProgressUpdateIntervalS > -1 {
go m.progressEmitter.Serve()
@ -443,7 +446,6 @@ func (m *Model) Index(deviceID protocol.DeviceID, folder string, fs []protocol.F
}

for i := 0; i < len(fs); {
lamport.Default.Tick(fs[i].Version)
if fs[i].Flags&^protocol.FlagsAll != 0 {
if debug {
l.Debugln("dropping update for file with unknown bits set", fs[i])
@ -492,7 +494,6 @@ func (m *Model) IndexUpdate(deviceID protocol.DeviceID, folder string, fs []prot
}

for i := 0; i < len(fs); {
lamport.Default.Tick(fs[i].Version)
if fs[i].Flags&^protocol.FlagsAll != 0 {
if debug {
l.Debugln("dropping update for file with unknown bits set", fs[i])
@ -748,7 +749,7 @@ func (m *Model) Request(deviceID protocol.DeviceID, folder, name string, offset
// ReplaceLocal replaces the local folder index with the given list of files.
func (m *Model) ReplaceLocal(folder string, fs []protocol.FileInfo) {
m.fmut.RLock()
m.folderFiles[folder].ReplaceWithDelete(protocol.LocalDeviceID, fs)
m.folderFiles[folder].ReplaceWithDelete(protocol.LocalDeviceID, fs, m.shortID)
m.fmut.RUnlock()
}

@ -1149,6 +1150,7 @@ func (m *Model) ScanFolderSub(folder, sub string) error {
IgnorePerms: folderCfg.IgnorePerms,
AutoNormalize: folderCfg.AutoNormalize,
Hashers: folderCfg.Hashers,
ShortID: m.shortID,
}

runner.setState(FolderScanning)
@ -1233,7 +1235,7 @@ func (m *Model) ScanFolderSub(folder, sub string) error {
Name: f.Name,
Flags: f.Flags | protocol.FlagDeleted,
Modified: f.Modified,
Version: lamport.Default.Tick(f.Version),
Version: f.Version.Update(m.shortID),
}
events.Default.Log(events.LocalIndexUpdated, map[string]interface{}{
"folder": folder,
@ -1329,7 +1331,7 @@ func (m *Model) Override(folder string) {
// We have the file, replace with our version
need = have
}
need.Version = lamport.Default.Tick(need.Version)
need.Version = need.Version.Update(m.shortID)
need.LocalVersion = 0
batch = append(batch, need)
return true
@ -86,7 +86,7 @@ func init() {
|
||||
func TestRequest(t *testing.T) {
|
||||
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
|
||||
|
||||
m := NewModel(defaultConfig, "device", "syncthing", "dev", db)
|
||||
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
|
||||
|
||||
// device1 shares default, but device2 doesn't
|
||||
m.AddFolder(defaultFolderConfig)
|
||||
@ -173,7 +173,7 @@ func genFiles(n int) []protocol.FileInfo {
|
||||
|
||||
func BenchmarkIndex10000(b *testing.B) {
|
||||
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
|
||||
m := NewModel(defaultConfig, "device", "syncthing", "dev", db)
|
||||
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
|
||||
m.AddFolder(defaultFolderConfig)
|
||||
m.ScanFolder("default")
|
||||
files := genFiles(10000)
|
||||
@ -186,7 +186,7 @@ func BenchmarkIndex10000(b *testing.B) {
|
||||
|
||||
func BenchmarkIndex00100(b *testing.B) {
|
||||
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
|
||||
m := NewModel(defaultConfig, "device", "syncthing", "dev", db)
|
||||
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
|
||||
m.AddFolder(defaultFolderConfig)
|
||||
m.ScanFolder("default")
|
||||
files := genFiles(100)
|
||||
@ -199,7 +199,7 @@ func BenchmarkIndex00100(b *testing.B) {
|
||||
|
||||
func BenchmarkIndexUpdate10000f10000(b *testing.B) {
|
||||
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
|
||||
m := NewModel(defaultConfig, "device", "syncthing", "dev", db)
|
||||
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
|
||||
m.AddFolder(defaultFolderConfig)
|
||||
m.ScanFolder("default")
|
||||
files := genFiles(10000)
|
||||
@ -213,7 +213,7 @@ func BenchmarkIndexUpdate10000f10000(b *testing.B) {
|
||||
|
||||
func BenchmarkIndexUpdate10000f00100(b *testing.B) {
|
||||
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
|
||||
m := NewModel(defaultConfig, "device", "syncthing", "dev", db)
|
||||
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
|
||||
m.AddFolder(defaultFolderConfig)
|
||||
m.ScanFolder("default")
|
||||
files := genFiles(10000)
|
||||
@ -228,7 +228,7 @@ func BenchmarkIndexUpdate10000f00100(b *testing.B) {
|
||||
|
||||
func BenchmarkIndexUpdate10000f00001(b *testing.B) {
|
||||
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
|
||||
m := NewModel(defaultConfig, "device", "syncthing", "dev", db)
|
||||
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
|
||||
m.AddFolder(defaultFolderConfig)
|
||||
m.ScanFolder("default")
|
||||
files := genFiles(10000)
|
||||
@ -286,7 +286,7 @@ func (FakeConnection) Statistics() protocol.Statistics {
|
||||
|
||||
func BenchmarkRequest(b *testing.B) {
|
||||
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
|
||||
m := NewModel(defaultConfig, "device", "syncthing", "dev", db)
|
||||
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
|
||||
m.AddFolder(defaultFolderConfig)
|
||||
m.ScanFolder("default")
|
||||
|
||||
@ -336,7 +336,7 @@ func TestDeviceRename(t *testing.T) {
|
||||
}
|
||||
|
||||
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
|
||||
m := NewModel(config.Wrap("tmpconfig.xml", cfg), "device", "syncthing", "dev", db)
|
||||
m := NewModel(config.Wrap("tmpconfig.xml", cfg), protocol.LocalDeviceID, "device", "syncthing", "dev", db)
|
||||
if cfg.Devices[0].Name != "" {
|
||||
t.Errorf("Device already has a name")
|
||||
}
|
||||
@ -403,7 +403,7 @@ func TestClusterConfig(t *testing.T) {
|
||||
|
||||
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel(config.Wrap("/tmp/test", cfg), "device", "syncthing", "dev", db)
m := NewModel(config.Wrap("/tmp/test", cfg), protocol.LocalDeviceID, "device", "syncthing", "dev", db)
m.AddFolder(cfg.Folders[0])
m.AddFolder(cfg.Folders[1])

@ -469,7 +469,7 @@ func TestIgnores(t *testing.T) {
}

db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel(defaultConfig, "device", "syncthing", "dev", db)
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
m.AddFolder(defaultFolderConfig)
m.StartFolderRO("default")

@ -543,7 +543,7 @@ func TestIgnores(t *testing.T) {

func TestRefuseUnknownBits(t *testing.T) {
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel(defaultConfig, "device", "syncthing", "dev", db)
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
m.AddFolder(defaultFolderConfig)

m.ScanFolder("default")

@ -580,7 +580,7 @@ func TestRefuseUnknownBits(t *testing.T) {

func TestGlobalDirectoryTree(t *testing.T) {
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel(defaultConfig, "device", "syncthing", "dev", db)
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
m.AddFolder(defaultFolderConfig)

b := func(isfile bool, path ...string) protocol.FileInfo {

@ -830,7 +830,7 @@ func TestGlobalDirectoryTree(t *testing.T) {
func TestGlobalDirectorySelfFixing(t *testing.T) {

db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel(defaultConfig, "device", "syncthing", "dev", db)
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
m.AddFolder(defaultFolderConfig)

b := func(isfile bool, path ...string) protocol.FileInfo {

@ -991,7 +991,7 @@ func genDeepFiles(n, d int) []protocol.FileInfo {

func BenchmarkTree_10000_50(b *testing.B) {
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel(defaultConfig, "device", "syncthing", "dev", db)
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
m.AddFolder(defaultFolderConfig)
m.ScanFolder("default")
files := genDeepFiles(10000, 50)

@ -1006,7 +1006,7 @@ func BenchmarkTree_10000_50(b *testing.B) {

func BenchmarkTree_10000_10(b *testing.B) {
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel(defaultConfig, "device", "syncthing", "dev", db)
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
m.AddFolder(defaultFolderConfig)
m.ScanFolder("default")
files := genDeepFiles(10000, 10)

@ -1021,7 +1021,7 @@ func BenchmarkTree_10000_10(b *testing.B) {

func BenchmarkTree_00100_50(b *testing.B) {
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel(defaultConfig, "device", "syncthing", "dev", db)
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
m.AddFolder(defaultFolderConfig)
m.ScanFolder("default")
files := genDeepFiles(100, 50)

@ -1036,7 +1036,7 @@ func BenchmarkTree_00100_50(b *testing.B) {

func BenchmarkTree_00100_10(b *testing.B) {
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel(defaultConfig, "device", "syncthing", "dev", db)
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
m.AddFolder(defaultFolderConfig)
m.ScanFolder("default")
files := genDeepFiles(100, 10)
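Note: every hunk above makes the same mechanical change — NewModel now receives the local device ID as an explicit second argument. Only the call sites appear in this diff, so the constructor signature below is an assumption, sketched to match those calls:

// Hedged sketch, not part of the diff: parameter and return types are inferred
// from the updated call sites, not from the actual model package.
func NewModel(cfg *config.Wrapper, id protocol.DeviceID, deviceName, clientName, clientVersion string, ldb *leveldb.DB) *Model {
	// ...
}

// A test would then construct the model exactly as the updated lines do:
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
m.AddFolder(defaultFolderConfig)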
@ -67,7 +67,7 @@ func TestHandleFile(t *testing.T) {
requiredFile.Blocks = blocks[1:]

db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel(defaultConfig, "device", "syncthing", "dev", db)
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
m.AddFolder(defaultFolderConfig)
// Update index
m.updateLocal("default", existingFile)

@ -121,7 +121,7 @@ func TestHandleFileWithTemp(t *testing.T) {
requiredFile.Blocks = blocks[1:]

db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel(defaultConfig, "device", "syncthing", "dev", db)
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
m.AddFolder(defaultFolderConfig)
// Update index
m.updateLocal("default", existingFile)

@ -181,7 +181,7 @@ func TestCopierFinder(t *testing.T) {
requiredFile.Name = "file2"

db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel(defaultConfig, "device", "syncthing", "dev", db)
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
m.AddFolder(defaultFolderConfig)
// Update index
m.updateLocal("default", existingFile)

@ -256,7 +256,7 @@ func TestCopierCleanup(t *testing.T) {
}

db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel(defaultConfig, "device", "syncthing", "dev", db)
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
m.AddFolder(defaultFolderConfig)

// Create a file

@ -275,7 +275,7 @@ func TestCopierCleanup(t *testing.T) {
}

file.Blocks = []protocol.BlockInfo{blocks[1]}
file.Version++
file.Version = file.Version.Update(protocol.LocalDeviceID.Short())
// Update index (removing old blocks)
m.updateLocal("default", file)

@ -288,7 +288,7 @@ func TestCopierCleanup(t *testing.T) {
}

file.Blocks = []protocol.BlockInfo{blocks[0]}
file.Version++
file.Version = file.Version.Update(protocol.LocalDeviceID.Short())
// Update index (removing old blocks)
m.updateLocal("default", file)
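In TestCopierCleanup the scalar bump file.Version++ is replaced by file.Version = file.Version.Update(protocol.LocalDeviceID.Short()), i.e. versions move from a single counter to a per-device vector. A minimal, runnable sketch of what such an Update could look like follows; the names and layout are illustrative, the real implementation lives in github.com/syncthing/protocol and is not shown in this diff:

// versionvector_sketch.go — illustrative model of a version-vector Update.
package main

import "fmt"

type Counter struct {
	ID    uint64 // short device ID
	Value uint64 // per-device change counter
}

type Vector []Counter

// Update bumps the counter for the given short device ID, adding it if missing.
func (v Vector) Update(id uint64) Vector {
	for i := range v {
		if v[i].ID == id {
			v[i].Value++
			return v
		}
	}
	return append(v, Counter{ID: id, Value: 1})
}

func main() {
	var ver Vector
	ver = ver.Update(42) // first local change
	ver = ver.Update(42) // second local change
	ver = ver.Update(7)  // a change attributed to another device
	fmt.Println(ver)     // [{42 2} {7 1}]
}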
@ -305,7 +305,7 @@ func TestCopierCleanup(t *testing.T) {
// if it fails to find the block.
func TestLastResortPulling(t *testing.T) {
db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel(defaultConfig, "device", "syncthing", "dev", db)
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
m.AddFolder(defaultFolderConfig)

// Add a file to index (with the incorrect block representation, as content

@ -378,7 +378,7 @@ func TestDeregisterOnFailInCopy(t *testing.T) {

db, _ := leveldb.Open(storage.NewMemStorage(), nil)

m := NewModel(defaultConfig, "device", "syncthing", "dev", db)
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
m.AddFolder(defaultFolderConfig)

emitter := NewProgressEmitter(defaultConfig)

@ -465,7 +465,7 @@ func TestDeregisterOnFailInPull(t *testing.T) {
defer os.Remove("testdata/" + defTempNamer.TempName("filex"))

db, _ := leveldb.Open(storage.NewMemStorage(), nil)
m := NewModel(defaultConfig, "device", "syncthing", "dev", db)
m := NewModel(defaultConfig, protocol.LocalDeviceID, "device", "syncthing", "dev", db)
m.AddFolder(defaultFolderConfig)

emitter := NewProgressEmitter(defaultConfig)
@ -17,7 +17,6 @@ import (

"github.com/syncthing/protocol"
"github.com/syncthing/syncthing/internal/ignore"
"github.com/syncthing/syncthing/internal/lamport"
"github.com/syncthing/syncthing/internal/symlinks"
"golang.org/x/text/unicode/norm"
)

@ -61,6 +60,8 @@ type Walker struct {
AutoNormalize bool
// Number of routines to use for hashing
Hashers int
// Our vector clock id
ShortID uint64
}

type TempNamer interface {
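The Walker gains a ShortID field, documented as the vector clock id. A hedged guess at how a caller wires it up follows; only Hashers, ShortID and CurrentFiler are visible in this diff, the remaining setup is illustrative:

w := scanner.Walker{
	// ... existing walker configuration (Dir, CurrentFiler, ...) unchanged ...
	Hashers: 4,
	// Assumed: the model passes its own device ID in short (uint64) form,
	// the same value the tests above obtain via protocol.LocalDeviceID.Short().
	ShortID: protocol.LocalDeviceID.Short(),
}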
@ -203,6 +204,9 @@ func (w *Walker) walkAndHashFiles(fchan chan protocol.FileInfo) filepath.WalkFun
rn = normalizedRn
}

var cf protocol.FileInfo
var ok bool

// Index wise symlinks are always files, regardless of what the target
// is, because symlinks carry their target path as their content.
if info.Mode()&os.ModeSymlink == os.ModeSymlink {

@ -243,7 +247,7 @@ func (w *Walker) walkAndHashFiles(fchan chan protocol.FileInfo) filepath.WalkFun
// - it wasn't invalid
// - the symlink type (file/dir) was the same
// - the block list (i.e. hash of target) was the same
cf, ok := w.CurrentFiler.CurrentFile(rn)
cf, ok = w.CurrentFiler.CurrentFile(rn)
if ok && !cf.IsDeleted() && cf.IsSymlink() && !cf.IsInvalid() && SymlinkTypeEqual(flags, cf.Flags) && BlocksEqual(cf.Blocks, blocks) {
return skip
}

@ -251,7 +255,7 @@ func (w *Walker) walkAndHashFiles(fchan chan protocol.FileInfo) filepath.WalkFun

f := protocol.FileInfo{
Name: rn,
Version: lamport.Default.Tick(0),
Version: cf.Version.Update(w.ShortID),
Flags: protocol.FlagSymlink | flags | protocol.FlagNoPermBits | 0666,
Modified: 0,
Blocks: blocks,

@ -275,7 +279,7 @@ func (w *Walker) walkAndHashFiles(fchan chan protocol.FileInfo) filepath.WalkFun
// - was a directory previously (not a file or something else)
// - was not a symlink (since it's a directory now)
// - was not invalid (since it looks valid now)
cf, ok := w.CurrentFiler.CurrentFile(rn)
cf, ok = w.CurrentFiler.CurrentFile(rn)
permUnchanged := w.IgnorePerms || !cf.HasPermissionBits() || PermsEqual(cf.Flags, uint32(info.Mode()))
if ok && permUnchanged && !cf.IsDeleted() && cf.IsDirectory() && !cf.IsSymlink() && !cf.IsInvalid() {
return nil

@ -290,7 +294,7 @@ func (w *Walker) walkAndHashFiles(fchan chan protocol.FileInfo) filepath.WalkFun
}
f := protocol.FileInfo{
Name: rn,
Version: lamport.Default.Tick(0),
Version: cf.Version.Update(w.ShortID),
Flags: flags,
Modified: info.ModTime().Unix(),
}

@ -312,7 +316,7 @@ func (w *Walker) walkAndHashFiles(fchan chan protocol.FileInfo) filepath.WalkFun
// - was not a symlink (since it's a file now)
// - was not invalid (since it looks valid now)
// - has the same size as previously
cf, ok := w.CurrentFiler.CurrentFile(rn)
cf, ok = w.CurrentFiler.CurrentFile(rn)
permUnchanged := w.IgnorePerms || !cf.HasPermissionBits() || PermsEqual(cf.Flags, uint32(info.Mode()))
if ok && permUnchanged && !cf.IsDeleted() && cf.Modified == info.ModTime().Unix() && !cf.IsDirectory() &&
!cf.IsSymlink() && !cf.IsInvalid() && cf.Size() == info.Size() {

@ -331,7 +335,7 @@ func (w *Walker) walkAndHashFiles(fchan chan protocol.FileInfo) filepath.WalkFun

f := protocol.FileInfo{
Name: rn,
Version: lamport.Default.Tick(0),
Version: cf.Version.Update(w.ShortID),
Flags: flags,
Modified: info.ModTime().Unix(),
}
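The walk.go hunks above replace lamport.Default.Tick(0) with cf.Version.Update(w.ShortID) wherever a new FileInfo is built, which is why the lamport import is dropped. In terms of the illustrative Vector sketch shown earlier, the practical difference is roughly this (comments and names are explanatory, not from the diff):

// Old scheme (assumed behaviour): every change, on every device, drew the next
// value from one shared Lamport clock, so a version was a single integer.
//
// New scheme: each file carries a vector of per-device counters, and the
// scanner bumps only its own entry, seeded from whatever version the file had
// in the index (cf.Version is the zero vector for a brand-new file):
newVersion := cf.Version.Update(w.ShortID)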