syncthing/protocol/protocol.go

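// Package protocol implements the wire protocol spoken between syncthing nodes.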
package protocol

import (
"bufio"
2013-12-15 10:43:31 +00:00
"compress/flate"
"errors"
"fmt"
2013-12-15 10:43:31 +00:00
"io"
"sync"
"time"
2013-12-15 10:43:31 +00:00
"github.com/calmh/syncthing/buffers"
2014-02-15 11:08:55 +00:00
"github.com/calmh/syncthing/xdr"
2013-12-15 10:43:31 +00:00
)
2014-02-20 16:40:15 +00:00
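// BlockSize is the size in bytes of the file blocks requested and transferred by the protocol.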
const BlockSize = 128 * 1024
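// Message type identifiers, carried in the header of every message.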
const (
	messageTypeClusterConfig = 0
	messageTypeIndex         = 1
	messageTypeRequest       = 2
	messageTypeResponse      = 3
	messageTypePing          = 4
	messageTypePong          = 5
	messageTypeIndexUpdate   = 6
)
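// Flags set on files announced in index messages.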
const (
	FlagDeleted   uint32 = 1 << 12
	FlagInvalid          = 1 << 13
	FlagDirectory        = 1 << 14
)
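// Repository sharing flags exchanged in cluster config messages.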
const (
	FlagShareTrusted  uint32 = 1 << 0
	FlagShareReadOnly        = 1 << 1
	FlagShareBits            = 0x000000ff
)
var (
	ErrClusterHash = fmt.Errorf("configuration error: mismatched cluster hash")
	ErrClosed      = errors.New("connection closed")
)
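// Model is the receiving side of the protocol; the connection calls these
// methods as messages arrive from the peer node.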
type Model interface {
	// An index was received from the peer node
	Index(nodeID string, repo string, files []FileInfo)

	// An index update was received from the peer node
	IndexUpdate(nodeID string, repo string, files []FileInfo)

	// A request was made by the peer node
	Request(nodeID string, repo string, name string, offset int64, size int) ([]byte, error)

	// A cluster configuration message was received
	ClusterConfig(nodeID string, config ClusterConfigMessage)

	// The peer node closed the connection
	Close(nodeID string, err error)
}
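// Connection is the sending side of the protocol, used to push messages to
// the peer node.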
type Connection interface {
	ID() string
	Index(repo string, files []FileInfo)
	Request(repo string, name string, offset int64, size int) ([]byte, error)
	ClusterConfig(config ClusterConfigMessage)
	Statistics() Statistics
}
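// rawConnection implements Connection on top of a flate-compressed,
// XDR-encoded stream.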
type rawConnection struct {
	id        string
	receiver  Model
	reader    io.ReadCloser
	cr        *countingReader
	xr        *xdr.Reader
	writer    io.WriteCloser
	cw        *countingWriter
	wb        *bufio.Writer
	xw        *xdr.Writer
	wmut      sync.Mutex
	closed    bool
	awaiting  map[int]chan asyncResult
	nextID    int
	indexSent map[string]map[string][2]int64
	imut      sync.Mutex
}
type asyncResult struct {
	val []byte
	err error
}
const (
	pingTimeout  = 4 * time.Minute
	pingIdleTime = 5 * time.Minute
)
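// NewConnection wraps the given reader and writer in a flate-compressed,
// XDR-encoded protocol connection and starts the read and ping loops.
//
// A minimal usage sketch from outside the package (the node ID, repository
// name, file name and the conn and myModel values are hypothetical):
//
//	protoConn := protocol.NewConnection("node-id", conn, conn, myModel)
//	data, err := protoConn.Request("default", "some/file", 0, protocol.BlockSize)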
func NewConnection(nodeID string, reader io.Reader, writer io.Writer, receiver Model) Connection {
	cr := &countingReader{Reader: reader}
	cw := &countingWriter{Writer: writer}

	flrd := flate.NewReader(cr)
	flwr, err := flate.NewWriter(cw, flate.BestSpeed)
	if err != nil {
		panic(err)
	}
	wb := bufio.NewWriter(flwr)

	c := rawConnection{
		id:        nodeID,
		receiver:  nativeModel{receiver},
		reader:    flrd,
		cr:        cr,
		xr:        xdr.NewReader(flrd),
		writer:    flwr,
		cw:        cw,
		wb:        wb,
		xw:        xdr.NewWriter(wb),
		awaiting:  make(map[int]chan asyncResult),
		indexSent: make(map[string]map[string][2]int64),
	}

	go c.readerLoop()
	go c.pingerLoop()

	return wireFormatConnection{&c}
}
func (c *rawConnection) ID() string {
	return c.id
}
// Index writes the list of file information to the connected peer node
func (c *rawConnection) Index(repo string, idx []FileInfo) {
	if c.isClosed() {
		return
	}

	c.imut.Lock()
	var msgType int
	if c.indexSent[repo] == nil {
		// This is the first time we send an index.
		msgType = messageTypeIndex
		c.indexSent[repo] = make(map[string][2]int64)
		for _, f := range idx {
			c.indexSent[repo][f.Name] = [2]int64{f.Modified, int64(f.Version)}
		}
	} else {
		// We have sent one full index. Only send updates now.
		msgType = messageTypeIndexUpdate
		var diff []FileInfo
		for _, f := range idx {
			if vs, ok := c.indexSent[repo][f.Name]; !ok || f.Modified != vs[0] || int64(f.Version) != vs[1] {
				diff = append(diff, f)
				c.indexSent[repo][f.Name] = [2]int64{f.Modified, int64(f.Version)}
			}
		}
		idx = diff
	}

	id := c.nextID
	c.nextID = (c.nextID + 1) & 0xfff
	c.imut.Unlock()

	c.wmut.Lock()
	header{0, id, msgType}.encodeXDR(c.xw)
	IndexMessage{repo, idx}.encodeXDR(c.xw)
	err := c.flush()
	c.wmut.Unlock()
	if err != nil {
		c.close(err)
		return
	}
}
// Request returns the bytes for the specified block after fetching them from the connected peer.
func (c *rawConnection) Request(repo string, name string, offset int64, size int) ([]byte, error) {
	if c.isClosed() {
		return nil, ErrClosed
	}

	c.imut.Lock()
	id := c.nextID
	c.nextID = (c.nextID + 1) & 0xfff
	rc := make(chan asyncResult)
	if _, ok := c.awaiting[id]; ok {
		panic("id taken")
	}
	c.awaiting[id] = rc
	c.imut.Unlock()

	c.wmut.Lock()
	header{0, id, messageTypeRequest}.encodeXDR(c.xw)
	RequestMessage{repo, name, uint64(offset), uint32(size)}.encodeXDR(c.xw)
	err := c.flush()
	c.wmut.Unlock()
	if err != nil {
		c.close(err)
		return nil, err
	}

	res, ok := <-rc
	if !ok {
		return nil, ErrClosed
	}
	return res.val, res.err
}
// ClusterConfig sends the cluster configuration message to the peer node.
func (c *rawConnection) ClusterConfig(config ClusterConfigMessage) {
	if c.isClosed() {
		return
	}

	c.imut.Lock()
	id := c.nextID
	c.nextID = (c.nextID + 1) & 0xfff
	c.imut.Unlock()

	c.wmut.Lock()
	header{0, id, messageTypeClusterConfig}.encodeXDR(c.xw)
	config.encodeXDR(c.xw)
	err := c.flush()
	c.wmut.Unlock()
	if err != nil {
		c.close(err)
	}
}
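// ping sends a ping message and blocks until the corresponding pong arrives
// or the connection is closed, reporting whether the peer answered.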
func (c *rawConnection) ping() bool {
	if c.isClosed() {
		return false
	}

	c.imut.Lock()
	id := c.nextID
	c.nextID = (c.nextID + 1) & 0xfff
	rc := make(chan asyncResult, 1)
	c.awaiting[id] = rc
	c.imut.Unlock()

	c.wmut.Lock()
	header{0, id, messageTypePing}.encodeXDR(c.xw)
	err := c.flush()
	c.wmut.Unlock()
	if err != nil {
		c.close(err)
		return false
	}

	res, ok := <-rc
	return ok && res.err == nil
}
type flusher interface {
	Flush() error
}
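// flush checks the XDR writer for errors and flushes the buffered and
// compressed writers so that a complete message reaches the peer.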
func (c *rawConnection) flush() error {
	if err := c.xw.Error(); err != nil {
		return err
	}

	if err := c.wb.Flush(); err != nil {
		return err
	}

	if f, ok := c.writer.(flusher); ok {
		return f.Flush()
	}

	return nil
}
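// close marks the connection as closed exactly once, wakes any goroutines
// waiting for responses, closes the underlying reader and writer, and
// notifies the receiver.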
func (c *rawConnection) close(err error) {
	c.imut.Lock()
	c.wmut.Lock()
	defer c.imut.Unlock()
	defer c.wmut.Unlock()
	if c.closed {
		return
	}
	c.closed = true

	for _, ch := range c.awaiting {
		close(ch)
	}
	c.awaiting = nil

	c.writer.Close()
	c.reader.Close()

	c.receiver.Close(c.id, err)
}
func (c *rawConnection) isClosed() bool {
	c.wmut.Lock()
	defer c.wmut.Unlock()
	return c.closed
}
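// readerLoop decodes incoming messages and dispatches them to the receiver
// until the connection is closed or a protocol error occurs.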
func (c *rawConnection) readerLoop() {
loop:
	for !c.isClosed() {
		var hdr header
		hdr.decodeXDR(c.xr)
		if err := c.xr.Error(); err != nil {
			c.close(err)
			break loop
		}
		if hdr.version != 0 {
			c.close(fmt.Errorf("protocol error: %s: unknown message version %#x", c.id, hdr.version))
			break loop
		}
		switch hdr.msgType {
		case messageTypeIndex:
			var im IndexMessage
			im.decodeXDR(c.xr)
			if err := c.xr.Error(); err != nil {
				c.close(err)
				break loop
			} else {
				// We run this (and the corresponding one for update, below)
				// in a separate goroutine to avoid blocking the read loop.
				// There is otherwise a potential deadlock where both sides
				// have the model locked because they are sending a large
				// index update and can't receive the large index update from
				// the other side.
				go c.receiver.Index(c.id, im.Repository, im.Files)
			}
		case messageTypeIndexUpdate:
			var im IndexMessage
			im.decodeXDR(c.xr)
			if err := c.xr.Error(); err != nil {
				c.close(err)
				break loop
			} else {
				go c.receiver.IndexUpdate(c.id, im.Repository, im.Files)
			}

		case messageTypeRequest:
			var req RequestMessage
			req.decodeXDR(c.xr)
			if err := c.xr.Error(); err != nil {
				c.close(err)
				break loop
			}
			go c.processRequest(hdr.msgID, req)

		case messageTypeResponse:
			data := c.xr.ReadBytesMax(256 * 1024) // Sufficiently larger than max expected block size
			if err := c.xr.Error(); err != nil {
				c.close(err)
				break loop
			}

			go func(hdr header, err error) {
				c.imut.Lock()
				rc, ok := c.awaiting[hdr.msgID]
				delete(c.awaiting, hdr.msgID)
				c.imut.Unlock()
				if ok {
					rc <- asyncResult{data, err}
					close(rc)
				}
			}(hdr, c.xr.Error())

		case messageTypePing:
			c.wmut.Lock()
			header{0, hdr.msgID, messageTypePong}.encodeXDR(c.xw)
			err := c.flush()
			c.wmut.Unlock()
			if err != nil {
				c.close(err)
				break loop
			}

		case messageTypePong:
			c.imut.Lock()
			rc, ok := c.awaiting[hdr.msgID]
			if ok {
				go func() {
					rc <- asyncResult{}
					close(rc)
				}()
				delete(c.awaiting, hdr.msgID)
			}
			c.imut.Unlock()

		case messageTypeClusterConfig:
			var cm ClusterConfigMessage
			cm.decodeXDR(c.xr)
			if err := c.xr.Error(); err != nil {
				c.close(err)
				break loop
			} else {
				go c.receiver.ClusterConfig(c.id, cm)
			}

		default:
			c.close(fmt.Errorf("protocol error: %s: unknown message type %#x", c.id, hdr.msgType))
			break loop
		}
	}
}
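// processRequest answers an incoming Request message by asking the local
// Model for the data and writing a Response message back to the peer.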
func (c *rawConnection) processRequest(msgID int, req RequestMessage) {
	data, _ := c.receiver.Request(c.id, req.Repository, req.Name, int64(req.Offset), int(req.Size))

	c.wmut.Lock()
	header{0, msgID, messageTypeResponse}.encodeXDR(c.xw)
	c.xw.WriteBytes(data)
	err := c.flush()
	c.wmut.Unlock()

	buffers.Put(data)
	if err != nil {
		c.close(err)
	}
}
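// pingerLoop sends a ping at regular intervals and closes the connection if
// the ping fails or no pong arrives within pingTimeout.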
func (c *rawConnection) pingerLoop() {
	var rc = make(chan bool, 1)
	ticker := time.Tick(pingIdleTime / 2)
	for {
		if c.isClosed() {
			return
		}
		select {
		case <-ticker:
			go func() {
				rc <- c.ping()
			}()
			select {
			case ok := <-rc:
				if !ok {
					c.close(fmt.Errorf("ping failure"))
				}
			case <-time.After(pingTimeout):
				c.close(fmt.Errorf("ping timeout"))
			}
		}
	}
}
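// Statistics reports the total number of bytes read from and written to the
// underlying connection, sampled at time At.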
type Statistics struct {
	At            time.Time
	InBytesTotal  int
	OutBytesTotal int
}

func (c *rawConnection) Statistics() Statistics {
	return Statistics{
		At:            time.Now(),
		InBytesTotal:  int(c.cr.Tot()),
		OutBytesTotal: int(c.cw.Tot()),
	}
}