// Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
// Use of this source code is governed by an MIT-style license that can be
// found in the LICENSE file.

package protocol

import (
	"bufio"
	"compress/flate"
	"errors"
	"fmt"
	"io"
	"sync"
	"time"

	"github.com/calmh/syncthing/xdr"
)
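
// BlockSize is the size in bytes of the data blocks that files are split
// into for hashing and transfer.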
const BlockSize = 128 * 1024
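
// Message types, as carried in the message header.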
const (
	messageTypeClusterConfig = 0
	messageTypeIndex         = 1
	messageTypeRequest       = 2
	messageTypeResponse      = 3
	messageTypePing          = 4
	messageTypePong          = 5
	messageTypeIndexUpdate   = 6
)
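
// FileInfo flag bits; see IsDeleted, IsInvalid, IsDirectory and
// HasPermissionBits below.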
const (
	FlagDeleted    uint32 = 1 << 12
	FlagInvalid           = 1 << 13
	FlagDirectory         = 1 << 14
	FlagNoPermBits        = 1 << 15
)
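
// Flags describing how a repository is shared, as exchanged in the cluster
// config message.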
const (
	FlagShareTrusted  uint32 = 1 << 0
	FlagShareReadOnly        = 1 << 1
	FlagShareBits            = 0x000000ff
)

var (
	ErrClusterHash = fmt.Errorf("configuration error: mismatched cluster hash")
	ErrClosed      = errors.New("connection closed")
)
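
// Model is the interface to the receiving side of a connection; the
// connection calls the appropriate method for each incoming message.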
type Model interface {
	// An index was received from the peer node
	Index(nodeID string, repo string, files []FileInfo)
	// An index update was received from the peer node
	IndexUpdate(nodeID string, repo string, files []FileInfo)
	// A request was made by the peer node
	Request(nodeID string, repo string, name string, offset int64, size int) ([]byte, error)
	// A cluster configuration message was received
	ClusterConfig(nodeID string, config ClusterConfigMessage)
	// The peer node closed the connection
	Close(nodeID string, err error)
}
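
// Connection is the public interface to an established protocol connection.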
type Connection interface {
	ID() string
	Index(repo string, files []FileInfo)
	Request(repo string, name string, offset int64, size int) ([]byte, error)
	ClusterConfig(config ClusterConfigMessage)
	Statistics() Statistics
}
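
// rawConnection implements the protocol on top of an XDR stream. It is
// wrapped in a wireFormatConnection before being handed out by NewConnection.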
type rawConnection struct {
	id       string
	receiver Model

	reader io.ReadCloser
	cr     *countingReader
	xr     *xdr.Reader

	writer io.WriteCloser
	cw     *countingWriter
	wb     *bufio.Writer
	xw     *xdr.Writer
	wmut   sync.Mutex

	indexSent map[string]map[string][2]int64
	awaiting  []chan asyncResult
	imut      sync.Mutex

	nextID chan int
	outbox chan []encodable
	closed chan struct{}
}
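
// asyncResult carries the outcome of a request or ping back to the waiting
// caller via its channel in awaiting.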
type asyncResult struct {
	val []byte
	err error
}
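
// A pong must arrive within pingTimeout, and a ping is only sent when the
// connection has been idle for at least pingIdleTime.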
const (
	pingTimeout  = 300 * time.Second
	pingIdleTime = 600 * time.Second
)
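
// NewConnection wires up a protocol connection to the node with the given ID
// on top of the given reader and writer, starts the service goroutines and
// returns the Connection. Incoming messages are delivered to the receiver
// Model.
//
// A minimal usage sketch, assuming conn is an established net.Conn and model
// implements Model (the names below are placeholders):
//
//	pc := NewConnection(remoteID, conn, conn, model)
//	pc.ClusterConfig(myClusterConfig)
//	pc.Index("default", files)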
func NewConnection(nodeID string, reader io.Reader, writer io.Writer, receiver Model) Connection {
	cr := &countingReader{Reader: reader}
	cw := &countingWriter{Writer: writer}

	flrd := flate.NewReader(cr)
	flwr, err := flate.NewWriter(cw, flate.BestSpeed)
	if err != nil {
		panic(err)
	}
	wb := bufio.NewWriter(flwr)

	c := rawConnection{
		id:        nodeID,
		receiver:  nativeModel{receiver},
		reader:    flrd,
		cr:        cr,
		xr:        xdr.NewReader(flrd),
		writer:    flwr,
		cw:        cw,
		wb:        wb,
		xw:        xdr.NewWriter(wb),
		awaiting:  make([]chan asyncResult, 0x1000),
		indexSent: make(map[string]map[string][2]int64),
		outbox:    make(chan []encodable),
		nextID:    make(chan int),
		closed:    make(chan struct{}),
	}

	go c.indexSerializerLoop()
	go c.readerLoop()
	go c.writerLoop()
	go c.pingerLoop()
	go c.idGenerator()

	return wireFormatConnection{&c}
}
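
// ID returns the node ID given to NewConnection for the remote side.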
func (c *rawConnection) ID() string {
	return c.id
}

// Index writes the list of file information to the connected peer node
func (c *rawConnection) Index(repo string, idx []FileInfo) {
	c.imut.Lock()
	var msgType int
	if c.indexSent[repo] == nil {
		// This is the first time we send an index.
		msgType = messageTypeIndex

		c.indexSent[repo] = make(map[string][2]int64)
		for _, f := range idx {
			c.indexSent[repo][f.Name] = [2]int64{f.Modified, int64(f.Version)}
		}
	} else {
		// We have sent one full index. Only send updates now.
		msgType = messageTypeIndexUpdate
		var diff []FileInfo
		for _, f := range idx {
			if vs, ok := c.indexSent[repo][f.Name]; !ok || f.Modified != vs[0] || int64(f.Version) != vs[1] {
				diff = append(diff, f)
				c.indexSent[repo][f.Name] = [2]int64{f.Modified, int64(f.Version)}
			}
		}
		idx = diff
	}
	c.imut.Unlock()

	c.send(header{0, -1, msgType}, IndexMessage{repo, idx})
}

// Request returns the bytes for the specified block after fetching them from the connected peer.
func (c *rawConnection) Request(repo string, name string, offset int64, size int) ([]byte, error) {
	var id int
	select {
	case id = <-c.nextID:
	case <-c.closed:
		return nil, ErrClosed
	}

	c.imut.Lock()
	if ch := c.awaiting[id]; ch != nil {
		panic("id taken")
	}
	rc := make(chan asyncResult)
	c.awaiting[id] = rc
	c.imut.Unlock()

	ok := c.send(header{0, id, messageTypeRequest},
		RequestMessage{repo, name, uint64(offset), uint32(size)})
	if !ok {
		return nil, ErrClosed
	}

	res, ok := <-rc
	if !ok {
		return nil, ErrClosed
	}
	return res.val, res.err
}

// ClusterConfig sends the cluster configuration message to the peer node.
func (c *rawConnection) ClusterConfig(config ClusterConfigMessage) {
	c.send(header{0, -1, messageTypeClusterConfig}, config)
}
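
// ping sends a ping message and waits for the corresponding pong or the
// connection being closed; it returns true if the pong arrived.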
func (c *rawConnection) ping() bool {
	var id int
	select {
	case id = <-c.nextID:
	case <-c.closed:
		return false
	}

	rc := make(chan asyncResult, 1)
	c.imut.Lock()
	c.awaiting[id] = rc
	c.imut.Unlock()

	ok := c.send(header{0, id, messageTypePing})
	if !ok {
		return false
	}

	res, ok := <-rc
	return ok && res.err == nil
}

func (c *rawConnection) readerLoop() (err error) {
	defer func() {
		c.close(err)
	}()

	for {
		select {
		case <-c.closed:
			return ErrClosed
		default:
		}

		var hdr header
		hdr.decodeXDR(c.xr)
		if err := c.xr.Error(); err != nil {
			return err
		}
		if hdr.version != 0 {
			return fmt.Errorf("protocol error: %s: unknown message version %#x", c.id, hdr.version)
		}

		switch hdr.msgType {
		case messageTypeIndex:
			if err := c.handleIndex(); err != nil {
				return err
			}

		case messageTypeIndexUpdate:
			if err := c.handleIndexUpdate(); err != nil {
				return err
			}

		case messageTypeRequest:
			if err := c.handleRequest(hdr); err != nil {
				return err
			}

		case messageTypeResponse:
			if err := c.handleResponse(hdr); err != nil {
				return err
			}

		case messageTypePing:
			c.send(header{0, hdr.msgID, messageTypePong})

		case messageTypePong:
			c.handlePong(hdr)

		case messageTypeClusterConfig:
			if err := c.handleClusterConfig(); err != nil {
				return err
			}

		default:
			return fmt.Errorf("protocol error: %s: unknown message type %#x", c.id, hdr.msgType)
		}
	}
}
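
// incomingIndex is a received index or index update, queued for in-order
// processing by the index serializer routine.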
type incomingIndex struct {
	update bool
	id     string
	repo   string
	files  []FileInfo
}

var incomingIndexes = make(chan incomingIndex, 100) // should be enough for anyone, right?

func (c *rawConnection) indexSerializerLoop() {
	// We must avoid blocking the reader loop when processing large indexes.
	// There is otherwise a potential deadlock where both sides have the model
	// locked because they are sending a large index update and can't receive
	// the large index update from the other side. But we must also make sure
	// to process the indexes in the order they are received, hence the
	// separate routine and buffered channel.
	for ii := range incomingIndexes {
		if ii.update {
			c.receiver.IndexUpdate(ii.id, ii.repo, ii.files)
		} else {
			c.receiver.Index(ii.id, ii.repo, ii.files)
		}
	}
}

func (c *rawConnection) handleIndex() error {
	var im IndexMessage
	im.decodeXDR(c.xr)
	if err := c.xr.Error(); err != nil {
		return err
	} else {
		// Queue the index for handling by the index serializer routine
		// rather than processing it here, so that we don't block the read
		// loop (see indexSerializerLoop above).
		incomingIndexes <- incomingIndex{false, c.id, im.Repository, im.Files}
	}
	return nil
}

func (c *rawConnection) handleIndexUpdate() error {
	var im IndexMessage
	im.decodeXDR(c.xr)
	if err := c.xr.Error(); err != nil {
		return err
	} else {
		incomingIndexes <- incomingIndex{true, c.id, im.Repository, im.Files}
	}
	return nil
}

func (c *rawConnection) handleRequest(hdr header) error {
	var req RequestMessage
	req.decodeXDR(c.xr)
	if err := c.xr.Error(); err != nil {
		return err
	}
	go c.processRequest(hdr.msgID, req)
	return nil
}

func (c *rawConnection) handleResponse(hdr header) error {
	data := c.xr.ReadBytesMax(256 * 1024) // Sufficiently larger than max expected block size

	if err := c.xr.Error(); err != nil {
		return err
	}

	go func(hdr header, err error) {
		c.imut.Lock()
		rc := c.awaiting[hdr.msgID]
		c.awaiting[hdr.msgID] = nil
		c.imut.Unlock()

		if rc != nil {
			rc <- asyncResult{data, err}
			close(rc)
		}
	}(hdr, c.xr.Error())

	return nil
}

func (c *rawConnection) handlePong(hdr header) {
	c.imut.Lock()
	if rc := c.awaiting[hdr.msgID]; rc != nil {
		go func() {
			rc <- asyncResult{}
			close(rc)
		}()

		c.awaiting[hdr.msgID] = nil
	}
	c.imut.Unlock()
}

func (c *rawConnection) handleClusterConfig() error {
	var cm ClusterConfigMessage
	cm.decodeXDR(c.xr)
	if err := c.xr.Error(); err != nil {
		return err
	} else {
		go c.receiver.ClusterConfig(c.id, cm)
	}
	return nil
}
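
// encodable is anything that can be written to an XDR stream; message
// headers and message bodies both satisfy it.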
type encodable interface {
	encodeXDR(*xdr.Writer) (int, error)
}

type encodableBytes []byte

func (e encodableBytes) encodeXDR(xw *xdr.Writer) (int, error) {
	return xw.WriteBytes(e)
}
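
// send queues a message for the writer loop, allocating a message ID if the
// header doesn't already carry one. It returns false if the connection has
// been closed.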
func (c *rawConnection) send(h header, es ...encodable) bool {
	if h.msgID < 0 {
		select {
		case id := <-c.nextID:
			h.msgID = id
		case <-c.closed:
			return false
		}
	}
	msg := append([]encodable{h}, es...)

	select {
	case c.outbox <- msg:
		return true
	case <-c.closed:
		return false
	}
}

func (c *rawConnection) writerLoop() {
	var err error
	for es := range c.outbox {
		c.wmut.Lock()
		for _, e := range es {
			e.encodeXDR(c.xw)
		}

		if err = c.flush(); err != nil {
			c.wmut.Unlock()
			c.close(err)
			return
		}
		c.wmut.Unlock()
	}
}

type flusher interface {
	Flush() error
}

func (c *rawConnection) flush() error {
	if err := c.xw.Error(); err != nil {
		return err
	}

	if err := c.wb.Flush(); err != nil {
		return err
	}

	if f, ok := c.writer.(flusher); ok {
		return f.Flush()
	}

	return nil
}

func (c *rawConnection) close(err error) {
	c.imut.Lock()
	c.wmut.Lock()
	defer c.imut.Unlock()
	defer c.wmut.Unlock()

	select {
	case <-c.closed:
		return
	default:
		close(c.closed)

		for i, ch := range c.awaiting {
			if ch != nil {
				close(ch)
				c.awaiting[i] = nil
			}
		}

		c.writer.Close()
		c.reader.Close()

		go c.receiver.Close(c.id, err)
	}
}

func (c *rawConnection) idGenerator() {
	nextID := 0
	for {
		nextID = (nextID + 1) & 0xfff
		select {
		case c.nextID <- nextID:
		case <-c.closed:
			return
		}
	}
}

func (c *rawConnection) pingerLoop() {
	var rc = make(chan bool, 1)
	ticker := time.Tick(pingIdleTime / 2)
	for {
		select {
		case <-ticker:
			if d := time.Since(c.xr.LastRead()); d < pingIdleTime {
				if debug {
					l.Debugln(c.id, "ping skipped after rd", d)
				}
				continue
			}
			if d := time.Since(c.xw.LastWrite()); d < pingIdleTime {
				if debug {
					l.Debugln(c.id, "ping skipped after wr", d)
				}
				continue
			}
			go func() {
				if debug {
					l.Debugln(c.id, "ping ->")
				}
				rc <- c.ping()
			}()
			select {
			case ok := <-rc:
				if debug {
					l.Debugln(c.id, "<- pong")
				}
				if !ok {
					c.close(fmt.Errorf("ping failure"))
				}
			case <-time.After(pingTimeout):
				c.close(fmt.Errorf("ping timeout"))
			case <-c.closed:
				return
			}

		case <-c.closed:
			return
		}
	}
}

func (c *rawConnection) processRequest(msgID int, req RequestMessage) {
	data, _ := c.receiver.Request(c.id, req.Repository, req.Name, int64(req.Offset), int(req.Size))

	c.send(header{0, msgID, messageTypeResponse},
		encodableBytes(data))
}
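
// Statistics is a snapshot of the total byte counts transferred over the
// connection, taken at time At.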
type Statistics struct {
	At            time.Time
	InBytesTotal  uint64
	OutBytesTotal uint64
}

func (c *rawConnection) Statistics() Statistics {
	return Statistics{
		At:            time.Now(),
		InBytesTotal:  c.cr.Tot(),
		OutBytesTotal: c.cw.Tot(),
	}
}
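
// IsDeleted returns true if the FlagDeleted bit is set in the given flags.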
func IsDeleted(bits uint32) bool {
	return bits&FlagDeleted != 0
}
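
// IsInvalid returns true if the FlagInvalid bit is set in the given flags.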
func IsInvalid(bits uint32) bool {
	return bits&FlagInvalid != 0
}
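
// IsDirectory returns true if the FlagDirectory bit is set in the given flags.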
func IsDirectory(bits uint32) bool {
	return bits&FlagDirectory != 0
}
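
// HasPermissionBits returns true unless the FlagNoPermBits bit is set, i.e.
// when the flags carry valid permission bits.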
func HasPermissionBits(bits uint32) bool {
	return bits&FlagNoPermBits == 0
}