mirror of
https://github.com/octoleo/syncthing.git
synced 2024-12-23 03:18:59 +00:00
916ec63af6
This is a new revision of the discovery server. Relevant changes and non-changes: - Protocol towards clients is unchanged. - Recommended large scale design is still to be deployed behind nginx (I tested, and it's still a lot faster at terminating TLS). - Database backend is leveldb again, only. It scales enough, is easy to setup, and we don't need any backend to take care of. - Server supports replication. This is a simple TCP channel - protect it with a firewall when deploying over the internet. (We deploy this within the same datacenter, and with firewall.) Any incoming client announces are sent over the replication channel(s) to other peer discosrvs. Incoming replication changes are applied to the database as if they came from clients, but without the TLS/certificate overhead. - Metrics are exposed using the prometheus library, when enabled. - The database values and replication protocol is protobuf, because JSON was quite CPU intensive when I tried that and benchmarked it. - The "Retry-After" value for failed lookups gets slowly increased from a default of 120 seconds, by 5 seconds for each failed lookup, independently by each discosrv. This lowers the query load over time for clients that are never seen. The Retry-After maxes out at 3600 after a couple of weeks of this increase. The number of failed lookups is stored in the database, now and then (avoiding making each lookup a database put). All in all this means clients can be pointed towards a cluster using just multiple A / AAAA records to gain both load sharing and redundancy (if one is down, clients will talk to the remaining ones). GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/4648
76 lines
2.8 KiB
Go
76 lines
2.8 KiB
Go
// Copyright 2013 Matt T. Proud
|
|
//
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
// you may not use this file except in compliance with the License.
|
|
// You may obtain a copy of the License at
|
|
//
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
//
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
// See the License for the specific language governing permissions and
|
|
// limitations under the License.
|
|
|
|
package pbutil
|
|
|
|
import (
|
|
"encoding/binary"
|
|
"errors"
|
|
"io"
|
|
|
|
"github.com/golang/protobuf/proto"
|
|
)
|
|
|
|
// errInvalidVarint is returned when the length prefix cannot be decoded as a
// 32-bit varint within binary.MaxVarintLen32 bytes.
var errInvalidVarint = errors.New("invalid varint32 encountered")
|
|
|
|
// ReadDelimited decodes a message from the provided length-delimited stream,
|
|
// where the length is encoded as 32-bit varint prefix to the message body.
|
|
// It returns the total number of bytes read and any applicable error. This is
|
|
// roughly equivalent to the companion Java API's
|
|
// MessageLite#parseDelimitedFrom. As per the reader contract, this function
|
|
// calls r.Read repeatedly as required until exactly one message including its
|
|
// prefix is read and decoded (or an error has occurred). The function never
|
|
// reads more bytes from the stream than required. The function never returns
|
|
// an error if a message has been read and decoded correctly, even if the end
|
|
// of the stream has been reached in doing so. In that case, any subsequent
|
|
// calls return (0, io.EOF).
|
|
func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {
|
|
// Per AbstractParser#parsePartialDelimitedFrom with
|
|
// CodedInputStream#readRawVarint32.
|
|
var headerBuf [binary.MaxVarintLen32]byte
|
|
var bytesRead, varIntBytes int
|
|
var messageLength uint64
|
|
for varIntBytes == 0 { // i.e. no varint has been decoded yet.
|
|
if bytesRead >= len(headerBuf) {
|
|
return bytesRead, errInvalidVarint
|
|
}
|
|
// We have to read byte by byte here to avoid reading more bytes
|
|
// than required. Each read byte is appended to what we have
|
|
// read before.
|
|
newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])
|
|
if newBytesRead == 0 {
|
|
if err != nil {
|
|
return bytesRead, err
|
|
}
|
|
// A Reader should not return (0, nil), but if it does,
|
|
// it should be treated as no-op (according to the
|
|
// Reader contract). So let's go on...
|
|
continue
|
|
}
|
|
bytesRead += newBytesRead
|
|
// Now present everything read so far to the varint decoder and
|
|
// see if a varint can be decoded already.
|
|
messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])
|
|
}
|
|
|
|
messageBuf := make([]byte, messageLength)
|
|
newBytesRead, err := io.ReadFull(r, messageBuf)
|
|
bytesRead += newBytesRead
|
|
if err != nil {
|
|
return bytesRead, err
|
|
}
|
|
|
|
return bytesRead, proto.Unmarshal(messageBuf, m)
|
|
}
|