mirror of
https://github.com/octoleo/syncthing.git
synced 2024-11-10 15:20:56 +00:00
916ec63af6
This is a new revision of the discovery server. Relevant changes and non-changes: - Protocol towards clients is unchanged. - Recommended large scale design is still to be deployed behind nginx (I tested, and it's still a lot faster at terminating TLS). - Database backend is leveldb again, only. It scales enough, is easy to setup, and we don't need any backend to take care of. - Server supports replication. This is a simple TCP channel - protect it with a firewall when deploying over the internet. (We deploy this within the same datacenter, and with firewall.) Any incoming client announces are sent over the replication channel(s) to other peer discosrvs. Incoming replication changes are applied to the database as if they came from clients, but without the TLS/certificate overhead. - Metrics are exposed using the prometheus library, when enabled. - The database values and replication protocol is protobuf, because JSON was quite CPU intensive when I tried that and benchmarked it. - The "Retry-After" value for failed lookups gets slowly increased from a default of 120 seconds, by 5 seconds for each failed lookup, independently by each discosrv. This lowers the query load over time for clients that are never seen. The Retry-After maxes out at 3600 after a couple of weeks of this increase. The number of failed lookups is stored in the database, now and then (avoiding making each lookup a database put). All in all this means clients can be pointed towards a cluster using just multiple A / AAAA records to gain both load sharing and redundancy (if one is down, clients will talk to the remaining ones). GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/4648
138 lines
3.9 KiB
Go
138 lines
3.9 KiB
Go
package procfs
|
|
|
|
import (
|
|
"bufio"
|
|
"fmt"
|
|
"os"
|
|
"regexp"
|
|
"strconv"
|
|
)
|
|
|
|
// ProcLimits represents the soft limits for each of the process's resource
// limits. For more information see getrlimit(2):
// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
//
// A field value of -1 means the corresponding limit is "unlimited" (see
// parseInt below).
type ProcLimits struct {
	// CPU time limit in seconds.
	CPUTime int64
	// Maximum size of files that the process may create.
	FileSize int64
	// Maximum size of the process's data segment (initialized data,
	// uninitialized data, and heap).
	DataSize int64
	// Maximum size of the process stack in bytes.
	StackSize int64
	// Maximum size of a core file.
	CoreFileSize int64
	// Limit of the process's resident set in pages.
	ResidentSet int64
	// Maximum number of processes that can be created for the real user ID of
	// the calling process.
	Processes int64
	// Value one greater than the maximum file descriptor number that can be
	// opened by this process.
	OpenFiles int64
	// Maximum number of bytes of memory that may be locked into RAM.
	LockedMemory int64
	// Maximum size of the process's virtual memory address space in bytes.
	AddressSpace int64
	// Limit on the combined number of flock(2) locks and fcntl(2) leases that
	// this process may establish.
	FileLocks int64
	// Limit of signals that may be queued for the real user ID of the calling
	// process.
	PendingSignals int64
	// Limit on the number of bytes that can be allocated for POSIX message
	// queues for the real user ID of the calling process.
	//
	// NOTE: the field name keeps its historical misspelling ("Msqqueue"
	// rather than "Msgqueue") because renaming an exported field would break
	// existing callers.
	MsqqueueSize int64
	// Limit of the nice priority set using setpriority(2) or nice(2).
	NicePriority int64
	// Limit of the real-time priority set using sched_setscheduler(2) or
	// sched_setparam(2).
	RealtimePriority int64
	// Limit (in microseconds) on the amount of CPU time that a process
	// scheduled under a real-time scheduling policy may consume without making
	// a blocking system call.
	RealtimeTimeout int64
}
|
|
|
|
const (
	// limitsFields is the number of columns we keep after splitting a
	// /proc/<pid>/limits line: the label, the soft limit, and the remainder
	// (hard limit plus units, which NewLimits ignores).
	limitsFields = 3
	// limitsUnlimited is the literal the kernel prints for a limit that is
	// not set; parseInt maps it to -1.
	limitsUnlimited = "unlimited"
)

var (
	// limitsDelimiter splits columns on runs of TWO or more spaces. It must
	// not match a single space: labels such as "Max cpu time" contain single
	// spaces and have to survive as one field for the label match in
	// NewLimits to work. (A single-space pattern " +" would split the label
	// itself, so no limit would ever be parsed.)
	limitsDelimiter = regexp.MustCompile("  +")
)
|
|
|
|
// NewLimits returns the current soft limits of the process.
|
|
func (p Proc) NewLimits() (ProcLimits, error) {
|
|
f, err := os.Open(p.path("limits"))
|
|
if err != nil {
|
|
return ProcLimits{}, err
|
|
}
|
|
defer f.Close()
|
|
|
|
var (
|
|
l = ProcLimits{}
|
|
s = bufio.NewScanner(f)
|
|
)
|
|
for s.Scan() {
|
|
fields := limitsDelimiter.Split(s.Text(), limitsFields)
|
|
if len(fields) != limitsFields {
|
|
return ProcLimits{}, fmt.Errorf(
|
|
"couldn't parse %s line %s", f.Name(), s.Text())
|
|
}
|
|
|
|
switch fields[0] {
|
|
case "Max cpu time":
|
|
l.CPUTime, err = parseInt(fields[1])
|
|
case "Max file size":
|
|
l.FileSize, err = parseInt(fields[1])
|
|
case "Max data size":
|
|
l.DataSize, err = parseInt(fields[1])
|
|
case "Max stack size":
|
|
l.StackSize, err = parseInt(fields[1])
|
|
case "Max core file size":
|
|
l.CoreFileSize, err = parseInt(fields[1])
|
|
case "Max resident set":
|
|
l.ResidentSet, err = parseInt(fields[1])
|
|
case "Max processes":
|
|
l.Processes, err = parseInt(fields[1])
|
|
case "Max open files":
|
|
l.OpenFiles, err = parseInt(fields[1])
|
|
case "Max locked memory":
|
|
l.LockedMemory, err = parseInt(fields[1])
|
|
case "Max address space":
|
|
l.AddressSpace, err = parseInt(fields[1])
|
|
case "Max file locks":
|
|
l.FileLocks, err = parseInt(fields[1])
|
|
case "Max pending signals":
|
|
l.PendingSignals, err = parseInt(fields[1])
|
|
case "Max msgqueue size":
|
|
l.MsqqueueSize, err = parseInt(fields[1])
|
|
case "Max nice priority":
|
|
l.NicePriority, err = parseInt(fields[1])
|
|
case "Max realtime priority":
|
|
l.RealtimePriority, err = parseInt(fields[1])
|
|
case "Max realtime timeout":
|
|
l.RealtimeTimeout, err = parseInt(fields[1])
|
|
}
|
|
if err != nil {
|
|
return ProcLimits{}, err
|
|
}
|
|
}
|
|
|
|
return l, s.Err()
|
|
}
|
|
|
|
func parseInt(s string) (int64, error) {
|
|
if s == limitsUnlimited {
|
|
return -1, nil
|
|
}
|
|
i, err := strconv.ParseInt(s, 10, 64)
|
|
if err != nil {
|
|
return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
|
|
}
|
|
return i, nil
|
|
}
|