Merge branch 'infrastructure'

* infrastructure:
  cmd/stupgrades: Basic process metrics
  cmd/stcrashreceiver: Ignore patterns, improve metrics
  cmd/strelaypoolsrv: More compact response, improved metrics
  cmd/stdiscosrv: Add AMQP replication
Jakob Borg, 2024-06-04 07:18:35 -04:00
commit 21e0f98fe2
18 changed files with 722 additions and 268 deletions

View File

@@ -21,11 +21,14 @@ import (
"net/http"
"os"
"path/filepath"
"regexp"
"strings"
"github.com/alecthomas/kong" "github.com/alecthomas/kong"
raven "github.com/getsentry/raven-go" raven "github.com/getsentry/raven-go"
"github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/client_golang/prometheus/promhttp"
_ "github.com/syncthing/syncthing/lib/automaxprocs" _ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/build"
"github.com/syncthing/syncthing/lib/sha256" "github.com/syncthing/syncthing/lib/sha256"
"github.com/syncthing/syncthing/lib/ur" "github.com/syncthing/syncthing/lib/ur"
) )
@ -33,13 +36,15 @@ import (
const maxRequestSize = 1 << 20 // 1 MiB const maxRequestSize = 1 << 20 // 1 MiB
type cli struct { type cli struct {
Dir string `help:"Parent directory to store crash and failure reports in" env:"REPORTS_DIR" default:"."` Dir string `help:"Parent directory to store crash and failure reports in" env:"REPORTS_DIR" default:"."`
DSN string `help:"Sentry DSN" env:"SENTRY_DSN"` DSN string `help:"Sentry DSN" env:"SENTRY_DSN"`
Listen string `help:"HTTP listen address" default:":8080" env:"LISTEN_ADDRESS"` Listen string `help:"HTTP listen address" default:":8080" env:"LISTEN_ADDRESS"`
MaxDiskFiles int `help:"Maximum number of reports on disk" default:"100000" env:"MAX_DISK_FILES"` MaxDiskFiles int `help:"Maximum number of reports on disk" default:"100000" env:"MAX_DISK_FILES"`
MaxDiskSizeMB int64 `help:"Maximum disk space to use for reports" default:"1024" env:"MAX_DISK_SIZE_MB"` MaxDiskSizeMB int64 `help:"Maximum disk space to use for reports" default:"1024" env:"MAX_DISK_SIZE_MB"`
SentryQueue int `help:"Maximum number of reports to queue for sending to Sentry" default:"64" env:"SENTRY_QUEUE"` SentryQueue int `help:"Maximum number of reports to queue for sending to Sentry" default:"64" env:"SENTRY_QUEUE"`
DiskQueue int `help:"Maximum number of reports to queue for writing to disk" default:"64" env:"DISK_QUEUE"` DiskQueue int `help:"Maximum number of reports to queue for writing to disk" default:"64" env:"DISK_QUEUE"`
MetricsListen string `help:"HTTP listen address for metrics" default:":8081" env:"METRICS_LISTEN_ADDRESS"`
IngorePatterns string `help:"File containing ignore patterns (regexp)" env:"IGNORE_PATTERNS" type:"existingfile"`
}
func main() {
@@ -62,19 +67,38 @@ func main() {
}
go ss.Serve(context.Background())
var ip *ignorePatterns
if params.IngorePatterns != "" {
var err error
ip, err = loadIgnorePatterns(params.IngorePatterns)
if err != nil {
log.Fatalf("Failed to load ignore patterns: %v", err)
}
}
cr := &crashReceiver{
store: ds,
sentry: ss,
ignore: ip,
}
mux.Handle("/", cr)
mux.HandleFunc("/ping", func(w http.ResponseWriter, req *http.Request) {
w.Write([]byte("OK"))
})
mux.Handle("/metrics", promhttp.Handler())
if params.MetricsListen != "" {
mmux := http.NewServeMux()
mmux.Handle("/metrics", promhttp.Handler())
go func() {
if err := http.ListenAndServe(params.MetricsListen, mmux); err != nil {
log.Fatalln("HTTP serve metrics:", err)
}
}()
}
if params.DSN != "" { if params.DSN != "" {
mux.HandleFunc("/newcrash/failure", handleFailureFn(params.DSN, filepath.Join(params.Dir, "failure_reports"))) mux.HandleFunc("/newcrash/failure", handleFailureFn(params.DSN, filepath.Join(params.Dir, "failure_reports"), ip))
} }
log.SetOutput(os.Stdout) log.SetOutput(os.Stdout)
@ -83,7 +107,7 @@ func main() {
} }
} }
func handleFailureFn(dsn, failureDir string) func(w http.ResponseWriter, req *http.Request) { func handleFailureFn(dsn, failureDir string, ignore *ignorePatterns) func(w http.ResponseWriter, req *http.Request) {
return func(w http.ResponseWriter, req *http.Request) { return func(w http.ResponseWriter, req *http.Request) {
result := "failure" result := "failure"
defer func() { defer func() {
@ -98,6 +122,11 @@ func handleFailureFn(dsn, failureDir string) func(w http.ResponseWriter, req *ht
return return
} }
if ignore.match(bs) {
result = "ignored"
return
}
var reports []ur.FailureReport
err = json.Unmarshal(bs, &reports)
if err != nil {
@@ -110,7 +139,7 @@ func handleFailureFn(dsn, failureDir string) func(w http.ResponseWriter, req *ht
return
}
version, err := parseVersion(reports[0].Version)
version, err := build.ParseVersion(reports[0].Version)
if err != nil {
http.Error(w, err.Error(), 400)
return
@@ -158,3 +187,42 @@ func saveFailureWithGoroutines(data ur.FailureData, failureDir string) (string,
}
return reportServer + path, nil
}
type ignorePatterns struct {
patterns []*regexp.Regexp
}
func loadIgnorePatterns(path string) (*ignorePatterns, error) {
bs, err := os.ReadFile(path)
if err != nil {
return nil, err
}
var patterns []*regexp.Regexp
for _, line := range strings.Split(string(bs), "\n") {
line = strings.TrimSpace(line)
if line == "" {
continue
}
re, err := regexp.Compile(line)
if err != nil {
return nil, err
}
patterns = append(patterns, re)
}
log.Printf("Loaded %d ignore patterns", len(patterns))
return &ignorePatterns{patterns: patterns}, nil
}
func (i *ignorePatterns) match(report []byte) bool {
if i == nil {
return false
}
for _, re := range i.patterns {
if re.Match(report) {
return true
}
}
return false
}

View File

@@ -18,6 +18,7 @@ import (
raven "github.com/getsentry/raven-go"
"github.com/maruel/panicparse/v2/stack"
"github.com/syncthing/syncthing/lib/build"
)
const reportServer = "https://crash.syncthing.net/report/"
@@ -105,7 +106,7 @@ func parseCrashReport(path string, report []byte) (*raven.Packet, error) {
return nil, errors.New("no first line")
}
version, err := parseVersion(string(parts[0]))
version, err := build.ParseVersion(string(parts[0]))
if err != nil {
return nil, err
}
@@ -143,12 +144,12 @@ func parseCrashReport(path string, report []byte) (*raven.Packet, error) {
}
// Lock the source code loader to the version we are processing here.
if version.commit != "" {
if version.Commit != "" {
// We have a commit hash, so we know exactly which source to use
loader.LockWithVersion(version.commit)
loader.LockWithVersion(version.Commit)
} else if strings.HasPrefix(version.tag, "v") {
} else if strings.HasPrefix(version.Tag, "v") {
// Lets hope the tag is close enough
loader.LockWithVersion(version.tag)
loader.LockWithVersion(version.Tag)
} else {
// Last resort
loader.LockWithVersion("main")
@@ -215,106 +216,26 @@ func crashReportFingerprint(message string) []string {
return []string{"{{ default }}", message}
}
// syncthing v1.1.4-rc.1+30-g6aaae618-dirty-crashrep "Erbium Earthworm" (go1.12.5 darwin-amd64) jb@kvin.kastelo.net 2019-05-23 16:08:14 UTC [foo, bar]
// or, somewhere along the way the "+" in the version tag disappeared:
// syncthing v1.23.7-dev.26.gdf7b56ae.dirty-stversionextra "Fermium Flea" (go1.20.5 darwin-arm64) jb@ok.kastelo.net 2023-07-12 06:55:26 UTC [Some Wrapper, purego, stnoupgrade]
var (
longVersionRE = regexp.MustCompile(`syncthing\s+(v[^\s]+)\s+"([^"]+)"\s\(([^\s]+)\s+([^-]+)-([^)]+)\)\s+([^\s]+)[^\[]*(?:\[(.+)\])?$`)
gitExtraRE = regexp.MustCompile(`\.\d+\.g[0-9a-f]+`) // ".1.g6aaae618"
gitExtraSepRE = regexp.MustCompile(`[.-]`) // dot or dash
)
type version struct {
version string // "v1.1.4-rc.1+30-g6aaae618-dirty-crashrep"
tag string // "v1.1.4-rc.1"
commit string // "6aaae618", blank when absent
codename string // "Erbium Earthworm"
runtime string // "go1.12.5"
goos string // "darwin"
goarch string // "amd64"
builder string // "jb@kvin.kastelo.net"
extra []string // "foo", "bar"
}
func (v version) environment() string {
if v.commit != "" {
return "Development"
}
if strings.Contains(v.tag, "-rc.") {
return "Candidate"
}
if strings.Contains(v.tag, "-") {
return "Beta"
}
return "Stable"
}
func parseVersion(line string) (version, error) {
m := longVersionRE.FindStringSubmatch(line)
if len(m) == 0 {
return version{}, errors.New("unintelligeble version string")
}
v := version{
version: m[1],
codename: m[2],
runtime: m[3],
goos: m[4],
goarch: m[5],
builder: m[6],
}
// Split the version tag into tag and commit. This is old style
// v1.2.3-something.4+11-g12345678 or newer with just dots
// v1.2.3-something.4.11.g12345678 or v1.2.3-dev.11.g12345678.
parts := []string{v.version}
if strings.Contains(v.version, "+") {
parts = strings.Split(v.version, "+")
} else {
idxs := gitExtraRE.FindStringIndex(v.version)
if len(idxs) > 0 {
parts = []string{v.version[:idxs[0]], v.version[idxs[0]+1:]}
}
}
v.tag = parts[0]
if len(parts) > 1 {
fields := gitExtraSepRE.Split(parts[1], -1)
if len(fields) >= 2 && strings.HasPrefix(fields[1], "g") {
v.commit = fields[1][1:]
}
}
if len(m) >= 8 && m[7] != "" {
tags := strings.Split(m[7], ",")
for i := range tags {
tags[i] = strings.TrimSpace(tags[i])
}
v.extra = tags
}
return v, nil
}
func packet(version version, reportType string) *raven.Packet {
func packet(version build.VersionParts, reportType string) *raven.Packet {
pkt := &raven.Packet{
Platform: "go",
Release: version.tag,
Release: version.Tag,
Environment: version.environment(),
Environment: version.Environment(),
Tags: raven.Tags{
raven.Tag{Key: "version", Value: version.version},
raven.Tag{Key: "version", Value: version.Version},
raven.Tag{Key: "tag", Value: version.tag},
raven.Tag{Key: "tag", Value: version.Tag},
raven.Tag{Key: "codename", Value: version.codename},
raven.Tag{Key: "codename", Value: version.Codename},
raven.Tag{Key: "runtime", Value: version.runtime},
raven.Tag{Key: "runtime", Value: version.Runtime},
raven.Tag{Key: "goos", Value: version.goos},
raven.Tag{Key: "goos", Value: version.GOOS},
raven.Tag{Key: "goarch", Value: version.goarch},
raven.Tag{Key: "goarch", Value: version.GOARCH},
raven.Tag{Key: "builder", Value: version.builder},
raven.Tag{Key: "builder", Value: version.Builder},
raven.Tag{Key: "report_type", Value: reportType},
},
}
if version.commit != "" {
if version.Commit != "" {
pkt.Tags = append(pkt.Tags, raven.Tag{Key: "commit", Value: version.commit})
pkt.Tags = append(pkt.Tags, raven.Tag{Key: "commit", Value: version.Commit})
}
for _, tag := range version.extra {
for _, tag := range version.Extra {
pkt.Tags = append(pkt.Tags, raven.Tag{Key: tag, Value: "1"})
}
return pkt

View File

@@ -12,66 +12,6 @@ import (
"testing"
)
func TestParseVersion(t *testing.T) {
cases := []struct {
longVersion string
parsed version
}{
{
longVersion: `syncthing v1.1.4-rc.1+30-g6aaae618-dirty-crashrep "Erbium Earthworm" (go1.12.5 darwin-amd64) jb@kvin.kastelo.net 2019-05-23 16:08:14 UTC`,
parsed: version{
version: "v1.1.4-rc.1+30-g6aaae618-dirty-crashrep",
tag: "v1.1.4-rc.1",
commit: "6aaae618",
codename: "Erbium Earthworm",
runtime: "go1.12.5",
goos: "darwin",
goarch: "amd64",
builder: "jb@kvin.kastelo.net",
},
},
{
longVersion: `syncthing v1.1.4-rc.1+30-g6aaae618-dirty-crashrep "Erbium Earthworm" (go1.12.5 darwin-amd64) jb@kvin.kastelo.net 2019-05-23 16:08:14 UTC [foo, bar]`,
parsed: version{
version: "v1.1.4-rc.1+30-g6aaae618-dirty-crashrep",
tag: "v1.1.4-rc.1",
commit: "6aaae618",
codename: "Erbium Earthworm",
runtime: "go1.12.5",
goos: "darwin",
goarch: "amd64",
builder: "jb@kvin.kastelo.net",
extra: []string{"foo", "bar"},
},
},
{
longVersion: `syncthing v1.23.7-dev.26.gdf7b56ae-stversionextra "Fermium Flea" (go1.20.5 darwin-arm64) jb@ok.kastelo.net 2023-07-12 06:55:26 UTC [Some Wrapper, purego, stnoupgrade]`,
parsed: version{
version: "v1.23.7-dev.26.gdf7b56ae-stversionextra",
tag: "v1.23.7-dev",
commit: "df7b56ae",
codename: "Fermium Flea",
runtime: "go1.20.5",
goos: "darwin",
goarch: "arm64",
builder: "jb@ok.kastelo.net",
extra: []string{"Some Wrapper", "purego", "stnoupgrade"},
},
},
}
for _, tc := range cases {
v, err := parseVersion(tc.longVersion)
if err != nil {
t.Errorf("%s\nerror: %v\n", tc.longVersion, err)
continue
}
if fmt.Sprint(v) != fmt.Sprint(tc.parsed) {
t.Errorf("%s\nA: %v\nE: %v\n", tc.longVersion, v, tc.parsed)
}
}
}
func TestParseReport(t *testing.T) {
bs, err := os.ReadFile("_testdata/panic.log")
if err != nil {

View File

@@ -12,11 +12,16 @@ import (
"net/http"
"path"
"strings"
"sync"
)
type crashReceiver struct {
store *diskStore
sentry *sentryService
ignore *ignorePatterns
ignoredMut sync.RWMutex
ignored map[string]struct{}
}
func (r *crashReceiver) ServeHTTP(w http.ResponseWriter, req *http.Request) {
@@ -64,6 +69,12 @@ func (r *crashReceiver) serveGet(reportID string, w http.ResponseWriter, _ *http
// serveHead responds to HEAD requests by checking if the named report
// already exists in the system.
func (r *crashReceiver) serveHead(reportID string, w http.ResponseWriter, _ *http.Request) {
r.ignoredMut.RLock()
_, ignored := r.ignored[reportID]
r.ignoredMut.RUnlock()
if ignored {
return // found
}
if !r.store.Exists(reportID) {
http.Error(w, "Not found", http.StatusNotFound)
}
@@ -76,6 +87,15 @@ func (r *crashReceiver) servePut(reportID string, w http.ResponseWriter, req *ht
metricCrashReportsTotal.WithLabelValues(result).Inc()
}()
r.ignoredMut.RLock()
_, ignored := r.ignored[reportID]
r.ignoredMut.RUnlock()
if ignored {
result = "ignored_cached"
io.Copy(io.Discard, req.Body)
return // found
}
// Read at most maxRequestSize of report data.
log.Println("Receiving report", reportID)
lr := io.LimitReader(req.Body, maxRequestSize)
@@ -86,6 +106,17 @@ func (r *crashReceiver) servePut(reportID string, w http.ResponseWriter, req *ht
return
}
if r.ignore.match(bs) {
r.ignoredMut.Lock()
if r.ignored == nil {
r.ignored = make(map[string]struct{})
}
r.ignored[reportID] = struct{}{}
r.ignoredMut.Unlock()
result = "ignored"
return
}
result = "success" result = "success"
// Store the report // Store the report

cmd/stdiscosrv/amqp.go (new file, 246 lines)

@@ -0,0 +1,246 @@
// Copyright (C) 2024 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package main
import (
"context"
"fmt"
"io"
amqp "github.com/rabbitmq/amqp091-go"
"github.com/thejerf/suture/v4"
)
type amqpReplicator struct {
suture.Service
broker string
sender *amqpSender
receiver *amqpReceiver
outbox chan ReplicationRecord
}
func newAMQPReplicator(broker, clientID string, db database) *amqpReplicator {
svc := suture.New("amqpReplicator", suture.Spec{PassThroughPanics: true})
sender := &amqpSender{
broker: broker,
clientID: clientID,
outbox: make(chan ReplicationRecord, replicationOutboxSize),
}
svc.Add(sender)
receiver := &amqpReceiver{
broker: broker,
clientID: clientID,
db: db,
}
svc.Add(receiver)
return &amqpReplicator{
Service: svc,
broker: broker,
sender: sender,
receiver: receiver,
outbox: make(chan ReplicationRecord, replicationOutboxSize),
}
}
func (s *amqpReplicator) send(key string, ps []DatabaseAddress, seen int64) {
s.sender.send(key, ps, seen)
}
type amqpSender struct {
broker string
clientID string
outbox chan ReplicationRecord
}
func (s *amqpSender) Serve(ctx context.Context) error {
conn, ch, err := amqpChannel(s.broker)
if err != nil {
return err
}
defer ch.Close()
defer conn.Close()
buf := make([]byte, 1024)
for {
select {
case rec := <-s.outbox:
size := rec.Size()
if len(buf) < size {
buf = make([]byte, size)
}
n, err := rec.MarshalTo(buf)
if err != nil {
replicationSendsTotal.WithLabelValues("error").Inc()
return fmt.Errorf("replication marshal: %w", err)
}
err = ch.PublishWithContext(ctx,
"discovery", // exchange
"", // routing key
false, // mandatory
false, // immediate
amqp.Publishing{
ContentType: "application/protobuf",
Body: buf[:n],
AppId: s.clientID,
})
if err != nil {
replicationSendsTotal.WithLabelValues("error").Inc()
return fmt.Errorf("replication publish: %w", err)
}
replicationSendsTotal.WithLabelValues("success").Inc()
case <-ctx.Done():
return nil
}
}
}
func (s *amqpSender) String() string {
return fmt.Sprintf("amqpSender(%q)", s.broker)
}
func (s *amqpSender) send(key string, ps []DatabaseAddress, seen int64) {
item := ReplicationRecord{
Key: key,
Addresses: ps,
Seen: seen,
}
// The send should never block. The inbox is suitably buffered for at
// least a few seconds of stalls, which shouldn't happen in practice.
select {
case s.outbox <- item:
default:
replicationSendsTotal.WithLabelValues("drop").Inc()
}
}
type amqpReceiver struct {
broker string
clientID string
db database
}
func (s *amqpReceiver) Serve(ctx context.Context) error {
conn, ch, err := amqpChannel(s.broker)
if err != nil {
return err
}
defer ch.Close()
defer conn.Close()
msgs, err := amqpConsume(ch)
if err != nil {
return err
}
for {
select {
case msg, ok := <-msgs:
if !ok {
return fmt.Errorf("subscription closed: %w", io.EOF)
}
// ignore messages from ourself
if msg.AppId == s.clientID {
continue
}
var rec ReplicationRecord
if err := rec.Unmarshal(msg.Body); err != nil {
replicationRecvsTotal.WithLabelValues("error").Inc()
return fmt.Errorf("replication unmarshal: %w", err)
}
if err := s.db.merge(rec.Key, rec.Addresses, rec.Seen); err != nil {
return fmt.Errorf("replication database merge: %w", err)
}
replicationRecvsTotal.WithLabelValues("success").Inc()
case <-ctx.Done():
return nil
}
}
}
func (s *amqpReceiver) String() string {
return fmt.Sprintf("amqpReceiver(%q)", s.broker)
}
func amqpChannel(dst string) (*amqp.Connection, *amqp.Channel, error) {
conn, err := amqp.Dial(dst)
if err != nil {
return nil, nil, fmt.Errorf("AMQP dial: %w", err)
}
ch, err := conn.Channel()
if err != nil {
return nil, nil, fmt.Errorf("AMQP channel: %w", err)
}
err = ch.ExchangeDeclare(
"discovery", // name
"fanout", // type
false, // durable
false, // auto-deleted
false, // internal
false, // no-wait
nil, // arguments
)
if err != nil {
return nil, nil, fmt.Errorf("AMQP declare exchange: %w", err)
}
return conn, ch, nil
}
func amqpConsume(ch *amqp.Channel) (<-chan amqp.Delivery, error) {
q, err := ch.QueueDeclare(
"", // name
false, // durable
false, // delete when unused
true, // exclusive
false, // no-wait
nil, // arguments
)
if err != nil {
return nil, fmt.Errorf("AMQP declare queue: %w", err)
}
err = ch.QueueBind(
q.Name, // queue name
"", // routing key
"discovery", // exchange
false,
nil,
)
if err != nil {
return nil, fmt.Errorf("AMQP bind queue: %w", err)
}
msgs, err := ch.Consume(
q.Name, // queue
"", // consumer
true, // auto-ack
false, // exclusive
false, // no-local
false, // no-wait
nil, // args
)
if err != nil {
return nil, fmt.Errorf("AMQP consume: %w", err)
}
return msgs, nil
}

View File

@@ -39,12 +39,13 @@ type announcement struct {
}
type apiSrv struct {
addr string
cert tls.Certificate
db database
listener net.Listener
repl replicator // optional
useHTTP bool
missesIncrease int
mapsMut sync.Mutex
misses map[string]int32
@@ -60,14 +61,15 @@ type contextKey int
const idKey contextKey = iota
func newAPISrv(addr string, cert tls.Certificate, db database, repl replicator, useHTTP bool) *apiSrv {
func newAPISrv(addr string, cert tls.Certificate, db database, repl replicator, useHTTP bool, missesIncrease int) *apiSrv {
return &apiSrv{
addr: addr,
cert: cert,
db: db,
repl: repl,
useHTTP: useHTTP,
misses: make(map[string]int32),
missesIncrease: missesIncrease,
}
}
@@ -197,14 +199,13 @@ func (s *apiSrv) handleGET(w http.ResponseWriter, req *http.Request) {
s.mapsMut.Lock()
misses := s.misses[key]
if misses < rec.Misses {
misses = rec.Misses + 1
misses = rec.Misses
} else {
misses++
}
misses += int32(s.missesIncrease)
s.misses[key] = misses
s.mapsMut.Unlock()
if misses%notFoundMissesWriteInterval == 0 {
if misses >= notFoundMissesWriteInterval {
rec.Misses = misses
rec.Missed = time.Now().UnixNano()
rec.Addresses = nil
@@ -444,7 +445,6 @@ func fixupAddresses(remote *net.TCPAddr, addresses []string) []string {
// remote is nil, unable to determine host IP
continue
}
}
// If zero port was specified, use remote port.

View File

@@ -22,6 +22,7 @@ import (
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/build"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/rand"
"github.com/syncthing/syncthing/lib/tlsutil" "github.com/syncthing/syncthing/lib/tlsutil"
"github.com/syndtr/goleveldb/leveldb/opt" "github.com/syndtr/goleveldb/leveldb/opt"
"github.com/thejerf/suture/v4" "github.com/thejerf/suture/v4"
@ -80,6 +81,8 @@ func main() {
var replKeyFile string var replKeyFile string
var useHTTP bool var useHTTP bool
var largeDB bool var largeDB bool
var amqpAddress string
missesIncrease := 1
log.SetOutput(os.Stdout) log.SetOutput(os.Stdout)
log.SetFlags(0) log.SetFlags(0)
@ -96,6 +99,8 @@ func main() {
flag.StringVar(&replCertFile, "replication-cert", "", "Certificate file for replication") flag.StringVar(&replCertFile, "replication-cert", "", "Certificate file for replication")
flag.StringVar(&replKeyFile, "replication-key", "", "Key file for replication") flag.StringVar(&replKeyFile, "replication-key", "", "Key file for replication")
flag.BoolVar(&largeDB, "large-db", false, "Use larger database settings") flag.BoolVar(&largeDB, "large-db", false, "Use larger database settings")
flag.StringVar(&amqpAddress, "amqp-address", "", "Address to AMQP broker")
flag.IntVar(&missesIncrease, "misses-increase", 1, "How many times to increase the misses counter on each miss")
showVersion := flag.Bool("version", false, "Show version") showVersion := flag.Bool("version", false, "Show version")
flag.Parse() flag.Parse()
@ -203,8 +208,24 @@ func main() {
main.Add(rl) main.Add(rl)
} }
// If we have an AMQP broker, start that
if amqpAddress != "" {
clientID := rand.String(10)
kr := newAMQPReplicator(amqpAddress, clientID, db)
repl = append(repl, kr)
main.Add(kr)
}
go func() {
for range time.NewTicker(time.Second).C {
for _, r := range repl {
r.send("<heartbeat>", nil, time.Now().UnixNano())
}
}
}()
// Start the main API server.
qs := newAPISrv(listen, cert, db, repl, useHTTP)
qs := newAPISrv(listen, cert, db, repl, useHTTP, missesIncrease)
main.Add(qs)
// If we have a metrics port configured, start a metrics handler.

View File

@@ -144,10 +144,11 @@ func (s *replicationSender) String() string {
return fmt.Sprintf("replicationSender(%q)", s.dst)
}
func (s *replicationSender) send(key string, ps []DatabaseAddress, _ int64) {
func (s *replicationSender) send(key string, ps []DatabaseAddress, seen int64) {
item := ReplicationRecord{
Key: key,
Addresses: ps,
Seen: seen,
}
// The send should never block. The inbox is suitably buffered for at

View File

@@ -7,10 +7,7 @@
package main
import (
"os"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
)
var (
@@ -127,15 +124,4 @@ func init() {
databaseKeys, databaseStatisticsSeconds,
databaseOperations, databaseOperationSeconds,
retryAfterHistogram)
processCollectorOpts := collectors.ProcessCollectorOpts{
Namespace: "syncthing_discovery",
PidFn: func() (int, error) {
return os.Getpid(), nil
},
}
prometheus.MustRegister(
collectors.NewProcessCollector(processCollectorOpts),
)
}

View File

@@ -259,7 +259,7 @@
return a.value > b.value ? 1 : -1;
}
$http.get("/endpoint").then(function(response) {
$http.get("/endpoint/full").then(function(response) {
$scope.relays = response.data.relays;
angular.forEach($scope.relays, function(relay) {
@@ -338,7 +338,7 @@
relay.showMarker = function() {
relay.marker.openPopup();
}
relay.hideMarker = function() {
relay.marker.closePopup();
}
@@ -347,7 +347,7 @@
function addCircleToMap(relay) {
console.log(relay.location.latitude)
L.circle([relay.location.latitude, relay.location.longitude],
{
radius: ((relay.stats.bytesProxied * 100) / $scope.totals.bytesProxied) * 10000,
color: "FF0000",

View File

@@ -27,9 +27,7 @@ import (
"github.com/syncthing/syncthing/lib/assets"
_ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/geoip"
"github.com/syncthing/syncthing/lib/httpcache"
"github.com/syncthing/syncthing/lib/protocol"
"github.com/syncthing/syncthing/lib/rand"
"github.com/syncthing/syncthing/lib/relay/client"
"github.com/syncthing/syncthing/lib/sync"
"github.com/syncthing/syncthing/lib/tlsutil"
@@ -51,6 +49,10 @@ type relay struct {
StatsRetrieved time.Time `json:"statsRetrieved"`
}
type relayShort struct {
URL string `json:"url"`
}
type stats struct {
StartTime time.Time `json:"startTime"`
UptimeSeconds int `json:"uptimeSeconds"`
@@ -95,6 +97,7 @@ var (
testCert tls.Certificate
knownRelaysFile = filepath.Join(os.TempDir(), "strelaypoolsrv_known_relays")
listen = ":80"
metricsListen = ":8081"
dir string
evictionTime = time.Hour
debug bool
@@ -125,6 +128,7 @@ func main() {
log.SetFlags(log.Lshortfile)
flag.StringVar(&listen, "listen", listen, "Listen address")
flag.StringVar(&metricsListen, "metrics-listen", metricsListen, "Metrics listen address")
flag.StringVar(&dir, "keys", dir, "Directory where http-cert.pem and http-key.pem is stored for TLS listening") flag.StringVar(&dir, "keys", dir, "Directory where http-cert.pem and http-key.pem is stored for TLS listening")
flag.BoolVar(&debug, "debug", debug, "Enable debug output") flag.BoolVar(&debug, "debug", debug, "Enable debug output")
flag.DurationVar(&evictionTime, "eviction", evictionTime, "After how long the relay is evicted") flag.DurationVar(&evictionTime, "eviction", evictionTime, "After how long the relay is evicted")
@ -218,15 +222,40 @@ func main() {
log.Fatalln("listen:", err) log.Fatalln("listen:", err)
} }
handler := http.NewServeMux() if metricsListen != "" {
handler.HandleFunc("/", handleAssets) mmux := http.NewServeMux()
handler.Handle("/endpoint", httpcache.SinglePath(http.HandlerFunc(handleRequest), 15*time.Second)) mmux.HandleFunc("/metrics", handleMetrics)
handler.HandleFunc("/metrics", handleMetrics) go func() {
if err := http.ListenAndServe(metricsListen, mmux); err != nil {
log.Fatalln("HTTP serve metrics:", err)
}
}()
}
getMux := http.NewServeMux()
getMux.HandleFunc("/", handleAssets)
getMux.HandleFunc("/endpoint", withAPIMetrics(handleEndpointShort))
getMux.HandleFunc("/endpoint/full", withAPIMetrics(handleEndpointFull))
postMux := http.NewServeMux()
postMux.HandleFunc("/endpoint", withAPIMetrics(handleRegister))
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
switch r.Method {
case http.MethodGet, http.MethodHead, http.MethodOptions:
getMux.ServeHTTP(w, r)
case http.MethodPost:
postMux.ServeHTTP(w, r)
default:
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
}
})
srv := http.Server{
Handler: handler,
ReadTimeout: 10 * time.Second,
}
srv.SetKeepAlivesEnabled(false)
err = srv.Serve(listener)
if err != nil {
@@ -260,39 +289,24 @@ func handleAssets(w http.ResponseWriter, r *http.Request) {
assets.Serve(w, r, as)
}
func handleRequest(w http.ResponseWriter, r *http.Request) {
func withAPIMetrics(next http.HandlerFunc) http.HandlerFunc {
timer := prometheus.NewTimer(apiRequestsSeconds.WithLabelValues(r.Method))
return func(w http.ResponseWriter, r *http.Request) {
timer := prometheus.NewTimer(apiRequestsSeconds.WithLabelValues(r.Method))
w = NewLoggingResponseWriter(w)
defer func() {
timer.ObserveDuration()
lw := w.(*loggingResponseWriter)
apiRequestsTotal.WithLabelValues(r.Method, strconv.Itoa(lw.statusCode)).Inc()
}()
next(w, r)
if ipHeader != "" {
hdr := r.Header.Get(ipHeader)
fields := strings.Split(hdr, ",")
if len(fields) > 0 {
r.RemoteAddr = strings.TrimSpace(fields[len(fields)-1])
}
}
w.Header().Set("Access-Control-Allow-Origin", "*")
switch r.Method {
case "GET":
handleGetRequest(w, r)
case "POST":
handlePostRequest(w, r)
default:
if debug {
log.Println("Unhandled HTTP method", r.Method)
}
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
}
}
func handleGetRequest(rw http.ResponseWriter, r *http.Request) {
// handleEndpointFull returns the relay list with full metadata and
// statistics. Large, and expensive.
func handleEndpointFull(rw http.ResponseWriter, r *http.Request) {
rw.Header().Set("Content-Type", "application/json; charset=utf-8") rw.Header().Set("Content-Type", "application/json; charset=utf-8")
rw.Header().Set("Access-Control-Allow-Origin", "*")
mut.RLock() mut.RLock()
relays := make([]*relay, len(permanentRelays)+len(knownRelays)) relays := make([]*relay, len(permanentRelays)+len(knownRelays))
@ -300,17 +314,38 @@ func handleGetRequest(rw http.ResponseWriter, r *http.Request) {
copy(relays[n:], knownRelays) copy(relays[n:], knownRelays)
mut.RUnlock() mut.RUnlock()
// Shuffle
rand.Shuffle(relays)
_ = json.NewEncoder(rw).Encode(map[string][]*relay{
"relays": relays,
})
}
func handlePostRequest(w http.ResponseWriter, r *http.Request) {
// handleEndpointShort returns the relay list with only the URL.
func handleEndpointShort(rw http.ResponseWriter, r *http.Request) {
rw.Header().Set("Content-Type", "application/json; charset=utf-8")
rw.Header().Set("Access-Control-Allow-Origin", "*")
mut.RLock()
relays := make([]relayShort, 0, len(permanentRelays)+len(knownRelays))
for _, r := range append(permanentRelays, knownRelays...) {
relays = append(relays, relayShort{URL: slimURL(r.URL)})
}
mut.RUnlock()
_ = json.NewEncoder(rw).Encode(map[string][]relayShort{
"relays": relays,
})
}
func handleRegister(w http.ResponseWriter, r *http.Request) {
// Get the IP address of the client
rhost := r.RemoteAddr
if ipHeader != "" {
hdr := r.Header.Get(ipHeader)
fields := strings.Split(hdr, ",")
if len(fields) > 0 {
rhost = strings.TrimSpace(fields[len(fields)-1])
}
}
if host, _, err := net.SplitHostPort(rhost); err == nil {
rhost = host
}
@@ -660,3 +695,16 @@ func (b *errorTracker) IsBlocked(host string) bool {
}
return false
}
func slimURL(u string) string {
p, err := url.Parse(u)
if err != nil {
return u
}
newQuery := url.Values{}
if id := p.Query().Get("id"); id != "" {
newQuery.Set("id", id)
}
p.RawQuery = newQuery.Encode()
return p.String()
}

View File

@@ -42,7 +42,7 @@ func TestHandleGetRequest(t *testing.T) {
w := httptest.NewRecorder()
w.Body = new(bytes.Buffer)
handleGetRequest(w, httptest.NewRequest("GET", "/", nil))
handleEndpointFull(w, httptest.NewRequest("GET", "/", nil))
result := make(map[string][]*relay)
err := json.NewDecoder(w.Body).Decode(&result)
@@ -92,3 +92,18 @@ func TestCanonicalizeQueryValues(t *testing.T) {
t.Errorf("expected %q, got %q", exp, str)
}
}
func TestSlimURL(t *testing.T) {
cases := []struct {
in, out string
}{
{"http://example.com/", "http://example.com/"},
{"relay://192.0.2.42:22067/?globalLimitBps=0&id=EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M&networkTimeout=2m0s&pingInterval=1m0s&providedBy=Test&sessionLimitBps=0&statusAddr=%3A22070", "relay://192.0.2.42:22067/?id=EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M-EIC6B3M"},
}
for _, c := range cases {
if got := slimURL(c.in); got != c.out {
t.Errorf("expected %q, got %q", c.out, got)
}
}
}

View File

@@ -6,27 +6,12 @@ import (
"encoding/json"
"net"
"net/http"
"os"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/collectors"
"github.com/syncthing/syncthing/lib/sync"
)
func init() {
processCollectorOpts := collectors.ProcessCollectorOpts{
Namespace: "syncthing_relaypoolsrv",
PidFn: func() (int, error) {
return os.Getpid(), nil
},
}
prometheus.MustRegister(
collectors.NewProcessCollector(processCollectorOpts),
)
}
var (
statusClient = http.Client{
Timeout: 5 * time.Second,

View File

@@ -19,16 +19,18 @@ import (
"time"
"github.com/alecthomas/kong"
"github.com/prometheus/client_golang/prometheus/promhttp"
_ "github.com/syncthing/syncthing/lib/automaxprocs" _ "github.com/syncthing/syncthing/lib/automaxprocs"
"github.com/syncthing/syncthing/lib/httpcache" "github.com/syncthing/syncthing/lib/httpcache"
"github.com/syncthing/syncthing/lib/upgrade" "github.com/syncthing/syncthing/lib/upgrade"
) )
type cli struct { type cli struct {
Listen string `default:":8080" help:"Listen address"` Listen string `default:":8080" help:"Listen address"`
URL string `short:"u" default:"https://api.github.com/repos/syncthing/syncthing/releases?per_page=25" help:"GitHub releases url"` MetricsListen string `default:":8081" help:"Listen address for metrics"`
Forward []string `short:"f" help:"Forwarded pages, format: /path->https://example/com/url"` URL string `short:"u" default:"https://api.github.com/repos/syncthing/syncthing/releases?per_page=25" help:"GitHub releases url"`
CacheTime time.Duration `default:"15m" help:"Cache time"` Forward []string `short:"f" help:"Forwarded pages, format: /path->https://example/com/url"`
CacheTime time.Duration `default:"15m" help:"Cache time"`
} }
func main() { func main() {
@ -41,17 +43,37 @@ func main() {
} }
func server(params *cli) error { func server(params *cli) error {
http.Handle("/meta.json", httpcache.SinglePath(&githubReleases{url: params.URL}, params.CacheTime)) if params.MetricsListen != "" {
mux := http.NewServeMux()
mux.Handle("/metrics", promhttp.Handler())
go func() {
log.Println("Listening for metrics on", params.MetricsListen)
if err := http.ListenAndServe(params.MetricsListen, mux); err != nil {
log.Fatalf("Failed to start metrics server: %v", err)
}
}()
}
mux := http.NewServeMux()
mux.Handle("/meta.json", httpcache.SinglePath(&githubReleases{url: params.URL}, params.CacheTime))
for _, fwd := range params.Forward {
path, url, ok := strings.Cut(fwd, "->")
if !ok {
return fmt.Errorf("invalid forward: %q", fwd)
}
http.Handle(path, httpcache.SinglePath(&proxy{url: url}, params.CacheTime))
log.Println("Forwarding", path, "to", url)
mux.Handle(path, httpcache.SinglePath(&proxy{url: url}, params.CacheTime))
}
return http.ListenAndServe(params.Listen, nil)
srv := &http.Server{
Addr: params.Listen,
Handler: mux,
ReadTimeout: 5 * time.Second,
WriteTimeout: 10 * time.Second,
}
srv.SetKeepAlivesEnabled(false)
return srv.ListenAndServe()
}
type githubReleases struct {

go.mod (1 changed line)

@@ -31,6 +31,7 @@ require (
github.com/pierrec/lz4/v4 v4.1.21
github.com/prometheus/client_golang v1.19.1
github.com/quic-go/quic-go v0.44.0
github.com/rabbitmq/amqp091-go v1.10.0
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
github.com/shirou/gopsutil/v3 v3.24.4
github.com/syncthing/notify v0.0.0-20210616190510-c6b7342338d2

go.sum (4 changed lines)

@@ -200,6 +200,8 @@ github.com/prometheus/procfs v0.15.0 h1:A82kmvXJq2jTu5YUhSGNlYoxh85zLnKgPz4bMZgI
github.com/prometheus/procfs v0.15.0/go.mod h1:Y0RJ/Y5g5wJpkTisOtqwDSo4HwhGmLB4VQSw2sQJLHk=
github.com/quic-go/quic-go v0.44.0 h1:So5wOr7jyO4vzL2sd8/pD9Kesciv91zSk8BoFngItQ0=
github.com/quic-go/quic-go v0.44.0/go.mod h1:z4cx/9Ny9UtGITIPzmPTXh1ULfOyWh4qGQlpnPcWmek=
github.com/rabbitmq/amqp091-go v1.10.0 h1:STpn5XsHlHGcecLmMFCtg7mqq0RnD+zFr4uzukfVhBw=
github.com/rabbitmq/amqp091-go v1.10.0/go.mod h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/riywo/loginshell v0.0.0-20200815045211-7d26008be1ab h1:ZjX6I48eZSFetPb41dHudEyVr5v953N15TsNZXlkcWY=
@@ -250,6 +252,8 @@ github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8=
go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=

lib/build/parse.go (new file, 93 lines)

@@ -0,0 +1,93 @@
// Copyright (C) 2019 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package build
import (
"errors"
"regexp"
"strings"
)
// syncthing v1.1.4-rc.1+30-g6aaae618-dirty-crashrep "Erbium Earthworm" (go1.12.5 darwin-amd64) jb@kvin.kastelo.net 2019-05-23 16:08:14 UTC [foo, bar]
// or, somewhere along the way the "+" in the version tag disappeared:
// syncthing v1.23.7-dev.26.gdf7b56ae.dirty-stversionextra "Fermium Flea" (go1.20.5 darwin-arm64) jb@ok.kastelo.net 2023-07-12 06:55:26 UTC [Some Wrapper, purego, stnoupgrade]
var (
longVersionRE = regexp.MustCompile(`syncthing\s+(v[^\s]+)\s+"([^"]+)"\s\(([^\s]+)\s+([^-]+)-([^)]+)\)\s+([^\s]+)[^\[]*(?:\[(.+)\])?$`)
gitExtraRE = regexp.MustCompile(`\.\d+\.g[0-9a-f]+`) // ".1.g6aaae618"
gitExtraSepRE = regexp.MustCompile(`[.-]`) // dot or dash
)
type VersionParts struct {
Version string // "v1.1.4-rc.1+30-g6aaae618-dirty-crashrep"
Tag string // "v1.1.4-rc.1"
Commit string // "6aaae618", blank when absent
Codename string // "Erbium Earthworm"
Runtime string // "go1.12.5"
GOOS string // "darwin"
GOARCH string // "amd64"
Builder string // "jb@kvin.kastelo.net"
Extra []string // "foo", "bar"
}
func (v VersionParts) Environment() string {
if v.Commit != "" {
return "Development"
}
if strings.Contains(v.Tag, "-rc.") {
return "Candidate"
}
if strings.Contains(v.Tag, "-") {
return "Beta"
}
return "Stable"
}
func ParseVersion(line string) (VersionParts, error) {
m := longVersionRE.FindStringSubmatch(line)
if len(m) == 0 {
return VersionParts{}, errors.New("unintelligeble version string")
}
v := VersionParts{
Version: m[1],
Codename: m[2],
Runtime: m[3],
GOOS: m[4],
GOARCH: m[5],
Builder: m[6],
}
// Split the version tag into tag and commit. This is old style
// v1.2.3-something.4+11-g12345678 or newer with just dots
// v1.2.3-something.4.11.g12345678 or v1.2.3-dev.11.g12345678.
parts := []string{v.Version}
if strings.Contains(v.Version, "+") {
parts = strings.Split(v.Version, "+")
} else {
idxs := gitExtraRE.FindStringIndex(v.Version)
if len(idxs) > 0 {
parts = []string{v.Version[:idxs[0]], v.Version[idxs[0]+1:]}
}
}
v.Tag = parts[0]
if len(parts) > 1 {
fields := gitExtraSepRE.Split(parts[1], -1)
if len(fields) >= 2 && strings.HasPrefix(fields[1], "g") {
v.Commit = fields[1][1:]
}
}
if len(m) >= 8 && m[7] != "" {
tags := strings.Split(m[7], ",")
for i := range tags {
tags[i] = strings.TrimSpace(tags[i])
}
v.Extra = tags
}
return v, nil
}

lib/build/parse_test.go (new file, 72 lines)

@@ -0,0 +1,72 @@
// Copyright (C) 2019 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
package build
import (
"fmt"
"testing"
)
func TestParseVersion(t *testing.T) {
cases := []struct {
longVersion string
parsed VersionParts
}{
{
longVersion: `syncthing v1.1.4-rc.1+30-g6aaae618-dirty-crashrep "Erbium Earthworm" (go1.12.5 darwin-amd64) jb@kvin.kastelo.net 2019-05-23 16:08:14 UTC`,
parsed: VersionParts{
Version: "v1.1.4-rc.1+30-g6aaae618-dirty-crashrep",
Tag: "v1.1.4-rc.1",
Commit: "6aaae618",
Codename: "Erbium Earthworm",
Runtime: "go1.12.5",
GOOS: "darwin",
GOARCH: "amd64",
Builder: "jb@kvin.kastelo.net",
},
},
{
longVersion: `syncthing v1.1.4-rc.1+30-g6aaae618-dirty-crashrep "Erbium Earthworm" (go1.12.5 darwin-amd64) jb@kvin.kastelo.net 2019-05-23 16:08:14 UTC [foo, bar]`,
parsed: VersionParts{
Version: "v1.1.4-rc.1+30-g6aaae618-dirty-crashrep",
Tag: "v1.1.4-rc.1",
Commit: "6aaae618",
Codename: "Erbium Earthworm",
Runtime: "go1.12.5",
GOOS: "darwin",
GOARCH: "amd64",
Builder: "jb@kvin.kastelo.net",
Extra: []string{"foo", "bar"},
},
},
{
longVersion: `syncthing v1.23.7-dev.26.gdf7b56ae-stversionextra "Fermium Flea" (go1.20.5 darwin-arm64) jb@ok.kastelo.net 2023-07-12 06:55:26 UTC [Some Wrapper, purego, stnoupgrade]`,
parsed: VersionParts{
Version: "v1.23.7-dev.26.gdf7b56ae-stversionextra",
Tag: "v1.23.7-dev",
Commit: "df7b56ae",
Codename: "Fermium Flea",
Runtime: "go1.20.5",
GOOS: "darwin",
GOARCH: "arm64",
Builder: "jb@ok.kastelo.net",
Extra: []string{"Some Wrapper", "purego", "stnoupgrade"},
},
},
}
for _, tc := range cases {
v, err := ParseVersion(tc.longVersion)
if err != nil {
t.Errorf("%s\nerror: %v\n", tc.longVersion, err)
continue
}
if fmt.Sprint(v) != fmt.Sprint(tc.parsed) {
t.Errorf("%s\nA: %v\nE: %v\n", tc.longVersion, v, tc.parsed)
}
}
}