2015-09-06 19:52:31 +00:00
|
|
|
// Copyright (C) 2015 Audrius Butkevicius and Contributors (see the CONTRIBUTORS file).
|
|
|
|
|
|
|
|
package main
|
|
|
|
|
|
|
|
import (
|
2019-11-26 07:39:51 +00:00
|
|
|
"context"
|
2015-09-06 19:52:31 +00:00
|
|
|
"crypto/tls"
|
2020-12-21 10:55:16 +00:00
|
|
|
"crypto/x509"
|
2015-09-06 19:52:31 +00:00
|
|
|
"encoding/json"
|
|
|
|
"flag"
|
|
|
|
"fmt"
|
|
|
|
"log"
|
|
|
|
"net"
|
|
|
|
"net/http"
|
|
|
|
"net/url"
|
2018-04-08 19:13:55 +00:00
|
|
|
"os"
|
2015-09-06 19:52:31 +00:00
|
|
|
"path/filepath"
|
2018-04-08 19:13:55 +00:00
|
|
|
"strconv"
|
2015-09-06 19:52:31 +00:00
|
|
|
"strings"
|
2023-01-23 07:38:55 +00:00
|
|
|
"sync/atomic"
|
2015-09-06 19:52:31 +00:00
|
|
|
"time"
|
|
|
|
|
2023-01-23 07:38:55 +00:00
|
|
|
lru "github.com/hashicorp/golang-lru/v2"
|
2018-04-08 19:13:55 +00:00
|
|
|
"github.com/prometheus/client_golang/prometheus"
|
|
|
|
"github.com/prometheus/client_golang/prometheus/promhttp"
|
2016-07-04 12:58:29 +00:00
|
|
|
"github.com/syncthing/syncthing/cmd/strelaypoolsrv/auto"
|
2020-05-10 09:44:34 +00:00
|
|
|
"github.com/syncthing/syncthing/lib/assets"
|
2024-02-27 12:05:19 +00:00
|
|
|
_ "github.com/syncthing/syncthing/lib/automaxprocs"
|
2024-05-18 17:31:49 +00:00
|
|
|
"github.com/syncthing/syncthing/lib/geoip"
|
2024-02-26 12:23:14 +00:00
|
|
|
"github.com/syncthing/syncthing/lib/protocol"
|
2015-09-22 17:51:40 +00:00
|
|
|
"github.com/syncthing/syncthing/lib/relay/client"
|
2015-09-06 19:52:31 +00:00
|
|
|
"github.com/syncthing/syncthing/lib/sync"
|
|
|
|
"github.com/syncthing/syncthing/lib/tlsutil"
|
|
|
|
)
|
|
|
|
|
2016-04-13 20:34:11 +00:00
|
|
|
// location is the geographic position of a relay, resolved via GeoIP and
// served to clients as part of the relay listing.
type location struct {
	Latitude  float64 `json:"latitude"`
	Longitude float64 `json:"longitude"`
	City      string  `json:"city"`
	Country   string  `json:"country"` // ISO country code (see getLocation)
	Continent string  `json:"continent"`
}
|
|
|
|
|
2015-09-06 19:52:31 +00:00
|
|
|
// relay is one registered relay server, as advertised to pool clients.
type relay struct {
	URL      string   `json:"url"`
	Location location `json:"location"`
	// uri is the parsed form of URL; uri.Host is used as the unique key
	// for dedup and eviction. Not serialized.
	uri *url.URL
	// Stats is the most recently fetched status document, nil if the
	// relay's status endpoint could not be queried.
	Stats          *stats    `json:"stats"`
	StatsRetrieved time.Time `json:"statsRetrieved"`
}
|
|
|
|
|
2024-06-03 05:14:45 +00:00
|
|
|
// relayShort is the compact relay representation returned by the plain
// /endpoint route: just the (slimmed) URL, no stats or location.
type relayShort struct {
	URL string `json:"url"`
}
|
|
|
|
|
2018-04-08 19:13:55 +00:00
|
|
|
// stats mirrors the status document published by a relay server
// (strelaysrv). Field names and tags must match the relay's JSON output.
type stats struct {
	StartTime          time.Time `json:"startTime"`
	UptimeSeconds      int       `json:"uptimeSeconds"`
	PendingSessionKeys int       `json:"numPendingSessionKeys"`
	ActiveSessions     int       `json:"numActiveSessions"`
	Connections        int       `json:"numConnections"`
	Proxies            int       `json:"numProxies"`
	BytesProxied       int       `json:"bytesProxied"`
	GoVersion          string    `json:"goVersion"`
	GoOS               string    `json:"goOS"`
	GoArch             string    `json:"goArch"`
	GoMaxProcs         int       `json:"goMaxProcs"`
	GoRoutines         int       `json:"goNumRoutine"`
	// Rates holds transfer rates over the windows named in the tag
	// (10s, 1m, 5m, 15m, 30m, 60m), in kbps.
	Rates []int64 `json:"kbps10s1m5m15m30m60m"`
	// Options echoes the relay's configured runtime options.
	Options struct {
		NetworkTimeout int `json:"network-timeout"`
		// NOTE(review): field name typo ("PintInterval") — the JSON tag is
		// correct; renaming the exported field could break other references.
		PintInterval   int      `json:"ping-interval"`
		MessageTimeout int      `json:"message-timeout"`
		SessionRate    int      `json:"per-session-rate"`
		GlobalRate     int      `json:"global-rate"`
		Pools          []string `json:"pools"`
		ProvidedBy     string   `json:"provided-by"`
	} `json:"options"`
}
|
|
|
|
|
|
|
|
// String implements fmt.Stringer; a relay prints as its URL.
func (r relay) String() string {
	return r.URL
}
|
|
|
|
|
|
|
|
// request is one unit of work for the requestProcessor goroutines: test
// the given relay and report the outcome on the result channel.
type request struct {
	relay  *relay
	result chan result
	// queueTimer, when non-nil, measures time spent waiting in the queue;
	// it is observed by the processor when the request is picked up.
	queueTimer *prometheus.Timer
}
|
|
|
|
|
|
|
|
// result is the outcome of testing a relay: a non-nil err on failure, or
// the duration until the relay will be evicted on success.
type result struct {
	err      error
	eviction time.Duration
}
|
|
|
|
|
|
|
|
var (
	// testCert is the client certificate used when test-dialing relays.
	testCert        tls.Certificate
	knownRelaysFile = filepath.Join(os.TempDir(), "strelaypoolsrv_known_relays")
	listen          = ":80"
	metricsListen   = ":8081"
	// dir, when set, holds http-cert.pem/http-key.pem and enables TLS.
	dir          string
	evictionTime = time.Hour
	debug        bool
	// permRelaysFile lists relays that are never evicted.
	permRelaysFile string
	// ipHeader names a reverse-proxy header carrying the client ip:port.
	ipHeader          string
	proto             string
	statsRefresh      = time.Minute
	requestQueueLen   = 64
	requestProcessors = 8
	geoipLicenseKey   = os.Getenv("GEOIP_LICENSE_KEY")
	geoipAccountID, _ = strconv.Atoi(os.Getenv("GEOIP_ACCOUNT_ID"))

	// requests is the test-work queue; sized from requestQueueLen in main.
	requests chan request

	// mut guards knownRelays, permanentRelays and evictionTimers.
	mut             = sync.NewRWMutex()
	knownRelays     = make([]*relay, 0)
	permanentRelays = make([]*relay, 0)
	evictionTimers  = make(map[string]*time.Timer)
	// globalBlocklist tracks join-failure counts per client host.
	globalBlocklist = newErrorTracker(1000)
)
|
|
|
|
|
2018-02-21 11:53:49 +00:00
|
|
|
const (
	// httpStatusEnhanceYourCalm (429 Too Many Requests) is returned when
	// the test queue is full and a join request is dropped.
	httpStatusEnhanceYourCalm = 429
)
|
|
|
|
|
2015-09-06 19:52:31 +00:00
|
|
|
// main wires up flags, the GeoIP provider, relay test workers and the HTTP
// front end, then serves until a fatal listener/server error.
func main() {
	log.SetOutput(os.Stdout)
	log.SetFlags(log.Lshortfile)

	flag.StringVar(&listen, "listen", listen, "Listen address")
	flag.StringVar(&metricsListen, "metrics-listen", metricsListen, "Metrics listen address")
	flag.StringVar(&dir, "keys", dir, "Directory where http-cert.pem and http-key.pem is stored for TLS listening")
	flag.BoolVar(&debug, "debug", debug, "Enable debug output")
	flag.DurationVar(&evictionTime, "eviction", evictionTime, "After how long the relay is evicted")
	flag.StringVar(&permRelaysFile, "perm-relays", "", "Path to list of permanent relays")
	flag.StringVar(&knownRelaysFile, "known-relays", knownRelaysFile, "Path to list of current relays")
	flag.StringVar(&ipHeader, "ip-header", "", "Name of header which holds clients ip:port. Only meaningful when running behind a reverse proxy.")
	flag.StringVar(&proto, "protocol", "tcp", "Protocol used for listening. 'tcp' for IPv4 and IPv6, 'tcp4' for IPv4, 'tcp6' for IPv6")
	flag.DurationVar(&statsRefresh, "stats-refresh", statsRefresh, "Interval at which to refresh relay stats")
	flag.IntVar(&requestQueueLen, "request-queue", requestQueueLen, "Queue length for incoming test requests")
	flag.IntVar(&requestProcessors, "request-processors", requestProcessors, "Number of request processor routines")
	flag.StringVar(&geoipLicenseKey, "geoip-license-key", geoipLicenseKey, "License key for GeoIP database")

	flag.Parse()

	requests = make(chan request, requestQueueLen)

	// The GeoIP provider shadows the imported package name from here on.
	geoip, err := geoip.NewGeoLite2CityProvider(context.Background(), geoipAccountID, geoipLicenseKey, os.TempDir())
	if err != nil {
		log.Fatalln("Failed to create GeoIP provider:", err)
	}
	go geoip.Serve(context.TODO())

	var listener net.Listener

	if permRelaysFile != "" {
		permanentRelays = loadRelays(permRelaysFile, geoip)
	}

	testCert = createTestCertificate()

	for i := 0; i < requestProcessors; i++ {
		go requestProcessor(geoip)
	}

	// Load relays from cache in the background.
	// Load them in a serial fashion to make sure any genuine requests
	// are not dropped.
	go func() {
		for _, relay := range loadRelays(knownRelaysFile, geoip) {
			resultChan := make(chan result)
			requests <- request{relay, resultChan, nil}
			result := <-resultChan
			if result.err != nil {
				relayTestsTotal.WithLabelValues("failed").Inc()
			} else {
				relayTestsTotal.WithLabelValues("success").Inc()
			}
		}
		// Run the the stats refresher once the relays are loaded.
		statsRefresher(statsRefresh)
	}()

	if dir != "" {
		if debug {
			log.Println("Starting TLS listener on", listen)
		}
		certFile, keyFile := filepath.Join(dir, "http-cert.pem"), filepath.Join(dir, "http-key.pem")
		var cert tls.Certificate
		cert, err = tls.LoadX509KeyPair(certFile, keyFile)
		if err != nil {
			log.Fatalln("Failed to load HTTP X509 key pair:", err)
		}

		tlsCfg := &tls.Config{
			Certificates: []tls.Certificate{cert},
			MinVersion:   tls.VersionTLS10, // No SSLv3
			// Client certs are requested (not required) so joining relays
			// can prove ownership of their advertised device ID.
			ClientAuth: tls.RequestClientCert,
			CipherSuites: []uint16{
				// No RC4
				tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
				tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
				tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
				tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
				tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
				tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
				tls.TLS_RSA_WITH_AES_128_CBC_SHA,
				tls.TLS_RSA_WITH_AES_256_CBC_SHA,
				tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
				tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
			},
		}

		listener, err = tls.Listen(proto, listen, tlsCfg)
	} else {
		if debug {
			log.Println("Starting plain listener on", listen)
		}
		listener, err = net.Listen(proto, listen)
	}

	if err != nil {
		log.Fatalln("listen:", err)
	}

	// Metrics are served on a separate listener so they are not exposed
	// on the public endpoint.
	if metricsListen != "" {
		mmux := http.NewServeMux()
		mmux.HandleFunc("/metrics", handleMetrics)
		go func() {
			if err := http.ListenAndServe(metricsListen, mmux); err != nil {
				log.Fatalln("HTTP serve metrics:", err)
			}
		}()
	}

	getMux := http.NewServeMux()
	getMux.HandleFunc("/", handleAssets)
	getMux.HandleFunc("/endpoint", withAPIMetrics(handleEndpointShort))
	getMux.HandleFunc("/endpoint/full", withAPIMetrics(handleEndpointFull))

	postMux := http.NewServeMux()
	postMux.HandleFunc("/endpoint", withAPIMetrics(handleRegister))

	// Route by method: reads go to getMux, joins (POST) to postMux.
	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch r.Method {
		case http.MethodGet, http.MethodHead, http.MethodOptions:
			getMux.ServeHTTP(w, r)
		case http.MethodPost:
			postMux.ServeHTTP(w, r)
		default:
			http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
		}
	})

	srv := http.Server{
		Handler:     handler,
		ReadTimeout: 10 * time.Second,
	}
	srv.SetKeepAlivesEnabled(false)

	err = srv.Serve(listener)
	if err != nil {
		log.Fatalln("serve:", err)
	}
}
|
|
|
|
|
2018-04-08 19:13:55 +00:00
|
|
|
func handleMetrics(w http.ResponseWriter, r *http.Request) {
|
|
|
|
timer := prometheus.NewTimer(metricsRequestsSeconds)
|
|
|
|
// Acquire the mutex just to make sure we're not caught mid-way stats collection
|
|
|
|
mut.RLock()
|
|
|
|
promhttp.Handler().ServeHTTP(w, r)
|
|
|
|
mut.RUnlock()
|
|
|
|
timer.ObserveDuration()
|
|
|
|
}
|
|
|
|
|
2015-10-23 23:06:02 +00:00
|
|
|
func handleAssets(w http.ResponseWriter, r *http.Request) {
|
2018-05-10 05:53:39 +00:00
|
|
|
w.Header().Set("Cache-Control", "no-cache, must-revalidate")
|
|
|
|
|
2015-10-23 23:06:02 +00:00
|
|
|
path := r.URL.Path[1:]
|
|
|
|
if path == "" {
|
|
|
|
path = "index.html"
|
|
|
|
}
|
|
|
|
|
2020-05-25 06:51:27 +00:00
|
|
|
as, ok := auto.Assets()[path]
|
2015-10-23 23:06:02 +00:00
|
|
|
if !ok {
|
|
|
|
w.WriteHeader(http.StatusNotFound)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2020-05-25 06:51:27 +00:00
|
|
|
assets.Serve(w, r, as)
|
2015-10-23 23:06:02 +00:00
|
|
|
}
|
|
|
|
|
2024-06-03 05:14:45 +00:00
|
|
|
func withAPIMetrics(next http.HandlerFunc) http.HandlerFunc {
|
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
timer := prometheus.NewTimer(apiRequestsSeconds.WithLabelValues(r.Method))
|
|
|
|
w = NewLoggingResponseWriter(w)
|
|
|
|
defer func() {
|
|
|
|
timer.ObserveDuration()
|
|
|
|
lw := w.(*loggingResponseWriter)
|
|
|
|
apiRequestsTotal.WithLabelValues(r.Method, strconv.Itoa(lw.statusCode)).Inc()
|
|
|
|
}()
|
|
|
|
next(w, r)
|
2015-09-06 19:52:31 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-06-03 05:14:45 +00:00
|
|
|
// handleEndpointFull returns the relay list with full metadata and
|
|
|
|
// statistics. Large, and expensive.
|
|
|
|
func handleEndpointFull(rw http.ResponseWriter, r *http.Request) {
|
2020-01-15 09:36:21 +00:00
|
|
|
rw.Header().Set("Content-Type", "application/json; charset=utf-8")
|
2024-06-03 05:14:45 +00:00
|
|
|
rw.Header().Set("Access-Control-Allow-Origin", "*")
|
2020-08-27 13:51:58 +00:00
|
|
|
|
2015-09-06 19:52:31 +00:00
|
|
|
mut.RLock()
|
2020-08-27 13:51:58 +00:00
|
|
|
relays := make([]*relay, len(permanentRelays)+len(knownRelays))
|
|
|
|
n := copy(relays, permanentRelays)
|
|
|
|
copy(relays[n:], knownRelays)
|
2015-09-06 19:52:31 +00:00
|
|
|
mut.RUnlock()
|
|
|
|
|
2023-01-23 07:38:55 +00:00
|
|
|
_ = json.NewEncoder(rw).Encode(map[string][]*relay{
|
2015-09-06 19:52:31 +00:00
|
|
|
"relays": relays,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2024-06-03 05:14:45 +00:00
|
|
|
// handleEndpointShort returns the relay list with only the URL.
|
|
|
|
func handleEndpointShort(rw http.ResponseWriter, r *http.Request) {
|
|
|
|
rw.Header().Set("Content-Type", "application/json; charset=utf-8")
|
|
|
|
rw.Header().Set("Access-Control-Allow-Origin", "*")
|
|
|
|
|
|
|
|
mut.RLock()
|
|
|
|
relays := make([]relayShort, 0, len(permanentRelays)+len(knownRelays))
|
|
|
|
for _, r := range append(permanentRelays, knownRelays...) {
|
|
|
|
relays = append(relays, relayShort{URL: slimURL(r.URL)})
|
|
|
|
}
|
|
|
|
mut.RUnlock()
|
|
|
|
|
|
|
|
_ = json.NewEncoder(rw).Encode(map[string][]relayShort{
|
|
|
|
"relays": relays,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
|
|
|
// handleRegister accepts a relay join request (POST /endpoint): it
// validates the advertised URL against the client's address and optional
// TLS certificate, then queues the relay for a connectivity test and
// responds with the eviction deadline on success.
func handleRegister(w http.ResponseWriter, r *http.Request) {
	// Get the IP address of the client
	rhost := r.RemoteAddr
	if ipHeader != "" {
		// Behind a reverse proxy, trust the configured header; the last
		// comma-separated entry is the address the proxy itself saw.
		hdr := r.Header.Get(ipHeader)
		fields := strings.Split(hdr, ",")
		if len(fields) > 0 {
			rhost = strings.TrimSpace(fields[len(fields)-1])
		}
	}
	if host, _, err := net.SplitHostPort(rhost); err == nil {
		rhost = host
	}

	// Check the black list. A client is blacklisted if their last 10
	// attempts to join have all failed. The "Unauthorized" status return
	// causes strelaysrv to cease attempting to join.
	if globalBlocklist.IsBlocked(rhost) {
		log.Println("Rejected blocked client", rhost)
		http.Error(w, "Too many errors", http.StatusUnauthorized)
		globalBlocklist.ClearErrors(rhost)
		return
	}

	// If the relay joined over TLS with a client certificate, remember it
	// so the advertised device ID can be verified below.
	var relayCert *x509.Certificate
	if r.TLS != nil && len(r.TLS.PeerCertificates) > 0 {
		relayCert = r.TLS.PeerCertificates[0]
		log.Printf("Got TLS cert from relay server")
	}

	var newRelay relay
	err := json.NewDecoder(r.Body).Decode(&newRelay)
	r.Body.Close()

	if err != nil {
		if debug {
			log.Println("Failed to parse payload")
		}
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	uri, err := url.Parse(newRelay.URL)
	if err != nil {
		if debug {
			log.Println("Failed to parse URI", newRelay.URL)
		}
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	// Canonicalize the URL. In particular, parse and re-encode the query
	// string so that it's guaranteed to be valid.
	uri.RawQuery = uri.Query().Encode()
	newRelay.URL = uri.String()

	// With a client cert present, the advertised ?id= must match the
	// device ID derived from that cert.
	if relayCert != nil {
		advertisedId := uri.Query().Get("id")
		idFromCert := protocol.NewDeviceID(relayCert.Raw).String()
		if advertisedId != idFromCert {
			log.Println("Warning: Relay server requested to join with an ID different from the join request, rejecting")
			http.Error(w, "mismatched advertised id and join request cert", http.StatusBadRequest)
			return
		}
	}

	host, port, err := net.SplitHostPort(uri.Host)
	if err != nil {
		if debug {
			log.Println("Failed to split URI", newRelay.URL)
		}
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	ip := net.ParseIP(host)
	// The client did not provide an IP address, use the IP address of the client.
	if ip == nil || ip.IsUnspecified() {
		uri.Host = net.JoinHostPort(rhost, port)
		newRelay.URL = uri.String()
	} else if host != rhost && relayCert == nil {
		// Without a verifying cert, a relay may only register its own
		// observed address.
		if debug {
			log.Println("IP address advertised does not match client IP address", r.RemoteAddr, uri)
		}
		http.Error(w, fmt.Sprintf("IP advertised %s does not match client IP %s", host, rhost), http.StatusUnauthorized)
		return
	}

	newRelay.uri = uri

	// Permanent relays cannot be (re-)registered dynamically.
	for _, current := range permanentRelays {
		if current.uri.Host == newRelay.uri.Host {
			if debug {
				log.Println("Asked to add a relay", newRelay, "which exists in permanent list")
			}
			http.Error(w, "Invalid request", http.StatusBadRequest)
			return
		}
	}

	reschan := make(chan result)

	// Non-blocking enqueue: if the test queue is full, drop the request
	// with 429 rather than stalling the handler.
	select {
	case requests <- request{&newRelay, reschan, prometheus.NewTimer(relayTestActionsSeconds.WithLabelValues("queue"))}:
		result := <-reschan
		if result.err != nil {
			log.Println("Join from", r.RemoteAddr, "failed:", result.err)
			globalBlocklist.AddError(rhost)
			relayTestsTotal.WithLabelValues("failed").Inc()
			http.Error(w, result.err.Error(), http.StatusBadRequest)
			return
		}
		log.Println("Join from", r.RemoteAddr, "succeeded")
		globalBlocklist.ClearErrors(rhost)
		relayTestsTotal.WithLabelValues("success").Inc()
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		json.NewEncoder(w).Encode(map[string]time.Duration{
			"evictionIn": result.eviction,
		})

	default:
		relayTestsTotal.WithLabelValues("dropped").Inc()
		if debug {
			log.Println("Dropping request")
		}
		w.WriteHeader(httpStatusEnhanceYourCalm)
	}
}
|
|
|
|
|
2024-05-18 17:31:49 +00:00
|
|
|
func requestProcessor(geoip *geoip.Provider) {
|
2015-09-06 19:52:31 +00:00
|
|
|
for request := range requests {
|
2018-04-08 19:13:55 +00:00
|
|
|
if request.queueTimer != nil {
|
|
|
|
request.queueTimer.ObserveDuration()
|
|
|
|
}
|
|
|
|
|
|
|
|
timer := prometheus.NewTimer(relayTestActionsSeconds.WithLabelValues("test"))
|
2024-05-18 17:31:49 +00:00
|
|
|
handleRelayTest(request, geoip)
|
2018-04-08 19:13:55 +00:00
|
|
|
timer.ObserveDuration()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2024-05-18 17:31:49 +00:00
|
|
|
// handleRelayTest performs the actual relay test: dial the relay with the
// test certificate, fetch its stats and location, and (re-)insert it into
// knownRelays with a fresh eviction timer. The outcome is reported on
// request.result.
func handleRelayTest(request request, geoip *geoip.Provider) {
	if debug {
		log.Println("Request for", request.relay)
	}
	if err := client.TestRelay(context.TODO(), request.relay.uri, []tls.Certificate{testCert}, time.Second, 2*time.Second, 3); err != nil {
		if debug {
			log.Println("Test for relay", request.relay, "failed:", err)
		}
		request.result <- result{err, 0}
		return
	}

	stats := fetchStats(request.relay)
	location := getLocation(request.relay.uri.Host, geoip)

	mut.Lock()
	if stats != nil {
		updateMetrics(request.relay.uri.Host, *stats, location)
	}
	request.relay.Stats = stats
	// Truncated to whole seconds so the serialized timestamp stays compact.
	request.relay.StatsRetrieved = time.Now().Truncate(time.Second)
	request.relay.Location = location

	// Cancel any pending eviction for this host; a new timer is armed below.
	timer, ok := evictionTimers[request.relay.uri.Host]
	if ok {
		if debug {
			log.Println("Stopping existing timer for", request.relay)
		}
		timer.Stop()
	}

	for i, current := range knownRelays {
		if current.uri.Host == request.relay.uri.Host {
			if debug {
				log.Println("Relay", request.relay, "already exists")
			}

			// Evict the old entry anyway, as configuration might have changed.
			// Swap-delete: move the last element into slot i and shrink.
			last := len(knownRelays) - 1
			knownRelays[i] = knownRelays[last]
			knownRelays = knownRelays[:last]

			goto found
		}
	}

	if debug {
		log.Println("Adding new relay", request.relay)
	}

found:

	knownRelays = append(knownRelays, request.relay)
	evictionTimers[request.relay.uri.Host] = time.AfterFunc(evictionTime, evict(request.relay))

	mut.Unlock()

	// NOTE(review): knownRelays is read here after the lock is released;
	// a concurrent mutation could race with this save — confirm intended.
	if err := saveRelays(knownRelaysFile, knownRelays); err != nil {
		log.Println("Failed to write known relays: " + err.Error())
	}

	request.result <- result{nil, evictionTime}
}
|
|
|
|
|
2018-04-08 19:13:55 +00:00
|
|
|
// evict returns the timer callback that removes the given relay from
// knownRelays (matched by host) and drops its metrics and eviction timer.
func evict(relay *relay) func() {
	return func() {
		mut.Lock()
		defer mut.Unlock()
		if debug {
			log.Println("Evicting", relay)
		}
		for i, current := range knownRelays {
			if current.uri.Host == relay.uri.Host {
				if debug {
					log.Println("Evicted", relay)
				}
				// Swap-delete the matching entry.
				last := len(knownRelays) - 1
				knownRelays[i] = knownRelays[last]
				knownRelays = knownRelays[:last]
				deleteMetrics(current.uri.Host)
			}
		}
		delete(evictionTimers, relay.uri.Host)
	}
}
|
2015-09-07 17:13:50 +00:00
|
|
|
|
2024-05-18 17:31:49 +00:00
|
|
|
// loadRelays reads a newline-separated list of relay URLs from file and
// returns them as relay entries with their location resolved. Unparsable
// lines are skipped; a read error yields nil.
func loadRelays(file string, geoip *geoip.Provider) []*relay {
	content, err := os.ReadFile(file)
	if err != nil {
		log.Println("Failed to load relays: " + err.Error())
		return nil
	}

	var relays []*relay
	for _, line := range strings.Split(string(content), "\n") {
		if line == "" {
			continue
		}

		uri, err := url.Parse(line)
		if err != nil {
			if debug {
				log.Println("Skipping relay", line, "due to parse error", err)
			}
			continue

		}

		relays = append(relays, &relay{
			URL:      line,
			Location: getLocation(uri.Host, geoip),
			uri:      uri,
		})
		if debug {
			log.Println("Adding relay", line)
		}
	}
	return relays
}
|
|
|
|
|
|
|
|
func saveRelays(file string, relays []*relay) error {
|
|
|
|
var content string
|
|
|
|
for _, relay := range relays {
|
|
|
|
content += relay.uri.String() + "\n"
|
|
|
|
}
|
2023-01-23 07:38:55 +00:00
|
|
|
return os.WriteFile(file, []byte(content), 0o777)
|
2015-09-07 17:13:50 +00:00
|
|
|
}
|
|
|
|
|
2015-09-22 07:02:18 +00:00
|
|
|
func createTestCertificate() tls.Certificate {
|
2021-11-22 07:59:47 +00:00
|
|
|
tmpDir, err := os.MkdirTemp("", "relaypoolsrv")
|
2015-09-22 07:02:18 +00:00
|
|
|
if err != nil {
|
|
|
|
log.Fatal(err)
|
2015-09-07 17:13:50 +00:00
|
|
|
}
|
|
|
|
|
2015-09-22 07:02:18 +00:00
|
|
|
certFile, keyFile := filepath.Join(tmpDir, "cert.pem"), filepath.Join(tmpDir, "key.pem")
|
2019-10-16 18:31:46 +00:00
|
|
|
cert, err := tlsutil.NewCertificate(certFile, keyFile, "relaypoolsrv", 20*365)
|
2015-09-07 17:13:50 +00:00
|
|
|
if err != nil {
|
|
|
|
log.Fatalln("Failed to create test X509 key pair:", err)
|
|
|
|
}
|
2015-09-22 07:02:18 +00:00
|
|
|
|
|
|
|
return cert
|
2015-09-07 17:13:50 +00:00
|
|
|
}
|
2016-04-13 20:34:11 +00:00
|
|
|
|
2024-05-18 17:31:49 +00:00
|
|
|
// getLocation resolves host ("host:port") to an IP and looks up its
// geographic location via GeoIP. Returns the zero location on any
// resolution or lookup failure.
func getLocation(host string, geoip *geoip.Provider) location {
	timer := prometheus.NewTimer(locationLookupSeconds)
	defer timer.ObserveDuration()

	addr, err := net.ResolveTCPAddr("tcp", host)
	if err != nil {
		return location{}
	}

	city, err := geoip.City(addr.IP)
	if err != nil {
		return location{}
	}

	return location{
		Longitude: city.Location.Longitude,
		Latitude:  city.Location.Latitude,
		City:      city.City.Names["en"],
		Country:   city.Country.IsoCode,
		Continent: city.Continent.Code,
	}
}
|
2018-04-08 19:13:55 +00:00
|
|
|
|
|
|
|
// loggingResponseWriter wraps an http.ResponseWriter, capturing the
// status code written so it can be reported in request metrics.
type loggingResponseWriter struct {
	http.ResponseWriter
	statusCode int
}
|
|
|
|
|
|
|
|
// NewLoggingResponseWriter wraps w; the status defaults to 200 OK, which
// is what the net/http machinery implies when WriteHeader is never called.
func NewLoggingResponseWriter(w http.ResponseWriter) *loggingResponseWriter {
	return &loggingResponseWriter{w, http.StatusOK}
}
|
|
|
|
|
|
|
|
// WriteHeader records the status code before delegating to the wrapped
// ResponseWriter.
func (lrw *loggingResponseWriter) WriteHeader(code int) {
	lrw.statusCode = code
	lrw.ResponseWriter.WriteHeader(code)
}
|
2023-01-23 07:38:55 +00:00
|
|
|
|
|
|
|
// errorTracker counts join errors per host in a bounded (2Q LRU) cache,
// so the memory used by the blocklist cannot grow without limit.
type errorTracker struct {
	errors *lru.TwoQueueCache[string, *errorCounter]
}
|
|
|
|
|
|
|
|
// errorCounter is a single host's error count; atomic so it can be
// updated without holding a lock over the cache entry.
type errorCounter struct {
	count atomic.Int32
}
|
|
|
|
|
|
|
|
func newErrorTracker(size int) *errorTracker {
|
|
|
|
cache, err := lru.New2Q[string, *errorCounter](size)
|
|
|
|
if err != nil {
|
|
|
|
panic(err)
|
|
|
|
}
|
|
|
|
return &errorTracker{
|
|
|
|
errors: cache,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// AddError increments the error count for host, creating a counter entry
// if none exists yet.
// NOTE(review): the Get/Add pair is not atomic — two concurrent callers
// for a new host may each create a counter and one increment can be lost;
// presumably acceptable for this heuristic blocklist, confirm.
func (b *errorTracker) AddError(host string) {
	entry, ok := b.errors.Get(host)
	if !ok {
		entry = &errorCounter{}
		b.errors.Add(host, entry)
	}
	c := entry.count.Add(1)
	log.Printf("Error count for %s is now %d", host, c)
}
|
|
|
|
|
|
|
|
// ClearErrors forgets all recorded errors for host.
func (b *errorTracker) ClearErrors(host string) {
	b.errors.Remove(host)
}
|
|
|
|
|
|
|
|
// IsBlocked reports whether host has accumulated more than 10 errors
// (i.e. blocking starts at the 11th recorded error).
func (b *errorTracker) IsBlocked(host string) bool {
	if be, ok := b.errors.Get(host); ok {
		return be.count.Load() > 10
	}
	return false
}
|
2024-06-03 05:14:45 +00:00
|
|
|
|
|
|
|
// slimURL strips a relay URL down to its essentials, keeping only the
// "id" query parameter (when present) and dropping everything else from
// the query string. Unparsable input is returned unchanged.
func slimURL(u string) string {
	parsed, err := url.Parse(u)
	if err != nil {
		return u
	}

	q := url.Values{}
	if id := parsed.Query().Get("id"); id != "" {
		q.Set("id", id)
	}
	parsed.RawQuery = q.Encode()

	return parsed.String()
}
|