// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package api

import (
	"bytes"
	"context"
	"crypto/tls"
	"crypto/x509"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"log"
	"net"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"reflect"
	"runtime"
	"runtime/pprof"
	"sort"
	"strconv"
	"strings"
	"time"
	"unicode"

	"github.com/calmh/incontainer"
	"github.com/julienschmidt/httprouter"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"github.com/rcrowley/go-metrics"
	"github.com/thejerf/suture/v4"
	"github.com/vitrun/qart/qr"
	"golang.org/x/text/runes"
	"golang.org/x/text/transform"
	"golang.org/x/text/unicode/norm"

	"github.com/syncthing/syncthing/lib/build"
	"github.com/syncthing/syncthing/lib/config"
	"github.com/syncthing/syncthing/lib/connections"
	"github.com/syncthing/syncthing/lib/db"
	"github.com/syncthing/syncthing/lib/discover"
	"github.com/syncthing/syncthing/lib/events"
	"github.com/syncthing/syncthing/lib/fs"
	"github.com/syncthing/syncthing/lib/ignore"
	"github.com/syncthing/syncthing/lib/locations"
	"github.com/syncthing/syncthing/lib/logger"
	"github.com/syncthing/syncthing/lib/model"
	"github.com/syncthing/syncthing/lib/protocol"
	"github.com/syncthing/syncthing/lib/rand"
	"github.com/syncthing/syncthing/lib/svcutil"
	"github.com/syncthing/syncthing/lib/sync"
	"github.com/syncthing/syncthing/lib/tlsutil"
	"github.com/syncthing/syncthing/lib/upgrade"
	"github.com/syncthing/syncthing/lib/ur"
)

const (
	// Default mask excludes these very noisy event types to avoid filling the pipe.
	// FIXME: ItemStarted and ItemFinished should be excluded for the same reason.
	DefaultEventMask      = events.AllEvents &^ events.LocalChangeDetected &^ events.RemoteChangeDetected
	DiskEventMask         = events.LocalChangeDetected | events.RemoteChangeDetected
	EventSubBufferSize    = 1000
	defaultEventTimeout   = time.Minute
	httpsCertLifetimeDays = 820
)

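// service is the concrete implementation behind the exported Service
// interface: it owns the HTTP listener, the REST routes and the buffered
// event subscriptions handed to it at construction time.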
type service struct {
	suture.Service

	id                   protocol.DeviceID
	cfg                  config.Wrapper
	statics              *staticsServer
	model                model.Model
	eventSubs            map[events.EventType]events.BufferedSubscription
	eventSubsMut         sync.Mutex
	evLogger             events.Logger
	discoverer           discover.Manager
	connectionsService   connections.Service
	fss                  model.FolderSummaryService
	urService            *ur.Service
	noUpgrade            bool
	tlsDefaultCommonName string
	configChanged        chan struct{} // signals intentional listener close due to config change
	started              chan string   // signals startup complete by sending the listener address, for testing only
	startedOnce          chan struct{} // the service has started successfully at least once
	startupErr           error
	listenerAddr         net.Addr
	exitChan             chan *svcutil.FatalErr

	guiErrors logger.Recorder
	systemLog logger.Recorder
}

var _ config.Verifier = &service{}

type Service interface {
	suture.Service
	config.Committer
	WaitForStart() error
}

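// New constructs the API service with its dependencies. The result
// implements suture.Service and is normally run under a supervisor by the
// caller; it does not start serving on its own.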
func New(id protocol.DeviceID, cfg config.Wrapper, assetDir, tlsDefaultCommonName string, m model.Model, defaultSub, diskSub events.BufferedSubscription, evLogger events.Logger, discoverer discover.Manager, connectionsService connections.Service, urService *ur.Service, fss model.FolderSummaryService, errors, systemLog logger.Recorder, noUpgrade bool) Service {
	return &service{
		id:      id,
		cfg:     cfg,
		statics: newStaticsServer(cfg.GUI().Theme, assetDir),
		model:   m,
		eventSubs: map[events.EventType]events.BufferedSubscription{
			DefaultEventMask: defaultSub,
			DiskEventMask:    diskSub,
		},
		eventSubsMut:         sync.NewMutex(),
		evLogger:             evLogger,
		discoverer:           discoverer,
		connectionsService:   connectionsService,
		fss:                  fss,
		urService:            urService,
		guiErrors:            errors,
		systemLog:            systemLog,
		noUpgrade:            noUpgrade,
		tlsDefaultCommonName: tlsDefaultCommonName,
		configChanged:        make(chan struct{}),
		startedOnce:          make(chan struct{}),
		exitChan:             make(chan *svcutil.FatalErr, 1),
	}
}

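// WaitForStart blocks until the service has attempted to start at least
// once, then returns the startup error, if any.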
func (s *service) WaitForStart() error {
	<-s.startedOnce
	return s.startupErr
}

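// getListener loads the HTTPS certificate, regenerating it when it is
// missing or about to expire, and returns a TLS-capable listener on the
// configured GUI address. Unix sockets are unlinked before bind and get the
// configured permissions applied.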
func (s *service) getListener(guiCfg config.GUIConfiguration) (net.Listener, error) {
	httpsCertFile := locations.Get(locations.HTTPSCertFile)
	httpsKeyFile := locations.Get(locations.HTTPSKeyFile)
	cert, err := tls.LoadX509KeyPair(httpsCertFile, httpsKeyFile)

	// If the certificate has expired or will expire in the next month, fail
	// it and generate a new one.
	if err == nil {
		err = shouldRegenerateCertificate(cert)
	}
	if err != nil {
		l.Infoln("Loading HTTPS certificate:", err)
		l.Infoln("Creating new HTTPS certificate")

		// When generating the HTTPS certificate, use the system host name per
		// default. If that isn't available, use the "syncthing" default.
		var name string
		name, err = os.Hostname()
		if err != nil {
			name = s.tlsDefaultCommonName
		}
		name, err = sanitizedHostname(name)
		if err != nil {
			name = s.tlsDefaultCommonName
		}

		cert, err = tlsutil.NewCertificate(httpsCertFile, httpsKeyFile, name, httpsCertLifetimeDays)
	}
	if err != nil {
		return nil, err
	}
	tlsCfg := tlsutil.SecureDefaultWithTLS12()
	tlsCfg.Certificates = []tls.Certificate{cert}

	if guiCfg.Network() == "unix" {
		// When listening on a UNIX socket we should unlink before bind,
		// lest we get a "bind: address already in use". We don't
		// particularly care if this succeeds or not.
		os.Remove(guiCfg.Address())
	}
	rawListener, err := net.Listen(guiCfg.Network(), guiCfg.Address())
	if err != nil {
		return nil, err
	}

	if guiCfg.Network() == "unix" && guiCfg.UnixSocketPermissions() != 0 {
		// We should error if this fails under the assumption that these permissions are
		// required for operation.
		err = os.Chmod(guiCfg.Address(), guiCfg.UnixSocketPermissions())
		if err != nil {
			return nil, err
		}
	}

	listener := &tlsutil.DowngradingListener{
		Listener:  rawListener,
		TLSConfig: tlsCfg,
	}
	return listener, nil
}

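// sendJSON writes the given object as indented JSON with the appropriate
// content type, or a 500 carrying the marshalling error if encoding fails.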
func sendJSON(w http.ResponseWriter, jsonObject interface{}) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	// Marshalling might fail, in which case we should return a 500 with the
	// actual error.
	bs, err := json.MarshalIndent(jsonObject, "", "  ")
	if err != nil {
		// This Marshal() can't fail though.
		bs, _ = json.Marshal(map[string]string{"error": err.Error()})
		http.Error(w, string(bs), http.StatusInternalServerError)
		return
	}
	fmt.Fprintf(w, "%s\n", bs)
}

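// Serve builds the REST routes and the middleware chain, then runs the HTTP
// server until the context is cancelled, the GUI configuration changes, or a
// fatal error is reported.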
func (s *service) Serve(ctx context.Context) error {
	listener, err := s.getListener(s.cfg.GUI())
	if err != nil {
		select {
		case <-s.startedOnce:
			// We let this be a loud user-visible warning as it may be the only
			// indication they get that the GUI won't be available.
			l.Warnln("Starting API/GUI:", err)

		default:
			// This is during initialization. A failure here should be fatal
			// as there will be no way for the user to communicate with us
			// otherwise anyway.
			s.startupErr = err
			close(s.startedOnce)
		}
		return err
	}

	if listener == nil {
		// Not much we can do here other than exit quickly. The supervisor
		// will log an error at some point.
		return nil
	}

	s.listenerAddr = listener.Addr()
	defer listener.Close()

	s.cfg.Subscribe(s)
	defer s.cfg.Unsubscribe(s)

	restMux := httprouter.New()

	// The GET handlers
	restMux.HandlerFunc(http.MethodGet, "/rest/cluster/pending/devices", s.getPendingDevices) // -
	restMux.HandlerFunc(http.MethodGet, "/rest/cluster/pending/folders", s.getPendingFolders) // [device]
	restMux.HandlerFunc(http.MethodGet, "/rest/db/completion", s.getDBCompletion) // [device] [folder]
	restMux.HandlerFunc(http.MethodGet, "/rest/db/file", s.getDBFile) // folder file
	restMux.HandlerFunc(http.MethodGet, "/rest/db/ignores", s.getDBIgnores) // folder
	restMux.HandlerFunc(http.MethodGet, "/rest/db/need", s.getDBNeed) // folder [perpage] [page]
	restMux.HandlerFunc(http.MethodGet, "/rest/db/remoteneed", s.getDBRemoteNeed) // device folder [perpage] [page]
	restMux.HandlerFunc(http.MethodGet, "/rest/db/localchanged", s.getDBLocalChanged) // folder [perpage] [page]
	restMux.HandlerFunc(http.MethodGet, "/rest/db/status", s.getDBStatus) // folder
	restMux.HandlerFunc(http.MethodGet, "/rest/db/browse", s.getDBBrowse) // folder [prefix] [dirsonly] [levels]
	restMux.HandlerFunc(http.MethodGet, "/rest/folder/versions", s.getFolderVersions) // folder
	restMux.HandlerFunc(http.MethodGet, "/rest/folder/errors", s.getFolderErrors) // folder [perpage] [page]
	restMux.HandlerFunc(http.MethodGet, "/rest/folder/pullerrors", s.getFolderErrors) // folder (deprecated)
	restMux.HandlerFunc(http.MethodGet, "/rest/events", s.getIndexEvents) // [since] [limit] [timeout] [events]
	restMux.HandlerFunc(http.MethodGet, "/rest/events/disk", s.getDiskEvents) // [since] [limit] [timeout]
	restMux.HandlerFunc(http.MethodGet, "/rest/noauth/health", s.getHealth) // -
	restMux.HandlerFunc(http.MethodGet, "/rest/stats/device", s.getDeviceStats) // -
	restMux.HandlerFunc(http.MethodGet, "/rest/stats/folder", s.getFolderStats) // -
	restMux.HandlerFunc(http.MethodGet, "/rest/svc/deviceid", s.getDeviceID) // id
	restMux.HandlerFunc(http.MethodGet, "/rest/svc/lang", s.getLang) // -
	restMux.HandlerFunc(http.MethodGet, "/rest/svc/report", s.getReport) // -
	restMux.HandlerFunc(http.MethodGet, "/rest/svc/random/string", s.getRandomString) // [length]
	restMux.HandlerFunc(http.MethodGet, "/rest/system/browse", s.getSystemBrowse) // current
	restMux.HandlerFunc(http.MethodGet, "/rest/system/connections", s.getSystemConnections) // -
	restMux.HandlerFunc(http.MethodGet, "/rest/system/discovery", s.getSystemDiscovery) // -
	restMux.HandlerFunc(http.MethodGet, "/rest/system/error", s.getSystemError) // -
	restMux.HandlerFunc(http.MethodGet, "/rest/system/paths", s.getSystemPaths) // -
	restMux.HandlerFunc(http.MethodGet, "/rest/system/ping", s.restPing) // -
	restMux.HandlerFunc(http.MethodGet, "/rest/system/status", s.getSystemStatus) // -
	restMux.HandlerFunc(http.MethodGet, "/rest/system/upgrade", s.getSystemUpgrade) // -
	restMux.HandlerFunc(http.MethodGet, "/rest/system/version", s.getSystemVersion) // -
	restMux.HandlerFunc(http.MethodGet, "/rest/system/debug", s.getSystemDebug) // -
	restMux.HandlerFunc(http.MethodGet, "/rest/system/log", s.getSystemLog) // [since]
	restMux.HandlerFunc(http.MethodGet, "/rest/system/log.txt", s.getSystemLogTxt) // [since]

	// The POST handlers
	restMux.HandlerFunc(http.MethodPost, "/rest/db/prio", s.postDBPrio) // folder file
	restMux.HandlerFunc(http.MethodPost, "/rest/db/ignores", s.postDBIgnores) // folder
	restMux.HandlerFunc(http.MethodPost, "/rest/db/override", s.postDBOverride) // folder
	restMux.HandlerFunc(http.MethodPost, "/rest/db/revert", s.postDBRevert) // folder
	restMux.HandlerFunc(http.MethodPost, "/rest/db/scan", s.postDBScan) // folder [sub...] [delay]
	restMux.HandlerFunc(http.MethodPost, "/rest/folder/versions", s.postFolderVersionsRestore) // folder <body>
	restMux.HandlerFunc(http.MethodPost, "/rest/system/error", s.postSystemError) // <body>
	restMux.HandlerFunc(http.MethodPost, "/rest/system/error/clear", s.postSystemErrorClear) // -
	restMux.HandlerFunc(http.MethodPost, "/rest/system/ping", s.restPing) // -
	restMux.HandlerFunc(http.MethodPost, "/rest/system/reset", s.postSystemReset) // [folder]
	restMux.HandlerFunc(http.MethodPost, "/rest/system/restart", s.postSystemRestart) // -
	restMux.HandlerFunc(http.MethodPost, "/rest/system/shutdown", s.postSystemShutdown) // -
	restMux.HandlerFunc(http.MethodPost, "/rest/system/upgrade", s.postSystemUpgrade) // -
	restMux.HandlerFunc(http.MethodPost, "/rest/system/pause", s.makeDevicePauseHandler(true)) // [device]
	restMux.HandlerFunc(http.MethodPost, "/rest/system/resume", s.makeDevicePauseHandler(false)) // [device]
	restMux.HandlerFunc(http.MethodPost, "/rest/system/debug", s.postSystemDebug) // [enable] [disable]

	// The DELETE handlers
	restMux.HandlerFunc(http.MethodDelete, "/rest/cluster/pending/devices", s.deletePendingDevices) // device
	restMux.HandlerFunc(http.MethodDelete, "/rest/cluster/pending/folders", s.deletePendingFolders) // folder [device]

	// Config endpoints

	configBuilder := &configMuxBuilder{
		Router: restMux,
		id:     s.id,
		cfg:    s.cfg,
	}

	configBuilder.registerConfig("/rest/config")
	configBuilder.registerConfigInsync("/rest/config/insync") // deprecated
	configBuilder.registerConfigRequiresRestart("/rest/config/restart-required")
	configBuilder.registerFolders("/rest/config/folders")
	configBuilder.registerDevices("/rest/config/devices")
	configBuilder.registerFolder("/rest/config/folders/:id")
	configBuilder.registerDevice("/rest/config/devices/:id")
	configBuilder.registerDefaultFolder("/rest/config/defaults/folder")
	configBuilder.registerDefaultDevice("/rest/config/defaults/device")
	configBuilder.registerDefaultIgnores("/rest/config/defaults/ignores")
	configBuilder.registerOptions("/rest/config/options")
	configBuilder.registerLDAP("/rest/config/ldap")
	configBuilder.registerGUI("/rest/config/gui")

	// Deprecated config endpoints
	configBuilder.registerConfigDeprecated("/rest/system/config") // POST instead of PUT
	configBuilder.registerConfigInsync("/rest/system/config/insync")

	// Debug endpoints, not for general use
	debugMux := http.NewServeMux()
	debugMux.HandleFunc("/rest/debug/peerCompletion", s.getPeerCompletion)
	debugMux.HandleFunc("/rest/debug/httpmetrics", s.getSystemHTTPMetrics)
	debugMux.HandleFunc("/rest/debug/cpuprof", s.getCPUProf) // duration
	debugMux.HandleFunc("/rest/debug/heapprof", s.getHeapProf)
	debugMux.HandleFunc("/rest/debug/support", s.getSupportBundle)
	debugMux.HandleFunc("/rest/debug/file", s.getDebugFile)
	restMux.Handler(http.MethodGet, "/rest/debug/*method", s.whenDebugging(debugMux))

	// A handler that disables caching
	noCacheRestMux := noCacheMiddleware(metricsMiddleware(restMux))

	// The main routing handler
	mux := http.NewServeMux()
	mux.Handle("/rest/", noCacheRestMux)
	mux.HandleFunc("/qr/", s.getQR)

	// Serve compiled in assets unless an asset directory was set (for development)
	mux.Handle("/", s.statics)

	// Handle the special meta.js path
	mux.HandleFunc("/meta.js", s.getJSMetadata)

	// Handle Prometheus metrics
	promHttpHandler := promhttp.Handler()
	mux.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) {
		// fetching metrics counts as an event, for the purpose of whether
		// we should prepare folder summaries etc.
		s.fss.OnEventRequest()
		promHttpHandler.ServeHTTP(w, req)
	})

	guiCfg := s.cfg.GUI()

	// Wrap everything in CSRF protection. The /rest prefix should be
	// protected, other requests will grant cookies.
	var handler http.Handler = newCsrfManager(s.id.String()[:5], "/rest", guiCfg, mux, locations.Get(locations.CsrfTokens))

	// Add our version and ID as a header to responses
	handler = withDetailsMiddleware(s.id, handler)

	// Wrap everything in basic auth, if user/password is set.
	if guiCfg.IsAuthEnabled() {
		handler = basicAuthAndSessionMiddleware("sessionid-"+s.id.String()[:5], guiCfg, s.cfg.LDAP(), handler, s.evLogger)
	}

	// Redirect to HTTPS if we are supposed to
	if guiCfg.UseTLS() {
		handler = redirectToHTTPSMiddleware(handler)
	}

	// Add the CORS handling
	handler = corsMiddleware(handler, guiCfg.InsecureAllowFrameLoading)

	if addressIsLocalhost(guiCfg.Address()) && !guiCfg.InsecureSkipHostCheck {
		// Verify source host
		handler = localhostMiddleware(handler)
	}

	handler = debugMiddleware(handler)

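	// The effective middleware chain, outermost first, is now: debug
	// logging, localhost host check (when enabled), CORS and security
	// headers, HTTPS redirect (when enabled), authentication (when
	// enabled), version/ID headers, CSRF protection, and finally the
	// routing mux itself.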
	srv := http.Server{
		Handler: handler,
		// ReadTimeout must be longer than SyncthingController $scope.refresh
		// interval to avoid HTTP keepalive/GUI refresh race.
		ReadTimeout: 15 * time.Second,
		// Prevent the HTTP server from logging stuff on its own. The things we
		// care about we log ourselves from the handlers.
		ErrorLog: log.New(io.Discard, "", 0),
	}

	l.Infoln("GUI and API listening on", listener.Addr())
	l.Infoln("Access the GUI via the following URL:", guiCfg.URL())
	if s.started != nil {
		// only set when run by the tests
		select {
		case <-ctx.Done(): // Shouldn't return directly due to cleanup below
		case s.started <- listener.Addr().String():
		}
	}

	// Indicate successful initial startup, to ourselves and to interested
	// listeners (i.e. the thing that starts the browser).
	select {
	case <-s.startedOnce:
	default:
		close(s.startedOnce)
	}

	// Serve in the background

	serveError := make(chan error, 1)
	go func() {
		select {
		case serveError <- srv.Serve(listener):
		case <-ctx.Done():
		}
	}()

	// Wait for stop, restart or error signals

	err = nil
	select {
	case <-ctx.Done():
		// Shutting down permanently
		l.Debugln("shutting down (stop)")
	case <-s.configChanged:
		// Soft restart due to configuration change
		l.Debugln("restarting (config changed)")
	case err = <-s.exitChan:
	case err = <-serveError:
		// Restart due to listen/serve failure
		l.Warnln("GUI/API:", err, "(restarting)")
	}
	// Give it a moment to shut down gracefully, e.g. if we are restarting
	// due to a config change through the API, let that finish successfully.
	timeout, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	if err := srv.Shutdown(timeout); err == timeout.Err() {
		srv.Close()
	}

	return err
}

// Complete implements suture.IsCompletable, which signifies to the supervisor
// whether to stop restarting the service.
func (s *service) Complete() bool {
	select {
	case <-s.startedOnce:
		return s.startupErr != nil
	default:
	}
	return false
}

func (s *service) String() string {
	return fmt.Sprintf("api.service@%p", s)
}

func (*service) VerifyConfiguration(_, to config.Configuration) error {
	if to.GUI.Network() != "tcp" {
		return nil
	}
	_, err := net.ResolveTCPAddr("tcp", to.GUI.Address())
	return err
}

func (s *service) CommitConfiguration(from, to config.Configuration) bool {
	// No action required when this changes, so mask the fact that it changed at all.
	from.GUI.Debugging = to.GUI.Debugging

	if to.GUI == from.GUI {
		// No GUI changes, we're done here.
		return true
	}

	if to.GUI.Theme != from.GUI.Theme {
		s.statics.setTheme(to.GUI.Theme)
	}

	// Tell the serve loop to restart
	s.configChanged <- struct{}{}

	return true
}

func (s *service) fatal(err *svcutil.FatalErr) {
	// s.exitChan is 1-buffered and whoever is first gets handled.
	select {
	case s.exitChan <- err:
	default:
	}
}

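// debugMiddleware logs the method, URL, status code, response size and
// duration of each request when HTTP debugging is enabled.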
func debugMiddleware(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		t0 := time.Now()
		h.ServeHTTP(w, r)

		if shouldDebugHTTP() {
			ms := 1000 * time.Since(t0).Seconds()

			// The variable `w` is most likely a *http.response, which we can't do
			// much with since it's a non exported type. We can however peek into
			// it with reflection to get at the status code and number of bytes
			// written.
			var status, written int64
			if rw := reflect.Indirect(reflect.ValueOf(w)); rw.IsValid() && rw.Kind() == reflect.Struct {
				if rf := rw.FieldByName("status"); rf.IsValid() && rf.Kind() == reflect.Int {
					status = rf.Int()
				}
				if rf := rw.FieldByName("written"); rf.IsValid() && rf.Kind() == reflect.Int64 {
					written = rf.Int()
				}
			}
			l.Debugf("http: %s %q: status %d, %d bytes in %.02f ms", r.Method, r.URL.String(), status, written, ms)
		}
	})
}

func corsMiddleware(next http.Handler, allowFrameLoading bool) http.Handler {
	// Handle CORS headers and CORS OPTIONS requests.
	// CORS OPTIONS requests are typically sent by the browser during AJAX
	// preflight, when the browser initiates a POST request.
	//
	// As the OPTIONS request is unauthorized, this handler must be the first
	// of the chain (hence added at the end).
	//
	// See https://www.w3.org/TR/cors/ for details.
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Process OPTIONS requests
		if r.Method == "OPTIONS" {
			// Add a generous access-control-allow-origin header for CORS requests
			w.Header().Add("Access-Control-Allow-Origin", "*")
			// These are the methods allowed for cross-origin requests
			w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, PATCH, DELETE, OPTIONS")
			// Only these headers can be set
			w.Header().Set("Access-Control-Allow-Headers", "Content-Type, X-API-Key")
			// The response is meant to be cached for 10 minutes
			w.Header().Set("Access-Control-Max-Age", "600")

			// Indicate that no content will be returned
			w.WriteHeader(204)

			return
		}

		// Other security related headers that should be present.
		// https://www.owasp.org/index.php/Security_Headers

		if !allowFrameLoading {
			// We don't want to be rendered in an <iframe>,
			// <frame> or <object>. (Unless we do it ourselves.
			// This is also an escape hatch for people who serve
			// the Syncthing GUI as part of their own website
			// through a proxy, so they don't need to set the
			// allowFrameLoading bool.)
			w.Header().Set("X-Frame-Options", "SAMEORIGIN")
		}

		// If the browser senses an XSS attack it's allowed to take
		// action. (How this would not always be the default I
		// don't fully understand.)
		w.Header().Set("X-XSS-Protection", "1; mode=block")

		// Our content type headers are correct. Don't guess.
		w.Header().Set("X-Content-Type-Options", "nosniff")

		// For everything else, pass to the next handler
		next.ServeHTTP(w, r)
	})
}

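// metricsMiddleware records a per-path request timer in the go-metrics
// registry, which is presumably what the /rest/debug/httpmetrics endpoint
// reports.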
func metricsMiddleware(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		t := metrics.GetOrRegisterTimer(r.URL.Path, nil)
		t0 := time.Now()
		h.ServeHTTP(w, r)
		t.UpdateSince(t0)
	})
}

func redirectToHTTPSMiddleware(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.TLS == nil {
			// Redirect HTTP requests to HTTPS
			r.URL.Host = r.Host
			r.URL.Scheme = "https"
			http.Redirect(w, r, r.URL.String(), http.StatusTemporaryRedirect)
		} else {
			h.ServeHTTP(w, r)
		}
	})
}

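// noCacheMiddleware sets response headers that tell clients and proxies not
// to cache REST responses.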
func noCacheMiddleware(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Cache-Control", "max-age=0, no-cache, no-store")
		w.Header().Set("Expires", time.Now().UTC().Format(http.TimeFormat))
		w.Header().Set("Pragma", "no-cache")
		h.ServeHTTP(w, r)
	})
}

func withDetailsMiddleware(id protocol.DeviceID, h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("X-Syncthing-Version", build.Version)
		w.Header().Set("X-Syncthing-ID", id.String())
		h.ServeHTTP(w, r)
	})
}

func localhostMiddleware(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if addressIsLocalhost(r.Host) {
			h.ServeHTTP(w, r)
			return
		}

		http.Error(w, "Host check error", http.StatusForbidden)
	})
}

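// whenDebugging forwards requests to the wrapped debug handlers only while
// GUI debugging is enabled in the configuration, and returns 403 Forbidden
// otherwise.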
func (s *service) whenDebugging(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if s.cfg.GUI().Debugging {
			h.ServeHTTP(w, r)
			return
		}

		http.Error(w, "Debugging disabled", http.StatusForbidden)
	})
}

func (s *service) getPendingDevices(w http.ResponseWriter, _ *http.Request) {
	devices, err := s.model.PendingDevices()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	sendJSON(w, devices)
}

func (s *service) deletePendingDevices(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()

	device := qs.Get("device")
	deviceID, err := protocol.DeviceIDFromString(device)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	if err := s.model.DismissPendingDevice(deviceID); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}

func (s *service) getPendingFolders(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()

	device := qs.Get("device")
	deviceID, err := protocol.DeviceIDFromString(device)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	folders, err := s.model.PendingFolders(deviceID)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	sendJSON(w, folders)
}

func (s *service) deletePendingFolders(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()

	device := qs.Get("device")
	deviceID, err := protocol.DeviceIDFromString(device)
	if err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	folderID := qs.Get("folder")

	if err := s.model.DismissPendingFolder(deviceID, folderID); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}

func (*service) restPing(w http.ResponseWriter, _ *http.Request) {
	sendJSON(w, map[string]string{"ping": "pong"})
}

func (*service) getSystemPaths(w http.ResponseWriter, _ *http.Request) {
	sendJSON(w, locations.ListExpandedPaths())
}

func (s *service) getJSMetadata(w http.ResponseWriter, _ *http.Request) {
	meta, _ := json.Marshal(map[string]string{
		"deviceID": s.id.String(),
	})
	w.Header().Set("Content-Type", "application/javascript")
	fmt.Fprintf(w, "var metadata = %s;\n", meta)
}

func (*service) getSystemVersion(w http.ResponseWriter, _ *http.Request) {
	sendJSON(w, map[string]interface{}{
		"version":     build.Version,
		"codename":    build.Codename,
		"longVersion": build.LongVersion,
		"extra":       build.Extra,
		"os":          runtime.GOOS,
		"arch":        runtime.GOARCH,
		"isBeta":      build.IsBeta,
		"isCandidate": build.IsCandidate,
		"isRelease":   build.IsRelease,
		"date":        build.Date,
		"tags":        build.TagsList(),
		"stamp":       build.Stamp,
		"user":        build.User,
		"container":   incontainer.Detect(),
	})
}

func (*service) getSystemDebug(w http.ResponseWriter, _ *http.Request) {
	names := l.Facilities()
	enabled := l.FacilityDebugging()
	sort.Strings(enabled)
	sendJSON(w, map[string]interface{}{
		"facilities": names,
		"enabled":    enabled,
	})
}

func (*service) postSystemDebug(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	q := r.URL.Query()
	for _, f := range strings.Split(q.Get("enable"), ",") {
		if f == "" || l.ShouldDebug(f) {
			continue
		}
		l.SetDebug(f, true)
		l.Infof("Enabled debug data for %q", f)
	}
	for _, f := range strings.Split(q.Get("disable"), ",") {
		if f == "" || !l.ShouldDebug(f) {
			continue
		}
		l.SetDebug(f, false)
		l.Infof("Disabled debug data for %q", f)
	}
}

func (s *service) getDBBrowse(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()
	folder := qs.Get("folder")
	prefix := qs.Get("prefix")
	dirsOnly := qs.Get("dirsonly") != ""

	levels, err := strconv.Atoi(qs.Get("levels"))
	if err != nil {
		levels = -1
	}
	result, err := s.model.GlobalDirectoryTree(folder, prefix, levels, dirsOnly)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	sendJSON(w, result)
}

func (s *service) getDBCompletion(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()
	folder := qs.Get("folder")    // empty means all folders
	deviceStr := qs.Get("device") // empty means local device ID

	// We will check completion status for either the local device, or a
	// specific given device ID.

	device := protocol.LocalDeviceID
	if deviceStr != "" {
		var err error
		device, err = protocol.DeviceIDFromString(deviceStr)
		if err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
	}

	if comp, err := s.model.Completion(device, folder); err != nil {
		status := http.StatusInternalServerError
		if isFolderNotFound(err) {
			status = http.StatusNotFound
		}
		http.Error(w, err.Error(), status)
	} else {
		sendJSON(w, comp.Map())
	}
}

func (s *service) getDBStatus(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()
	folder := qs.Get("folder")
	if sum, err := s.fss.Summary(folder); err != nil {
		http.Error(w, err.Error(), http.StatusNotFound)
	} else {
		sendJSON(w, sum)
	}
}

func (s *service) postDBOverride(_ http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()
	folder := qs.Get("folder")
	go s.model.Override(folder)
}

func (s *service) postDBRevert(_ http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()
	folder := qs.Get("folder")
	go s.model.Revert(folder)
}

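// getPagingParams extracts the page and perpage query parameters, falling
// back to page 1 and a very large page size (65536) when they are missing or
// invalid. For example, "?page=2&perpage=50" yields (2, 50).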
func getPagingParams(qs url.Values) (int, int) {
	page, err := strconv.Atoi(qs.Get("page"))
	if err != nil || page < 1 {
		page = 1
	}
	perpage, err := strconv.Atoi(qs.Get("perpage"))
	if err != nil || perpage < 1 {
		perpage = 1 << 16
	}
	return page, perpage
}

func (s *service) getDBNeed(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()

	folder := qs.Get("folder")

	page, perpage := getPagingParams(qs)

	progress, queued, rest, err := s.model.NeedFolderFiles(folder, page, perpage)
	if err != nil {
		http.Error(w, err.Error(), http.StatusNotFound)
		return
	}

	// Convert the struct to a more loose structure, and inject the size.
	sendJSON(w, map[string]interface{}{
		"progress": toJsonFileInfoSlice(progress),
		"queued":   toJsonFileInfoSlice(queued),
		"rest":     toJsonFileInfoSlice(rest),
		"page":     page,
		"perpage":  perpage,
	})
}

func (s *service) getDBRemoteNeed(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()

	folder := qs.Get("folder")
	device := qs.Get("device")
	deviceID, err := protocol.DeviceIDFromString(device)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	page, perpage := getPagingParams(qs)

	files, err := s.model.RemoteNeedFolderFiles(folder, deviceID, page, perpage)
	if err != nil {
		http.Error(w, err.Error(), http.StatusNotFound)
		return
	}

	sendJSON(w, map[string]interface{}{
		"files":   toJsonFileInfoSlice(files),
		"page":    page,
		"perpage": perpage,
	})
}

func (s *service) getDBLocalChanged(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()

	folder := qs.Get("folder")

	page, perpage := getPagingParams(qs)

	files, err := s.model.LocalChangedFolderFiles(folder, page, perpage)
	if err != nil {
		http.Error(w, err.Error(), http.StatusNotFound)
		return
	}

	sendJSON(w, map[string]interface{}{
		"files":   toJsonFileInfoSlice(files),
		"page":    page,
		"perpage": perpage,
	})
}

func (s *service) getSystemConnections(w http.ResponseWriter, _ *http.Request) {
	sendJSON(w, s.model.ConnectionStats())
}

func (s *service) getDeviceStats(w http.ResponseWriter, _ *http.Request) {
	stats, err := s.model.DeviceStatistics()
	if err != nil {
		http.Error(w, "Internal Server Error", http.StatusInternalServerError)
		return
	}
	sendJSON(w, stats)
}

func (s *service) getFolderStats(w http.ResponseWriter, _ *http.Request) {
	stats, err := s.model.FolderStatistics()
	if err != nil {
		http.Error(w, "Internal Server Error", http.StatusInternalServerError)
		return
	}
	sendJSON(w, stats)
}

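// getDBFile returns the global and local index entries for a single file,
// together with which devices it is available from and any mtime mapping.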
func (s *service) getDBFile(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()
	folder := qs.Get("folder")
	file := qs.Get("file")

	errStatus := http.StatusInternalServerError
	gf, gfOk, err := s.model.CurrentGlobalFile(folder, file)
	if err != nil {
		if isFolderNotFound(err) {
			errStatus = http.StatusNotFound
		}
		http.Error(w, err.Error(), errStatus)
		return
	}

	lf, lfOk, err := s.model.CurrentFolderFile(folder, file)
	if err != nil {
		if isFolderNotFound(err) {
			errStatus = http.StatusNotFound
		}
		http.Error(w, err.Error(), errStatus)
		return
	}

	if !(gfOk || lfOk) {
		// This file for sure does not exist.
		http.Error(w, "No such object in the index", http.StatusNotFound)
		return
	}

	av, err := s.model.Availability(folder, gf, protocol.BlockInfo{})
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
	mtimeMapping, mtimeErr := s.model.GetMtimeMapping(folder, file)

	sendJSON(w, map[string]interface{}{
		"global":       jsonFileInfo(gf),
		"local":        jsonFileInfo(lf),
		"availability": av,
		"mtime": map[string]interface{}{
			"err":   mtimeErr,
			"value": mtimeMapping,
		},
	})
}

func (s *service) getDebugFile(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()
	folder := qs.Get("folder")
	file := qs.Get("file")

	snap, err := s.model.DBSnapshot(folder)
	if err != nil {
		http.Error(w, err.Error(), http.StatusNotFound)
		return
	}

	mtimeMapping, mtimeErr := s.model.GetMtimeMapping(folder, file)

	lf, _ := snap.Get(protocol.LocalDeviceID, file)
	gf, _ := snap.GetGlobal(file)
	av := snap.Availability(file)
	vl := snap.DebugGlobalVersions(file)

	sendJSON(w, map[string]interface{}{
		"global":         jsonFileInfo(gf),
		"local":          jsonFileInfo(lf),
		"availability":   av,
		"globalVersions": vl.String(),
		"mtime": map[string]interface{}{
			"err":   mtimeErr,
			"value": mtimeMapping,
		},
	})
}

func (s *service) postSystemRestart(w http.ResponseWriter, _ *http.Request) {
|
2015-04-28 21:12:19 +00:00
|
|
|
s.flushResponse(`{"ok": "restarting"}`, w)
|
2020-11-17 12:19:04 +00:00
|
|
|
|
2020-12-22 19:17:14 +00:00
|
|
|
s.fatal(&svcutil.FatalErr{
|
2020-11-17 12:19:04 +00:00
|
|
|
Err: errors.New("restart initiated by rest API"),
|
2020-12-22 19:17:14 +00:00
|
|
|
Status: svcutil.ExitRestart,
|
2020-11-17 12:19:04 +00:00
|
|
|
})
|
2014-04-03 20:10:51 +00:00
|
|
|
}
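
// postSystemReset resets the database for a single folder, or for all
// folders when no folder ID is given, and then restarts.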
func (s *service) postSystemReset(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()
	folder := qs.Get("folder")

	if len(folder) > 0 {
		if _, ok := s.cfg.Folders()[folder]; !ok {
			http.Error(w, "Invalid folder ID", http.StatusInternalServerError)
			return
		}
	}

	if folder == "" {
		// Reset all folders.
		for folder := range s.cfg.Folders() {
			if err := s.model.ResetFolder(folder); err != nil {
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}
		}
		s.flushResponse(`{"ok": "resetting database"}`, w)
	} else {
		// Reset a specific folder, assuming it's supposed to exist.
		if err := s.model.ResetFolder(folder); err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		s.flushResponse(`{"ok": "resetting folder `+folder+`"}`, w)
	}

	s.fatal(&svcutil.FatalErr{
		Err:    errors.New("restart after db reset initiated by rest API"),
		Status: svcutil.ExitRestart,
	})
}

func (s *service) postSystemShutdown(w http.ResponseWriter, _ *http.Request) {
	s.flushResponse(`{"ok": "shutting down"}`, w)
	s.fatal(&svcutil.FatalErr{
		Err:    errors.New("shutdown initiated by rest API"),
		Status: svcutil.ExitSuccess,
	})
}
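
// flushResponse writes the response and flushes it to the client right
// away, so that it is received before any following restart or shutdown.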
func (*service) flushResponse(resp string, w http.ResponseWriter) {
	w.Write([]byte(resp + "\n"))
	f := w.(http.Flusher)
	f.Flush()
}
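
// getSystemStatus reports process and runtime status: memory usage,
// discovery and listener state, uptime and related information.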
func (s *service) getSystemStatus(w http.ResponseWriter, _ *http.Request) {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)

	tilde, _ := fs.ExpandTilde("~")
	res := make(map[string]interface{})
	res["myID"] = s.id.String()
	res["goroutines"] = runtime.NumGoroutine()
	res["alloc"] = m.Alloc
	res["sys"] = m.Sys - m.HeapReleased
	res["tilde"] = tilde
	if s.cfg.Options().LocalAnnEnabled || s.cfg.Options().GlobalAnnEnabled {
		res["discoveryEnabled"] = true
		discoStatus := s.discoverer.ChildErrors()
		res["discoveryStatus"] = discoveryStatusMap(discoStatus)
		res["discoveryMethods"] = len(discoStatus) // DEPRECATED: Redundant, only for backwards compatibility, should be removed.
		discoErrors := make(map[string]*string, len(discoStatus))
		for s, e := range discoStatus {
			if e != nil {
				discoErrors[s] = errorString(e)
			}
		}
		res["discoveryErrors"] = discoErrors // DEPRECATED: Redundant, only for backwards compatibility, should be removed.
	}

	res["connectionServiceStatus"] = s.connectionsService.ListenerStatus()
	res["lastDialStatus"] = s.connectionsService.ConnectionStatus()
	res["cpuPercent"] = 0 // deprecated from API
	res["pathSeparator"] = string(filepath.Separator)
	res["urVersionMax"] = ur.Version
	res["uptime"] = s.urService.UptimeS()
	res["startTime"] = ur.StartTime
	res["guiAddressOverridden"] = s.cfg.GUI().IsOverridden()
	res["guiAddressUsed"] = s.listenerAddr.String()

	sendJSON(w, res)
}

func (s *service) getSystemError(w http.ResponseWriter, _ *http.Request) {
	sendJSON(w, map[string][]logger.Line{
		"errors": s.guiErrors.Since(time.Time{}),
	})
}

func (*service) postSystemError(_ http.ResponseWriter, r *http.Request) {
	bs, _ := io.ReadAll(r.Body)
	r.Body.Close()
	l.Warnln(string(bs))
}

func (s *service) postSystemErrorClear(_ http.ResponseWriter, _ *http.Request) {
	s.guiErrors.Clear()
}

func (s *service) getSystemLog(w http.ResponseWriter, r *http.Request) {
	q := r.URL.Query()
	since, err := time.Parse(time.RFC3339, q.Get("since"))
	if err != nil {
		l.Debugln(err)
	}
	sendJSON(w, map[string][]logger.Line{
		"messages": s.systemLog.Since(since),
	})
}

func (s *service) getSystemLogTxt(w http.ResponseWriter, r *http.Request) {
	q := r.URL.Query()
	since, err := time.Parse(time.RFC3339, q.Get("since"))
	if err != nil {
		l.Debugln(err)
	}
	w.Header().Set("Content-Type", "text/plain; charset=utf-8")

	for _, line := range s.systemLog.Since(since) {
		fmt.Fprintf(w, "%s: %s\n", line.When.Format(time.RFC3339), line.Message)
	}
}

type fileEntry struct {
	name string
	data []byte
}
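
// getSupportBundle collects the redacted configuration, logs, error and
// panic reports, usage report, metrics and pprof profiles into a zip
// archive, saves a copy next to the configuration, and serves it for
// download.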
func (s *service) getSupportBundle(w http.ResponseWriter, r *http.Request) {
	var files []fileEntry

	// Redacted configuration as JSON
	if jsonConfig, err := json.MarshalIndent(getRedactedConfig(s), "", "  "); err != nil {
		l.Warnln("Support bundle: failed to create config.json:", err)
	} else {
		files = append(files, fileEntry{name: "config.json.txt", data: jsonConfig})
	}

	// Log as text
	var buflog bytes.Buffer
	for _, line := range s.systemLog.Since(time.Time{}) {
		fmt.Fprintf(&buflog, "%s: %s\n", line.When.Format(time.RFC3339), line.Message)
	}
	files = append(files, fileEntry{name: "log-inmemory.txt", data: buflog.Bytes()})

	// Errors as JSON
	if errs := s.guiErrors.Since(time.Time{}); len(errs) > 0 {
		if jsonError, err := json.MarshalIndent(errs, "", "  "); err != nil {
			l.Warnln("Support bundle: failed to create errors.json:", err)
		} else {
			files = append(files, fileEntry{name: "errors.json.txt", data: jsonError})
		}
	}

	// Panic files
	if panicFiles, err := filepath.Glob(filepath.Join(locations.GetBaseDir(locations.ConfigBaseDir), "panic*")); err == nil {
		for _, f := range panicFiles {
			if panicFile, err := os.ReadFile(f); err != nil {
				l.Warnf("Support bundle: failed to load %s: %s", filepath.Base(f), err)
			} else {
				files = append(files, fileEntry{name: filepath.Base(f), data: panicFile})
			}
		}
	}

	// Archived log (default on Windows)
	if logFile, err := os.ReadFile(locations.Get(locations.LogFile)); err == nil {
		files = append(files, fileEntry{name: "log-ondisk.txt", data: logFile})
	}

	// Version and platform information as JSON
	if versionPlatform, err := json.MarshalIndent(map[string]string{
		"now":         time.Now().Format(time.RFC3339),
		"version":     build.Version,
		"codename":    build.Codename,
		"longVersion": build.LongVersion,
		"os":          runtime.GOOS,
		"arch":        runtime.GOARCH,
	}, "", "  "); err == nil {
		files = append(files, fileEntry{name: "version-platform.json.txt", data: versionPlatform})
	} else {
		l.Warnln("Failed to create versionPlatform.json: ", err)
	}

	// Usage report data as JSON
	if r, err := s.urService.ReportDataPreview(r.Context(), ur.Version); err != nil {
		l.Warnln("Support bundle: failed to create usage-reporting.json.txt:", err)
	} else {
		if usageReportingData, err := json.MarshalIndent(r, "", "  "); err != nil {
			l.Warnln("Support bundle: failed to serialize usage-reporting.json.txt", err)
		} else {
			files = append(files, fileEntry{name: "usage-reporting.json.txt", data: usageReportingData})
		}
	}

	// Metrics data as text
	buf := bytes.NewBuffer(nil)
	wr := bufferedResponseWriter{Writer: buf}
	promhttp.Handler().ServeHTTP(wr, &http.Request{Method: http.MethodGet})
	files = append(files, fileEntry{name: "metrics.txt", data: buf.Bytes()})

	// Heap and CPU profiles as pprof data
	var heapBuffer, cpuBuffer bytes.Buffer
	filename := fmt.Sprintf("syncthing-heap-%s-%s-%s-%s.pprof", runtime.GOOS, runtime.GOARCH, build.Version, time.Now().Format("150405")) // hhmmss
	runtime.GC()
	if err := pprof.WriteHeapProfile(&heapBuffer); err == nil {
		files = append(files, fileEntry{name: filename, data: heapBuffer.Bytes()})
	}

	const duration = 4 * time.Second
	filename = fmt.Sprintf("syncthing-cpu-%s-%s-%s-%s.pprof", runtime.GOOS, runtime.GOARCH, build.Version, time.Now().Format("150405")) // hhmmss
	if err := pprof.StartCPUProfile(&cpuBuffer); err == nil {
		time.Sleep(duration)
		pprof.StopCPUProfile()
		files = append(files, fileEntry{name: filename, data: cpuBuffer.Bytes()})
	}

	// Add buffer files to buffer zip
	var zipFilesBuffer bytes.Buffer
	if err := writeZip(&zipFilesBuffer, files); err != nil {
		l.Warnln("Support bundle: failed to create support bundle zip:", err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	// Set zip file name and path
	zipFileName := fmt.Sprintf("support-bundle-%s-%s.zip", s.id.Short().String(), time.Now().Format("2006-01-02T150405"))
	zipFilePath := filepath.Join(locations.GetBaseDir(locations.ConfigBaseDir), zipFileName)

	// Write buffer zip to local zip file (back up)
	if err := os.WriteFile(zipFilePath, zipFilesBuffer.Bytes(), 0o600); err != nil {
		l.Warnln("Support bundle: support bundle zip could not be created:", err)
	}

	// Serve the buffer zip to client for download
	w.Header().Set("Content-Type", "application/zip")
	w.Header().Set("Content-Disposition", "attachment; filename="+zipFileName)
	io.Copy(w, &zipFilesBuffer)
}
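
// getSystemHTTPMetrics dumps the accumulated per-endpoint HTTP timing
// metrics as indented JSON.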
func (*service) getSystemHTTPMetrics(w http.ResponseWriter, _ *http.Request) {
	stats := make(map[string]interface{})
	metrics.Each(func(name string, intf interface{}) {
		if m, ok := intf.(*metrics.StandardTimer); ok {
			pct := m.Percentiles([]float64{0.50, 0.95, 0.99})
			for i := range pct {
				pct[i] /= 1e6 // ns to ms
			}
			stats[name] = map[string]interface{}{
				"count":         m.Count(),
				"sumMs":         m.Sum() / 1e6, // ns to ms
				"ratesPerS":     []float64{m.Rate1(), m.Rate5(), m.Rate15()},
				"percentilesMs": pct,
			}
		}
	})
	bs, _ := json.MarshalIndent(stats, "", "  ")
	w.Write(bs)
}

func (s *service) getSystemDiscovery(w http.ResponseWriter, _ *http.Request) {
	devices := make(map[string]discover.CacheEntry)

	if s.discoverer != nil {
		// Device ids can't be marshalled as keys so we need to manually
		// rebuild this map using strings. Discoverer may be nil if discovery
		// has not started yet.
		for device, entry := range s.discoverer.Cache() {
			devices[device.String()] = entry
		}
	}

	sendJSON(w, devices)
}

func (s *service) getReport(w http.ResponseWriter, r *http.Request) {
	version := ur.Version
	if val, _ := strconv.Atoi(r.URL.Query().Get("version")); val > 0 {
		version = val
	}
	if r, err := s.urService.ReportDataPreview(context.TODO(), version); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	} else {
		sendJSON(w, r)
	}
}

func (*service) getRandomString(w http.ResponseWriter, r *http.Request) {
	length := 32
	if val, _ := strconv.Atoi(r.URL.Query().Get("length")); val > 0 {
		length = val
	}
	str := rand.String(length)

	sendJSON(w, map[string]string{"random": str})
}

func (s *service) getDBIgnores(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()

	folder := qs.Get("folder")

	lines, patterns, err := s.model.LoadIgnores(folder)
	if err != nil && !ignore.IsParseError(err) {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	sendJSON(w, map[string]interface{}{
		"ignore":   lines,
		"expanded": patterns,
		"error":    errorString(err),
	})
}

func (s *service) postDBIgnores(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()

	bs, err := io.ReadAll(r.Body)
	r.Body.Close()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	var data map[string][]string
	err = json.Unmarshal(bs, &data)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	err = s.model.SetIgnores(qs.Get("folder"), data["ignore"])
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	s.getDBIgnores(w, r)
}

func (s *service) getIndexEvents(w http.ResponseWriter, r *http.Request) {
	mask := s.getEventMask(r.URL.Query().Get("events"))
	sub := s.getEventSub(mask)
	s.getEvents(w, r, sub)
}

func (s *service) getDiskEvents(w http.ResponseWriter, r *http.Request) {
	sub := s.getEventSub(DiskEventMask)
	s.getEvents(w, r, sub)
}
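
// getEvents answers a long polling request: it waits up to the requested
// timeout for buffered events after the given event ID and returns them as
// JSON.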
func (s *service) getEvents(w http.ResponseWriter, r *http.Request, eventSub events.BufferedSubscription) {
	if eventSub.Mask()&(events.FolderSummary|events.FolderCompletion) != 0 {
		s.fss.OnEventRequest()
	}

	qs := r.URL.Query()
	sinceStr := qs.Get("since")
	limitStr := qs.Get("limit")
	timeoutStr := qs.Get("timeout")
	since, _ := strconv.Atoi(sinceStr)
	limit, _ := strconv.Atoi(limitStr)

	timeout := defaultEventTimeout
	if timeoutSec, timeoutErr := strconv.Atoi(timeoutStr); timeoutErr == nil && timeoutSec >= 0 { // 0 is a valid timeout
		timeout = time.Duration(timeoutSec) * time.Second
	}

	// Flush before blocking, to indicate that we've received the request and
	// that it should not be retried. Must set Content-Type header before
	// flushing.
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	f := w.(http.Flusher)
	f.Flush()

	// If there are no events available return an empty slice, as this gets serialized as `[]`
	evs := eventSub.Since(since, []events.Event{}, timeout)
	if 0 < limit && limit < len(evs) {
		evs = evs[len(evs)-limit:]
	}

	sendJSON(w, evs)
}

func (*service) getEventMask(evs string) events.EventType {
	eventMask := DefaultEventMask
	if evs != "" {
		eventList := strings.Split(evs, ",")
		eventMask = 0
		for _, ev := range eventList {
			eventMask |= events.UnmarshalEventType(strings.TrimSpace(ev))
		}
	}
	return eventMask
}
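
// getEventSub returns the buffered event subscription for the given mask,
// creating and caching it on first use.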
func (s *service) getEventSub(mask events.EventType) events.BufferedSubscription {
	s.eventSubsMut.Lock()
	bufsub, ok := s.eventSubs[mask]
	if !ok {
		evsub := s.evLogger.Subscribe(mask)
		bufsub = events.NewBufferedSubscription(evsub, EventSubBufferSize)
		s.eventSubs[mask] = bufsub
	}
	s.eventSubsMut.Unlock()

	return bufsub
}

func (s *service) getSystemUpgrade(w http.ResponseWriter, _ *http.Request) {
	if s.noUpgrade {
		http.Error(w, upgrade.ErrUpgradeUnsupported.Error(), http.StatusNotImplemented)
		return
	}
	opts := s.cfg.Options()
	rel, err := upgrade.LatestRelease(opts.ReleasesURL, build.Version, opts.UpgradeToPreReleases)
	if err != nil {
		httpError(w, err)
		return
	}
	res := make(map[string]interface{})
	res["running"] = build.Version
	res["latest"] = rel.Tag
	res["newer"] = upgrade.CompareVersions(rel.Tag, build.Version) == upgrade.Newer
	res["majorNewer"] = upgrade.CompareVersions(rel.Tag, build.Version) == upgrade.MajorNewer

	sendJSON(w, res)
}

func (*service) getDeviceID(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()
	idStr := qs.Get("id")
	id, err := protocol.DeviceIDFromString(idStr)

	if err == nil {
		sendJSON(w, map[string]string{
			"id": id.String(),
		})
	} else {
		sendJSON(w, map[string]string{
			"error": err.Error(),
		})
	}
}

func (*service) getLang(w http.ResponseWriter, r *http.Request) {
	lang := r.Header.Get("Accept-Language")
	var langs []string
	for _, l := range strings.Split(lang, ",") {
		parts := strings.SplitN(l, ";", 2)
		langs = append(langs, strings.ToLower(strings.TrimSpace(parts[0])))
	}
	sendJSON(w, langs)
}
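
// postSystemUpgrade upgrades to the latest release, if it is newer than the
// running version, and then exits so the new version can start.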
func (s *service) postSystemUpgrade(w http.ResponseWriter, _ *http.Request) {
	opts := s.cfg.Options()
	rel, err := upgrade.LatestRelease(opts.ReleasesURL, build.Version, opts.UpgradeToPreReleases)
	if err != nil {
		httpError(w, err)
		return
	}

	if upgrade.CompareVersions(rel.Tag, build.Version) > upgrade.Equal {
		err = upgrade.To(rel)
		if err != nil {
			l.Warnln("upgrading:", err)
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}

		s.flushResponse(`{"ok": "restarting"}`, w)
		s.fatal(&svcutil.FatalErr{
			Err:    errors.New("exit after upgrade initiated by rest API"),
			Status: svcutil.ExitUpgrade,
		})
	}
}
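
// makeDevicePauseHandler returns a handler that pauses or resumes the given
// device, or all devices when no device ID is given.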
func (s *service) makeDevicePauseHandler(paused bool) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		qs := r.URL.Query()
		deviceStr := qs.Get("device")

		var msg string
		var status int
		_, err := s.cfg.Modify(func(cfg *config.Configuration) {
			if deviceStr == "" {
				for i := range cfg.Devices {
					cfg.Devices[i].Paused = paused
				}
				return
			}

			device, err := protocol.DeviceIDFromString(deviceStr)
			if err != nil {
				msg = err.Error()
				status = http.StatusInternalServerError
				return
			}

			_, i, ok := cfg.Device(device)
			if !ok {
				msg = "not found"
				status = http.StatusNotFound
				return
			}

			cfg.Devices[i].Paused = paused
		})

		if msg != "" {
			http.Error(w, msg, status)
		} else if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
		}
	}
}

func (s *service) postDBScan(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()
	folder := qs.Get("folder")
	if folder != "" {
		subs := qs["sub"]
		err := s.model.ScanFolderSubdirs(folder, subs)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		nextStr := qs.Get("next")
		next, err := strconv.Atoi(nextStr)
		if err == nil {
			s.model.DelayScan(folder, time.Duration(next)*time.Second)
		}
	} else {
		errors := s.model.ScanFolders()
		if len(errors) > 0 {
			http.Error(w, "Error scanning folders", http.StatusInternalServerError)
			sendJSON(w, errors)
			return
		}
	}
}
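
// postDBPrio bumps the given file to the front of the download queue for
// its folder and responds with the updated need listing.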
func (s *service) postDBPrio(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()
	folder := qs.Get("folder")
	file := qs.Get("file")
	s.model.BringToFront(folder, file)
	s.getDBNeed(w, r)
}

func (*service) getHealth(w http.ResponseWriter, _ *http.Request) {
	sendJSON(w, map[string]string{"status": "OK"})
}

func (*service) getQR(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()
	text := qs.Get("text")
	code, err := qr.Encode(text, qr.M)
	if err != nil {
		http.Error(w, "Invalid", http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "image/png")
	w.Write(code.PNG())
}
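
// getPeerCompletion returns the average sync completion percentage per
// device, averaged over all folders shared with that device.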
func (s *service) getPeerCompletion(w http.ResponseWriter, _ *http.Request) {
	tot := map[string]float64{}
	count := map[string]float64{}

	for _, folder := range s.cfg.Folders() {
		for _, device := range folder.DeviceIDs() {
			deviceStr := device.String()
			if _, ok := s.model.Connection(device); ok {
				comp, err := s.model.Completion(device, folder.ID)
				if err != nil {
					http.Error(w, err.Error(), http.StatusInternalServerError)
					return
				}
				tot[deviceStr] += comp.CompletionPct
			} else {
				tot[deviceStr] = 0
			}
			count[deviceStr]++
		}
	}

	comp := map[string]int{}
	for device := range tot {
		comp[device] = int(tot[device] / count[device])
	}

	sendJSON(w, comp)
}

func (s *service) getFolderVersions(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()
	versions, err := s.model.GetFolderVersions(qs.Get("folder"))
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	sendJSON(w, versions)
}

func (s *service) postFolderVersionsRestore(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()

	bs, err := io.ReadAll(r.Body)
	r.Body.Close()
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	var versions map[string]time.Time
	err = json.Unmarshal(bs, &versions)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	ferr, err := s.model.RestoreFolderVersions(qs.Get("folder"), versions)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	sendJSON(w, errorStringMap(ferr))
}
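
// getFolderErrors returns the current folder errors, paginated via the
// page and perpage query parameters.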
func (s *service) getFolderErrors(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()
	folder := qs.Get("folder")
	page, perpage := getPagingParams(qs)

	errors, err := s.model.FolderErrors(folder)
	if err != nil {
		http.Error(w, err.Error(), http.StatusNotFound)
		return
	}

	start := (page - 1) * perpage
	if start >= len(errors) {
		errors = nil
	} else {
		errors = errors[start:]
		if perpage < len(errors) {
			errors = errors[:perpage]
		}
	}

	sendJSON(w, map[string]interface{}{
		"folder":  folder,
		"errors":  errors,
		"page":    page,
		"perpage": perpage,
	})
}

func (*service) getSystemBrowse(w http.ResponseWriter, r *http.Request) {
	qs := r.URL.Query()
	current := qs.Get("current")

	// The default value, also used in case of an unmarshalling error, is the
	// basic filesystem.
	var fsType fs.FilesystemType
	fsType.UnmarshalText([]byte(qs.Get("filesystem")))

	sendJSON(w, browse(fsType, current))
}

func browse(fsType fs.FilesystemType, current string) []string {
	if current == "" {
		return browseRoots(fsType)
	}

	parent, base := parentAndBase(current)
	ffs := fs.NewFilesystem(fsType, parent)
	files := browseFiles(ffs, base)
	for i := range files {
		files[i] = filepath.Join(parent, files[i])
	}
	return files
}

const (
	matchExact int = iota
	matchCaseIns
	noMatch
)

func checkPrefixMatch(s, prefix string) int {
	if strings.HasPrefix(s, prefix) {
		return matchExact
	}

	if strings.HasPrefix(strings.ToLower(s), strings.ToLower(prefix)) {
		return matchCaseIns
	}

	return noMatch
}

func browseRoots(fsType fs.FilesystemType) []string {
	filesystem := fs.NewFilesystem(fsType, "")
	if roots, err := filesystem.Roots(); err == nil {
		return roots
	}

	return nil
}

// parentAndBase returns the parent directory and the remaining base of the
// path. The base may be empty if the path ends with a path separator.
func parentAndBase(current string) (string, string) {
	search, _ := fs.ExpandTilde(current)
	pathSeparator := string(fs.PathSeparator)

	if strings.HasSuffix(current, pathSeparator) && !strings.HasSuffix(search, pathSeparator) {
		search = search + pathSeparator
	}
	searchDir := filepath.Dir(search)

	// The searchFile should be the last component of search, or empty if it
	// ends with a path separator
	var searchFile string
	if !strings.HasSuffix(search, pathSeparator) {
		searchFile = filepath.Base(search)
	}

	return searchDir, searchFile
}
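
// browseFiles returns the subdirectories of the filesystem root whose names
// match the given prefix, exact matches sorted before case-insensitive ones.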
func browseFiles(ffs fs.Filesystem, search string) []string {
	subdirectories, _ := ffs.DirNames(".")
	pathSeparator := string(fs.PathSeparator)

	exactMatches := make([]string, 0, len(subdirectories))
	caseInsMatches := make([]string, 0, len(subdirectories))

	for _, subdirectory := range subdirectories {
		info, err := ffs.Stat(subdirectory)
		if err != nil || !info.IsDir() {
			continue
		}

		switch checkPrefixMatch(subdirectory, search) {
		case matchExact:
			exactMatches = append(exactMatches, subdirectory+pathSeparator)
		case matchCaseIns:
			caseInsMatches = append(caseInsMatches, subdirectory+pathSeparator)
		}
	}

	// sort to return matches in deterministic order (don't depend on file system order)
	sort.Strings(exactMatches)
	sort.Strings(caseInsMatches)
	return append(exactMatches, caseInsMatches...)
}

func (*service) getCPUProf(w http.ResponseWriter, r *http.Request) {
	duration, err := time.ParseDuration(r.FormValue("duration"))
	if err != nil {
		duration = 30 * time.Second
	}

	filename := fmt.Sprintf("syncthing-cpu-%s-%s-%s-%s.pprof", runtime.GOOS, runtime.GOARCH, build.Version, time.Now().Format("150405")) // hhmmss

	w.Header().Set("Content-Type", "application/octet-stream")
	w.Header().Set("Content-Disposition", "attachment; filename="+filename)

	if err := pprof.StartCPUProfile(w); err == nil {
		time.Sleep(duration)
		pprof.StopCPUProfile()
	}
}

func (*service) getHeapProf(w http.ResponseWriter, _ *http.Request) {
	filename := fmt.Sprintf("syncthing-heap-%s-%s-%s-%s.pprof", runtime.GOOS, runtime.GOARCH, build.Version, time.Now().Format("150405")) // hhmmss

	w.Header().Set("Content-Type", "application/octet-stream")
	w.Header().Set("Content-Disposition", "attachment; filename="+filename)

	runtime.GC()
	pprof.WriteHeapProfile(w)
}

func toJsonFileInfoSlice(fs []db.FileInfoTruncated) []jsonFileInfoTrunc {
	res := make([]jsonFileInfoTrunc, len(fs))
	for i, f := range fs {
		res[i] = jsonFileInfoTrunc(f)
	}
	return res
}

// Type wrappers for nice JSON serialization

type jsonFileInfo protocol.FileInfo

func (f jsonFileInfo) MarshalJSON() ([]byte, error) {
	m := fileIntfJSONMap(protocol.FileInfo(f))
	m["numBlocks"] = len(f.Blocks)
	return json.Marshal(m)
}

type jsonFileInfoTrunc db.FileInfoTruncated

func (f jsonFileInfoTrunc) MarshalJSON() ([]byte, error) {
	m := fileIntfJSONMap(db.FileInfoTruncated(f))
	m["numBlocks"] = nil // explicitly unknown
	return json.Marshal(m)
}

func fileIntfJSONMap(f protocol.FileIntf) map[string]interface{} {
	out := map[string]interface{}{
		"name":          f.FileName(),
		"type":          f.FileType().String(),
		"size":          f.FileSize(),
		"deleted":       f.IsDeleted(),
		"invalid":       f.IsInvalid(),
		"ignored":       f.IsIgnored(),
		"mustRescan":    f.MustRescan(),
		"noPermissions": !f.HasPermissionBits(),
		"modified":      f.ModTime(),
		"modifiedBy":    f.FileModifiedBy().String(),
		"sequence":      f.SequenceNo(),
		"version":       jsonVersionVector(f.FileVersion()),
		"localFlags":    f.FileLocalFlags(),
		"platform":      f.PlatformData(),
		"inodeChange":   f.InodeChangeTime(),
		"blocksHash":    f.FileBlocksHash(),
	}
	if f.HasPermissionBits() {
		out["permissions"] = fmt.Sprintf("%#o", f.FilePermissions())
	}
	return out
}

type jsonVersionVector protocol.Vector

func (v jsonVersionVector) MarshalJSON() ([]byte, error) {
	res := make([]string, len(v.Counters))
	for i, c := range v.Counters {
		res[i] = fmt.Sprintf("%v:%d", c.ID, c.Value)
	}
	return json.Marshal(res)
}
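
// dirNames returns the sorted base names of the subdirectories of dir.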
func dirNames(dir string) []string {
	fis, err := os.ReadDir(dir)
	if err != nil {
		return nil
	}

	var dirs []string
	for _, fi := range fis {
		if fi.IsDir() {
			dirs = append(dirs, filepath.Base(fi.Name()))
		}
	}

	sort.Strings(dirs)
	return dirs
}
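
// addressIsLocalhost reports whether the given address refers to the local
// host, either by a localhost name or by a loopback IP address.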
func addressIsLocalhost(addr string) bool {
	host, _, err := net.SplitHostPort(addr)
	if err != nil {
		// There was no port, so we assume the address was just a hostname
		host = addr
	}
	host = strings.ToLower(host)
	switch {
	case host == "localhost":
		return true
	case host == "localhost.":
		return true
	case strings.HasSuffix(host, ".localhost"):
		return true
	default:
		ip := net.ParseIP(host)
		if ip == nil {
			// not an IP address
			return false
		}
		return ip.IsLoopback()
	}
}

// shouldRegenerateCertificate checks for certificate expiry or other known
// issues with our API/GUI certificate and returns either nil (leave the
// certificate alone) or an error describing the reason the certificate
// should be regenerated.
func shouldRegenerateCertificate(cert tls.Certificate) error {
	leaf := cert.Leaf
	if leaf == nil {
		// Leaf may or may not be set, depending on how the certificate was
		// parsed when we got it.
		if len(cert.Certificate) < 1 {
			// can't happen
			return errors.New("no certificate in certificate")
		}
		var err error
		leaf, err = x509.ParseCertificate(cert.Certificate[0])
		if err != nil {
			return err
		}
	}

	if leaf.Subject.String() != leaf.Issuer.String() || len(leaf.IPAddresses) != 0 {
		// The certificate is not self signed, or has IP attributes we don't
		// add, so we leave it alone.
		return nil
	}
	if len(leaf.DNSNames) > 1 {
		// The certificate has more DNS SANs than we ever add, so we leave
		// it alone.
		return nil
	}
	if len(leaf.DNSNames) == 1 && leaf.DNSNames[0] != leaf.Issuer.CommonName {
		// The one SAN is different from the issuer, so it's not one of our
		// newer self signed certificates.
		return nil
	}

	if leaf.NotAfter.Before(time.Now()) {
		return errors.New("certificate has expired")
	}
	if leaf.NotAfter.Before(time.Now().Add(30 * 24 * time.Hour)) {
		return errors.New("certificate will soon expire")
	}

	// On macOS, check for certificates issued on or after July 1st, 2019,
	// with a longer validity time than 825 days.
	cutoff := time.Date(2019, 7, 1, 0, 0, 0, 0, time.UTC)
	if build.IsDarwin &&
		leaf.NotBefore.After(cutoff) &&
		leaf.NotAfter.Sub(leaf.NotBefore) > 825*24*time.Hour {
		return errors.New("certificate incompatible with macOS 10.15 (Catalina)")
	}

	return nil
}

func errorStringMap(errs map[string]error) map[string]*string {
	out := make(map[string]*string, len(errs))
	for s, e := range errs {
		out[s] = errorString(e)
	}
	return out
}

func errorString(err error) *string {
	if err != nil {
		msg := err.Error()
		return &msg
	}
	return nil
}

type discoveryStatusEntry struct {
	Error *string `json:"error"`
}

func discoveryStatusMap(errs map[string]error) map[string]discoveryStatusEntry {
	out := make(map[string]discoveryStatusEntry, len(errs))
	for s, e := range errs {
		out[s] = discoveryStatusEntry{
			Error: errorString(e),
		}
	}
	return out
}

// sanitizedHostname returns the given name in a suitable form for use as
// the common name in a certificate, or an error.
func sanitizedHostname(name string) (string, error) {
	// Remove diacritics and non-alphanumerics. This works by first
	// transforming into normalization form D (things with diacriticals are
	// split into the base character and the mark) and then removing
	// undesired characters.
	t := transform.Chain(
		// Split runes with diacritics into base character and mark.
		norm.NFD,
		// Leave only [A-Za-z0-9-.].
		runes.Remove(runes.Predicate(func(r rune) bool {
			return r > unicode.MaxASCII ||
				!unicode.IsLetter(r) && !unicode.IsNumber(r) &&
					r != '.' && r != '-'
		})))
	name, _, err := transform.String(t, name)
	if err != nil {
		return "", err
	}

	// Name should not start or end with a dash or dot.
	name = strings.Trim(name, "-.")

	// Name should not be empty.
	if name == "" {
		return "", errors.New("no suitable name")
	}

	return strings.ToLower(name), nil
}
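
// isFolderNotFound reports whether the error indicates that the folder is
// missing, paused or not running.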
func isFolderNotFound(err error) bool {
	for _, target := range []error{
		model.ErrFolderMissing,
		model.ErrFolderPaused,
		model.ErrFolderNotRunning,
	} {
		if errors.Is(err, target) {
			return true
		}
	}
	return false
}
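
// httpError writes an HTTP error status appropriate for the given error:
// 501 for unsupported upgrades, 500 otherwise.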
func httpError(w http.ResponseWriter, err error) {
	if errors.Is(err, upgrade.ErrUpgradeUnsupported) {
		http.Error(w, upgrade.ErrUpgradeUnsupported.Error(), http.StatusNotImplemented)
	} else {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
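
// bufferedResponseWriter is a minimal http.ResponseWriter that discards
// status and headers and writes the body to the underlying io.Writer; it is
// used to capture the metrics output for the support bundle.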
type bufferedResponseWriter struct {
	io.Writer
}

func (w bufferedResponseWriter) WriteHeader(int) {}
func (w bufferedResponseWriter) Header() http.Header {
	return http.Header{}
}