2014-11-16 20:13:20 +00:00
|
|
|
// Copyright (C) 2014 The Syncthing Authors.
|
2014-09-29 19:43:32 +00:00
|
|
|
//
|
2015-03-07 20:36:35 +00:00
|
|
|
// This Source Code Form is subject to the terms of the Mozilla Public
|
|
|
|
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
2017-02-09 06:52:18 +00:00
|
|
|
// You can obtain one at https://mozilla.org/MPL/2.0/.
|
2014-06-01 20:50:14 +00:00
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
package api
|
2014-03-02 22:58:14 +00:00
|
|
|
|
|
|
|
import (
|
2018-10-01 15:23:46 +00:00
|
|
|
"bytes"
|
2019-11-21 07:41:15 +00:00
|
|
|
"context"
|
2014-08-21 22:45:40 +00:00
|
|
|
"crypto/tls"
|
2019-10-16 18:31:46 +00:00
|
|
|
"crypto/x509"
|
2014-03-02 22:58:14 +00:00
|
|
|
"encoding/json"
|
2019-10-16 18:31:46 +00:00
|
|
|
"errors"
|
2014-05-22 14:12:19 +00:00
|
|
|
"fmt"
|
2018-10-01 15:23:46 +00:00
|
|
|
"io"
|
2014-03-02 22:58:14 +00:00
|
|
|
"io/ioutil"
|
2019-07-28 07:49:07 +00:00
|
|
|
"log"
|
2014-04-30 20:52:38 +00:00
|
|
|
"net"
|
2014-03-02 22:58:14 +00:00
|
|
|
"net/http"
|
2017-12-15 20:01:56 +00:00
|
|
|
"net/url"
|
2014-07-22 18:11:36 +00:00
|
|
|
"os"
|
2014-05-22 14:12:19 +00:00
|
|
|
"path/filepath"
|
2015-04-07 19:45:22 +00:00
|
|
|
"reflect"
|
2017-11-06 14:22:10 +00:00
|
|
|
"regexp"
|
2014-03-02 22:58:14 +00:00
|
|
|
"runtime"
|
2016-08-02 11:06:45 +00:00
|
|
|
"runtime/pprof"
|
2015-10-03 15:25:21 +00:00
|
|
|
"sort"
|
2014-07-13 19:07:24 +00:00
|
|
|
"strconv"
|
2014-07-05 19:40:29 +00:00
|
|
|
"strings"
|
2014-03-02 22:58:14 +00:00
|
|
|
"time"
|
|
|
|
|
2019-02-02 09:11:42 +00:00
|
|
|
metrics "github.com/rcrowley/go-metrics"
|
2019-07-09 09:40:30 +00:00
|
|
|
"github.com/thejerf/suture"
|
|
|
|
"github.com/vitrun/qart/qr"
|
|
|
|
"golang.org/x/crypto/bcrypt"
|
|
|
|
|
2019-02-12 06:58:24 +00:00
|
|
|
"github.com/syncthing/syncthing/lib/build"
|
2015-08-06 09:29:25 +00:00
|
|
|
"github.com/syncthing/syncthing/lib/config"
|
2017-11-21 07:25:38 +00:00
|
|
|
"github.com/syncthing/syncthing/lib/connections"
|
2015-08-06 09:29:25 +00:00
|
|
|
"github.com/syncthing/syncthing/lib/db"
|
|
|
|
"github.com/syncthing/syncthing/lib/discover"
|
|
|
|
"github.com/syncthing/syncthing/lib/events"
|
2017-08-19 14:36:56 +00:00
|
|
|
"github.com/syncthing/syncthing/lib/fs"
|
2019-02-12 06:58:24 +00:00
|
|
|
"github.com/syncthing/syncthing/lib/locations"
|
2015-10-03 15:25:21 +00:00
|
|
|
"github.com/syncthing/syncthing/lib/logger"
|
2016-04-15 10:59:41 +00:00
|
|
|
"github.com/syncthing/syncthing/lib/model"
|
2015-09-22 17:38:46 +00:00
|
|
|
"github.com/syncthing/syncthing/lib/protocol"
|
2016-05-26 07:02:56 +00:00
|
|
|
"github.com/syncthing/syncthing/lib/rand"
|
2015-08-06 09:29:25 +00:00
|
|
|
"github.com/syncthing/syncthing/lib/sync"
|
2015-09-02 20:05:54 +00:00
|
|
|
"github.com/syncthing/syncthing/lib/tlsutil"
|
2015-08-06 09:29:25 +00:00
|
|
|
"github.com/syncthing/syncthing/lib/upgrade"
|
2019-03-26 19:53:58 +00:00
|
|
|
"github.com/syncthing/syncthing/lib/ur"
|
2019-07-09 09:40:30 +00:00
|
|
|
"github.com/syncthing/syncthing/lib/util"
|
2014-03-02 22:58:14 +00:00
|
|
|
)
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
// bcryptExpr matches a bcrypt hash ("$2a$"/"$2b$"/"$2y$" prefix, a cost,
// then at least 50 characters of salt+digest) and not too much else.
var bcryptExpr = regexp.MustCompile(`^\$2[aby]\$\d+\$.{50,}`)
|
2014-03-02 22:58:14 +00:00
|
|
|
|
2017-04-13 17:14:34 +00:00
|
|
|
const (
	// DefaultEventMask is every event except the high-volume local/remote
	// change detection events, which get their own subscription.
	DefaultEventMask = events.AllEvents &^ events.LocalChangeDetected &^ events.RemoteChangeDetected
	// DiskEventMask is only the change detection events, served separately
	// (the /rest/events/disk endpoint).
	DiskEventMask = events.LocalChangeDetected | events.RemoteChangeDetected
	// EventSubBufferSize is the buffer size used for event subscriptions.
	EventSubBufferSize = 1000
	// defaultEventTimeout is the long-poll timeout when the caller gives none.
	defaultEventTimeout = time.Minute
	// httpsCertLifetimeDays is the validity of a newly generated GUI HTTPS
	// certificate (passed to tlsutil.NewCertificate in getListener).
	httpsCertLifetimeDays = 820
)
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
// service is the API/GUI HTTP service. It serves the REST API, the static
// GUI assets and the event streams, and restarts its listener when the GUI
// configuration changes.
type service struct {
	suture.Service

	id      protocol.DeviceID
	cfg     config.Wrapper
	statics *staticsServer
	model   model.Model
	// eventSubs maps an event mask to the buffered subscription serving it;
	// guarded by eventSubsMut.
	eventSubs          map[events.EventType]events.BufferedSubscription
	eventSubsMut       sync.Mutex
	evLogger           events.Logger
	discoverer         discover.CachingMux
	connectionsService connections.Service
	fss                model.FolderSummaryService
	urService          *ur.Service
	systemConfigMut    sync.Mutex // serializes posts to /rest/system/config
	cpu                Rater
	contr              Controller
	noUpgrade          bool
	// tlsDefaultCommonName is the certificate CN used when the host name
	// cannot be determined.
	tlsDefaultCommonName string
	configChanged        chan struct{} // signals intentional listener close due to config change
	started              chan string   // signals startup complete by sending the listener address, for testing only
	startedOnce          chan struct{} // the service has started successfully at least once
	// startupErr is set before startedOnce is closed on a failed first
	// start; read via WaitForStart/Complete.
	startupErr   error
	listenerAddr net.Addr

	guiErrors logger.Recorder
	systemLog logger.Recorder
}
|
2015-03-26 22:26:51 +00:00
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
// Rater reports a rate — used here for the CPU usage rate shown in the GUI.
type Rater interface {
	Rate() float64
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
// Controller lets the API service steer the process lifecycle: leaving
// upgrade mode, restarting, and shutting down.
type Controller interface {
	ExitUpgrading()
	Restart()
	Shutdown()
}
|
2015-04-20 03:34:04 +00:00
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
// Service is the API service as seen from outside this package: a suture
// service and a config committer that can additionally be waited on for
// its initial startup to complete.
type Service interface {
	suture.Service
	config.Committer
	WaitForStart() error
}
|
2014-09-12 19:28:47 +00:00
|
|
|
|
2019-08-15 14:29:37 +00:00
|
|
|
// New constructs the API service with all its collaborators injected. The
// defaultSub and diskSub subscriptions back the /rest/events and
// /rest/events/disk streams respectively. The returned Service does not
// listen until started under a supervisor.
func New(id protocol.DeviceID, cfg config.Wrapper, assetDir, tlsDefaultCommonName string, m model.Model, defaultSub, diskSub events.BufferedSubscription, evLogger events.Logger, discoverer discover.CachingMux, connectionsService connections.Service, urService *ur.Service, fss model.FolderSummaryService, errors, systemLog logger.Recorder, cpu Rater, contr Controller, noUpgrade bool) Service {
	s := &service{
		id:      id,
		cfg:     cfg,
		statics: newStaticsServer(cfg.GUI().Theme, assetDir),
		model:   m,
		eventSubs: map[events.EventType]events.BufferedSubscription{
			DefaultEventMask: defaultSub,
			DiskEventMask:    diskSub,
		},
		eventSubsMut:         sync.NewMutex(),
		evLogger:             evLogger,
		discoverer:           discoverer,
		connectionsService:   connectionsService,
		fss:                  fss,
		urService:            urService,
		systemConfigMut:      sync.NewMutex(),
		guiErrors:            errors,
		systemLog:            systemLog,
		cpu:                  cpu,
		contr:                contr,
		noUpgrade:            noUpgrade,
		tlsDefaultCommonName: tlsDefaultCommonName,
		configChanged:        make(chan struct{}),
		startedOnce:          make(chan struct{}),
	}
	// The actual work happens in s.serve, wrapped up as a suture service.
	s.Service = util.AsService(s.serve, s.String())
	return s
}
|
|
|
|
|
|
|
|
// WaitForStart blocks until the service has either started successfully or
// failed its initial startup, and returns the startup error, if any.
// startedOnce is closed by serve() in both cases; startupErr is written
// before the close, so reading it afterwards is safe.
func (s *service) WaitForStart() error {
	<-s.startedOnce
	return s.startupErr
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
// getListener loads (or, when missing/expired, regenerates) the GUI HTTPS
// certificate and returns a listener on the configured GUI address. The
// listener accepts both TLS and plain-text connections (DowngradingListener).
func (s *service) getListener(guiCfg config.GUIConfiguration) (net.Listener, error) {
	httpsCertFile := locations.Get(locations.HTTPSCertFile)
	httpsKeyFile := locations.Get(locations.HTTPSKeyFile)
	cert, err := tls.LoadX509KeyPair(httpsCertFile, httpsKeyFile)

	// If the certificate has expired or will expire in the next month, fail
	// it and generate a new one.
	if err == nil {
		err = checkExpiry(cert)
	}
	if err != nil {
		l.Infoln("Loading HTTPS certificate:", err)
		l.Infoln("Creating new HTTPS certificate")

		// When generating the HTTPS certificate, use the system host name per
		// default. If that isn't available, use the "syncthing" default.
		var name string
		name, err = os.Hostname()
		if err != nil {
			name = s.tlsDefaultCommonName
		}

		cert, err = tlsutil.NewCertificate(httpsCertFile, httpsKeyFile, name, httpsCertLifetimeDays)
	}
	// err is now whichever of certificate generation failed, if anything.
	if err != nil {
		return nil, err
	}
	tlsCfg := tlsutil.SecureDefault()
	tlsCfg.Certificates = []tls.Certificate{cert}

	if guiCfg.Network() == "unix" {
		// When listening on a UNIX socket we should unlink before bind,
		// lest we get a "bind: address already in use". We don't
		// particularly care if this succeeds or not.
		os.Remove(guiCfg.Address())
	}
	rawListener, err := net.Listen(guiCfg.Network(), guiCfg.Address())
	if err != nil {
		return nil, err
	}

	// Wrap the raw listener so that both HTTPS and plain HTTP connections
	// are accepted on the same port.
	listener := &tlsutil.DowngradingListener{
		Listener:  rawListener,
		TLSConfig: tlsCfg,
	}
	return listener, nil
}
|
|
|
|
|
2015-12-15 21:40:38 +00:00
|
|
|
func sendJSON(w http.ResponseWriter, jsonObject interface{}) {
|
|
|
|
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
2016-03-20 10:54:53 +00:00
|
|
|
// Marshalling might fail, in which case we should return a 500 with the
|
|
|
|
// actual error.
|
2018-05-12 13:14:41 +00:00
|
|
|
bs, err := json.MarshalIndent(jsonObject, "", " ")
|
2016-03-20 10:54:53 +00:00
|
|
|
if err != nil {
|
|
|
|
// This Marshal() can't fail though.
|
|
|
|
bs, _ = json.Marshal(map[string]string{"error": err.Error()})
|
|
|
|
http.Error(w, string(bs), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
2018-05-12 13:14:41 +00:00
|
|
|
fmt.Fprintf(w, "%s\n", bs)
|
2015-12-15 21:40:38 +00:00
|
|
|
}
|
|
|
|
|
2019-11-21 07:41:15 +00:00
|
|
|
func (s *service) serve(ctx context.Context) {
|
2016-06-06 22:12:23 +00:00
|
|
|
listener, err := s.getListener(s.cfg.GUI())
|
|
|
|
if err != nil {
|
2016-10-07 03:10:26 +00:00
|
|
|
select {
|
|
|
|
case <-s.startedOnce:
|
|
|
|
// We let this be a loud user-visible warning as it may be the only
|
|
|
|
// indication they get that the GUI won't be available.
|
|
|
|
l.Warnln("Starting API/GUI:", err)
|
|
|
|
|
|
|
|
default:
|
2016-06-06 22:12:23 +00:00
|
|
|
// This is during initialization. A failure here should be fatal
|
|
|
|
// as there will be no way for the user to communicate with us
|
|
|
|
// otherwise anyway.
|
2019-02-14 20:29:14 +00:00
|
|
|
s.startupErr = err
|
|
|
|
close(s.startedOnce)
|
2016-06-06 22:12:23 +00:00
|
|
|
}
|
2019-02-14 20:29:14 +00:00
|
|
|
return
|
2016-06-06 22:12:23 +00:00
|
|
|
}
|
2016-01-14 10:06:36 +00:00
|
|
|
|
|
|
|
if listener == nil {
|
|
|
|
// Not much we can do here other than exit quickly. The supervisor
|
|
|
|
// will log an error at some point.
|
|
|
|
return
|
|
|
|
}
|
2015-06-03 07:47:39 +00:00
|
|
|
|
2019-10-04 10:25:41 +00:00
|
|
|
s.listenerAddr = listener.Addr()
|
2016-10-07 03:10:26 +00:00
|
|
|
defer listener.Close()
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
s.cfg.Subscribe(s)
|
|
|
|
defer s.cfg.Unsubscribe(s)
|
|
|
|
|
2014-07-05 19:40:29 +00:00
|
|
|
// The GET handlers
|
|
|
|
getRestMux := http.NewServeMux()
|
2015-04-28 21:12:19 +00:00
|
|
|
getRestMux.HandleFunc("/rest/db/completion", s.getDBCompletion) // device folder
|
|
|
|
getRestMux.HandleFunc("/rest/db/file", s.getDBFile) // folder file
|
|
|
|
getRestMux.HandleFunc("/rest/db/ignores", s.getDBIgnores) // folder
|
|
|
|
getRestMux.HandleFunc("/rest/db/need", s.getDBNeed) // folder [perpage] [page]
|
2017-12-15 20:01:56 +00:00
|
|
|
getRestMux.HandleFunc("/rest/db/remoteneed", s.getDBRemoteNeed) // device folder [perpage] [page]
|
2018-12-11 08:59:04 +00:00
|
|
|
getRestMux.HandleFunc("/rest/db/localchanged", s.getDBLocalChanged) // folder
|
2015-04-28 21:12:19 +00:00
|
|
|
getRestMux.HandleFunc("/rest/db/status", s.getDBStatus) // folder
|
|
|
|
getRestMux.HandleFunc("/rest/db/browse", s.getDBBrowse) // folder [prefix] [dirsonly] [levels]
|
2018-01-01 14:39:23 +00:00
|
|
|
getRestMux.HandleFunc("/rest/folder/versions", s.getFolderVersions) // folder
|
2018-11-07 10:04:41 +00:00
|
|
|
getRestMux.HandleFunc("/rest/folder/errors", s.getFolderErrors) // folder
|
|
|
|
getRestMux.HandleFunc("/rest/folder/pullerrors", s.getFolderErrors) // folder (deprecated)
|
2017-04-13 17:14:34 +00:00
|
|
|
getRestMux.HandleFunc("/rest/events", s.getIndexEvents) // [since] [limit] [timeout] [events]
|
2017-03-07 05:44:47 +00:00
|
|
|
getRestMux.HandleFunc("/rest/events/disk", s.getDiskEvents) // [since] [limit] [timeout]
|
2015-04-28 21:12:19 +00:00
|
|
|
getRestMux.HandleFunc("/rest/stats/device", s.getDeviceStats) // -
|
|
|
|
getRestMux.HandleFunc("/rest/stats/folder", s.getFolderStats) // -
|
|
|
|
getRestMux.HandleFunc("/rest/svc/deviceid", s.getDeviceID) // id
|
|
|
|
getRestMux.HandleFunc("/rest/svc/lang", s.getLang) // -
|
|
|
|
getRestMux.HandleFunc("/rest/svc/report", s.getReport) // -
|
2016-05-26 07:25:34 +00:00
|
|
|
getRestMux.HandleFunc("/rest/svc/random/string", s.getRandomString) // [length]
|
2015-04-28 21:12:19 +00:00
|
|
|
getRestMux.HandleFunc("/rest/system/browse", s.getSystemBrowse) // current
|
|
|
|
getRestMux.HandleFunc("/rest/system/config", s.getSystemConfig) // -
|
|
|
|
getRestMux.HandleFunc("/rest/system/config/insync", s.getSystemConfigInsync) // -
|
|
|
|
getRestMux.HandleFunc("/rest/system/connections", s.getSystemConnections) // -
|
|
|
|
getRestMux.HandleFunc("/rest/system/discovery", s.getSystemDiscovery) // -
|
|
|
|
getRestMux.HandleFunc("/rest/system/error", s.getSystemError) // -
|
|
|
|
getRestMux.HandleFunc("/rest/system/ping", s.restPing) // -
|
|
|
|
getRestMux.HandleFunc("/rest/system/status", s.getSystemStatus) // -
|
|
|
|
getRestMux.HandleFunc("/rest/system/upgrade", s.getSystemUpgrade) // -
|
|
|
|
getRestMux.HandleFunc("/rest/system/version", s.getSystemVersion) // -
|
2015-10-03 15:25:21 +00:00
|
|
|
getRestMux.HandleFunc("/rest/system/debug", s.getSystemDebug) // -
|
|
|
|
getRestMux.HandleFunc("/rest/system/log", s.getSystemLog) // [since]
|
|
|
|
getRestMux.HandleFunc("/rest/system/log.txt", s.getSystemLogTxt) // [since]
|
2014-07-29 11:01:27 +00:00
|
|
|
|
2014-07-05 19:40:29 +00:00
|
|
|
// The POST handlers
|
|
|
|
postRestMux := http.NewServeMux()
|
2016-12-21 18:41:25 +00:00
|
|
|
postRestMux.HandleFunc("/rest/db/prio", s.postDBPrio) // folder file [perpage] [page]
|
|
|
|
postRestMux.HandleFunc("/rest/db/ignores", s.postDBIgnores) // folder
|
|
|
|
postRestMux.HandleFunc("/rest/db/override", s.postDBOverride) // folder
|
2018-07-12 08:15:57 +00:00
|
|
|
postRestMux.HandleFunc("/rest/db/revert", s.postDBRevert) // folder
|
2016-12-21 18:41:25 +00:00
|
|
|
postRestMux.HandleFunc("/rest/db/scan", s.postDBScan) // folder [sub...] [delay]
|
2018-01-01 14:39:23 +00:00
|
|
|
postRestMux.HandleFunc("/rest/folder/versions", s.postFolderVersionsRestore) // folder <body>
|
2016-12-21 18:41:25 +00:00
|
|
|
postRestMux.HandleFunc("/rest/system/config", s.postSystemConfig) // <body>
|
|
|
|
postRestMux.HandleFunc("/rest/system/error", s.postSystemError) // <body>
|
|
|
|
postRestMux.HandleFunc("/rest/system/error/clear", s.postSystemErrorClear) // -
|
|
|
|
postRestMux.HandleFunc("/rest/system/ping", s.restPing) // -
|
|
|
|
postRestMux.HandleFunc("/rest/system/reset", s.postSystemReset) // [folder]
|
|
|
|
postRestMux.HandleFunc("/rest/system/restart", s.postSystemRestart) // -
|
|
|
|
postRestMux.HandleFunc("/rest/system/shutdown", s.postSystemShutdown) // -
|
|
|
|
postRestMux.HandleFunc("/rest/system/upgrade", s.postSystemUpgrade) // -
|
2017-03-04 07:54:13 +00:00
|
|
|
postRestMux.HandleFunc("/rest/system/pause", s.makeDevicePauseHandler(true)) // [device]
|
|
|
|
postRestMux.HandleFunc("/rest/system/resume", s.makeDevicePauseHandler(false)) // [device]
|
2016-12-21 18:41:25 +00:00
|
|
|
postRestMux.HandleFunc("/rest/system/debug", s.postSystemDebug) // [enable] [disable]
|
2015-04-06 08:23:27 +00:00
|
|
|
|
|
|
|
// Debug endpoints, not for general use
|
2016-08-02 11:06:45 +00:00
|
|
|
debugMux := http.NewServeMux()
|
|
|
|
debugMux.HandleFunc("/rest/debug/peerCompletion", s.getPeerCompletion)
|
|
|
|
debugMux.HandleFunc("/rest/debug/httpmetrics", s.getSystemHTTPMetrics)
|
|
|
|
debugMux.HandleFunc("/rest/debug/cpuprof", s.getCPUProf) // duration
|
|
|
|
debugMux.HandleFunc("/rest/debug/heapprof", s.getHeapProf)
|
2018-10-01 15:23:46 +00:00
|
|
|
debugMux.HandleFunc("/rest/debug/support", s.getSupportBundle)
|
2016-08-02 11:06:45 +00:00
|
|
|
getRestMux.Handle("/rest/debug/", s.whenDebugging(debugMux))
|
2014-07-05 19:40:29 +00:00
|
|
|
|
|
|
|
// A handler that splits requests between the two above and disables
|
|
|
|
// caching
|
2015-11-21 08:48:57 +00:00
|
|
|
restMux := noCacheMiddleware(metricsMiddleware(getPostHandler(getRestMux, postRestMux)))
|
2014-07-05 19:40:29 +00:00
|
|
|
|
|
|
|
// The main routing handler
|
|
|
|
mux := http.NewServeMux()
|
|
|
|
mux.Handle("/rest/", restMux)
|
2015-04-28 21:12:19 +00:00
|
|
|
mux.HandleFunc("/qr/", s.getQR)
|
2014-07-05 19:40:29 +00:00
|
|
|
|
|
|
|
// Serve compiled in assets unless an asset directory was set (for development)
|
2016-06-07 07:46:45 +00:00
|
|
|
mux.Handle("/", s.statics)
|
2016-01-10 15:37:31 +00:00
|
|
|
|
2016-05-22 10:26:09 +00:00
|
|
|
// Handle the special meta.js path
|
|
|
|
mux.HandleFunc("/meta.js", s.getJSMetadata)
|
|
|
|
|
2015-09-29 18:05:22 +00:00
|
|
|
guiCfg := s.cfg.GUI()
|
|
|
|
|
2014-07-06 13:00:44 +00:00
|
|
|
// Wrap everything in CSRF protection. The /rest prefix should be
|
|
|
|
// protected, other requests will grant cookies.
|
2019-09-05 11:35:51 +00:00
|
|
|
var handler http.Handler = newCsrfManager(s.id.String()[:5], "/rest", guiCfg, mux, locations.Get(locations.CsrfTokens))
|
2016-01-26 07:05:24 +00:00
|
|
|
|
2015-06-22 15:57:08 +00:00
|
|
|
// Add our version and ID as a header to responses
|
|
|
|
handler = withDetailsMiddleware(s.id, handler)
|
2014-08-31 10:59:20 +00:00
|
|
|
|
2014-07-05 19:40:29 +00:00
|
|
|
// Wrap everything in basic auth, if user/password is set.
|
2018-09-11 21:25:24 +00:00
|
|
|
if guiCfg.IsAuthEnabled() {
|
2019-08-15 14:29:37 +00:00
|
|
|
handler = basicAuthAndSessionMiddleware("sessionid-"+s.id.String()[:5], guiCfg, s.cfg.LDAP(), handler, s.evLogger)
|
2014-07-05 19:40:29 +00:00
|
|
|
}
|
2014-04-30 20:52:38 +00:00
|
|
|
|
2014-09-14 22:18:05 +00:00
|
|
|
// Redirect to HTTPS if we are supposed to
|
2015-10-12 13:27:57 +00:00
|
|
|
if guiCfg.UseTLS() {
|
2014-09-14 22:18:05 +00:00
|
|
|
handler = redirectToHTTPSMiddleware(handler)
|
|
|
|
}
|
2014-09-12 19:28:47 +00:00
|
|
|
|
2016-04-03 11:24:55 +00:00
|
|
|
// Add the CORS handling
|
2017-09-10 08:28:12 +00:00
|
|
|
handler = corsMiddleware(handler, guiCfg.InsecureAllowFrameLoading)
|
2016-04-03 11:24:55 +00:00
|
|
|
|
2016-09-03 08:33:34 +00:00
|
|
|
if addressIsLocalhost(guiCfg.Address()) && !guiCfg.InsecureSkipHostCheck {
|
|
|
|
// Verify source host
|
|
|
|
handler = localhostMiddleware(handler)
|
|
|
|
}
|
|
|
|
|
2015-10-03 15:25:21 +00:00
|
|
|
handler = debugMiddleware(handler)
|
2015-04-07 19:45:22 +00:00
|
|
|
|
2014-10-13 17:34:26 +00:00
|
|
|
srv := http.Server{
|
2017-01-01 12:38:31 +00:00
|
|
|
Handler: handler,
|
|
|
|
// ReadTimeout must be longer than SyncthingController $scope.refresh
|
|
|
|
// interval to avoid HTTP keepalive/GUI refresh race.
|
|
|
|
ReadTimeout: 15 * time.Second,
|
2019-07-28 07:49:07 +00:00
|
|
|
// Prevent the HTTP server from logging stuff on its own. The things we
|
|
|
|
// care about we log ourselves from the handlers.
|
|
|
|
ErrorLog: log.New(ioutil.Discard, "", 0),
|
2014-10-13 17:34:26 +00:00
|
|
|
}
|
|
|
|
|
2016-03-06 22:04:12 +00:00
|
|
|
l.Infoln("GUI and API listening on", listener.Addr())
|
|
|
|
l.Infoln("Access the GUI via the following URL:", guiCfg.URL())
|
2016-01-14 10:06:36 +00:00
|
|
|
if s.started != nil {
|
|
|
|
// only set when run by the tests
|
2019-12-13 08:26:41 +00:00
|
|
|
select {
|
|
|
|
case <-ctx.Done(): // Shouldn't return directly due to cleanup below
|
|
|
|
case s.started <- listener.Addr().String():
|
|
|
|
}
|
2016-01-14 10:06:36 +00:00
|
|
|
}
|
2015-06-03 07:47:39 +00:00
|
|
|
|
2017-02-05 17:51:52 +00:00
|
|
|
// Indicate successful initial startup, to ourselves and to interested
|
2016-10-07 03:10:26 +00:00
|
|
|
// listeners (i.e. the thing that starts the browser).
|
|
|
|
select {
|
|
|
|
case <-s.startedOnce:
|
|
|
|
default:
|
|
|
|
close(s.startedOnce)
|
|
|
|
}
|
|
|
|
|
2016-06-06 22:12:23 +00:00
|
|
|
// Serve in the background
|
|
|
|
|
|
|
|
serveError := make(chan error, 1)
|
|
|
|
go func() {
|
2019-12-13 08:26:41 +00:00
|
|
|
select {
|
|
|
|
case serveError <- srv.Serve(listener):
|
|
|
|
case <-ctx.Done():
|
|
|
|
}
|
2016-06-06 22:12:23 +00:00
|
|
|
}()
|
|
|
|
|
|
|
|
// Wait for stop, restart or error signals
|
|
|
|
|
2015-06-03 07:47:39 +00:00
|
|
|
select {
|
2019-11-21 07:41:15 +00:00
|
|
|
case <-ctx.Done():
|
2016-06-06 22:12:23 +00:00
|
|
|
// Shutting down permanently
|
|
|
|
l.Debugln("shutting down (stop)")
|
2016-01-14 10:06:36 +00:00
|
|
|
case <-s.configChanged:
|
2016-06-06 22:12:23 +00:00
|
|
|
// Soft restart due to configuration change
|
|
|
|
l.Debugln("restarting (config changed)")
|
|
|
|
case <-serveError:
|
|
|
|
// Restart due to listen/serve failure
|
|
|
|
l.Warnln("GUI/API:", err, "(restarting)")
|
2015-06-03 07:47:39 +00:00
|
|
|
}
|
2019-07-28 06:03:55 +00:00
|
|
|
srv.Close()
|
2015-04-28 21:12:19 +00:00
|
|
|
}
|
|
|
|
|
2019-02-14 20:29:14 +00:00
|
|
|
// Complete implements suture.IsCompletable, which signifies to the supervisor
|
|
|
|
// whether to stop restarting the service.
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) Complete() bool {
|
2019-02-14 20:29:14 +00:00
|
|
|
select {
|
|
|
|
case <-s.startedOnce:
|
|
|
|
return s.startupErr != nil
|
|
|
|
default:
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
// String returns a unique-ish identifier for this service instance, used
// as the suture service name and in log output.
func (s *service) String() string {
	return fmt.Sprintf("api.service@%p", s)
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) VerifyConfiguration(from, to config.Configuration) error {
|
2018-09-21 12:28:57 +00:00
|
|
|
if to.GUI.Network() != "tcp" {
|
|
|
|
return nil
|
|
|
|
}
|
2016-12-18 18:57:41 +00:00
|
|
|
_, err := net.ResolveTCPAddr("tcp", to.GUI.Address())
|
|
|
|
return err
|
2015-06-03 07:47:39 +00:00
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
// CommitConfiguration implements config.Committer. When the GUI
// configuration changes, it updates the static file server theme and tells
// the serve loop to restart with a fresh listener. It always returns true:
// no config change to this service requires a full process restart.
func (s *service) CommitConfiguration(from, to config.Configuration) bool {
	// No action required when this changes, so mask the fact that it changed at all.
	// (Must happen before the equality comparison below.)
	from.GUI.Debugging = to.GUI.Debugging

	if to.GUI == from.GUI {
		return true
	}

	if to.GUI.Theme != from.GUI.Theme {
		s.statics.setTheme(to.GUI.Theme)
	}

	// Tell the serve loop to restart
	s.configChanged <- struct{}{}

	return true
}
|
|
|
|
|
2014-07-05 19:40:29 +00:00
|
|
|
func getPostHandler(get, post http.Handler) http.Handler {
|
|
|
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
switch r.Method {
|
|
|
|
case "GET":
|
|
|
|
get.ServeHTTP(w, r)
|
|
|
|
case "POST":
|
|
|
|
post.ServeHTTP(w, r)
|
|
|
|
default:
|
|
|
|
http.Error(w, "Method not allowed", http.StatusMethodNotAllowed)
|
|
|
|
}
|
|
|
|
})
|
2014-03-02 22:58:14 +00:00
|
|
|
}
|
|
|
|
|
2015-04-07 19:45:22 +00:00
|
|
|
// debugMiddleware logs, at debug level and only when HTTP debugging is
// enabled, each request's method, URL, response status, bytes written and
// wall-clock duration. The wrapped handler always runs first; the logging
// is purely after the fact.
func debugMiddleware(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		t0 := time.Now()
		h.ServeHTTP(w, r)

		if shouldDebugHTTP() {
			ms := 1000 * time.Since(t0).Seconds()

			// The variable `w` is most likely a *http.response, which we can't do
			// much with since it's a non exported type. We can however peek into
			// it with reflection to get at the status code and number of bytes
			// written.
			// NOTE: this depends on the unexported field names "status" (int)
			// and "written" (int64) in net/http; if they're absent, both stay 0.
			var status, written int64
			if rw := reflect.Indirect(reflect.ValueOf(w)); rw.IsValid() && rw.Kind() == reflect.Struct {
				if rf := rw.FieldByName("status"); rf.IsValid() && rf.Kind() == reflect.Int {
					status = rf.Int()
				}
				if rf := rw.FieldByName("written"); rf.IsValid() && rf.Kind() == reflect.Int64 {
					written = rf.Int()
				}
			}
			l.Debugf("http: %s %q: status %d, %d bytes in %.02f ms", r.Method, r.URL.String(), status, written, ms)
		}
	})
}
|
|
|
|
|
2017-09-10 08:28:12 +00:00
|
|
|
func corsMiddleware(next http.Handler, allowFrameLoading bool) http.Handler {
|
2016-01-26 07:05:24 +00:00
|
|
|
// Handle CORS headers and CORS OPTIONS request.
|
|
|
|
// CORS OPTIONS request are typically sent by browser during AJAX preflight
|
|
|
|
// when the browser initiate a POST request.
|
2016-02-12 21:10:08 +00:00
|
|
|
//
|
|
|
|
// As the OPTIONS request is unauthorized, this handler must be the first
|
2016-04-03 11:24:55 +00:00
|
|
|
// of the chain (hence added at the end).
|
2016-02-12 21:10:08 +00:00
|
|
|
//
|
2016-01-26 07:05:24 +00:00
|
|
|
// See https://www.w3.org/TR/cors/ for details.
|
|
|
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
// Process OPTIONS requests
|
|
|
|
if r.Method == "OPTIONS" {
|
2016-09-06 22:16:50 +00:00
|
|
|
// Add a generous access-control-allow-origin header for CORS requests
|
|
|
|
w.Header().Add("Access-Control-Allow-Origin", "*")
|
2016-01-26 07:05:24 +00:00
|
|
|
// Only GET/POST Methods are supported
|
|
|
|
w.Header().Set("Access-Control-Allow-Methods", "GET, POST")
|
2016-09-06 22:16:50 +00:00
|
|
|
// Only these headers can be set
|
|
|
|
w.Header().Set("Access-Control-Allow-Headers", "Content-Type, X-API-Key")
|
2016-01-26 07:05:24 +00:00
|
|
|
// The request is meant to be cached 10 minutes
|
|
|
|
w.Header().Set("Access-Control-Max-Age", "600")
|
|
|
|
|
|
|
|
// Indicate that no content will be returned
|
|
|
|
w.WriteHeader(204)
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-09-10 08:28:12 +00:00
|
|
|
// Other security related headers that should be present.
|
|
|
|
// https://www.owasp.org/index.php/Security_Headers
|
|
|
|
|
|
|
|
if !allowFrameLoading {
|
|
|
|
// We don't want to be rendered in an <iframe>,
|
|
|
|
// <frame> or <object>. (Unless we do it ourselves.
|
|
|
|
// This is also an escape hatch for people who serve
|
|
|
|
// Syncthing GUI as part of their own website
|
|
|
|
// through a proxy, so they don't need to set the
|
|
|
|
// allowFrameLoading bool.)
|
|
|
|
w.Header().Set("X-Frame-Options", "SAMEORIGIN")
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the browser senses an XSS attack it's allowed to take
|
|
|
|
// action. (How this would not always be the default I
|
|
|
|
// don't fully understand.)
|
|
|
|
w.Header().Set("X-XSS-Protection", "1; mode=block")
|
|
|
|
|
|
|
|
// Our content type headers are correct. Don't guess.
|
|
|
|
w.Header().Set("X-Content-Type-Options", "nosniff")
|
|
|
|
|
2016-01-26 07:05:24 +00:00
|
|
|
// For everything else, pass to the next handler
|
|
|
|
next.ServeHTTP(w, r)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2015-11-21 08:48:57 +00:00
|
|
|
// metricsMiddleware times each request and records the duration in a
// go-metrics timer registered per URL path (presumably surfaced via the
// /rest/debug/httpmetrics endpoint — confirm against getSystemHTTPMetrics).
func metricsMiddleware(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		t := metrics.GetOrRegisterTimer(r.URL.Path, nil)
		t0 := time.Now()
		h.ServeHTTP(w, r)
		t.UpdateSince(t0)
	})
}
|
|
|
|
|
2014-09-14 22:18:05 +00:00
|
|
|
func redirectToHTTPSMiddleware(h http.Handler) http.Handler {
|
2014-09-12 19:28:47 +00:00
|
|
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
2014-09-14 22:18:05 +00:00
|
|
|
if r.TLS == nil {
|
|
|
|
// Redirect HTTP requests to HTTPS
|
|
|
|
r.URL.Host = r.Host
|
2014-09-12 19:28:47 +00:00
|
|
|
r.URL.Scheme = "https"
|
2016-01-29 10:07:51 +00:00
|
|
|
http.Redirect(w, r, r.URL.String(), http.StatusTemporaryRedirect)
|
2014-09-12 19:28:47 +00:00
|
|
|
} else {
|
|
|
|
h.ServeHTTP(w, r)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2014-07-05 19:40:29 +00:00
|
|
|
func noCacheMiddleware(h http.Handler) http.Handler {
|
|
|
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
2015-04-27 07:08:55 +00:00
|
|
|
w.Header().Set("Cache-Control", "max-age=0, no-cache, no-store")
|
|
|
|
w.Header().Set("Expires", time.Now().UTC().Format(http.TimeFormat))
|
|
|
|
w.Header().Set("Pragma", "no-cache")
|
2014-07-05 19:40:29 +00:00
|
|
|
h.ServeHTTP(w, r)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2015-06-22 15:57:08 +00:00
|
|
|
// withDetailsMiddleware adds the Syncthing version and device ID as
// headers to every response before delegating to h.
func withDetailsMiddleware(id protocol.DeviceID, h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("X-Syncthing-Version", build.Version)
		w.Header().Set("X-Syncthing-ID", id.String())
		h.ServeHTTP(w, r)
	})
}
|
|
|
|
|
2016-09-03 08:33:34 +00:00
|
|
|
func localhostMiddleware(h http.Handler) http.Handler {
|
|
|
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
if addressIsLocalhost(r.Host) {
|
|
|
|
h.ServeHTTP(w, r)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
http.Error(w, "Host check error", http.StatusForbidden)
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) whenDebugging(h http.Handler) http.Handler {
|
2016-08-02 11:06:45 +00:00
|
|
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
if s.cfg.GUI().Debugging {
|
|
|
|
h.ServeHTTP(w, r)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-09-16 07:32:24 +00:00
|
|
|
http.Error(w, "Debugging disabled", http.StatusForbidden)
|
2016-08-02 11:06:45 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
// restPing answers with a static {"ping": "pong"} object; registered on
// both the GET and POST muxes as a liveness check.
func (s *service) restPing(w http.ResponseWriter, r *http.Request) {
	sendJSON(w, map[string]string{"ping": "pong"})
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getJSMetadata(w http.ResponseWriter, r *http.Request) {
|
2016-05-22 10:26:09 +00:00
|
|
|
meta, _ := json.Marshal(map[string]string{
|
|
|
|
"deviceID": s.id.String(),
|
|
|
|
})
|
|
|
|
w.Header().Set("Content-Type", "application/javascript")
|
|
|
|
fmt.Fprintf(w, "var metadata = %s;\n", meta)
|
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
// getSystemVersion returns the build version, codename and platform/release
// details as JSON.
func (s *service) getSystemVersion(w http.ResponseWriter, r *http.Request) {
	sendJSON(w, map[string]interface{}{
		"version":     build.Version,
		"codename":    build.Codename,
		"longVersion": build.LongVersion,
		"os":          runtime.GOOS,
		"arch":        runtime.GOARCH,
		"isBeta":      build.IsBeta,
		"isCandidate": build.IsCandidate,
		"isRelease":   build.IsRelease,
	})
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getSystemDebug(w http.ResponseWriter, r *http.Request) {
|
2015-10-03 15:25:21 +00:00
|
|
|
names := l.Facilities()
|
|
|
|
enabled := l.FacilityDebugging()
|
|
|
|
sort.Strings(enabled)
|
2015-12-15 21:40:38 +00:00
|
|
|
sendJSON(w, map[string]interface{}{
|
2015-10-03 15:25:21 +00:00
|
|
|
"facilities": names,
|
|
|
|
"enabled": enabled,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) postSystemDebug(w http.ResponseWriter, r *http.Request) {
|
2015-10-03 15:25:21 +00:00
|
|
|
w.Header().Set("Content-Type", "application/json; charset=utf-8")
|
|
|
|
q := r.URL.Query()
|
|
|
|
for _, f := range strings.Split(q.Get("enable"), ",") {
|
2015-12-06 17:15:55 +00:00
|
|
|
if f == "" || l.ShouldDebug(f) {
|
2015-10-03 15:25:21 +00:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
l.SetDebug(f, true)
|
|
|
|
l.Infof("Enabled debug data for %q", f)
|
|
|
|
}
|
|
|
|
for _, f := range strings.Split(q.Get("disable"), ",") {
|
2015-12-06 17:15:55 +00:00
|
|
|
if f == "" || !l.ShouldDebug(f) {
|
2015-10-03 15:25:21 +00:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
l.SetDebug(f, false)
|
|
|
|
l.Infof("Disabled debug data for %q", f)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getDBBrowse(w http.ResponseWriter, r *http.Request) {
|
2015-02-07 10:52:42 +00:00
|
|
|
qs := r.URL.Query()
|
|
|
|
folder := qs.Get("folder")
|
|
|
|
prefix := qs.Get("prefix")
|
|
|
|
dirsonly := qs.Get("dirsonly") != ""
|
|
|
|
|
|
|
|
levels, err := strconv.Atoi(qs.Get("levels"))
|
|
|
|
if err != nil {
|
|
|
|
levels = -1
|
|
|
|
}
|
|
|
|
|
2015-12-15 21:40:38 +00:00
|
|
|
sendJSON(w, s.model.GlobalDirectoryTree(folder, prefix, levels, dirsonly))
|
2015-02-07 10:52:42 +00:00
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getDBCompletion(w http.ResponseWriter, r *http.Request) {
|
2014-07-29 09:06:52 +00:00
|
|
|
var qs = r.URL.Query()
|
2014-09-28 11:00:38 +00:00
|
|
|
var folder = qs.Get("folder")
|
|
|
|
var deviceStr = qs.Get("device")
|
2014-07-29 09:06:52 +00:00
|
|
|
|
2014-09-28 11:00:38 +00:00
|
|
|
device, err := protocol.DeviceIDFromString(deviceStr)
|
2014-07-29 09:06:52 +00:00
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
sendJSON(w, s.model.Completion(device, folder).Map())
|
2018-01-14 12:08:40 +00:00
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getDBStatus(w http.ResponseWriter, r *http.Request) {
|
2015-03-26 22:26:51 +00:00
|
|
|
qs := r.URL.Query()
|
|
|
|
folder := qs.Get("folder")
|
2019-03-26 19:53:58 +00:00
|
|
|
if sum, err := s.fss.Summary(folder); err != nil {
|
2018-01-14 17:01:06 +00:00
|
|
|
http.Error(w, err.Error(), http.StatusNotFound)
|
|
|
|
} else {
|
|
|
|
sendJSON(w, sum)
|
|
|
|
}
|
2015-03-26 22:26:51 +00:00
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) postDBOverride(w http.ResponseWriter, r *http.Request) {
|
2014-06-16 08:47:02 +00:00
|
|
|
var qs = r.URL.Query()
|
2014-09-28 11:00:38 +00:00
|
|
|
var folder = qs.Get("folder")
|
2015-04-28 21:12:19 +00:00
|
|
|
go s.model.Override(folder)
|
2014-06-16 08:47:02 +00:00
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) postDBRevert(w http.ResponseWriter, r *http.Request) {
|
2018-07-12 08:15:57 +00:00
|
|
|
var qs = r.URL.Query()
|
|
|
|
var folder = qs.Get("folder")
|
|
|
|
go s.model.Revert(folder)
|
|
|
|
}
|
|
|
|
|
2017-12-15 20:01:56 +00:00
|
|
|
func getPagingParams(qs url.Values) (int, int) {
|
2015-04-25 21:53:44 +00:00
|
|
|
page, err := strconv.Atoi(qs.Get("page"))
|
|
|
|
if err != nil || page < 1 {
|
|
|
|
page = 1
|
|
|
|
}
|
|
|
|
perpage, err := strconv.Atoi(qs.Get("perpage"))
|
|
|
|
if err != nil || perpage < 1 {
|
|
|
|
perpage = 1 << 16
|
|
|
|
}
|
2017-12-15 20:01:56 +00:00
|
|
|
return page, perpage
|
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getDBNeed(w http.ResponseWriter, r *http.Request) {
|
2017-12-15 20:01:56 +00:00
|
|
|
qs := r.URL.Query()
|
|
|
|
|
|
|
|
folder := qs.Get("folder")
|
2015-04-25 21:53:44 +00:00
|
|
|
|
2017-12-15 20:01:56 +00:00
|
|
|
page, perpage := getPagingParams(qs)
|
|
|
|
|
|
|
|
progress, queued, rest := s.model.NeedFolderFiles(folder, page, perpage)
|
2014-05-19 20:31:28 +00:00
|
|
|
|
2014-11-23 00:52:48 +00:00
|
|
|
// Convert the struct to a more loose structure, and inject the size.
|
2015-12-15 21:40:38 +00:00
|
|
|
sendJSON(w, map[string]interface{}{
|
2018-12-11 08:59:04 +00:00
|
|
|
"progress": toJsonFileInfoSlice(progress),
|
|
|
|
"queued": toJsonFileInfoSlice(queued),
|
|
|
|
"rest": toJsonFileInfoSlice(rest),
|
2015-04-25 21:53:44 +00:00
|
|
|
"page": page,
|
|
|
|
"perpage": perpage,
|
2015-12-15 21:40:38 +00:00
|
|
|
})
|
2014-05-19 20:31:28 +00:00
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getDBRemoteNeed(w http.ResponseWriter, r *http.Request) {
|
2017-12-15 20:01:56 +00:00
|
|
|
qs := r.URL.Query()
|
|
|
|
|
|
|
|
folder := qs.Get("folder")
|
|
|
|
device := qs.Get("device")
|
|
|
|
deviceID, err := protocol.DeviceIDFromString(device)
|
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
page, perpage := getPagingParams(qs)
|
|
|
|
|
|
|
|
if files, err := s.model.RemoteNeedFolderFiles(deviceID, folder, page, perpage); err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusNotFound)
|
|
|
|
} else {
|
|
|
|
sendJSON(w, map[string]interface{}{
|
2018-12-11 08:59:04 +00:00
|
|
|
"files": toJsonFileInfoSlice(files),
|
2017-12-15 20:01:56 +00:00
|
|
|
"page": page,
|
|
|
|
"perpage": perpage,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getDBLocalChanged(w http.ResponseWriter, r *http.Request) {
|
2018-12-11 08:59:04 +00:00
|
|
|
qs := r.URL.Query()
|
|
|
|
|
|
|
|
folder := qs.Get("folder")
|
|
|
|
|
|
|
|
page, perpage := getPagingParams(qs)
|
|
|
|
|
|
|
|
files := s.model.LocalChangedFiles(folder, page, perpage)
|
|
|
|
|
|
|
|
sendJSON(w, map[string]interface{}{
|
|
|
|
"files": toJsonFileInfoSlice(files),
|
|
|
|
"page": page,
|
|
|
|
"perpage": perpage,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
// getSystemConnections returns the model's per-device connection
// statistics as JSON.
func (s *service) getSystemConnections(w http.ResponseWriter, r *http.Request) {
	sendJSON(w, s.model.ConnectionStats())
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getDeviceStats(w http.ResponseWriter, r *http.Request) {
|
2019-11-30 12:03:24 +00:00
|
|
|
stats, err := s.model.DeviceStatistics()
|
|
|
|
if err != nil {
|
|
|
|
http.Error(w, "Internal Server Error", http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
sendJSON(w, stats)
|
2014-08-21 22:46:34 +00:00
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getFolderStats(w http.ResponseWriter, r *http.Request) {
|
2019-11-30 12:03:24 +00:00
|
|
|
stats, err := s.model.FolderStatistics()
|
|
|
|
if err != nil {
|
|
|
|
http.Error(w, "Internal Server Error", http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
sendJSON(w, stats)
|
2014-12-07 20:21:12 +00:00
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getDBFile(w http.ResponseWriter, r *http.Request) {
|
2015-03-17 17:51:50 +00:00
|
|
|
qs := r.URL.Query()
|
|
|
|
folder := qs.Get("folder")
|
|
|
|
file := qs.Get("file")
|
2016-02-12 13:53:09 +00:00
|
|
|
gf, gfOk := s.model.CurrentGlobalFile(folder, file)
|
|
|
|
lf, lfOk := s.model.CurrentFolderFile(folder, file)
|
|
|
|
|
|
|
|
if !(gfOk || lfOk) {
|
|
|
|
// This file for sure does not exist.
|
|
|
|
http.Error(w, "No such object in the index", http.StatusNotFound)
|
|
|
|
return
|
|
|
|
}
|
2015-03-17 17:51:50 +00:00
|
|
|
|
2018-04-16 18:08:50 +00:00
|
|
|
av := s.model.Availability(folder, gf, protocol.BlockInfo{})
|
2015-12-15 21:40:38 +00:00
|
|
|
sendJSON(w, map[string]interface{}{
|
2015-04-20 13:37:04 +00:00
|
|
|
"global": jsonFileInfo(gf),
|
|
|
|
"local": jsonFileInfo(lf),
|
2015-03-17 17:51:50 +00:00
|
|
|
"availability": av,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
// getSystemConfig returns a copy of the current raw configuration as
// JSON. Note: this includes secrets such as the API key.
func (s *service) getSystemConfig(w http.ResponseWriter, r *http.Request) {
	sendJSON(w, s.cfg.RawCopy())
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
// postSystemConfig replaces the running configuration with the JSON
// document in the request body, hashing a newly set GUI password, then
// activates and saves the new configuration. The request only completes
// once the configuration has become active. A decode failure yields 400;
// hashing/replace/save failures yield 500.
func (s *service) postSystemConfig(w http.ResponseWriter, r *http.Request) {
	// Serialize concurrent config posts; the read-modify-write below is
	// not atomic otherwise.
	s.systemConfigMut.Lock()
	defer s.systemConfigMut.Unlock()

	to, err := config.ReadJSON(r.Body, s.id)
	r.Body.Close()
	if err != nil {
		l.Warnln("Decoding posted config:", err)
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	// If the GUI password changed and the new value is not already a
	// bcrypt hash (and not empty), hash it before storing.
	if to.GUI.Password != s.cfg.GUI().Password {
		if to.GUI.Password != "" && !bcryptExpr.MatchString(to.GUI.Password) {
			hash, err := bcrypt.GenerateFromPassword([]byte(to.GUI.Password), 0)
			if err != nil {
				l.Warnln("bcrypting password:", err)
				http.Error(w, err.Error(), http.StatusInternalServerError)
				return
			}

			to.GUI.Password = string(hash)
		}
	}

	// Activate and save. Wait for the configuration to become active before
	// completing the request.

	if wg, err := s.cfg.Replace(to); err != nil {
		l.Warnln("Replacing config:", err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	} else {
		wg.Wait()
	}

	if err := s.cfg.Save(); err != nil {
		l.Warnln("Saving config:", err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
// getSystemConfigInsync reports whether the running configuration matches
// the saved one, i.e. whether no restart is pending to apply changes.
func (s *service) getSystemConfigInsync(w http.ResponseWriter, r *http.Request) {
	sendJSON(w, map[string]bool{"configInSync": !s.cfg.RequiresRestart()})
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
// postSystemRestart acknowledges the request and then asks the controller
// to restart the process. The restart runs in a goroutine so the response
// can be flushed to the client first.
func (s *service) postSystemRestart(w http.ResponseWriter, r *http.Request) {
	s.flushResponse(`{"ok": "restarting"}`, w)
	go s.contr.Restart()
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) postSystemReset(w http.ResponseWriter, r *http.Request) {
|
2015-04-03 18:06:03 +00:00
|
|
|
var qs = r.URL.Query()
|
|
|
|
folder := qs.Get("folder")
|
2015-06-21 07:35:41 +00:00
|
|
|
|
|
|
|
if len(folder) > 0 {
|
2015-09-29 18:05:22 +00:00
|
|
|
if _, ok := s.cfg.Folders()[folder]; !ok {
|
2015-06-21 07:35:41 +00:00
|
|
|
http.Error(w, "Invalid folder ID", 500)
|
|
|
|
return
|
2015-06-04 12:35:03 +00:00
|
|
|
}
|
2015-04-03 18:06:03 +00:00
|
|
|
}
|
2015-06-21 07:35:41 +00:00
|
|
|
|
2015-04-03 18:06:03 +00:00
|
|
|
if len(folder) == 0 {
|
2015-06-21 07:35:41 +00:00
|
|
|
// Reset all folders.
|
2015-09-29 18:05:22 +00:00
|
|
|
for folder := range s.cfg.Folders() {
|
2015-06-21 07:35:41 +00:00
|
|
|
s.model.ResetFolder(folder)
|
|
|
|
}
|
2015-04-28 21:12:19 +00:00
|
|
|
s.flushResponse(`{"ok": "resetting database"}`, w)
|
2015-04-03 18:06:03 +00:00
|
|
|
} else {
|
2015-06-21 07:35:41 +00:00
|
|
|
// Reset a specific folder, assuming it's supposed to exist.
|
|
|
|
s.model.ResetFolder(folder)
|
2015-06-19 06:30:19 +00:00
|
|
|
s.flushResponse(`{"ok": "resetting folder `+folder+`"}`, w)
|
2015-04-03 18:06:03 +00:00
|
|
|
}
|
2015-06-21 07:35:41 +00:00
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
go s.contr.Restart()
|
2014-03-02 22:58:14 +00:00
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
// postSystemShutdown acknowledges the request and then asks the
// controller to shut the process down. The shutdown runs in a goroutine
// so the response can be flushed to the client first.
func (s *service) postSystemShutdown(w http.ResponseWriter, r *http.Request) {
	s.flushResponse(`{"ok": "shutting down"}`, w)
	go s.contr.Shutdown()
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) flushResponse(resp string, w http.ResponseWriter) {
|
2019-02-02 11:16:27 +00:00
|
|
|
w.Write([]byte(resp + "\n"))
|
2014-05-13 00:15:18 +00:00
|
|
|
f := w.(http.Flusher)
|
|
|
|
f.Flush()
|
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
// getSystemStatus returns a grab bag of runtime state for the GUI: device
// ID, memory/goroutine stats, discovery status and errors, listener and
// dial status, CPU usage, uptime and GUI address information.
func (s *service) getSystemStatus(w http.ResponseWriter, r *http.Request) {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)

	tilde, _ := fs.ExpandTilde("~")
	res := make(map[string]interface{})
	res["myID"] = s.id.String()
	res["goroutines"] = runtime.NumGoroutine()
	res["alloc"] = m.Alloc
	// Approximate "in use from the OS" figure: total obtained minus what
	// has been returned to the OS.
	res["sys"] = m.Sys - m.HeapReleased
	res["tilde"] = tilde
	if s.cfg.Options().LocalAnnEnabled || s.cfg.Options().GlobalAnnEnabled {
		res["discoveryEnabled"] = true
		discoErrors := make(map[string]string)
		discoMethods := 0
		for disco, err := range s.discoverer.ChildErrors() {
			discoMethods++
			if err != nil {
				discoErrors[disco] = err.Error()
			}
		}
		res["discoveryMethods"] = discoMethods
		res["discoveryErrors"] = discoErrors
	}

	res["connectionServiceStatus"] = s.connectionsService.ListenerStatus()
	res["lastDialStatus"] = s.connectionsService.ConnectionStatus()
	// cpuUsage.Rate() is in milliseconds per second, so dividing by ten
	// gives us percent
	res["cpuPercent"] = s.cpu.Rate() / 10 / float64(runtime.NumCPU())
	res["pathSeparator"] = string(filepath.Separator)
	res["urVersionMax"] = ur.Version
	res["uptime"] = s.urService.UptimeS()
	res["startTime"] = ur.StartTime
	res["guiAddressOverridden"] = s.cfg.GUI().IsOverridden()
	res["guiAddressUsed"] = s.listenerAddr.String()

	sendJSON(w, res)
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getSystemError(w http.ResponseWriter, r *http.Request) {
|
2015-12-15 21:40:38 +00:00
|
|
|
sendJSON(w, map[string][]logger.Line{
|
2015-10-03 15:25:21 +00:00
|
|
|
"errors": s.guiErrors.Since(time.Time{}),
|
|
|
|
})
|
2014-03-02 22:58:14 +00:00
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) postSystemError(w http.ResponseWriter, r *http.Request) {
|
2014-07-05 19:40:29 +00:00
|
|
|
bs, _ := ioutil.ReadAll(r.Body)
|
|
|
|
r.Body.Close()
|
2015-10-03 15:25:21 +00:00
|
|
|
l.Warnln(string(bs))
|
2014-03-02 22:58:14 +00:00
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
// postSystemErrorClear empties the list of errors shown in the GUI.
func (s *service) postSystemErrorClear(w http.ResponseWriter, r *http.Request) {
	s.guiErrors.Clear()
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getSystemLog(w http.ResponseWriter, r *http.Request) {
|
2015-10-03 15:25:21 +00:00
|
|
|
q := r.URL.Query()
|
|
|
|
since, err := time.Parse(time.RFC3339, q.Get("since"))
|
2018-04-14 08:46:33 +00:00
|
|
|
if err != nil {
|
|
|
|
l.Debugln(err)
|
|
|
|
}
|
2015-12-15 21:40:38 +00:00
|
|
|
sendJSON(w, map[string][]logger.Line{
|
2015-10-03 15:25:21 +00:00
|
|
|
"messages": s.systemLog.Since(since),
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getSystemLogTxt(w http.ResponseWriter, r *http.Request) {
|
2015-10-03 15:25:21 +00:00
|
|
|
q := r.URL.Query()
|
|
|
|
since, err := time.Parse(time.RFC3339, q.Get("since"))
|
2018-04-14 08:46:33 +00:00
|
|
|
if err != nil {
|
|
|
|
l.Debugln(err)
|
|
|
|
}
|
2015-10-03 15:25:21 +00:00
|
|
|
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
|
|
|
|
|
|
|
|
for _, line := range s.systemLog.Since(since) {
|
|
|
|
fmt.Fprintf(w, "%s: %s\n", line.When.Format(time.RFC3339), line.Message)
|
2014-03-02 22:58:14 +00:00
|
|
|
}
|
|
|
|
}
|
2014-04-19 11:33:51 +00:00
|
|
|
|
2018-10-01 15:23:46 +00:00
|
|
|
// fileEntry is a named in-memory file, used to collect the pieces of a
// support bundle before zipping them.
type fileEntry struct {
	name string
	data []byte
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getSupportBundle(w http.ResponseWriter, r *http.Request) {
|
2018-10-01 15:23:46 +00:00
|
|
|
var files []fileEntry
|
|
|
|
|
|
|
|
// Redacted configuration as a JSON
|
2019-06-09 07:33:54 +00:00
|
|
|
if jsonConfig, err := json.MarshalIndent(getRedactedConfig(s), "", " "); err != nil {
|
2018-10-01 15:23:46 +00:00
|
|
|
l.Warnln("Support bundle: failed to create config.json:", err)
|
2019-06-09 07:33:54 +00:00
|
|
|
} else {
|
|
|
|
files = append(files, fileEntry{name: "config.json.txt", data: jsonConfig})
|
2018-10-01 15:23:46 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Log as a text
|
|
|
|
var buflog bytes.Buffer
|
|
|
|
for _, line := range s.systemLog.Since(time.Time{}) {
|
|
|
|
fmt.Fprintf(&buflog, "%s: %s\n", line.When.Format(time.RFC3339), line.Message)
|
|
|
|
}
|
2018-10-11 06:13:52 +00:00
|
|
|
files = append(files, fileEntry{name: "log-inmemory.txt", data: buflog.Bytes()})
|
2018-10-01 15:23:46 +00:00
|
|
|
|
|
|
|
// Errors as a JSON
|
|
|
|
if errs := s.guiErrors.Since(time.Time{}); len(errs) > 0 {
|
|
|
|
if jsonError, err := json.MarshalIndent(errs, "", " "); err != nil {
|
|
|
|
l.Warnln("Support bundle: failed to create errors.json:", err)
|
2019-06-09 07:33:54 +00:00
|
|
|
} else {
|
|
|
|
files = append(files, fileEntry{name: "errors.json.txt", data: jsonError})
|
2018-10-01 15:23:46 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-10-11 06:13:52 +00:00
|
|
|
// Panic files
|
2019-02-12 06:58:24 +00:00
|
|
|
if panicFiles, err := filepath.Glob(filepath.Join(locations.GetBaseDir(locations.ConfigBaseDir), "panic*")); err == nil {
|
2018-10-01 15:23:46 +00:00
|
|
|
for _, f := range panicFiles {
|
|
|
|
if panicFile, err := ioutil.ReadFile(f); err != nil {
|
|
|
|
l.Warnf("Support bundle: failed to load %s: %s", filepath.Base(f), err)
|
|
|
|
} else {
|
|
|
|
files = append(files, fileEntry{name: filepath.Base(f), data: panicFile})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-10-11 06:13:52 +00:00
|
|
|
// Archived log (default on Windows)
|
2019-02-12 06:58:24 +00:00
|
|
|
if logFile, err := ioutil.ReadFile(locations.Get(locations.LogFile)); err == nil {
|
2018-10-11 06:13:52 +00:00
|
|
|
files = append(files, fileEntry{name: "log-ondisk.txt", data: logFile})
|
|
|
|
}
|
|
|
|
|
2018-10-01 15:23:46 +00:00
|
|
|
// Version and platform information as a JSON
|
|
|
|
if versionPlatform, err := json.MarshalIndent(map[string]string{
|
|
|
|
"now": time.Now().Format(time.RFC3339),
|
2019-02-12 06:58:24 +00:00
|
|
|
"version": build.Version,
|
|
|
|
"codename": build.Codename,
|
|
|
|
"longVersion": build.LongVersion,
|
2018-10-01 15:23:46 +00:00
|
|
|
"os": runtime.GOOS,
|
|
|
|
"arch": runtime.GOARCH,
|
|
|
|
}, "", " "); err == nil {
|
|
|
|
files = append(files, fileEntry{name: "version-platform.json.txt", data: versionPlatform})
|
|
|
|
} else {
|
|
|
|
l.Warnln("Failed to create versionPlatform.json: ", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Report Data as a JSON
|
2019-03-26 19:53:58 +00:00
|
|
|
if usageReportingData, err := json.MarshalIndent(s.urService.ReportData(), "", " "); err != nil {
|
2018-10-01 15:23:46 +00:00
|
|
|
l.Warnln("Support bundle: failed to create versionPlatform.json:", err)
|
|
|
|
} else {
|
|
|
|
files = append(files, fileEntry{name: "usage-reporting.json.txt", data: usageReportingData})
|
|
|
|
}
|
|
|
|
|
|
|
|
// Heap and CPU Proofs as a pprof extension
|
|
|
|
var heapBuffer, cpuBuffer bytes.Buffer
|
2019-02-12 06:58:24 +00:00
|
|
|
filename := fmt.Sprintf("syncthing-heap-%s-%s-%s-%s.pprof", runtime.GOOS, runtime.GOARCH, build.Version, time.Now().Format("150405")) // hhmmss
|
2018-10-01 15:23:46 +00:00
|
|
|
runtime.GC()
|
2019-02-02 09:11:42 +00:00
|
|
|
if err := pprof.WriteHeapProfile(&heapBuffer); err == nil {
|
|
|
|
files = append(files, fileEntry{name: filename, data: heapBuffer.Bytes()})
|
|
|
|
}
|
2018-10-01 15:23:46 +00:00
|
|
|
|
|
|
|
const duration = 4 * time.Second
|
2019-02-12 06:58:24 +00:00
|
|
|
filename = fmt.Sprintf("syncthing-cpu-%s-%s-%s-%s.pprof", runtime.GOOS, runtime.GOARCH, build.Version, time.Now().Format("150405")) // hhmmss
|
2019-02-02 09:11:42 +00:00
|
|
|
if err := pprof.StartCPUProfile(&cpuBuffer); err == nil {
|
|
|
|
time.Sleep(duration)
|
|
|
|
pprof.StopCPUProfile()
|
|
|
|
files = append(files, fileEntry{name: filename, data: cpuBuffer.Bytes()})
|
|
|
|
}
|
2018-10-01 15:23:46 +00:00
|
|
|
|
|
|
|
// Add buffer files to buffer zip
|
|
|
|
var zipFilesBuffer bytes.Buffer
|
|
|
|
if err := writeZip(&zipFilesBuffer, files); err != nil {
|
|
|
|
l.Warnln("Support bundle: failed to create support bundle zip:", err)
|
|
|
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Set zip file name and path
|
2018-10-11 06:13:52 +00:00
|
|
|
zipFileName := fmt.Sprintf("support-bundle-%s-%s.zip", s.id.Short().String(), time.Now().Format("2006-01-02T150405"))
|
2019-02-12 06:58:24 +00:00
|
|
|
zipFilePath := filepath.Join(locations.GetBaseDir(locations.ConfigBaseDir), zipFileName)
|
2018-10-01 15:23:46 +00:00
|
|
|
|
|
|
|
// Write buffer zip to local zip file (back up)
|
|
|
|
if err := ioutil.WriteFile(zipFilePath, zipFilesBuffer.Bytes(), 0600); err != nil {
|
|
|
|
l.Warnln("Support bundle: support bundle zip could not be created:", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Serve the buffer zip to client for download
|
|
|
|
w.Header().Set("Content-Type", "application/zip")
|
|
|
|
w.Header().Set("Content-Disposition", "attachment; filename="+zipFileName)
|
2019-02-02 11:16:27 +00:00
|
|
|
io.Copy(w, &zipFilesBuffer)
|
2018-10-01 15:23:46 +00:00
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getSystemHTTPMetrics(w http.ResponseWriter, r *http.Request) {
|
2015-11-21 08:48:57 +00:00
|
|
|
stats := make(map[string]interface{})
|
|
|
|
metrics.Each(func(name string, intf interface{}) {
|
|
|
|
if m, ok := intf.(*metrics.StandardTimer); ok {
|
|
|
|
pct := m.Percentiles([]float64{0.50, 0.95, 0.99})
|
|
|
|
for i := range pct {
|
|
|
|
pct[i] /= 1e6 // ns to ms
|
|
|
|
}
|
|
|
|
stats[name] = map[string]interface{}{
|
|
|
|
"count": m.Count(),
|
|
|
|
"sumMs": m.Sum() / 1e6, // ns to ms
|
|
|
|
"ratesPerS": []float64{m.Rate1(), m.Rate5(), m.Rate15()},
|
|
|
|
"percentilesMs": pct,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
})
|
|
|
|
bs, _ := json.MarshalIndent(stats, "", " ")
|
2019-02-02 11:16:27 +00:00
|
|
|
w.Write(bs)
|
2015-11-21 08:48:57 +00:00
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getSystemDiscovery(w http.ResponseWriter, r *http.Request) {
|
2015-09-20 13:30:25 +00:00
|
|
|
devices := make(map[string]discover.CacheEntry)
|
2014-10-28 19:40:04 +00:00
|
|
|
|
2015-09-12 19:59:15 +00:00
|
|
|
if s.discoverer != nil {
|
2014-10-28 19:40:04 +00:00
|
|
|
// Device ids can't be marshalled as keys so we need to manually
|
|
|
|
// rebuild this map using strings. Discoverer may be nil if discovery
|
|
|
|
// has not started yet.
|
2015-09-20 13:30:25 +00:00
|
|
|
for device, entry := range s.discoverer.Cache() {
|
|
|
|
devices[device.String()] = entry
|
2014-10-28 19:40:04 +00:00
|
|
|
}
|
2014-10-15 18:23:28 +00:00
|
|
|
}
|
2014-10-15 20:52:06 +00:00
|
|
|
|
2015-12-15 21:40:38 +00:00
|
|
|
sendJSON(w, devices)
|
2014-05-13 01:08:55 +00:00
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getReport(w http.ResponseWriter, r *http.Request) {
|
|
|
|
version := ur.Version
|
2017-10-12 06:16:46 +00:00
|
|
|
if val, _ := strconv.Atoi(r.URL.Query().Get("version")); val > 0 {
|
|
|
|
version = val
|
|
|
|
}
|
2019-03-26 19:53:58 +00:00
|
|
|
sendJSON(w, s.urService.ReportDataPreview(version))
|
2014-06-11 18:04:23 +00:00
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getRandomString(w http.ResponseWriter, r *http.Request) {
|
2016-05-26 07:25:34 +00:00
|
|
|
length := 32
|
|
|
|
if val, _ := strconv.Atoi(r.URL.Query().Get("length")); val > 0 {
|
|
|
|
length = val
|
|
|
|
}
|
|
|
|
str := rand.String(length)
|
|
|
|
|
|
|
|
sendJSON(w, map[string]string{"random": str})
|
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getDBIgnores(w http.ResponseWriter, r *http.Request) {
|
2014-09-15 22:12:29 +00:00
|
|
|
qs := r.URL.Query()
|
|
|
|
|
2017-04-01 09:58:06 +00:00
|
|
|
folder := qs.Get("folder")
|
|
|
|
|
|
|
|
ignores, patterns, err := s.model.GetIgnores(folder)
|
2014-09-15 22:12:29 +00:00
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
2014-11-08 21:12:18 +00:00
|
|
|
|
2015-12-15 21:40:38 +00:00
|
|
|
sendJSON(w, map[string][]string{
|
2014-11-08 21:12:18 +00:00
|
|
|
"ignore": ignores,
|
2016-04-02 19:03:24 +00:00
|
|
|
"expanded": patterns,
|
2014-09-15 22:12:29 +00:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) postDBIgnores(w http.ResponseWriter, r *http.Request) {
|
2014-09-15 22:12:29 +00:00
|
|
|
qs := r.URL.Query()
|
|
|
|
|
2016-05-06 22:01:56 +00:00
|
|
|
bs, err := ioutil.ReadAll(r.Body)
|
2014-09-15 22:12:29 +00:00
|
|
|
r.Body.Close()
|
2016-05-06 22:01:56 +00:00
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
2014-09-19 20:02:53 +00:00
|
|
|
|
2016-05-06 22:01:56 +00:00
|
|
|
var data map[string][]string
|
|
|
|
err = json.Unmarshal(bs, &data)
|
2014-09-15 22:12:29 +00:00
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-04-28 21:12:19 +00:00
|
|
|
err = s.model.SetIgnores(qs.Get("folder"), data["ignore"])
|
2014-09-15 22:12:29 +00:00
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-04-28 21:12:19 +00:00
|
|
|
s.getDBIgnores(w, r)
|
2014-09-15 22:12:29 +00:00
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
// getIndexEvents serves the general event stream, filtered by the
// optional comma separated "events" query parameter. It also notifies the
// folder summary service that an event consumer is active.
func (s *service) getIndexEvents(w http.ResponseWriter, r *http.Request) {
	s.fss.OnEventRequest()
	mask := s.getEventMask(r.URL.Query().Get("events"))
	sub := s.getEventSub(mask)
	s.getEvents(w, r, sub)
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
// getDiskEvents serves the event stream restricted to local/remote disk
// change events (DiskEventMask).
func (s *service) getDiskEvents(w http.ResponseWriter, r *http.Request) {
	sub := s.getEventSub(DiskEventMask)
	s.getEvents(w, r, sub)
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
// getEvents implements the long-polling event API on top of a buffered
// subscription. Query parameters: "since" (last seen event ID), "limit"
// (max number of events, keeping the most recent) and "timeout" (seconds
// to wait for events; 0 is valid and means no waiting).
func (s *service) getEvents(w http.ResponseWriter, r *http.Request, eventSub events.BufferedSubscription) {
	qs := r.URL.Query()
	sinceStr := qs.Get("since")
	limitStr := qs.Get("limit")
	timeoutStr := qs.Get("timeout")
	// Atoi errors are deliberately ignored; zero values mean "from the
	// beginning" and "no limit" respectively.
	since, _ := strconv.Atoi(sinceStr)
	limit, _ := strconv.Atoi(limitStr)

	timeout := defaultEventTimeout
	if timeoutSec, timeoutErr := strconv.Atoi(timeoutStr); timeoutErr == nil && timeoutSec >= 0 { // 0 is a valid timeout
		timeout = time.Duration(timeoutSec) * time.Second
	}

	// Flush before blocking, to indicate that we've received the request and
	// that it should not be retried. Must set Content-Type header before
	// flushing.
	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	f := w.(http.Flusher)
	f.Flush()

	// If there are no events available return an empty slice, as this gets serialized as `[]`
	evs := eventSub.Since(since, []events.Event{}, timeout)
	if 0 < limit && limit < len(evs) {
		// Keep only the most recent events.
		evs = evs[len(evs)-limit:]
	}

	sendJSON(w, evs)
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getEventMask(evs string) events.EventType {
|
|
|
|
eventMask := DefaultEventMask
|
2017-04-13 17:14:34 +00:00
|
|
|
if evs != "" {
|
|
|
|
eventList := strings.Split(evs, ",")
|
|
|
|
eventMask = 0
|
|
|
|
for _, ev := range eventList {
|
|
|
|
eventMask |= events.UnmarshalEventType(strings.TrimSpace(ev))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return eventMask
|
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getEventSub(mask events.EventType) events.BufferedSubscription {
|
2017-04-13 17:14:34 +00:00
|
|
|
s.eventSubsMut.Lock()
|
|
|
|
bufsub, ok := s.eventSubs[mask]
|
|
|
|
if !ok {
|
2019-08-15 14:29:37 +00:00
|
|
|
evsub := s.evLogger.Subscribe(mask)
|
2019-03-26 19:53:58 +00:00
|
|
|
bufsub = events.NewBufferedSubscription(evsub, EventSubBufferSize)
|
2017-04-13 17:14:34 +00:00
|
|
|
s.eventSubs[mask] = bufsub
|
|
|
|
}
|
|
|
|
s.eventSubsMut.Unlock()
|
|
|
|
|
|
|
|
return bufsub
|
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getSystemUpgrade(w http.ResponseWriter, r *http.Request) {
|
|
|
|
if s.noUpgrade {
|
2015-01-06 21:40:52 +00:00
|
|
|
http.Error(w, upgrade.ErrUpgradeUnsupported.Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
2017-01-27 12:17:06 +00:00
|
|
|
opts := s.cfg.Options()
|
2019-02-12 06:58:24 +00:00
|
|
|
rel, err := upgrade.LatestRelease(opts.ReleasesURL, build.Version, opts.UpgradeToPreReleases)
|
2014-07-14 08:45:29 +00:00
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
res := make(map[string]interface{})
|
2019-02-12 06:58:24 +00:00
|
|
|
res["running"] = build.Version
|
2014-07-14 08:45:29 +00:00
|
|
|
res["latest"] = rel.Tag
|
2019-02-12 06:58:24 +00:00
|
|
|
res["newer"] = upgrade.CompareVersions(rel.Tag, build.Version) == upgrade.Newer
|
|
|
|
res["majorNewer"] = upgrade.CompareVersions(rel.Tag, build.Version) == upgrade.MajorNewer
|
2014-07-14 08:45:29 +00:00
|
|
|
|
2015-12-15 21:40:38 +00:00
|
|
|
sendJSON(w, res)
|
2014-07-14 08:45:29 +00:00
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getDeviceID(w http.ResponseWriter, r *http.Request) {
|
2014-07-18 08:00:02 +00:00
|
|
|
qs := r.URL.Query()
|
|
|
|
idStr := qs.Get("id")
|
2014-09-28 11:00:38 +00:00
|
|
|
id, err := protocol.DeviceIDFromString(idStr)
|
2015-12-15 21:40:38 +00:00
|
|
|
|
2014-07-18 08:00:02 +00:00
|
|
|
if err == nil {
|
2015-12-15 21:40:38 +00:00
|
|
|
sendJSON(w, map[string]string{
|
2014-07-18 08:00:02 +00:00
|
|
|
"id": id.String(),
|
|
|
|
})
|
|
|
|
} else {
|
2015-12-15 21:40:38 +00:00
|
|
|
sendJSON(w, map[string]string{
|
2014-07-18 08:00:02 +00:00
|
|
|
"error": err.Error(),
|
|
|
|
})
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getLang(w http.ResponseWriter, r *http.Request) {
|
2014-07-26 20:30:29 +00:00
|
|
|
lang := r.Header.Get("Accept-Language")
|
|
|
|
var langs []string
|
|
|
|
for _, l := range strings.Split(lang, ",") {
|
2014-08-14 15:04:17 +00:00
|
|
|
parts := strings.SplitN(l, ";", 2)
|
2014-08-28 11:23:23 +00:00
|
|
|
langs = append(langs, strings.ToLower(strings.TrimSpace(parts[0])))
|
2014-07-26 20:30:29 +00:00
|
|
|
}
|
2015-12-15 21:40:38 +00:00
|
|
|
sendJSON(w, langs)
|
2014-07-26 20:30:29 +00:00
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) postSystemUpgrade(w http.ResponseWriter, r *http.Request) {
|
2017-01-27 12:17:06 +00:00
|
|
|
opts := s.cfg.Options()
|
2019-02-12 06:58:24 +00:00
|
|
|
rel, err := upgrade.LatestRelease(opts.ReleasesURL, build.Version, opts.UpgradeToPreReleases)
|
2014-07-14 08:45:29 +00:00
|
|
|
if err != nil {
|
2014-08-17 08:28:36 +00:00
|
|
|
l.Warnln("getting latest release:", err)
|
2014-07-14 08:45:29 +00:00
|
|
|
http.Error(w, err.Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-02-12 06:58:24 +00:00
|
|
|
if upgrade.CompareVersions(rel.Tag, build.Version) > upgrade.Equal {
|
2014-12-08 15:36:15 +00:00
|
|
|
err = upgrade.To(rel)
|
2014-07-31 14:01:23 +00:00
|
|
|
if err != nil {
|
2014-08-17 08:28:36 +00:00
|
|
|
l.Warnln("upgrading:", err)
|
2014-07-31 14:01:23 +00:00
|
|
|
http.Error(w, err.Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-04-28 21:12:19 +00:00
|
|
|
s.flushResponse(`{"ok": "restarting"}`, w)
|
2019-03-26 19:53:58 +00:00
|
|
|
s.contr.ExitUpgrading()
|
2014-07-31 14:01:23 +00:00
|
|
|
}
|
2014-07-14 08:45:29 +00:00
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) makeDevicePauseHandler(paused bool) http.HandlerFunc {
|
2016-12-21 18:41:25 +00:00
|
|
|
return func(w http.ResponseWriter, r *http.Request) {
|
|
|
|
var qs = r.URL.Query()
|
|
|
|
var deviceStr = qs.Get("device")
|
2015-08-23 19:56:10 +00:00
|
|
|
|
2017-03-04 07:54:13 +00:00
|
|
|
var cfgs []config.DeviceConfiguration
|
|
|
|
|
|
|
|
if deviceStr == "" {
|
|
|
|
for _, cfg := range s.cfg.Devices() {
|
|
|
|
cfg.Paused = paused
|
|
|
|
cfgs = append(cfgs, cfg)
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
device, err := protocol.DeviceIDFromString(deviceStr)
|
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
cfg, ok := s.cfg.Devices()[device]
|
|
|
|
if !ok {
|
|
|
|
http.Error(w, "not found", http.StatusNotFound)
|
|
|
|
return
|
|
|
|
}
|
2015-08-23 19:56:10 +00:00
|
|
|
|
2017-03-04 07:54:13 +00:00
|
|
|
cfg.Paused = paused
|
|
|
|
cfgs = append(cfgs, cfg)
|
2016-12-21 18:41:25 +00:00
|
|
|
}
|
2015-08-23 19:56:10 +00:00
|
|
|
|
2017-12-07 07:08:24 +00:00
|
|
|
if _, err := s.cfg.SetDevices(cfgs); err != nil {
|
2016-12-21 18:41:25 +00:00
|
|
|
http.Error(w, err.Error(), 500)
|
|
|
|
}
|
2015-08-23 19:56:10 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) postDBScan(w http.ResponseWriter, r *http.Request) {
|
2014-08-11 18:20:01 +00:00
|
|
|
qs := r.URL.Query()
|
2014-09-28 11:00:38 +00:00
|
|
|
folder := qs.Get("folder")
|
2015-02-11 18:52:59 +00:00
|
|
|
if folder != "" {
|
2015-03-27 08:51:18 +00:00
|
|
|
subs := qs["sub"]
|
2017-02-05 18:17:44 +00:00
|
|
|
err := s.model.ScanFolderSubdirs(folder, subs)
|
2015-02-11 18:52:59 +00:00
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), 500)
|
2015-05-01 12:30:17 +00:00
|
|
|
return
|
2015-02-11 18:52:59 +00:00
|
|
|
}
|
2017-02-05 18:17:44 +00:00
|
|
|
nextStr := qs.Get("next")
|
|
|
|
next, err := strconv.Atoi(nextStr)
|
|
|
|
if err == nil {
|
|
|
|
s.model.DelayScan(folder, time.Duration(next)*time.Second)
|
|
|
|
}
|
2015-02-11 18:52:59 +00:00
|
|
|
} else {
|
2015-04-28 21:12:19 +00:00
|
|
|
errors := s.model.ScanFolders()
|
2015-02-11 18:52:59 +00:00
|
|
|
if len(errors) > 0 {
|
|
|
|
http.Error(w, "Error scanning folders", 500)
|
2015-12-15 21:40:38 +00:00
|
|
|
sendJSON(w, errors)
|
2015-05-01 12:30:17 +00:00
|
|
|
return
|
2015-02-11 18:52:59 +00:00
|
|
|
}
|
2014-08-11 18:20:01 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) postDBPrio(w http.ResponseWriter, r *http.Request) {
|
Add job queue (fixes #629)
Request to terminate currently ongoing downloads and jump to the bumped file
incoming in 3, 2, 1.
Also, has a slightly strange effect where we pop a job off the queue, but
the copyChannel is still busy and blocks, though it gets moved to the
progress slice in the jobqueue, and looks like it's in progress which it isn't
as it's waiting to be picked up from the copyChan.
As a result, the progress emitter doesn't register on the task, and hence the file
doesn't have a progress bar, but cannot be replaced by a bump.
I guess I can fix progress bar issue by moving the progressEmiter.Register just
before passing the file to the copyChan, but then we are back to the initial
problem of a file with a progress bar, but no progress happening as it's stuck
on write to copyChan
I checked if there is a way to check for channel writeability (before popping)
but got struck by lightning just for bringing the idea up in #go-nuts.
My ideal scenario would be to check if copyChan is writeable, pop job from the
queue and shove it down handleFile. This way jobs would stay in the queue while
they cannot be handled, meaning that the `Bump` could bring your file up higher.
2014-12-01 19:23:06 +00:00
|
|
|
qs := r.URL.Query()
|
|
|
|
folder := qs.Get("folder")
|
|
|
|
file := qs.Get("file")
|
2015-04-28 21:12:19 +00:00
|
|
|
s.model.BringToFront(folder, file)
|
|
|
|
s.getDBNeed(w, r)
|
Add job queue (fixes #629)
Request to terminate currently ongoing downloads and jump to the bumped file
incoming in 3, 2, 1.
Also, has a slightly strange effect where we pop a job off the queue, but
the copyChannel is still busy and blocks, though it gets moved to the
progress slice in the jobqueue, and looks like it's in progress which it isn't
as it's waiting to be picked up from the copyChan.
As a result, the progress emitter doesn't register on the task, and hence the file
doesn't have a progress bar, but cannot be replaced by a bump.
I guess I can fix progress bar issue by moving the progressEmiter.Register just
before passing the file to the copyChan, but then we are back to the initial
problem of a file with a progress bar, but no progress happening as it's stuck
on write to copyChan
I checked if there is a way to check for channel writeability (before popping)
but got struck by lightning just for bringing the idea up in #go-nuts.
My ideal scenario would be to check if copyChan is writeable, pop job from the
queue and shove it down handleFile. This way jobs would stay in the queue while
they cannot be handled, meaning that the `Bump` could bring your file up higher.
2014-12-01 19:23:06 +00:00
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getQR(w http.ResponseWriter, r *http.Request) {
|
2014-08-04 20:53:37 +00:00
|
|
|
var qs = r.URL.Query()
|
|
|
|
var text = qs.Get("text")
|
2014-07-05 19:40:29 +00:00
|
|
|
code, err := qr.Encode(text, qr.M)
|
2014-05-21 18:06:14 +00:00
|
|
|
if err != nil {
|
|
|
|
http.Error(w, "Invalid", 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
w.Header().Set("Content-Type", "image/png")
|
2019-02-02 11:16:27 +00:00
|
|
|
w.Write(code.PNG())
|
2014-05-21 18:06:14 +00:00
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getPeerCompletion(w http.ResponseWriter, r *http.Request) {
|
2014-07-29 11:01:27 +00:00
|
|
|
tot := map[string]float64{}
|
|
|
|
count := map[string]float64{}
|
|
|
|
|
2015-09-29 18:05:22 +00:00
|
|
|
for _, folder := range s.cfg.Folders() {
|
2014-09-28 11:00:38 +00:00
|
|
|
for _, device := range folder.DeviceIDs() {
|
|
|
|
deviceStr := device.String()
|
2017-11-21 07:25:38 +00:00
|
|
|
if _, ok := s.model.Connection(device); ok {
|
2016-08-12 06:41:43 +00:00
|
|
|
tot[deviceStr] += s.model.Completion(device, folder.ID).CompletionPct
|
2014-07-29 11:01:27 +00:00
|
|
|
} else {
|
2014-09-28 11:00:38 +00:00
|
|
|
tot[deviceStr] = 0
|
2014-07-29 11:01:27 +00:00
|
|
|
}
|
2014-09-28 11:00:38 +00:00
|
|
|
count[deviceStr]++
|
2014-07-29 11:01:27 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
comp := map[string]int{}
|
2014-09-28 11:00:38 +00:00
|
|
|
for device := range tot {
|
|
|
|
comp[device] = int(tot[device] / count[device])
|
2014-07-29 11:01:27 +00:00
|
|
|
}
|
|
|
|
|
2015-12-15 21:40:38 +00:00
|
|
|
sendJSON(w, comp)
|
2014-07-29 11:01:27 +00:00
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getFolderVersions(w http.ResponseWriter, r *http.Request) {
|
2018-01-01 14:39:23 +00:00
|
|
|
qs := r.URL.Query()
|
|
|
|
versions, err := s.model.GetFolderVersions(qs.Get("folder"))
|
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
sendJSON(w, versions)
|
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) postFolderVersionsRestore(w http.ResponseWriter, r *http.Request) {
|
2018-01-01 14:39:23 +00:00
|
|
|
qs := r.URL.Query()
|
|
|
|
|
|
|
|
bs, err := ioutil.ReadAll(r.Body)
|
|
|
|
r.Body.Close()
|
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
var versions map[string]time.Time
|
|
|
|
err = json.Unmarshal(bs, &versions)
|
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
ferr, err := s.model.RestoreFolderVersions(qs.Get("folder"), versions)
|
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), 500)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
sendJSON(w, ferr)
|
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getFolderErrors(w http.ResponseWriter, r *http.Request) {
|
2018-01-14 17:01:06 +00:00
|
|
|
qs := r.URL.Query()
|
|
|
|
folder := qs.Get("folder")
|
|
|
|
page, perpage := getPagingParams(qs)
|
|
|
|
|
2018-11-07 10:04:41 +00:00
|
|
|
errors, err := s.model.FolderErrors(folder)
|
2018-01-14 17:01:06 +00:00
|
|
|
|
|
|
|
if err != nil {
|
|
|
|
http.Error(w, err.Error(), http.StatusNotFound)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
start := (page - 1) * perpage
|
|
|
|
if start >= len(errors) {
|
|
|
|
errors = nil
|
|
|
|
} else {
|
|
|
|
errors = errors[start:]
|
|
|
|
if perpage < len(errors) {
|
|
|
|
errors = errors[:perpage]
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
sendJSON(w, map[string]interface{}{
|
|
|
|
"folder": folder,
|
|
|
|
"errors": errors,
|
|
|
|
"page": page,
|
|
|
|
"perpage": perpage,
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getSystemBrowse(w http.ResponseWriter, r *http.Request) {
|
2014-11-16 19:30:49 +00:00
|
|
|
qs := r.URL.Query()
|
|
|
|
current := qs.Get("current")
|
2017-12-13 09:34:47 +00:00
|
|
|
|
2017-08-19 14:36:56 +00:00
|
|
|
// Default value or in case of error unmarshalling ends up being basic fs.
|
|
|
|
var fsType fs.FilesystemType
|
2019-02-02 11:16:27 +00:00
|
|
|
fsType.UnmarshalText([]byte(qs.Get("filesystem")))
|
2017-08-19 14:36:56 +00:00
|
|
|
|
2017-12-13 09:34:47 +00:00
|
|
|
sendJSON(w, browseFiles(current, fsType))
|
|
|
|
}
|
|
|
|
|
2018-11-01 19:13:11 +00:00
|
|
|
// Prefix match classifications, ordered from best to worst.
const (
	matchExact int = iota
	matchCaseIns
	noMatch
)

// checkPrefixMatch reports how well prefix matches the start of s:
// exactly, only when ignoring case, or not at all.
func checkPrefixMatch(s, prefix string) int {
	switch {
	case strings.HasPrefix(s, prefix):
		return matchExact
	case strings.HasPrefix(strings.ToLower(s), strings.ToLower(prefix)):
		return matchCaseIns
	default:
		return noMatch
	}
}
|
|
|
|
|
2017-12-13 09:34:47 +00:00
|
|
|
func browseFiles(current string, fsType fs.FilesystemType) []string {
|
2016-06-20 20:25:00 +00:00
|
|
|
if current == "" {
|
2017-08-19 14:36:56 +00:00
|
|
|
filesystem := fs.NewFilesystem(fsType, "")
|
|
|
|
if roots, err := filesystem.Roots(); err == nil {
|
2017-12-13 09:34:47 +00:00
|
|
|
return roots
|
2016-05-31 19:27:07 +00:00
|
|
|
}
|
2017-12-13 09:34:47 +00:00
|
|
|
return nil
|
2016-05-31 19:27:07 +00:00
|
|
|
}
|
2017-08-19 14:36:56 +00:00
|
|
|
search, _ := fs.ExpandTilde(current)
|
|
|
|
pathSeparator := string(fs.PathSeparator)
|
|
|
|
|
2014-11-16 19:30:49 +00:00
|
|
|
if strings.HasSuffix(current, pathSeparator) && !strings.HasSuffix(search, pathSeparator) {
|
|
|
|
search = search + pathSeparator
|
|
|
|
}
|
2017-08-19 14:36:56 +00:00
|
|
|
searchDir := filepath.Dir(search)
|
2017-12-13 09:34:47 +00:00
|
|
|
|
|
|
|
// The searchFile should be the last component of search, or empty if it
|
|
|
|
// ends with a path separator
|
|
|
|
var searchFile string
|
|
|
|
if !strings.HasSuffix(search, pathSeparator) {
|
|
|
|
searchFile = filepath.Base(search)
|
|
|
|
}
|
2017-08-19 14:36:56 +00:00
|
|
|
|
|
|
|
fs := fs.NewFilesystem(fsType, searchDir)
|
|
|
|
|
2018-11-01 19:13:11 +00:00
|
|
|
subdirectories, _ := fs.DirNames(".")
|
|
|
|
|
|
|
|
exactMatches := make([]string, 0, len(subdirectories))
|
|
|
|
caseInsMatches := make([]string, 0, len(subdirectories))
|
2017-08-19 14:36:56 +00:00
|
|
|
|
2014-11-16 19:30:49 +00:00
|
|
|
for _, subdirectory := range subdirectories {
|
2017-08-19 14:36:56 +00:00
|
|
|
info, err := fs.Stat(subdirectory)
|
2018-11-01 19:13:11 +00:00
|
|
|
if err != nil || !info.IsDir() {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
switch checkPrefixMatch(subdirectory, searchFile) {
|
|
|
|
case matchExact:
|
|
|
|
exactMatches = append(exactMatches, filepath.Join(searchDir, subdirectory)+pathSeparator)
|
|
|
|
case matchCaseIns:
|
|
|
|
caseInsMatches = append(caseInsMatches, filepath.Join(searchDir, subdirectory)+pathSeparator)
|
2014-11-16 19:30:49 +00:00
|
|
|
}
|
|
|
|
}
|
2018-11-01 19:13:11 +00:00
|
|
|
|
|
|
|
// sort to return matches in deterministic order (don't depend on file system order)
|
|
|
|
sort.Strings(exactMatches)
|
|
|
|
sort.Strings(caseInsMatches)
|
|
|
|
return append(exactMatches, caseInsMatches...)
|
2014-11-16 19:30:49 +00:00
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getCPUProf(w http.ResponseWriter, r *http.Request) {
|
2016-08-02 11:06:45 +00:00
|
|
|
duration, err := time.ParseDuration(r.FormValue("duration"))
|
|
|
|
if err != nil {
|
|
|
|
duration = 30 * time.Second
|
|
|
|
}
|
|
|
|
|
2019-02-12 06:58:24 +00:00
|
|
|
filename := fmt.Sprintf("syncthing-cpu-%s-%s-%s-%s.pprof", runtime.GOOS, runtime.GOARCH, build.Version, time.Now().Format("150405")) // hhmmss
|
2016-08-02 11:06:45 +00:00
|
|
|
|
|
|
|
w.Header().Set("Content-Type", "application/octet-stream")
|
|
|
|
w.Header().Set("Content-Disposition", "attachment; filename="+filename)
|
|
|
|
|
2019-02-02 09:11:42 +00:00
|
|
|
if err := pprof.StartCPUProfile(w); err == nil {
|
|
|
|
time.Sleep(duration)
|
|
|
|
pprof.StopCPUProfile()
|
|
|
|
}
|
2016-08-02 11:06:45 +00:00
|
|
|
}
|
|
|
|
|
2019-03-26 19:53:58 +00:00
|
|
|
func (s *service) getHeapProf(w http.ResponseWriter, r *http.Request) {
|
2019-02-12 06:58:24 +00:00
|
|
|
filename := fmt.Sprintf("syncthing-heap-%s-%s-%s-%s.pprof", runtime.GOOS, runtime.GOARCH, build.Version, time.Now().Format("150405")) // hhmmss
|
2016-08-02 11:06:45 +00:00
|
|
|
|
|
|
|
w.Header().Set("Content-Type", "application/octet-stream")
|
|
|
|
w.Header().Set("Content-Disposition", "attachment; filename="+filename)
|
|
|
|
|
|
|
|
runtime.GC()
|
2019-02-02 11:16:27 +00:00
|
|
|
pprof.WriteHeapProfile(w)
|
2016-08-02 11:06:45 +00:00
|
|
|
}
|
|
|
|
|
2019-10-15 09:25:12 +00:00
|
|
|
func toJsonFileInfoSlice(fs []db.FileInfoTruncated) []jsonFileInfoTrunc {
|
|
|
|
res := make([]jsonFileInfoTrunc, len(fs))
|
2015-04-20 13:37:04 +00:00
|
|
|
for i, f := range fs {
|
2019-10-15 09:25:12 +00:00
|
|
|
res[i] = jsonFileInfoTrunc(f)
|
2015-04-20 13:37:04 +00:00
|
|
|
}
|
|
|
|
return res
|
|
|
|
}
|
|
|
|
|
|
|
|
// Type wrappers for nice JSON serialization
|
|
|
|
|
|
|
|
type jsonFileInfo protocol.FileInfo
|
|
|
|
|
|
|
|
func (f jsonFileInfo) MarshalJSON() ([]byte, error) {
|
2019-10-15 09:25:12 +00:00
|
|
|
m := fileIntfJSONMap(protocol.FileInfo(f))
|
|
|
|
m["numBlocks"] = len(f.Blocks)
|
|
|
|
return json.Marshal(m)
|
|
|
|
}
|
|
|
|
|
|
|
|
type jsonFileInfoTrunc db.FileInfoTruncated
|
|
|
|
|
|
|
|
func (f jsonFileInfoTrunc) MarshalJSON() ([]byte, error) {
|
|
|
|
m := fileIntfJSONMap(db.FileInfoTruncated(f))
|
|
|
|
m["numBlocks"] = nil // explicitly unknown
|
|
|
|
return json.Marshal(m)
|
|
|
|
}
|
|
|
|
|
|
|
|
func fileIntfJSONMap(f db.FileIntf) map[string]interface{} {
|
|
|
|
out := map[string]interface{}{
|
|
|
|
"name": f.FileName(),
|
|
|
|
"type": f.FileType().String(),
|
|
|
|
"size": f.FileSize(),
|
|
|
|
"deleted": f.IsDeleted(),
|
|
|
|
"invalid": f.IsInvalid(),
|
|
|
|
"ignored": f.IsIgnored(),
|
|
|
|
"mustRescan": f.MustRescan(),
|
|
|
|
"noPermissions": !f.HasPermissionBits(),
|
|
|
|
"modified": f.ModTime(),
|
|
|
|
"modifiedBy": f.FileModifiedBy().String(),
|
|
|
|
"sequence": f.SequenceNo(),
|
|
|
|
"version": jsonVersionVector(f.FileVersion()),
|
|
|
|
"localFlags": f.FileLocalFlags(),
|
|
|
|
}
|
|
|
|
if f.HasPermissionBits() {
|
|
|
|
out["permissions"] = fmt.Sprintf("%#o", f.FilePermissions())
|
|
|
|
}
|
|
|
|
return out
|
2015-04-20 13:37:04 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
type jsonVersionVector protocol.Vector
|
|
|
|
|
|
|
|
func (v jsonVersionVector) MarshalJSON() ([]byte, error) {
|
2016-07-04 10:40:29 +00:00
|
|
|
res := make([]string, len(v.Counters))
|
|
|
|
for i, c := range v.Counters {
|
2016-01-20 19:10:22 +00:00
|
|
|
res[i] = fmt.Sprintf("%v:%d", c.ID, c.Value)
|
Add job queue (fixes #629)
Request to terminate currently ongoing downloads and jump to the bumped file
incoming in 3, 2, 1.
Also, has a slightly strange effect where we pop a job off the queue, but
the copyChannel is still busy and blocks, though it gets moved to the
progress slice in the jobqueue, and looks like it's in progress which it isn't
as it's waiting to be picked up from the copyChan.
As a result, the progress emitter doesn't register on the task, and hence the file
doesn't have a progress bar, but cannot be replaced by a bump.
I guess I can fix progress bar issue by moving the progressEmiter.Register just
before passing the file to the copyChan, but then we are back to the initial
problem of a file with a progress bar, but no progress happening as it's stuck
on write to copyChan
I checked if there is a way to check for channel writeability (before popping)
but got struck by lightning just for bringing the idea up in #go-nuts.
My ideal scenario would be to check if copyChan is writeable, pop job from the
queue and shove it down handleFile. This way jobs would stay in the queue while
they cannot be handled, meaning that the `Bump` could bring your file up higher.
2014-12-01 19:23:06 +00:00
|
|
|
}
|
2015-04-20 13:37:04 +00:00
|
|
|
return json.Marshal(res)
|
Add job queue (fixes #629)
Request to terminate currently ongoing downloads and jump to the bumped file
incoming in 3, 2, 1.
Also, has a slightly strange effect where we pop a job off the queue, but
the copyChannel is still busy and blocks, though it gets moved to the
progress slice in the jobqueue, and looks like it's in progress which it isn't
as it's waiting to be picked up from the copyChan.
As a result, the progress emitter doesn't register on the task, and hence the file
doesn't have a progress bar, but cannot be replaced by a bump.
I guess I can fix progress bar issue by moving the progressEmiter.Register just
before passing the file to the copyChan, but then we are back to the initial
problem of a file with a progress bar, but no progress happening as it's stuck
on write to copyChan
I checked if there is a way to check for channel writeability (before popping)
but got struck by lightning just for bringing the idea up in #go-nuts.
My ideal scenario would be to check if copyChan is writeable, pop job from the
queue and shove it down handleFile. This way jobs would stay in the queue while
they cannot be handled, meaning that the `Bump` could bring your file up higher.
2014-12-01 19:23:06 +00:00
|
|
|
}
|
2016-03-12 12:17:25 +00:00
|
|
|
|
|
|
|
func dirNames(dir string) []string {
|
|
|
|
fd, err := os.Open(dir)
|
|
|
|
if err != nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
defer fd.Close()
|
|
|
|
|
|
|
|
fis, err := fd.Readdir(-1)
|
|
|
|
if err != nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
var dirs []string
|
|
|
|
for _, fi := range fis {
|
|
|
|
if fi.IsDir() {
|
|
|
|
dirs = append(dirs, filepath.Base(fi.Name()))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
sort.Strings(dirs)
|
|
|
|
return dirs
|
|
|
|
}
|
2016-09-03 08:33:34 +00:00
|
|
|
|
|
|
|
// addressIsLocalhost reports whether addr (a host or host:port string)
// refers to the local host, either by name or by a loopback IP.
func addressIsLocalhost(addr string) bool {
	host, _, err := net.SplitHostPort(addr)
	if err != nil {
		// There was no port, so we assume the address was just a hostname.
		host = addr
	}

	host = strings.ToLower(host)
	if host == "localhost" || host == "localhost." {
		return true
	}

	ip := net.ParseIP(host)
	if ip == nil {
		// Not an IP address, and not the localhost name.
		return false
	}
	return ip.IsLoopback()
}
|
2019-10-16 18:31:46 +00:00
|
|
|
|
|
|
|
func checkExpiry(cert tls.Certificate) error {
|
|
|
|
leaf := cert.Leaf
|
|
|
|
if leaf == nil {
|
|
|
|
// Leaf can be nil or not, depending on how parsed the certificate
|
|
|
|
// was when we got it.
|
|
|
|
if len(cert.Certificate) < 1 {
|
|
|
|
// can't happen
|
|
|
|
return errors.New("no certificate in certificate")
|
|
|
|
}
|
|
|
|
var err error
|
|
|
|
leaf, err = x509.ParseCertificate(cert.Certificate[0])
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if leaf.Subject.String() != leaf.Issuer.String() ||
|
|
|
|
len(leaf.DNSNames) != 0 || len(leaf.IPAddresses) != 0 {
|
|
|
|
// The certificate is not self signed, or has DNS/IP attributes we don't
|
|
|
|
// add, so we leave it alone.
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
if leaf.NotAfter.Before(time.Now()) {
|
|
|
|
return errors.New("certificate has expired")
|
|
|
|
}
|
|
|
|
if leaf.NotAfter.Before(time.Now().Add(30 * 24 * time.Hour)) {
|
|
|
|
return errors.New("certificate will soon expire")
|
|
|
|
}
|
|
|
|
|
|
|
|
// On macOS, check for certificates issued on or after July 1st, 2019,
|
|
|
|
// with a longer validity time than 825 days.
|
|
|
|
cutoff := time.Date(2019, 7, 1, 0, 0, 0, 0, time.UTC)
|
|
|
|
if runtime.GOOS == "darwin" &&
|
|
|
|
leaf.NotBefore.After(cutoff) &&
|
|
|
|
leaf.NotAfter.Sub(leaf.NotBefore) > 825*24*time.Hour {
|
|
|
|
return errors.New("certificate incompatible with macOS 10.15 (Catalina)")
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|