This adds a "token manager" which handles storing, checking, and expiring tokens, used for both sessions and CSRF tokens. It removes the old, corresponding functionality for CSRF tokens, which saved them to a file. The result is less clutter in the state directory, and active login sessions now survive a Syncthing restart (this really annoyed me). It also adds a boolean on login to create a longer-lived session cookie, which is now possible and useful: we can remain logged in over browser restarts, which was also annoying... :)

Screenshot: https://github.com/syncthing/syncthing/assets/125426/55cb20c8-78fc-453e-825d-655b94c8623b

Best viewed with a whitespace-insensitive diff, as several of the auth functions became methods instead of closures, which changed their indentation.
Commit aa901790b9 (parent 17df4b8634)
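For orientation, a minimal client-side sketch of the new login flow follows. It is hypothetical and not part of the commit: the endpoint path, the JSON fields (including the new stayLoggedIn flag) and the 204 No Content response come from the changes below, while the base URL and the credentials are placeholders.

    package main

    import (
        "bytes"
        "fmt"
        "net/http"
        "net/http/cookiejar"
    )

    func main() {
        // A cookie jar so the session cookie set by the server is retained.
        jar, _ := cookiejar.New(nil)
        client := &http.Client{Jar: jar}

        // stayLoggedIn=true asks for a persistent session cookie instead of a
        // browser-session cookie. Username, password and address are made up.
        body := []byte(`{"username":"admin","password":"secret","stayLoggedIn":true}`)
        resp, err := client.Post("http://localhost:8384/rest/noauth/auth/password",
            "application/json", bytes.NewReader(body))
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        // On success the server replies 204 No Content and sets a
        // "sessionid-<short device ID>" cookie backed by the token manager.
        fmt.Println(resp.StatusCode, resp.Cookies())
    }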
@@ -862,6 +862,7 @@ func cleanConfigDirectory() {
        "backup-of-v0.8":     30 * 24 * time.Hour, // these neither
        "tmp-index-sorter.*": time.Minute,         // these should never exist on startup
        "support-bundle-*":   30 * 24 * time.Hour, // keep old support bundle zip or folder for a month
        "csrftokens.txt":     0,                   // deprecated, remove immediately
    }

    for pat, dur := range patterns {
@@ -359,6 +359,12 @@
          <input id="password" class="form-control" type="password" name="password" ng-model="login.password" ng-trim="false" autocomplete="current-password" />
        </div>

        <div class="form-group">
          <label>
            <input type="checkbox" ng-model="login.stayLoggedIn" > <span translate>Stay logged in</span>
          </label>
        </div>

        <div class="row">
          <div class="col-md-9 login-form-messages">
            <p ng-if="login.errors.badLogin" class="text-danger" translate>
@@ -103,6 +103,7 @@ angular.module('syncthing.core')
            $http.post(authUrlbase + '/password', {
                username: $scope.login.username,
                password: $scope.login.password,
                stayLoggedIn: $scope.login.stayLoggedIn,
            }).then(function () {
                location.reload();
            }).catch(function (response) {

@@ -3602,7 +3603,7 @@ angular.module('syncthing.core')
                return n.match !== "";
            });
        };

        // The showModal and hideModal functions are a bandaid for a Bootstrap
        // bug (see https://github.com/twbs/bootstrap/issues/3902) that causes
        // multiple consecutively shown or hidden modals to overlap which leads
@@ -91,6 +91,7 @@ type service struct {
    startupErr   error
    listenerAddr net.Addr
    exitChan     chan *svcutil.FatalErr
    miscDB       *db.NamespacedKV

    guiErrors logger.Recorder
    systemLog logger.Recorder

@@ -104,7 +105,7 @@ type Service interface {
    WaitForStart() error
}

func New(id protocol.DeviceID, cfg config.Wrapper, assetDir, tlsDefaultCommonName string, m model.Model, defaultSub, diskSub events.BufferedSubscription, evLogger events.Logger, discoverer discover.Manager, connectionsService connections.Service, urService *ur.Service, fss model.FolderSummaryService, errors, systemLog logger.Recorder, noUpgrade bool) Service {
func New(id protocol.DeviceID, cfg config.Wrapper, assetDir, tlsDefaultCommonName string, m model.Model, defaultSub, diskSub events.BufferedSubscription, evLogger events.Logger, discoverer discover.Manager, connectionsService connections.Service, urService *ur.Service, fss model.FolderSummaryService, errors, systemLog logger.Recorder, noUpgrade bool, miscDB *db.NamespacedKV) Service {
    return &service{
        id:  id,
        cfg: cfg,

@@ -127,6 +128,7 @@ func New(id protocol.DeviceID, cfg config.Wrapper, assetDir, tlsDefaultCommonNam
        configChanged: make(chan struct{}),
        startedOnce:   make(chan struct{}),
        exitChan:      make(chan *svcutil.FatalErr, 1),
        miscDB:        miscDB,
    }
}

@@ -364,7 +366,7 @@ func (s *service) Serve(ctx context.Context) error {

    // Wrap everything in CSRF protection. The /rest prefix should be
    // protected, other requests will grant cookies.
    var handler http.Handler = newCsrfManager(s.id.Short().String(), "/rest", guiCfg, mux, locations.Get(locations.CsrfTokens))
    var handler http.Handler = newCsrfManager(s.id.Short().String(), "/rest", guiCfg, mux, s.miscDB)

    // Add our version and ID as a header to responses
    handler = withDetailsMiddleware(s.id, handler)

@@ -372,12 +374,13 @@ func (s *service) Serve(ctx context.Context) error {
    // Wrap everything in basic auth, if user/password is set.
    if guiCfg.IsAuthEnabled() {
        sessionCookieName := "sessionid-" + s.id.Short().String()
        handler = basicAuthAndSessionMiddleware(sessionCookieName, s.id.Short().String(), guiCfg, s.cfg.LDAP(), handler, s.evLogger)
        handlePasswordAuth := passwordAuthHandler(sessionCookieName, guiCfg, s.cfg.LDAP(), s.evLogger)
        restMux.Handler(http.MethodPost, "/rest/noauth/auth/password", handlePasswordAuth)
        authMW := newBasicAuthAndSessionMiddleware(sessionCookieName, s.id.Short().String(), guiCfg, s.cfg.LDAP(), handler, s.evLogger, s.miscDB)
        handler = authMW

        restMux.Handler(http.MethodPost, "/rest/noauth/auth/password", http.HandlerFunc(authMW.passwordAuthHandler))

        // Logout is a no-op without a valid session cookie, so /noauth/ is fine here
        restMux.Handler(http.MethodPost, "/rest/noauth/auth/logout", handleLogout(sessionCookieName))
        restMux.Handler(http.MethodPost, "/rest/noauth/auth/logout", http.HandlerFunc(authMW.handleLogout))
    }

    // Redirect to HTTPS if we are supposed to
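The matching logout endpoint registered above can be exercised the same way. Another hedged sketch, not part of the commit; the base URL is a placeholder and a real client would reuse the cookie jar from the login example earlier.

    package main

    import "net/http"

    func main() {
        // Logout is a no-op without a valid session cookie, so this is safe to
        // call unauthenticated; with a session cookie present it deletes the
        // corresponding token and expires the cookie (Max-Age < 0).
        resp, err := http.Post("http://localhost:8384/rest/noauth/auth/logout", "application/json", nil)
        if err != nil {
            panic(err)
        }
        resp.Body.Close() // expect 204 No Content
    }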
@@ -16,15 +16,16 @@ import (

    ldap "github.com/go-ldap/ldap/v3"
    "github.com/syncthing/syncthing/lib/config"
    "github.com/syncthing/syncthing/lib/db"
    "github.com/syncthing/syncthing/lib/events"
    "github.com/syncthing/syncthing/lib/rand"
    "github.com/syncthing/syncthing/lib/sync"
    "golang.org/x/exp/slices"
)

var (
    sessions    = make(map[string]bool)
    sessionsMut = sync.NewMutex()
const (
    maxSessionLifetime = 7 * 24 * time.Hour
    maxActiveSessions  = 25
    randomTokenLength  = 64
)

func emitLoginAttempt(success bool, username, address string, evLogger events.Logger) {
@@ -78,75 +79,91 @@ func isNoAuthPath(path string) bool {
    })
}

func basicAuthAndSessionMiddleware(cookieName, shortID string, guiCfg config.GUIConfiguration, ldapCfg config.LDAPConfiguration, next http.Handler, evLogger events.Logger) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        if hasValidAPIKeyHeader(r, guiCfg) {
            next.ServeHTTP(w, r)
            return
        }

        for _, cookie := range r.Cookies() {
            // We iterate here since there may, historically, be multiple
            // cookies with the same name but different path. Any "old" ones
            // won't match an existing session and will be ignored, then
            // later removed on logout or when timing out.
            if cookie.Name == cookieName {
                sessionsMut.Lock()
                _, ok := sessions[cookie.Value]
                sessionsMut.Unlock()
                if ok {
                    next.ServeHTTP(w, r)
                    return
                }
            }
        }

        // Fall back to Basic auth if provided
        if username, ok := attemptBasicAuth(r, guiCfg, ldapCfg, evLogger); ok {
            createSession(cookieName, username, guiCfg, evLogger, w, r)
            next.ServeHTTP(w, r)
            return
        }

        // Exception for static assets and REST calls that don't require authentication.
        if isNoAuthPath(r.URL.Path) {
            next.ServeHTTP(w, r)
            return
        }

        // Some browsers don't send the Authorization request header unless prompted by a 401 response.
        // This enables https://user:pass@localhost style URLs to keep working.
        if guiCfg.SendBasicAuthPrompt {
            unauthorized(w, shortID)
            return
        }

        forbidden(w)
    })
type basicAuthAndSessionMiddleware struct {
    cookieName string
    shortID    string
    guiCfg     config.GUIConfiguration
    ldapCfg    config.LDAPConfiguration
    next       http.Handler
    evLogger   events.Logger
    tokens     *tokenManager
}

func passwordAuthHandler(cookieName string, guiCfg config.GUIConfiguration, ldapCfg config.LDAPConfiguration, evLogger events.Logger) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        var req struct {
            Username string
            Password string
        }
        if err := unmarshalTo(r.Body, &req); err != nil {
            l.Debugln("Failed to parse username and password:", err)
            http.Error(w, "Failed to parse username and password.", http.StatusBadRequest)
            return
        }
func newBasicAuthAndSessionMiddleware(cookieName, shortID string, guiCfg config.GUIConfiguration, ldapCfg config.LDAPConfiguration, next http.Handler, evLogger events.Logger, miscDB *db.NamespacedKV) *basicAuthAndSessionMiddleware {
    return &basicAuthAndSessionMiddleware{
        cookieName: cookieName,
        shortID:    shortID,
        guiCfg:     guiCfg,
        ldapCfg:    ldapCfg,
        next:       next,
        evLogger:   evLogger,
        tokens:     newTokenManager("sessions", miscDB, maxSessionLifetime, maxActiveSessions),
    }
}

        if auth(req.Username, req.Password, guiCfg, ldapCfg) {
            createSession(cookieName, req.Username, guiCfg, evLogger, w, r)
            w.WriteHeader(http.StatusNoContent)
            return
        }
func (m *basicAuthAndSessionMiddleware) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    if hasValidAPIKeyHeader(r, m.guiCfg) {
        m.next.ServeHTTP(w, r)
        return
    }

        emitLoginAttempt(false, req.Username, r.RemoteAddr, evLogger)
        antiBruteForceSleep()
        forbidden(w)
    })
    for _, cookie := range r.Cookies() {
        // We iterate here since there may, historically, be multiple
        // cookies with the same name but different path. Any "old" ones
        // won't match an existing session and will be ignored, then
        // later removed on logout or when timing out.
        if cookie.Name == m.cookieName {
            if m.tokens.Check(cookie.Value) {
                m.next.ServeHTTP(w, r)
                return
            }
        }
    }

    // Fall back to Basic auth if provided
    if username, ok := attemptBasicAuth(r, m.guiCfg, m.ldapCfg, m.evLogger); ok {
        m.createSession(username, false, w, r)
        m.next.ServeHTTP(w, r)
        return
    }

    // Exception for static assets and REST calls that don't require authentication.
    if isNoAuthPath(r.URL.Path) {
        m.next.ServeHTTP(w, r)
        return
    }

    // Some browsers don't send the Authorization request header unless prompted by a 401 response.
    // This enables https://user:pass@localhost style URLs to keep working.
    if m.guiCfg.SendBasicAuthPrompt {
        unauthorized(w, m.shortID)
        return
    }

    forbidden(w)
}

func (m *basicAuthAndSessionMiddleware) passwordAuthHandler(w http.ResponseWriter, r *http.Request) {
    var req struct {
        Username     string
        Password     string
        StayLoggedIn bool
    }
    if err := unmarshalTo(r.Body, &req); err != nil {
        l.Debugln("Failed to parse username and password:", err)
        http.Error(w, "Failed to parse username and password.", http.StatusBadRequest)
        return
    }

    if auth(req.Username, req.Password, m.guiCfg, m.ldapCfg) {
        m.createSession(req.Username, req.StayLoggedIn, w, r)
        w.WriteHeader(http.StatusNoContent)
        return
    }

    emitLoginAttempt(false, req.Username, r.RemoteAddr, m.evLogger)
    antiBruteForceSleep()
    forbidden(w)
}

func attemptBasicAuth(r *http.Request, guiCfg config.GUIConfiguration, ldapCfg config.LDAPConfiguration, evLogger events.Logger) (string, bool) {

@@ -172,11 +189,8 @@ func attemptBasicAuth(r *http.Request, guiCfg config.GUIConfiguration, ldapCfg c
    return "", false
}

func createSession(cookieName string, username string, guiCfg config.GUIConfiguration, evLogger events.Logger, w http.ResponseWriter, r *http.Request) {
    sessionid := rand.String(32)
    sessionsMut.Lock()
    sessions[sessionid] = true
    sessionsMut.Unlock()
func (m *basicAuthAndSessionMiddleware) createSession(username string, persistent bool, w http.ResponseWriter, r *http.Request) {
    sessionid := m.tokens.New()

    // Best effort detection of whether the connection is HTTPS --
    // either directly to us, or as used by the client towards a reverse

@@ -186,45 +200,45 @@ func createSession(cookieName string, username string, guiCfg config.GUIConfigur
        strings.Contains(strings.ToLower(r.Header.Get("forwarded")), "proto=https")
    // If the connection is HTTPS, or *should* be HTTPS, set the Secure
    // bit in cookies.
    useSecureCookie := connectionIsHTTPS || guiCfg.UseTLS()
    useSecureCookie := connectionIsHTTPS || m.guiCfg.UseTLS()

    maxAge := 0
    if persistent {
        maxAge = int(maxSessionLifetime.Seconds())
    }
    http.SetCookie(w, &http.Cookie{
        Name:  cookieName,
        Name:  m.cookieName,
        Value: sessionid,
        // In HTTP spec Max-Age <= 0 means delete immediately,
        // but in http.Cookie MaxAge = 0 means unspecified (session) and MaxAge < 0 means delete immediately
        MaxAge: 0,
        MaxAge: maxAge,
        Secure: useSecureCookie,
        Path:   "/",
    })

    emitLoginAttempt(true, username, r.RemoteAddr, evLogger)
    emitLoginAttempt(true, username, r.RemoteAddr, m.evLogger)
}

func handleLogout(cookieName string) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        for _, cookie := range r.Cookies() {
            // We iterate here since there may, historically, be multiple
            // cookies with the same name but different path. We drop them
            // all.
            if cookie.Name == cookieName {
                sessionsMut.Lock()
                delete(sessions, cookie.Value)
                sessionsMut.Unlock()
func (m *basicAuthAndSessionMiddleware) handleLogout(w http.ResponseWriter, r *http.Request) {
    for _, cookie := range r.Cookies() {
        // We iterate here since there may, historically, be multiple
        // cookies with the same name but different path. We drop them
        // all.
        if cookie.Name == m.cookieName {
            m.tokens.Delete(cookie.Value)

                // Delete the cookie
                http.SetCookie(w, &http.Cookie{
                    Name:   cookieName,
                    Value:  "",
                    MaxAge: -1,
                    Secure: cookie.Secure,
                    Path:   cookie.Path,
                })
            }
            // Delete the cookie
            http.SetCookie(w, &http.Cookie{
                Name:   m.cookieName,
                Value:  "",
                MaxAge: -1,
                Secure: cookie.Secure,
                Path:   cookie.Path,
            })
        }
    }

        w.WriteHeader(http.StatusNoContent)
    })
    w.WriteHeader(http.StatusNoContent)
}

func auth(username string, password string, guiCfg config.GUIConfiguration, ldapCfg config.LDAPConfiguration) bool {
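The Max-Age comment above is what makes the new "stay logged in" option work. A small illustration of the convention, using standard net/http semantics rather than code from this commit; the cookie name (with a made-up short device ID) and the 7-day lifetime mirror the values used above.

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    // cookieFor shows the convention: MaxAge 0 gives a browser-session cookie,
    // a positive MaxAge (maxSessionLifetime in seconds) gives a persistent one,
    // and MaxAge -1 is used on logout to delete the cookie again.
    func cookieFor(sessionid string, persistent bool) *http.Cookie {
        maxAge := 0
        if persistent {
            maxAge = int((7 * 24 * time.Hour).Seconds())
        }
        return &http.Cookie{Name: "sessionid-ABC1234", Value: sessionid, MaxAge: maxAge, Path: "/"}
    }

    func main() {
        fmt.Println(cookieFor("sometoken", false)) // dropped when the browser closes
        fmt.Println(cookieFor("sometoken", true))  // survives browser restarts
    }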
@@ -8,8 +8,12 @@ package api

import (
    "testing"
    "time"

    "github.com/syncthing/syncthing/lib/config"
    "github.com/syncthing/syncthing/lib/db"
    "github.com/syncthing/syncthing/lib/db/backend"
    "github.com/syncthing/syncthing/lib/events"
)

var guiCfg config.GUIConfiguration

@@ -110,3 +114,76 @@ func TestEscapeForLDAPDN(t *testing.T) {
        }
    }
}

type mockClock struct {
    now time.Time
}

func (c *mockClock) Now() time.Time {
    c.now = c.now.Add(1) // time always ticks by at least 1 ns
    return c.now
}

func (c *mockClock) wind(t time.Duration) {
    c.now = c.now.Add(t)
}

func TestTokenManager(t *testing.T) {
    t.Parallel()

    mdb, _ := db.NewLowlevel(backend.OpenMemory(), events.NoopLogger)
    kdb := db.NewNamespacedKV(mdb, "test")
    clock := &mockClock{now: time.Now()}

    // Token manager keeps up to three tokens with a validity time of 24 hours.
    tm := newTokenManager("testTokens", kdb, 24*time.Hour, 3)
    tm.timeNow = clock.Now

    // Create three tokens
    t0 := tm.New()
    t1 := tm.New()
    t2 := tm.New()

    // Check that the tokens are valid
    if !tm.Check(t0) {
        t.Errorf("token %q should be valid", t0)
    }
    if !tm.Check(t1) {
        t.Errorf("token %q should be valid", t1)
    }
    if !tm.Check(t2) {
        t.Errorf("token %q should be valid", t2)
    }

    // Create a fourth token
    t3 := tm.New()
    // It should be valid
    if !tm.Check(t3) {
        t.Errorf("token %q should be valid", t3)
    }
    // But the first token should have been removed
    if tm.Check(t0) {
        t.Errorf("token %q should be invalid", t0)
    }

    // Wind the clock by 12 hours
    clock.wind(12 * time.Hour)
    // The second token should still be valid (and checking it will give it more life)
    if !tm.Check(t1) {
        t.Errorf("token %q should be valid", t1)
    }

    // Wind the clock by 12 hours
    clock.wind(12 * time.Hour)
    // The second token should still be valid
    if !tm.Check(t1) {
        t.Errorf("token %q should be valid", t1)
    }
    // But the third and fourth tokens should have expired
    if tm.Check(t2) {
        t.Errorf("token %q should be invalid", t2)
    }
    if tm.Check(t3) {
        t.Errorf("token %q should be invalid", t3)
    }
}
@@ -7,33 +7,24 @@
package api

import (
    "bufio"
    "fmt"
    "net/http"
    "os"
    "strings"
    "time"

    "github.com/syncthing/syncthing/lib/osutil"
    "github.com/syncthing/syncthing/lib/rand"
    "github.com/syncthing/syncthing/lib/sync"
    "github.com/syncthing/syncthing/lib/db"
)

const maxCsrfTokens = 25
const (
    maxCSRFTokenLifetime = time.Hour
    maxActiveCSRFTokens  = 25
)

type csrfManager struct {
    // tokens is a list of valid tokens. It is sorted so that the most
    // recently used token is first in the list. New tokens are added to the front
    // of the list (as it is the most recently used at that time). The list is
    // pruned to a maximum of maxCsrfTokens, throwing away the least recently used
    // tokens.
    tokens    []string
    tokensMut sync.Mutex

    unique          string
    prefix          string
    apiKeyValidator apiKeyValidator
    next            http.Handler
    saveLocation    string
    tokens          *tokenManager
}

type apiKeyValidator interface {

@@ -43,17 +34,14 @@ type apiKeyValidator interface {
// Check for CSRF token on /rest/ URLs. If a correct one is not given, reject
// the request with 403. For / and /index.html, set a new CSRF cookie if none
// is currently set.
func newCsrfManager(unique string, prefix string, apiKeyValidator apiKeyValidator, next http.Handler, saveLocation string) *csrfManager {
func newCsrfManager(unique string, prefix string, apiKeyValidator apiKeyValidator, next http.Handler, miscDB *db.NamespacedKV) *csrfManager {
    m := &csrfManager{
        tokensMut:       sync.NewMutex(),
        tokens:          make([]string, 0, maxCsrfTokens),
        unique:          unique,
        prefix:          prefix,
        apiKeyValidator: apiKeyValidator,
        next:            next,
        saveLocation:    saveLocation,
        tokens:          newTokenManager("csrfTokens", miscDB, maxCSRFTokenLifetime, maxActiveCSRFTokens),
    }
    m.load()
    return m
}

@@ -78,11 +66,11 @@ func (m *csrfManager) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    // and set a CSRF cookie if there isn't already a valid one.
    if !strings.HasPrefix(r.URL.Path, m.prefix) {
        cookie, err := r.Cookie("CSRF-Token-" + m.unique)
        if err != nil || !m.validToken(cookie.Value) {
        if err != nil || !m.tokens.Check(cookie.Value) {
            l.Debugln("new CSRF cookie in response to request for", r.URL)
            cookie = &http.Cookie{
                Name:  "CSRF-Token-" + m.unique,
                Value: m.newToken(),
                Value: m.tokens.New(),
            }
            http.SetCookie(w, cookie)
        }

@@ -99,7 +87,7 @@ func (m *csrfManager) ServeHTTP(w http.ResponseWriter, r *http.Request) {

    // Verify the CSRF token
    token := r.Header.Get("X-CSRF-Token-" + m.unique)
    if !m.validToken(token) {
    if !m.tokens.Check(token) {
        http.Error(w, "CSRF Error", http.StatusForbidden)
        return
    }

@@ -107,78 +95,6 @@ func (m *csrfManager) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    m.next.ServeHTTP(w, r)
}

func (m *csrfManager) validToken(token string) bool {
    m.tokensMut.Lock()
    defer m.tokensMut.Unlock()
    for i, t := range m.tokens {
        if t == token {
            if i > 0 {
                // Move this token to the head of the list. Copy the tokens at
                // the front one step to the right and then replace the token
                // at the head.
                copy(m.tokens[1:], m.tokens[:i])
                m.tokens[0] = token
            }
            return true
        }
    }
    return false
}

func (m *csrfManager) newToken() string {
    token := rand.String(32)

    m.tokensMut.Lock()
    defer m.tokensMut.Unlock()

    if len(m.tokens) < maxCsrfTokens {
        m.tokens = append(m.tokens, "")
    }
    copy(m.tokens[1:], m.tokens)
    m.tokens[0] = token

    m.save()

    return token
}

func (m *csrfManager) save() {
    // We're ignoring errors in here. It's not super critical and there's
    // nothing relevant we can do about them anyway...

    if m.saveLocation == "" {
        return
    }

    f, err := osutil.CreateAtomic(m.saveLocation)
    if err != nil {
        return
    }

    for _, t := range m.tokens {
        fmt.Fprintln(f, t)
    }

    f.Close()
}

func (m *csrfManager) load() {
    if m.saveLocation == "" {
        return
    }

    f, err := os.Open(m.saveLocation)
    if err != nil {
        return
    }
    defer f.Close()

    s := bufio.NewScanner(f)
    for s.Scan() {
        m.tokens = append(m.tokens, s.Text())
    }
}

func hasValidAPIKeyHeader(r *http.Request, validator apiKeyValidator) bool {
    if key := r.Header.Get("X-API-Key"); validator.IsValidAPIKey(key) {
        return true
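For completeness, a sketch of what a cookie-based client without an API key has to do against this middleware: echo the CSRF cookie back as the matching header on /rest requests. This is illustrative only; the short device ID and the base URL are placeholders.

    package main

    import (
        "net/http"
        "net/http/cookiejar"
    )

    // addCSRFHeader copies the CSRF cookie value into the header checked above.
    func addCSRFHeader(req *http.Request, jar *cookiejar.Jar, shortID string) {
        for _, c := range jar.Cookies(req.URL) {
            if c.Name == "CSRF-Token-"+shortID {
                req.Header.Set("X-CSRF-Token-"+shortID, c.Value)
            }
        }
    }

    func main() {
        // The jar would normally already hold the cookie handed out on a prior
        // request for / or /index.html.
        jar, _ := cookiejar.New(nil)
        req, _ := http.NewRequest(http.MethodGet, "http://localhost:8384/rest/system/status", nil)
        addCSRFHeader(req, jar, "ABC1234")
        _, _ = (&http.Client{Jar: jar}).Do(req)
    }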
@@ -18,7 +18,6 @@ import (
    "net/http/httptest"
    "os"
    "path/filepath"
    "reflect"
    "strconv"
    "strings"
    "testing"

@@ -29,6 +28,8 @@ import (
    "github.com/syncthing/syncthing/lib/build"
    "github.com/syncthing/syncthing/lib/config"
    connmocks "github.com/syncthing/syncthing/lib/connections/mocks"
    "github.com/syncthing/syncthing/lib/db"
    "github.com/syncthing/syncthing/lib/db/backend"
    discovermocks "github.com/syncthing/syncthing/lib/discover/mocks"
    "github.com/syncthing/syncthing/lib/events"
    eventmocks "github.com/syncthing/syncthing/lib/events/mocks"

@@ -72,71 +73,6 @@ func TestMain(m *testing.M) {
    os.Exit(exitCode)
}

func TestCSRFToken(t *testing.T) {
    t.Parallel()

    max := 10 * maxCsrfTokens
    int := 5
    if testing.Short() {
        max = 1 + maxCsrfTokens
        int = 2
    }

    m := newCsrfManager("unique", "prefix", config.GUIConfiguration{}, nil, "")

    t1 := m.newToken()
    t2 := m.newToken()

    t3 := m.newToken()
    if !m.validToken(t3) {
        t.Fatal("t3 should be valid")
    }

    valid := make(map[string]struct{}, maxCsrfTokens)
    for _, token := range m.tokens {
        valid[token] = struct{}{}
    }

    for i := 0; i < max; i++ {
        if i%int == 0 {
            // t1 and t2 should remain valid by virtue of us checking them now
            // and then.
            if !m.validToken(t1) {
                t.Fatal("t1 should be valid at iteration", i)
            }
            if !m.validToken(t2) {
                t.Fatal("t2 should be valid at iteration", i)
            }
        }

        if len(m.tokens) == maxCsrfTokens {
            // We're about to add a token, which will remove the last token
            // from m.tokens.
            delete(valid, m.tokens[len(m.tokens)-1])
        }

        // The newly generated token is always valid
        t4 := m.newToken()
        if !m.validToken(t4) {
            t.Fatal("t4 should be valid at iteration", i)
        }
        valid[t4] = struct{}{}

        v := make(map[string]struct{}, maxCsrfTokens)
        for _, token := range m.tokens {
            v[token] = struct{}{}
        }

        if !reflect.DeepEqual(v, valid) {
            t.Fatalf("want valid tokens %v, got %v", valid, v)
        }
    }

    if m.validToken(t3) {
        t.Fatal("t3 should have expired by now")
    }
}

func TestStopAfterBrokenConfig(t *testing.T) {
    t.Parallel()

@@ -148,7 +84,9 @@ func TestStopAfterBrokenConfig(t *testing.T) {
    }
    w := config.Wrap("/dev/null", cfg, protocol.LocalDeviceID, events.NoopLogger)

    srv := New(protocol.LocalDeviceID, w, "", "syncthing", nil, nil, nil, events.NoopLogger, nil, nil, nil, nil, nil, nil, false).(*service)
    mdb, _ := db.NewLowlevel(backend.OpenMemory(), events.NoopLogger)
    kdb := db.NewMiscDataNamespace(mdb)
    srv := New(protocol.LocalDeviceID, w, "", "syncthing", nil, nil, nil, events.NoopLogger, nil, nil, nil, nil, nil, nil, false, kdb).(*service)
    defer os.Remove(token)

    srv.started = make(chan string)

@@ -926,7 +864,9 @@ func startHTTP(cfg config.Wrapper) (string, context.CancelFunc, error) {

    // Instantiate the API service
    urService := ur.New(cfg, m, connections, false)
    svc := New(protocol.LocalDeviceID, cfg, assetDir, "syncthing", m, eventSub, diskEventSub, events.NoopLogger, discoverer, connections, urService, mockedSummary, errorLog, systemLog, false).(*service)
    mdb, _ := db.NewLowlevel(backend.OpenMemory(), events.NoopLogger)
    kdb := db.NewMiscDataNamespace(mdb)
    svc := New(protocol.LocalDeviceID, cfg, assetDir, "syncthing", m, eventSub, diskEventSub, events.NoopLogger, discoverer, connections, urService, mockedSummary, errorLog, systemLog, false, kdb).(*service)
    defer os.Remove(token)
    svc.started = addrChan

@@ -1467,7 +1407,9 @@ func TestEventMasks(t *testing.T) {
    cfg := newMockedConfig()
    defSub := new(eventmocks.BufferedSubscription)
    diskSub := new(eventmocks.BufferedSubscription)
    svc := New(protocol.LocalDeviceID, cfg, "", "syncthing", nil, defSub, diskSub, events.NoopLogger, nil, nil, nil, nil, nil, nil, false).(*service)
    mdb, _ := db.NewLowlevel(backend.OpenMemory(), events.NoopLogger)
    kdb := db.NewMiscDataNamespace(mdb)
    svc := New(protocol.LocalDeviceID, cfg, "", "syncthing", nil, defSub, diskSub, events.NoopLogger, nil, nil, nil, nil, nil, nil, false, kdb).(*service)
    defer os.Remove(token)

    if mask := svc.getEventMask(""); mask != DefaultEventMask {
lib/api/tokenmanager.go (new file, 137 lines)
@@ -0,0 +1,137 @@
// Copyright (C) 2024 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package api

import (
    "time"

    "github.com/syncthing/syncthing/lib/db"
    "github.com/syncthing/syncthing/lib/rand"
    "github.com/syncthing/syncthing/lib/sync"
    "golang.org/x/exp/slices"
)

type tokenManager struct {
    key      string
    miscDB   *db.NamespacedKV
    lifetime time.Duration
    maxItems int

    timeNow func() time.Time // can be overridden for testing

    mut       sync.Mutex
    tokens    *TokenSet
    saveTimer *time.Timer
}

func newTokenManager(key string, miscDB *db.NamespacedKV, lifetime time.Duration, maxItems int) *tokenManager {
    tokens := &TokenSet{
        Tokens: make(map[string]int64),
    }
    if bs, ok, _ := miscDB.Bytes(key); ok {
        _ = tokens.Unmarshal(bs) // best effort
    }
    return &tokenManager{
        key:      key,
        miscDB:   miscDB,
        lifetime: lifetime,
        maxItems: maxItems,
        timeNow:  time.Now,
        mut:      sync.NewMutex(),
        tokens:   tokens,
    }
}

// Check returns true if the token is valid, and updates the token's expiry
// time. The token is removed if it is expired.
func (m *tokenManager) Check(token string) bool {
    m.mut.Lock()
    defer m.mut.Unlock()

    expires, ok := m.tokens.Tokens[token]
    if ok {
        if expires < m.timeNow().UnixNano() {
            // The token is expired.
            m.saveLocked() // removes expired tokens
            return false
        }

        // Give the token further life.
        m.tokens.Tokens[token] = m.timeNow().Add(m.lifetime).UnixNano()
        m.saveLocked()
    }
    return ok
}

// New creates a new token and returns it.
func (m *tokenManager) New() string {
    token := rand.String(randomTokenLength)

    m.mut.Lock()
    defer m.mut.Unlock()

    m.tokens.Tokens[token] = m.timeNow().Add(m.lifetime).UnixNano()
    m.saveLocked()

    return token
}

// Delete removes a token.
func (m *tokenManager) Delete(token string) {
    m.mut.Lock()
    defer m.mut.Unlock()

    delete(m.tokens.Tokens, token)
    m.saveLocked()
}

func (m *tokenManager) saveLocked() {
    // Remove expired tokens.
    now := m.timeNow().UnixNano()
    for token, expiry := range m.tokens.Tokens {
        if expiry < now {
            delete(m.tokens.Tokens, token)
        }
    }

    // If we have a limit on the number of tokens, remove the oldest ones.
    if m.maxItems > 0 && len(m.tokens.Tokens) > m.maxItems {
        // Sort the tokens by expiry time, oldest first.
        type tokenExpiry struct {
            token  string
            expiry int64
        }
        var tokens []tokenExpiry
        for token, expiry := range m.tokens.Tokens {
            tokens = append(tokens, tokenExpiry{token, expiry})
        }
        slices.SortFunc(tokens, func(i, j tokenExpiry) int {
            return int(i.expiry - j.expiry)
        })
        // Remove the oldest tokens.
        for _, token := range tokens[:len(tokens)-m.maxItems] {
            delete(m.tokens.Tokens, token.token)
        }
    }

    // Postpone saving until one second of inactivity.
    if m.saveTimer == nil {
        m.saveTimer = time.AfterFunc(time.Second, m.scheduledSave)
    } else {
        m.saveTimer.Reset(time.Second)
    }
}

func (m *tokenManager) scheduledSave() {
    m.mut.Lock()
    defer m.mut.Unlock()

    m.saveTimer = nil

    bs, _ := m.tokens.Marshal()      // can't fail
    _ = m.miscDB.PutBytes(m.key, bs) // can fail, but what are we going to do?
}
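A usage sketch for the new token manager, written as a hypothetical test in the same package and mirroring TestTokenManager above; the in-memory backend stands in for the miscDB that the API service passes in, and the lifetime and limit are the session values from auth.go.

    package api

    import (
        "testing"
        "time"

        "github.com/syncthing/syncthing/lib/db"
        "github.com/syncthing/syncthing/lib/db/backend"
        "github.com/syncthing/syncthing/lib/events"
    )

    func TestTokenManagerUsageSketch(t *testing.T) {
        mdb, _ := db.NewLowlevel(backend.OpenMemory(), events.NoopLogger)
        kdb := db.NewNamespacedKV(mdb, "test")

        tm := newTokenManager("sessions", kdb, 7*24*time.Hour, 25)

        tok := tm.New() // mint a token; persisted to the database after ~1s of inactivity
        if !tm.Check(tok) {
            t.Fatal("expected the token to be valid; Check also extends its lifetime")
        }
        tm.Delete(tok) // e.g. on logout
        if tm.Check(tok) {
            t.Fatal("expected the token to be gone after Delete")
        }
    }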
lib/api/tokenset.pb.go (new file, 411 lines)
@@ -0,0 +1,411 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: lib/api/tokenset.proto
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
io "io"
|
||||
math "math"
|
||||
math_bits "math/bits"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
type TokenSet struct {
|
||||
// token -> expiry time (epoch nanoseconds)
|
||||
Tokens map[string]int64 `protobuf:"bytes,1,rep,name=tokens,proto3" json:"tokens" xml:"token" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
|
||||
}
|
||||
|
||||
func (m *TokenSet) Reset() { *m = TokenSet{} }
|
||||
func (m *TokenSet) String() string { return proto.CompactTextString(m) }
|
||||
func (*TokenSet) ProtoMessage() {}
|
||||
func (*TokenSet) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_9ea8707737c33b38, []int{0}
|
||||
}
|
||||
func (m *TokenSet) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *TokenSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_TokenSet.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *TokenSet) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_TokenSet.Merge(m, src)
|
||||
}
|
||||
func (m *TokenSet) XXX_Size() int {
|
||||
return m.ProtoSize()
|
||||
}
|
||||
func (m *TokenSet) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_TokenSet.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_TokenSet proto.InternalMessageInfo
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*TokenSet)(nil), "api.TokenSet")
|
||||
proto.RegisterMapType((map[string]int64)(nil), "api.TokenSet.TokensEntry")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("lib/api/tokenset.proto", fileDescriptor_9ea8707737c33b38) }
|
||||
|
||||
var fileDescriptor_9ea8707737c33b38 = []byte{
|
||||
// 260 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0xcb, 0xc9, 0x4c, 0xd2,
|
||||
0x4f, 0x2c, 0xc8, 0xd4, 0x2f, 0xc9, 0xcf, 0x4e, 0xcd, 0x2b, 0x4e, 0x2d, 0xd1, 0x2b, 0x28, 0xca,
|
||||
0x2f, 0xc9, 0x17, 0x62, 0x4e, 0x2c, 0xc8, 0x54, 0x3a, 0xce, 0xc8, 0xc5, 0x11, 0x02, 0x12, 0x0f,
|
||||
0x4e, 0x2d, 0x11, 0x0a, 0xe0, 0x62, 0x83, 0xa8, 0x91, 0x60, 0x54, 0x60, 0xd6, 0xe0, 0x36, 0x92,
|
||||
0xd4, 0x4b, 0x2c, 0xc8, 0xd4, 0x83, 0x49, 0x43, 0x18, 0xc5, 0xae, 0x79, 0x25, 0x45, 0x95, 0x4e,
|
||||
0xb2, 0x27, 0xee, 0xc9, 0x33, 0xbc, 0xba, 0x27, 0x0f, 0xd5, 0xf0, 0xe9, 0x9e, 0x3c, 0x77, 0x45,
|
||||
0x6e, 0x8e, 0x95, 0x12, 0x98, 0xab, 0x14, 0x04, 0x15, 0x96, 0xca, 0xe4, 0xe2, 0x46, 0xd2, 0x25,
|
||||
0xa4, 0xc6, 0xc5, 0x9c, 0x9d, 0x5a, 0x29, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0xe9, 0x24, 0xf2, 0xea,
|
||||
0x9e, 0x3c, 0x88, 0xfb, 0xe9, 0x9e, 0x3c, 0x27, 0x58, 0x6f, 0x76, 0x6a, 0xa5, 0x52, 0x10, 0x48,
|
||||
0x44, 0x48, 0x8f, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0xd9,
|
||||
0x49, 0xe2, 0xd5, 0x3d, 0x79, 0x88, 0x00, 0xdc, 0x1e, 0x30, 0x4f, 0x29, 0x08, 0x22, 0x6a, 0xc5,
|
||||
0x64, 0xc1, 0xe8, 0xe4, 0x71, 0xe2, 0xa1, 0x1c, 0xc3, 0x85, 0x87, 0x72, 0x0c, 0x27, 0x1e, 0xc9,
|
||||
0x31, 0x5e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x82, 0xc7, 0x72, 0x8c, 0x17, 0x1e,
|
||||
0xcb, 0x31, 0xdc, 0x78, 0x2c, 0xc7, 0x10, 0xa5, 0x96, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97,
|
||||
0x9c, 0x9f, 0xab, 0x5f, 0x5c, 0x99, 0x97, 0x5c, 0x92, 0x91, 0x99, 0x97, 0x8e, 0xc4, 0x82, 0x86,
|
||||
0x53, 0x12, 0x1b, 0x38, 0x7c, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xfe, 0x25, 0x31, 0x49,
|
||||
0x39, 0x01, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *TokenSet) Marshal() (dAtA []byte, err error) {
|
||||
size := m.ProtoSize()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *TokenSet) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.ProtoSize()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *TokenSet) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Tokens) > 0 {
|
||||
for k := range m.Tokens {
|
||||
v := m.Tokens[k]
|
||||
baseI := i
|
||||
i = encodeVarintTokenset(dAtA, i, uint64(v))
|
||||
i--
|
||||
dAtA[i] = 0x10
|
||||
i -= len(k)
|
||||
copy(dAtA[i:], k)
|
||||
i = encodeVarintTokenset(dAtA, i, uint64(len(k)))
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
i = encodeVarintTokenset(dAtA, i, uint64(baseI-i))
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func encodeVarintTokenset(dAtA []byte, offset int, v uint64) int {
|
||||
offset -= sovTokenset(v)
|
||||
base := offset
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return base
|
||||
}
|
||||
func (m *TokenSet) ProtoSize() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Tokens) > 0 {
|
||||
for k, v := range m.Tokens {
|
||||
_ = k
|
||||
_ = v
|
||||
mapEntrySize := 1 + len(k) + sovTokenset(uint64(len(k))) + 1 + sovTokenset(uint64(v))
|
||||
n += mapEntrySize + 1 + sovTokenset(uint64(mapEntrySize))
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovTokenset(x uint64) (n int) {
|
||||
return (math_bits.Len64(x|1) + 6) / 7
|
||||
}
|
||||
func sozTokenset(x uint64) (n int) {
|
||||
return sovTokenset(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (m *TokenSet) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTokenset
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: TokenSet: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: TokenSet: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Tokens", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTokenset
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthTokenset
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthTokenset
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.Tokens == nil {
|
||||
m.Tokens = make(map[string]int64)
|
||||
}
|
||||
var mapkey string
|
||||
var mapvalue int64
|
||||
for iNdEx < postIndex {
|
||||
entryPreIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTokenset
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
if fieldNum == 1 {
|
||||
var stringLenmapkey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTokenset
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapkey |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLenmapkey := int(stringLenmapkey)
|
||||
if intStringLenmapkey < 0 {
|
||||
return ErrInvalidLengthTokenset
|
||||
}
|
||||
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||
if postStringIndexmapkey < 0 {
|
||||
return ErrInvalidLengthTokenset
|
||||
}
|
||||
if postStringIndexmapkey > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
|
||||
iNdEx = postStringIndexmapkey
|
||||
} else if fieldNum == 2 {
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTokenset
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
mapvalue |= int64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
} else {
|
||||
iNdEx = entryPreIndex
|
||||
skippy, err := skipTokenset(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthTokenset
|
||||
}
|
||||
if (iNdEx + skippy) > postIndex {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
m.Tokens[mapkey] = mapvalue
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipTokenset(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthTokenset
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipTokenset(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
depth := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowTokenset
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowTokenset
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowTokenset
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthTokenset
|
||||
}
|
||||
iNdEx += length
|
||||
case 3:
|
||||
depth++
|
||||
case 4:
|
||||
if depth == 0 {
|
||||
return 0, ErrUnexpectedEndOfGroupTokenset
|
||||
}
|
||||
depth--
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthTokenset
|
||||
}
|
||||
if depth == 0 {
|
||||
return iNdEx, nil
|
||||
}
|
||||
}
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthTokenset = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowTokenset = fmt.Errorf("proto: integer overflow")
|
||||
ErrUnexpectedEndOfGroupTokenset = fmt.Errorf("proto: unexpected end of group")
|
||||
)
|
@@ -29,7 +29,6 @@ const (
    HTTPSKeyFile LocationEnum = "httpsKeyFile"
    Database     LocationEnum = "database"
    LogFile      LocationEnum = "logFile"
    CsrfTokens   LocationEnum = "csrfTokens"
    PanicLog     LocationEnum = "panicLog"
    AuditLog     LocationEnum = "auditLog"
    GUIAssets    LocationEnum = "guiAssets"

@@ -121,7 +120,6 @@ var locationTemplates = map[LocationEnum]string{
    HTTPSKeyFile: "${config}/https-key.pem",
    Database:     "${data}/" + LevelDBDir,
    LogFile:      "${data}/syncthing.log", // --logfile on Windows
    CsrfTokens:   "${data}/csrftokens.txt",
    PanicLog:     "${data}/panic-%{timestamp}.log",
    AuditLog:     "${data}/audit-%{timestamp}.log",
    GUIAssets:    "${config}/gui",

@@ -170,7 +168,6 @@ func PrettyPaths() string {
    fmt.Fprintf(&b, "Database location:\n\t%s\n\n", Get(Database))
    fmt.Fprintf(&b, "Log file:\n\t%s\n\n", Get(LogFile))
    fmt.Fprintf(&b, "GUI override directory:\n\t%s\n\n", Get(GUIAssets))
    fmt.Fprintf(&b, "CSRF tokens file:\n\t%s\n\n", Get(CsrfTokens))
    fmt.Fprintf(&b, "Default sync folder directory:\n\t%s\n\n", Get(DefFolder))
    return b.String()
}
@@ -305,7 +305,7 @@ func (a *App) startup() error {

    // GUI

    if err := a.setupGUI(m, defaultSub, diskSub, discoveryManager, connectionsService, usageReportingSvc, errors, systemLog); err != nil {
    if err := a.setupGUI(m, defaultSub, diskSub, discoveryManager, connectionsService, usageReportingSvc, errors, systemLog, miscDB); err != nil {
        l.Warnln("Failed starting API:", err)
        return err
    }

@@ -407,7 +407,7 @@ func (a *App) stopWithErr(stopReason svcutil.ExitStatus, err error) svcutil.Exit
    return a.exitStatus
}

func (a *App) setupGUI(m model.Model, defaultSub, diskSub events.BufferedSubscription, discoverer discover.Manager, connectionsService connections.Service, urService *ur.Service, errors, systemLog logger.Recorder) error {
func (a *App) setupGUI(m model.Model, defaultSub, diskSub events.BufferedSubscription, discoverer discover.Manager, connectionsService connections.Service, urService *ur.Service, errors, systemLog logger.Recorder, miscDB *db.NamespacedKV) error {
    guiCfg := a.cfg.GUI()

    if !guiCfg.Enabled {

@@ -421,7 +421,7 @@ func (a *App) setupGUI(m model.Model, defaultSub, diskSub events.BufferedSubscri
    summaryService := model.NewFolderSummaryService(a.cfg, m, a.myID, a.evLogger)
    a.mainService.Add(summaryService)

    apiSvc := api.New(a.myID, a.cfg, locations.Get(locations.GUIAssets), tlsDefaultCommonName, m, defaultSub, diskSub, a.evLogger, discoverer, connectionsService, urService, summaryService, errors, systemLog, a.opts.NoUpgrade)
    apiSvc := api.New(a.myID, a.cfg, locations.Get(locations.GUIAssets), tlsDefaultCommonName, m, defaultSub, diskSub, a.evLogger, discoverer, connectionsService, urService, summaryService, errors, systemLog, a.opts.NoUpgrade, miscDB)
    a.mainService.Add(apiSvc)

    if err := apiSvc.WaitForStart(); err != nil {
@@ -26,7 +26,7 @@ import (

// Inception, go generate calls the script itself that then deals with generation.
// This is only done because go:generate does not support wildcards in paths.
//go:generate go run generate.go lib/protocol lib/config lib/fs lib/db lib/discover
//go:generate go run generate.go lib/protocol lib/config lib/fs lib/db lib/discover lib/api

func main() {
    for _, path := range os.Args[1:] {
proto/lib/api/tokenset.proto (new file, 8 lines)
@@ -0,0 +1,8 @@
syntax = "proto3";

package api;

message TokenSet {
    // token -> expiry time (epoch nanoseconds)
    map<string, int64> tokens = 1;
}
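The generated code above (lib/api/tokenset.pb.go) gives TokenSet the Marshal and Unmarshal methods the token manager uses to persist its state as a single blob in miscDB. An illustrative round trip, assuming it runs inside the api package:

    package api

    import "fmt"

    func ExampleTokenSet() {
        ts := &TokenSet{Tokens: map[string]int64{"sometoken": 1234567890}}
        bs, _ := ts.Marshal()

        var out TokenSet
        _ = out.Unmarshal(bs)
        fmt.Println(out.Tokens["sometoken"])
        // Output: 1234567890
    }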