syncthing/lib/events/events.go

// Copyright (C) 2014 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.
//go:generate -command counterfeiter go run github.com/maxbrunsfeld/counterfeiter/v6
//go:generate counterfeiter -o mocks/buffered_subscription.go --fake-name BufferedSubscription . BufferedSubscription
// Package events provides event subscription and polling functionality.
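//
// A minimal usage sketch (a real deployment typically runs the Logger under a
// suture supervisor; here Serve is driven directly, and the event data is
// purely illustrative):
//
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel()
//
//	l := events.NewLogger()
//	go l.Serve(ctx)
//
//	sub := l.Subscribe(events.DeviceConnected | events.DeviceDisconnected)
//	defer sub.Unsubscribe()
//
//	l.Log(events.DeviceConnected, map[string]string{"id": "..."})
//	if ev, err := sub.Poll(time.Second); err == nil {
//		fmt.Println(ev.Type, ev.Data)
//	}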
package events
import (
"context"
"encoding/json"
"errors"
"fmt"
"runtime"
"time"
"github.com/thejerf/suture/v4"
"github.com/syncthing/syncthing/lib/sync"
)
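// EventType is a bit mask of event types; the individual values can be OR'ed
// together to form the mask passed to Logger.Subscribe.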
type EventType int64
const (
Starting EventType = 1 << iota
StartupComplete
DeviceDiscovered
DeviceConnected
DeviceDisconnected
DeviceRejected // DEPRECATED, superseded by PendingDevicesChanged
PendingDevicesChanged
DevicePaused
DeviceResumed
LocalChangeDetected
RemoteChangeDetected
LocalIndexUpdated
RemoteIndexUpdated
ItemStarted
ItemFinished
StateChanged
FolderRejected // DEPRECATED, superseded by PendingFoldersChanged
PendingFoldersChanged
ConfigSaved
DownloadProgress
RemoteDownloadProgress
FolderSummary
FolderCompletion
FolderErrors
FolderScanProgress
FolderPaused
FolderResumed
FolderWatchStateChanged
ListenAddressesChanged
LoginAttempt
Failure
AllEvents = (1 << iota) - 1
)
var (
runningTests = false
errNoop = errors.New("method of a noop object called")
)
const eventLogTimeout = 15 * time.Millisecond
func (t EventType) String() string {
switch t {
case Starting:
return "Starting"
case StartupComplete:
return "StartupComplete"
case DeviceDiscovered:
return "DeviceDiscovered"
case DeviceConnected:
return "DeviceConnected"
case DeviceDisconnected:
return "DeviceDisconnected"
case DeviceRejected:
return "DeviceRejected"
case PendingDevicesChanged:
return "PendingDevicesChanged"
case LocalChangeDetected:
return "LocalChangeDetected"
case RemoteChangeDetected:
return "RemoteChangeDetected"
case LocalIndexUpdated:
return "LocalIndexUpdated"
case RemoteIndexUpdated:
return "RemoteIndexUpdated"
case ItemStarted:
return "ItemStarted"
case ItemFinished:
return "ItemFinished"
case StateChanged:
return "StateChanged"
case FolderRejected:
return "FolderRejected"
case PendingFoldersChanged:
return "PendingFoldersChanged"
case ConfigSaved:
return "ConfigSaved"
case DownloadProgress:
return "DownloadProgress"
case RemoteDownloadProgress:
return "RemoteDownloadProgress"
case FolderSummary:
return "FolderSummary"
case FolderCompletion:
return "FolderCompletion"
case FolderErrors:
return "FolderErrors"
case DevicePaused:
return "DevicePaused"
case DeviceResumed:
return "DeviceResumed"
case FolderScanProgress:
return "FolderScanProgress"
case FolderPaused:
return "FolderPaused"
case FolderResumed:
return "FolderResumed"
case ListenAddressesChanged:
return "ListenAddressesChanged"
case LoginAttempt:
return "LoginAttempt"
case FolderWatchStateChanged:
return "FolderWatchStateChanged"
case Failure:
return "Failure"
default:
return "Unknown"
}
}
func (t EventType) MarshalText() ([]byte, error) {
return []byte(t.String()), nil
}
func (t *EventType) UnmarshalJSON(b []byte) error {
var s string
if err := json.Unmarshal(b, &s); err != nil {
return err
}
*t = UnmarshalEventType(s)
return nil
}
func UnmarshalEventType(s string) EventType {
switch s {
case "Starting":
return Starting
case "StartupComplete":
return StartupComplete
case "DeviceDiscovered":
return DeviceDiscovered
case "DeviceConnected":
return DeviceConnected
case "DeviceDisconnected":
return DeviceDisconnected
case "DeviceRejected":
return DeviceRejected
case "PendingDevicesChanged":
return PendingDevicesChanged
case "LocalChangeDetected":
return LocalChangeDetected
case "RemoteChangeDetected":
return RemoteChangeDetected
case "LocalIndexUpdated":
return LocalIndexUpdated
case "RemoteIndexUpdated":
return RemoteIndexUpdated
case "ItemStarted":
return ItemStarted
case "ItemFinished":
return ItemFinished
case "StateChanged":
return StateChanged
case "FolderRejected":
return FolderRejected
case "PendingFoldersChanged":
return PendingFoldersChanged
case "ConfigSaved":
return ConfigSaved
case "DownloadProgress":
return DownloadProgress
case "RemoteDownloadProgress":
return RemoteDownloadProgress
case "FolderSummary":
return FolderSummary
case "FolderCompletion":
return FolderCompletion
case "FolderErrors":
return FolderErrors
case "DevicePaused":
return DevicePaused
case "DeviceResumed":
return DeviceResumed
case "FolderScanProgress":
return FolderScanProgress
case "FolderPaused":
return FolderPaused
case "FolderResumed":
return FolderResumed
case "ListenAddressesChanged":
return ListenAddressesChanged
case "LoginAttempt":
return LoginAttempt
case "FolderWatchStateChanged":
return FolderWatchStateChanged
case "Failure":
return Failure
default:
return 0
}
}
const BufferSize = 64
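// Logger dispatches logged events to any number of subscribers. Dispatch and
// subscription management happen in the Serve loop, so the Logger must be
// running (typically under a suture supervisor) for events to be delivered.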
type Logger interface {
suture.Service
Log(t EventType, data interface{})
Subscribe(mask EventType) Subscription
}
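// logger is the standard Logger implementation. The subscription list is
// only modified from within the Serve loop; events, subscribe requests and
// unsubscribe requests all arrive over channels, so no locking is required.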
type logger struct {
subs []*subscription
nextSubscriptionIDs []int
nextGlobalID int
timeout *time.Timer
events chan Event
funcs chan func(context.Context)
toUnsubscribe chan *subscription
}
type Event struct {
// Per-subscription sequential event ID. Named "id" for backwards compatibility with the REST API
SubscriptionID int `json:"id"`
// Global ID of the event across all subscriptions
GlobalID int `json:"globalID"`
Time time.Time `json:"time"`
Type EventType `json:"type"`
Data interface{} `json:"data"`
}
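// Subscription is a stream of events matching the mask given to Subscribe.
// Events are received from the channel returned by C, or one at a time via
// Poll. Unsubscribe releases the subscription and closes its event channel.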
type Subscription interface {
C() <-chan Event
Poll(timeout time.Duration) (Event, error)
Mask() EventType
Unsubscribe()
}
type subscription struct {
mask EventType
events chan Event
toUnsubscribe chan *subscription
timeout *time.Timer
ctx context.Context
}
var (
ErrTimeout = errors.New("timeout")
ErrClosed = errors.New("closed")
)
func NewLogger() Logger {
l := &logger{
timeout: time.NewTimer(time.Second),
events: make(chan Event, BufferSize),
funcs: make(chan func(context.Context)),
toUnsubscribe: make(chan *subscription),
}
// Make sure the timer is in the stopped state and hasn't fired anything
// into the channel.
if !l.timeout.Stop() {
<-l.timeout.C
}
return l
}
func (l *logger) Serve(ctx context.Context) error {
loop:
for {
select {
case e := <-l.events:
// Incoming events get sent
l.sendEvent(e)
case fn := <-l.funcs:
// Subscriptions are handled here.
fn(ctx)
case s := <-l.toUnsubscribe:
l.unsubscribe(s)
case <-ctx.Done():
break loop
}
}
// Closing the event channels corresponds to what happens when a
// subscription is unsubscribed; this stops any BufferedSubscription,
// makes Poll() return ErrClosed, etc.
for _, s := range l.subs {
close(s.events)
}
return nil
}
func (l *logger) Log(t EventType, data interface{}) {
l.events <- Event{
Time: time.Now(), // intentionally high precision
Type: t,
Data: data,
// SubscriptionID and GlobalID are set in sendEvent
}
}
func (l *logger) sendEvent(e Event) {
l.nextGlobalID++
dl.Debugln("log", l.nextGlobalID, e.Type, e.Data)
e.GlobalID = l.nextGlobalID
for i, s := range l.subs {
if s.mask&e.Type != 0 {
e.SubscriptionID = l.nextSubscriptionIDs[i]
l.nextSubscriptionIDs[i]++
l.timeout.Reset(eventLogTimeout)
timedOut := false
select {
case s.events <- e:
case <-l.timeout.C:
// if s.events is not ready, drop the event
timedOut = true
}
// If Stop returns false, the timer already fired and sent to the
// channel. If we didn't already read it above we must do so now,
// or we get a spurious timeout on the next loop.
if !l.timeout.Stop() && !timedOut {
<-l.timeout.C
}
}
}
}
func (l *logger) Subscribe(mask EventType) Subscription {
res := make(chan Subscription)
l.funcs <- func(ctx context.Context) {
dl.Debugln("subscribe", mask)
s := &subscription{
mask: mask,
events: make(chan Event, BufferSize),
toUnsubscribe: l.toUnsubscribe,
timeout: time.NewTimer(0),
ctx: ctx,
}
// We need to create the timeout timer in the stopped, non-fired state so
// that Subscription.Poll() can safely reset it and select on the timeout
// channel. This ensures the timer is stopped and the channel drained.
if runningTests {
// Make the behavior stable when running tests to avoid randomly
// varying test coverage. This ensures, in practice if not in
// theory, that the timer fires and we take the true branch of the
// next if.
runtime.Gosched()
}
if !s.timeout.Stop() {
<-s.timeout.C
}
l.subs = append(l.subs, s)
l.nextSubscriptionIDs = append(l.nextSubscriptionIDs, 1)
res <- s
}
return <-res
}
func (l *logger) unsubscribe(s *subscription) {
dl.Debugln("unsubscribe", s.mask)
for i, ss := range l.subs {
if s == ss {
last := len(l.subs) - 1
l.subs[i] = l.subs[last]
l.subs[last] = nil
l.subs = l.subs[:last]
l.nextSubscriptionIDs[i] = l.nextSubscriptionIDs[last]
l.nextSubscriptionIDs[last] = 0
l.nextSubscriptionIDs = l.nextSubscriptionIDs[:last]
break
}
}
close(s.events)
}
func (l *logger) String() string {
return fmt.Sprintf("events.Logger/@%p", l)
}
// Poll returns an event from the subscription or an error if the poll times
// out or the event channel is closed. Poll should not be called concurrently
// from multiple goroutines for a single subscription.
func (s *subscription) Poll(timeout time.Duration) (Event, error) {
dl.Debugln("poll", timeout)
s.timeout.Reset(timeout)
select {
case e, ok := <-s.events:
if !ok {
return e, ErrClosed
}
if runningTests {
// Make the behavior stable when running tests to avoid randomly
// varying test coverage. This ensures, in practice if not in
// theory, that the timer fires and we take the true branch of
// the next if.
s.timeout.Reset(0)
runtime.Gosched()
}
if !s.timeout.Stop() {
// The timeout must be stopped and possibly drained to be ready
// for reuse in the next call.
<-s.timeout.C
}
return e, nil
case <-s.timeout.C:
return Event{}, ErrTimeout
}
}
func (s *subscription) C() <-chan Event {
return s.events
}
func (s *subscription) Mask() EventType {
return s.mask
}
func (s *subscription) Unsubscribe() {
select {
case s.toUnsubscribe <- s:
case <-s.ctx.Done():
}
}
type bufferedSubscription struct {
sub Subscription
buf []Event
next int
cur int // Current SubscriptionID
mut sync.Mutex
cond *sync.TimeoutCond
}
type BufferedSubscription interface {
Since(id int, into []Event, timeout time.Duration) []Event
Mask() EventType
}
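// NewBufferedSubscription wraps a Subscription in a ring buffer of the given
// size so that events can be fetched repeatedly by ID via Since. A rough
// consumer sketch (l is a running Logger; the names and the one minute
// timeout are illustrative only):
//
//	bs := events.NewBufferedSubscription(l.Subscribe(events.AllEvents), events.BufferSize)
//	lastID := 0
//	for {
//		evs := bs.Since(lastID, nil, time.Minute)
//		for _, ev := range evs {
//			lastID = ev.SubscriptionID
//			// handle ev
//		}
//	}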
func NewBufferedSubscription(s Subscription, size int) BufferedSubscription {
bs := &bufferedSubscription{
sub: s,
buf: make([]Event, size),
mut: sync.NewMutex(),
}
bs.cond = sync.NewTimeoutCond(bs.mut)
go bs.pollingLoop()
return bs
}
func (s *bufferedSubscription) pollingLoop() {
for ev := range s.sub.C() {
s.mut.Lock()
s.buf[s.next] = ev
s.next = (s.next + 1) % len(s.buf)
s.cur = ev.SubscriptionID
s.cond.Broadcast()
s.mut.Unlock()
}
}
func (s *bufferedSubscription) Since(id int, into []Event, timeout time.Duration) []Event {
s.mut.Lock()
defer s.mut.Unlock()
// Check once first before generating the TimeoutCondWaiter
if id >= s.cur {
waiter := s.cond.SetupWait(timeout)
defer waiter.Stop()
for id >= s.cur {
if eventsAvailable := waiter.Wait(); !eventsAvailable {
// Timed out
return into
}
}
}
for i := s.next; i < len(s.buf); i++ {
if s.buf[i].SubscriptionID > id {
into = append(into, s.buf[i])
}
}
for i := 0; i < s.next; i++ {
if s.buf[i].SubscriptionID > id {
into = append(into, s.buf[i])
}
}
return into
}
func (s *bufferedSubscription) Mask() EventType {
return s.sub.Mask()
}
// Error returns a string pointer suitable for JSON marshalling errors. It
// retains the "null on success" semantics, but ensures the error result is a
// string regardless of the underlying concrete error type.
func Error(err error) *string {
if err == nil {
return nil
}
str := err.Error()
return &str
}
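// noopLogger is a Logger that discards all events; its subscriptions never
// deliver anything. NoopLogger is the shared instance to use where a Logger
// is required but no events are wanted.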
type noopLogger struct{}
var NoopLogger Logger = &noopLogger{}
func (*noopLogger) Serve(ctx context.Context) error { return nil }
func (*noopLogger) Stop() {}
func (*noopLogger) Log(t EventType, data interface{}) {}
func (*noopLogger) Subscribe(mask EventType) Subscription {
return &noopSubscription{}
}
type noopSubscription struct{}
func (*noopSubscription) C() <-chan Event {
return nil
}
func (*noopSubscription) Poll(timeout time.Duration) (Event, error) {
return Event{}, errNoop
}
func (s *noopSubscription) Mask() EventType {
return 0
}
func (*noopSubscription) Unsubscribe() {}