Mirror of https://github.com/octoleo/syncthing.git, synced 2024-12-22 10:58:57 +00:00

Commit dbe7fa9155 — Merge branch 'infrastructure'

* infrastructure: feat(ursrv): new metrics based approach
@@ -21,4 +21,3 @@ See `relaypoolsrv -help` for configuration options.

 [oschwald/geoip2-golang](https://github.com/oschwald/geoip2-golang), [oschwald/maxminddb-golang](https://github.com/oschwald/maxminddb-golang), Copyright (C) 2015 [Gregory J. Oschwald](mailto:oschwald@gmail.com).

-[lib/pq](https://github.com/lib/pq), Copyright (C) 2011-2013 'pq' Contributors Portions Copyright (C) 2011 Blake Mizerany.
@@ -1,226 +0,0 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package aggregate

import (
	"database/sql"
	"fmt"
	"log"
	"os"
	"time"

	_ "github.com/lib/pq"
)

type CLI struct {
	DBConn string `env:"UR_DB_URL" default:"postgres://user:password@localhost/ur?sslmode=disable"`
}

func (cli *CLI) Run() error {
	log.SetFlags(log.Ltime | log.Ldate)
	log.SetOutput(os.Stdout)

	db, err := sql.Open("postgres", cli.DBConn)
	if err != nil {
		return fmt.Errorf("database: %w", err)
	}
	err = setupDB(db)
	if err != nil {
		return fmt.Errorf("database: %w", err)
	}

	for {
		runAggregation(db)
		// Sleep until one minute past next midnight
		sleepUntilNext(24*time.Hour, 1*time.Minute)
	}
}

func runAggregation(db *sql.DB) {
	since := maxIndexedDay(db, "VersionSummary")
	log.Println("Aggregating VersionSummary data since", since)
	rows, err := aggregateVersionSummary(db, since.Add(24*time.Hour))
	if err != nil {
		log.Println("aggregate:", err)
	}
	log.Println("Inserted", rows, "rows")

	since = maxIndexedDay(db, "Performance")
	log.Println("Aggregating Performance data since", since)
	rows, err = aggregatePerformance(db, since.Add(24*time.Hour))
	if err != nil {
		log.Println("aggregate:", err)
	}
	log.Println("Inserted", rows, "rows")

	since = maxIndexedDay(db, "BlockStats")
	log.Println("Aggregating BlockStats data since", since)
	rows, err = aggregateBlockStats(db, since.Add(24*time.Hour))
	if err != nil {
		log.Println("aggregate:", err)
	}
	log.Println("Inserted", rows, "rows")
}

func sleepUntilNext(intv, margin time.Duration) {
	now := time.Now().UTC()
	next := now.Truncate(intv).Add(intv).Add(margin)
	log.Println("Sleeping until", next)
	time.Sleep(next.Sub(now))
}

func setupDB(db *sql.DB) error {
	_, err := db.Exec(`CREATE TABLE IF NOT EXISTS VersionSummary (
		Day TIMESTAMP NOT NULL,
		Version VARCHAR(8) NOT NULL,
		Count INTEGER NOT NULL
	)`)
	if err != nil {
		return err
	}

	_, err = db.Exec(`CREATE TABLE IF NOT EXISTS Performance (
		Day TIMESTAMP NOT NULL,
		TotFiles INTEGER NOT NULL,
		TotMiB INTEGER NOT NULL,
		SHA256Perf DOUBLE PRECISION NOT NULL,
		MemorySize INTEGER NOT NULL,
		MemoryUsageMiB INTEGER NOT NULL
	)`)
	if err != nil {
		return err
	}

	_, err = db.Exec(`CREATE TABLE IF NOT EXISTS BlockStats (
		Day TIMESTAMP NOT NULL,
		Reports INTEGER NOT NULL,
		Total BIGINT NOT NULL,
		Renamed BIGINT NOT NULL,
		Reused BIGINT NOT NULL,
		Pulled BIGINT NOT NULL,
		CopyOrigin BIGINT NOT NULL,
		CopyOriginShifted BIGINT NOT NULL,
		CopyElsewhere BIGINT NOT NULL
	)`)
	if err != nil {
		return err
	}

	var t string

	row := db.QueryRow(`SELECT 'UniqueDayVersionIndex'::regclass`)
	if err := row.Scan(&t); err != nil {
		_, _ = db.Exec(`CREATE UNIQUE INDEX UniqueDayVersionIndex ON VersionSummary (Day, Version)`)
	}

	row = db.QueryRow(`SELECT 'VersionDayIndex'::regclass`)
	if err := row.Scan(&t); err != nil {
		_, _ = db.Exec(`CREATE INDEX VersionDayIndex ON VersionSummary (Day)`)
	}

	row = db.QueryRow(`SELECT 'PerformanceDayIndex'::regclass`)
	if err := row.Scan(&t); err != nil {
		_, _ = db.Exec(`CREATE INDEX PerformanceDayIndex ON Performance (Day)`)
	}

	row = db.QueryRow(`SELECT 'BlockStatsDayIndex'::regclass`)
	if err := row.Scan(&t); err != nil {
		_, _ = db.Exec(`CREATE INDEX BlockStatsDayIndex ON BlockStats (Day)`)
	}

	return nil
}

func maxIndexedDay(db *sql.DB, table string) time.Time {
	var t time.Time
	row := db.QueryRow("SELECT MAX(DATE_TRUNC('day', Day)) FROM " + table)
	err := row.Scan(&t)
	if err != nil {
		return time.Time{}
	}
	return t
}

func aggregateVersionSummary(db *sql.DB, since time.Time) (int64, error) {
	res, err := db.Exec(`INSERT INTO VersionSummary (
	SELECT
		DATE_TRUNC('day', Received) AS Day,
		SUBSTRING(Report->>'version' FROM '^v\d.\d+') AS Ver,
		COUNT(*) AS Count
	FROM ReportsJson
	WHERE
		Received > $1
		AND Received < DATE_TRUNC('day', NOW())
		AND Report->>'version' like 'v_.%'
	GROUP BY Day, Ver
	);
	`, since)
	if err != nil {
		return 0, err
	}

	return res.RowsAffected()
}

func aggregatePerformance(db *sql.DB, since time.Time) (int64, error) {
	res, err := db.Exec(`INSERT INTO Performance (
	SELECT
		DATE_TRUNC('day', Received) AS Day,
		AVG((Report->>'totFiles')::numeric) As TotFiles,
		AVG((Report->>'totMiB')::numeric) As TotMiB,
		AVG((Report->>'sha256Perf')::numeric) As SHA256Perf,
		AVG((Report->>'memorySize')::numeric) As MemorySize,
		AVG((Report->>'memoryUsageMiB')::numeric) As MemoryUsageMiB
	FROM ReportsJson
	WHERE
		Received > $1
		AND Received < DATE_TRUNC('day', NOW())
		AND Report->>'version' like 'v_.%'
		/* Some custom implementation reported bytes when we expect megabytes, cap at petabyte */
		AND (Report->>'memorySize')::numeric < 1073741824
	GROUP BY Day
	);
	`, since)
	if err != nil {
		return 0, err
	}

	return res.RowsAffected()
}

func aggregateBlockStats(db *sql.DB, since time.Time) (int64, error) {
	// Filter out anything prior 0.14.41 as that has sum aggregations which
	// made no sense.
	res, err := db.Exec(`INSERT INTO BlockStats (
	SELECT
		DATE_TRUNC('day', Received) AS Day,
		COUNT(1) As Reports,
		SUM((Report->'blockStats'->>'total')::numeric)::bigint AS Total,
		SUM((Report->'blockStats'->>'renamed')::numeric)::bigint AS Renamed,
		SUM((Report->'blockStats'->>'reused')::numeric)::bigint AS Reused,
		SUM((Report->'blockStats'->>'pulled')::numeric)::bigint AS Pulled,
		SUM((Report->'blockStats'->>'copyOrigin')::numeric)::bigint AS CopyOrigin,
		SUM((Report->'blockStats'->>'copyOriginShifted')::numeric)::bigint AS CopyOriginShifted,
		SUM((Report->'blockStats'->>'copyElsewhere')::numeric)::bigint AS CopyElsewhere
	FROM ReportsJson
	WHERE
		Received > $1
		AND Received < DATE_TRUNC('day', NOW())
		AND (Report->>'urVersion')::numeric >= 3
		AND Report->>'version' like 'v_.%'
		AND Report->>'version' NOT LIKE 'v0.14.40%'
		AND Report->>'version' NOT LIKE 'v0.14.39%'
		AND Report->>'version' NOT LIKE 'v0.14.38%'
	GROUP BY Day
	);
	`, since)
	if err != nil {
		return 0, err
	}

	return res.RowsAffected()
}
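For context, the deleted aggregator's scheduling relies on time.Truncate aligning to midnight UTC. A minimal standalone sketch of that arithmetic, using a made-up fixed time instead of time.Now():

package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical "now": 2024-01-02 15:04:05 UTC.
	now := time.Date(2024, 1, 2, 15, 4, 5, 0, time.UTC)
	intv, margin := 24*time.Hour, time.Minute

	// Truncate to the start of the current UTC day, step one interval
	// forward, then add the margin: one minute past next midnight.
	next := now.Truncate(intv).Add(intv).Add(margin)
	fmt.Println(next) // 2024-01-03 00:01:00 +0000 UTC
}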
@@ -8,22 +8,22 @@ package main

 import (
 	"log"
+	"log/slog"
 	"os"

 	"github.com/alecthomas/kong"
-	"github.com/syncthing/syncthing/cmd/infra/ursrv/aggregate"
 	"github.com/syncthing/syncthing/cmd/infra/ursrv/serve"
 	_ "github.com/syncthing/syncthing/lib/automaxprocs"
 )

 type CLI struct {
 	Serve     serve.CLI     `cmd:"" default:""`
-	Aggregate aggregate.CLI `cmd:""`
 }

 func main() {
-	log.SetFlags(log.Ltime | log.Ldate | log.Lshortfile)
-	log.SetOutput(os.Stdout)
+	slog.SetDefault(slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
+		Level: slog.LevelInfo,
+	})))

 	var cli CLI
 	ctx := kong.Parse(&cli)
@@ -1,276 +0,0 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package serve

import (
	"regexp"
	"sort"
	"strconv"
	"strings"
)

type analytic struct {
	Key        string
	Count      int
	Percentage float64
	Items      []analytic `json:",omitempty"`
}

type analyticList []analytic

func (l analyticList) Less(a, b int) bool {
	if l[a].Key == "Others" {
		return false
	}
	if l[b].Key == "Others" {
		return true
	}
	return l[b].Count < l[a].Count // inverse
}

func (l analyticList) Swap(a, b int) {
	l[a], l[b] = l[b], l[a]
}

func (l analyticList) Len() int {
	return len(l)
}

// Returns a list of frequency analytics for a given list of strings.
func analyticsFor(ss []string, cutoff int) []analytic {
	m := make(map[string]int)
	t := 0
	for _, s := range ss {
		m[s]++
		t++
	}

	l := make([]analytic, 0, len(m))
	for k, c := range m {
		l = append(l, analytic{
			Key:        k,
			Count:      c,
			Percentage: 100 * float64(c) / float64(t),
		})
	}

	sort.Sort(analyticList(l))

	if cutoff > 0 && len(l) > cutoff {
		c := 0
		for _, i := range l[cutoff:] {
			c += i.Count
		}
		l = append(l[:cutoff], analytic{
			Key:        "Others",
			Count:      c,
			Percentage: 100 * float64(c) / float64(t),
		})
	}

	return l
}

// Find the points at which certain penetration levels are met
func penetrationLevels(as []analytic, points []float64) []analytic {
	sort.Slice(as, func(a, b int) bool {
		return versionLess(as[b].Key, as[a].Key)
	})

	var res []analytic

	idx := 0
	sum := 0.0
	for _, a := range as {
		sum += a.Percentage
		if sum >= points[idx] {
			a.Count = int(points[idx])
			a.Percentage = sum
			res = append(res, a)
			idx++
			if idx == len(points) {
				break
			}
		}
	}
	return res
}

func statsForInts(data []int) [4]float64 {
	var res [4]float64
	if len(data) == 0 {
		return res
	}

	sort.Ints(data)
	res[0] = float64(data[int(float64(len(data))*0.05)])
	res[1] = float64(data[len(data)/2])
	res[2] = float64(data[int(float64(len(data))*0.95)])
	res[3] = float64(data[len(data)-1])
	return res
}

func statsForInt64s(data []int64) [4]float64 {
	var res [4]float64
	if len(data) == 0 {
		return res
	}

	sort.Slice(data, func(a, b int) bool {
		return data[a] < data[b]
	})

	res[0] = float64(data[int(float64(len(data))*0.05)])
	res[1] = float64(data[len(data)/2])
	res[2] = float64(data[int(float64(len(data))*0.95)])
	res[3] = float64(data[len(data)-1])
	return res
}

func statsForFloats(data []float64) [4]float64 {
	var res [4]float64
	if len(data) == 0 {
		return res
	}

	sort.Float64s(data)
	res[0] = data[int(float64(len(data))*0.05)]
	res[1] = data[len(data)/2]
	res[2] = data[int(float64(len(data))*0.95)]
	res[3] = data[len(data)-1]
	return res
}

func group(by func(string) string, as []analytic, perGroup int, otherPct float64) []analytic {
	var res []analytic

next:
	for _, a := range as {
		group := by(a.Key)
		for i := range res {
			if res[i].Key == group {
				res[i].Count += a.Count
				res[i].Percentage += a.Percentage
				if len(res[i].Items) < perGroup {
					res[i].Items = append(res[i].Items, a)
				}
				continue next
			}
		}
		res = append(res, analytic{
			Key:        group,
			Count:      a.Count,
			Percentage: a.Percentage,
			Items:      []analytic{a},
		})
	}

	sort.Sort(analyticList(res))

	if otherPct > 0 {
		// Groups with less than otherPct go into "Other"
		other := analytic{
			Key: "Other",
		}
		for i := 0; i < len(res); i++ {
			if res[i].Percentage < otherPct || res[i].Key == "Other" {
				other.Count += res[i].Count
				other.Percentage += res[i].Percentage
				res = append(res[:i], res[i+1:]...)
				i--
			}
		}
		if other.Count > 0 {
			res = append(res, other)
		}
	}

	return res
}

func byVersion(s string) string {
	parts := strings.Split(s, ".")
	if len(parts) >= 2 {
		return strings.Join(parts[:2], ".")
	}
	return s
}

func byPlatform(s string) string {
	parts := strings.Split(s, "-")
	if len(parts) >= 2 {
		return parts[0]
	}
	return s
}

var numericGoVersion = regexp.MustCompile(`^go[0-9]\.[0-9]+`)

func byCompiler(s string) string {
	if m := numericGoVersion.FindString(s); m != "" {
		return m
	}
	return "Other"
}

func versionLess(a, b string) bool {
	arel, apre := versionParts(a)
	brel, bpre := versionParts(b)

	minlen := len(arel)
	if l := len(brel); l < minlen {
		minlen = l
	}

	for i := 0; i < minlen; i++ {
		if arel[i] != brel[i] {
			return arel[i] < brel[i]
		}
	}

	// Longer version is newer, when the preceding parts are equal
	if len(arel) != len(brel) {
		return len(arel) < len(brel)
	}

	if apre != bpre {
		// "(+dev)" versions are ahead
		if apre == plusStr {
			return false
		}
		if bpre == plusStr {
			return true
		}
		return apre < bpre
	}

	// don't actually care how the prerelease stuff compares for our purposes
	return false
}

// Split a version as returned from transformVersion into parts.
// "1.2.3-beta.2" -> []int{1, 2, 3}, "beta.2"
func versionParts(v string) ([]int, string) {
	parts := strings.SplitN(v[1:], " ", 2) // " (+dev)" versions
	if len(parts) == 1 {
		parts = strings.SplitN(parts[0], "-", 2) // "-rc.1" type versions
	}
	fields := strings.Split(parts[0], ".")

	release := make([]int, len(fields))
	for i, s := range fields {
		v, _ := strconv.Atoi(s)
		release[i] = v
	}

	var prerelease string
	if len(parts) > 1 {
		prerelease = parts[1]
	}

	return release, prerelease
}
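A quick sketch of how analyticsFor behaves, using invented version strings rather than real report data; the two most frequent keys survive the cutoff and the remainder collapses into "Others":

package serve

import "fmt"

// demoAnalytics is a hypothetical usage example, not part of the original
// file: three distinct versions, cutoff 2.
func demoAnalytics() {
	versions := []string{
		"v1.27.0", "v1.27.0", "v1.27.0",
		"v1.26.1", "v1.26.1",
		"v0.14.52",
	}
	for _, a := range analyticsFor(versions, 2) {
		fmt.Printf("%s %d %.1f%%\n", a.Key, a.Count, a.Percentage)
	}
	// v1.27.0 3 50.0%
	// v1.26.1 2 33.3%
	// Others 1 16.7%
}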
@@ -1,131 +0,0 @@
// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package serve

import (
	"bytes"
	"fmt"
	"strings"
)

type NumberType int

const (
	NumberMetric NumberType = iota
	NumberBinary
	NumberDuration
)

func number(ntype NumberType, v float64) string {
	switch ntype {
	case NumberMetric:
		return metric(v)
	case NumberDuration:
		return duration(v)
	case NumberBinary:
		return binary(v)
	default:
		return metric(v)
	}
}

type suffix struct {
	Suffix     string
	Multiplier float64
}

var metricSuffixes = []suffix{
	{"G", 1e9},
	{"M", 1e6},
	{"k", 1e3},
}

var binarySuffixes = []suffix{
	{"Gi", 1 << 30},
	{"Mi", 1 << 20},
	{"Ki", 1 << 10},
}

var durationSuffix = []suffix{
	{"year", 365 * 24 * 60 * 60},
	{"month", 30 * 24 * 60 * 60},
	{"day", 24 * 60 * 60},
	{"hour", 60 * 60},
	{"minute", 60},
	{"second", 1},
}

func metric(v float64) string {
	return withSuffix(v, metricSuffixes, false)
}

func binary(v float64) string {
	return withSuffix(v, binarySuffixes, false)
}

func duration(v float64) string {
	return withSuffix(v, durationSuffix, true)
}

func withSuffix(v float64, ps []suffix, pluralize bool) string {
	for _, p := range ps {
		if v >= p.Multiplier {
			suffix := p.Suffix
			if pluralize && v/p.Multiplier != 1.0 {
				suffix += "s"
			}
			// If the number only has decimal zeroes, strip em off.
			num := strings.TrimRight(strings.TrimRight(fmt.Sprintf("%.1f", v/p.Multiplier), "0"), ".")
			return fmt.Sprintf("%s %s", num, suffix)
		}
	}
	return strings.TrimRight(strings.TrimRight(fmt.Sprintf("%.1f", v), "0"), ".")
}

// commatize returns a number with sep as thousands separators. Handles
// integers and plain floats.
func commatize(sep, s string) string {
	// If no dot, don't do anything.
	if !strings.ContainsRune(s, '.') {
		return s
	}
	var b bytes.Buffer
	fs := strings.SplitN(s, ".", 2)

	l := len(fs[0])
	for i := range fs[0] {
		b.Write([]byte{s[i]})
		if i < l-1 && (l-i)%3 == 1 {
			b.WriteString(sep)
		}
	}

	if len(fs) > 1 && len(fs[1]) > 0 {
		b.WriteString(".")
		b.WriteString(fs[1])
	}

	return b.String()
}

func proportion(m map[string]int, count int) float64 {
	total := 0
	isMax := true
	for _, n := range m {
		total += n
		if n > count {
			isMax = false
		}
	}
	pct := (100 * float64(count)) / float64(total)
	// To avoid rounding errors in the template, surpassing 100% and breaking
	// the progress bars.
	if isMax && len(m) > 1 && count != total {
		pct -= 0.01
	}
	return pct
}
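Some hand-checked input/output pairs for these helpers (the values are arbitrary examples, not from the change):

package serve

import "fmt"

// demoFormat exercises the formatting helpers above; hypothetical, for
// illustration only.
func demoFormat() {
	fmt.Println(metric(1234567))              // 1.2 M
	fmt.Println(binary(1536))                 // 1.5 Ki
	fmt.Println(duration(86400))              // 1 day
	fmt.Println(duration(5400))               // 1.5 hours
	fmt.Println(commatize(",", "1234567.89")) // 1,234,567.89
	fmt.Println(commatize(",", "1234567"))    // 1234567 (no dot: unchanged)
}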
@@ -11,16 +11,36 @@ import (
 	"github.com/prometheus/client_golang/prometheus/promauto"
 )

-var metricReportsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
-	Namespace: "syncthing",
-	Subsystem: "ursrv",
-	Name:      "reports_total",
-}, []string{"version"})
+var (
+	metricReportsTotal = promauto.NewCounterVec(prometheus.CounterOpts{
+		Namespace: "syncthing",
+		Subsystem: "ursrv_v2",
+		Name:      "incoming_reports_total",
+	}, []string{"result"})
+	metricsCollectsTotal = promauto.NewCounter(prometheus.CounterOpts{
+		Namespace: "syncthing",
+		Subsystem: "ursrv_v2",
+		Name:      "collects_total",
+	})
+	metricsCollectSecondsTotal = promauto.NewCounter(prometheus.CounterOpts{
+		Namespace: "syncthing",
+		Subsystem: "ursrv_v2",
+		Name:      "collect_seconds_total",
+	})
+	metricsCollectSecondsLast = promauto.NewGauge(prometheus.GaugeOpts{
+		Namespace: "syncthing",
+		Subsystem: "ursrv_v2",
+		Name:      "collect_seconds_last",
+	})
+	metricsWriteSecondsLast = promauto.NewGauge(prometheus.GaugeOpts{
+		Namespace: "syncthing",
+		Subsystem: "ursrv_v2",
+		Name:      "write_seconds_last",
+	})
+)

 func init() {
 	metricReportsTotal.WithLabelValues("fail")
-	metricReportsTotal.WithLabelValues("duplicate")
-	metricReportsTotal.WithLabelValues("v1")
-	metricReportsTotal.WithLabelValues("v2")
-	metricReportsTotal.WithLabelValues("v3")
+	metricReportsTotal.WithLabelValues("replace")
+	metricReportsTotal.WithLabelValues("accept")
 }
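The relabelled counter now tracks ingest outcomes rather than report versions; presumably the receiving handler bumps it along these lines (a hypothetical call site, the real handlers are in parts of the diff not shown here):

package serve

// recordOutcome is a hypothetical helper showing how the relabelled
// counter would be used; outcome is one of "accept", "replace", "fail".
func recordOutcome(outcome string) {
	metricReportsTotal.WithLabelValues(outcome).Inc()
}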
cmd/infra/ursrv/serve/prometheus.go (new file, 314 lines)
@@ -0,0 +1,314 @@
// Copyright (C) 2024 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package serve

import (
	"reflect"
	"slices"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/syncthing/syncthing/lib/ur/contract"
)

const namePrefix = "syncthing_usage_"

type metricsSet struct {
	srv *server

	gauges         map[string]prometheus.Gauge
	gaugeVecs      map[string]*prometheus.GaugeVec
	gaugeVecLabels map[string][]string
	summaries      map[string]*metricSummary

	collectMut    sync.Mutex
	collectCutoff time.Duration
}

func newMetricsSet(srv *server) *metricsSet {
	s := &metricsSet{
		srv:            srv,
		gauges:         make(map[string]prometheus.Gauge),
		gaugeVecs:      make(map[string]*prometheus.GaugeVec),
		gaugeVecLabels: make(map[string][]string),
		summaries:      make(map[string]*metricSummary),
		collectCutoff:  -24 * time.Hour,
	}

	var initForType func(reflect.Type)
	initForType = func(t reflect.Type) {
		for i := 0; i < t.NumField(); i++ {
			field := t.Field(i)
			if field.Type.Kind() == reflect.Struct {
				initForType(field.Type)
				continue
			}
			name, typ, label := fieldNameTypeLabel(field)
			sname, labels := nameConstLabels(name)
			switch typ {
			case "gauge":
				s.gauges[name] = prometheus.NewGauge(prometheus.GaugeOpts{
					Name:        namePrefix + sname,
					ConstLabels: labels,
				})
			case "summary":
				s.summaries[name] = newMetricSummary(namePrefix+sname, nil, labels)
			case "gaugeVec":
				s.gaugeVecLabels[name] = append(s.gaugeVecLabels[name], label)
			case "summaryVec":
				s.summaries[name] = newMetricSummary(namePrefix+sname, []string{label}, labels)
			}
		}
	}
	initForType(reflect.ValueOf(contract.Report{}).Type())

	for name, labels := range s.gaugeVecLabels {
		s.gaugeVecs[name] = prometheus.NewGaugeVec(prometheus.GaugeOpts{
			Name: namePrefix + name,
		}, labels)
	}

	return s
}

func fieldNameTypeLabel(rf reflect.StructField) (string, string, string) {
	metric := rf.Tag.Get("metric")
	name, typ, ok := strings.Cut(metric, ",")
	if !ok {
		return "", "", ""
	}
	gv, label, ok := strings.Cut(typ, ":")
	if ok {
		typ = gv
	}
	return name, typ, label
}

func nameConstLabels(name string) (string, prometheus.Labels) {
	if name == "-" {
		return "", nil
	}
	name, labels, ok := strings.Cut(name, "{")
	if !ok {
		return name, nil
	}
	lls := strings.Split(labels[:len(labels)-1], ",")
	m := make(map[string]string)
	for _, l := range lls {
		k, v, _ := strings.Cut(l, "=")
		m[k] = v
	}
	return name, m
}

func (s *metricsSet) addReport(r *contract.Report) {
	gaugeVecs := make(map[string][]string)
	s.addReportStruct(reflect.ValueOf(r).Elem(), gaugeVecs)
	for name, lv := range gaugeVecs {
		s.gaugeVecs[name].WithLabelValues(lv...).Add(1)
	}
}

func (s *metricsSet) addReportStruct(v reflect.Value, gaugeVecs map[string][]string) {
	t := v.Type()
	for i := 0; i < v.NumField(); i++ {
		field := v.Field(i)
		if field.Kind() == reflect.Struct {
			s.addReportStruct(field, gaugeVecs)
			continue
		}

		name, typ, label := fieldNameTypeLabel(t.Field(i))
		switch typ {
		case "gauge":
			switch v := field.Interface().(type) {
			case int:
				s.gauges[name].Add(float64(v))
			case string:
				s.gaugeVecs[name].WithLabelValues(v).Add(1)
			case bool:
				if v {
					s.gauges[name].Add(1)
				}
			}
		case "gaugeVec":
			var labelValue string
			switch v := field.Interface().(type) {
			case string:
				labelValue = v
			case int:
				labelValue = strconv.Itoa(v)
			case map[string]int:
				for k, v := range v {
					labelValue = k
					field.SetInt(int64(v))
					break
				}
			}
			if _, ok := gaugeVecs[name]; !ok {
				gaugeVecs[name] = make([]string, len(s.gaugeVecLabels[name]))
			}
			for i, l := range s.gaugeVecLabels[name] {
				if l == label {
					gaugeVecs[name][i] = labelValue
					break
				}
			}
		case "summary", "summaryVec":
			switch v := field.Interface().(type) {
			case int:
				s.summaries[name].Observe("", float64(v))
			case float64:
				s.summaries[name].Observe("", v)
			case []int:
				for _, v := range v {
					s.summaries[name].Observe("", float64(v))
				}
			case map[string]int:
				for k, v := range v {
					if k == "" {
						// avoid empty string labels as those are the sign
						// of a non-vec summary
						k = "unknown"
					}
					s.summaries[name].Observe(k, float64(v))
				}
			}
		}
	}
}

func (s *metricsSet) Describe(c chan<- *prometheus.Desc) {
	for _, g := range s.gauges {
		g.Describe(c)
	}
	for _, g := range s.gaugeVecs {
		g.Describe(c)
	}
	for _, g := range s.summaries {
		g.Describe(c)
	}
}

func (s *metricsSet) Collect(c chan<- prometheus.Metric) {
	s.collectMut.Lock()
	defer s.collectMut.Unlock()

	t0 := time.Now()
	defer func() {
		dur := time.Since(t0).Seconds()
		metricsCollectSecondsLast.Set(dur)
		metricsCollectSecondsTotal.Add(dur)
		metricsCollectsTotal.Inc()
	}()

	for _, g := range s.gauges {
		g.Set(0)
	}
	for _, g := range s.gaugeVecs {
		g.Reset()
	}
	for _, g := range s.summaries {
		g.Reset()
	}

	cutoff := time.Now().Add(s.collectCutoff)
	s.srv.reports.Range(func(key string, r *contract.Report) bool {
		if s.collectCutoff < 0 && r.Received.Before(cutoff) {
			s.srv.reports.Delete(key)
			return true
		}
		s.addReport(r)
		return true
	})

	for _, g := range s.gauges {
		c <- g
	}
	for _, g := range s.gaugeVecs {
		g.Collect(c)
	}
	for _, g := range s.summaries {
		g.Collect(c)
	}
}

type metricSummary struct {
	name   string
	values map[string][]float64
	zeroes map[string]int

	qDesc     *prometheus.Desc
	countDesc *prometheus.Desc
	sumDesc   *prometheus.Desc
	zDesc     *prometheus.Desc
}

func newMetricSummary(name string, labels []string, constLabels prometheus.Labels) *metricSummary {
	return &metricSummary{
		name:      name,
		values:    make(map[string][]float64),
		zeroes:    make(map[string]int),
		qDesc:     prometheus.NewDesc(name, "", append(labels, "quantile"), constLabels),
		countDesc: prometheus.NewDesc(name+"_nonzero_count", "", labels, constLabels),
		sumDesc:   prometheus.NewDesc(name+"_sum", "", labels, constLabels),
		zDesc:     prometheus.NewDesc(name+"_zero_count", "", labels, constLabels),
	}
}

func (q *metricSummary) Observe(labelValue string, v float64) {
	if v == 0 {
		q.zeroes[labelValue]++
		return
	}
	q.values[labelValue] = append(q.values[labelValue], v)
}

func (q *metricSummary) Describe(c chan<- *prometheus.Desc) {
	c <- q.qDesc
	c <- q.countDesc
	c <- q.sumDesc
	c <- q.zDesc
}

func (q *metricSummary) Collect(c chan<- prometheus.Metric) {
	for lv, vs := range q.values {
		var labelVals []string
		if lv != "" {
			labelVals = []string{lv}
		}

		c <- prometheus.MustNewConstMetric(q.countDesc, prometheus.GaugeValue, float64(len(vs)), labelVals...)
		c <- prometheus.MustNewConstMetric(q.zDesc, prometheus.GaugeValue, float64(q.zeroes[lv]), labelVals...)

		var sum float64
		for _, v := range vs {
			sum += v
		}
		c <- prometheus.MustNewConstMetric(q.sumDesc, prometheus.GaugeValue, sum, labelVals...)

		if len(vs) == 0 {
			return
		}

		slices.Sort(vs)
		c <- prometheus.MustNewConstMetric(q.qDesc, prometheus.GaugeValue, vs[0], append(labelVals, "0")...)
		c <- prometheus.MustNewConstMetric(q.qDesc, prometheus.GaugeValue, vs[len(vs)*5/100], append(labelVals, "0.05")...)
		c <- prometheus.MustNewConstMetric(q.qDesc, prometheus.GaugeValue, vs[len(vs)/2], append(labelVals, "0.5")...)
		c <- prometheus.MustNewConstMetric(q.qDesc, prometheus.GaugeValue, vs[len(vs)*9/10], append(labelVals, "0.9")...)
		c <- prometheus.MustNewConstMetric(q.qDesc, prometheus.GaugeValue, vs[len(vs)*95/100], append(labelVals, "0.95")...)
		c <- prometheus.MustNewConstMetric(q.qDesc, prometheus.GaugeValue, vs[len(vs)-1], append(labelVals, "1")...)
	}
}

func (q *metricSummary) Reset() {
	clear(q.values)
	clear(q.zeroes)
}
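The whole set is driven by `metric` struct tags on the report type: fieldNameTypeLabel splits a tag as name[,type[:label]], and nameConstLabels further splits the name as name{key=value,...}. An invented struct showing each form the parser accepts (these fields and metric names are made up; the real field set lives in lib/ur/contract.Report):

package serve

// exampleReport is a hypothetical illustration of the tag grammar parsed
// by fieldNameTypeLabel and nameConstLabels above.
type exampleReport struct {
	// summary: quantiles, sum and zero-count over all reports.
	TotFiles int `metric:"total_files,summary"`
	// gaugeVec: one series per value of the "platform" label.
	Platform string `metric:"reports,gaugeVec:platform"`
	// summaryVec with a constant label baked into the name:
	// syncthing_usage_folder_fs{kind="type"}.
	FsType map[string]int `metric:"folder_fs{kind=type},summaryVec:filesystem"`
	// "-" (or any tag without a comma) is skipped entirely.
	URVersion int `metric:"-"`
}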
(Further diffs suppressed by the viewer: one file too large, three files with overlong lines, and several binary files not shown — among them two images removed in this commit, previously 4.8 KiB and 61 KiB.)
@ -1,623 +0,0 @@
|
|||||||
<!DOCTYPE html>
|
|
||||||
<!--
|
|
||||||
Copyright (C) 2014 Jakob Borg and other contributors. All rights reserved.
|
|
||||||
Use of this source code is governed by an MIT-style license that can be
|
|
||||||
found in the LICENSE file.
|
|
||||||
-->
|
|
||||||
<html lang="en">
|
|
||||||
<head>
|
|
||||||
<meta charset="utf-8">
|
|
||||||
<meta http-equiv="X-UA-Compatible" content="IE=edge">
|
|
||||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
|
||||||
<meta name="description" content="">
|
|
||||||
<meta name="author" content="">
|
|
||||||
<link rel="shortcut icon" href="static/assets/img/favicon.png">
|
|
||||||
|
|
||||||
<title>Syncthing Usage Reports</title>
|
|
||||||
<link href="static/bootstrap/css/bootstrap.min.css" rel="stylesheet">
|
|
||||||
<script type="text/javascript" src="https://ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js"></script>
|
|
||||||
<script type="text/javascript" src="static/bootstrap/js/bootstrap.min.js"></script>
|
|
||||||
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/leaflet/0.7.7/leaflet.css">
|
|
||||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/leaflet/0.7.7/leaflet.js"></script>
|
|
||||||
<script src="https://cdn.jsdelivr.net/npm/heatmapjs@2.0.2/heatmap.min.js"></script>
|
|
||||||
<script src="https://cdn.jsdelivr.net/npm/leaflet-heatmap@1.0.0/leaflet-heatmap.js"></script>
|
|
||||||
|
|
||||||
<style type="text/css">
|
|
||||||
body {
|
|
||||||
margin: 40px;
|
|
||||||
font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";
|
|
||||||
}
|
|
||||||
tr.main td {
|
|
||||||
font-weight: bold;
|
|
||||||
}
|
|
||||||
tr.child td.first {
|
|
||||||
padding-left: 2em;
|
|
||||||
}
|
|
||||||
.progress-bar {
|
|
||||||
overflow:hidden;
|
|
||||||
white-space:nowrap;
|
|
||||||
text-overflow: ellipsis;
|
|
||||||
}
|
|
||||||
</style>
|
|
||||||
<script type="text/javascript"
|
|
||||||
src='https://www.google.com/jsapi?autoload={
|
|
||||||
"modules":[{
|
|
||||||
"name":"visualization",
|
|
||||||
"version":"1",
|
|
||||||
"packages":["corechart"]
|
|
||||||
}]
|
|
||||||
}'></script>
|
|
||||||
|
|
||||||
<script type="text/javascript">
|
|
||||||
google.setOnLoadCallback(drawVersionChart);
|
|
||||||
google.setOnLoadCallback(drawBlockStatsChart);
|
|
||||||
google.setOnLoadCallback(drawPerformanceCharts);
|
|
||||||
|
|
||||||
function drawVersionChart() {
|
|
||||||
// Summary version chart for versions that at some point in the chart
|
|
||||||
// reaches 250 devices. This filters out versions that are old and
|
|
||||||
// uninteresting yet linger forever with like four users.
|
|
||||||
var jsonData = $.ajax({url: "summary.json?min=250", dataType:"json", async: false}).responseText;
|
|
||||||
var rows = JSON.parse(jsonData);
|
|
||||||
|
|
||||||
var data = new google.visualization.DataTable();
|
|
||||||
data.addColumn('date', 'Day');
|
|
||||||
for (var i = 1; i < rows[0].length; i++){
|
|
||||||
data.addColumn('number', rows[0][i]);
|
|
||||||
}
|
|
||||||
for (var i = 1; i < rows.length; i++){
|
|
||||||
rows[i][0] = new Date(rows[i][0]);
|
|
||||||
data.addRow(rows[i]);
|
|
||||||
};
|
|
||||||
|
|
||||||
var options = {
|
|
||||||
legend: { position: 'bottom', alignment: 'center' },
|
|
||||||
isStacked: true,
|
|
||||||
colors: ['rgb(102,194,165)','rgb(252,141,98)','rgb(141,160,203)','rgb(231,138,195)','rgb(166,216,84)','rgb(255,217,47)'],
|
|
||||||
chartArea: {left: 80, top: 20, width: '1020', height: '300'},
|
|
||||||
};
|
|
||||||
|
|
||||||
var chart = new google.visualization.AreaChart(document.getElementById('versionChart'));
|
|
||||||
chart.draw(data, options);
|
|
||||||
}
|
|
||||||
|
|
||||||
function formatGibibytes(gibibytes, decimals) {
|
|
||||||
if(gibibytes == 0) return '0 GiB';
|
|
||||||
var k = 1024,
|
|
||||||
dm = decimals || 2,
|
|
||||||
sizes = ['GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'],
|
|
||||||
i = Math.floor(Math.log(gibibytes) / Math.log(k));
|
|
||||||
if (i < 0) {
|
|
||||||
sizes = 'MiB';
|
|
||||||
} else {
|
|
||||||
sizes = sizes[i];
|
|
||||||
}
|
|
||||||
return parseFloat((gibibytes / Math.pow(k, i)).toFixed(dm)) + ' ' + sizes;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
function drawBlockStatsChart() {
|
|
||||||
var jsonData = $.ajax({url: "blockstats.json", dataType:"json", async: false}).responseText;
|
|
||||||
var rows = JSON.parse(jsonData);
|
|
||||||
|
|
||||||
var data = new google.visualization.DataTable();
|
|
||||||
data.addColumn('date', 'Day');
|
|
||||||
for (var i = 1; i < rows[0].length; i++){
|
|
||||||
data.addColumn('number', rows[0][i]);
|
|
||||||
}
|
|
||||||
|
|
||||||
var totals = [0, 0, 0, 0, 0, 0];
|
|
||||||
for (var i = 1; i < rows.length; i++){
|
|
||||||
rows[i][0] = new Date(rows[i][0]);
|
|
||||||
for (var j = 2; j < rows[i].length; j++) {
|
|
||||||
totals[j-2] += rows[i][j];
|
|
||||||
}
|
|
||||||
data.addRow(rows[i]);
|
|
||||||
};
|
|
||||||
|
|
||||||
var totalTotals = totals.reduce(function(a, b) { return a + b; }, 0);
|
|
||||||
|
|
||||||
if (totalTotals > 0) {
|
|
||||||
var content = "<table class='table'>\n"
|
|
||||||
for (var j = 2; j < rows[0].length; j++) {
|
|
||||||
content += "<tr><td><b>" + rows[0][j].replace(' (GiB)', '') + "</b></td><td>" + formatGibibytes(totals[j-2].toFixed(2)) + " (" + ((100*totals[j-2])/totalTotals).toFixed(2) +"%)</td></tr>\n";
|
|
||||||
}
|
|
||||||
content += "</table>";
|
|
||||||
document.getElementById("data-to-date").innerHTML = content;
|
|
||||||
} else {
|
|
||||||
// No data, hide it.
|
|
||||||
document.getElementById("block-stats").outerHTML = "";
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
var options = {
|
|
||||||
focusTarget: 'category',
|
|
||||||
vAxes: {0: {}, 1: {}},
|
|
||||||
series: {0: {type: 'line', targetAxisIndex:1}},
|
|
||||||
isStacked: true,
|
|
||||||
legend: {position: 'none'},
|
|
||||||
colors: ['rgb(102,194,165)','rgb(252,141,98)','rgb(141,160,203)','rgb(231,138,195)','rgb(166,216,84)','rgb(255,217,47)'],
|
|
||||||
chartArea: {left: 80, top: 20, width: '1020', height: '300'},
|
|
||||||
};
|
|
||||||
|
|
||||||
var chart = new google.visualization.AreaChart(document.getElementById('blockStatsChart'));
|
|
||||||
chart.draw(data, options);
|
|
||||||
}
|
|
||||||
|
|
||||||
function drawPerformanceCharts() {
|
|
||||||
var jsonData = $.ajax({url: "/performance.json", dataType:"json", async: false}).responseText;
|
|
||||||
var rows = JSON.parse(jsonData);
|
|
||||||
for (var i = 1; i < rows.length; i++){
|
|
||||||
rows[i][0] = new Date(rows[i][0]);
|
|
||||||
}
|
|
||||||
|
|
||||||
drawChart(rows, 1, 'Total Number of Files', 'totFilesChart', 1e6, 1);
|
|
||||||
drawChart(rows, 2, 'Total Folder Size (GiB)', 'totMiBChart', 1e6, 1024);
|
|
||||||
drawChart(rows, 3, 'Hash Performance (MiB/s)', 'hashPerfChart', 1000, 1);
|
|
||||||
drawChart(rows, 4, 'System RAM Size (GiB)', 'memSizeChart', 1e6, 1024);
|
|
||||||
drawChart(rows, 5, 'Memory Usage (MiB)', 'memUsageChart', 250, 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
function drawChart(rows, index, title, id, cutoff, divisor) {
|
|
||||||
var data = new google.visualization.DataTable();
|
|
||||||
data.addColumn('date', 'Day');
|
|
||||||
data.addColumn('number', title);
|
|
||||||
|
|
||||||
var row;
|
|
||||||
for (var i = 1; i < rows.length; i++){
|
|
||||||
row = [rows[i][0], rows[i][index] / divisor];
|
|
||||||
if (row[1] > cutoff) {
|
|
||||||
row[1] = null;
|
|
||||||
}
|
|
||||||
data.addRow(row);
|
|
||||||
}
|
|
||||||
|
|
||||||
var options = {
|
|
||||||
legend: { position: 'bottom', alignment: 'center' },
|
|
||||||
colors: ['rgb(102,194,165)','rgb(252,141,98)','rgb(141,160,203)','rgb(231,138,195)','rgb(166,216,84)','rgb(255,217,47)'],
|
|
||||||
chartArea: {left: 80, top: 20, width: '1020', height: '300'},
|
|
||||||
vAxes: {0: {minValue: 0}},
|
|
||||||
};
|
|
||||||
|
|
||||||
var chart = new google.visualization.LineChart(document.getElementById(id));
|
|
||||||
chart.draw(data, options);
|
|
||||||
}
|
|
||||||
|
|
||||||
var locations = [];
|
|
||||||
{{range $location, $weight := .locations}}
|
|
||||||
locations.push({lat:{{- $location.Latitude -}},lng:{{- $location.Longitude -}},count:Math.min(100, {{- $weight -}})});
|
|
||||||
{{- end}}
|
|
||||||
|
|
||||||
function drawHeatMap() {
|
|
||||||
if (locations.length == 0) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
var testData = {
|
|
||||||
data: locations
|
|
||||||
};
|
|
||||||
|
|
||||||
var baseLayer = L.tileLayer(
|
|
||||||
'https://tile.openstreetmap.org/{z}/{x}/{y}.png',{
|
|
||||||
attribution: '...',
|
|
||||||
maxZoom: 18
|
|
||||||
}
|
|
||||||
);
|
|
||||||
var cfg = {
|
|
||||||
"radius": 1,
|
|
||||||
"minOpacity": .25,
|
|
||||||
"maxOpacity": .8,
|
|
||||||
"scaleRadius": true,
|
|
||||||
"useLocalExtrema": true,
|
|
||||||
latField: 'lat',
|
|
||||||
lngField: 'lng',
|
|
||||||
valueField: 'count',
|
|
||||||
gradient: {
|
|
||||||
'.1': 'cyan',
|
|
||||||
'.8': 'blue',
|
|
||||||
'.95': 'red'
|
|
||||||
}
|
|
||||||
};
|
|
||||||
var heatmapLayer = new HeatmapOverlay(cfg);
|
|
||||||
|
|
||||||
var map = new L.Map('map', {
|
|
||||||
center: new L.LatLng(25, 0),
|
|
||||||
zoom: 1,
|
|
||||||
layers: [baseLayer, heatmapLayer]
|
|
||||||
});
|
|
||||||
heatmapLayer.setData(testData);
|
|
||||||
}
|
|
||||||
</script>
|
|
||||||
</head>
|
|
||||||
|
|
||||||
<body>
|
|
||||||
<div class="container">
|
|
||||||
<div class="row">
|
|
||||||
<div class="col-md-12">
|
|
||||||
<h1>Syncthing Usage Data</h1>
|
|
||||||
|
|
||||||
<h4 id="active-users">Active Users per Day and Version</h4>
|
|
||||||
<p>
|
|
||||||
This is the total number of unique users with reporting enabled, per day. Area color represents the major version.
|
|
||||||
</p>
|
|
||||||
<div class="img-thumbnail" id="versionChart" style="width: 1130px; height: 400px; padding: 10px;"></div>
|
|
||||||
|
|
||||||
<div id="block-stats">
|
|
||||||
<h4>Data Transfers per Day</h4>
|
|
||||||
<p>
|
|
||||||
This is total data transferred per day. Also shows how much data was saved (not transferred) by each of the methods syncthing uses.
|
|
||||||
</p>
|
|
||||||
<div class="img-thumbnail" id="blockStatsChart" style="width: 1130px; height: 400px; padding: 10px;"></div>
|
|
||||||
<h4 id="totals-to-date">Totals to date</h4>
|
|
||||||
<p id="data-to-date">
|
|
||||||
No data
|
|
||||||
</p>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<h4 id="metrics">Usage Metrics</h4>
|
|
||||||
<p>
|
|
||||||
This is the aggregated usage report data for the last 24 hours. Data based on <b>{{.nodes}}</b> devices that have reported in.
|
|
||||||
</p>
|
|
||||||
|
|
||||||
{{if .locations}}
|
|
||||||
<div class="img-thumbnail" id="map" style="width: 1130px; height: 400px; padding: 10px;"></div>
|
|
||||||
<p class="text-muted">
|
|
||||||
Heatmap max intensity is capped at 100 reports within a location.
|
|
||||||
</p>
|
|
||||||
<div class="panel panel-default">
|
|
||||||
<div class="panel-heading">
|
|
||||||
<h4 class="panel-title">
|
|
||||||
<a data-toggle="collapse" href="#collapseTwo">Break down per country</a>
|
|
||||||
</h4>
|
|
||||||
</div>
|
|
||||||
<div id="collapseTwo" class="panel-collapse collapse">
|
|
||||||
<div class="panel-body less-padding">
|
|
||||||
<div class="row">
|
|
||||||
<div class="col-md-6">
|
|
||||||
<table class="table table-striped">
|
|
||||||
<tbody>
|
|
||||||
{{range .countries | slice 2 1}}
|
|
||||||
<tr>
|
|
||||||
<td style="width: 45%">{{.Key}}</td>
|
|
||||||
<td style="width: 5%" class="text-right">{{if ge .Pct 10.0}}{{.Pct | printf "%.0f"}}{{else if ge .Pct 1.0}}{{.Pct | printf "%.01f"}}{{else}}{{.Pct | printf "%.02f"}}{{end}}%</td>
|
|
||||||
<td style="width: 5%" class="text-right">{{.Count}}</td>
|
|
||||||
<td>
|
|
||||||
<div class="progress-bar" role="progressbar" aria-valuenow="{{.Pct | printf "%.02f"}}" aria-valuemin="0" aria-valuemax="100" style="width: {{.Pct | printf "%.02f"}}%; height:20px"></div>
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
{{end}}
|
|
||||||
</tbody>
|
|
||||||
</table>
|
|
||||||
</div>
|
|
||||||
<div class="col-md-6">
|
|
||||||
<table class="table table-striped">
|
|
||||||
<tbody>
|
|
||||||
{{range .countries | slice 2 2}}
|
|
||||||
<tr>
|
|
||||||
<td style="width: 45%">{{.Key}}</td>
|
|
||||||
<td style="width: 5%" class="text-right">{{if ge .Pct 10.0}}{{.Pct | printf "%.0f"}}{{else if ge .Pct 1.0}}{{.Pct | printf "%.01f"}}{{else}}{{.Pct | printf "%.02f"}}{{end}}%</td>
|
|
||||||
<td style="width: 5%" class="text-right">{{.Count}}</td>
|
|
||||||
<td>
|
|
||||||
<div class="progress-bar" role="progressbar" aria-valuenow="{{.Pct | printf "%.02f"}}" aria-valuemin="0" aria-valuemax="100" style="width: {{.Pct | printf "%.02f"}}%; height:20px"></div>
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
{{end}}
|
|
||||||
</tbody>
|
|
||||||
</table>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
{{end}}
|
|
||||||
<table class="table table-striped">
|
|
||||||
<thead>
|
|
||||||
<tr>
|
|
||||||
<th></th>
|
|
||||||
<th colspan="4" class="text-center">
|
|
||||||
<a href="https://en.wikipedia.org/wiki/Percentile">Percentile</a>
|
|
||||||
</th>
|
|
||||||
</tr>
|
|
||||||
<tr>
|
|
||||||
<th></th>
|
|
||||||
<th class="text-right">5%</th>
|
|
||||||
<th class="text-right">50%</th>
|
|
||||||
<th class="text-right">95%</th>
|
|
||||||
<th class="text-right">100%</th>
|
|
||||||
</tr>
|
|
||||||
</thead>
|
|
||||||
<tbody>
|
|
||||||
{{range .categories}}
|
|
||||||
<tr>
|
|
||||||
<td>{{.Descr}}</td>
|
|
||||||
<td class="text-right">{{index .Values 0 | number .Type | commatize " "}}{{.Unit}}</td>
|
|
||||||
<td class="text-right">{{index .Values 1 | number .Type | commatize " "}}{{.Unit}}</td>
|
|
||||||
<td class="text-right">{{index .Values 2 | number .Type | commatize " "}}{{.Unit}}</td>
|
|
||||||
<td class="text-right">{{index .Values 3 | number .Type | commatize " "}}{{.Unit}}</td>
|
|
||||||
</tr>
|
|
||||||
{{end}}
|
|
||||||
</tbody>
|
|
||||||
</table>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<div class="row">
|
|
||||||
<div class="col-md-6">
|
|
||||||
<table class="table table-striped">
|
|
||||||
<thead>
|
|
||||||
<tr>
|
|
||||||
<th>Version</th><th class="text-right">Devices</th><th class="text-right">Share</th>
|
|
||||||
</tr>
|
|
||||||
</thead>
|
|
||||||
<tbody>
|
|
||||||
{{range .versions}}
|
|
||||||
{{if gt .Percentage 0.1}}
|
|
||||||
<tr class="main">
|
|
||||||
<td>{{.Key}}</td>
|
|
||||||
<td class="text-right">{{.Count}}</td>
|
|
||||||
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
|
|
||||||
</tr>
|
|
||||||
{{range .Items}}
|
|
||||||
{{if gt .Percentage 0.1}}
|
|
||||||
<tr class="child">
|
|
||||||
<td class="first">{{.Key}}</td>
|
|
||||||
<td class="text-right">{{.Count}}</td>
|
|
||||||
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
|
|
||||||
</tr>
|
|
||||||
{{end}}
|
|
||||||
{{end}}
|
|
||||||
{{end}}
|
|
||||||
{{end}}
|
|
||||||
</tbody>
|
|
||||||
</table>
|
|
||||||
<table class="table table-striped">
|
|
||||||
<thead>
|
|
||||||
<tr>
|
|
||||||
<th>Penetration Level</th>
|
|
||||||
<th>Version</th>
|
|
||||||
<th class="text-right">Actual</th>
|
|
||||||
</tr>
|
|
||||||
</thead>
|
|
||||||
<tbody>
|
|
||||||
{{range .versionPenetrations}}
|
|
||||||
<tr>
|
|
||||||
<td>{{.Count}}%</td>
|
|
||||||
<td>≥ {{.Key}}</td>
|
|
||||||
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
|
|
||||||
</tr>
|
|
||||||
{{end}}
|
|
||||||
</tbody>
|
|
||||||
</table>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<div class="col-md-6">
|
|
||||||
<table class="table table-striped">
|
|
||||||
<thead>
|
|
||||||
<tr>
|
|
||||||
<th>Platform</th>
|
|
||||||
<th class="text-right">Devices</th>
|
|
||||||
<th class="text-right">Share</th>
|
|
||||||
</tr>
|
|
||||||
</thead>
|
|
||||||
<tbody>
|
|
||||||
{{range .platforms}}
|
|
||||||
<tr class="main">
|
|
||||||
<td>{{.Key}}</td>
|
|
||||||
<td class="text-right">{{.Count}}</td>
|
|
||||||
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
|
|
||||||
</tr>
|
|
||||||
{{range .Items}}
|
|
||||||
<tr class="child">
|
|
||||||
<td class="first">{{.Key}}</td>
|
|
||||||
<td class="text-right">{{.Count}}</td>
|
|
||||||
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
|
|
||||||
</tr>
|
|
||||||
{{end}}
|
|
||||||
{{end}}
|
|
||||||
</tbody>
|
|
||||||
</table>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
</div>
|
|
||||||
<div class="row">
|
|
||||||
|
|
||||||
<div class="col-md-6">
|
|
||||||
<table class="table table-striped">
|
|
||||||
<thead>
|
|
||||||
<tr>
|
|
||||||
<th>Compiler</th>
|
|
||||||
<th class="text-right">Devices</th>
|
|
||||||
<th class="text-right">Share</th>
|
|
||||||
</tr>
|
|
||||||
</thead>
|
|
||||||
<tbody>
|
|
||||||
{{range .compilers}}
|
|
||||||
<tr class="main">
|
|
||||||
<td>{{.Key}}</td>
|
|
||||||
<td class="text-right">{{.Count}}</td>
|
|
||||||
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
|
|
||||||
</tr>
|
|
||||||
{{range .Items}}
|
|
||||||
{{if or (gt .Percentage 0.1) (eq .Key "Others")}}
|
|
||||||
<tr class="child">
|
|
||||||
<td class="first">{{.Key}}</td>
|
|
||||||
<td class="text-right">{{.Count}}</td>
|
|
||||||
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
|
|
||||||
</tr>
|
|
||||||
{{end}}
|
|
||||||
{{end}}
|
|
||||||
{{end}}
|
|
||||||
</tbody>
|
|
||||||
</table>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<div class="col-md-6">
|
|
||||||
<table class="table table-striped">
|
|
||||||
<thead>
|
|
||||||
<tr>
|
|
||||||
<th>Distribution Channel</th>
|
|
||||||
<th class="text-right">Devices</th>
|
|
||||||
<th class="text-right">Share</th>
|
|
||||||
</tr>
|
|
||||||
</thead>
|
|
||||||
<tbody>
|
|
||||||
{{range .distributions}}
|
|
||||||
<tr>
|
|
||||||
<td>{{.Key}}</td>
|
|
||||||
<td class="text-right">{{.Count}}</td>
|
|
||||||
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
|
|
||||||
</tr>
|
|
||||||
{{end}}
|
|
||||||
</tbody>
|
|
||||||
</table>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<div class="col-md-6">
|
|
||||||
<table class="table table-striped">
|
|
||||||
<thead>
|
|
||||||
<tr>
|
|
||||||
<th>Builder</th>
|
|
||||||
<th class="text-right">Devices</th>
|
|
||||||
<th class="text-right">Share</th>
|
|
||||||
</tr>
|
|
||||||
</thead>
|
|
||||||
<tbody>
|
|
||||||
{{range .builders}}
|
|
||||||
<tr>
|
|
||||||
<td>{{.Key}}</td>
|
|
||||||
<td class="text-right">{{.Count}}</td>
|
|
||||||
<td class="text-right">{{.Percentage | printf "%.01f"}}%</td>
|
|
||||||
</tr>
|
|
||||||
{{end}}
|
|
||||||
</tbody>
|
|
||||||
</table>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
</div>
|
|
||||||
|
|
||||||
<div class="row">
|
|
||||||
<div class="col-md-12">
|
|
||||||
<h4 id="features">Feature Usage</h4>
|
|
||||||
<p>
|
|
||||||
The following lists feature usage. Some features are reported per report, some are per sum of units within report (eg. devices with static addresses among all known devices per report).
|
|
||||||
Currently there are <b>{{.versionNodes.v2}}</b> devices reporting for version 2 and <b>{{.versionNodes.v3}}</b> for version 3.
|
|
||||||
</p>
|
|
||||||
</div>
|
|
||||||
</div>
|
|
||||||
|
|
||||||
|
|
||||||
<div class="row">
|
|
||||||
{{$i := counter}}
|
|
||||||
{{range $featureName := .featureOrder}}
|
|
||||||
{{$featureValues := index $.features $featureName }}
|
|
||||||
{{if $i.DrawTwoDivider}}
|
|
||||||
</div>
|
|
||||||
<div class="row">
|
|
||||||
{{end}}
|
|
||||||
{{ $i.Increment }}
|
|
||||||
<div class="col-md-6">
|
|
||||||
<table class="table table-striped">
|
|
||||||
<thead><tr>
|
|
||||||
<th>{{$featureName}} Features</th><th colspan="2" class="text-center">Usage</th>
|
|
||||||
</tr></thead>
|
|
||||||
<tbody>
|
|
||||||
{{range $featureValues}}
|
|
||||||
<tr>
|
|
||||||
<td style="width: 50%">{{.Key}} ({{.Version}})</td>
|
|
||||||
<td style="width: 10%" class="text-right">{{if ge .Pct 10.0}}{{.Pct | printf "%.0f"}}{{else if ge .Pct 1.0}}{{.Pct | printf "%.01f"}}{{else}}{{.Pct | printf "%.02f"}}{{end}}%</td>
|
|
||||||
<td style="width: 40%" {{if lt .Pct 5.0}}data-toggle="tooltip" title='{{.Count}}'{{end}}>
|
|
||||||
<div class="progress-bar" role="progressbar" aria-valuenow="{{.Pct | printf "%.02f"}}" aria-valuemin="0" aria-valuemax="100" style="width: {{.Pct | printf "%.02f"}}%; height:20px" {{if ge .Pct 5.0}}data-toggle="tooltip" title='{{.Count}}'{{end}}></div>
|
|
||||||
</td>
|
|
||||||
</tr>
|
|
||||||
{{end}}
|
|
||||||
</tbody>
|
|
||||||
</table>
|
|
||||||
</div>
|
|
||||||
{{end}}
|
|
||||||
</div>
|
|
||||||

<div class="row">
  <div class="col-md-12">
    <h4 id="feature-groups">Feature Group Usage</h4>
    <p>
      The following lists feature usage groups, which may include multiple occurrences of a feature use per report.
    </p>
  </div>
</div>
<div class="row">
{{$i := counter}}
{{range $featureName := .featureOrder}}
{{$featureValues := index $.featureGroups $featureName}}
{{if $i.DrawTwoDivider}}
</div>
<div class="row">
{{end}}
{{ $i.Increment }}
  <div class="col-md-6">
    <table class="table table-striped">
      <thead><tr>
        <th>{{$featureName}} Group Features</th><th colspan="2" class="text-center">Usage</th>
      </tr></thead>
      <tbody>
        {{range $featureValues}}
        {{$counts := .Counts}}
        <tr>
          <td style="width: 50%">
            <div data-toggle="tooltip" title='{{range $key, $value := .Counts}}{{$key}} ({{$value | proportion $counts | printf "%.02f"}}% - {{$value}})<br>{{end}}'>
              {{.Key}} ({{.Version}})
            </div>
          </td>
          <td style="width: 50%">
            <div class="progress" role="progressbar" style="width: 100%">
              {{$j := counter}}
              {{range $key, $value := .Counts}}
              {{with $valuePct := $value | proportion $counts}}
              <div class="progress-bar {{ $j.Current | progressBarClassByIndex }}" style='width: {{$valuePct | printf "%.02f"}}%' data-toggle="tooltip" title='{{$key}} ({{$valuePct | printf "%.02f"}}% - {{$value}})'>
                {{if ge $valuePct 30.0}}{{$key}}{{end}}
              </div>
              {{end}}
              {{ $j.Increment }}
              {{end}}
            </div>
          </td>
        </tr>
        {{end}}
      </tbody>
    </table>
  </div>
{{end}}
</div>
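Two more helpers appear in this block: `proportion`, which turns a member's count into its percentage share of the group, and `progressBarClassByIndex`, which cycles color classes. Neither is defined in this diff; here is a minimal sketch of `proportion` only, with the argument order inferred from the pipeline `{{$value | proportion $counts}}` (the piped value arrives as the last argument):

package main

import "fmt"

// proportion returns value's share of the summed counts, in percent.
// Sketch under the stated assumption about the signature; the real
// helper in ursrv may differ.
func proportion(counts map[string]int, value int) float64 {
	var total int
	for _, v := range counts {
		total += v
	}
	if total == 0 {
		return 0
	}
	return 100 * float64(value) / float64(total)
}

func main() {
	counts := map[string]int{"lan": 30, "wan": 60, "relay": 10}
	fmt.Printf("%.02f%%\n", proportion(counts, counts["wan"])) // prints 60.00%
}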
<div class="row">
  <div class="col-md-12">
    <h1 id="performance-charts">Historical Performance Data</h1>
    <p>Each chart shows the average of the corresponding metric across the entire reporting population for a given day.</p>

    <h4 id="hash-performance">Hash Performance (MiB/s)</h4>
    <div class="img-thumbnail" id="hashPerfChart" style="width: 1130px; height: 400px; padding: 10px;"></div>

    <h4 id="memory-usage">Memory Usage (MiB)</h4>
    <div class="img-thumbnail" id="memUsageChart" style="width: 1130px; height: 400px; padding: 10px;"></div>

    <h4 id="total-files">Total Number of Files</h4>
    <div class="img-thumbnail" id="totFilesChart" style="width: 1130px; height: 400px; padding: 10px;"></div>

    <h4 id="total-size">Total Folder Size (GiB)</h4>
    <div class="img-thumbnail" id="totMiBChart" style="width: 1130px; height: 400px; padding: 10px;"></div>

    <h4 id="system-ram">System RAM Size (GiB)</h4>
    <div class="img-thumbnail" id="memSizeChart" style="width: 1130px; height: 400px; padding: 10px;"></div>
  </div>
</div>
</div>
<hr>
<p>
<a href="https://github.com/syncthing/syncthing/">Source code</a>.
This product includes GeoLite2 data created by MaxMind, available from
<a href="http://www.maxmind.com">http://www.maxmind.com</a>.
</p>
<script type="text/javascript">
$('[data-toggle="tooltip"]').tooltip({html:true});
drawHeatMap();
</script>
</body>
</html>
@ -26,6 +26,8 @@ import (
 
 	"github.com/puzpuzpuz/xsync/v3"
 
 	"github.com/syncthing/syncthing/lib/protocol"
+	"github.com/syncthing/syncthing/lib/rand"
+	"github.com/syncthing/syncthing/lib/s3"
 )
 
 type clock interface {
@ -48,27 +50,38 @@ type inMemoryStore struct {
 	m             *xsync.MapOf[protocol.DeviceID, DatabaseRecord]
 	dir           string
 	flushInterval time.Duration
-	s3            *s3Copier
+	s3            *s3.Session
+	objKey        string
 	clock         clock
 }
 
-func newInMemoryStore(dir string, flushInterval time.Duration, s3 *s3Copier) *inMemoryStore {
+func newInMemoryStore(dir string, flushInterval time.Duration, s3sess *s3.Session) *inMemoryStore {
+	hn, err := os.Hostname()
+	if err != nil {
+		hn = rand.String(8)
+	}
 	s := &inMemoryStore{
 		m:             xsync.NewMapOf[protocol.DeviceID, DatabaseRecord](),
 		dir:           dir,
 		flushInterval: flushInterval,
-		s3:            s3,
+		s3:            s3sess,
+		objKey:        hn + ".db",
 		clock:         defaultClock{},
 	}
 	nr, err := s.read()
-	if os.IsNotExist(err) && s3 != nil {
+	if os.IsNotExist(err) && s3sess != nil {
 		// Try to read from AWS
+		latestKey, cerr := s3sess.LatestKey()
+		if cerr != nil {
+			log.Println("Error reading database from S3:", err)
+			return s
+		}
 		fd, cerr := os.Create(path.Join(s.dir, "records.db"))
 		if cerr != nil {
 			log.Println("Error creating database file:", err)
 			return s
 		}
-		if err := s3.downloadLatest(fd); err != nil {
+		if cerr := s3sess.Download(fd, latestKey); cerr != nil {
 			log.Printf("Error reading database from S3: %v", err)
 		}
 		_ = fd.Close()
@ -303,7 +316,7 @@ func (s *inMemoryStore) write() (err error) {
 		return nil
 	}
 	defer fd.Close()
-	if err := s.s3.upload(fd); err != nil {
+	if err := s.s3.Upload(fd, s.objKey); err != nil {
 		log.Printf("Error uploading database to S3: %v", err)
 	}
 	log.Println("Finished uploading database")
@ -24,6 +24,7 @@ import (
 	"github.com/syncthing/syncthing/lib/build"
 	"github.com/syncthing/syncthing/lib/protocol"
 	"github.com/syncthing/syncthing/lib/rand"
+	"github.com/syncthing/syncthing/lib/s3"
 	"github.com/syncthing/syncthing/lib/tlsutil"
 	"github.com/thejerf/suture/v4"
 )
@ -117,14 +118,13 @@ func main() {
 	})
 
 	// If configured, use S3 for database backups.
-	var s3c *s3Copier
+	var s3c *s3.Session
 	if cli.DBS3Endpoint != "" {
-		hostname, err := os.Hostname()
+		var err error
+		s3c, err = s3.NewSession(cli.DBS3Endpoint, cli.DBS3Region, cli.DBS3Bucket, cli.DBS3AccessKeyID, cli.DBS3SecretKey)
 		if err != nil {
-			log.Fatalf("Failed to get hostname: %v", err)
+			log.Fatalf("Failed to create S3 session: %v", err)
 		}
-		key := hostname + ".db"
-		s3c = newS3Copier(cli.DBS3Endpoint, cli.DBS3Region, cli.DBS3Bucket, key, cli.DBS3AccessKeyID, cli.DBS3SecretKey)
 	}
 
 	// Start the database.
@ -1,97 +0,0 @@
-// Copyright (C) 2024 The Syncthing Authors.
-//
-// This Source Code Form is subject to the terms of the Mozilla Public
-// License, v. 2.0. If a copy of the MPL was not distributed with this file,
-// You can obtain one at https://mozilla.org/MPL/2.0/.
-
-package main
-
-import (
-	"io"
-	"log"
-	"time"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/credentials"
-	"github.com/aws/aws-sdk-go/aws/session"
-	"github.com/aws/aws-sdk-go/service/s3"
-	"github.com/aws/aws-sdk-go/service/s3/s3manager"
-)
-
-type s3Copier struct {
-	endpoint    string
-	region      string
-	bucket      string
-	key         string
-	accessKeyID string
-	secretKey   string
-}
-
-func newS3Copier(endpoint, region, bucket, key, accessKeyID, secretKey string) *s3Copier {
-	return &s3Copier{
-		endpoint:    endpoint,
-		region:      region,
-		bucket:      bucket,
-		key:         key,
-		accessKeyID: accessKeyID,
-		secretKey:   secretKey,
-	}
-}
-
-func (s *s3Copier) upload(r io.Reader) error {
-	sess, err := session.NewSession(&aws.Config{
-		Region:      aws.String(s.region),
-		Endpoint:    aws.String(s.endpoint),
-		Credentials: credentials.NewStaticCredentials(s.accessKeyID, s.secretKey, ""),
-	})
-	if err != nil {
-		return err
-	}
-
-	uploader := s3manager.NewUploader(sess)
-	_, err = uploader.Upload(&s3manager.UploadInput{
-		Bucket: aws.String(s.bucket),
-		Key:    aws.String(s.key),
-		Body:   r,
-	})
-	return err
-}
-
-func (s *s3Copier) downloadLatest(w io.WriterAt) error {
-	sess, err := session.NewSession(&aws.Config{
-		Region:      aws.String(s.region),
-		Endpoint:    aws.String(s.endpoint),
-		Credentials: credentials.NewStaticCredentials(s.accessKeyID, s.secretKey, ""),
-	})
-	if err != nil {
-		return err
-	}
-
-	svc := s3.New(sess)
-	resp, err := svc.ListObjectsV2(&s3.ListObjectsV2Input{Bucket: aws.String(s.bucket)})
-	if err != nil {
-		return err
-	}
-
-	var lastKey string
-	var lastModified time.Time
-	var lastSize int64
-	for _, item := range resp.Contents {
-		if item.LastModified.After(lastModified) && *item.Size > lastSize {
-			lastKey = *item.Key
-			lastModified = *item.LastModified
-			lastSize = *item.Size
-		} else if lastModified.Sub(*item.LastModified) < 5*time.Minute && *item.Size > lastSize {
-			lastKey = *item.Key
-			lastSize = *item.Size
-		}
-	}
-
-	log.Println("Downloading database from", lastKey)
-	downloader := s3manager.NewDownloader(sess)
-	_, err = downloader.Download(w, &s3.GetObjectInput{
-		Bucket: aws.String(s.bucket),
-		Key:    aws.String(lastKey),
-	})
-	return err
-}
go.mod
@ -21,7 +21,6 @@ require (
 	github.com/jackpal/go-nat-pmp v1.0.2
 	github.com/julienschmidt/httprouter v1.3.0
 	github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
-	github.com/lib/pq v1.10.9
 	github.com/maruel/panicparse/v2 v2.3.1
 	github.com/maxbrunsfeld/counterfeiter/v6 v6.8.1
 	github.com/maxmind/geoipupdate/v6 v6.1.0

go.sum
@ -144,8 +144,6 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
-github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 h1:7UMa6KCCMjZEMDtTVdcGu0B1GmmC7QJKiCCjyTAWQy0=
 github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k=
 github.com/maruel/panicparse/v2 v2.3.1 h1:NtJavmbMn0DyzmmSStE8yUsmPZrZmudPH7kplxBinOA=
@ -61,7 +61,6 @@ Jakob Borg, Audrius Butkevicius, Jesse Lucas, Simon Frei, Tomasz Wilczyński, Al
 <li><a href="https://github.com/golang/snappy">golang/snappy</a>, Copyright © 2011 The Snappy-Go Authors.</li>
 <li><a href="https://github.com/jackpal/gateway">jackpal/gateway</a>, Copyright © 2010 Jack Palevich.</li>
 <li><a href="https://github.com/kballard/go-shellquote">kballard/go-shellquote</a>, Copyright © 2014 Kevin Ballard.</li>
-<li><a href="https://github.com/lib/pq">lib/pq</a>, Copyright © 2011-2013, 'pq' Contributors, portions Copyright © 2011 Blake Mizerany.</li>
 <li><a href="https://github.com/mattn/go-isatty">mattn/go-isatty</a>, Copyright © Yasuhiro MATSUMOTO.</li>
 <li><a href="https://github.com/matttproud/golang_protobuf_extensions">matttproud/golang_protobuf_extensions</a>, Copyright © 2012 Matt T. Proud.</li>
 <li><a href="https://github.com/oschwald/geoip2-golang">oschwald/geoip2-golang</a>, Copyright © 2015, Gregory J. Oschwald.</li>
lib/s3/s3.go (new file)
@ -0,0 +1,101 @@
+// Copyright (C) 2024 The Syncthing Authors.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at https://mozilla.org/MPL/2.0/.
+
+package s3
+
+import (
+	"io"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/aws/aws-sdk-go/service/s3/s3manager"
+)
+
+type Session struct {
+	bucket string
+	s3sess *session.Session
+}
+
+type Object = s3.Object
+
+func NewSession(endpoint, region, bucket, accessKeyID, secretKey string) (*Session, error) {
+	sess, err := session.NewSession(&aws.Config{
+		Region:      aws.String(region),
+		Endpoint:    aws.String(endpoint),
+		Credentials: credentials.NewStaticCredentials(accessKeyID, secretKey, ""),
+	})
+	if err != nil {
+		return nil, err
+	}
+	return &Session{
+		bucket: bucket,
+		s3sess: sess,
+	}, nil
+}
+
+func (s *Session) Upload(r io.Reader, key string) error {
+	uploader := s3manager.NewUploader(s.s3sess)
+	_, err := uploader.Upload(&s3manager.UploadInput{
+		Bucket: aws.String(s.bucket),
+		Key:    aws.String(key),
+		Body:   r,
+	})
+	return err
+}
+
+func (s *Session) List(fn func(*Object) bool) error {
+	svc := s3.New(s.s3sess)
+
+	opts := &s3.ListObjectsV2Input{
+		Bucket: aws.String(s.bucket),
+	}
+	for {
+		resp, err := svc.ListObjectsV2(opts)
+		if err != nil {
+			return err
+		}
+
+		for _, item := range resp.Contents {
+			if !fn(item) {
+				return nil
+			}
+		}
+
+		if resp.NextContinuationToken == nil || *resp.NextContinuationToken == "" {
+			break
+		}
+		opts.ContinuationToken = resp.NextContinuationToken
+	}
+
+	return nil
+}
+
+func (s *Session) LatestKey() (string, error) {
+	var latestKey string
+	var lastModified time.Time
+	if err := s.List(func(obj *Object) bool {
+		if latestKey == "" || obj.LastModified.After(lastModified) {
+			latestKey = *obj.Key
+			lastModified = *obj.LastModified
+		}
+		return true
+	}); err != nil {
+		return "", err
+	}
+	return latestKey, nil
+}
+
+func (s *Session) Download(w io.WriterAt, key string) error {
+	downloader := s3manager.NewDownloader(s.s3sess)
+	_, err := downloader.Download(w, &s3.GetObjectInput{
+		Bucket: aws.String(s.bucket),
+		Key:    aws.String(key),
+	})
+	return err
+}
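Taken together, the new package reduces the S3 interaction to four calls. A hedged usage sketch follows — the endpoint, region, bucket, credentials, and file names are placeholders, but the `s3.NewSession`, `Upload`, `LatestKey`, and `Download` signatures are exactly those added above, and the flow mirrors what `inMemoryStore` now does (upload under a per-host key on write, restore from the most recent backup on a fresh start):

package main

import (
	"log"
	"os"

	"github.com/syncthing/syncthing/lib/s3"
)

func main() {
	// Endpoint, region, bucket and credentials are illustrative only.
	sess, err := s3.NewSession("s3.example.com", "us-east-1", "ursrv-db", "ACCESS_KEY", "SECRET_KEY")
	if err != nil {
		log.Fatal(err)
	}

	// Upload a local database snapshot under a host-specific key,
	// as inMemoryStore.write() does with objKey = hostname + ".db".
	fd, err := os.Open("records.db")
	if err != nil {
		log.Fatal(err)
	}
	defer fd.Close()
	if err := sess.Upload(fd, "myhost.db"); err != nil {
		log.Fatal(err)
	}

	// On a fresh host, find the most recently modified backup and
	// download it, as the store does at startup when no local file exists.
	key, err := sess.LatestKey()
	if err != nil {
		log.Fatal(err)
	}
	out, err := os.Create("records.db.restored")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()
	if err := sess.Download(out, key); err != nil {
		log.Fatal(err)
	}
}

Note the design change from the deleted s3Copier: the AWS session is built once in NewSession instead of per call, and key selection moved from a size-and-mtime heuristic to a plain most-recently-modified scan via List/LatestKey.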
@ -18,163 +18,177 @@ import (
 )
 
 type Report struct {
-	// Generated
-	Received time.Time `json:"-"` // Only from DB
-	Date     string    `json:"date,omitempty"`
-	Address  string    `json:"address,omitempty"`
 
 	// v1 fields
 
-	UniqueID       string  `json:"uniqueID,omitempty" since:"1"`
+	UniqueID       string  `json:"uniqueID,omitempty" metric:"-" since:"1"`
-	Version        string  `json:"version,omitempty" since:"1"`
+	Version        string  `json:"version,omitempty" metric:"reports_total,gaugeVec:version" since:"1"`
-	LongVersion    string  `json:"longVersion,omitempty" since:"1"`
+	LongVersion    string  `json:"longVersion,omitempty" metric:"-" since:"1"`
-	Platform       string  `json:"platform,omitempty" since:"1"`
+	Platform       string  `json:"platform,omitempty" metric:"-" since:"1"`
-	NumFolders     int     `json:"numFolders,omitempty" since:"1"`
+	NumFolders     int     `json:"numFolders,omitempty" metric:"num_folders,summary" since:"1"`
-	NumDevices     int     `json:"numDevices,omitempty" since:"1"`
+	NumDevices     int     `json:"numDevices,omitempty" metric:"num_devices,summary" since:"1"`
-	TotFiles       int     `json:"totFiles,omitempty" since:"1"`
+	TotFiles       int     `json:"totFiles,omitempty" metric:"total_files,summary" since:"1"`
-	FolderMaxFiles int     `json:"folderMaxFiles,omitempty" since:"1"`
+	FolderMaxFiles int     `json:"folderMaxFiles,omitempty" metric:"folder_max_files,summary" since:"1"`
-	TotMiB         int     `json:"totMiB,omitempty" since:"1"`
+	TotMiB         int     `json:"totMiB,omitempty" metric:"total_data_mib,summary" since:"1"`
-	FolderMaxMiB   int     `json:"folderMaxMiB,omitempty" since:"1"`
+	FolderMaxMiB   int     `json:"folderMaxMiB,omitempty" metric:"folder_max_data_mib,summary" since:"1"`
-	MemoryUsageMiB int     `json:"memoryUsageMiB,omitempty" since:"1"`
+	MemoryUsageMiB int     `json:"memoryUsageMiB,omitempty" metric:"memory_usage_mib,summary" since:"1"`
-	SHA256Perf     float64 `json:"sha256Perf,omitempty" since:"1"`
+	SHA256Perf     float64 `json:"sha256Perf,omitempty" metric:"sha256_perf_mibps,summary" since:"1"`
-	HashPerf       float64 `json:"hashPerf,omitempty" since:"1"` // Was previously not stored server-side
+	HashPerf       float64 `json:"hashPerf,omitempty" metric:"hash_perf_mibps,summary" since:"1"`
-	MemorySize     int     `json:"memorySize,omitempty" since:"1"`
+	MemorySize     int     `json:"memorySize,omitempty" metric:"memory_size_mib,summary" since:"1"`
 
 	// v2 fields
 
-	URVersion int `json:"urVersion,omitempty" since:"2"`
+	URVersion int `json:"urVersion,omitempty" metric:"reports_by_urversion_total,gaugeVec:version" since:"2"`
-	NumCPU    int `json:"numCPU,omitempty" since:"2"`
+	NumCPU    int `json:"numCPU,omitempty" metric:"num_cpu,summary" since:"2"`
 
 	FolderUses struct {
-		SendOnly            int `json:"sendonly,omitempty" since:"2"`
+		SendOnly            int `json:"sendonly,omitempty" metric:"folder_feature{feature=ModeSendonly},summary" since:"2"`
-		SendReceive         int `json:"sendreceive,omitempty" since:"2"` // Was previously not stored server-side
+		SendReceive         int `json:"sendreceive,omitempty" metric:"folder_feature{feature=ModeSendReceive},summary" since:"2"`
-		ReceiveOnly         int `json:"receiveonly,omitempty" since:"2"`
+		ReceiveOnly         int `json:"receiveonly,omitempty" metric:"folder_feature{feature=ModeReceiveOnly},summary" since:"2"`
-		IgnorePerms         int `json:"ignorePerms,omitempty" since:"2"`
+		IgnorePerms         int `json:"ignorePerms,omitempty" metric:"folder_feature{feature=IgnorePerms},summary" since:"2"`
-		IgnoreDelete        int `json:"ignoreDelete,omitempty" since:"2"`
+		IgnoreDelete        int `json:"ignoreDelete,omitempty" metric:"folder_feature{feature=IgnoreDelete},summary" since:"2"`
-		AutoNormalize       int `json:"autoNormalize,omitempty" since:"2"`
+		AutoNormalize       int `json:"autoNormalize,omitempty" metric:"folder_feature{feature=AutoNormalize},summary" since:"2"`
-		SimpleVersioning    int `json:"simpleVersioning,omitempty" since:"2"`
+		SimpleVersioning    int `json:"simpleVersioning,omitempty" metric:"folder_feature{feature=VersioningSimple},summary" since:"2"`
-		ExternalVersioning  int `json:"externalVersioning,omitempty" since:"2"`
+		ExternalVersioning  int `json:"externalVersioning,omitempty" metric:"folder_feature{feature=VersioningExternal},summary" since:"2"`
-		StaggeredVersioning int `json:"staggeredVersioning,omitempty" since:"2"`
+		StaggeredVersioning int `json:"staggeredVersioning,omitempty" metric:"folder_feature{feature=VersioningStaggered},summary" since:"2"`
-		TrashcanVersioning  int `json:"trashcanVersioning,omitempty" since:"2"`
+		TrashcanVersioning  int `json:"trashcanVersioning,omitempty" metric:"folder_feature{feature=VersioningTrashcan},summary" since:"2"`
 	} `json:"folderUses,omitempty" since:"2"`
 
 	DeviceUses struct {
-		Introducer       int `json:"introducer,omitempty" since:"2"`
+		Introducer       int `json:"introducer,omitempty" metric:"device_feature{feature=Introducer},summary" since:"2"`
-		CustomCertName   int `json:"customCertName,omitempty" since:"2"`
+		CustomCertName   int `json:"customCertName,omitempty" metric:"device_feature{feature=CustomCertName},summary" since:"2"`
-		CompressAlways   int `json:"compressAlways,omitempty" since:"2"`
+		CompressAlways   int `json:"compressAlways,omitempty" metric:"device_feature{feature=CompressAlways},summary" since:"2"`
-		CompressMetadata int `json:"compressMetadata,omitempty" since:"2"`
+		CompressMetadata int `json:"compressMetadata,omitempty" metric:"device_feature{feature=CompressMetadata},summary" since:"2"`
-		CompressNever    int `json:"compressNever,omitempty" since:"2"`
+		CompressNever    int `json:"compressNever,omitempty" metric:"device_feature{feature=CompressNever},summary" since:"2"`
-		DynamicAddr      int `json:"dynamicAddr,omitempty" since:"2"`
+		DynamicAddr      int `json:"dynamicAddr,omitempty" metric:"device_feature{feature=AddressDynamic},summary" since:"2"`
-		StaticAddr       int `json:"staticAddr,omitempty" since:"2"`
+		StaticAddr       int `json:"staticAddr,omitempty" metric:"device_feature{feature=AddressStatic},summary" since:"2"`
 	} `json:"deviceUses,omitempty" since:"2"`
 
 	Announce struct {
-		GlobalEnabled     bool `json:"globalEnabled,omitempty" since:"2"`
+		GlobalEnabled     bool `json:"globalEnabled,omitempty" metric:"discovery_feature_count{feature=GlobalEnabled},gauge" since:"2"`
-		LocalEnabled      bool `json:"localEnabled,omitempty" since:"2"`
+		LocalEnabled      bool `json:"localEnabled,omitempty" metric:"discovery_feature_count{feature=LocalEnabled},gauge" since:"2"`
-		DefaultServersDNS int  `json:"defaultServersDNS,omitempty" since:"2"`
+		DefaultServersDNS int  `json:"defaultServersDNS,omitempty" metric:"discovery_default_servers,summary" since:"2"`
-		DefaultServersIP  int  `json:"defaultServersIP,omitempty" since:"2"` // Deprecated and not provided client-side anymore
-		OtherServers      int  `json:"otherServers,omitempty" since:"2"`
+		OtherServers      int  `json:"otherServers,omitempty" metric:"discovery_other_servers,summary" since:"2"`
 	} `json:"announce,omitempty" since:"2"`
 
 	Relays struct {
-		Enabled        bool `json:"enabled,omitempty" since:"2"`
+		Enabled        bool `json:"enabled,omitempty" metric:"relay_feature_enabled,gauge" since:"2"`
-		DefaultServers int  `json:"defaultServers,omitempty" since:"2"`
+		DefaultServers int  `json:"defaultServers,omitempty" metric:"relay_feature_count{feature=DefaultServers},summary" since:"2"`
-		OtherServers   int  `json:"otherServers,omitempty" since:"2"`
+		OtherServers   int  `json:"otherServers,omitempty" metric:"relay_feature_count{feature=OtherServers},summary" since:"2"`
 	} `json:"relays,omitempty" since:"2"`
 
-	UsesRateLimit        bool `json:"usesRateLimit,omitempty" since:"2"`
+	UsesRateLimit        bool `json:"usesRateLimit,omitempty" metric:"feature_count{feature=RateLimitsEnabled},gauge" since:"2"`
-	UpgradeAllowedManual bool `json:"upgradeAllowedManual,omitempty" since:"2"`
+	UpgradeAllowedManual bool `json:"upgradeAllowedManual,omitempty" metric:"feature_count{feature=UpgradeAllowedManual},gauge" since:"2"`
-	UpgradeAllowedAuto   bool `json:"upgradeAllowedAuto,omitempty" since:"2"`
+	UpgradeAllowedAuto   bool `json:"upgradeAllowedAuto,omitempty" metric:"feature_count{feature=UpgradeAllowedAuto},gauge" since:"2"`
 
 	// V2.5 fields (fields that were in v2 but never added to the database
-	UpgradeAllowedPre bool  `json:"upgradeAllowedPre,omitempty" since:"2"`
+	UpgradeAllowedPre bool  `json:"upgradeAllowedPre,omitempty" metric:"upgrade_allowed_pre,gauge" since:"2"`
-	RescanIntvs       []int `json:"rescanIntvs,omitempty" since:"2"`
+	RescanIntvs       []int `json:"rescanIntvs,omitempty" metric:"folder_rescan_intervals,summary" since:"2"`
 
 	// v3 fields
 
-	Uptime  int    `json:"uptime,omitempty" since:"3"`
+	Uptime  int    `json:"uptime,omitempty" metric:"uptime_seconds,summary" since:"3"`
-	NATType string `json:"natType,omitempty" since:"3"`
+	NATType string `json:"natType,omitempty" metric:"nat_detection,gaugeVec:type" since:"3"`
-	AlwaysLocalNets            bool   `json:"alwaysLocalNets,omitempty" since:"3"`
-	CacheIgnoredFiles          bool   `json:"cacheIgnoredFiles,omitempty" since:"3"`
-	OverwriteRemoteDeviceNames bool   `json:"overwriteRemoteDeviceNames,omitempty" since:"3"`
-	ProgressEmitterEnabled     bool   `json:"progressEmitterEnabled,omitempty" since:"3"`
-	CustomDefaultFolderPath    bool   `json:"customDefaultFolderPath,omitempty" since:"3"`
-	WeakHashSelection          string `json:"weakHashSelection,omitempty" since:"3"` // Deprecated and not provided client-side anymore
+	AlwaysLocalNets            bool `json:"alwaysLocalNets,omitempty" metric:"feature_count{feature=AlwaysLocalNets},gauge" since:"3"`
+	CacheIgnoredFiles          bool `json:"cacheIgnoredFiles,omitempty" metric:"feature_count{feature=CacheIgnoredFiles},gauge" since:"3"`
+	OverwriteRemoteDeviceNames bool `json:"overwriteRemoteDeviceNames,omitempty" metric:"feature_count{feature=OverwriteRemoteDeviceNames},gauge" since:"3"`
+	ProgressEmitterEnabled     bool `json:"progressEmitterEnabled,omitempty" metric:"feature_count{feature=ProgressEmitterEnabled},gauge" since:"3"`
+	CustomDefaultFolderPath    bool `json:"customDefaultFolderPath,omitempty" metric:"feature_count{feature=CustomDefaultFolderPath},gauge" since:"3"`
-	CustomTrafficClass       bool `json:"customTrafficClass,omitempty" since:"3"`
+	CustomTrafficClass       bool `json:"customTrafficClass,omitempty" metric:"feature_count{feature=CustomTrafficClass},gauge" since:"3"`
-	CustomTempIndexMinBlocks bool `json:"customTempIndexMinBlocks,omitempty" since:"3"`
+	CustomTempIndexMinBlocks bool `json:"customTempIndexMinBlocks,omitempty" metric:"feature_count{feature=CustomTempIndexMinBlocks},gauge" since:"3"`
-	TemporariesDisabled      bool `json:"temporariesDisabled,omitempty" since:"3"`
+	TemporariesDisabled      bool `json:"temporariesDisabled,omitempty" metric:"feature_count{feature=TemporariesDisabled},gauge" since:"3"`
-	TemporariesCustom        bool `json:"temporariesCustom,omitempty" since:"3"`
+	TemporariesCustom        bool `json:"temporariesCustom,omitempty" metric:"feature_count{feature=TemporariesCustom},gauge" since:"3"`
-	LimitBandwidthInLan      bool `json:"limitBandwidthInLan,omitempty" since:"3"`
+	LimitBandwidthInLan      bool `json:"limitBandwidthInLan,omitempty" metric:"feature_count{feature=LimitBandwidthInLAN},gauge" since:"3"`
-	CustomReleaseURL         bool `json:"customReleaseURL,omitempty" since:"3"`
+	CustomReleaseURL         bool `json:"customReleaseURL,omitempty" metric:"feature_count{feature=CustomReleaseURL},gauge" since:"3"`
-	RestartOnWakeup          bool `json:"restartOnWakeup,omitempty" since:"3"`
+	RestartOnWakeup          bool `json:"restartOnWakeup,omitempty" metric:"feature_count{feature=RestartOnWakeup},gauge" since:"3"`
-	CustomStunServers        bool `json:"customStunServers,omitempty" since:"3"`
+	CustomStunServers        bool `json:"customStunServers,omitempty" metric:"feature_count{feature=CustomSTUNServers},gauge" since:"3"`
 
 	FolderUsesV3 struct {
-		ScanProgressDisabled    int            `json:"scanProgressDisabled,omitempty" since:"3"`
+		ScanProgressDisabled    int            `json:"scanProgressDisabled,omitempty" metric:"folder_feature{feature=ScanProgressDisabled},summary" since:"3"`
-		ConflictsDisabled       int            `json:"conflictsDisabled,omitempty" since:"3"`
+		ConflictsDisabled       int            `json:"conflictsDisabled,omitempty" metric:"folder_feature{feature=ConflictsDisabled},summary" since:"3"`
-		ConflictsUnlimited      int            `json:"conflictsUnlimited,omitempty" since:"3"`
+		ConflictsUnlimited      int            `json:"conflictsUnlimited,omitempty" metric:"folder_feature{feature=ConflictsUnlimited},summary" since:"3"`
-		ConflictsOther          int            `json:"conflictsOther,omitempty" since:"3"`
+		ConflictsOther          int            `json:"conflictsOther,omitempty" metric:"folder_feature{feature=ConflictsOther},summary" since:"3"`
-		DisableSparseFiles      int            `json:"disableSparseFiles,omitempty" since:"3"`
+		DisableSparseFiles      int            `json:"disableSparseFiles,omitempty" metric:"folder_feature{feature=DisableSparseFiles},summary" since:"3"`
-		DisableTempIndexes      int            `json:"disableTempIndexes,omitempty" since:"3"`
+		DisableTempIndexes      int            `json:"disableTempIndexes,omitempty" metric:"folder_feature{feature=DisableTempIndexes},summary" since:"3"`
-		AlwaysWeakHash          int            `json:"alwaysWeakHash,omitempty" since:"3"`
+		AlwaysWeakHash          int            `json:"alwaysWeakHash,omitempty" metric:"folder_feature{feature=AlwaysWeakhash},summary" since:"3"`
-		CustomWeakHashThreshold int            `json:"customWeakHashThreshold,omitempty" since:"3"`
+		CustomWeakHashThreshold int            `json:"customWeakHashThreshold,omitempty" metric:"folder_feature{feature=CustomWeakhashThreshold},summary" since:"3"`
-		FsWatcherEnabled        int            `json:"fsWatcherEnabled,omitempty" since:"3"`
+		FsWatcherEnabled        int            `json:"fsWatcherEnabled,omitempty" metric:"folder_feature{feature=FSWatcherEnabled},summary" since:"3"`
-		PullOrder               map[string]int `json:"pullOrder,omitempty" since:"3"`
+		PullOrder               map[string]int `json:"pullOrder,omitempty" metric:"folder_pull_order,summaryVec:order" since:"3"`
-		FilesystemType          map[string]int `json:"filesystemType,omitempty" since:"3"`
+		FilesystemType          map[string]int `json:"filesystemType,omitempty" metric:"folder_file_system_type,summaryVec:type" since:"3"`
-		FsWatcherDelays         []int          `json:"fsWatcherDelays,omitempty" since:"3"`
+		FsWatcherDelays         []int          `json:"fsWatcherDelays,omitempty" metric:"folder_fswatcher_delays,summary" since:"3"`
-		CustomMarkerName        int            `json:"customMarkerName,omitempty" since:"3"`
+		CustomMarkerName        int            `json:"customMarkerName,omitempty" metric:"folder_feature{feature=CustomMarkername},summary" since:"3"`
-		CopyOwnershipFromParent int            `json:"copyOwnershipFromParent,omitempty" since:"3"`
+		CopyOwnershipFromParent int            `json:"copyOwnershipFromParent,omitempty" metric:"folder_feature{feature=CopyParentOwnership},summary" since:"3"`
-		ModTimeWindowS          []int          `json:"modTimeWindowS,omitempty" since:"3"`
+		ModTimeWindowS          []int          `json:"modTimeWindowS,omitempty" metric:"folder_modtime_window_s,summary" since:"3"`
-		MaxConcurrentWrites     []int          `json:"maxConcurrentWrites,omitempty" since:"3"`
+		MaxConcurrentWrites     []int          `json:"maxConcurrentWrites,omitempty" metric:"folder_max_concurrent_writes,summary" since:"3"`
-		DisableFsync            int            `json:"disableFsync,omitempty" since:"3"`
+		DisableFsync            int            `json:"disableFsync,omitempty" metric:"folder_feature{feature=DisableFsync},summary" since:"3"`
-		BlockPullOrder          map[string]int `json:"blockPullOrder,omitempty" since:"3"`
+		BlockPullOrder          map[string]int `json:"blockPullOrder,omitempty" metric:"folder_block_pull_order:summaryVec:order" since:"3"`
-		CopyRangeMethod         map[string]int `json:"copyRangeMethod,omitempty" since:"3"`
+		CopyRangeMethod         map[string]int `json:"copyRangeMethod,omitempty" metric:"folder_copy_range_method:summaryVec:method" since:"3"`
-		CaseSensitiveFS         int            `json:"caseSensitiveFS,omitempty" since:"3"`
+		CaseSensitiveFS         int            `json:"caseSensitiveFS,omitempty" metric:"folder_feature{feature=CaseSensitiveFS},summary" since:"3"`
-		ReceiveEncrypted        int            `json:"receiveencrypted,omitempty" since:"3"`
+		ReceiveEncrypted        int            `json:"receiveencrypted,omitempty" metric:"folder_feature{feature=ReceiveEncrypted},summary" since:"3"`
+		SendXattrs              int            `json:"sendXattrs,omitempty" metric:"folder_feature{feature=SendXattrs},summary" since:"3"`
+		SyncXattrs              int            `json:"syncXattrs,omitempty" metric:"folder_feature{feature=SyncXattrs},summary" since:"3"`
+		SendOwnership           int            `json:"sendOwnership,omitempty" metric:"folder_feature{feature=SendOwnership},summary" since:"3"`
+		SyncOwnership           int            `json:"syncOwnership,omitempty" metric:"folder_feature{feature=SyncOwnership},summary" since:"3"`
 	} `json:"folderUsesV3,omitempty" since:"3"`
 
 	DeviceUsesV3 struct {
-		Untrusted           int `json:"untrusted,omitempty" since:"3"`
+		Untrusted           int `json:"untrusted,omitempty" metric:"device_feature{feature=Untrusted},summary" since:"3"`
+		UsesRateLimit       int `json:"usesRateLimit,omitempty" metric:"device_feature{feature=RateLimitsEnabled},summary" since:"3"`
+		MultipleConnections int `json:"multipleConnections,omitempty" metric:"device_feature{feature=MultipleConnections},summary" since:"3"`
 	} `json:"deviceUsesV3,omitempty" since:"3"`
 
 	GUIStats struct {
-		Enabled                   int            `json:"enabled,omitempty" since:"3"`
+		Enabled                   int            `json:"enabled,omitempty" metric:"gui_feature_count{feature=Enabled},summary" since:"3"`
-		UseTLS                    int            `json:"useTLS,omitempty" since:"3"`
+		UseTLS                    int            `json:"useTLS,omitempty" metric:"gui_feature_count{feature=TLS},summary" since:"3"`
-		UseAuth                   int            `json:"useAuth,omitempty" since:"3"`
+		UseAuth                   int            `json:"useAuth,omitempty" metric:"gui_feature_count{feature=Authentication},summary" since:"3"`
-		InsecureAdminAccess       int            `json:"insecureAdminAccess,omitempty" since:"3"`
+		InsecureAdminAccess       int            `json:"insecureAdminAccess,omitempty" metric:"gui_feature_count{feature=InsecureAdminAccess},summary" since:"3"`
-		Debugging                 int            `json:"debugging,omitempty" since:"3"`
+		Debugging                 int            `json:"debugging,omitempty" metric:"gui_feature_count{feature=Debugging},summary" since:"3"`
-		InsecureSkipHostCheck     int            `json:"insecureSkipHostCheck,omitempty" since:"3"`
+		InsecureSkipHostCheck     int            `json:"insecureSkipHostCheck,omitempty" metric:"gui_feature_count{feature=InsecureSkipHostCheck},summary" since:"3"`
-		InsecureAllowFrameLoading int            `json:"insecureAllowFrameLoading,omitempty" since:"3"`
+		InsecureAllowFrameLoading int            `json:"insecureAllowFrameLoading,omitempty" metric:"gui_feature_count{feature=InsecureAllowFrameLoading},summary" since:"3"`
-		ListenLocal               int            `json:"listenLocal,omitempty" since:"3"`
+		ListenLocal               int            `json:"listenLocal,omitempty" metric:"gui_feature_count{feature=ListenLocal},summary" since:"3"`
-		ListenUnspecified         int            `json:"listenUnspecified,omitempty" since:"3"`
+		ListenUnspecified         int            `json:"listenUnspecified,omitempty" metric:"gui_feature_count{feature=ListenUnspecified},summary" since:"3"`
-		Theme                     map[string]int `json:"theme,omitempty" since:"3"`
+		Theme                     map[string]int `json:"theme,omitempty" metric:"gui_theme,summaryVec:theme" since:"3"`
 	} `json:"guiStats,omitempty" since:"3"`
 
 	BlockStats struct {
-		Total             int `json:"total,omitempty" since:"3"`
+		Total             int `json:"total,omitempty" metric:"blocks_processed_total,gauge" since:"3"`
-		Renamed           int `json:"renamed,omitempty" since:"3"`
+		Renamed           int `json:"renamed,omitempty" metric:"blocks_processed{source=renamed},gauge" since:"3"`
-		Reused            int `json:"reused,omitempty" since:"3"`
+		Reused            int `json:"reused,omitempty" metric:"blocks_processed{source=reused},gauge" since:"3"`
-		Pulled            int `json:"pulled,omitempty" since:"3"`
+		Pulled            int `json:"pulled,omitempty" metric:"blocks_processed{source=pulled},gauge" since:"3"`
-		CopyOrigin        int `json:"copyOrigin,omitempty" since:"3"`
+		CopyOrigin        int `json:"copyOrigin,omitempty" metric:"blocks_processed{source=copy_origin},gauge" since:"3"`
-		CopyOriginShifted int `json:"copyOriginShifted,omitempty" since:"3"`
+		CopyOriginShifted int `json:"copyOriginShifted,omitempty" metric:"blocks_processed{source=copy_origin_shifted},gauge" since:"3"`
-		CopyElsewhere     int `json:"copyElsewhere,omitempty" since:"3"`
+		CopyElsewhere     int `json:"copyElsewhere,omitempty" metric:"blocks_processed{source=copy_elsewhere},gauge" since:"3"`
 	} `json:"blockStats,omitempty" since:"3"`
 
 	TransportStats map[string]int `json:"transportStats,omitempty" since:"3"`
 
 	IgnoreStats struct {
-		Lines           int `json:"lines,omitempty" since:"3"`
+		Lines           int `json:"lines,omitempty" metric:"folder_ignore_lines_total,summary" since:"3"`
-		Inverts         int `json:"inverts,omitempty" since:"3"`
+		Inverts         int `json:"inverts,omitempty" metric:"folder_ignore_lines{kind=inverts},summary" since:"3"`
-		Folded          int `json:"folded,omitempty" since:"3"`
+		Folded          int `json:"folded,omitempty" metric:"folder_ignore_lines{kind=folded},summary" since:"3"`
-		Deletable       int `json:"deletable,omitempty" since:"3"`
+		Deletable       int `json:"deletable,omitempty" metric:"folder_ignore_lines{kind=deletable},summary" since:"3"`
-		Rooted          int `json:"rooted,omitempty" since:"3"`
+		Rooted          int `json:"rooted,omitempty" metric:"folder_ignore_lines{kind=rooted},summary" since:"3"`
-		Includes        int `json:"includes,omitempty" since:"3"`
+		Includes        int `json:"includes,omitempty" metric:"folder_ignore_lines{kind=includes},summary" since:"3"`
-		EscapedIncludes int `json:"escapedIncludes,omitempty" since:"3"`
+		EscapedIncludes int `json:"escapedIncludes,omitempty" metric:"folder_ignore_lines{kind=escapedIncludes},summary" since:"3"`
-		DoubleStars     int `json:"doubleStars,omitempty" since:"3"`
+		DoubleStars     int `json:"doubleStars,omitempty" metric:"folder_ignore_lines{kind=doubleStars},summary" since:"3"`
-		Stars           int `json:"stars,omitempty" since:"3"`
+		Stars           int `json:"stars,omitempty" metric:"folder_ignore_lines{kind=stars},summary" since:"3"`
 	} `json:"ignoreStats,omitempty" since:"3"`
 
 	// V3 fields added late in the RC
-	WeakHashEnabled bool `json:"weakHashEnabled,omitempty" since:"3"` // Deprecated and not provided client-side anymore
+	WeakHashEnabled bool `json:"weakHashEnabled,omitempty" metric:"-" since:"3"` // Deprecated and not provided client-side anymore
 
+	// Added in post processing
+	Received     time.Time `json:"received,omitempty"`
+	Date         string    `json:"date,omitempty"`
+	Address      string    `json:"address,omitempty"`
+	OS           string    `json:"os" metric:"reports_total,gaugeVec:os"`
+	Arch         string    `json:"arch" metric:"reports_total,gaugeVec:arch"`
+	Compiler     string    `json:"compiler" metric:"builder,gaugeVec:compiler"`
+	Builder      string    `json:"builder" metric:"builder,gaugeVec:builder"`
+	Distribution string    `json:"distribution" metric:"builder,gaugeVec:distribution"`
+	Country      string    `json:"country" metric:"location,gaugeVec:country"`
+	CountryCode  string    `json:"countryCode" metric:"location,gaugeVec:countryCode"`
+	MajorVersion string    `json:"majorVersion" metric:"reports_by_major_total,gaugeVec:version"`
 }
 
 func New() *Report {
@ -206,182 +220,6 @@ func (r *Report) ClearForVersion(version int) error {
 	return clear(r, version)
 }
 
-func (r *Report) FieldPointers() []interface{} {
-	// All the fields of the Report, in the same order as the database fields.
-	return []interface{}{
-		&r.Received, &r.UniqueID, &r.Version, &r.LongVersion, &r.Platform,
-		&r.NumFolders, &r.NumDevices, &r.TotFiles, &r.FolderMaxFiles,
-		&r.TotMiB, &r.FolderMaxMiB, &r.MemoryUsageMiB, &r.SHA256Perf,
-		&r.MemorySize, &r.Date,
-		// V2
-		&r.URVersion, &r.NumCPU, &r.FolderUses.SendOnly, &r.FolderUses.IgnorePerms,
-		&r.FolderUses.IgnoreDelete, &r.FolderUses.AutoNormalize, &r.DeviceUses.Introducer,
-		&r.DeviceUses.CustomCertName, &r.DeviceUses.CompressAlways,
-		&r.DeviceUses.CompressMetadata, &r.DeviceUses.CompressNever,
-		&r.DeviceUses.DynamicAddr, &r.DeviceUses.StaticAddr,
-		&r.Announce.GlobalEnabled, &r.Announce.LocalEnabled,
-		&r.Announce.DefaultServersDNS, &r.Announce.DefaultServersIP,
-		&r.Announce.OtherServers, &r.Relays.Enabled, &r.Relays.DefaultServers,
-		&r.Relays.OtherServers, &r.UsesRateLimit, &r.UpgradeAllowedManual,
-		&r.UpgradeAllowedAuto, &r.FolderUses.SimpleVersioning,
-		&r.FolderUses.ExternalVersioning, &r.FolderUses.StaggeredVersioning,
-		&r.FolderUses.TrashcanVersioning,
-
-		// V2.5
-		&r.UpgradeAllowedPre,
-
-		// V3
-		&r.Uptime, &r.NATType, &r.AlwaysLocalNets, &r.CacheIgnoredFiles,
-		&r.OverwriteRemoteDeviceNames, &r.ProgressEmitterEnabled, &r.CustomDefaultFolderPath,
-		&r.WeakHashSelection, &r.CustomTrafficClass, &r.CustomTempIndexMinBlocks,
-		&r.TemporariesDisabled, &r.TemporariesCustom, &r.LimitBandwidthInLan,
-		&r.CustomReleaseURL, &r.RestartOnWakeup, &r.CustomStunServers,
-
-		&r.FolderUsesV3.ScanProgressDisabled, &r.FolderUsesV3.ConflictsDisabled,
-		&r.FolderUsesV3.ConflictsUnlimited, &r.FolderUsesV3.ConflictsOther,
-		&r.FolderUsesV3.DisableSparseFiles, &r.FolderUsesV3.DisableTempIndexes,
-		&r.FolderUsesV3.AlwaysWeakHash, &r.FolderUsesV3.CustomWeakHashThreshold,
-		&r.FolderUsesV3.FsWatcherEnabled,
-
-		&r.GUIStats.Enabled, &r.GUIStats.UseTLS, &r.GUIStats.UseAuth,
-		&r.GUIStats.InsecureAdminAccess,
-		&r.GUIStats.Debugging, &r.GUIStats.InsecureSkipHostCheck,
-		&r.GUIStats.InsecureAllowFrameLoading, &r.GUIStats.ListenLocal,
-		&r.GUIStats.ListenUnspecified,
-
-		&r.BlockStats.Total, &r.BlockStats.Renamed,
-		&r.BlockStats.Reused, &r.BlockStats.Pulled, &r.BlockStats.CopyOrigin,
-		&r.BlockStats.CopyOriginShifted, &r.BlockStats.CopyElsewhere,
-
-		&r.IgnoreStats.Lines, &r.IgnoreStats.Inverts, &r.IgnoreStats.Folded,
-		&r.IgnoreStats.Deletable, &r.IgnoreStats.Rooted, &r.IgnoreStats.Includes,
-		&r.IgnoreStats.EscapedIncludes, &r.IgnoreStats.DoubleStars, &r.IgnoreStats.Stars,
-
-		// V3 added late in the RC
-		&r.WeakHashEnabled,
-		&r.Address,
-
-		// Receive only folders
-		&r.FolderUses.ReceiveOnly,
-	}
-}
-
-func (*Report) FieldNames() []string {
-	// The database fields that back this struct in PostgreSQL
-	return []string{
-		// V1
-		"Received",
-		"UniqueID",
-		"Version",
-		"LongVersion",
-		"Platform",
-		"NumFolders",
-		"NumDevices",
-		"TotFiles",
-		"FolderMaxFiles",
-		"TotMiB",
-		"FolderMaxMiB",
-		"MemoryUsageMiB",
-		"SHA256Perf",
-		"MemorySize",
-		"Date",
-		// V2
-		"ReportVersion",
-		"NumCPU",
-		"FolderRO",
-		"FolderIgnorePerms",
-		"FolderIgnoreDelete",
-		"FolderAutoNormalize",
-		"DeviceIntroducer",
-		"DeviceCustomCertName",
-		"DeviceCompressionAlways",
-		"DeviceCompressionMetadata",
-		"DeviceCompressionNever",
-		"DeviceDynamicAddr",
-		"DeviceStaticAddr",
-		"AnnounceGlobalEnabled",
-		"AnnounceLocalEnabled",
-		"AnnounceDefaultServersDNS",
-		"AnnounceDefaultServersIP",
-		"AnnounceOtherServers",
-		"RelayEnabled",
-		"RelayDefaultServers",
-		"RelayOtherServers",
-		"RateLimitEnabled",
-		"UpgradeAllowedManual",
-		"UpgradeAllowedAuto",
-		// v0.12.19+
-		"FolderSimpleVersioning",
-		"FolderExternalVersioning",
-		"FolderStaggeredVersioning",
-		"FolderTrashcanVersioning",
-		// V2.5
-		"UpgradeAllowedPre",
-		// V3
-		"Uptime",
-		"NATType",
-		"AlwaysLocalNets",
-		"CacheIgnoredFiles",
-		"OverwriteRemoteDeviceNames",
-		"ProgressEmitterEnabled",
-		"CustomDefaultFolderPath",
-		"WeakHashSelection",
-		"CustomTrafficClass",
-		"CustomTempIndexMinBlocks",
-		"TemporariesDisabled",
-		"TemporariesCustom",
-		"LimitBandwidthInLan",
-		"CustomReleaseURL",
-		"RestartOnWakeup",
-		"CustomStunServers",
-
-		"FolderScanProgressDisabled",
-		"FolderConflictsDisabled",
-		"FolderConflictsUnlimited",
-		"FolderConflictsOther",
-		"FolderDisableSparseFiles",
-		"FolderDisableTempIndexes",
-		"FolderAlwaysWeakHash",
-		"FolderCustomWeakHashThreshold",
-		"FolderFsWatcherEnabled",
-
-		"GUIEnabled",
-		"GUIUseTLS",
-		"GUIUseAuth",
-		"GUIInsecureAdminAccess",
-		"GUIDebugging",
-		"GUIInsecureSkipHostCheck",
-		"GUIInsecureAllowFrameLoading",
-		"GUIListenLocal",
-		"GUIListenUnspecified",
-
-		"BlocksTotal",
-		"BlocksRenamed",
-		"BlocksReused",
-		"BlocksPulled",
-		"BlocksCopyOrigin",
-		"BlocksCopyOriginShifted",
-		"BlocksCopyElsewhere",
-
-		"IgnoreLines",
-		"IgnoreInverts",
-		"IgnoreFolded",
-		"IgnoreDeletable",
-		"IgnoreRooted",
-		"IgnoreIncludes",
-		"IgnoreEscapedIncludes",
-		"IgnoreDoubleStars",
-		"IgnoreStars",
-
-		// V3 added late in the RC
-		"WeakHashEnabled",
-		"Address",
-
-		// Receive only folders
-		"FolderRecvOnly",
-	}
-}
 
 func (r Report) Value() (driver.Value, error) {
 	// This needs to be string, yet we read back bytes..
 	bs, err := json.Marshal(r)
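The point of this change is that each Report field now carries a `metric` struct tag naming a Prometheus-style metric and collector kind (`summary`, `gauge`, `gaugeVec:<label>`, with optional constant labels in braces, or `-` to skip), replacing the hand-maintained FieldPointers/FieldNames database mapping deleted above. The code that consumes the tags is not part of this excerpt, so the following is only an illustrative sketch of decoding such a tag with reflection; the metricSpec type and parsing rules are my assumptions, not the committed implementation:

package main

import (
	"fmt"
	"reflect"
	"strings"
)

// metricSpec is a hypothetical decoded form of a metric tag:
// "name[,kind]" where the name may embed constant labels such as
// "folder_feature{feature=ModeSendonly}".
type metricSpec struct {
	Name string
	Kind string
}

// parseMetricTags walks a struct's fields and decodes their metric
// tags, skipping fields tagged "-" or untagged.
func parseMetricTags(v interface{}) map[string]metricSpec {
	out := map[string]metricSpec{}
	t := reflect.TypeOf(v)
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		tag := f.Tag.Get("metric")
		if tag == "" || tag == "-" {
			continue
		}
		name, kind, _ := strings.Cut(tag, ",")
		out[f.Name] = metricSpec{Name: name, Kind: kind}
	}
	return out
}

func main() {
	// A three-field stand-in for the Report struct above.
	type report struct {
		Version    string `json:"version,omitempty" metric:"reports_total,gaugeVec:version" since:"1"`
		NumFolders int    `json:"numFolders,omitempty" metric:"num_folders,summary" since:"1"`
		UniqueID   string `json:"uniqueID,omitempty" metric:"-" since:"1"`
	}
	for field, spec := range parseMetricTags(report{}) {
		fmt.Printf("%s -> name=%q kind=%q\n", field, spec.Name, spec.Kind)
	}
}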
@ -274,6 +274,18 @@ func (s *Service) reportData(ctx context.Context, urVersion int, preview bool) (
 		if cfg.Type == config.FolderTypeReceiveEncrypted {
 			report.FolderUsesV3.ReceiveEncrypted++
 		}
+		if cfg.SendXattrs {
+			report.FolderUsesV3.SendXattrs++
+		}
+		if cfg.SyncXattrs {
+			report.FolderUsesV3.SyncXattrs++
+		}
+		if cfg.SendOwnership {
+			report.FolderUsesV3.SendOwnership++
+		}
+		if cfg.SyncOwnership {
+			report.FolderUsesV3.SyncOwnership++
+		}
 	}
 	sort.Ints(report.FolderUsesV3.FsWatcherDelays)
 
@ -281,6 +293,12 @@ func (s *Service) reportData(ctx context.Context, urVersion int, preview bool) (
 		if cfg.Untrusted {
 			report.DeviceUsesV3.Untrusted++
 		}
+		if cfg.MaxRecvKbps > 0 || cfg.MaxSendKbps > 0 {
+			report.DeviceUsesV3.UsesRateLimit++
+		}
+		if cfg.RawNumConnections > 1 {
+			report.DeviceUsesV3.MultipleConnections++
+		}
 	}
 
 	guiCfg := s.cfg.GUI()