// Copyright (C) 2018 The Syncthing Authors.
//
// This Source Code Form is subject to the terms of the Mozilla Public
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at https://mozilla.org/MPL/2.0/.

package main
import (
	"bytes"
	"crypto/tls"
	"database/sql"
	"encoding/json"
	"html/template"
	"io"
	"log"
	"net"
	"net/http"
	"os"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"
	"unicode"

	_ "github.com/lib/pq" // PostgreSQL driver (assumed; sql.Open("postgres", ...) below needs a registered driver)
	"github.com/oschwald/geoip2-golang"
	"github.com/syncthing/syncthing/lib/upgrade"
	"github.com/syncthing/syncthing/lib/ur/contract"
)
var (
	useHTTP            = os.Getenv("UR_USE_HTTP") != ""
	debug              = os.Getenv("UR_DEBUG") != ""
	keyFile            = getEnvDefault("UR_KEY_FILE", "key.pem")
	certFile           = getEnvDefault("UR_CRT_FILE", "crt.pem")
	dbConn             = getEnvDefault("UR_DB_URL", "postgres://user:password@localhost/ur?sslmode=disable")
	listenAddr         = getEnvDefault("UR_LISTEN", "0.0.0.0:8443")
	geoIPPath          = getEnvDefault("UR_GEOIP", "GeoLite2-City.mmdb")
	tpl                *template.Template
	compilerRe         = regexp.MustCompile(`\(([A-Za-z0-9()., -]+) \w+-\w+(?:| android| default)\) ([\w@.-]+)`)
	progressBarClass   = []string{"", "progress-bar-success", "progress-bar-info", "progress-bar-warning", "progress-bar-danger"}
	featureOrder       = []string{"Various", "Folder", "Device", "Connection", "GUI"}
	knownVersions      = []string{"v2", "v3"}
	knownDistributions = []distributionMatch{
		// Maps well known builders to the official distribution method that
		// they represent.
		{regexp.MustCompile("android-.*teamcity@build.syncthing.net"), "Google Play"},
		{regexp.MustCompile("teamcity@build.syncthing.net"), "GitHub"},
		{regexp.MustCompile("deb@build.syncthing.net"), "APT"},
		{regexp.MustCompile("docker@syncthing.net"), "Docker Hub"},
		{regexp.MustCompile("jenkins@build.syncthing.net"), "GitHub"},
		{regexp.MustCompile("snap@build.syncthing.net"), "Snapcraft"},
		{regexp.MustCompile("android-.*vagrant@basebox-stretch64"), "F-Droid"},
		{regexp.MustCompile("builduser@(archlinux|svetlemodry)"), "Arch (3rd party)"},
		{regexp.MustCompile("synology@kastelo.net"), "Synology (Kastelo)"},
		{regexp.MustCompile("@debian"), "Debian (3rd party)"},
		{regexp.MustCompile("@fedora"), "Fedora (3rd party)"},
		{regexp.MustCompile(`\bbrew@`), "Homebrew (3rd party)"},

		{regexp.MustCompile("."), "Others"},
	}
)
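// distributionMatch pairs a builder regexp with the distribution channel it
// represents. Matching is done against a report's LongVersion string, so a
// (hypothetical) LongVersion containing "deb@build.syncthing.net" is counted
// under "APT", and anything that matches no earlier entry falls through to
// the final catch-all "Others".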
type distributionMatch struct {
	matcher      *regexp.Regexp
	distribution string
}
var funcs = map[string]interface{}{
	"commatize":  commatize,
	"number":     number,
	"proportion": proportion,
	"counter": func() *counter {
		return &counter{}
	},
	"progressBarClassByIndex": func(a int) string {
		return progressBarClass[a%len(progressBarClass)]
	},
	"slice": func(numParts, whichPart int, input []feature) []feature {
		var part []feature
		perPart := (len(input) / numParts) + len(input)%2
		parts := make([][]feature, 0, numParts)
		for len(input) >= perPart {
			part, input = input[:perPart], input[perPart:]
			parts = append(parts, part)
		}
		if len(input) > 0 {
			parts = append(parts, input)
		}
		return parts[whichPart-1]
	},
}
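// getEnvDefault returns the value of the named environment variable, or def
// when the variable is unset or empty. For example (hypothetical values):
//
//	os.Setenv("UR_LISTEN", ":9443")
//	addr := getEnvDefault("UR_LISTEN", "0.0.0.0:8443") // ":9443"
//	crt := getEnvDefault("UR_CRT_FILE", "crt.pem")     // "crt.pem" if UR_CRT_FILE is unset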
func getEnvDefault(key, def string) string {
	if val := os.Getenv(key); val != "" {
		return val
	}
	return def
}
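// setupDB creates the ReportsJson table and its indexes if they do not
// already exist, then runs the legacy-schema migration. The table stores one
// JSONB document per received report, e.g. (hypothetical, abbreviated):
//
//	Received               | Report
//	2021-01-02 03:04:05+00 | {"uniqueID": "abc123", "date": "20210102", "urVersion": 3, ...}
//
// The unique index on (Report->>'date', Report->>'uniqueID') is what makes a
// second report from the same ID on the same day fail with a duplicate-key
// error, which newDataHandler silently ignores.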
func setupDB(db *sql.DB) error {
	_, err := db.Exec(`CREATE TABLE IF NOT EXISTS ReportsJson (
		Received TIMESTAMP NOT NULL,
		Report JSONB NOT NULL
	)`)
	if err != nil {
		return err
	}

	var t string
	if err := db.QueryRow(`SELECT 'UniqueIDJsonIndex'::regclass`).Scan(&t); err != nil {
		if _, err = db.Exec(`CREATE UNIQUE INDEX UniqueIDJsonIndex ON ReportsJson ((Report->>'date'), (Report->>'uniqueID'))`); err != nil {
			return err
		}
	}

	if err := db.QueryRow(`SELECT 'ReceivedJsonIndex'::regclass`).Scan(&t); err != nil {
		if _, err = db.Exec(`CREATE INDEX ReceivedJsonIndex ON ReportsJson (Received)`); err != nil {
			return err
		}
	}

	if err := db.QueryRow(`SELECT 'ReportVersionJsonIndex'::regclass`).Scan(&t); err != nil {
		if _, err = db.Exec(`CREATE INDEX ReportVersionJsonIndex ON ReportsJson (cast((Report->>'urVersion') as numeric))`); err != nil {
			return err
		}
	}

	// Migrate from old schema to new schema if the table exists.
	if err := migrate(db); err != nil {
		return err
	}

	return nil
}
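// insertReport stores a validated report as a JSONB document together with
// the time it was received.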
func insertReport(db *sql.DB, r contract.Report) error {
	_, err := db.Exec("INSERT INTO ReportsJson (Report, Received) VALUES ($1, $2)", r, time.Now().UTC())
	return err
}
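// withDB adapts a handler that needs a *sql.DB into a plain http.HandlerFunc
// by closing over the database handle, e.g. (as used in main below):
//
//	http.HandleFunc("/summary.json", withDB(db, summaryHandler))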
type withDBFunc func(*sql.DB, http.ResponseWriter, *http.Request)

func withDB(db *sql.DB, f withDBFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		f(db, w, r)
	}
}
func main() {
	log.SetFlags(log.Ltime | log.Ldate | log.Lshortfile)
	log.SetOutput(os.Stdout)

	// Template

	fd, err := os.Open("static/index.html")
	if err != nil {
		log.Fatalln("template:", err)
	}
	bs, err := io.ReadAll(fd)
	if err != nil {
		log.Fatalln("template:", err)
	}
	fd.Close()
	tpl = template.Must(template.New("index.html").Funcs(funcs).Parse(string(bs)))

	// DB

	db, err := sql.Open("postgres", dbConn)
	if err != nil {
		log.Fatalln("database:", err)
	}
	err = setupDB(db)
	if err != nil {
		log.Fatalln("database:", err)
	}

	// TLS & Listening

	var listener net.Listener
	if useHTTP {
		listener, err = net.Listen("tcp", listenAddr)
	} else {
		var cert tls.Certificate
		cert, err = tls.LoadX509KeyPair(certFile, keyFile)
		if err != nil {
			log.Fatalln("tls:", err)
		}

		cfg := &tls.Config{
			Certificates:           []tls.Certificate{cert},
			SessionTicketsDisabled: true,
		}
		listener, err = tls.Listen("tcp", listenAddr, cfg)
	}
	if err != nil {
		log.Fatalln("listen:", err)
	}

	srv := http.Server{
		ReadTimeout:  5 * time.Second,
		WriteTimeout: 15 * time.Second,
	}

	http.HandleFunc("/", withDB(db, rootHandler))
	http.HandleFunc("/newdata", withDB(db, newDataHandler))
	http.HandleFunc("/summary.json", withDB(db, summaryHandler))
	http.HandleFunc("/movement.json", withDB(db, movementHandler))
	http.HandleFunc("/performance.json", withDB(db, performanceHandler))
	http.HandleFunc("/blockstats.json", withDB(db, blockStatsHandler))
	http.HandleFunc("/locations.json", withDB(db, locationsHandler))
	http.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir("static"))))

	go cacheRefresher(db)

	err = srv.Serve(listener)
	if err != nil {
		log.Fatalln("https:", err)
	}
}
var (
	cachedIndex     []byte
	cachedLocations []byte
	cacheTime       time.Time
	cacheMut        sync.Mutex
)

const maxCacheTime = 15 * time.Minute
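// cacheRefresher periodically re-renders the cached index page and locations
// JSON so that interactive requests normally hit the cache. It ticks slightly
// more often than maxCacheTime, so the handlers rarely have to refresh inline.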
func cacheRefresher(db *sql.DB) {
	ticker := time.NewTicker(maxCacheTime - time.Minute)
	defer ticker.Stop()
	for ; true; <-ticker.C {
		cacheMut.Lock()
		if err := refreshCacheLocked(db); err != nil {
			log.Println(err)
		}
		cacheMut.Unlock()
	}
}
func refreshCacheLocked(db *sql.DB) error {
	rep := getReport(db)
	buf := new(bytes.Buffer)
	err := tpl.Execute(buf, rep)
	if err != nil {
		return err
	}
	cachedIndex = buf.Bytes()
	cacheTime = time.Now()

	locs := rep["locations"].(map[location]int)
	wlocs := make([]weightedLocation, 0, len(locs))
	for loc, w := range locs {
		wlocs = append(wlocs, weightedLocation{loc, w})
	}
	cachedLocations, _ = json.Marshal(wlocs)
	return nil
}
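// rootHandler serves the cached, template-rendered index page for "/" and
// "/index.html", refreshing the cache inline if it is older than
// maxCacheTime. All other paths return 404.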
func rootHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
	if r.URL.Path == "/" || r.URL.Path == "/index.html" {
		cacheMut.Lock()
		defer cacheMut.Unlock()

		if time.Since(cacheTime) > maxCacheTime {
			if err := refreshCacheLocked(db); err != nil {
				log.Println(err)
				http.Error(w, "Template Error", http.StatusInternalServerError)
				return
			}
		}

		w.Header().Set("Content-Type", "text/html; charset=utf-8")
		w.Write(cachedIndex)
	} else {
		http.Error(w, "Not found", 404)
		return
	}
}
func locationsHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
	cacheMut.Lock()
	defer cacheMut.Unlock()

	if time.Since(cacheTime) > maxCacheTime {
		if err := refreshCacheLocked(db); err != nil {
			log.Println(err)
			http.Error(w, "Template Error", http.StatusInternalServerError)
			return
		}
	}

	w.Header().Set("Content-Type", "application/json; charset=utf-8")
	w.Write(cachedLocations)
}
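// newDataHandler ingests a usage report POSTed by a Syncthing instance: it
// derives the client address (preferring X-Forwarded-For), decodes up to
// 40 KiB of JSON into a contract.Report, validates it, and inserts it into
// ReportsJson. A hypothetical submission could look like:
//
//	curl -X POST https://localhost:8443/newdata \
//	    -d '{"uniqueID":"abc123","urVersion":3,"version":"v1.12.0", ...}'
//
// Duplicate reports for the same unique ID and day are dropped silently.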
func newDataHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
	defer r.Body.Close()

	addr := r.Header.Get("X-Forwarded-For")
	if addr != "" {
		addr = strings.Split(addr, ", ")[0]
	} else {
		addr = r.RemoteAddr
	}

	if host, _, err := net.SplitHostPort(addr); err == nil {
		addr = host
	}

	if net.ParseIP(addr) == nil {
		addr = ""
	}

	var rep contract.Report
	rep.Date = time.Now().UTC().Format("20060102")
	rep.Address = addr

	lr := &io.LimitedReader{R: r.Body, N: 40 * 1024}
	bs, _ := io.ReadAll(lr)
	if err := json.Unmarshal(bs, &rep); err != nil {
		log.Println("decode:", err)
		if debug {
			log.Printf("%s", bs)
		}
		http.Error(w, "JSON Decode Error", http.StatusInternalServerError)
		return
	}

	if err := rep.Validate(); err != nil {
		log.Println("validate:", err)
		if debug {
			log.Printf("%#v", rep)
		}
		http.Error(w, "Validation Error", http.StatusInternalServerError)
		return
	}

	if err := insertReport(db, rep); err != nil {
		if err.Error() == `pq: duplicate key value violates unique constraint "uniqueidjsonindex"` {
			// We already have a report today for the same unique ID; drop
			// this one without complaining.
			return
		}
		log.Println("insert:", err)
		if debug {
			log.Printf("%#v", rep)
		}
		http.Error(w, "Database Error", http.StatusInternalServerError)
		return
	}
}
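// summaryHandler returns the per-day, per-version user counts as a JSON
// table. The optional "min" query parameter drops versions that never reach
// that many users, for example /summary.json?min=100.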
func summaryHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
	min, _ := strconv.Atoi(r.URL.Query().Get("min"))
	s, err := getSummary(db, min)
	if err != nil {
		log.Println("summaryHandler:", err)
		http.Error(w, "Database Error", http.StatusInternalServerError)
		return
	}

	bs, err := s.MarshalJSON()
	if err != nil {
		log.Println("summaryHandler:", err)
		http.Error(w, "JSON Encode Error", http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	w.Write(bs)
}
func movementHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
	s, err := getMovement(db)
	if err != nil {
		log.Println("movementHandler:", err)
		http.Error(w, "Database Error", http.StatusInternalServerError)
		return
	}

	bs, err := json.Marshal(s)
	if err != nil {
		log.Println("movementHandler:", err)
		http.Error(w, "JSON Encode Error", http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	w.Write(bs)
}
func performanceHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
	s, err := getPerformance(db)
	if err != nil {
		log.Println("performanceHandler:", err)
		http.Error(w, "Database Error", http.StatusInternalServerError)
		return
	}

	bs, err := json.Marshal(s)
	if err != nil {
		log.Println("performanceHandler:", err)
		http.Error(w, "JSON Encode Error", http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	w.Write(bs)
}
func blockStatsHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
	s, err := getBlockStats(db)
	if err != nil {
		log.Println("blockStatsHandler:", err)
		http.Error(w, "Database Error", http.StatusInternalServerError)
		return
	}

	bs, err := json.Marshal(s)
	if err != nil {
		log.Println("blockStatsHandler:", err)
		http.Error(w, "JSON Encode Error", http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	w.Write(bs)
}
type category struct {
	Values [4]float64
	Key    string
	Descr  string
	Unit   string
	Type   NumberType
}

type feature struct {
	Key     string
	Version string
	Count   int
	Pct     float64
}

type featureGroup struct {
	Key     string
	Version string
	Counts  map[string]int
}

// Used in the templates
type counter struct {
	n int
}

func (c *counter) Current() int {
	return c.n
}

func (c *counter) Increment() string {
	c.n++
	return ""
}

func (c *counter) DrawTwoDivider() bool {
	return c.n != 0 && c.n%2 == 0
}

// add sets a key in a nested map, initializing things if needed as we go.
func add(storage map[string]map[string]int, parent, child string, value int) {
	n, ok := storage[parent]
	if !ok {
		n = make(map[string]int)
		storage[parent] = n
	}
	n[child] += value
}

// inc makes sure that even for unused features, we initialize them in the
// feature map. Furthermore, this acts as a helper that accepts booleans
// to increment by one, or integers to increment by that integer.
func inc(storage map[string]int, key string, i interface{}) {
	cv := storage[key]
	switch v := i.(type) {
	case bool:
		if v {
			cv++
		}
	case int:
		cv += v
	}
	storage[key] = cv
}

type location struct {
	Latitude  float64 `json:"lat"`
	Longitude float64 `json:"lon"`
}

type weightedLocation struct {
	location
	Weight int `json:"weight"`
}
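// getReport aggregates the reports received during the last 24 hours into
// the data structure consumed by the index template: per-category
// statistics, feature and feature-group counts keyed by report version,
// GeoIP-derived locations and countries, and version/platform/compiler
// analytics.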
func getReport(db *sql.DB) map[string]interface{} {
	geoip, err := geoip2.Open(geoIPPath)
	if err != nil {
		log.Println("opening geoip db", err)
		geoip = nil
	} else {
		defer geoip.Close()
	}

	nodes := 0
	countriesTotal := 0
	var versions []string
	var platforms []string
	var numFolders []int
	var numDevices []int
	var totFiles []int
	var maxFiles []int
	var totMiB []int64
	var maxMiB []int64
	var memoryUsage []int64
	var sha256Perf []float64
	var memorySize []int64
	var uptime []int
	var compilers []string
	var builders []string
	var distributions []string
	locations := make(map[location]int)
	countries := make(map[string]int)
	reports := make(map[string]int)
	totals := make(map[string]int)

	// category -> version -> feature -> count
	features := make(map[string]map[string]map[string]int)
	// category -> version -> feature -> group -> count
	featureGroups := make(map[string]map[string]map[string]map[string]int)
	for _, category := range featureOrder {
		features[category] = make(map[string]map[string]int)
		featureGroups[category] = make(map[string]map[string]map[string]int)
		for _, version := range knownVersions {
			features[category][version] = make(map[string]int)
			featureGroups[category][version] = make(map[string]map[string]int)
		}
	}

	// Initialize some features that hide behind if conditions, and might not
	// be initialized.
	add(featureGroups["Various"]["v2"], "Upgrades", "Pre-release", 0)
	add(featureGroups["Various"]["v2"], "Upgrades", "Automatic", 0)
	add(featureGroups["Various"]["v2"], "Upgrades", "Manual", 0)
	add(featureGroups["Various"]["v2"], "Upgrades", "Disabled", 0)
	add(featureGroups["Various"]["v3"], "Temporary Retention", "Disabled", 0)
	add(featureGroups["Various"]["v3"], "Temporary Retention", "Custom", 0)
	add(featureGroups["Various"]["v3"], "Temporary Retention", "Default", 0)
	add(featureGroups["Connection"]["v3"], "IP version", "IPv4", 0)
	add(featureGroups["Connection"]["v3"], "IP version", "IPv6", 0)
	add(featureGroups["Connection"]["v3"], "IP version", "Unknown", 0)

	var numCPU []int

	var rep contract.Report
	rows, err := db.Query(`SELECT Received, Report FROM ReportsJson WHERE Received > now() - '1 day'::INTERVAL`)
	if err != nil {
		log.Println("sql:", err)
		return nil
	}
	defer rows.Close()

	for rows.Next() {
		err := rows.Scan(&rep.Received, &rep)
		if err != nil {
			log.Println("sql:", err)
			return nil
		}

		if geoip != nil && rep.Address != "" {
			if addr, err := net.ResolveTCPAddr("tcp", net.JoinHostPort(rep.Address, "0")); err == nil {
				city, err := geoip.City(addr.IP)
				if err == nil {
					loc := location{
						Latitude:  city.Location.Latitude,
						Longitude: city.Location.Longitude,
					}
					locations[loc]++
					countries[city.Country.Names["en"]]++
					countriesTotal++
				}
			}
		}
		nodes++
		versions = append(versions, transformVersion(rep.Version))
		platforms = append(platforms, rep.Platform)

		if m := compilerRe.FindStringSubmatch(rep.LongVersion); len(m) == 3 {
			compilers = append(compilers, m[1])
			builders = append(builders, m[2])
		loop:
			for _, d := range knownDistributions {
				if d.matcher.MatchString(rep.LongVersion) {
					distributions = append(distributions, d.distribution)
					break loop
				}
			}
		}

		if rep.NumFolders > 0 {
			numFolders = append(numFolders, rep.NumFolders)
		}
		if rep.NumDevices > 0 {
			numDevices = append(numDevices, rep.NumDevices)
		}
		if rep.TotFiles > 0 {
			totFiles = append(totFiles, rep.TotFiles)
		}
		if rep.FolderMaxFiles > 0 {
			maxFiles = append(maxFiles, rep.FolderMaxFiles)
		}
		if rep.TotMiB > 0 {
			totMiB = append(totMiB, int64(rep.TotMiB)*(1<<20))
		}
		if rep.FolderMaxMiB > 0 {
			maxMiB = append(maxMiB, int64(rep.FolderMaxMiB)*(1<<20))
		}
		if rep.MemoryUsageMiB > 0 {
			memoryUsage = append(memoryUsage, int64(rep.MemoryUsageMiB)*(1<<20))
		}
		if rep.SHA256Perf > 0 {
			sha256Perf = append(sha256Perf, rep.SHA256Perf*(1<<20))
		}
		if rep.MemorySize > 0 {
			memorySize = append(memorySize, int64(rep.MemorySize)*(1<<20))
		}
		if rep.Uptime > 0 {
			uptime = append(uptime, rep.Uptime)
		}

		totals["Device"] += rep.NumDevices
		totals["Folder"] += rep.NumFolders
		if rep.URVersion >= 2 {
			reports["v2"]++
			numCPU = append(numCPU, rep.NumCPU)

			// Various
			inc(features["Various"]["v2"], "Rate limiting", rep.UsesRateLimit)
			if rep.UpgradeAllowedPre {
				add(featureGroups["Various"]["v2"], "Upgrades", "Pre-release", 1)
			} else if rep.UpgradeAllowedAuto {
				add(featureGroups["Various"]["v2"], "Upgrades", "Automatic", 1)
			} else if rep.UpgradeAllowedManual {
				add(featureGroups["Various"]["v2"], "Upgrades", "Manual", 1)
			} else {
				add(featureGroups["Various"]["v2"], "Upgrades", "Disabled", 1)
			}

			// Folders
			inc(features["Folder"]["v2"], "Automatic normalization", rep.FolderUses.AutoNormalize)
			inc(features["Folder"]["v2"], "Ignore deletes", rep.FolderUses.IgnoreDelete)
			inc(features["Folder"]["v2"], "Ignore permissions", rep.FolderUses.IgnorePerms)
			inc(features["Folder"]["v2"], "Mode, send only", rep.FolderUses.SendOnly)
			inc(features["Folder"]["v2"], "Mode, receive only", rep.FolderUses.ReceiveOnly)

			add(featureGroups["Folder"]["v2"], "Versioning", "Simple", rep.FolderUses.SimpleVersioning)
			add(featureGroups["Folder"]["v2"], "Versioning", "External", rep.FolderUses.ExternalVersioning)
			add(featureGroups["Folder"]["v2"], "Versioning", "Staggered", rep.FolderUses.StaggeredVersioning)
			add(featureGroups["Folder"]["v2"], "Versioning", "Trashcan", rep.FolderUses.TrashcanVersioning)
			add(featureGroups["Folder"]["v2"], "Versioning", "Disabled", rep.NumFolders-rep.FolderUses.SimpleVersioning-rep.FolderUses.ExternalVersioning-rep.FolderUses.StaggeredVersioning-rep.FolderUses.TrashcanVersioning)

			// Device
			inc(features["Device"]["v2"], "Custom certificate", rep.DeviceUses.CustomCertName)
			inc(features["Device"]["v2"], "Introducer", rep.DeviceUses.Introducer)

			add(featureGroups["Device"]["v2"], "Compress", "Always", rep.DeviceUses.CompressAlways)
			add(featureGroups["Device"]["v2"], "Compress", "Metadata", rep.DeviceUses.CompressMetadata)
			add(featureGroups["Device"]["v2"], "Compress", "Nothing", rep.DeviceUses.CompressNever)
			add(featureGroups["Device"]["v2"], "Addresses", "Dynamic", rep.DeviceUses.DynamicAddr)
			add(featureGroups["Device"]["v2"], "Addresses", "Static", rep.DeviceUses.StaticAddr)

			// Connections
			inc(features["Connection"]["v2"], "Relaying, enabled", rep.Relays.Enabled)
			inc(features["Connection"]["v2"], "Discovery, global enabled", rep.Announce.GlobalEnabled)
			inc(features["Connection"]["v2"], "Discovery, local enabled", rep.Announce.LocalEnabled)

			add(featureGroups["Connection"]["v2"], "Discovery", "Default servers (using DNS)", rep.Announce.DefaultServersDNS)
			add(featureGroups["Connection"]["v2"], "Discovery", "Default servers (using IP)", rep.Announce.DefaultServersIP)
			// "Other servers" should count the non-default discovery servers,
			// not repeat the default-IP count.
			add(featureGroups["Connection"]["v2"], "Discovery", "Other servers", rep.Announce.OtherServers)
			add(featureGroups["Connection"]["v2"], "Relaying", "Default relays", rep.Relays.DefaultServers)
			add(featureGroups["Connection"]["v2"], "Relaying", "Other relays", rep.Relays.OtherServers)
		}
		if rep.URVersion >= 3 {
			reports["v3"]++

			inc(features["Various"]["v3"], "Custom LAN classification", rep.AlwaysLocalNets)
			inc(features["Various"]["v3"], "Ignore caching", rep.CacheIgnoredFiles)
			inc(features["Various"]["v3"], "Overwrite device names", rep.OverwriteRemoteDeviceNames)
			inc(features["Various"]["v3"], "Download progress disabled", !rep.ProgressEmitterEnabled)
			inc(features["Various"]["v3"], "Custom default path", rep.CustomDefaultFolderPath)
			inc(features["Various"]["v3"], "Custom traffic class", rep.CustomTrafficClass)
			inc(features["Various"]["v3"], "Custom temporary index threshold", rep.CustomTempIndexMinBlocks)
			inc(features["Various"]["v3"], "Weak hash enabled", rep.WeakHashEnabled)
			inc(features["Various"]["v3"], "LAN rate limiting", rep.LimitBandwidthInLan)
			inc(features["Various"]["v3"], "Custom release server", rep.CustomReleaseURL)
			inc(features["Various"]["v3"], "Restart after suspend", rep.RestartOnWakeup)
			inc(features["Various"]["v3"], "Custom stun servers", rep.CustomStunServers)
			inc(features["Various"]["v3"], "Ignore patterns", rep.IgnoreStats.Lines > 0)

			if rep.NATType != "" {
				natType := rep.NATType
				natType = strings.ReplaceAll(natType, "unknown", "Unknown")
				natType = strings.ReplaceAll(natType, "Symetric", "Symmetric")
				add(featureGroups["Various"]["v3"], "NAT Type", natType, 1)
			}

			if rep.TemporariesDisabled {
				add(featureGroups["Various"]["v3"], "Temporary Retention", "Disabled", 1)
			} else if rep.TemporariesCustom {
				add(featureGroups["Various"]["v3"], "Temporary Retention", "Custom", 1)
			} else {
				add(featureGroups["Various"]["v3"], "Temporary Retention", "Default", 1)
			}

			inc(features["Folder"]["v3"], "Scan progress disabled", rep.FolderUsesV3.ScanProgressDisabled)
			inc(features["Folder"]["v3"], "Disable sharing of partial files", rep.FolderUsesV3.DisableTempIndexes)
			inc(features["Folder"]["v3"], "Disable sparse files", rep.FolderUsesV3.DisableSparseFiles)
			inc(features["Folder"]["v3"], "Weak hash, always", rep.FolderUsesV3.AlwaysWeakHash)
			inc(features["Folder"]["v3"], "Weak hash, custom threshold", rep.FolderUsesV3.CustomWeakHashThreshold)
			inc(features["Folder"]["v3"], "Filesystem watcher", rep.FolderUsesV3.FsWatcherEnabled)
			inc(features["Folder"]["v3"], "Case sensitive FS", rep.FolderUsesV3.CaseSensitiveFS)
			inc(features["Folder"]["v3"], "Mode, receive encrypted", rep.FolderUsesV3.ReceiveEncrypted)

			add(featureGroups["Folder"]["v3"], "Conflicts", "Disabled", rep.FolderUsesV3.ConflictsDisabled)
			add(featureGroups["Folder"]["v3"], "Conflicts", "Unlimited", rep.FolderUsesV3.ConflictsUnlimited)
			add(featureGroups["Folder"]["v3"], "Conflicts", "Limited", rep.FolderUsesV3.ConflictsOther)

			for key, value := range rep.FolderUsesV3.PullOrder {
				add(featureGroups["Folder"]["v3"], "Pull Order", prettyCase(key), value)
			}

			inc(features["Device"]["v3"], "Untrusted", rep.DeviceUsesV3.Untrusted)

			totals["GUI"] += rep.GUIStats.Enabled

			inc(features["GUI"]["v3"], "Auth Enabled", rep.GUIStats.UseAuth)
			inc(features["GUI"]["v3"], "TLS Enabled", rep.GUIStats.UseTLS)
			inc(features["GUI"]["v3"], "Insecure Admin Access", rep.GUIStats.InsecureAdminAccess)
			inc(features["GUI"]["v3"], "Skip Host check", rep.GUIStats.InsecureSkipHostCheck)
			inc(features["GUI"]["v3"], "Allow Frame loading", rep.GUIStats.InsecureAllowFrameLoading)

			add(featureGroups["GUI"]["v3"], "Listen address", "Local", rep.GUIStats.ListenLocal)
			add(featureGroups["GUI"]["v3"], "Listen address", "Unspecified", rep.GUIStats.ListenUnspecified)
			add(featureGroups["GUI"]["v3"], "Listen address", "Other", rep.GUIStats.Enabled-rep.GUIStats.ListenLocal-rep.GUIStats.ListenUnspecified)

			for theme, count := range rep.GUIStats.Theme {
				add(featureGroups["GUI"]["v3"], "Theme", prettyCase(theme), count)
			}

			for transport, count := range rep.TransportStats {
				add(featureGroups["Connection"]["v3"], "Transport", strings.Title(transport), count)
				if strings.HasSuffix(transport, "4") {
					add(featureGroups["Connection"]["v3"], "IP version", "IPv4", count)
				} else if strings.HasSuffix(transport, "6") {
					add(featureGroups["Connection"]["v3"], "IP version", "IPv6", count)
				} else {
					add(featureGroups["Connection"]["v3"], "IP version", "Unknown", count)
				}
			}
		}
	}
	var categories []category
	categories = append(categories, category{
		Values: statsForInts(totFiles),
		Descr:  "Files Managed per Device",
	})

	categories = append(categories, category{
		Values: statsForInts(maxFiles),
		Descr:  "Files in Largest Folder",
	})

	categories = append(categories, category{
		Values: statsForInt64s(totMiB),
		Descr:  "Data Managed per Device",
		Unit:   "B",
		Type:   NumberBinary,
	})

	categories = append(categories, category{
		Values: statsForInt64s(maxMiB),
		Descr:  "Data in Largest Folder",
		Unit:   "B",
		Type:   NumberBinary,
	})

	categories = append(categories, category{
		Values: statsForInts(numDevices),
		Descr:  "Number of Devices in Cluster",
	})

	categories = append(categories, category{
		Values: statsForInts(numFolders),
		Descr:  "Number of Folders Configured",
	})

	categories = append(categories, category{
		Values: statsForInt64s(memoryUsage),
		Descr:  "Memory Usage",
		Unit:   "B",
		Type:   NumberBinary,
	})

	categories = append(categories, category{
		Values: statsForInt64s(memorySize),
		Descr:  "System Memory",
		Unit:   "B",
		Type:   NumberBinary,
	})

	categories = append(categories, category{
		Values: statsForFloats(sha256Perf),
		Descr:  "SHA-256 Hashing Performance",
		Unit:   "B/s",
		Type:   NumberBinary,
	})

	categories = append(categories, category{
		Values: statsForInts(numCPU),
		Descr:  "Number of CPU cores",
	})

	categories = append(categories, category{
		Values: statsForInts(uptime),
		Descr:  "Uptime (v3)",
		Type:   NumberDuration,
	})
	reportFeatures := make(map[string][]feature)
	for featureType, versions := range features {
		var featureList []feature
		for version, featureMap := range versions {
			// We count totals of the given feature type, for example the
			// number of folders or devices. If no such total exists, we work
			// out the percentage against the total number of reports for
			// that version. Things like "Various" never have counts.
			total, ok := totals[featureType]
			if !ok {
				total = reports[version]
			}
			for key, count := range featureMap {
				featureList = append(featureList, feature{
					Key:     key,
					Version: version,
					Count:   count,
					Pct:     (100 * float64(count)) / float64(total),
				})
			}
		}
		sort.Sort(sort.Reverse(sortableFeatureList(featureList)))
		reportFeatures[featureType] = featureList
	}

	reportFeatureGroups := make(map[string][]featureGroup)
	for featureType, versions := range featureGroups {
		var featureList []featureGroup
		for version, featureMap := range versions {
			for key, counts := range featureMap {
				featureList = append(featureList, featureGroup{
					Key:     key,
					Version: version,
					Counts:  counts,
				})
			}
		}
		reportFeatureGroups[featureType] = featureList
	}

	var countryList []feature
	for country, count := range countries {
		countryList = append(countryList, feature{
			Key:   country,
			Count: count,
			Pct:   (100 * float64(count)) / float64(countriesTotal),
		})
		sort.Sort(sort.Reverse(sortableFeatureList(countryList)))
	}
	r := make(map[string]interface{})
	r["features"] = reportFeatures
	r["featureGroups"] = reportFeatureGroups
	r["nodes"] = nodes
	r["versionNodes"] = reports
	r["categories"] = categories
	r["versions"] = group(byVersion, analyticsFor(versions, 2000), 10)
	r["versionPenetrations"] = penetrationLevels(analyticsFor(versions, 2000), []float64{50, 75, 90, 95})
	r["platforms"] = group(byPlatform, analyticsFor(platforms, 2000), 10)
	r["compilers"] = group(byCompiler, analyticsFor(compilers, 2000), 5)
	r["builders"] = analyticsFor(builders, 12)
	r["distributions"] = analyticsFor(distributions, len(knownDistributions))
	r["featureOrder"] = featureOrder
	r["locations"] = locations
	r["contries"] = countryList // sic; the misspelled key is kept as-is since the template presumably looks it up by this name

	return r
}
var (
	plusRe  = regexp.MustCompile(`(\+.*|\.dev\..*)$`)
	plusStr = "(+dev)"
)
// transformVersion returns a version number formatted correctly, with all
// development versions aggregated into one.
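// For example, a (hypothetical) "1.18.0+17-gabcdef" becomes "v1.18.0 (+dev)".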
func transformVersion(v string) string {
	if v == "unknown-dev" {
		return v
	}
	if !strings.HasPrefix(v, "v") {
		v = "v" + v
	}
	v = plusRe.ReplaceAllString(v, " "+plusStr)

	return v
}
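// summary accumulates per-day user counts per version and serializes them as
// a table suitable for charting, with one row per day and one column per
// version, e.g. (hypothetical):
//
//	["Day", "v1.17", "v1.18"]
//	["2021-06-01", 120, 80]
//	["2021-06-02", null, 95]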
type summary struct {
	versions map[string]int   // version string to count index
	max      map[string]int   // version string to max users per day
	rows     map[string][]int // date to list of counts
}

func newSummary() summary {
	return summary{
		versions: make(map[string]int),
		max:      make(map[string]int),
		rows:     make(map[string][]int),
	}
}
func (s *summary) setCount(date, version string, count int) {
	idx, ok := s.versions[version]
	if !ok {
		idx = len(s.versions)
		s.versions[version] = idx
	}

	if s.max[version] < count {
		s.max[version] = count
	}

	row := s.rows[date]
	if len(row) <= idx {
		old := row
		row = make([]int, idx+1)
		copy(row, old)
		s.rows[date] = row
	}

	row[idx] = count
}
func (s *summary) MarshalJSON() ([]byte, error) {
	var versions []string
	for v := range s.versions {
		versions = append(versions, v)
	}
	sort.Slice(versions, func(a, b int) bool {
		return upgrade.CompareVersions(versions[a], versions[b]) < 0
	})

	var filtered []string
	for _, v := range versions {
		if s.max[v] > 50 {
			filtered = append(filtered, v)
		}
	}
	versions = filtered

	headerRow := []interface{}{"Day"}
	for _, v := range versions {
		headerRow = append(headerRow, v)
	}

	var table [][]interface{}
	table = append(table, headerRow)

	var dates []string
	for k := range s.rows {
		dates = append(dates, k)
	}
	sort.Strings(dates)

	for _, date := range dates {
		row := []interface{}{date}
		for _, ver := range versions {
			idx := s.versions[ver]
			if len(s.rows[date]) > idx && s.rows[date][idx] > 0 {
				row = append(row, s.rows[date][idx])
			} else {
				row = append(row, nil)
			}
		}
		table = append(table, row)
	}

	return json.Marshal(table)
}
// filter removes versions that never reach the specified min count.
func (s *summary) filter(min int) {
	// We cheat and just remove the versions from the "index" and leave the
	// data points alone. The version index is used to build the table when
	// we do the serialization, so at that point the data points are
	// filtered out as well.
	for ver := range s.versions {
		if s.max[ver] < min {
			delete(s.versions, ver)
			delete(s.max, ver)
		}
	}
}
func getSummary(db *sql.DB, min int) (summary, error) {
	s := newSummary()

	rows, err := db.Query(`SELECT Day, Version, Count FROM VersionSummary WHERE Day > now() - '2 year'::INTERVAL;`)
	if err != nil {
		return summary{}, err
	}
	defer rows.Close()

	for rows.Next() {
		var day time.Time
		var ver string
		var num int
		err := rows.Scan(&day, &ver, &num)
		if err != nil {
			return summary{}, err
		}

		if ver == "v0.0" {
			// ?
			continue
		}

		// SUPER UGLY HACK to avoid having to do sorting properly
		if len(ver) == 4 && strings.HasPrefix(ver, "v0.") { // v0.x
			ver = ver[:3] + "0" + ver[3:] // now v0.0x
		}

		s.setCount(day.Format("2006-01-02"), ver, num)
	}

	s.filter(min)

	return s, nil
}
func getMovement(db *sql.DB) ([][]interface{}, error) {
	rows, err := db.Query(`SELECT Day, Added, Removed, Bounced FROM UserMovement WHERE Day > now() - '2 year'::INTERVAL ORDER BY Day`)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	res := [][]interface{}{
		{"Day", "Joined", "Left", "Bounced"},
	}

	for rows.Next() {
		var day time.Time
		var added, removed, bounced int
		err := rows.Scan(&day, &added, &removed, &bounced)
		if err != nil {
			return nil, err
		}

		row := []interface{}{day.Format("2006-01-02"), added, -removed, bounced}
		if removed == 0 {
			row[2] = nil
		}
		if bounced == 0 {
			row[3] = nil
		}

		res = append(res, row)
	}

	return res, nil
}
func getPerformance(db *sql.DB) ([][]interface{}, error) {
	rows, err := db.Query(`SELECT Day, TotFiles, TotMiB, SHA256Perf, MemorySize, MemoryUsageMiB FROM Performance WHERE Day > '2014-06-20'::TIMESTAMP ORDER BY Day`)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	res := [][]interface{}{
		{"Day", "TotFiles", "TotMiB", "SHA256Perf", "MemorySize", "MemoryUsageMiB"},
	}

	for rows.Next() {
		var day time.Time
		var sha256Perf float64
		var totFiles, totMiB, memorySize, memoryUsage int
		err := rows.Scan(&day, &totFiles, &totMiB, &sha256Perf, &memorySize, &memoryUsage)
		if err != nil {
			return nil, err
		}

		row := []interface{}{day.Format("2006-01-02"), totFiles, totMiB, float64(int(sha256Perf*10)) / 10, memorySize, memoryUsage}
		res = append(res, row)
	}

	return res, nil
}
func getBlockStats(db *sql.DB) ([][]interface{}, error) {
	rows, err := db.Query(`SELECT Day, Reports, Pulled, Renamed, Reused, CopyOrigin, CopyOriginShifted, CopyElsewhere FROM BlockStats WHERE Day > '2017-10-23'::TIMESTAMP ORDER BY Day`)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	res := [][]interface{}{
		{"Day", "Number of Reports", "Transferred (GiB)", "Saved by renaming files (GiB)", "Saved by resuming transfer (GiB)", "Saved by reusing data from old file (GiB)", "Saved by reusing shifted data from old file (GiB)", "Saved by reusing data from other files (GiB)"},
	}

	// Block counts are converted to GiB; 8192 blocks of 128 KiB each make up
	// one GiB (assuming the standard 128 KiB block size).
	blocksToGb := float64(8 * 1024)

	for rows.Next() {
		var day time.Time
		var reports, pulled, renamed, reused, copyOrigin, copyOriginShifted, copyElsewhere float64
		err := rows.Scan(&day, &reports, &pulled, &renamed, &reused, &copyOrigin, &copyOriginShifted, &copyElsewhere)
		if err != nil {
			return nil, err
		}

		row := []interface{}{
			day.Format("2006-01-02"),
			reports,
			pulled / blocksToGb,
			renamed / blocksToGb,
			reused / blocksToGb,
			copyOrigin / blocksToGb,
			copyOriginShifted / blocksToGb,
			copyElsewhere / blocksToGb,
		}
		res = append(res, row)
	}

	return res, nil
}
type sortableFeatureList []feature

func (l sortableFeatureList) Len() int {
	return len(l)
}

func (l sortableFeatureList) Swap(a, b int) {
	l[a], l[b] = l[b], l[a]
}

func (l sortableFeatureList) Less(a, b int) bool {
	if l[a].Pct != l[b].Pct {
		return l[a].Pct < l[b].Pct
	}
	return l[a].Key > l[b].Key
}
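// prettyCase upper-cases the first rune and inserts a space before each
// subsequent upper-case rune, so a (hypothetical) input "smallestFirst"
// becomes "Smallest First".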
func prettyCase(input string) string {
	output := ""
	for i, runeValue := range input {
		if i == 0 {
			runeValue = unicode.ToUpper(runeValue)
		} else if unicode.IsUpper(runeValue) {
			output += " "
		}
		output += string(runeValue)
	}
	return output
}