2014-06-11 23:40:54 +00:00
|
|
|
package main
|
|
|
|
|
|
|
|
import (
|
2015-03-04 12:02:53 +00:00
|
|
|
"bytes"
|
2014-10-13 09:18:13 +00:00
|
|
|
"crypto/tls"
|
2015-03-04 10:31:46 +00:00
|
|
|
"database/sql"
|
2014-06-11 23:40:54 +00:00
|
|
|
"encoding/json"
|
|
|
|
"fmt"
|
2014-06-28 07:46:03 +00:00
|
|
|
"html/template"
|
2014-06-11 23:40:54 +00:00
|
|
|
"io"
|
2014-06-28 07:46:03 +00:00
|
|
|
"io/ioutil"
|
2014-06-11 23:40:54 +00:00
|
|
|
"log"
|
|
|
|
"net/http"
|
|
|
|
"os"
|
2014-06-28 07:46:03 +00:00
|
|
|
"regexp"
|
|
|
|
"strings"
|
2014-06-11 23:40:54 +00:00
|
|
|
"sync"
|
|
|
|
"time"
|
2015-03-04 10:31:46 +00:00
|
|
|
|
|
|
|
_ "github.com/lib/pq"
|
2014-06-11 23:40:54 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
var (
	// TLS key/certificate paths, overridable via environment.
	keyFile  = getEnvDefault("UR_KEY_FILE", "key.pem")
	certFile = getEnvDefault("UR_CRT_FILE", "crt.pem")
	// PostgreSQL connection string (lib/pq URL form).
	dbConn = getEnvDefault("UR_DB_URL", "postgres://user:password@localhost/ur?sslmode=disable")
	// Address the HTTPS listener binds to.
	listenAddr = getEnvDefault("UR_LISTEN", "0.0.0.0:8443")

	// tpl holds the parsed index page template; set once in main.
	tpl *template.Template
)
|
|
|
|
|
2014-06-28 07:46:03 +00:00
|
|
|
// funcs are the helper functions exposed to the index template.
// The implementations (commatize, number — presumably numeric
// formatting helpers) live elsewhere in this package.
var funcs = map[string]interface{}{
	"commatize": commatize,
	"number":    number,
}
|
|
|
|
|
2015-03-04 10:31:46 +00:00
|
|
|
// getEnvDefault returns the value of the environment variable key, or
// def when the variable is unset or empty.
func getEnvDefault(key, def string) string {
	val := os.Getenv(key)
	if val == "" {
		return def
	}
	return val
}
|
|
|
|
|
|
|
|
// report is one usage report as submitted by a client and stored in
// the Reports table. Field order matches the table schema in setupDB.
type report struct {
	Received time.Time // Only from DB; set by the INSERT, never by clients

	UniqueID       string
	Version        string
	LongVersion    string
	Platform       string
	NumFolders     int
	NumDevices     int
	TotFiles       int
	FolderMaxFiles int
	TotMiB         int
	FolderMaxMiB   int
	MemoryUsageMiB int
	SHA256Perf     float64
	MemorySize     int

	// Date is the receive date in YYYYMMDD form; filled in server side.
	Date string
}

// Validate checks that the mandatory identification fields are present
// and that the server-side Date field has been filled in.
func (r *report) Validate() error {
	switch {
	case r.UniqueID == "", r.Version == "", r.Platform == "":
		return fmt.Errorf("missing required field")
	case len(r.Date) != 8:
		return fmt.Errorf("date not initialized")
	}
	return nil
}
|
|
|
|
|
|
|
|
// setupDB creates the Reports table and its indexes if they do not
// already exist. It must be called once at startup before serving.
func setupDB(db *sql.DB) error {
	_, err := db.Exec(`CREATE TABLE IF NOT EXISTS Reports (
		Received TIMESTAMP NOT NULL,
		UniqueID VARCHAR(32) NOT NULL,
		Version VARCHAR(32) NOT NULL,
		LongVersion VARCHAR(256) NOT NULL,
		Platform VARCHAR(32) NOT NULL,
		NumFolders INTEGER NOT NULL,
		NumDevices INTEGER NOT NULL,
		TotFiles INTEGER NOT NULL,
		FolderMaxFiles INTEGER NOT NULL,
		TotMiB INTEGER NOT NULL,
		FolderMaxMiB INTEGER NOT NULL,
		MemoryUsageMiB INTEGER NOT NULL,
		SHA256Perf DOUBLE PRECISION NOT NULL,
		MemorySize INTEGER NOT NULL,
		Date VARCHAR(8) NOT NULL
	)`)
	if err != nil {
		return err
	}

	// Probe for each index by casting its name to regclass; the query
	// fails when the index does not exist, in which case we create it.
	// The original scanned into nil — which database/sql rejects, making
	// the probe always "fail" — and assigned the CREATE INDEX error to a
	// shadowed err, silently discarding it. Scan into a throwaway string
	// and propagate creation errors instead.
	var name string

	row := db.QueryRow(`SELECT 'UniqueIDIndex'::regclass`)
	if err := row.Scan(&name); err != nil {
		if _, err := db.Exec(`CREATE UNIQUE INDEX UniqueIDIndex ON Reports (Date, UniqueID)`); err != nil {
			return err
		}
	}

	row = db.QueryRow(`SELECT 'ReceivedIndex'::regclass`)
	if err := row.Scan(&name); err != nil {
		if _, err := db.Exec(`CREATE INDEX ReceivedIndex ON Reports (Received)`); err != nil {
			return err
		}
	}

	return nil
}
|
|
|
|
|
|
|
|
func insertReport(db *sql.DB, r report) error {
|
|
|
|
_, err := db.Exec(`INSERT INTO Reports VALUES (now(), $1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)`,
|
|
|
|
r.UniqueID, r.Version, r.LongVersion, r.Platform, r.NumFolders,
|
|
|
|
r.NumDevices, r.TotFiles, r.FolderMaxFiles, r.TotMiB, r.FolderMaxMiB,
|
|
|
|
r.MemoryUsageMiB, r.SHA256Perf, r.MemorySize, r.Date)
|
|
|
|
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// withDBFunc is an HTTP handler that additionally needs a database
// handle.
type withDBFunc func(*sql.DB, http.ResponseWriter, *http.Request)

// withDB adapts f into a plain http.HandlerFunc with db bound in.
func withDB(db *sql.DB, f withDBFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		f(db, w, r)
	}
}
|
|
|
|
|
2014-06-11 23:40:54 +00:00
|
|
|
func main() {
|
2015-03-04 10:31:46 +00:00
|
|
|
log.SetFlags(log.Ltime | log.Ldate)
|
2014-12-07 14:48:48 +00:00
|
|
|
log.SetOutput(os.Stdout)
|
2015-03-04 10:31:46 +00:00
|
|
|
|
|
|
|
// Template
|
2014-06-11 23:40:54 +00:00
|
|
|
|
2014-06-28 07:46:03 +00:00
|
|
|
fd, err := os.Open("static/index.html")
|
|
|
|
if err != nil {
|
2015-03-04 10:31:46 +00:00
|
|
|
log.Fatalln("template:", err)
|
2014-06-28 07:46:03 +00:00
|
|
|
}
|
|
|
|
bs, err := ioutil.ReadAll(fd)
|
|
|
|
if err != nil {
|
2015-03-04 10:31:46 +00:00
|
|
|
log.Fatalln("template:", err)
|
2014-06-28 07:46:03 +00:00
|
|
|
}
|
|
|
|
fd.Close()
|
|
|
|
tpl = template.Must(template.New("index.html").Funcs(funcs).Parse(string(bs)))
|
|
|
|
|
2015-03-04 10:31:46 +00:00
|
|
|
// DB
|
2014-06-11 23:40:54 +00:00
|
|
|
|
2015-03-04 10:31:46 +00:00
|
|
|
db, err := sql.Open("postgres", dbConn)
|
2014-10-13 09:18:13 +00:00
|
|
|
if err != nil {
|
2015-03-04 10:31:46 +00:00
|
|
|
log.Fatalln("database:", err)
|
|
|
|
}
|
|
|
|
err = setupDB(db)
|
|
|
|
if err != nil {
|
|
|
|
log.Fatalln("database:", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
// TLS
|
|
|
|
|
|
|
|
cert, err := tls.LoadX509KeyPair(certFile, keyFile)
|
|
|
|
if err != nil {
|
|
|
|
log.Fatalln("tls:", err)
|
2014-10-13 09:18:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
cfg := &tls.Config{
|
|
|
|
Certificates: []tls.Certificate{cert},
|
|
|
|
SessionTicketsDisabled: true,
|
|
|
|
}
|
|
|
|
|
2015-03-04 10:31:46 +00:00
|
|
|
// HTTPS
|
|
|
|
|
|
|
|
listener, err := tls.Listen("tcp", listenAddr, cfg)
|
2014-10-13 09:18:13 +00:00
|
|
|
if err != nil {
|
2015-03-04 10:31:46 +00:00
|
|
|
log.Fatalln("https:", err)
|
2014-10-13 09:18:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
srv := http.Server{
|
|
|
|
ReadTimeout: 5 * time.Second,
|
|
|
|
WriteTimeout: 5 * time.Second,
|
|
|
|
}
|
|
|
|
|
2015-03-04 10:31:46 +00:00
|
|
|
http.HandleFunc("/", withDB(db, rootHandler))
|
|
|
|
http.HandleFunc("/newdata", withDB(db, newDataHandler))
|
|
|
|
http.Handle("/static/", http.StripPrefix("/static/", http.FileServer(http.Dir("static"))))
|
|
|
|
|
2014-10-13 09:18:13 +00:00
|
|
|
err = srv.Serve(listener)
|
2014-06-11 23:40:54 +00:00
|
|
|
if err != nil {
|
2015-03-04 10:31:46 +00:00
|
|
|
log.Fatalln("https:", err)
|
2014-06-11 23:40:54 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-03-04 12:02:53 +00:00
|
|
|
// Rendered-index cache. rootHandler re-renders the page at most once
// per maxCacheTime; cacheMut guards all three fields.
var (
	cacheData []byte    // last rendered index page
	cacheTime time.Time // when cacheData was rendered
	cacheMut  sync.Mutex
)

const maxCacheTime = 5 * 60 * time.Second
|
|
|
|
|
2015-03-04 10:31:46 +00:00
|
|
|
func rootHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
|
2014-06-28 07:46:03 +00:00
|
|
|
if r.URL.Path == "/" || r.URL.Path == "/index.html" {
|
2015-03-04 12:02:53 +00:00
|
|
|
cacheMut.Lock()
|
|
|
|
defer cacheMut.Unlock()
|
|
|
|
|
|
|
|
if time.Since(cacheTime) > maxCacheTime {
|
|
|
|
rep := getReport(db)
|
|
|
|
buf := new(bytes.Buffer)
|
|
|
|
err := tpl.Execute(buf, rep)
|
|
|
|
if err != nil {
|
|
|
|
log.Println(err)
|
|
|
|
http.Error(w, "Template Error", http.StatusInternalServerError)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
cacheData = buf.Bytes()
|
|
|
|
cacheTime = time.Now()
|
|
|
|
}
|
2014-06-28 07:46:03 +00:00
|
|
|
|
|
|
|
w.Header().Set("Content-Type", "text/html; charset=utf-8")
|
2015-03-04 12:02:53 +00:00
|
|
|
w.Write(cacheData)
|
2014-06-28 07:46:03 +00:00
|
|
|
} else {
|
|
|
|
http.Error(w, "Not found", 404)
|
2015-03-04 12:02:53 +00:00
|
|
|
return
|
2014-06-28 07:46:03 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-03-04 10:31:46 +00:00
|
|
|
func newDataHandler(db *sql.DB, w http.ResponseWriter, r *http.Request) {
|
|
|
|
defer r.Body.Close()
|
2014-06-28 07:46:03 +00:00
|
|
|
|
2015-03-04 10:31:46 +00:00
|
|
|
var rep report
|
|
|
|
rep.Date = time.Now().UTC().Format("20060102")
|
2014-06-11 23:40:54 +00:00
|
|
|
|
|
|
|
lr := &io.LimitedReader{R: r.Body, N: 10240}
|
2015-03-04 10:31:46 +00:00
|
|
|
if err := json.NewDecoder(lr).Decode(&rep); err != nil {
|
|
|
|
log.Println("json decode:", err)
|
|
|
|
http.Error(w, "JSON Decode Error", http.StatusInternalServerError)
|
2014-06-11 23:40:54 +00:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2015-03-04 10:31:46 +00:00
|
|
|
if err := rep.Validate(); err != nil {
|
|
|
|
log.Println("validate:", err)
|
|
|
|
log.Printf("%#v", rep)
|
|
|
|
http.Error(w, "Validation Error", http.StatusInternalServerError)
|
2014-06-11 23:40:54 +00:00
|
|
|
return
|
|
|
|
}
|
2014-06-16 09:14:01 +00:00
|
|
|
|
2015-03-04 10:31:46 +00:00
|
|
|
if err := insertReport(db, rep); err != nil {
|
|
|
|
log.Println("insert:", err)
|
|
|
|
log.Printf("%#v", rep)
|
|
|
|
http.Error(w, "Database Error", http.StatusInternalServerError)
|
|
|
|
return
|
2014-06-16 09:14:01 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-06-28 09:24:25 +00:00
|
|
|
// category is one aggregate-statistics row rendered on the index page.
type category struct {
	Values [4]float64 // four summary values from statsForInts/statsForFloats (exact semantics defined there)
	Key    string     // not set in this file — presumably used by the template; verify against index.html
	Descr  string     // human-readable description, e.g. "Memory Usage"
	Unit   string     // base unit for display, e.g. "B" or "B/s"
	Binary bool       // use binary (1024-based) prefixes when scaling Unit
}
|
|
|
|
|
2015-03-04 12:02:53 +00:00
|
|
|
func getReport(db *sql.DB) map[string]interface{} {
|
2014-06-28 07:46:03 +00:00
|
|
|
nodes := 0
|
|
|
|
var versions []string
|
|
|
|
var platforms []string
|
|
|
|
var oses []string
|
2015-03-04 10:31:46 +00:00
|
|
|
var numFolders []int
|
|
|
|
var numDevices []int
|
2014-06-28 07:46:03 +00:00
|
|
|
var totFiles []int
|
|
|
|
var maxFiles []int
|
|
|
|
var totMiB []int
|
|
|
|
var maxMiB []int
|
|
|
|
var memoryUsage []int
|
|
|
|
var sha256Perf []float64
|
|
|
|
var memorySize []int
|
2014-06-11 23:40:54 +00:00
|
|
|
|
2015-03-04 10:31:46 +00:00
|
|
|
rows, err := db.Query(`SELECT * FROM Reports WHERE Received > now() - '1 day'::INTERVAL`)
|
|
|
|
if err != nil {
|
|
|
|
log.Println("sql:", err)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
defer rows.Close()
|
|
|
|
|
|
|
|
for rows.Next() {
|
2014-06-11 23:40:54 +00:00
|
|
|
|
2014-06-28 07:46:03 +00:00
|
|
|
var rep report
|
2015-03-04 10:31:46 +00:00
|
|
|
err := rows.Scan(&rep.Received, &rep.UniqueID, &rep.Version,
|
|
|
|
&rep.LongVersion, &rep.Platform, &rep.NumFolders, &rep.NumDevices,
|
|
|
|
&rep.TotFiles, &rep.FolderMaxFiles, &rep.TotMiB, &rep.FolderMaxMiB,
|
|
|
|
&rep.MemoryUsageMiB, &rep.SHA256Perf, &rep.MemorySize, &rep.Date)
|
|
|
|
|
2014-06-28 07:46:03 +00:00
|
|
|
if err != nil {
|
2015-03-04 10:31:46 +00:00
|
|
|
log.Println("sql:", err)
|
|
|
|
return nil
|
2014-06-28 07:46:03 +00:00
|
|
|
}
|
2014-06-11 23:40:54 +00:00
|
|
|
|
2014-06-28 07:46:03 +00:00
|
|
|
nodes++
|
|
|
|
versions = append(versions, transformVersion(rep.Version))
|
|
|
|
platforms = append(platforms, rep.Platform)
|
|
|
|
ps := strings.Split(rep.Platform, "-")
|
|
|
|
oses = append(oses, ps[0])
|
2015-03-04 10:31:46 +00:00
|
|
|
if rep.NumFolders > 0 {
|
|
|
|
numFolders = append(numFolders, rep.NumFolders)
|
2014-06-28 07:46:03 +00:00
|
|
|
}
|
2015-03-04 10:31:46 +00:00
|
|
|
if rep.NumDevices > 0 {
|
|
|
|
numDevices = append(numDevices, rep.NumDevices)
|
2014-06-28 07:46:03 +00:00
|
|
|
}
|
|
|
|
if rep.TotFiles > 0 {
|
|
|
|
totFiles = append(totFiles, rep.TotFiles)
|
|
|
|
}
|
2015-03-04 10:31:46 +00:00
|
|
|
if rep.FolderMaxFiles > 0 {
|
|
|
|
maxFiles = append(maxFiles, rep.FolderMaxFiles)
|
2014-06-28 07:46:03 +00:00
|
|
|
}
|
|
|
|
if rep.TotMiB > 0 {
|
2014-06-28 09:24:25 +00:00
|
|
|
totMiB = append(totMiB, rep.TotMiB*(1<<20))
|
2014-06-28 07:46:03 +00:00
|
|
|
}
|
2015-03-04 10:31:46 +00:00
|
|
|
if rep.FolderMaxMiB > 0 {
|
|
|
|
maxMiB = append(maxMiB, rep.FolderMaxMiB*(1<<20))
|
2014-06-11 23:40:54 +00:00
|
|
|
}
|
2014-06-28 07:46:03 +00:00
|
|
|
if rep.MemoryUsageMiB > 0 {
|
2014-06-28 09:24:25 +00:00
|
|
|
memoryUsage = append(memoryUsage, rep.MemoryUsageMiB*(1<<20))
|
2014-06-28 07:46:03 +00:00
|
|
|
}
|
|
|
|
if rep.SHA256Perf > 0 {
|
2014-06-28 09:24:25 +00:00
|
|
|
sha256Perf = append(sha256Perf, rep.SHA256Perf*(1<<20))
|
2014-06-28 07:46:03 +00:00
|
|
|
}
|
|
|
|
if rep.MemorySize > 0 {
|
2014-06-28 09:24:25 +00:00
|
|
|
memorySize = append(memorySize, rep.MemorySize*(1<<20))
|
2014-06-28 07:46:03 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-06-28 09:24:25 +00:00
|
|
|
var categories []category
|
|
|
|
categories = append(categories, category{
|
|
|
|
Values: statsForInts(totFiles),
|
2015-02-15 11:00:15 +00:00
|
|
|
Descr: "Files Managed per Device",
|
2014-06-28 09:24:25 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
categories = append(categories, category{
|
|
|
|
Values: statsForInts(maxFiles),
|
2015-02-15 11:00:15 +00:00
|
|
|
Descr: "Files in Largest Folder",
|
2014-06-28 09:24:25 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
categories = append(categories, category{
|
|
|
|
Values: statsForInts(totMiB),
|
2015-02-15 11:00:15 +00:00
|
|
|
Descr: "Data Managed per Device",
|
2014-06-28 09:24:25 +00:00
|
|
|
Unit: "B",
|
|
|
|
Binary: true,
|
|
|
|
})
|
|
|
|
|
|
|
|
categories = append(categories, category{
|
|
|
|
Values: statsForInts(maxMiB),
|
2015-02-15 11:00:15 +00:00
|
|
|
Descr: "Data in Largest Folder",
|
2014-06-28 09:24:25 +00:00
|
|
|
Unit: "B",
|
|
|
|
Binary: true,
|
|
|
|
})
|
|
|
|
|
|
|
|
categories = append(categories, category{
|
2015-03-04 10:31:46 +00:00
|
|
|
Values: statsForInts(numDevices),
|
2015-02-15 11:00:15 +00:00
|
|
|
Descr: "Number of Devices in Cluster",
|
2014-06-28 09:24:25 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
categories = append(categories, category{
|
2015-03-04 10:31:46 +00:00
|
|
|
Values: statsForInts(numFolders),
|
2015-02-15 11:00:15 +00:00
|
|
|
Descr: "Number of Folders Configured",
|
2014-06-28 09:24:25 +00:00
|
|
|
})
|
|
|
|
|
|
|
|
categories = append(categories, category{
|
|
|
|
Values: statsForInts(memoryUsage),
|
|
|
|
Descr: "Memory Usage",
|
|
|
|
Unit: "B",
|
|
|
|
Binary: true,
|
|
|
|
})
|
|
|
|
|
|
|
|
categories = append(categories, category{
|
|
|
|
Values: statsForInts(memorySize),
|
|
|
|
Descr: "System Memory",
|
|
|
|
Unit: "B",
|
|
|
|
Binary: true,
|
|
|
|
})
|
|
|
|
|
|
|
|
categories = append(categories, category{
|
|
|
|
Values: statsForFloats(sha256Perf),
|
|
|
|
Descr: "SHA-256 Hashing Performance",
|
|
|
|
Unit: "B/s",
|
|
|
|
Binary: true,
|
|
|
|
})
|
|
|
|
|
2014-06-28 07:46:03 +00:00
|
|
|
r := make(map[string]interface{})
|
|
|
|
r["nodes"] = nodes
|
|
|
|
r["categories"] = categories
|
2014-12-09 15:52:02 +00:00
|
|
|
r["versions"] = analyticsFor(versions, 10)
|
|
|
|
r["platforms"] = analyticsFor(platforms, 0)
|
|
|
|
r["os"] = analyticsFor(oses, 0)
|
2014-06-28 07:46:03 +00:00
|
|
|
|
|
|
|
return r
|
|
|
|
}
|
|
|
|
|
2014-06-11 23:40:54 +00:00
|
|
|
// ensureDir makes sure dir exists and, when mode is non-negative, has
// exactly the permission bits mode. Errors are best-effort ignored, as
// in the original.
//
// Bug fixed: the original created missing directories with a hard-coded
// 0700 and never applied the requested mode to them — the chmod branch
// only ran for directories that already existed.
func ensureDir(dir string, mode int) {
	fi, err := os.Stat(dir)
	if os.IsNotExist(err) {
		os.MkdirAll(dir, 0700)
		if mode >= 0 {
			os.Chmod(dir, os.FileMode(mode))
		}
	} else if mode >= 0 && err == nil && int(fi.Mode()&0777) != mode {
		os.Chmod(dir, os.FileMode(mode))
	}
}
|
2014-06-28 07:46:03 +00:00
|
|
|
|
2014-08-07 12:55:13 +00:00
|
|
|
// vRe matches a release triplet (optionally with a pre-release tag
// like "-beta1") followed by a build/dev suffix separator.
var vRe = regexp.MustCompile(`^(v\d+\.\d+\.\d+(?:-[a-z]\w+)?)[+\.-]`)

// transformVersion returns a version number formatted correctly, with
// all development versions aggregated into one.
func transformVersion(v string) string {
	// The development-build sentinel passes through untouched.
	if v == "unknown-dev" {
		return v
	}

	// Normalize to a leading "v".
	if !strings.HasPrefix(v, "v") {
		v = "v" + v
	}

	// Anything with a suffix after the release triplet is a dev build.
	if m := vRe.FindStringSubmatch(v); len(m) > 0 {
		return m[1] + " (+dev)"
	}

	// Collapse old versions into one bucket per generation.
	for _, gen := range []string{"v0.7", "v0.8", "v0.9"} {
		if strings.HasPrefix(v, gen) {
			return gen + ".x"
		}
	}

	return v
}
|