syncthing/vendor/github.com/prometheus/procfs/bcache/get.go
Jakob Borg 916ec63af6 cmd/stdiscosrv: New discovery server (fixes #4618)
This is a new revision of the discovery server. Relevant changes and
non-changes:

- Protocol towards clients is unchanged.

- Recommended large scale design is still to be deployed behind nginx (I
  tested, and it's still a lot faster at terminating TLS).

- Database backend is leveldb again, only. It scales enough, is easy to
  setup, and we don't need any backend to take care of.

- Server supports replication. This is a simple TCP channel - protect it
  with a firewall when deploying over the internet. (We deploy this within
  the same datacenter, and with firewall.) Any incoming client announces
  are sent over the replication channel(s) to other peer discosrvs.
  Incoming replication changes are applied to the database as if they came
  from clients, but without the TLS/certificate overhead.

- Metrics are exposed using the prometheus library, when enabled.

- The database values and replication protocol is protobuf, because JSON
  was quite CPU intensive when I tried that and benchmarked it.

- The "Retry-After" value for failed lookups gets slowly increased from
  a default of 120 seconds, by 5 seconds for each failed lookup,
  independently by each discosrv. This lowers the query load over time for
  clients that are never seen. The Retry-After maxes out at 3600 after a
  couple of weeks of this increase. The number of failed lookups is
  stored in the database, now and then (avoiding making each lookup a
  database put).

All in all this means clients can be pointed towards a cluster using
just multiple A / AAAA records to gain both load sharing and redundancy
(if one is down, clients will talk to the remaining ones).

GitHub-Pull-Request: https://github.com/syncthing/syncthing/pull/4648
2018-01-14 08:52:31 +00:00

331 lines
8.7 KiB
Go

// Copyright 2017 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bcache
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strconv"
"strings"
)
// parsePseudoFloat parses the peculiar format produced by bcache's
// bch_hprint: an integer part and an optional "fraction" that is really
// an integer 0-1023 scaled down by 100 (so .10 sorts before .9).
func parsePseudoFloat(str string) (float64, error) {
	ss := strings.Split(str, ".")
	intPart, err := strconv.ParseFloat(ss[0], 64)
	if err != nil {
		return 0, err
	}
	if len(ss) == 1 {
		// Pure integers are fine.
		return intPart, nil
	}
	fracPart, err := strconv.ParseFloat(ss[1], 64)
	if err != nil {
		return 0, err
	}
	// fracPart is a number between 0 and 1023 divided by 100; it is off
	// by a small amount. Unexpected bumps in time lines may occur because
	// for bch_hprint .1 != .10 and .10 > .9 (at least up to Linux
	// v4.12-rc3).
	// Restore the proper order:
	fracPart = fracPart / 10.24
	return intPart + fracPart, nil
}

// dehumanize converts a human-readable byte slice (e.g. "1.2M", "512")
// as produced by bcache's bch_hprint into a uint64.
func dehumanize(hbytes []byte) (uint64, error) {
	ll := len(hbytes)
	if ll == 0 {
		return 0, fmt.Errorf("zero-length reply")
	}
	lastByte := hbytes[ll-1]
	mul := float64(1)
	var (
		mant float64
		err  error
	)
	// If lastByte is beyond the range of ASCII digits, it must be a
	// multiplier suffix.
	if lastByte > '9' {
		// Remove multiplier from slice.
		hbytes = hbytes[:ll-1]
		const (
			_ = 1 << (10 * iota)
			KiB
			MiB
			GiB
			TiB
			PiB
			EiB
			ZiB
			YiB
		)
		multipliers := map[rune]float64{
			// Source for conversion rules:
			// linux-kernel/drivers/md/bcache/util.c:bch_hprint()
			'k': KiB,
			'M': MiB,
			'G': GiB,
			'T': TiB,
			'P': PiB,
			'E': EiB,
			'Z': ZiB,
			'Y': YiB,
		}
		var ok bool
		mul, ok = multipliers[rune(lastByte)]
		if !ok {
			// Previously an unknown suffix silently produced mul == 0
			// and therefore a bogus result of 0; fail loudly instead.
			return 0, fmt.Errorf("unknown multiplier %q", lastByte)
		}
		mant, err = parsePseudoFloat(string(hbytes))
		if err != nil {
			return 0, err
		}
	} else {
		// Not humanized by bch_hprint
		mant, err = strconv.ParseFloat(string(hbytes), 64)
		if err != nil {
			return 0, err
		}
	}
	res := uint64(mant * mul)
	return res, nil
}
// parser accumulates state while reading many sysfs attribute files
// below one bcache uuid directory. The first error encountered is
// latched into err; once set, readValue and getPriorityStats become
// no-ops, so callers can batch many reads and check err once at the end.
type parser struct {
	uuidPath   string // root sysfs directory for this bcache ID
	subDir     string // currently selected subdirectory, relative to uuidPath
	currentDir string // uuidPath joined with subDir; where files are read from
	err        error  // first error encountered, nil while all reads succeed
}
// setSubDir selects the directory that subsequent reads operate on: the
// given path elements are joined and appended to the parser's uuid root.
func (p *parser) setSubDir(elems ...string) {
	sub := path.Join(elems...)
	p.subDir = sub
	p.currentDir = path.Join(p.uuidPath, sub)
}
// readValue reads a single sysfs attribute file below the current
// directory and converts its (possibly bch_hprint-humanized) contents
// to a uint64. Any failure is latched into p.err and 0 is returned;
// once p.err is set, further calls are no-ops.
func (p *parser) readValue(fileName string) uint64 {
	if p.err != nil {
		return 0
	}
	// Named filePath rather than path to avoid shadowing the path package.
	filePath := path.Join(p.currentDir, fileName)
	byt, err := ioutil.ReadFile(filePath)
	if err != nil {
		p.err = fmt.Errorf("failed to read: %s", filePath)
		return 0
	}
	// Remove the trailing newline, guarding against an empty file.
	// (The previous unconditional byt[:len(byt)-1] panicked on one.)
	if n := len(byt); n > 0 && byt[n-1] == '\n' {
		byt = byt[:n-1]
	}
	res, err := dehumanize(byt)
	p.err = err
	return res
}
// parsePriorityStats parses a single line from the priority_stats file,
// storing any recognized percentage field into ps. Lines that match
// neither prefix are silently ignored.
func parsePriorityStats(line string, ps *PriorityStats) error {
	// Both recognized lines share the same "<label>: ... <n>%" layout, so
	// the extraction is factored into parsePercentageValue instead of
	// being duplicated per case.
	switch {
	case strings.HasPrefix(line, "Unused:"):
		value, err := parsePercentageValue(line)
		if err != nil {
			return err
		}
		ps.UnusedPercent = value
	case strings.HasPrefix(line, "Metadata:"):
		value, err := parsePercentageValue(line)
		if err != nil {
			return err
		}
		ps.MetadataPercent = value
	}
	return nil
}

// parsePercentageValue extracts the trailing "<n>%" field of a
// priority_stats line as an unsigned integer.
func parsePercentageValue(line string) (uint64, error) {
	fields := strings.Fields(line)
	rawValue := fields[len(fields)-1]
	valueStr := strings.TrimSuffix(rawValue, "%")
	return strconv.ParseUint(valueStr, 10, 64)
}
// getPriorityStats reads and parses the priority_stats file in the
// current directory, returning the accumulated percentages. Failures are
// latched into p.err; once p.err is set the call is a no-op and returns
// a zero-valued PriorityStats.
func (p *parser) getPriorityStats() PriorityStats {
	var res PriorityStats
	if p.err != nil {
		return res
	}
	statsPath := path.Join(p.currentDir, "priority_stats")
	f, err := os.Open(statsPath)
	if err != nil {
		p.err = fmt.Errorf("failed to read: %s", statsPath)
		return res
	}
	defer f.Close()
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		if perr := parsePriorityStats(sc.Text(), &res); perr != nil {
			p.err = fmt.Errorf("failed to parse: %s (%s)", statsPath, perr)
			return res
		}
	}
	if serr := sc.Err(); serr != nil {
		p.err = fmt.Errorf("failed to parse: %s (%s)", statsPath, serr)
	}
	return res
}
// GetStats collects from sysfs files data tied to one bcache ID: the
// cache-set-wide counters, per-backing-device (bdev*) counters, and
// per-cache-device (cache*) counters. The first read/parse failure
// aborts and is returned as the error.
func GetStats(uuidPath string) (*Stats, error) {
	var bs Stats
	par := parser{uuidPath: uuidPath}

	// bcache stats
	// dir <uuidPath>
	par.setSubDir("")
	bs.Bcache.AverageKeySize = par.readValue("average_key_size")
	bs.Bcache.BtreeCacheSize = par.readValue("btree_cache_size")
	bs.Bcache.CacheAvailablePercent = par.readValue("cache_available_percent")
	bs.Bcache.Congested = par.readValue("congested")
	bs.Bcache.RootUsagePercent = par.readValue("root_usage_percent")
	bs.Bcache.TreeDepth = par.readValue("tree_depth")

	// bcache stats (internal)
	// dir <uuidPath>/internal
	par.setSubDir("internal")
	bs.Bcache.Internal.ActiveJournalEntries = par.readValue("active_journal_entries")
	bs.Bcache.Internal.BtreeNodes = par.readValue("btree_nodes")
	bs.Bcache.Internal.BtreeReadAverageDurationNanoSeconds = par.readValue("btree_read_average_duration_us")
	bs.Bcache.Internal.CacheReadRaces = par.readValue("cache_read_races")

	// bcache stats (period)
	// dir <uuidPath>/stats_five_minute
	// (Bypassed and CacheHits were previously read twice each; the
	// redundant reads are dropped.)
	par.setSubDir("stats_five_minute")
	bs.Bcache.FiveMin.Bypassed = par.readValue("bypassed")
	bs.Bcache.FiveMin.CacheBypassHits = par.readValue("cache_bypass_hits")
	bs.Bcache.FiveMin.CacheBypassMisses = par.readValue("cache_bypass_misses")
	bs.Bcache.FiveMin.CacheHits = par.readValue("cache_hits")
	bs.Bcache.FiveMin.CacheMissCollisions = par.readValue("cache_miss_collisions")
	bs.Bcache.FiveMin.CacheMisses = par.readValue("cache_misses")
	bs.Bcache.FiveMin.CacheReadaheads = par.readValue("cache_readaheads")

	// dir <uuidPath>/stats_total
	par.setSubDir("stats_total")
	bs.Bcache.Total.Bypassed = par.readValue("bypassed")
	bs.Bcache.Total.CacheBypassHits = par.readValue("cache_bypass_hits")
	bs.Bcache.Total.CacheBypassMisses = par.readValue("cache_bypass_misses")
	bs.Bcache.Total.CacheHits = par.readValue("cache_hits")
	bs.Bcache.Total.CacheMissCollisions = par.readValue("cache_miss_collisions")
	bs.Bcache.Total.CacheMisses = par.readValue("cache_misses")
	bs.Bcache.Total.CacheReadaheads = par.readValue("cache_readaheads")

	if par.err != nil {
		return nil, par.err
	}

	// bdev stats
	reg := path.Join(uuidPath, "bdev[0-9]*")
	bdevDirs, err := filepath.Glob(reg)
	if err != nil {
		return nil, err
	}
	bs.Bdevs = make([]BdevStats, len(bdevDirs))
	for ii, bdevDir := range bdevDirs {
		var bds = &bs.Bdevs[ii]
		bds.Name = filepath.Base(bdevDir)

		// dir <uuidPath>/<bds.Name>
		par.setSubDir(bds.Name)
		bds.DirtyData = par.readValue("dirty_data")

		// dir <uuidPath>/<bds.Name>/stats_five_minute
		par.setSubDir(bds.Name, "stats_five_minute")
		bds.FiveMin.Bypassed = par.readValue("bypassed")
		bds.FiveMin.CacheBypassHits = par.readValue("cache_bypass_hits")
		bds.FiveMin.CacheBypassMisses = par.readValue("cache_bypass_misses")
		bds.FiveMin.CacheHits = par.readValue("cache_hits")
		bds.FiveMin.CacheMissCollisions = par.readValue("cache_miss_collisions")
		bds.FiveMin.CacheMisses = par.readValue("cache_misses")
		bds.FiveMin.CacheReadaheads = par.readValue("cache_readaheads")

		// dir <uuidPath>/<bds.Name>/stats_total
		// Bug fix: this previously called setSubDir("stats_total"),
		// which read the cache set's <uuidPath>/stats_total instead of
		// the per-bdev stats_total directory.
		par.setSubDir(bds.Name, "stats_total")
		bds.Total.Bypassed = par.readValue("bypassed")
		bds.Total.CacheBypassHits = par.readValue("cache_bypass_hits")
		bds.Total.CacheBypassMisses = par.readValue("cache_bypass_misses")
		bds.Total.CacheHits = par.readValue("cache_hits")
		bds.Total.CacheMissCollisions = par.readValue("cache_miss_collisions")
		bds.Total.CacheMisses = par.readValue("cache_misses")
		bds.Total.CacheReadaheads = par.readValue("cache_readaheads")
	}
	if par.err != nil {
		return nil, par.err
	}

	// cache stats
	reg = path.Join(uuidPath, "cache[0-9]*")
	cacheDirs, err := filepath.Glob(reg)
	if err != nil {
		return nil, err
	}
	bs.Caches = make([]CacheStats, len(cacheDirs))
	for ii, cacheDir := range cacheDirs {
		var cs = &bs.Caches[ii]
		cs.Name = filepath.Base(cacheDir)

		// dir is <uuidPath>/<cs.Name>
		par.setSubDir(cs.Name)
		cs.IOErrors = par.readValue("io_errors")
		cs.MetadataWritten = par.readValue("metadata_written")
		cs.Written = par.readValue("written")

		ps := par.getPriorityStats()
		cs.Priority = ps
	}
	if par.err != nil {
		return nil, par.err
	}
	return &bs, nil
}