parent 9745679c63
commit 75f9ea623c
@@ -109,5 +109,15 @@ func init() {
        databaseKeys, databaseStatisticsSeconds,
        databaseOperations, databaseOperationSeconds)

    prometheus.MustRegister(prometheus.NewProcessCollector(os.Getpid(), "syncthing_discovery"))
    processCollectorOpts := prometheus.ProcessCollectorOpts{
        Namespace: "syncthing_discovery",
        PidFn: func() (int, error) {
            return os.Getpid(), nil
        },
    }

    prometheus.MustRegister(
        prometheus.NewProcessCollector(processCollectorOpts),
    )
}
@@ -14,7 +14,16 @@ import (
)

func init() {
    prometheus.MustRegister(prometheus.NewProcessCollector(os.Getpid(), "syncthing_relaypoolsrv"))
    processCollectorOpts := prometheus.ProcessCollectorOpts{
        Namespace: "syncthing_relaypoolsrv",
        PidFn: func() (int, error) {
            return os.Getpid(), nil
        },
    }

    prometheus.MustRegister(
        prometheus.NewProcessCollector(processCollectorOpts),
    )
}

var (
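Both hunks above migrate from the removed two-argument NewProcessCollector to the ProcessCollectorOpts form introduced later in this diff. Since a nil PidFn defaults to the current process (see the ProcessCollectorOpts documentation further down), the explicit os.Getpid closure is optional; a minimal equivalent sketch, not the committed code:

    package main

    import "github.com/prometheus/client_golang/prometheus"

    func main() {
        // PidFn is left nil, so the collector defaults to the current process.
        prometheus.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
            Namespace: "syncthing_discovery",
        }))
    }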
73 vendor/github.com/prometheus/client_golang/prometheus/collector.go (generated, vendored)
@@ -29,27 +29,72 @@ type Collector interface {
    // collected by this Collector to the provided channel and returns once
    // the last descriptor has been sent. The sent descriptors fulfill the
    // consistency and uniqueness requirements described in the Desc
    // documentation. (It is valid if one and the same Collector sends
    // duplicate descriptors. Those duplicates are simply ignored. However,
    // two different Collectors must not send duplicate descriptors.) This
    // method idempotently sends the same descriptors throughout the
    // lifetime of the Collector. If a Collector encounters an error while
    // executing this method, it must send an invalid descriptor (created
    // with NewInvalidDesc) to signal the error to the registry.
    // documentation.
    //
    // It is valid if one and the same Collector sends duplicate
    // descriptors. Those duplicates are simply ignored. However, two
    // different Collectors must not send duplicate descriptors.
    //
    // Sending no descriptor at all marks the Collector as “unchecked”,
    // i.e. no checks will be performed at registration time, and the
    // Collector may yield any Metric it sees fit in its Collect method.
    //
    // This method idempotently sends the same descriptors throughout the
    // lifetime of the Collector. It may be called concurrently and
    // therefore must be implemented in a concurrency safe way.
    //
    // If a Collector encounters an error while executing this method, it
    // must send an invalid descriptor (created with NewInvalidDesc) to
    // signal the error to the registry.
    Describe(chan<- *Desc)
    // Collect is called by the Prometheus registry when collecting
    // metrics. The implementation sends each collected metric via the
    // provided channel and returns once the last metric has been sent. The
    // descriptor of each sent metric is one of those returned by
    // Describe. Returned metrics that share the same descriptor must differ
    // in their variable label values. This method may be called
    // concurrently and must therefore be implemented in a concurrency safe
    // way. Blocking occurs at the expense of total performance of rendering
    // all registered metrics. Ideally, Collector implementations support
    // concurrent readers.
    // descriptor of each sent metric is one of those returned by Describe
    // (unless the Collector is unchecked, see above). Returned metrics that
    // share the same descriptor must differ in their variable label
    // values.
    //
    // This method may be called concurrently and must therefore be
    // implemented in a concurrency safe way. Blocking occurs at the expense
    // of total performance of rendering all registered metrics. Ideally,
    // Collector implementations support concurrent readers.
    Collect(chan<- Metric)
}

// DescribeByCollect is a helper to implement the Describe method of a custom
// Collector. It collects the metrics from the provided Collector and sends
// their descriptors to the provided channel.
//
// If a Collector collects the same metrics throughout its lifetime, its
// Describe method can simply be implemented as:
//
//   func (c customCollector) Describe(ch chan<- *Desc) {
//       DescribeByCollect(c, ch)
//   }
//
// However, this will not work if the metrics collected change dynamically over
// the lifetime of the Collector in a way that their combined set of descriptors
// changes as well. The shortcut implementation will then violate the contract
// of the Describe method. If a Collector sometimes collects no metrics at all
// (for example vectors like CounterVec, GaugeVec, etc., which only collect
// metrics after a metric with a fully specified label set has been accessed),
// it might even get registered as an unchecked Collector (cf. the Register
// method of the Registerer interface). Hence, only use this shortcut
// implementation of Describe if you are certain to fulfill the contract.
//
// The Collector example demonstrates a use of DescribeByCollect.
func DescribeByCollect(c Collector, descs chan<- *Desc) {
    metrics := make(chan Metric)
    go func() {
        c.Collect(metrics)
        close(metrics)
    }()
    for m := range metrics {
        descs <- m.Desc()
    }
}

// selfCollector implements Collector for a single Metric so that the Metric
// collects itself. Add it as an anonymous field to a struct that implements
// Metric, and call init with the Metric itself as an argument.
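For a Collector whose metric set is fixed, the new DescribeByCollect helper shown above is enough to satisfy the Describe contract. A minimal sketch with a hypothetical collector type and metric name (not part of this commit):

    package main

    import "github.com/prometheus/client_golang/prometheus"

    // queueCollector is a hypothetical Collector with a static metric set.
    type queueCollector struct {
        depth *prometheus.Desc
    }

    // Describe forwards whatever descriptors Collect produces.
    func (c queueCollector) Describe(ch chan<- *prometheus.Desc) {
        prometheus.DescribeByCollect(c, ch)
    }

    // Collect emits one gauge; a constant stands in for a real queue-depth lookup.
    func (c queueCollector) Collect(ch chan<- prometheus.Metric) {
        ch <- prometheus.MustNewConstMetric(c.depth, prometheus.GaugeValue, 42)
    }

    func main() {
        c := queueCollector{depth: prometheus.NewDesc("queue_depth", "Current queue depth.", nil, nil)}
        prometheus.MustRegister(c)
    }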
65 vendor/github.com/prometheus/client_golang/prometheus/counter.go (generated, vendored)
@@ -15,6 +15,10 @@ package prometheus

import (
    "errors"
    "math"
    "sync/atomic"

    dto "github.com/prometheus/client_model/go"
)

// Counter is a Metric that represents a single numerical value that only ever
@@ -42,6 +46,14 @@ type Counter interface {
type CounterOpts Opts

// NewCounter creates a new Counter based on the provided CounterOpts.
//
// The returned implementation tracks the counter value in two separate
// variables, a float64 and a uint64. The latter is used to track calls of the
// Inc method and calls of the Add method with a value that can be represented
// as a uint64. This allows atomic increments of the counter with optimal
// performance. (It is common to have an Inc call in very hot execution paths.)
// Both internal tracking values are added up in the Write method. This has to
// be taken into account when it comes to precision and overflow behavior.
func NewCounter(opts CounterOpts) Counter {
    desc := NewDesc(
        BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@@ -49,20 +61,58 @@ func NewCounter(opts CounterOpts) Counter {
        nil,
        opts.ConstLabels,
    )
    result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}
    result := &counter{desc: desc, labelPairs: desc.constLabelPairs}
    result.init(result) // Init self-collection.
    return result
}

type counter struct {
    value
    // valBits contains the bits of the represented float64 value, while
    // valInt stores values that are exact integers. Both have to go first
    // in the struct to guarantee alignment for atomic operations.
    // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
    valBits uint64
    valInt uint64

    selfCollector
    desc *Desc

    labelPairs []*dto.LabelPair
}

func (c *counter) Desc() *Desc {
    return c.desc
}

func (c *counter) Add(v float64) {
    if v < 0 {
        panic(errors.New("counter cannot decrease in value"))
    }
    c.value.Add(v)
    ival := uint64(v)
    if float64(ival) == v {
        atomic.AddUint64(&c.valInt, ival)
        return
    }

    for {
        oldBits := atomic.LoadUint64(&c.valBits)
        newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
        if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) {
            return
        }
    }
}

func (c *counter) Inc() {
    atomic.AddUint64(&c.valInt, 1)
}

func (c *counter) Write(out *dto.Metric) error {
    fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
    ival := atomic.LoadUint64(&c.valInt)
    val := fval + float64(ival)

    return populateMetric(CounterValue, val, c.labelPairs, out)
}

// CounterVec is a Collector that bundles a set of Counters that all share the
@@ -85,11 +135,10 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
    )
    return &CounterVec{
        metricVec: newMetricVec(desc, func(lvs ...string) Metric {
            result := &counter{value: value{
                desc: desc,
                valType: CounterValue,
                labelPairs: makeLabelPairs(desc, lvs),
            }}
            if len(lvs) != len(desc.variableLabels) {
                panic(errInconsistentCardinality)
            }
            result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
            result.init(result) // Init self-collection.
            return result
        }),
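The counter above keeps an integer fast path (valInt) for Inc and for Add calls with whole numbers, and falls back to a CAS loop on the float bits otherwise; Write sums both. A small usage sketch of that behavior (my example, not from the diff):

    package main

    import (
        "fmt"

        "github.com/prometheus/client_golang/prometheus"
        dto "github.com/prometheus/client_model/go"
    )

    func main() {
        c := prometheus.NewCounter(prometheus.CounterOpts{Name: "demo_events_total", Help: "Demo counter."})
        c.Inc()    // atomic uint64 fast path
        c.Add(2)   // representable as uint64, still the fast path
        c.Add(0.5) // fractional, handled via the float64 CAS loop

        var m dto.Metric
        if err := c.Write(&m); err == nil {
            fmt.Println(m.Counter.GetValue()) // 3.5, both internal values added up in Write
        }
    }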
8 vendor/github.com/prometheus/client_golang/prometheus/desc.go (generated, vendored)
@@ -67,7 +67,7 @@ type Desc struct {

// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc
// and will be reported on registration time. variableLabels and constLabels can
// be nil if no such labels should be set. fqName and help must not be empty.
// be nil if no such labels should be set. fqName must not be empty.
//
// variableLabels only contain the label names. Their label values are variable
// and therefore not part of the Desc. (They are managed within the Metric.)
@@ -80,10 +80,6 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
        help: help,
        variableLabels: variableLabels,
    }
    if help == "" {
        d.err = errors.New("empty help string")
        return d
    }
    if !model.IsValidMetricName(model.LabelValue(fqName)) {
        d.err = fmt.Errorf("%q is not a valid metric name", fqName)
        return d
@@ -156,7 +152,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
            Value: proto.String(v),
        })
    }
    sort.Sort(LabelPairSorter(d.constLabelPairs))
    sort.Sort(labelPairSorter(d.constLabelPairs))
    return d
}
27 vendor/github.com/prometheus/client_golang/prometheus/doc.go (generated, vendored)
@@ -11,10 +11,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.

// Package prometheus provides metrics primitives to instrument code for
// monitoring. It also offers a registry for metrics. Sub-packages allow to
// expose the registered metrics via HTTP (package promhttp) or push them to a
// Pushgateway (package push).
// Package prometheus is the core instrumentation package. It provides metrics
// primitives to instrument code for monitoring. It also offers a registry for
// metrics. Sub-packages allow to expose the registered metrics via HTTP
// (package promhttp) or push them to a Pushgateway (package push). There is
// also a sub-package promauto, which provides metrics constructors with
// automatic registration.
//
// All exported functions and methods are safe to be used concurrently unless
// specified otherwise.
@@ -72,7 +74,10 @@
// The number of exported identifiers in this package might appear a bit
// overwhelming. However, in addition to the basic plumbing shown in the example
// above, you only need to understand the different metric types and their
// vector versions for basic usage.
// vector versions for basic usage. Furthermore, if you are not concerned with
// fine-grained control of when and how to register metrics with the registry,
// have a look at the promauto package, which will effectively allow you to
// ignore registration altogether in simple cases.
//
// Above, you have already touched the Counter and the Gauge. There are two more
// advanced metric types: the Summary and Histogram. A more thorough description
@@ -116,7 +121,17 @@
// NewConstSummary (and their respective Must… versions). That will happen in
// the Collect method. The Describe method has to return separate Desc
// instances, representative of the “throw-away” metrics to be created later.
// NewDesc comes in handy to create those Desc instances.
// NewDesc comes in handy to create those Desc instances. Alternatively, you
// could return no Desc at all, which will mark the Collector “unchecked”. No
// checks are performed at registration time, but metric consistency will still
// be ensured at scrape time, i.e. any inconsistencies will lead to scrape
// errors. Thus, with unchecked Collectors, the responsibility to not collect
// metrics that lead to inconsistencies in the total scrape result lies with the
// implementer of the Collector. While this is not a desirable state, it is
// sometimes necessary. The typical use case is a situation where the exact
// metrics to be returned by a Collector cannot be predicted at registration
// time, but the implementer has sufficient knowledge of the whole system to
// guarantee metric consistency.
//
// The Collector example illustrates the use case. You can also look at the
// source code of the processCollector (mirroring process metrics), the
13 vendor/github.com/prometheus/client_golang/prometheus/fnv.go (generated, vendored)
@@ -1,3 +1,16 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

// Inline and byte-free variant of hash/fnv's fnv64a.
80 vendor/github.com/prometheus/client_golang/prometheus/gauge.go (generated, vendored)
@@ -13,6 +13,14 @@

package prometheus

import (
    "math"
    "sync/atomic"
    "time"

    dto "github.com/prometheus/client_model/go"
)

// Gauge is a Metric that represents a single numerical value that can
// arbitrarily go up and down.
//
@@ -48,13 +56,74 @@ type Gauge interface {
type GaugeOpts Opts

// NewGauge creates a new Gauge based on the provided GaugeOpts.
//
// The returned implementation is optimized for a fast Set method. If you have a
// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick
// the former. For example, the Inc method of the returned Gauge is slower than
// the Inc method of a Counter returned by NewCounter. This matches the typical
// scenarios for Gauges and Counters, where the former tends to be Set-heavy and
// the latter Inc-heavy.
func NewGauge(opts GaugeOpts) Gauge {
    return newValue(NewDesc(
    desc := NewDesc(
        BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
        opts.Help,
        nil,
        opts.ConstLabels,
    ), GaugeValue, 0)
    )
    result := &gauge{desc: desc, labelPairs: desc.constLabelPairs}
    result.init(result) // Init self-collection.
    return result
}

type gauge struct {
    // valBits contains the bits of the represented float64 value. It has
    // to go first in the struct to guarantee alignment for atomic
    // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
    valBits uint64

    selfCollector

    desc *Desc
    labelPairs []*dto.LabelPair
}

func (g *gauge) Desc() *Desc {
    return g.desc
}

func (g *gauge) Set(val float64) {
    atomic.StoreUint64(&g.valBits, math.Float64bits(val))
}

func (g *gauge) SetToCurrentTime() {
    g.Set(float64(time.Now().UnixNano()) / 1e9)
}

func (g *gauge) Inc() {
    g.Add(1)
}

func (g *gauge) Dec() {
    g.Add(-1)
}

func (g *gauge) Add(val float64) {
    for {
        oldBits := atomic.LoadUint64(&g.valBits)
        newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
        if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) {
            return
        }
    }
}

func (g *gauge) Sub(val float64) {
    g.Add(val * -1)
}

func (g *gauge) Write(out *dto.Metric) error {
    val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
    return populateMetric(GaugeValue, val, g.labelPairs, out)
}

// GaugeVec is a Collector that bundles a set of Gauges that all share the same
@@ -77,7 +146,12 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
    )
    return &GaugeVec{
        metricVec: newMetricVec(desc, func(lvs ...string) Metric {
            return newValue(desc, GaugeValue, 0, lvs...)
            if len(lvs) != len(desc.variableLabels) {
                panic(errInconsistentCardinality)
            }
            result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)}
            result.init(result) // Init self-collection.
            return result
        }),
    }
}
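gauge.Add above implements an atomic add on a float64 by round-tripping the value through its bit pattern and retrying with compare-and-swap. A standalone sketch of the same pattern using only the standard library:

    package main

    import (
        "fmt"
        "math"
        "sync/atomic"
    )

    // atomicAddFloat64 adds delta to the float64 whose bits are stored in *bits,
    // mirroring the CAS loop used by gauge.Add.
    func atomicAddFloat64(bits *uint64, delta float64) {
        for {
            oldBits := atomic.LoadUint64(bits)
            newBits := math.Float64bits(math.Float64frombits(oldBits) + delta)
            if atomic.CompareAndSwapUint64(bits, oldBits, newBits) {
                return
            }
        }
    }

    func main() {
        var bits uint64 // bit pattern of 0.0
        atomicAddFloat64(&bits, 1.5)
        atomicAddFloat64(&bits, -0.25)
        fmt.Println(math.Float64frombits(bits)) // 1.25
    }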
23 vendor/github.com/prometheus/client_golang/prometheus/go_collector.go (generated, vendored)
@@ -1,3 +1,16 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

import (
@@ -17,8 +30,12 @@ type goCollector struct {
    metrics memStatsMetrics
}

// NewGoCollector returns a collector which exports metrics about the current
// go process.
// NewGoCollector returns a collector which exports metrics about the current Go
// process. This includes memory stats. To collect those, runtime.ReadMemStats
// is called. This causes a stop-the-world, which is very short with Go1.9+
// (~25µs). However, with older Go versions, the stop-the-world duration depends
// on the heap size and can be quite significant (~1.7 ms/GiB as per
// https://go-review.googlesource.com/c/go/+/34937).
func NewGoCollector() Collector {
    return &goCollector{
        goroutinesDesc: NewDesc(
@@ -265,7 +282,7 @@ func (c *goCollector) Collect(ch chan<- Metric) {
        quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()
    }
    quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
    ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles)
    ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)

    ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
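Registering the Go runtime collector is a one-liner; a sketch using a fresh registry, since the default registry typically ships with a Go collector already registered (port and registry choice are mine, not from the diff):

    package main

    import (
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        reg := prometheus.NewRegistry()
        // Exposes go_goroutines, go_memstats_*, etc.; note the ReadMemStats
        // stop-the-world cost described in the doc comment above.
        reg.MustRegister(prometheus.NewGoCollector())

        http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
        http.ListenAndServe(":2112", nil)
    }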
8 vendor/github.com/prometheus/client_golang/prometheus/graphite/bridge.go (generated, vendored)
@@ -191,8 +191,10 @@ func writeMetrics(w io.Writer, mfs []*dto.MetricFamily, prefix string, now model

    buf := bufio.NewWriter(w)
    for _, s := range vec {
        if err := writeSanitized(buf, prefix); err != nil {
            return err
        for _, c := range prefix {
            if _, err := buf.WriteRune(c); err != nil {
                return err
            }
        }
        if err := buf.WriteByte('.'); err != nil {
            return err
@@ -273,7 +275,7 @@ func replaceInvalidRune(c rune) rune {
    if c == ' ' {
        return '.'
    }
    if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_' || c == ':' || (c >= '0' && c <= '9')) {
    if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_' || c == ':' || c == '-' || (c >= '0' && c <= '9')) {
        return '_'
    }
    return c
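The second hunk widens the set of runes the Graphite bridge keeps: '-' now passes through instead of becoming '_'. replaceInvalidRune is unexported, so the following is an illustrative reimplementation of the same mapping, not a call into the package:

    package main

    import (
        "fmt"
        "strings"
    )

    // sanitizeGraphiteName mirrors the replaceInvalidRune logic above: spaces
    // become '.', '-' is kept after this change, other invalid runes become '_'.
    func sanitizeGraphiteName(s string) string {
        return strings.Map(func(c rune) rune {
            if c == ' ' {
                return '.'
            }
            if !((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_' || c == ':' || c == '-' || (c >= '0' && c <= '9')) {
                return '_'
            }
            return c
        }, s)
    }

    func main() {
        fmt.Println(sanitizeGraphiteName("my-metric name/total")) // my-metric.name_total
    }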
155 vendor/github.com/prometheus/client_golang/prometheus/histogram.go (generated, vendored)
@@ -16,7 +16,9 @@ package prometheus
import (
    "fmt"
    "math"
    "runtime"
    "sort"
    "sync"
    "sync/atomic"

    "github.com/golang/protobuf/proto"
@@ -108,8 +110,9 @@ func ExponentialBuckets(start, factor float64, count int) []float64 {
}

// HistogramOpts bundles the options for creating a Histogram metric. It is
// mandatory to set Name and Help to a non-empty string. All other fields are
// optional and can safely be left at their zero value.
// mandatory to set Name to a non-empty string. All other fields are optional
// and can safely be left at their zero value, although it is strongly
// encouraged to set a Help string.
type HistogramOpts struct {
    // Namespace, Subsystem, and Name are components of the fully-qualified
    // name of the Histogram (created by joining these components with
@@ -120,7 +123,7 @@ type HistogramOpts struct {
    Subsystem string
    Name string

    // Help provides information about this Histogram. Mandatory!
    // Help provides information about this Histogram.
    //
    // Metrics with the same fully-qualified name must have the same Help
    // string.
@@ -184,6 +187,7 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
        desc: desc,
        upperBounds: opts.Buckets,
        labelPairs: makeLabelPairs(desc, labelValues),
        counts: [2]*histogramCounts{&histogramCounts{}, &histogramCounts{}},
    }
    for i, upperBound := range h.upperBounds {
        if i < len(h.upperBounds)-1 {
@@ -200,28 +204,53 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
            }
        }
    }
    // Finally we know the final length of h.upperBounds and can make counts.
    h.counts = make([]uint64, len(h.upperBounds))
    // Finally we know the final length of h.upperBounds and can make counts
    // for both states:
    h.counts[0].buckets = make([]uint64, len(h.upperBounds))
    h.counts[1].buckets = make([]uint64, len(h.upperBounds))

    h.init(h) // Init self-collection.
    return h
}

type histogram struct {
type histogramCounts struct {
    // sumBits contains the bits of the float64 representing the sum of all
    // observations. sumBits and count have to go first in the struct to
    // guarantee alignment for atomic operations.
    // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
    sumBits uint64
    count uint64
    buckets []uint64
}

type histogram struct {
    // countAndHotIdx is a complicated one. For lock-free yet atomic
    // observations, we need to save the total count of observations again,
    // combined with the index of the currently-hot counts struct, so that
    // we can perform the operation on both values atomically. The least
    // significant bit defines the hot counts struct. The remaining 63 bits
    // represent the total count of observations. This happens under the
    // assumption that the 63bit count will never overflow. Rationale: An
    // observation takes about 30ns. Let's assume it could happen in
    // 10ns. Overflowing the counter will then take at least (2^63)*10ns,
    // which is about 3000 years.
    //
    // This has to be first in the struct for 64bit alignment. See
    // http://golang.org/pkg/sync/atomic/#pkg-note-BUG
    countAndHotIdx uint64

    selfCollector
    // Note that there is no mutex required.

    desc *Desc
    desc *Desc
    writeMtx sync.Mutex // Only used in the Write method.

    upperBounds []float64
    counts []uint64

    // Two counts, one is "hot" for lock-free observations, the other is
    // "cold" for writing out a dto.Metric. It has to be an array of
    // pointers to guarantee 64bit alignment of the histogramCounts, see
    // http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
    counts [2]*histogramCounts
    hotIdx int // Index of currently-hot counts. Only used within Write.

    labelPairs []*dto.LabelPair
}
@@ -241,36 +270,113 @@ func (h *histogram) Observe(v float64) {
    // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op
    // 300 buckets: 154 ns/op linear - binary 61.6 ns/op
    i := sort.SearchFloat64s(h.upperBounds, v)
    if i < len(h.counts) {
        atomic.AddUint64(&h.counts[i], 1)

    // We increment h.countAndHotIdx by 2 so that the counter in the upper
    // 63 bits gets incremented by 1. At the same time, we get the new value
    // back, which we can use to find the currently-hot counts.
    n := atomic.AddUint64(&h.countAndHotIdx, 2)
    hotCounts := h.counts[n%2]

    if i < len(h.upperBounds) {
        atomic.AddUint64(&hotCounts.buckets[i], 1)
    }
    atomic.AddUint64(&h.count, 1)
    for {
        oldBits := atomic.LoadUint64(&h.sumBits)
        oldBits := atomic.LoadUint64(&hotCounts.sumBits)
        newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
        if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) {
        if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
            break
        }
    }
    // Increment count last as we take it as a signal that the observation
    // is complete.
    atomic.AddUint64(&hotCounts.count, 1)
}

func (h *histogram) Write(out *dto.Metric) error {
    his := &dto.Histogram{}
    buckets := make([]*dto.Bucket, len(h.upperBounds))
    var (
        his = &dto.Histogram{}
        buckets = make([]*dto.Bucket, len(h.upperBounds))
        hotCounts, coldCounts *histogramCounts
        count uint64
    )

    his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits)))
    his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count))
    var count uint64
    // For simplicity, we mutex the rest of this method. It is not in the
    // hot path, i.e. Observe is called much more often than Write. The
    // complication of making Write lock-free isn't worth it.
    h.writeMtx.Lock()
    defer h.writeMtx.Unlock()

    // This is a bit arcane, which is why the following spells out this if
    // clause in English:
    //
    // If the currently-hot counts struct is #0, we atomically increment
    // h.countAndHotIdx by 1 so that from now on Observe will use the counts
    // struct #1. Furthermore, the atomic increment gives us the new value,
    // which, in its most significant 63 bits, tells us the count of
    // observations done so far up to and including currently ongoing
    // observations still using the counts struct just changed from hot to
    // cold. To have a normal uint64 for the count, we bitshift by 1 and
    // save the result in count. We also set h.hotIdx to 1 for the next
    // Write call, and we will refer to counts #1 as hotCounts and to counts
    // #0 as coldCounts.
    //
    // If the currently-hot counts struct is #1, we do the corresponding
    // things the other way round. We have to _decrement_ h.countAndHotIdx
    // (which is a bit arcane in itself, as we have to express -1 with an
    // unsigned int...).
    if h.hotIdx == 0 {
        count = atomic.AddUint64(&h.countAndHotIdx, 1) >> 1
        h.hotIdx = 1
        hotCounts = h.counts[1]
        coldCounts = h.counts[0]
    } else {
        count = atomic.AddUint64(&h.countAndHotIdx, ^uint64(0)) >> 1 // Decrement.
        h.hotIdx = 0
        hotCounts = h.counts[0]
        coldCounts = h.counts[1]
    }

    // Now we have to wait for the now-declared-cold counts to actually cool
    // down, i.e. wait for all observations still using it to finish. That's
    // the case once the count in the cold counts struct is the same as the
    // one atomically retrieved from the upper 63bits of h.countAndHotIdx.
    for {
        if count == atomic.LoadUint64(&coldCounts.count) {
            break
        }
        runtime.Gosched() // Let observations get work done.
    }

    his.SampleCount = proto.Uint64(count)
    his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits)))
    var cumCount uint64
    for i, upperBound := range h.upperBounds {
        count += atomic.LoadUint64(&h.counts[i])
        cumCount += atomic.LoadUint64(&coldCounts.buckets[i])
        buckets[i] = &dto.Bucket{
            CumulativeCount: proto.Uint64(count),
            CumulativeCount: proto.Uint64(cumCount),
            UpperBound: proto.Float64(upperBound),
        }
    }

    his.Bucket = buckets
    out.Histogram = his
    out.Label = h.labelPairs

    // Finally add all the cold counts to the new hot counts and reset the cold counts.
    atomic.AddUint64(&hotCounts.count, count)
    atomic.StoreUint64(&coldCounts.count, 0)
    for {
        oldBits := atomic.LoadUint64(&hotCounts.sumBits)
        newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum())
        if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
            atomic.StoreUint64(&coldCounts.sumBits, 0)
            break
        }
    }
    for i := range h.upperBounds {
        atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i]))
        atomic.StoreUint64(&coldCounts.buckets[i], 0)
    }
    return nil
}

@@ -454,7 +560,7 @@ func (h *constHistogram) Write(out *dto.Metric) error {
// bucket.
//
// NewConstHistogram returns an error if the length of labelValues is not
// consistent with the variable labels in Desc.
// consistent with the variable labels in Desc or if Desc is invalid.
func NewConstHistogram(
    desc *Desc,
    count uint64,
@@ -462,6 +568,9 @@ func NewConstHistogram(
    buckets map[float64]uint64,
    labelValues ...string,
) (Metric, error) {
    if desc.err != nil {
        return nil, desc.err
    }
    if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
        return nil, err
    }
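The countAndHotIdx field packs two things into one uint64: the low bit selects the hot counts struct and the upper 63 bits hold the observation count, which is what lets Observe update both atomically. A standalone sketch of just the bit manipulation (illustrative, not the package code):

    package main

    import "fmt"

    func main() {
        var countAndHotIdx uint64

        // Observe side: adding 2 leaves the low bit (the hot index) untouched
        // and increments the count held in the upper 63 bits.
        countAndHotIdx += 2
        countAndHotIdx += 2
        fmt.Println(countAndHotIdx>>1, countAndHotIdx&1) // 2 0

        // Write side: adding 1 flips the low bit, switching the hot counts
        // struct from #0 to #1 while the count in the upper bits survives.
        countAndHotIdx++
        fmt.Println(countAndHotIdx>>1, countAndHotIdx&1) // 2 1

        // Adding ^uint64(0) acts as -1 and flips the low bit back to #0.
        countAndHotIdx += ^uint64(0)
        fmt.Println(countAndHotIdx>>1, countAndHotIdx&1) // 2 0
    }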
51 vendor/github.com/prometheus/client_golang/prometheus/http.go (generated, vendored)
@@ -61,16 +61,15 @@ func giveBuf(buf *bytes.Buffer) {
// name).
//
// Deprecated: Please note the issues described in the doc comment of
// InstrumentHandler. You might want to consider using promhttp.Handler instead
// (which is not instrumented, but can be instrumented with the tooling provided
// in package promhttp).
// InstrumentHandler. You might want to consider using promhttp.Handler instead.
func Handler() http.Handler {
    return InstrumentHandler("prometheus", UninstrumentedHandler())
}

// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.
//
// Deprecated: Use promhttp.Handler instead. See there for further documentation.
// Deprecated: Use promhttp.HandlerFor(DefaultGatherer, promhttp.HandlerOpts{})
// instead. See there for further documentation.
func UninstrumentedHandler() http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
        mfs, err := DefaultGatherer.Gather()
@@ -116,7 +115,7 @@ func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string)
    header := request.Header.Get(acceptEncodingHeader)
    parts := strings.Split(header, ",")
    for _, part := range parts {
        part := strings.TrimSpace(part)
        part = strings.TrimSpace(part)
        if part == "gzip" || strings.HasPrefix(part, "gzip;") {
            return gzip.NewWriter(writer), "gzip"
        }
@@ -140,16 +139,6 @@ var now nower = nowFunc(func() time.Time {
    return time.Now()
})

func nowSeries(t ...time.Time) nower {
    return nowFunc(func() time.Time {
        defer func() {
            t = t[1:]
        }()

        return t[0]
    })
}

// InstrumentHandler wraps the given HTTP handler for instrumentation. It
// registers four metric collectors (if not already done) and reports HTTP
// metrics to the (newly or already) registered collectors: http_requests_total
@@ -160,21 +149,14 @@ func nowSeries(t ...time.Time) nower {
// (label name "method") and HTTP status code (label name "code").
//
// Deprecated: InstrumentHandler has several issues. Use the tooling provided in
// package promhttp instead. The issues are the following:
//
// - It uses Summaries rather than Histograms. Summaries are not useful if
//   aggregation across multiple instances is required.
//
// - It uses microseconds as unit, which is deprecated and should be replaced by
//   seconds.
//
// - The size of the request is calculated in a separate goroutine. Since this
//   calculator requires access to the request header, it creates a race with
//   any writes to the header performed during request handling.
//   httputil.ReverseProxy is a prominent example for a handler
//   performing such writes.
//
// - It has additional issues with HTTP/2, cf.
// package promhttp instead. The issues are the following: (1) It uses Summaries
// rather than Histograms. Summaries are not useful if aggregation across
// multiple instances is required. (2) It uses microseconds as unit, which is
// deprecated and should be replaced by seconds. (3) The size of the request is
// calculated in a separate goroutine. Since this calculator requires access to
// the request header, it creates a race with any writes to the header performed
// during request handling. httputil.ReverseProxy is a prominent example for a
// handler performing such writes. (4) It has additional issues with HTTP/2, cf.
// https://github.com/prometheus/client_golang/issues/272.
func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
    return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
@@ -318,7 +300,7 @@ func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.Respo
}

func computeApproximateRequestSize(r *http.Request) <-chan int {
    // Get URL length in current go routine for avoiding a race condition.
    // Get URL length in current goroutine for avoiding a race condition.
    // HandlerFunc that runs in parallel may modify the URL.
    s := 0
    if r.URL != nil {
@@ -353,10 +335,9 @@ func computeApproximateRequestSize(r *http.Request) <-chan int {
type responseWriterDelegator struct {
    http.ResponseWriter

    handler, method string
    status int
    written int64
    wroteHeader bool
    status int
    written int64
    wroteHeader bool
}

func (r *responseWriterDelegator) WriteHeader(code int) {
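The deprecation notes above all point at package promhttp; a minimal migration sketch (handler path and port are placeholders):

    package main

    import (
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        // Replacement for the deprecated prometheus.UninstrumentedHandler():
        http.Handle("/metrics", promhttp.HandlerFor(prometheus.DefaultGatherer, promhttp.HandlerOpts{}))
        // promhttp.Handler() is the shorthand with default options plus
        // instrumentation of the metrics handler itself.
        http.ListenAndServe(":2112", nil)
    }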
85 vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go (generated, vendored, new file)
@@ -0,0 +1,85 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package internal

import (
    "sort"

    dto "github.com/prometheus/client_model/go"
)

// metricSorter is a sortable slice of *dto.Metric.
type metricSorter []*dto.Metric

func (s metricSorter) Len() int {
    return len(s)
}

func (s metricSorter) Swap(i, j int) {
    s[i], s[j] = s[j], s[i]
}

func (s metricSorter) Less(i, j int) bool {
    if len(s[i].Label) != len(s[j].Label) {
        // This should not happen. The metrics are
        // inconsistent. However, we have to deal with the fact, as
        // people might use custom collectors or metric family injection
        // to create inconsistent metrics. So let's simply compare the
        // number of labels in this case. That will still yield
        // reproducible sorting.
        return len(s[i].Label) < len(s[j].Label)
    }
    for n, lp := range s[i].Label {
        vi := lp.GetValue()
        vj := s[j].Label[n].GetValue()
        if vi != vj {
            return vi < vj
        }
    }

    // We should never arrive here. Multiple metrics with the same
    // label set in the same scrape will lead to undefined ingestion
    // behavior. However, as above, we have to provide stable sorting
    // here, even for inconsistent metrics. So sort equal metrics
    // by their timestamp, with missing timestamps (implying "now")
    // coming last.
    if s[i].TimestampMs == nil {
        return false
    }
    if s[j].TimestampMs == nil {
        return true
    }
    return s[i].GetTimestampMs() < s[j].GetTimestampMs()
}

// NormalizeMetricFamilies returns a MetricFamily slice with empty
// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
// the slice, with the contained Metrics sorted within each MetricFamily.
func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
    for _, mf := range metricFamiliesByName {
        sort.Sort(metricSorter(mf.Metric))
    }
    names := make([]string, 0, len(metricFamiliesByName))
    for name, mf := range metricFamiliesByName {
        if len(mf.Metric) > 0 {
            names = append(names, name)
        }
    }
    sort.Strings(names)
    result := make([]*dto.MetricFamily, 0, len(names))
    for _, name := range names {
        result = append(result, metricFamiliesByName[name])
    }
    return result
}
13 vendor/github.com/prometheus/client_golang/prometheus/labels.go (generated, vendored)
@@ -1,3 +1,16 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

import (
70 vendor/github.com/prometheus/client_golang/prometheus/metric.go (generated, vendored)
@@ -15,6 +15,9 @@ package prometheus

import (
    "strings"
    "time"

    "github.com/golang/protobuf/proto"

    dto "github.com/prometheus/client_model/go"
)
@@ -43,9 +46,8 @@ type Metric interface {
    // While populating dto.Metric, it is the responsibility of the
    // implementation to ensure validity of the Metric protobuf (like valid
    // UTF-8 strings or syntactically valid metric and label names). It is
    // recommended to sort labels lexicographically. (Implementers may find
    // LabelPairSorter useful for that.) Callers of Write should still make
    // sure of sorting if they depend on it.
    // recommended to sort labels lexicographically. Callers of Write should
    // still make sure of sorting if they depend on it.
    Write(*dto.Metric) error
    // TODO(beorn7): The original rationale of passing in a pre-allocated
    // dto.Metric protobuf to save allocations has disappeared. The
@@ -57,8 +59,9 @@ type Metric interface {
// implementation XXX has its own XXXOpts type, but in most cases, it is just
// an alias of this type (which might change when the requirement arises.)
//
// It is mandatory to set Name and Help to a non-empty string. All other fields
// are optional and can safely be left at their zero value.
// It is mandatory to set Name to a non-empty string. All other fields are
// optional and can safely be left at their zero value, although it is strongly
// encouraged to set a Help string.
type Opts struct {
    // Namespace, Subsystem, and Name are components of the fully-qualified
    // name of the Metric (created by joining these components with
@@ -69,7 +72,7 @@ type Opts struct {
    Subsystem string
    Name string

    // Help provides information about this metric. Mandatory!
    // Help provides information about this metric.
    //
    // Metrics with the same fully-qualified name must have the same Help
    // string.
@@ -110,37 +113,22 @@ func BuildFQName(namespace, subsystem, name string) string {
    return name
}

// LabelPairSorter implements sort.Interface. It is used to sort a slice of
// dto.LabelPair pointers. This is useful for implementing the Write method of
// custom metrics.
type LabelPairSorter []*dto.LabelPair
// labelPairSorter implements sort.Interface. It is used to sort a slice of
// dto.LabelPair pointers.
type labelPairSorter []*dto.LabelPair

func (s LabelPairSorter) Len() int {
func (s labelPairSorter) Len() int {
    return len(s)
}

func (s LabelPairSorter) Swap(i, j int) {
func (s labelPairSorter) Swap(i, j int) {
    s[i], s[j] = s[j], s[i]
}

func (s LabelPairSorter) Less(i, j int) bool {
func (s labelPairSorter) Less(i, j int) bool {
    return s[i].GetName() < s[j].GetName()
}

type hashSorter []uint64

func (s hashSorter) Len() int {
    return len(s)
}

func (s hashSorter) Swap(i, j int) {
    s[i], s[j] = s[j], s[i]
}

func (s hashSorter) Less(i, j int) bool {
    return s[i] < s[j]
}

type invalidMetric struct {
    desc *Desc
    err error
@@ -156,3 +144,31 @@ func NewInvalidMetric(desc *Desc, err error) Metric {
func (m *invalidMetric) Desc() *Desc { return m.desc }

func (m *invalidMetric) Write(*dto.Metric) error { return m.err }

type timestampedMetric struct {
    Metric
    t time.Time
}

func (m timestampedMetric) Write(pb *dto.Metric) error {
    e := m.Metric.Write(pb)
    pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000))
    return e
}

// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a
// way that it has an explicit timestamp set to the provided Time. This is only
// useful in rare cases as the timestamp of a Prometheus metric should usually
// be set by the Prometheus server during scraping. Exceptions include mirroring
// metrics with given timestamps from other metric
// sources.
//
// NewMetricWithTimestamp works best with MustNewConstMetric,
// MustNewConstHistogram, and MustNewConstSummary, see example.
//
// Currently, the exposition formats used by Prometheus are limited to
// millisecond resolution. Thus, the provided time will be rounded down to the
// next full millisecond value.
func NewMetricWithTimestamp(t time.Time, m Metric) Metric {
    return timestampedMetric{Metric: m, t: t}
}
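NewMetricWithTimestamp pairs naturally with the const-metric constructors, as the doc comment notes. A small sketch for the mirroring use case (metric name and timestamp are invented for illustration):

    package main

    import (
        "fmt"
        "time"

        "github.com/prometheus/client_golang/prometheus"
        dto "github.com/prometheus/client_model/go"
    )

    func main() {
        desc := prometheus.NewDesc("mirrored_temperature_celsius", "Temperature mirrored from another system.", nil, nil)
        // Wrap a const metric with the timestamp of the original sample.
        m := prometheus.NewMetricWithTimestamp(
            time.Unix(1536789000, 0),
            prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 21.5),
        )

        var pb dto.Metric
        if err := m.Write(&pb); err == nil {
            fmt.Println(pb.GetTimestampMs(), pb.Gauge.GetValue()) // 1536789000000 21.5
        }
    }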
122 vendor/github.com/prometheus/client_golang/prometheus/process_collector.go (generated, vendored)
@@ -13,46 +13,74 @@

package prometheus

import "github.com/prometheus/procfs"
import (
    "errors"
    "os"

    "github.com/prometheus/procfs"
)

type processCollector struct {
    pid int
    collectFn func(chan<- Metric)
    pidFn func() (int, error)
    reportErrors bool
    cpuTotal *Desc
    openFDs, maxFDs *Desc
    vsize, rss *Desc
    vsize, maxVsize *Desc
    rss *Desc
    startTime *Desc
}

// NewProcessCollector returns a collector which exports the current state of
// process metrics including cpu, memory and file descriptor usage as well as
// the process start time for the given process id under the given namespace.
func NewProcessCollector(pid int, namespace string) Collector {
    return NewProcessCollectorPIDFn(
        func() (int, error) { return pid, nil },
        namespace,
    )
// ProcessCollectorOpts defines the behavior of a process metrics collector
// created with NewProcessCollector.
type ProcessCollectorOpts struct {
    // PidFn returns the PID of the process the collector collects metrics
    // for. It is called upon each collection. By default, the PID of the
    // current process is used, as determined on construction time by
    // calling os.Getpid().
    PidFn func() (int, error)
    // If non-empty, each of the collected metrics is prefixed by the
    // provided string and an underscore ("_").
    Namespace string
    // If true, any error encountered during collection is reported as an
    // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored
    // and the collected metrics will be incomplete. (Possibly, no metrics
    // will be collected at all.) While that's usually not desired, it is
    // appropriate for the common "mix-in" of process metrics, where process
    // metrics are nice to have, but failing to collect them should not
    // disrupt the collection of the remaining metrics.
    ReportErrors bool
}

// NewProcessCollectorPIDFn returns a collector which exports the current state
// of process metrics including cpu, memory and file descriptor usage as well
// as the process start time under the given namespace. The given pidFn is
// called on each collect and is used to determine the process to export
// metrics for.
func NewProcessCollectorPIDFn(
    pidFn func() (int, error),
    namespace string,
) Collector {
// NewProcessCollector returns a collector which exports the current state of
// process metrics including CPU, memory and file descriptor usage as well as
// the process start time. The detailed behavior is defined by the provided
// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a
// collector for the current process with an empty namespace string and no error
// reporting.
//
// Currently, the collector depends on a Linux-style proc filesystem and
// therefore only exports metrics for Linux.
//
// Note: An older version of this function had the following signature:
//
//   NewProcessCollector(pid int, namespace string) Collector
//
// Most commonly, it was called as
//
//   NewProcessCollector(os.Getpid(), "")
//
// The following call of the current version is equivalent to the above:
//
//   NewProcessCollector(ProcessCollectorOpts{})
func NewProcessCollector(opts ProcessCollectorOpts) Collector {
    ns := ""
    if len(namespace) > 0 {
        ns = namespace + "_"
    if len(opts.Namespace) > 0 {
        ns = opts.Namespace + "_"
    }

    c := processCollector{
        pidFn: pidFn,
        collectFn: func(chan<- Metric) {},

    c := &processCollector{
        reportErrors: opts.ReportErrors,
        cpuTotal: NewDesc(
            ns+"process_cpu_seconds_total",
            "Total user and system CPU time spent in seconds.",
@@ -73,6 +101,11 @@ func NewProcessCollectorPIDFn(
            "Virtual memory size in bytes.",
            nil, nil,
        ),
        maxVsize: NewDesc(
            ns+"process_virtual_memory_max_bytes",
            "Maximum amount of virtual memory available in bytes.",
            nil, nil,
        ),
        rss: NewDesc(
            ns+"process_resident_memory_bytes",
            "Resident memory size in bytes.",
@@ -85,12 +118,23 @@ func NewProcessCollectorPIDFn(
        ),
    }

    if opts.PidFn == nil {
        pid := os.Getpid()
        c.pidFn = func() (int, error) { return pid, nil }
    } else {
        c.pidFn = opts.PidFn
    }

    // Set up process metric collection if supported by the runtime.
    if _, err := procfs.NewStat(); err == nil {
        c.collectFn = c.processCollect
    } else {
        c.collectFn = func(ch chan<- Metric) {
            c.reportError(ch, nil, errors.New("process metrics not supported on this platform"))
        }
    }

    return &c
    return c
}

// Describe returns all descriptions of the collector.
@@ -99,6 +143,7 @@ func (c *processCollector) Describe(ch chan<- *Desc) {
    ch <- c.openFDs
    ch <- c.maxFDs
    ch <- c.vsize
    ch <- c.maxVsize
    ch <- c.rss
    ch <- c.startTime
}
@@ -108,16 +153,16 @@ func (c *processCollector) Collect(ch chan<- Metric) {
    c.collectFn(ch)
}

// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the
// client allows users to configure the error behavior.
func (c *processCollector) processCollect(ch chan<- Metric) {
    pid, err := c.pidFn()
    if err != nil {
        c.reportError(ch, nil, err)
        return
    }

    p, err := procfs.NewProc(pid)
    if err != nil {
        c.reportError(ch, nil, err)
        return
    }

@@ -127,14 +172,33 @@ func (c *processCollector) processCollect(ch chan<- Metric) {
        ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))
        if startTime, err := stat.StartTime(); err == nil {
            ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)
        } else {
            c.reportError(ch, c.startTime, err)
        }
    } else {
        c.reportError(ch, nil, err)
    }

    if fds, err := p.FileDescriptorsLen(); err == nil {
        ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))
    } else {
        c.reportError(ch, c.openFDs, err)
    }

    if limits, err := p.NewLimits(); err == nil {
        ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))
        ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace))
    } else {
        c.reportError(ch, nil, err)
    }
}

func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) {
    if !c.reportErrors {
        return
    }
    if desc == nil {
        desc = NewInvalidDesc(err)
    }
    ch <- NewInvalidMetric(desc, err)
}
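A sketch of the new options in use; the namespace "myapp" is a placeholder, and a fresh registry is used because the default registry typically carries an unprefixed process collector already:

    package main

    import "github.com/prometheus/client_golang/prometheus"

    func main() {
        reg := prometheus.NewRegistry()
        reg.MustRegister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{
            Namespace:    "myapp", // metrics become myapp_process_cpu_seconds_total, ...
            ReportErrors: true,    // surface collection failures as invalid metrics (scrape errors)
        }))
    }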
223 vendor/github.com/prometheus/client_golang/prometheus/promauto/auto.go (generated, vendored, new file)
@ -0,0 +1,223 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package promauto provides constructors for the usual Prometheus metrics that
// return them already registered with the global registry
// (prometheus.DefaultRegisterer). This allows very compact code, avoiding any
// references to the registry altogether, but all the constructors in this
// package will panic if the registration fails.
//
// The following example is a complete program to create a histogram of normally
// distributed random numbers from the math/rand package:
//
// package main
//
// import (
// "math/rand"
// "net/http"
//
// "github.com/prometheus/client_golang/prometheus"
// "github.com/prometheus/client_golang/prometheus/promauto"
// "github.com/prometheus/client_golang/prometheus/promhttp"
// )
//
// var histogram = promauto.NewHistogram(prometheus.HistogramOpts{
// Name: "random_numbers",
// Help: "A histogram of normally distributed random numbers.",
// Buckets: prometheus.LinearBuckets(-3, .1, 61),
// })
//
// func Random() {
// for {
// histogram.Observe(rand.NormFloat64())
// }
// }
//
// func main() {
// go Random()
// http.Handle("/metrics", promhttp.Handler())
// http.ListenAndServe(":1971", nil)
// }
//
// Prometheus's version of a minimal hello-world program:
//
// package main
//
// import (
// "fmt"
// "net/http"
//
// "github.com/prometheus/client_golang/prometheus"
// "github.com/prometheus/client_golang/prometheus/promauto"
// "github.com/prometheus/client_golang/prometheus/promhttp"
// )
//
// func main() {
// http.Handle("/", promhttp.InstrumentHandlerCounter(
// promauto.NewCounterVec(
// prometheus.CounterOpts{
// Name: "hello_requests_total",
// Help: "Total number of hello-world requests by HTTP code.",
// },
// []string{"code"},
// ),
// http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// fmt.Fprint(w, "Hello, world!")
// }),
// ))
// http.Handle("/metrics", promhttp.Handler())
// http.ListenAndServe(":1971", nil)
// }
//
// This appears very handy. So why are these constructors locked away in a
// separate package? There are two caveats:
//
// First, in more complex programs, global state is often quite problematic.
// That's the reason why the metrics constructors in the prometheus package do
// not interact with the global prometheus.DefaultRegisterer on their own. You
// are free to use the Register or MustRegister functions to register them with
// the global prometheus.DefaultRegisterer, but you could as well choose a local
// Registerer (usually created with prometheus.NewRegistry, but there are other
// scenarios, e.g. testing).
//
// The second issue is that registration may fail, e.g. if a metric inconsistent
// with the newly to be registered one is already registered. But how to signal
// and handle a panic in the automatic registration with the default registry?
// The only way is panicking. While panicking on invalid input provided by the
// programmer is certainly fine, things are a bit more subtle in this case: You
// might just add another package to the program, and that package (in its init
// function) happens to register a metric with the same name as your code. Now,
// all of a sudden, either your code or the code of the newly imported package
// panics, depending on initialization order, without any opportunity to handle
// the case gracefully. Even worse is a scenario where registration happens
// later during the runtime (e.g. upon loading some kind of plugin), where the
// panic could be triggered long after the code has been deployed to
// production. A possibility to panic should be explicitly called out by the
// Must… idiom, cf. prometheus.MustRegister. But adding a separate set of
// constructors in the prometheus package called MustRegisterNewCounterVec or
// similar would be quite unwieldy. Adding an extra MustRegister method to each
// metric, returning the registered metric, would result in nice code for those
// using the method, but would pollute every single metric interface for
// everybody avoiding the global registry.
//
// To address both issues, the problematic auto-registering and possibly
// panicking constructors are all in this package with a clear warning
// ahead. And whoever cares about avoiding global state and possibly panicking
// function calls can simply ignore the existence of the promauto package
// altogether.
//
// A final note: There is a similar case in the net/http package of the standard
// library. It has DefaultServeMux as a global instance of ServeMux, and the
// Handle function acts on it, panicking if a handler for the same pattern has
// already been registered. However, one might argue that the whole HTTP routing
// is usually set up closely together in the same package or file, while
// Prometheus metrics tend to be spread widely over the codebase, increasing the
// chance of surprising registration failures. Furthermore, the use of global
// state in net/http has been criticized widely, and some avoid it altogether.
package promauto

import "github.com/prometheus/client_golang/prometheus"

// NewCounter works like the function of the same name in the prometheus package
// but it automatically registers the Counter with the
// prometheus.DefaultRegisterer. If the registration fails, NewCounter panics.
func NewCounter(opts prometheus.CounterOpts) prometheus.Counter {
c := prometheus.NewCounter(opts)
prometheus.MustRegister(c)
return c
}

// NewCounterVec works like the function of the same name in the prometheus
// package but it automatically registers the CounterVec with the
// prometheus.DefaultRegisterer. If the registration fails, NewCounterVec
// panics.
func NewCounterVec(opts prometheus.CounterOpts, labelNames []string) *prometheus.CounterVec {
c := prometheus.NewCounterVec(opts, labelNames)
prometheus.MustRegister(c)
return c
}

// NewCounterFunc works like the function of the same name in the prometheus
// package but it automatically registers the CounterFunc with the
// prometheus.DefaultRegisterer. If the registration fails, NewCounterFunc
// panics.
func NewCounterFunc(opts prometheus.CounterOpts, function func() float64) prometheus.CounterFunc {
g := prometheus.NewCounterFunc(opts, function)
prometheus.MustRegister(g)
return g
}

// NewGauge works like the function of the same name in the prometheus package
// but it automatically registers the Gauge with the
// prometheus.DefaultRegisterer. If the registration fails, NewGauge panics.
func NewGauge(opts prometheus.GaugeOpts) prometheus.Gauge {
g := prometheus.NewGauge(opts)
prometheus.MustRegister(g)
return g
}

// NewGaugeVec works like the function of the same name in the prometheus
// package but it automatically registers the GaugeVec with the
// prometheus.DefaultRegisterer. If the registration fails, NewGaugeVec panics.
func NewGaugeVec(opts prometheus.GaugeOpts, labelNames []string) *prometheus.GaugeVec {
g := prometheus.NewGaugeVec(opts, labelNames)
prometheus.MustRegister(g)
return g
}

// NewGaugeFunc works like the function of the same name in the prometheus
// package but it automatically registers the GaugeFunc with the
// prometheus.DefaultRegisterer. If the registration fails, NewGaugeFunc panics.
func NewGaugeFunc(opts prometheus.GaugeOpts, function func() float64) prometheus.GaugeFunc {
g := prometheus.NewGaugeFunc(opts, function)
prometheus.MustRegister(g)
return g
}

// NewSummary works like the function of the same name in the prometheus package
// but it automatically registers the Summary with the
// prometheus.DefaultRegisterer. If the registration fails, NewSummary panics.
func NewSummary(opts prometheus.SummaryOpts) prometheus.Summary {
s := prometheus.NewSummary(opts)
prometheus.MustRegister(s)
return s
}

// NewSummaryVec works like the function of the same name in the prometheus
// package but it automatically registers the SummaryVec with the
// prometheus.DefaultRegisterer. If the registration fails, NewSummaryVec
// panics.
func NewSummaryVec(opts prometheus.SummaryOpts, labelNames []string) *prometheus.SummaryVec {
s := prometheus.NewSummaryVec(opts, labelNames)
prometheus.MustRegister(s)
return s
}

// NewHistogram works like the function of the same name in the prometheus
// package but it automatically registers the Histogram with the
// prometheus.DefaultRegisterer. If the registration fails, NewHistogram panics.
func NewHistogram(opts prometheus.HistogramOpts) prometheus.Histogram {
h := prometheus.NewHistogram(opts)
prometheus.MustRegister(h)
return h
}

// NewHistogramVec works like the function of the same name in the prometheus
// package but it automatically registers the HistogramVec with the
// prometheus.DefaultRegisterer. If the registration fails, NewHistogramVec
// panics.
func NewHistogramVec(opts prometheus.HistogramOpts, labelNames []string) *prometheus.HistogramVec {
h := prometheus.NewHistogramVec(opts, labelNames)
prometheus.MustRegister(h)
return h
}
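Editor's note: the promauto package comment above recommends a local Registerer for code that wants to avoid global state. For comparison, the explicit equivalent of promauto.NewCounter is a plain constructor plus MustRegister on a caller-provided registry; this is a minimal sketch, and the function and metric names are illustrative only.

package example

import "github.com/prometheus/client_golang/prometheus"

// newLocalCounter shows the non-promauto alternative: the metric is
// registered with a caller-provided Registerer instead of
// prometheus.DefaultRegisterer, so tests can pass their own registry.
func newLocalCounter(reg prometheus.Registerer) prometheus.Counter {
	c := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "example_events_total", // illustrative name
		Help: "Total number of example events.",
	})
	reg.MustRegister(c) // panics on registration failure, like promauto
	return c
}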
30
vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
generated
vendored
30
vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
generated
vendored
@ -76,16 +76,16 @@ type flusherDelegator struct{ *responseWriterDelegator }
type hijackerDelegator struct{ *responseWriterDelegator }
type readerFromDelegator struct{ *responseWriterDelegator }

func (d *closeNotifierDelegator) CloseNotify() <-chan bool {
func (d closeNotifierDelegator) CloseNotify() <-chan bool {
return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
func (d *flusherDelegator) Flush() {
func (d flusherDelegator) Flush() {
d.ResponseWriter.(http.Flusher).Flush()
}
func (d *hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return d.ResponseWriter.(http.Hijacker).Hijack()
}
func (d *readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
if !d.wroteHeader {
d.WriteHeader(http.StatusOK)
}
@ -112,7 +112,7 @@ func init() {
*responseWriterDelegator
http.Flusher
http.CloseNotifier
}{d, &flusherDelegator{d}, &closeNotifierDelegator{d}}
}{d, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4
return hijackerDelegator{d}
@ -122,14 +122,14 @@ func init() {
*responseWriterDelegator
http.Hijacker
http.CloseNotifier
}{d, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
}{d, hijackerDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6
return struct {
*responseWriterDelegator
http.Hijacker
http.Flusher
}{d, &hijackerDelegator{d}, &flusherDelegator{d}}
}{d, hijackerDelegator{d}, flusherDelegator{d}}
}
pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7
return struct {
@ -137,7 +137,7 @@ func init() {
http.Hijacker
http.Flusher
http.CloseNotifier
}{d, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
}{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8
return readerFromDelegator{d}
@ -147,14 +147,14 @@ func init() {
*responseWriterDelegator
io.ReaderFrom
http.CloseNotifier
}{d, &readerFromDelegator{d}, &closeNotifierDelegator{d}}
}{d, readerFromDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10
return struct {
*responseWriterDelegator
io.ReaderFrom
http.Flusher
}{d, &readerFromDelegator{d}, &flusherDelegator{d}}
}{d, readerFromDelegator{d}, flusherDelegator{d}}
}
pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11
return struct {
@ -162,14 +162,14 @@ func init() {
io.ReaderFrom
http.Flusher
http.CloseNotifier
}{d, &readerFromDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
}{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12
return struct {
*responseWriterDelegator
io.ReaderFrom
http.Hijacker
}{d, &readerFromDelegator{d}, &hijackerDelegator{d}}
}{d, readerFromDelegator{d}, hijackerDelegator{d}}
}
pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13
return struct {
@ -177,7 +177,7 @@ func init() {
io.ReaderFrom
http.Hijacker
http.CloseNotifier
}{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
}{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14
return struct {
@ -185,7 +185,7 @@ func init() {
io.ReaderFrom
http.Hijacker
http.Flusher
}{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}}
}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
}
pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15
return struct {
@ -194,6 +194,6 @@ func init() {
http.Hijacker
http.Flusher
http.CloseNotifier
}{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
}{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
}
32
vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go
generated
vendored
32
vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go
generated
vendored
@ -22,7 +22,7 @@ import (

type pusherDelegator struct{ *responseWriterDelegator }

func (d *pusherDelegator) Push(target string, opts *http.PushOptions) error {
func (d pusherDelegator) Push(target string, opts *http.PushOptions) error {
return d.ResponseWriter.(http.Pusher).Push(target, opts)
}
@ -35,14 +35,14 @@ func init() {
*responseWriterDelegator
http.Pusher
http.CloseNotifier
}{d, &pusherDelegator{d}, &closeNotifierDelegator{d}}
}{d, pusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
return struct {
*responseWriterDelegator
http.Pusher
http.Flusher
}{d, &pusherDelegator{d}, &flusherDelegator{d}}
}{d, pusherDelegator{d}, flusherDelegator{d}}
}
pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
return struct {
@ -50,14 +50,14 @@ func init() {
http.Pusher
http.Flusher
http.CloseNotifier
}{d, &pusherDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
}{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
return struct {
*responseWriterDelegator
http.Pusher
http.Hijacker
}{d, &pusherDelegator{d}, &hijackerDelegator{d}}
}{d, pusherDelegator{d}, hijackerDelegator{d}}
}
pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
return struct {
@ -65,7 +65,7 @@ func init() {
http.Pusher
http.Hijacker
http.CloseNotifier
}{d, &pusherDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
}{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
return struct {
@ -73,7 +73,7 @@ func init() {
http.Pusher
http.Hijacker
http.Flusher
}{d, &pusherDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}}
}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
}
pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23
return struct {
@ -82,14 +82,14 @@ func init() {
http.Hijacker
http.Flusher
http.CloseNotifier
}{d, &pusherDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
}{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
return struct {
*responseWriterDelegator
http.Pusher
io.ReaderFrom
}{d, &pusherDelegator{d}, &readerFromDelegator{d}}
}{d, pusherDelegator{d}, readerFromDelegator{d}}
}
pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
return struct {
@ -97,7 +97,7 @@ func init() {
http.Pusher
io.ReaderFrom
http.CloseNotifier
}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &closeNotifierDelegator{d}}
}{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
return struct {
@ -105,7 +105,7 @@ func init() {
http.Pusher
io.ReaderFrom
http.Flusher
}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &flusherDelegator{d}}
}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}}
}
pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
return struct {
@ -114,7 +114,7 @@ func init() {
io.ReaderFrom
http.Flusher
http.CloseNotifier
}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
}{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
return struct {
@ -122,7 +122,7 @@ func init() {
http.Pusher
io.ReaderFrom
http.Hijacker
}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}}
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}}
}
pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
return struct {
@ -131,7 +131,7 @@ func init() {
io.ReaderFrom
http.Hijacker
http.CloseNotifier
}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}}
}
pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
return struct {
@ -140,7 +140,7 @@ func init() {
io.ReaderFrom
http.Hijacker
http.Flusher
}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}}
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}}
}
pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
return struct {
@ -150,7 +150,7 @@ func init() {
http.Hijacker
http.Flusher
http.CloseNotifier
}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
}{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}}
}
}
131
vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
generated
vendored
131
vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
generated
vendored
@ -39,6 +39,7 @@ import (
"net/http"
"strings"
"sync"
"time"

"github.com/prometheus/common/expfmt"
@ -67,21 +68,51 @@ func giveBuf(buf *bytes.Buffer) {
bufPool.Put(buf)
}

// Handler returns an HTTP handler for the prometheus.DefaultGatherer. The
// Handler uses the default HandlerOpts, i.e. report the first error as an HTTP
// error, no error logging, and compression if requested by the client.
// Handler returns an http.Handler for the prometheus.DefaultGatherer, using
// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has
// no error logging, and it applies compression if requested by the client.
//
// If you want to create a Handler for the DefaultGatherer with different
// HandlerOpts, create it with HandlerFor with prometheus.DefaultGatherer and
// your desired HandlerOpts.
// The returned http.Handler is already instrumented using the
// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you
// create multiple http.Handlers by separate calls of the Handler function, the
// metrics used for instrumentation will be shared between them, providing
// global scrape counts.
//
// This function is meant to cover the bulk of basic use cases. If you are doing
// anything that requires more customization (including using a non-default
// Gatherer, different instrumentation, and non-default HandlerOpts), use the
// HandlerFor function. See there for details.
func Handler() http.Handler {
return HandlerFor(prometheus.DefaultGatherer, HandlerOpts{})
return InstrumentMetricHandler(
prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}),
)
}

// HandlerFor returns an http.Handler for the provided Gatherer. The behavior
// of the Handler is defined by the provided HandlerOpts.
// HandlerFor returns an uninstrumented http.Handler for the provided
// Gatherer. The behavior of the Handler is defined by the provided
// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom
// Gatherers, with non-default HandlerOpts, and/or with custom (or no)
// instrumentation. Use the InstrumentMetricHandler function to apply the same
// kind of instrumentation as it is used by the Handler function.
func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
var inFlightSem chan struct{}
if opts.MaxRequestsInFlight > 0 {
inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
}

h := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
if inFlightSem != nil {
select {
case inFlightSem <- struct{}{}: // All good, carry on.
defer func() { <-inFlightSem }()
default:
http.Error(w, fmt.Sprintf(
"Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight,
), http.StatusServiceUnavailable)
return
}
}

mfs, err := reg.Gather()
if err != nil {
if opts.ErrorLog != nil {
@ -137,9 +168,70 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
if encoding != "" {
header.Set(contentEncodingHeader, encoding)
}
w.Write(buf.Bytes())
if _, err := w.Write(buf.Bytes()); err != nil && opts.ErrorLog != nil {
opts.ErrorLog.Println("error while sending encoded metrics:", err)
}
// TODO(beorn7): Consider streaming serving of metrics.
})

if opts.Timeout <= 0 {
return h
}
return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf(
"Exceeded configured timeout of %v.\n",
opts.Timeout,
))
}

// InstrumentMetricHandler is usually used with an http.Handler returned by the
// HandlerFor function. It instruments the provided http.Handler with two
// metrics: A counter vector "promhttp_metric_handler_requests_total" to count
// scrapes partitioned by HTTP status code, and a gauge
// "promhttp_metric_handler_requests_in_flight" to track the number of
// simultaneous scrapes. This function idempotently registers collectors for
// both metrics with the provided Registerer. It panics if the registration
// fails. The provided metrics are useful to see how many scrapes hit the
// monitored target (which could be from different Prometheus servers or other
// scrapers), and how often they overlap (which would result in more than one
// scrape in flight at the same time). Note that the scrapes-in-flight gauge
// will contain the scrape by which it is exposed, while the scrape counter will
// only get incremented after the scrape is complete (as only then the status
// code is known). For tracking scrape durations, use the
// "scrape_duration_seconds" gauge created by the Prometheus server upon each
// scrape.
func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler {
cnt := prometheus.NewCounterVec(
prometheus.CounterOpts{
Name: "promhttp_metric_handler_requests_total",
Help: "Total number of scrapes by HTTP status code.",
},
[]string{"code"},
)
// Initialize the most likely HTTP status codes.
cnt.WithLabelValues("200")
cnt.WithLabelValues("500")
cnt.WithLabelValues("503")
if err := reg.Register(cnt); err != nil {
if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
cnt = are.ExistingCollector.(*prometheus.CounterVec)
} else {
panic(err)
}
}

gge := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "promhttp_metric_handler_requests_in_flight",
Help: "Current number of scrapes being served.",
})
if err := reg.Register(gge); err != nil {
if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
gge = are.ExistingCollector.(prometheus.Gauge)
} else {
panic(err)
}
}

return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler))
}

// HandlerErrorHandling defines how a Handler serving metrics will handle
@ -183,6 +275,21 @@ type HandlerOpts struct {
// If DisableCompression is true, the handler will never compress the
// response, even if requested by the client.
DisableCompression bool
// The number of concurrent HTTP requests is limited to
// MaxRequestsInFlight. Additional requests are responded to with 503
// Service Unavailable and a suitable message in the body. If
// MaxRequestsInFlight is 0 or negative, no limit is applied.
MaxRequestsInFlight int
// If handling a request takes longer than Timeout, it is responded to
// with 503 ServiceUnavailable and a suitable Message. No timeout is
// applied if Timeout is 0 or negative. Note that with the current
// implementation, reaching the timeout simply ends the HTTP requests as
// described above (and even that only if sending of the body hasn't
// started yet), while the bulk work of gathering all the metrics keeps
// running in the background (with the eventual result to be thrown
// away). Until the implementation is improved, it is recommended to
// implement a separate timeout in potentially slow Collectors.
Timeout time.Duration
}

// decorateWriter wraps a writer to handle gzip compression if requested. It
@ -195,7 +302,7 @@ func decorateWriter(request *http.Request, writer io.Writer, compressionDisabled
header := request.Header.Get(acceptEncodingHeader)
parts := strings.Split(header, ",")
for _, part := range parts {
part := strings.TrimSpace(part)
part = strings.TrimSpace(part)
if part == "gzip" || strings.HasPrefix(part, "gzip;") {
return gzip.NewWriter(writer), "gzip"
}
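Editor's note: taken together, the new Handler, HandlerFor, and InstrumentMetricHandler pieces above compose as in the following sketch. The registry, port, limit, and timeout values are illustrative choices, not defaults prescribed by the library.

package main

import (
	"log"
	"net/http"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A custom registry acts as both Gatherer and Registerer here.
	reg := prometheus.NewRegistry()

	// HandlerFor gives an uninstrumented handler with explicit limits;
	// InstrumentMetricHandler adds the scrape counter and in-flight gauge
	// described in its doc comment above.
	h := promhttp.InstrumentMetricHandler(
		reg,
		promhttp.HandlerFor(reg, promhttp.HandlerOpts{
			MaxRequestsInFlight: 3,                // further scrapes receive 503
			Timeout:             10 * time.Second, // slow scrapes receive 503
		}),
	)

	http.Handle("/metrics", h)
	log.Fatal(http.ListenAndServe(":9090", nil))
}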
18
vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
generated
vendored
18
vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
generated
vendored
@ -68,15 +68,15 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou
}

// InstrumentRoundTripperDuration is a middleware that wraps the provided
// http.RoundTripper to observe the request duration with the provided ObserverVec.
// The ObserverVec must have zero, one, or two labels. The only allowed label
// names are "code" and "method". The function panics if any other instance
// labels are provided. The Observe method of the Observer in the ObserverVec
// is called with the request duration in seconds. Partitioning happens by HTTP
// status code and/or HTTP method if the respective instance label names are
// present in the ObserverVec. For unpartitioned observations, use an
// ObserverVec with zero labels. Note that partitioning of Histograms is
// expensive and should be used judiciously.
// http.RoundTripper to observe the request duration with the provided
// ObserverVec. The ObserverVec must have zero, one, or two non-const
// non-curried labels. For those, the only allowed label names are "code" and
// "method". The function panics otherwise. The Observe method of the Observer
// in the ObserverVec is called with the request duration in
// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
// respective instance label names are present in the ObserverVec. For
// unpartitioned observations, use an ObserverVec with zero labels. Note that
// partitioning of Histograms is expensive and should be used judiciously.
//
// If the wrapped RoundTripper panics or returns a non-nil error, no values are
// reported.
@ -81,8 +81,8 @@ func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) Ro
}
},
DNSDone: func(_ httptrace.DNSDoneInfo) {
if it.DNSStart != nil {
it.DNSStart(time.Since(start).Seconds())
if it.DNSDone != nil {
it.DNSDone(time.Since(start).Seconds())
}
},
ConnectStart: func(_, _ string) {
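Editor's note: as a usage sketch for the documentation change above, a client wrapped with InstrumentRoundTripperDuration might look like the following. The histogram name and the choice of a "method" label are illustrative; "method" and "code" are the only label names the function accepts.

package example

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// newInstrumentedClient wraps the default transport so that every outgoing
// request duration is observed, partitioned by HTTP method.
func newInstrumentedClient(reg prometheus.Registerer) *http.Client {
	dur := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "example_client_request_duration_seconds", // illustrative name
			Help:    "Duration of outgoing HTTP requests.",
			Buckets: prometheus.DefBuckets,
		},
		[]string{"method"},
	)
	reg.MustRegister(dur)

	return &http.Client{
		Transport: promhttp.InstrumentRoundTripperDuration(dur, http.DefaultTransport),
	}
}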
172
vendor/github.com/prometheus/client_golang/prometheus/push/deprecated.go
generated
vendored
Normal file
172
vendor/github.com/prometheus/client_golang/prometheus/push/deprecated.go
generated
vendored
Normal file
@ -0,0 +1,172 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package push

// This file contains only deprecated code. Remove after v0.9 is released.

import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
"strings"

"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/model"

"github.com/prometheus/client_golang/prometheus"
)

// FromGatherer triggers a metric collection by the provided Gatherer (which is
// usually implemented by a prometheus.Registry) and pushes all gathered metrics
// to the Pushgateway specified by url, using the provided job name and the
// (optional) further grouping labels (the grouping map may be nil). See the
// Pushgateway documentation for detailed implications of the job and other
// grouping labels. Neither the job name nor any grouping label value may
// contain a "/". The metrics pushed must not contain a job label of their own
// nor any of the grouping labels.
//
// You can use just host:port or ip:port as url, in which case 'http://' is
// added automatically. You can also include the schema in the URL. However, do
// not include the '/metrics/jobs/...' part.
//
// Note that all previously pushed metrics with the same job and other grouping
// labels will be replaced with the metrics pushed by this call. (It uses HTTP
// method 'PUT' to push to the Pushgateway.)
//
// Deprecated: Please use a Pusher created with New instead.
func FromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error {
return push(job, grouping, url, g, "PUT")
}

// AddFromGatherer works like FromGatherer, but only previously pushed metrics
// with the same name (and the same job and other grouping labels) will be
// replaced. (It uses HTTP method 'POST' to push to the Pushgateway.)
//
// Deprecated: Please use a Pusher created with New instead.
func AddFromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error {
return push(job, grouping, url, g, "POST")
}

func push(job string, grouping map[string]string, pushURL string, g prometheus.Gatherer, method string) error {
if !strings.Contains(pushURL, "://") {
pushURL = "http://" + pushURL
}
if strings.HasSuffix(pushURL, "/") {
pushURL = pushURL[:len(pushURL)-1]
}

if strings.Contains(job, "/") {
return fmt.Errorf("job contains '/': %s", job)
}
urlComponents := []string{url.QueryEscape(job)}
for ln, lv := range grouping {
if !model.LabelName(ln).IsValid() {
return fmt.Errorf("grouping label has invalid name: %s", ln)
}
if strings.Contains(lv, "/") {
return fmt.Errorf("value of grouping label %s contains '/': %s", ln, lv)
}
urlComponents = append(urlComponents, ln, lv)
}
pushURL = fmt.Sprintf("%s/metrics/job/%s", pushURL, strings.Join(urlComponents, "/"))

mfs, err := g.Gather()
if err != nil {
return err
}
buf := &bytes.Buffer{}
enc := expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)
// Check for pre-existing grouping labels:
for _, mf := range mfs {
for _, m := range mf.GetMetric() {
for _, l := range m.GetLabel() {
if l.GetName() == "job" {
return fmt.Errorf("pushed metric %s (%s) already contains a job label", mf.GetName(), m)
}
if _, ok := grouping[l.GetName()]; ok {
return fmt.Errorf(
"pushed metric %s (%s) already contains grouping label %s",
mf.GetName(), m, l.GetName(),
)
}
}
}
enc.Encode(mf)
}
req, err := http.NewRequest(method, pushURL, buf)
if err != nil {
return err
}
req.Header.Set(contentTypeHeader, string(expfmt.FmtProtoDelim))
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != 202 {
body, _ := ioutil.ReadAll(resp.Body) // Ignore any further error as this is for an error message only.
return fmt.Errorf("unexpected status code %d while pushing to %s: %s", resp.StatusCode, pushURL, body)
}
return nil
}

// Collectors works like FromGatherer, but it does not use a Gatherer. Instead,
// it collects from the provided collectors directly. It is a convenient way to
// push only a few metrics.
//
// Deprecated: Please use a Pusher created with New instead.
func Collectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error {
return pushCollectors(job, grouping, url, "PUT", collectors...)
}

// AddCollectors works like AddFromGatherer, but it does not use a Gatherer.
// Instead, it collects from the provided collectors directly. It is a
// convenient way to push only a few metrics.
//
// Deprecated: Please use a Pusher created with New instead.
func AddCollectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error {
return pushCollectors(job, grouping, url, "POST", collectors...)
}

func pushCollectors(job string, grouping map[string]string, url, method string, collectors ...prometheus.Collector) error {
r := prometheus.NewRegistry()
for _, collector := range collectors {
if err := r.Register(collector); err != nil {
return err
}
}
return push(job, grouping, url, r, method)
}

// HostnameGroupingKey returns a label map with the only entry
// {instance="<hostname>"}. This can be conveniently used as the grouping
// parameter if metrics should be pushed with the hostname as label. The
// returned map is created upon each call so that the caller is free to add more
// labels to the map.
//
// Deprecated: Usually, metrics pushed to the Pushgateway should not be
// host-centric. (You would use https://github.com/prometheus/node_exporter in
// that case.) If you have the need to add the hostname to the grouping key, you
// are probably doing something wrong. See
// https://prometheus.io/docs/practices/pushing/ for details.
func HostnameGroupingKey() map[string]string {
hostname, err := os.Hostname()
if err != nil {
return map[string]string{"instance": "unknown"}
}
return map[string]string{"instance": hostname}
}
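Editor's note: the deprecation notes above point at the builder API defined in push.go below. A migration from FromGatherer to the new style might look like this sketch; the Pushgateway address, job name, and grouping values are placeholders.

package example

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/push"
)

func pushOnce(reg *prometheus.Registry) error {
	// Old, deprecated style:
	//   push.FromGatherer("some_job", push.HostnameGroupingKey(), "http://pushgateway:9091", reg)
	//
	// New builder style, per the "Deprecated" notes above:
	return push.New("http://pushgateway:9091", "some_job"). // placeholder URL and job name
		Gatherer(reg).                        // a *prometheus.Registry is a Gatherer
		Grouping("instance", "example-host"). // explicit replacement for HostnameGroupingKey
		Push()                                // PUT semantics, like FromGatherer
}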
248
vendor/github.com/prometheus/client_golang/prometheus/push/push.go
generated
vendored
248
vendor/github.com/prometheus/client_golang/prometheus/push/push.go
generated
vendored
@ -11,20 +11,27 @@
// See the License for the specific language governing permissions and
// limitations under the License.

// Copyright (c) 2013, The Prometheus Authors
// All rights reserved.
// Package push provides functions to push metrics to a Pushgateway. It uses a
// builder approach. Create a Pusher with New and then add the various options
// by using its methods, finally calling Add or Push, like this:
//
// Use of this source code is governed by a BSD-style license that can be found
// in the LICENSE file.

// Package push provides functions to push metrics to a Pushgateway. The metrics
// to push are either collected from a provided registry, or from explicitly
// listed collectors.
// // Easy case:
// push.New("http://example.org/metrics", "my_job").Gatherer(myRegistry).Push()
//
// See the documentation of the Pushgateway to understand the meaning of the
// grouping parameters and the differences between push.Registry and
// push.Collectors on the one hand and push.AddRegistry and push.AddCollectors
// on the other hand: https://github.com/prometheus/pushgateway
// // Complex case:
// push.New("http://example.org/metrics", "my_job").
// Collector(myCollector1).
// Collector(myCollector2).
// Grouping("zone", "xy").
// Client(&myHTTPClient).
// BasicAuth("top", "secret").
// Add()
//
// See the examples section for more detailed examples.
//
// See the documentation of the Pushgateway to understand the meaning of
// the grouping key and the differences between Push and Add:
// https://github.com/prometheus/pushgateway
package push

import (
@ -33,7 +40,6 @@ import (
"io/ioutil"
"net/http"
"net/url"
"os"
"strings"

"github.com/prometheus/common/expfmt"
@ -44,57 +50,149 @@ import (

const contentTypeHeader = "Content-Type"

// FromGatherer triggers a metric collection by the provided Gatherer (which is
// usually implemented by a prometheus.Registry) and pushes all gathered metrics
// to the Pushgateway specified by url, using the provided job name and the
// (optional) further grouping labels (the grouping map may be nil). See the
// Pushgateway documentation for detailed implications of the job and other
// grouping labels. Neither the job name nor any grouping label value may
// contain a "/". The metrics pushed must not contain a job label of their own
// nor any of the grouping labels.
//
// You can use just host:port or ip:port as url, in which case 'http://' is
// added automatically. You can also include the schema in the URL. However, do
// not include the '/metrics/jobs/...' part.
//
// Note that all previously pushed metrics with the same job and other grouping
// labels will be replaced with the metrics pushed by this call. (It uses HTTP
// method 'PUT' to push to the Pushgateway.)
func FromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error {
return push(job, grouping, url, g, "PUT")
// Pusher manages a push to the Pushgateway. Use New to create one, configure it
// with its methods, and finally use the Add or Push method to push.
type Pusher struct {
error error

url, job string
grouping map[string]string

gatherers prometheus.Gatherers
registerer prometheus.Registerer

client *http.Client
useBasicAuth bool
username, password string
}

// AddFromGatherer works like FromGatherer, but only previously pushed metrics
// with the same name (and the same job and other grouping labels) will be
// replaced. (It uses HTTP method 'POST' to push to the Pushgateway.)
func AddFromGatherer(job string, grouping map[string]string, url string, g prometheus.Gatherer) error {
return push(job, grouping, url, g, "POST")
}

func push(job string, grouping map[string]string, pushURL string, g prometheus.Gatherer, method string) error {
if !strings.Contains(pushURL, "://") {
pushURL = "http://" + pushURL
// New creates a new Pusher to push to the provided URL with the provided job
// name. You can use just host:port or ip:port as url, in which case “http://”
// is added automatically. Alternatively, include the schema in the
// URL. However, do not include the “/metrics/jobs/…” part.
//
// Note that until https://github.com/prometheus/pushgateway/issues/97 is
// resolved, a “/” character in the job name is prohibited.
func New(url, job string) *Pusher {
var (
reg = prometheus.NewRegistry()
err error
)
if !strings.Contains(url, "://") {
url = "http://" + url
}
if strings.HasSuffix(pushURL, "/") {
pushURL = pushURL[:len(pushURL)-1]
if strings.HasSuffix(url, "/") {
url = url[:len(url)-1]
}

if strings.Contains(job, "/") {
return fmt.Errorf("job contains '/': %s", job)
err = fmt.Errorf("job contains '/': %s", job)
}
urlComponents := []string{url.QueryEscape(job)}
for ln, lv := range grouping {
if !model.LabelName(ln).IsValid() {
return fmt.Errorf("grouping label has invalid name: %s", ln)

return &Pusher{
error: err,
url: url,
job: job,
grouping: map[string]string{},
gatherers: prometheus.Gatherers{reg},
registerer: reg,
client: &http.Client{},
}
}

// Push collects/gathers all metrics from all Collectors and Gatherers added to
// this Pusher. Then, it pushes them to the Pushgateway configured while
// creating this Pusher, using the configured job name and any added grouping
// labels as grouping key. All previously pushed metrics with the same job and
// other grouping labels will be replaced with the metrics pushed by this
// call. (It uses HTTP method “PUT” to push to the Pushgateway.)
//
// Push returns the first error encountered by any method call (including this
// one) in the lifetime of the Pusher.
func (p *Pusher) Push() error {
return p.push("PUT")
}

// Add works like push, but only previously pushed metrics with the same name
// (and the same job and other grouping labels) will be replaced. (It uses HTTP
// method “POST” to push to the Pushgateway.)
func (p *Pusher) Add() error {
return p.push("POST")
}

// Gatherer adds a Gatherer to the Pusher, from which metrics will be gathered
// to push them to the Pushgateway. The gathered metrics must not contain a job
// label of their own.
//
// For convenience, this method returns a pointer to the Pusher itself.
func (p *Pusher) Gatherer(g prometheus.Gatherer) *Pusher {
p.gatherers = append(p.gatherers, g)
return p
}

// Collector adds a Collector to the Pusher, from which metrics will be
// collected to push them to the Pushgateway. The collected metrics must not
// contain a job label of their own.
//
// For convenience, this method returns a pointer to the Pusher itself.
func (p *Pusher) Collector(c prometheus.Collector) *Pusher {
if p.error == nil {
p.error = p.registerer.Register(c)
}
return p
}

// Grouping adds a label pair to the grouping key of the Pusher, replacing any
// previously added label pair with the same label name. Note that setting any
// labels in the grouping key that are already contained in the metrics to push
// will lead to an error.
//
// For convenience, this method returns a pointer to the Pusher itself.
//
// Note that until https://github.com/prometheus/pushgateway/issues/97 is
// resolved, this method does not allow a “/” character in the label value.
func (p *Pusher) Grouping(name, value string) *Pusher {
if p.error == nil {
if !model.LabelName(name).IsValid() {
p.error = fmt.Errorf("grouping label has invalid name: %s", name)
return p
}
if strings.Contains(lv, "/") {
return fmt.Errorf("value of grouping label %s contains '/': %s", ln, lv)
if strings.Contains(value, "/") {
p.error = fmt.Errorf("value of grouping label %s contains '/': %s", name, value)
return p
}
p.grouping[name] = value
}
return p
}

// Client sets a custom HTTP client for the Pusher. For convenience, this method
// returns a pointer to the Pusher itself.
func (p *Pusher) Client(c *http.Client) *Pusher {
p.client = c
return p
}

// BasicAuth configures the Pusher to use HTTP Basic Authentication with the
// provided username and password. For convenience, this method returns a
// pointer to the Pusher itself.
func (p *Pusher) BasicAuth(username, password string) *Pusher {
p.useBasicAuth = true
p.username = username
p.password = password
return p
}

func (p *Pusher) push(method string) error {
if p.error != nil {
return p.error
}
urlComponents := []string{url.QueryEscape(p.job)}
for ln, lv := range p.grouping {
urlComponents = append(urlComponents, ln, lv)
}
pushURL = fmt.Sprintf("%s/metrics/job/%s", pushURL, strings.Join(urlComponents, "/"))
pushURL := fmt.Sprintf("%s/metrics/job/%s", p.url, strings.Join(urlComponents, "/"))

mfs, err := g.Gather()
mfs, err := p.gatherers.Gather()
if err != nil {
return err
}
@ -107,7 +205,7 @@ func push(job string, grouping map[string]string, pushURL string, g prometheus.G
if l.GetName() == "job" {
return fmt.Errorf("pushed metric %s (%s) already contains a job label", mf.GetName(), m)
}
if _, ok := grouping[l.GetName()]; ok {
if _, ok := p.grouping[l.GetName()]; ok {
return fmt.Errorf(
"pushed metric %s (%s) already contains grouping label %s",
mf.GetName(), m, l.GetName(),
@ -121,8 +219,11 @@ func push(job string, grouping map[string]string, pushURL string, g prometheus.G
if err != nil {
return err
}
if p.useBasicAuth {
req.SetBasicAuth(p.username, p.password)
}
req.Header.Set(contentTypeHeader, string(expfmt.FmtProtoDelim))
resp, err := http.DefaultClient.Do(req)
resp, err := p.client.Do(req)
if err != nil {
return err
}
@ -133,40 +234,3 @@ func push(job string, grouping map[string]string, pushURL string, g prometheus.G
}
return nil
}

// Collectors works like FromGatherer, but it does not use a Gatherer. Instead,
// it collects from the provided collectors directly. It is a convenient way to
// push only a few metrics.
func Collectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error {
return pushCollectors(job, grouping, url, "PUT", collectors...)
}

// AddCollectors works like AddFromGatherer, but it does not use a Gatherer.
// Instead, it collects from the provided collectors directly. It is a
// convenient way to push only a few metrics.
func AddCollectors(job string, grouping map[string]string, url string, collectors ...prometheus.Collector) error {
return pushCollectors(job, grouping, url, "POST", collectors...)
}

func pushCollectors(job string, grouping map[string]string, url, method string, collectors ...prometheus.Collector) error {
r := prometheus.NewRegistry()
for _, collector := range collectors {
if err := r.Register(collector); err != nil {
return err
}
}
return push(job, grouping, url, r, method)
}

// HostnameGroupingKey returns a label map with the only entry
// {instance="<hostname>"}. This can be conveniently used as the grouping
// parameter if metrics should be pushed with the hostname as label. The
// returned map is created upon each call so that the caller is free to add more
// labels to the map.
func HostnameGroupingKey() map[string]string {
hostname, err := os.Hostname()
if err != nil {
return map[string]string{"instance": "unknown"}
}
return map[string]string{"instance": hostname}
}
625
vendor/github.com/prometheus/client_golang/prometheus/registry.go
generated
vendored
625
vendor/github.com/prometheus/client_golang/prometheus/registry.go
generated
vendored
@ -15,16 +15,18 @@ package prometheus

import (
"bytes"
"errors"
"fmt"
"os"
"runtime"
"sort"
"strings"
"sync"
"unicode/utf8"

"github.com/golang/protobuf/proto"

dto "github.com/prometheus/client_model/go"

"github.com/prometheus/client_golang/prometheus/internal"
)

const (
@ -36,13 +38,14 @@ const (
// DefaultRegisterer and DefaultGatherer are the implementations of the
// Registerer and Gatherer interface a number of convenience functions in this
// package act on. Initially, both variables point to the same Registry, which
// has a process collector (see NewProcessCollector) and a Go collector (see
// NewGoCollector) already registered. This approach to keep default instances
// as global state mirrors the approach of other packages in the Go standard
// library. Note that there are caveats. Change the variables with caution and
// only if you understand the consequences. Users who want to avoid global state
// altogether should not use the convenience function and act on custom
// instances instead.
// has a process collector (currently on Linux only, see NewProcessCollector)
// and a Go collector (see NewGoCollector, in particular the note about
// stop-the-world implication with Go versions older than 1.9) already
// registered. This approach to keep default instances as global state mirrors
// the approach of other packages in the Go standard library. Note that there
// are caveats. Change the variables with caution and only if you understand the
// consequences. Users who want to avoid global state altogether should not use
// the convenience functions and act on custom instances instead.
var (
defaultRegistry = NewRegistry()
DefaultRegisterer Registerer = defaultRegistry
@ -50,7 +53,7 @@ var (
)

func init() {
MustRegister(NewProcessCollector(os.Getpid(), ""))
MustRegister(NewProcessCollector(ProcessCollectorOpts{}))
MustRegister(NewGoCollector())
}

@ -66,7 +69,8 @@ func NewRegistry() *Registry {

// NewPedanticRegistry returns a registry that checks during collection if each
// collected Metric is consistent with its reported Desc, and if the Desc has
// actually been registered with the registry.
// actually been registered with the registry. Unchecked Collectors (those whose
// Describe method does not yield any descriptors) are excluded from the check.
//
// Usually, a Registry will be happy as long as the union of all collected
// Metrics is consistent and valid even if some metrics are not consistent with
@ -96,8 +100,13 @@ type Registerer interface {
// returned error is an instance of AlreadyRegisteredError, which
// contains the previously registered Collector.
//
// It is in general not safe to register the same Collector multiple
// times concurrently.
// A Collector whose Describe method does not yield any Desc is treated
// as unchecked. Registration will always succeed. No check for
// re-registering (see previous paragraph) is performed. Thus, the
// caller is responsible for not double-registering the same unchecked
// Collector, and for providing a Collector that will not cause
// inconsistent metrics on collection. (This would lead to scrape
// errors.)
Register(Collector) error
// MustRegister works like Register but registers any number of
// Collectors and panics upon the first registration that causes an
@ -106,7 +115,9 @@ type Registerer interface {
// Unregister unregisters the Collector that equals the Collector passed
// in as an argument. (Two Collectors are considered equal if their
// Describe method yields the same set of descriptors.) The function
// returns whether a Collector was unregistered.
// returns whether a Collector was unregistered. Note that an unchecked
// Collector cannot be unregistered (as its Describe method does not
// yield any descriptor).
//
// Note that even after unregistering, it will not be possible to
// register a new Collector that is inconsistent with the unregistered
@ -124,15 +135,23 @@ type Registerer interface {
type Gatherer interface {
// Gather calls the Collect method of the registered Collectors and then
// gathers the collected metrics into a lexicographically sorted slice
// of MetricFamily protobufs. Even if an error occurs, Gather attempts
// to gather as many metrics as possible. Hence, if a non-nil error is
// returned, the returned MetricFamily slice could be nil (in case of a
// fatal error that prevented any meaningful metric collection) or
// contain a number of MetricFamily protobufs, some of which might be
// incomplete, and some might be missing altogether. The returned error
// (which might be a MultiError) explains the details. In scenarios
// where complete collection is critical, the returned MetricFamily
// protobufs should be disregarded if the returned error is non-nil.
// of uniquely named MetricFamily protobufs. Gather ensures that the
// returned slice is valid and self-consistent so that it can be used
// for valid exposition. As an exception to the strict consistency
// requirements described for metric.Desc, Gather will tolerate
// different sets of label names for metrics of the same metric family.
//
// Even if an error occurs, Gather attempts to gather as many metrics as
// possible. Hence, if a non-nil error is returned, the returned
// MetricFamily slice could be nil (in case of a fatal error that
// prevented any meaningful metric collection) or contain a number of
// MetricFamily protobufs, some of which might be incomplete, and some
// might be missing altogether. The returned error (which might be a
// MultiError) explains the details. Note that this is mostly useful for
// debugging purposes. If the gathered protobufs are to be used for
// exposition in actual monitoring, it is almost always better to not
// expose an incomplete result and instead disregard the returned
// MetricFamily protobufs in case the returned error is non-nil.
Gather() ([]*dto.MetricFamily, error)
}

@ -202,6 +221,13 @@ func (errs MultiError) Error() string {
return buf.String()
}

// Append appends the provided error if it is not nil.
func (errs *MultiError) Append(err error) {
if err != nil {
|
||||
*errs = append(*errs, err)
|
||||
}
|
||||
}
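As a minimal sketch (not part of this diff), calling code might unpack the possibly multi-valued error returned by Gather like this; the reg parameter and the use of the standard log package are illustrative assumptions:

// Requires "log" and "github.com/prometheus/client_golang/prometheus".
func reportGatherErrors(reg *prometheus.Registry) {
	if _, err := reg.Gather(); err != nil {
		if merr, ok := err.(prometheus.MultiError); ok {
			// MaybeUnwrap already collapses a single error, so a
			// MultiError here carries more than one individual error.
			for _, e := range merr {
				log.Println("gather error:", e)
			}
			return
		}
		log.Println("gather error:", err)
	}
}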
|
||||
|
||||
// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
|
||||
// contained error as error if len(errs) is 1. In all other cases, it returns
|
||||
// the MultiError directly. This is helpful for returning a MultiError in a way
|
||||
@ -226,6 +252,7 @@ type Registry struct {
|
||||
collectorsByID map[uint64]Collector // ID is a hash of the descIDs.
|
||||
descIDs map[uint64]struct{}
|
||||
dimHashesByName map[string]uint64
|
||||
uncheckedCollectors []Collector
|
||||
pedanticChecksEnabled bool
|
||||
}
|
||||
|
||||
@ -243,7 +270,12 @@ func (r *Registry) Register(c Collector) error {
|
||||
close(descChan)
|
||||
}()
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
defer func() {
|
||||
// Drain channel in case of premature return to not leak a goroutine.
|
||||
for range descChan {
|
||||
}
|
||||
r.mtx.Unlock()
|
||||
}()
|
||||
// Conduct various tests...
|
||||
for desc := range descChan {
|
||||
|
||||
@ -283,9 +315,10 @@ func (r *Registry) Register(c Collector) error {
|
||||
}
|
||||
}
|
||||
}
|
||||
// Did anything happen at all?
|
||||
// A Collector yielding no Desc at all is considered unchecked.
|
||||
if len(newDescIDs) == 0 {
|
||||
return errors.New("collector has no descriptors")
|
||||
r.uncheckedCollectors = append(r.uncheckedCollectors, c)
|
||||
return nil
|
||||
}
|
||||
if existing, exists := r.collectorsByID[collectorID]; exists {
|
||||
return AlreadyRegisteredError{
|
||||
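To illustrate the unchecked-Collector path added in this hunk, here is a hedged sketch of a Collector whose Describe sends no Desc at all; the type name, metric name, and constant value are illustrative assumptions, not part of this commit:

// Requires "github.com/prometheus/client_golang/prometheus".
type dynamicCollector struct{}

// Sending nothing from Describe marks the Collector as unchecked:
// registration always succeeds and no consistency checks are run for it.
func (dynamicCollector) Describe(chan<- *prometheus.Desc) {}

// Collect may emit whatever metrics it sees fit at scrape time.
func (dynamicCollector) Collect(ch chan<- prometheus.Metric) {
	desc := prometheus.NewDesc("dynamic_items_total", "Items seen at scrape time.", nil, nil)
	ch <- prometheus.MustNewConstMetric(desc, prometheus.CounterValue, 42)
}

Registering it is then simply prometheus.MustRegister(dynamicCollector{}); as the Registerer comment above notes, the caller is responsible for keeping its output consistent.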
@ -359,31 +392,25 @@ func (r *Registry) MustRegister(cs ...Collector) {
|
||||
// Gather implements Gatherer.
|
||||
func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
|
||||
var (
|
||||
metricChan = make(chan Metric, capMetricChan)
|
||||
metricHashes = map[uint64]struct{}{}
|
||||
dimHashes = map[string]uint64{}
|
||||
wg sync.WaitGroup
|
||||
errs MultiError // The collected errors to return in the end.
|
||||
registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
|
||||
checkedMetricChan = make(chan Metric, capMetricChan)
|
||||
uncheckedMetricChan = make(chan Metric, capMetricChan)
|
||||
metricHashes = map[uint64]struct{}{}
|
||||
wg sync.WaitGroup
|
||||
errs MultiError // The collected errors to return in the end.
|
||||
registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
|
||||
)
|
||||
|
||||
r.mtx.RLock()
|
||||
goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors)
|
||||
metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
|
||||
|
||||
// Scatter.
|
||||
// (Collectors could be complex and slow, so we call them all at once.)
|
||||
wg.Add(len(r.collectorsByID))
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(metricChan)
|
||||
}()
|
||||
checkedCollectors := make(chan Collector, len(r.collectorsByID))
|
||||
uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors))
|
||||
for _, collector := range r.collectorsByID {
|
||||
go func(collector Collector) {
|
||||
defer wg.Done()
|
||||
collector.Collect(metricChan)
|
||||
}(collector)
|
||||
checkedCollectors <- collector
|
||||
}
|
||||
for _, collector := range r.uncheckedCollectors {
|
||||
uncheckedCollectors <- collector
|
||||
}
|
||||
|
||||
// In case pedantic checks are enabled, we have to copy the map before
|
||||
// giving up the RLock.
|
||||
if r.pedanticChecksEnabled {
|
||||
@ -392,127 +419,226 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
|
||||
registeredDescIDs[id] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
r.mtx.RUnlock()
|
||||
|
||||
// Drain metricChan in case of premature return.
|
||||
wg.Add(goroutineBudget)
|
||||
|
||||
collectWorker := func() {
|
||||
for {
|
||||
select {
|
||||
case collector := <-checkedCollectors:
|
||||
collector.Collect(checkedMetricChan)
|
||||
case collector := <-uncheckedCollectors:
|
||||
collector.Collect(uncheckedMetricChan)
|
||||
default:
|
||||
return
|
||||
}
|
||||
wg.Done()
|
||||
}
|
||||
}
|
||||
|
||||
// Start the first worker now to make sure at least one is running.
|
||||
go collectWorker()
|
||||
goroutineBudget--
|
||||
|
||||
// Close checkedMetricChan and uncheckedMetricChan once all collectors
|
||||
// are collected.
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(checkedMetricChan)
|
||||
close(uncheckedMetricChan)
|
||||
}()
|
||||
|
||||
// Drain checkedMetricChan and uncheckedMetricChan in case of premature return.
|
||||
defer func() {
|
||||
for range metricChan {
|
||||
if checkedMetricChan != nil {
|
||||
for range checkedMetricChan {
|
||||
}
|
||||
}
|
||||
if uncheckedMetricChan != nil {
|
||||
for range uncheckedMetricChan {
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// Gather.
|
||||
for metric := range metricChan {
|
||||
// This could be done concurrently, too, but it required locking
|
||||
// of metricFamiliesByName (and of metricHashes if checks are
|
||||
// enabled). Most likely not worth it.
|
||||
desc := metric.Desc()
|
||||
dtoMetric := &dto.Metric{}
|
||||
if err := metric.Write(dtoMetric); err != nil {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
"error collecting metric %v: %s", desc, err,
|
||||
// Copy the channel references so we can nil them out later to remove
|
||||
// them from the select statements below.
|
||||
cmc := checkedMetricChan
|
||||
umc := uncheckedMetricChan
|
||||
|
||||
for {
|
||||
select {
|
||||
case metric, ok := <-cmc:
|
||||
if !ok {
|
||||
cmc = nil
|
||||
break
|
||||
}
|
||||
errs.Append(processMetric(
|
||||
metric, metricFamiliesByName,
|
||||
metricHashes,
|
||||
registeredDescIDs,
|
||||
))
|
||||
continue
|
||||
case metric, ok := <-umc:
|
||||
if !ok {
|
||||
umc = nil
|
||||
break
|
||||
}
|
||||
errs.Append(processMetric(
|
||||
metric, metricFamiliesByName,
|
||||
metricHashes,
|
||||
nil,
|
||||
))
|
||||
default:
|
||||
if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 {
|
||||
// All collectors are already being worked on or
|
||||
// we have already as many goroutines started as
|
||||
// there are collectors. Do the same as above,
|
||||
// just without the default.
|
||||
select {
|
||||
case metric, ok := <-cmc:
|
||||
if !ok {
|
||||
cmc = nil
|
||||
break
|
||||
}
|
||||
errs.Append(processMetric(
|
||||
metric, metricFamiliesByName,
|
||||
metricHashes,
|
||||
registeredDescIDs,
|
||||
))
|
||||
case metric, ok := <-umc:
|
||||
if !ok {
|
||||
umc = nil
|
||||
break
|
||||
}
|
||||
errs.Append(processMetric(
|
||||
metric, metricFamiliesByName,
|
||||
metricHashes,
|
||||
nil,
|
||||
))
|
||||
}
|
||||
break
|
||||
}
|
||||
// Start more workers.
|
||||
go collectWorker()
|
||||
goroutineBudget--
|
||||
runtime.Gosched()
|
||||
}
|
||||
metricFamily, ok := metricFamiliesByName[desc.fqName]
|
||||
if ok {
|
||||
if metricFamily.GetHelp() != desc.help {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
"collected metric %s %s has help %q but should have %q",
|
||||
desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
|
||||
))
|
||||
continue
|
||||
}
|
||||
// TODO(beorn7): Simplify switch once Desc has type.
|
||||
switch metricFamily.GetType() {
|
||||
case dto.MetricType_COUNTER:
|
||||
if dtoMetric.Counter == nil {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
"collected metric %s %s should be a Counter",
|
||||
desc.fqName, dtoMetric,
|
||||
))
|
||||
continue
|
||||
}
|
||||
case dto.MetricType_GAUGE:
|
||||
if dtoMetric.Gauge == nil {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
"collected metric %s %s should be a Gauge",
|
||||
desc.fqName, dtoMetric,
|
||||
))
|
||||
continue
|
||||
}
|
||||
case dto.MetricType_SUMMARY:
|
||||
if dtoMetric.Summary == nil {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
"collected metric %s %s should be a Summary",
|
||||
desc.fqName, dtoMetric,
|
||||
))
|
||||
continue
|
||||
}
|
||||
case dto.MetricType_UNTYPED:
|
||||
if dtoMetric.Untyped == nil {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
"collected metric %s %s should be Untyped",
|
||||
desc.fqName, dtoMetric,
|
||||
))
|
||||
continue
|
||||
}
|
||||
case dto.MetricType_HISTOGRAM:
|
||||
if dtoMetric.Histogram == nil {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
"collected metric %s %s should be a Histogram",
|
||||
desc.fqName, dtoMetric,
|
||||
))
|
||||
continue
|
||||
}
|
||||
default:
|
||||
panic("encountered MetricFamily with invalid type")
|
||||
}
|
||||
} else {
|
||||
metricFamily = &dto.MetricFamily{}
|
||||
metricFamily.Name = proto.String(desc.fqName)
|
||||
metricFamily.Help = proto.String(desc.help)
|
||||
// TODO(beorn7): Simplify switch once Desc has type.
|
||||
switch {
|
||||
case dtoMetric.Gauge != nil:
|
||||
metricFamily.Type = dto.MetricType_GAUGE.Enum()
|
||||
case dtoMetric.Counter != nil:
|
||||
metricFamily.Type = dto.MetricType_COUNTER.Enum()
|
||||
case dtoMetric.Summary != nil:
|
||||
metricFamily.Type = dto.MetricType_SUMMARY.Enum()
|
||||
case dtoMetric.Untyped != nil:
|
||||
metricFamily.Type = dto.MetricType_UNTYPED.Enum()
|
||||
case dtoMetric.Histogram != nil:
|
||||
metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
|
||||
default:
|
||||
errs = append(errs, fmt.Errorf(
|
||||
"empty metric collected: %s", dtoMetric,
|
||||
))
|
||||
continue
|
||||
}
|
||||
metricFamiliesByName[desc.fqName] = metricFamily
|
||||
// Once both checkedMetricChan and uncheckedMetricChan are closed
|
||||
// and drained, the contraption above will nil out cmc and umc,
|
||||
// and then we can leave the collect loop here.
|
||||
if cmc == nil && umc == nil {
|
||||
break
|
||||
}
|
||||
if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil {
|
||||
errs = append(errs, err)
|
||||
continue
|
||||
}
|
||||
if r.pedanticChecksEnabled {
|
||||
// Is the desc registered at all?
|
||||
if _, exist := registeredDescIDs[desc.id]; !exist {
|
||||
errs = append(errs, fmt.Errorf(
|
||||
"collected metric %s %s with unregistered descriptor %s",
|
||||
metricFamily.GetName(), dtoMetric, desc,
|
||||
))
|
||||
continue
|
||||
}
|
||||
if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
|
||||
errs = append(errs, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
|
||||
}
|
||||
return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
|
||||
return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
|
||||
}
|
||||
|
||||
// processMetric is an internal helper method only used by the Gather method.
|
||||
func processMetric(
|
||||
metric Metric,
|
||||
metricFamiliesByName map[string]*dto.MetricFamily,
|
||||
metricHashes map[uint64]struct{},
|
||||
registeredDescIDs map[uint64]struct{},
|
||||
) error {
|
||||
desc := metric.Desc()
|
||||
// Wrapped metrics collected by an unchecked Collector can have an
|
||||
// invalid Desc.
|
||||
if desc.err != nil {
|
||||
return desc.err
|
||||
}
|
||||
dtoMetric := &dto.Metric{}
|
||||
if err := metric.Write(dtoMetric); err != nil {
|
||||
return fmt.Errorf("error collecting metric %v: %s", desc, err)
|
||||
}
|
||||
metricFamily, ok := metricFamiliesByName[desc.fqName]
|
||||
if ok { // Existing name.
|
||||
if metricFamily.GetHelp() != desc.help {
|
||||
return fmt.Errorf(
|
||||
"collected metric %s %s has help %q but should have %q",
|
||||
desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),
|
||||
)
|
||||
}
|
||||
// TODO(beorn7): Simplify switch once Desc has type.
|
||||
switch metricFamily.GetType() {
|
||||
case dto.MetricType_COUNTER:
|
||||
if dtoMetric.Counter == nil {
|
||||
return fmt.Errorf(
|
||||
"collected metric %s %s should be a Counter",
|
||||
desc.fqName, dtoMetric,
|
||||
)
|
||||
}
|
||||
case dto.MetricType_GAUGE:
|
||||
if dtoMetric.Gauge == nil {
|
||||
return fmt.Errorf(
|
||||
"collected metric %s %s should be a Gauge",
|
||||
desc.fqName, dtoMetric,
|
||||
)
|
||||
}
|
||||
case dto.MetricType_SUMMARY:
|
||||
if dtoMetric.Summary == nil {
|
||||
return fmt.Errorf(
|
||||
"collected metric %s %s should be a Summary",
|
||||
desc.fqName, dtoMetric,
|
||||
)
|
||||
}
|
||||
case dto.MetricType_UNTYPED:
|
||||
if dtoMetric.Untyped == nil {
|
||||
return fmt.Errorf(
|
||||
"collected metric %s %s should be Untyped",
|
||||
desc.fqName, dtoMetric,
|
||||
)
|
||||
}
|
||||
case dto.MetricType_HISTOGRAM:
|
||||
if dtoMetric.Histogram == nil {
|
||||
return fmt.Errorf(
|
||||
"collected metric %s %s should be a Histogram",
|
||||
desc.fqName, dtoMetric,
|
||||
)
|
||||
}
|
||||
default:
|
||||
panic("encountered MetricFamily with invalid type")
|
||||
}
|
||||
} else { // New name.
|
||||
metricFamily = &dto.MetricFamily{}
|
||||
metricFamily.Name = proto.String(desc.fqName)
|
||||
metricFamily.Help = proto.String(desc.help)
|
||||
// TODO(beorn7): Simplify switch once Desc has type.
|
||||
switch {
|
||||
case dtoMetric.Gauge != nil:
|
||||
metricFamily.Type = dto.MetricType_GAUGE.Enum()
|
||||
case dtoMetric.Counter != nil:
|
||||
metricFamily.Type = dto.MetricType_COUNTER.Enum()
|
||||
case dtoMetric.Summary != nil:
|
||||
metricFamily.Type = dto.MetricType_SUMMARY.Enum()
|
||||
case dtoMetric.Untyped != nil:
|
||||
metricFamily.Type = dto.MetricType_UNTYPED.Enum()
|
||||
case dtoMetric.Histogram != nil:
|
||||
metricFamily.Type = dto.MetricType_HISTOGRAM.Enum()
|
||||
default:
|
||||
return fmt.Errorf("empty metric collected: %s", dtoMetric)
|
||||
}
|
||||
if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil {
|
||||
return err
|
||||
}
|
||||
metricFamiliesByName[desc.fqName] = metricFamily
|
||||
}
|
||||
if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil {
|
||||
return err
|
||||
}
|
||||
if registeredDescIDs != nil {
|
||||
// Is the desc registered at all?
|
||||
if _, exist := registeredDescIDs[desc.id]; !exist {
|
||||
return fmt.Errorf(
|
||||
"collected metric %s %s with unregistered descriptor %s",
|
||||
metricFamily.GetName(), dtoMetric, desc,
|
||||
)
|
||||
}
|
||||
if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
metricFamily.Metric = append(metricFamily.Metric, dtoMetric)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Gatherers is a slice of Gatherer instances that implements the Gatherer
|
||||
@ -538,7 +664,6 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
|
||||
var (
|
||||
metricFamiliesByName = map[string]*dto.MetricFamily{}
|
||||
metricHashes = map[uint64]struct{}{}
|
||||
dimHashes = map[string]uint64{}
|
||||
errs MultiError // The collected errors to return in the end.
|
||||
)
|
||||
|
||||
@ -575,10 +700,14 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
|
||||
existingMF.Name = mf.Name
|
||||
existingMF.Help = mf.Help
|
||||
existingMF.Type = mf.Type
|
||||
if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil {
|
||||
errs = append(errs, err)
|
||||
continue
|
||||
}
|
||||
metricFamiliesByName[mf.GetName()] = existingMF
|
||||
}
|
||||
for _, m := range mf.Metric {
|
||||
if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil {
|
||||
if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil {
|
||||
errs = append(errs, err)
|
||||
continue
|
||||
}
|
||||
@ -586,88 +715,80 @@ func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
|
||||
}
|
||||
}
|
||||
}
|
||||
return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
|
||||
return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
|
||||
}
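As a brief sketch of how the Gatherers type above is typically used (the customReg parameter and function name are assumptions for illustration):

// Requires "github.com/prometheus/client_golang/prometheus" and
// dto "github.com/prometheus/client_model/go".
func gatherAll(customReg *prometheus.Registry) ([]*dto.MetricFamily, error) {
	g := prometheus.Gatherers{
		prometheus.DefaultGatherer,
		customReg,
	}
	// Inconsistencies between the merged gatherers (duplicate families,
	// the suffix collisions checked below, and so on) surface in the returned error.
	return g.Gather()
}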
|
||||
|
||||
// metricSorter is a sortable slice of *dto.Metric.
|
||||
type metricSorter []*dto.Metric
|
||||
|
||||
func (s metricSorter) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
func (s metricSorter) Swap(i, j int) {
|
||||
s[i], s[j] = s[j], s[i]
|
||||
}
|
||||
|
||||
func (s metricSorter) Less(i, j int) bool {
|
||||
if len(s[i].Label) != len(s[j].Label) {
|
||||
// This should not happen. The metrics are
|
||||
// inconsistent. However, we have to deal with the fact, as
|
||||
// people might use custom collectors or metric family injection
|
||||
// to create inconsistent metrics. So let's simply compare the
|
||||
// number of labels in this case. That will still yield
|
||||
// reproducible sorting.
|
||||
return len(s[i].Label) < len(s[j].Label)
|
||||
// checkSuffixCollisions checks for collisions with the “magic” suffixes the
|
||||
// Prometheus text format and the internal metric representation of the
|
||||
// Prometheus server add while flattening Summaries and Histograms.
|
||||
func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error {
|
||||
var (
|
||||
newName = mf.GetName()
|
||||
newType = mf.GetType()
|
||||
newNameWithoutSuffix = ""
|
||||
)
|
||||
switch {
|
||||
case strings.HasSuffix(newName, "_count"):
|
||||
newNameWithoutSuffix = newName[:len(newName)-6]
|
||||
case strings.HasSuffix(newName, "_sum"):
|
||||
newNameWithoutSuffix = newName[:len(newName)-4]
|
||||
case strings.HasSuffix(newName, "_bucket"):
|
||||
newNameWithoutSuffix = newName[:len(newName)-7]
|
||||
}
|
||||
for n, lp := range s[i].Label {
|
||||
vi := lp.GetValue()
|
||||
vj := s[j].Label[n].GetValue()
|
||||
if vi != vj {
|
||||
return vi < vj
|
||||
if newNameWithoutSuffix != "" {
|
||||
if existingMF, ok := mfs[newNameWithoutSuffix]; ok {
|
||||
switch existingMF.GetType() {
|
||||
case dto.MetricType_SUMMARY:
|
||||
if !strings.HasSuffix(newName, "_bucket") {
|
||||
return fmt.Errorf(
|
||||
"collected metric named %q collides with previously collected summary named %q",
|
||||
newName, newNameWithoutSuffix,
|
||||
)
|
||||
}
|
||||
case dto.MetricType_HISTOGRAM:
|
||||
return fmt.Errorf(
|
||||
"collected metric named %q collides with previously collected histogram named %q",
|
||||
newName, newNameWithoutSuffix,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We should never arrive here. Multiple metrics with the same
|
||||
// label set in the same scrape will lead to undefined ingestion
|
||||
// behavior. However, as above, we have to provide stable sorting
|
||||
// here, even for inconsistent metrics. So sort equal metrics
|
||||
// by their timestamp, with missing timestamps (implying "now")
|
||||
// coming last.
|
||||
if s[i].TimestampMs == nil {
|
||||
return false
|
||||
}
|
||||
if s[j].TimestampMs == nil {
|
||||
return true
|
||||
}
|
||||
return s[i].GetTimestampMs() < s[j].GetTimestampMs()
|
||||
}
|
||||
|
||||
// normalizeMetricFamilies returns a MetricFamily slice with empty
|
||||
// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
|
||||
// the slice, with the contained Metrics sorted within each MetricFamily.
|
||||
func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {
|
||||
for _, mf := range metricFamiliesByName {
|
||||
sort.Sort(metricSorter(mf.Metric))
|
||||
}
|
||||
names := make([]string, 0, len(metricFamiliesByName))
|
||||
for name, mf := range metricFamiliesByName {
|
||||
if len(mf.Metric) > 0 {
|
||||
names = append(names, name)
|
||||
if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM {
|
||||
if _, ok := mfs[newName+"_count"]; ok {
|
||||
return fmt.Errorf(
|
||||
"collected histogram or summary named %q collides with previously collected metric named %q",
|
||||
newName, newName+"_count",
|
||||
)
|
||||
}
|
||||
if _, ok := mfs[newName+"_sum"]; ok {
|
||||
return fmt.Errorf(
|
||||
"collected histogram or summary named %q collides with previously collected metric named %q",
|
||||
newName, newName+"_sum",
|
||||
)
|
||||
}
|
||||
}
|
||||
sort.Strings(names)
|
||||
result := make([]*dto.MetricFamily, 0, len(names))
|
||||
for _, name := range names {
|
||||
result = append(result, metricFamiliesByName[name])
|
||||
if newType == dto.MetricType_HISTOGRAM {
|
||||
if _, ok := mfs[newName+"_bucket"]; ok {
|
||||
return fmt.Errorf(
|
||||
"collected histogram named %q collides with previously collected metric named %q",
|
||||
newName, newName+"_bucket",
|
||||
)
|
||||
}
|
||||
}
|
||||
return result
|
||||
return nil
|
||||
}
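A hedged sketch of the kind of collision checkSuffixCollisions now reports; the metric names are illustrative, and the collision only surfaces when Gather flattens the families:

// Requires "fmt" and "github.com/prometheus/client_golang/prometheus".
func suffixCollisionExample() {
	reg := prometheus.NewRegistry()
	sum := prometheus.NewSummary(prometheus.SummaryOpts{
		Name: "rpc_duration_seconds",
		Help: "RPC latency.",
	})
	cnt := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "rpc_duration_seconds_count", // collides with the summary's flattened _count series
		Help: "An unrelated counter.",
	})
	reg.MustRegister(sum, cnt) // registration succeeds; the fully-qualified names differ
	if _, err := reg.Gather(); err != nil {
		fmt.Println("collision reported:", err)
	}
}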
|
||||
|
||||
// checkMetricConsistency checks if the provided Metric is consistent with the
|
||||
// provided MetricFamily. It also hashed the Metric labels and the MetricFamily
|
||||
// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
|
||||
// name. If the resulting hash is already in the provided metricHashes, an error
|
||||
// is returned. If not, it is added to metricHashes. The provided dimHashes maps
|
||||
// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes
|
||||
// doesn't yet contain a hash for the provided MetricFamily, it is
|
||||
// added. Otherwise, an error is returned if the existing dimHash is not equal to
|
||||
// the calculated dimHash.
|
||||
// is returned. If not, it is added to metricHashes.
|
||||
func checkMetricConsistency(
|
||||
metricFamily *dto.MetricFamily,
|
||||
dtoMetric *dto.Metric,
|
||||
metricHashes map[uint64]struct{},
|
||||
dimHashes map[string]uint64,
|
||||
) error {
|
||||
name := metricFamily.GetName()
|
||||
|
||||
// Type consistency with metric family.
|
||||
if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||
|
||||
metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||
|
||||
@ -675,47 +796,59 @@ func checkMetricConsistency(
|
||||
metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||
|
||||
metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {
|
||||
return fmt.Errorf(
|
||||
"collected metric %s %s is not a %s",
|
||||
metricFamily.GetName(), dtoMetric, metricFamily.GetType(),
|
||||
"collected metric %q { %s} is not a %s",
|
||||
name, dtoMetric, metricFamily.GetType(),
|
||||
)
|
||||
}
|
||||
|
||||
previousLabelName := ""
|
||||
for _, labelPair := range dtoMetric.GetLabel() {
|
||||
if !utf8.ValidString(*labelPair.Value) {
|
||||
return fmt.Errorf("collected metric's label %s is not utf8: %#v", *labelPair.Name, *labelPair.Value)
|
||||
labelName := labelPair.GetName()
|
||||
if labelName == previousLabelName {
|
||||
return fmt.Errorf(
|
||||
"collected metric %q { %s} has two or more labels with the same name: %s",
|
||||
name, dtoMetric, labelName,
|
||||
)
|
||||
}
|
||||
if !checkLabelName(labelName) {
|
||||
return fmt.Errorf(
|
||||
"collected metric %q { %s} has a label with an invalid name: %s",
|
||||
name, dtoMetric, labelName,
|
||||
)
|
||||
}
|
||||
if dtoMetric.Summary != nil && labelName == quantileLabel {
|
||||
return fmt.Errorf(
|
||||
"collected metric %q { %s} must not have an explicit %q label",
|
||||
name, dtoMetric, quantileLabel,
|
||||
)
|
||||
}
|
||||
if !utf8.ValidString(labelPair.GetValue()) {
|
||||
return fmt.Errorf(
|
||||
"collected metric %q { %s} has a label named %q whose value is not utf8: %#v",
|
||||
name, dtoMetric, labelName, labelPair.GetValue())
|
||||
}
|
||||
previousLabelName = labelName
|
||||
}
|
||||
|
||||
// Is the metric unique (i.e. no other metric with the same name and the same label values)?
|
||||
// Is the metric unique (i.e. no other metric with the same name and the same labels)?
|
||||
h := hashNew()
|
||||
h = hashAdd(h, metricFamily.GetName())
|
||||
h = hashAdd(h, name)
|
||||
h = hashAddByte(h, separatorByte)
|
||||
dh := hashNew()
|
||||
// Make sure label pairs are sorted. We depend on it for the consistency
|
||||
// check.
|
||||
sort.Sort(LabelPairSorter(dtoMetric.Label))
|
||||
sort.Sort(labelPairSorter(dtoMetric.Label))
|
||||
for _, lp := range dtoMetric.Label {
|
||||
h = hashAdd(h, lp.GetName())
|
||||
h = hashAddByte(h, separatorByte)
|
||||
h = hashAdd(h, lp.GetValue())
|
||||
h = hashAddByte(h, separatorByte)
|
||||
dh = hashAdd(dh, lp.GetName())
|
||||
dh = hashAddByte(dh, separatorByte)
|
||||
}
|
||||
if _, exists := metricHashes[h]; exists {
|
||||
return fmt.Errorf(
|
||||
"collected metric %s %s was collected before with the same name and label values",
|
||||
metricFamily.GetName(), dtoMetric,
|
||||
"collected metric %q { %s} was collected before with the same name and label values",
|
||||
name, dtoMetric,
|
||||
)
|
||||
}
|
||||
if dimHash, ok := dimHashes[metricFamily.GetName()]; ok {
|
||||
if dimHash != dh {
|
||||
return fmt.Errorf(
|
||||
"collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family",
|
||||
metricFamily.GetName(), dtoMetric,
|
||||
)
|
||||
}
|
||||
} else {
|
||||
dimHashes[metricFamily.GetName()] = dh
|
||||
}
|
||||
metricHashes[h] = struct{}{}
|
||||
return nil
|
||||
}
|
||||
@ -747,7 +880,7 @@ func checkDescConsistency(
|
||||
metricFamily.GetName(), dtoMetric, desc,
|
||||
)
|
||||
}
|
||||
sort.Sort(LabelPairSorter(lpsFromDesc))
|
||||
sort.Sort(labelPairSorter(lpsFromDesc))
|
||||
for i, lpFromDesc := range lpsFromDesc {
|
||||
lpFromMetric := dtoMetric.Label[i]
|
||||
if lpFromDesc.GetName() != lpFromMetric.GetName() ||
|
||||
|
31 vendor/github.com/prometheus/client_golang/prometheus/summary.go (generated, vendored)
@ -37,7 +37,7 @@ const quantileLabel = "quantile"
|
||||
// A typical use-case is the observation of request latencies. By default, a
|
||||
// Summary provides the median, the 90th and the 99th percentile of the latency
|
||||
// as rank estimations. However, the default behavior will change in the
|
||||
// upcoming v0.10 of the library. There will be no rank estiamtions at all by
|
||||
// upcoming v0.10 of the library. There will be no rank estimations at all by
|
||||
// default. For a sane transition, it is recommended to set the desired rank
|
||||
// estimations explicitly.
|
||||
//
|
||||
@ -81,10 +81,10 @@ const (
|
||||
)
|
||||
|
||||
// SummaryOpts bundles the options for creating a Summary metric. It is
|
||||
// mandatory to set Name and Help to a non-empty string. While all other fields
|
||||
// are optional and can safely be left at their zero value, it is recommended to
|
||||
// explicitly set the Objectives field to the desired value as the default value
|
||||
// will change in the upcoming v0.10 of the library.
|
||||
// mandatory to set Name to a non-empty string. While all other fields are
|
||||
// optional and can safely be left at their zero value, it is recommended to set
|
||||
// a help string and to explicitly set the Objectives field to the desired value
|
||||
// as the default value will change in the upcoming v0.10 of the library.
|
||||
type SummaryOpts struct {
|
||||
// Namespace, Subsystem, and Name are components of the fully-qualified
|
||||
// name of the Summary (created by joining these components with
|
||||
@ -95,7 +95,7 @@ type SummaryOpts struct {
|
||||
Subsystem string
|
||||
Name string
|
||||
|
||||
// Help provides information about this Summary. Mandatory!
|
||||
// Help provides information about this Summary.
|
||||
//
|
||||
// Metrics with the same fully-qualified name must have the same Help
|
||||
// string.
|
||||
@ -105,6 +105,11 @@ type SummaryOpts struct {
|
||||
// with the same fully-qualified name must have the same label names in
|
||||
// their ConstLabels.
|
||||
//
|
||||
// Due to the way a Summary is represented in the Prometheus text format
|
||||
// and how it is handled by the Prometheus server internally, “quantile”
|
||||
// is an illegal label name. Construction of a Summary or SummaryVec
|
||||
// will panic if this label name is used in ConstLabels.
|
||||
//
|
||||
// ConstLabels are only used rarely. In particular, do not use them to
|
||||
// attach the same labels to all your metrics. Those use cases are
|
||||
// better covered by target labels set by the scraping Prometheus
|
||||
@ -402,7 +407,16 @@ type SummaryVec struct {
|
||||
|
||||
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
|
||||
// partitioned by the given label names.
|
||||
//
|
||||
// Due to the way a Summary is represented in the Prometheus text format and how
|
||||
// it is handled by the Prometheus server internally, “quantile” is an illegal
|
||||
// label name. NewSummaryVec will panic if this label name is used.
|
||||
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
|
||||
for _, ln := range labelNames {
|
||||
if ln == quantileLabel {
|
||||
panic(errQuantileLabelNotAllowed)
|
||||
}
|
||||
}
|
||||
desc := NewDesc(
|
||||
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
|
||||
opts.Help,
|
||||
@ -572,7 +586,7 @@ func (s *constSummary) Write(out *dto.Metric) error {
|
||||
// map[float64]float64{0.5: 0.23, 0.99: 0.56}
|
||||
//
|
||||
// NewConstSummary returns an error if the length of labelValues is not
|
||||
// consistent with the variable labels in Desc.
|
||||
// consistent with the variable labels in Desc or if Desc is invalid.
|
||||
func NewConstSummary(
|
||||
desc *Desc,
|
||||
count uint64,
|
||||
@ -580,6 +594,9 @@ func NewConstSummary(
|
||||
quantiles map[float64]float64,
|
||||
labelValues ...string,
|
||||
) (Metric, error) {
|
||||
if desc.err != nil {
|
||||
return nil, desc.err
|
||||
}
|
||||
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
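Before moving on to the new testutil package, a short sketch of a SummaryVec that follows the recommendations in the comments above (explicit Objectives, no "quantile" label); all names and objective values are illustrative assumptions:

// Requires "github.com/prometheus/client_golang/prometheus".
var requestDurations = prometheus.NewSummaryVec(
	prometheus.SummaryOpts{
		Name:       "http_request_duration_seconds",
		Help:       "HTTP request latencies.",
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	},
	[]string{"handler"}, // a label named "quantile" here would make NewSummaryVec panic
)

// Typical use: requestDurations.WithLabelValues("index").Observe(0.23)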
184 vendor/github.com/prometheus/client_golang/prometheus/testutil/testutil.go (generated, vendored, new file)
@ -0,0 +1,184 @@
|
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package testutil provides helpers to test code using the prometheus package
|
||||
// of client_golang.
|
||||
//
|
||||
// While writing unit tests to verify correct instrumentation of your code, it's
|
||||
// a common mistake to mostly test the instrumentation library instead of your
|
||||
// own code. Rather than verifying that a prometheus.Counter's value has changed
|
||||
// as expected or that it shows up in the exposition after registration, it is
|
||||
// in general more robust and more faithful to the concept of unit tests to use
|
||||
// mock implementations of the prometheus.Counter and prometheus.Registerer
|
||||
// interfaces that simply assert that the Add or Register methods have been
|
||||
// called with the expected arguments. However, this might be overkill in simple
|
||||
// scenarios. The ToFloat64 function is provided for simple inspection of a
|
||||
// single-value metric, but it has to be used with caution.
|
||||
//
|
||||
// End-to-end tests to verify all or larger parts of the metrics exposition can
|
||||
// be implemented with the CollectAndCompare or GatherAndCompare functions. The
|
||||
// most appropriate use is not so much testing instrumentation of your code, but
|
||||
// testing custom prometheus.Collector implementations and in particular whole
|
||||
// exporters, i.e. programs that retrieve telemetry data from a 3rd party source
|
||||
// and convert it into Prometheus metrics.
|
||||
package testutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
|
||||
"github.com/prometheus/common/expfmt"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/internal"
|
||||
)
|
||||
|
||||
// ToFloat64 collects all Metrics from the provided Collector. It expects that
|
||||
// this results in exactly one Metric being collected, which must be a Gauge,
|
||||
// Counter, or Untyped. In all other cases, ToFloat64 panics. ToFloat64 returns
|
||||
// the value of the collected Metric.
|
||||
//
|
||||
// The Collector provided is typically a simple instance of Gauge or Counter, or
|
||||
// – less commonly – a GaugeVec or CounterVec with exactly one element. But any
|
||||
// Collector fulfilling the prerequisites described above will do.
|
||||
//
|
||||
// Use this function with caution. It is computationally very expensive and thus
|
||||
// not suited at all to read values from Metrics in regular code. This is really
|
||||
// only for testing purposes, and even for testing, other approaches are often
|
||||
// more appropriate (see this package's documentation).
|
||||
//
|
||||
// A clear anti-pattern would be to use a metric type from the prometheus
|
||||
// package to track values that are also needed for something else than the
|
||||
// exposition of Prometheus metrics. For example, you would like to track the
|
||||
// number of items in a queue because your code should reject queuing further
|
||||
// items if a certain limit is reached. It is tempting to track the number of
|
||||
// items in a prometheus.Gauge, as it is then easily available as a metric for
|
||||
// exposition, too. However, then you would need to call ToFloat64 in your
|
||||
// regular code, potentially quite often. The recommended way is to track the
|
||||
// number of items conventionally (in the way you would have done it without
|
||||
// considering Prometheus metrics) and then expose the number with a
|
||||
// prometheus.GaugeFunc.
|
||||
func ToFloat64(c prometheus.Collector) float64 {
|
||||
var (
|
||||
m prometheus.Metric
|
||||
mCount int
|
||||
mChan = make(chan prometheus.Metric)
|
||||
done = make(chan struct{})
|
||||
)
|
||||
|
||||
go func() {
|
||||
for m = range mChan {
|
||||
mCount++
|
||||
}
|
||||
close(done)
|
||||
}()
|
||||
|
||||
c.Collect(mChan)
|
||||
close(mChan)
|
||||
<-done
|
||||
|
||||
if mCount != 1 {
|
||||
panic(fmt.Errorf("collected %d metrics instead of exactly 1", mCount))
|
||||
}
|
||||
|
||||
pb := &dto.Metric{}
|
||||
m.Write(pb)
|
||||
if pb.Gauge != nil {
|
||||
return pb.Gauge.GetValue()
|
||||
}
|
||||
if pb.Counter != nil {
|
||||
return pb.Counter.GetValue()
|
||||
}
|
||||
if pb.Untyped != nil {
|
||||
return pb.Untyped.GetValue()
|
||||
}
|
||||
panic(fmt.Errorf("collected a non-gauge/counter/untyped metric: %s", pb))
|
||||
}
|
||||
|
||||
// CollectAndCompare registers the provided Collector with a newly created
|
||||
// pedantic Registry. It then does the same as GatherAndCompare, gathering the
|
||||
// metrics from the pedantic Registry.
|
||||
func CollectAndCompare(c prometheus.Collector, expected io.Reader, metricNames ...string) error {
|
||||
reg := prometheus.NewPedanticRegistry()
|
||||
if err := reg.Register(c); err != nil {
|
||||
return fmt.Errorf("registering collector failed: %s", err)
|
||||
}
|
||||
return GatherAndCompare(reg, expected, metricNames...)
|
||||
}
|
||||
|
||||
// GatherAndCompare gathers all metrics from the provided Gatherer and compares
|
||||
// it to an expected output read from the provided Reader in the Prometheus text
|
||||
// exposition format. If any metricNames are provided, only metrics with those
|
||||
// names are compared.
|
||||
func GatherAndCompare(g prometheus.Gatherer, expected io.Reader, metricNames ...string) error {
|
||||
metrics, err := g.Gather()
|
||||
if err != nil {
|
||||
return fmt.Errorf("gathering metrics failed: %s", err)
|
||||
}
|
||||
if metricNames != nil {
|
||||
metrics = filterMetrics(metrics, metricNames)
|
||||
}
|
||||
var tp expfmt.TextParser
|
||||
expectedMetrics, err := tp.TextToMetricFamilies(expected)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing expected metrics failed: %s", err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(metrics, internal.NormalizeMetricFamilies(expectedMetrics)) {
|
||||
// Encode the gathered output to the readable text format for comparison.
|
||||
var buf1 bytes.Buffer
|
||||
enc := expfmt.NewEncoder(&buf1, expfmt.FmtText)
|
||||
for _, mf := range metrics {
|
||||
if err := enc.Encode(mf); err != nil {
|
||||
return fmt.Errorf("encoding result failed: %s", err)
|
||||
}
|
||||
}
|
||||
// Encode normalized expected metrics again to generate them in the same ordering
|
||||
// the registry does to spot differences more easily.
|
||||
var buf2 bytes.Buffer
|
||||
enc = expfmt.NewEncoder(&buf2, expfmt.FmtText)
|
||||
for _, mf := range internal.NormalizeMetricFamilies(expectedMetrics) {
|
||||
if err := enc.Encode(mf); err != nil {
|
||||
return fmt.Errorf("encoding result failed: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf(`
|
||||
metric output does not match expectation; want:
|
||||
|
||||
%s
|
||||
|
||||
got:
|
||||
|
||||
%s
|
||||
`, buf2.String(), buf1.String())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func filterMetrics(metrics []*dto.MetricFamily, names []string) []*dto.MetricFamily {
|
||||
var filtered []*dto.MetricFamily
|
||||
for _, m := range metrics {
|
||||
for _, name := range names {
|
||||
if m.GetName() == name {
|
||||
filtered = append(filtered, m)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return filtered
|
||||
}
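A hedged end-to-end sketch of the two helpers above in a unit test; the package name, metric name, and expected value are illustrative assumptions:

package myapp_test

import (
	"strings"
	"testing"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/testutil"
)

func TestRequestsCounter(t *testing.T) {
	c := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "myapp_requests_total",
		Help: "Total requests handled.",
	})
	c.Add(3)

	// Quick single-value check.
	if got := testutil.ToFloat64(c); got != 3 {
		t.Fatalf("want 3, got %v", got)
	}

	// End-to-end comparison against the text exposition format.
	expected := `
# HELP myapp_requests_total Total requests handled.
# TYPE myapp_requests_total counter
myapp_requests_total 3
`
	if err := testutil.CollectAndCompare(c, strings.NewReader(expected), "myapp_requests_total"); err != nil {
		t.Fatal(err)
	}
}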
|
92 vendor/github.com/prometheus/client_golang/prometheus/value.go (generated, vendored)
@ -15,14 +15,11 @@ package prometheus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
// ValueType is an enumeration of metric types that represent a simple value.
|
||||
@ -36,79 +33,6 @@ const (
|
||||
UntypedValue
|
||||
)
|
||||
|
||||
// value is a generic metric for simple values. It implements Metric, Collector,
|
||||
// Counter, Gauge, and Untyped. Its effective type is determined by
|
||||
// ValueType. This is a low-level building block used by the library to back the
|
||||
// implementations of Counter, Gauge, and Untyped.
|
||||
type value struct {
|
||||
// valBits contains the bits of the represented float64 value. It has
|
||||
// to go first in the struct to guarantee alignment for atomic
|
||||
// operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
|
||||
valBits uint64
|
||||
|
||||
selfCollector
|
||||
|
||||
desc *Desc
|
||||
valType ValueType
|
||||
labelPairs []*dto.LabelPair
|
||||
}
|
||||
|
||||
// newValue returns a newly allocated value with the given Desc, ValueType,
|
||||
// sample value and label values. It panics if the number of label
|
||||
// values is different from the number of variable labels in Desc.
|
||||
func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value {
|
||||
if len(labelValues) != len(desc.variableLabels) {
|
||||
panic(errInconsistentCardinality)
|
||||
}
|
||||
result := &value{
|
||||
desc: desc,
|
||||
valType: valueType,
|
||||
valBits: math.Float64bits(val),
|
||||
labelPairs: makeLabelPairs(desc, labelValues),
|
||||
}
|
||||
result.init(result)
|
||||
return result
|
||||
}
|
||||
|
||||
func (v *value) Desc() *Desc {
|
||||
return v.desc
|
||||
}
|
||||
|
||||
func (v *value) Set(val float64) {
|
||||
atomic.StoreUint64(&v.valBits, math.Float64bits(val))
|
||||
}
|
||||
|
||||
func (v *value) SetToCurrentTime() {
|
||||
v.Set(float64(time.Now().UnixNano()) / 1e9)
|
||||
}
|
||||
|
||||
func (v *value) Inc() {
|
||||
v.Add(1)
|
||||
}
|
||||
|
||||
func (v *value) Dec() {
|
||||
v.Add(-1)
|
||||
}
|
||||
|
||||
func (v *value) Add(val float64) {
|
||||
for {
|
||||
oldBits := atomic.LoadUint64(&v.valBits)
|
||||
newBits := math.Float64bits(math.Float64frombits(oldBits) + val)
|
||||
if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (v *value) Sub(val float64) {
|
||||
v.Add(val * -1)
|
||||
}
|
||||
|
||||
func (v *value) Write(out *dto.Metric) error {
|
||||
val := math.Float64frombits(atomic.LoadUint64(&v.valBits))
|
||||
return populateMetric(v.valType, val, v.labelPairs, out)
|
||||
}
|
||||
|
||||
// valueFunc is a generic metric for simple values retrieved on collect time
|
||||
// from a function. It implements Metric and Collector. Its effective type is
|
||||
// determined by ValueType. This is a low-level building block used by the
|
||||
@ -153,8 +77,12 @@ func (v *valueFunc) Write(out *dto.Metric) error {
|
||||
// operations. However, when implementing custom Collectors, it is useful as a
|
||||
// throw-away metric that is generated on the fly to send it to Prometheus in
|
||||
// the Collect method. NewConstMetric returns an error if the length of
|
||||
// labelValues is not consistent with the variable labels in Desc.
|
||||
// labelValues is not consistent with the variable labels in Desc or if Desc is
|
||||
// invalid.
|
||||
func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
|
||||
if desc.err != nil {
|
||||
return nil, desc.err
|
||||
}
|
||||
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -228,9 +156,7 @@ func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
|
||||
Value: proto.String(labelValues[i]),
|
||||
})
|
||||
}
|
||||
for _, lp := range desc.constLabelPairs {
|
||||
labelPairs = append(labelPairs, lp)
|
||||
}
|
||||
sort.Sort(LabelPairSorter(labelPairs))
|
||||
labelPairs = append(labelPairs, desc.constLabelPairs...)
|
||||
sort.Sort(labelPairSorter(labelPairs))
|
||||
return labelPairs
|
||||
}
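A small sketch of the "Desc is invalid" case now documented for NewConstMetric; the deliberately malformed metric name is an illustrative assumption:

// Requires "fmt" and "github.com/prometheus/client_golang/prometheus".
func constMetricErrorExample() {
	desc := prometheus.NewDesc("queue depth", "Current queue depth.", nil, nil) // the space makes the name invalid
	if _, err := prometheus.NewConstMetric(desc, prometheus.GaugeValue, 42); err != nil {
		fmt.Println("rejected:", err) // the Desc's construction error is surfaced here instead of panicking
	}
}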
|
||||
|
3 vendor/github.com/prometheus/client_golang/prometheus/vec.go (generated, vendored)
@ -277,6 +277,9 @@ func (m *metricMap) deleteByHashWithLabelValues(
|
||||
func (m *metricMap) deleteByHashWithLabels(
|
||||
h uint64, labels Labels, curry []curriedLabelValue,
|
||||
) bool {
|
||||
m.mtx.Lock()
|
||||
defer m.mtx.Unlock()
|
||||
|
||||
metrics, ok := m.metrics[h]
|
||||
if !ok {
|
||||
return false
|
||||
|
179 vendor/github.com/prometheus/client_golang/prometheus/wrap.go (generated, vendored, new file)
@ -0,0 +1,179 @@
|
||||
// Copyright 2018 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
|
||||
dto "github.com/prometheus/client_model/go"
|
||||
)
|
||||
|
||||
// WrapRegistererWith returns a Registerer wrapping the provided
|
||||
// Registerer. Collectors registered with the returned Registerer will be
|
||||
// registered with the wrapped Registerer in a modified way. The modified
|
||||
// Collector adds the provided Labels to all Metrics it collects (as
|
||||
// ConstLabels). The Metrics collected by the unmodified Collector must not
|
||||
// duplicate any of those labels.
|
||||
//
|
||||
// WrapRegistererWith provides a way to add fixed labels to a subset of
|
||||
// Collectors. It should not be used to add fixed labels to all metrics exposed.
|
||||
//
|
||||
// The Collector example demonstrates a use of WrapRegistererWith.
|
||||
func WrapRegistererWith(labels Labels, reg Registerer) Registerer {
|
||||
return &wrappingRegisterer{
|
||||
wrappedRegisterer: reg,
|
||||
labels: labels,
|
||||
}
|
||||
}
|
||||
|
||||
// WrapRegistererWithPrefix returns a Registerer wrapping the provided
|
||||
// Registerer. Collectors registered with the returned Registerer will be
|
||||
// registered with the wrapped Registerer in a modified way. The modified
|
||||
// Collector adds the provided prefix to the name of all Metrics it collects.
|
||||
//
|
||||
// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of
|
||||
// a sub-system. To make this work, register metrics of the sub-system with the
|
||||
// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful
|
||||
// to use the same prefix for all metrics exposed. In particular, do not prefix
|
||||
// metric names that are standardized across applications, as that would break
|
||||
// horizontal monitoring, for example the metrics provided by the Go collector
|
||||
// (see NewGoCollector) and the process collector (see NewProcessCollector). (In
|
||||
// fact, those metrics are already prefixed with “go_” or “process_”,
|
||||
// respectively.)
|
||||
func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer {
|
||||
return &wrappingRegisterer{
|
||||
wrappedRegisterer: reg,
|
||||
prefix: prefix,
|
||||
}
|
||||
}
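A hedged sketch of how the two wrappers above compose; the prefix, label, and metric names are illustrative assumptions:

// Requires "github.com/prometheus/client_golang/prometheus".
func wrapExample() {
	reg := prometheus.NewRegistry()
	sub := prometheus.WrapRegistererWith(
		prometheus.Labels{"component": "indexer"},
		prometheus.WrapRegistererWithPrefix("myapp_", reg),
	)
	c := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "items_processed_total",
		Help: "Items processed by this component.",
	})
	sub.MustRegister(c)
	// Exposed as: myapp_items_processed_total{component="indexer"}
}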
|
||||
|
||||
type wrappingRegisterer struct {
|
||||
wrappedRegisterer Registerer
|
||||
prefix string
|
||||
labels Labels
|
||||
}
|
||||
|
||||
func (r *wrappingRegisterer) Register(c Collector) error {
|
||||
return r.wrappedRegisterer.Register(&wrappingCollector{
|
||||
wrappedCollector: c,
|
||||
prefix: r.prefix,
|
||||
labels: r.labels,
|
||||
})
|
||||
}
|
||||
|
||||
func (r *wrappingRegisterer) MustRegister(cs ...Collector) {
|
||||
for _, c := range cs {
|
||||
if err := r.Register(c); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *wrappingRegisterer) Unregister(c Collector) bool {
|
||||
return r.wrappedRegisterer.Unregister(&wrappingCollector{
|
||||
wrappedCollector: c,
|
||||
prefix: r.prefix,
|
||||
labels: r.labels,
|
||||
})
|
||||
}
|
||||
|
||||
type wrappingCollector struct {
|
||||
wrappedCollector Collector
|
||||
prefix string
|
||||
labels Labels
|
||||
}
|
||||
|
||||
func (c *wrappingCollector) Collect(ch chan<- Metric) {
|
||||
wrappedCh := make(chan Metric)
|
||||
go func() {
|
||||
c.wrappedCollector.Collect(wrappedCh)
|
||||
close(wrappedCh)
|
||||
}()
|
||||
for m := range wrappedCh {
|
||||
ch <- &wrappingMetric{
|
||||
wrappedMetric: m,
|
||||
prefix: c.prefix,
|
||||
labels: c.labels,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *wrappingCollector) Describe(ch chan<- *Desc) {
|
||||
wrappedCh := make(chan *Desc)
|
||||
go func() {
|
||||
c.wrappedCollector.Describe(wrappedCh)
|
||||
close(wrappedCh)
|
||||
}()
|
||||
for desc := range wrappedCh {
|
||||
ch <- wrapDesc(desc, c.prefix, c.labels)
|
||||
}
|
||||
}
|
||||
|
||||
type wrappingMetric struct {
|
||||
wrappedMetric Metric
|
||||
prefix string
|
||||
labels Labels
|
||||
}
|
||||
|
||||
func (m *wrappingMetric) Desc() *Desc {
|
||||
return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels)
|
||||
}
|
||||
|
||||
func (m *wrappingMetric) Write(out *dto.Metric) error {
|
||||
if err := m.wrappedMetric.Write(out); err != nil {
|
||||
return err
|
||||
}
|
||||
if len(m.labels) == 0 {
|
||||
// No wrapping labels.
|
||||
return nil
|
||||
}
|
||||
for ln, lv := range m.labels {
|
||||
out.Label = append(out.Label, &dto.LabelPair{
|
||||
Name: proto.String(ln),
|
||||
Value: proto.String(lv),
|
||||
})
|
||||
}
|
||||
sort.Sort(labelPairSorter(out.Label))
|
||||
return nil
|
||||
}
|
||||
|
||||
func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc {
|
||||
constLabels := Labels{}
|
||||
for _, lp := range desc.constLabelPairs {
|
||||
constLabels[*lp.Name] = *lp.Value
|
||||
}
|
||||
for ln, lv := range labels {
|
||||
if _, alreadyUsed := constLabels[ln]; alreadyUsed {
|
||||
return &Desc{
|
||||
fqName: desc.fqName,
|
||||
help: desc.help,
|
||||
variableLabels: desc.variableLabels,
|
||||
constLabelPairs: desc.constLabelPairs,
|
||||
err: fmt.Errorf("attempted wrapping with already existing label name %q", ln),
|
||||
}
|
||||
}
|
||||
constLabels[ln] = lv
|
||||
}
|
||||
// NewDesc will do remaining validations.
|
||||
newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels)
|
||||
// Propagate errors if there were any. This will override any error
|
||||
// created by NewDesc above, i.e. earlier errors get precedence.
|
||||
if desc.err != nil {
|
||||
newDesc.err = desc.err
|
||||
}
|
||||
return newDesc
|
||||
}
|
2 vendor/manifest (vendored)
@ -334,7 +334,7 @@
|
||||
"importpath": "github.com/prometheus/client_golang/prometheus",
|
||||
"repository": "https://github.com/prometheus/client_golang",
|
||||
"vcs": "git",
|
||||
"revision": "180b8fdc22b4ea7750bcb43c925277654a1ea2f3",
|
||||
"revision": "1cafe34db7fdec6022e17e00e1c1ea501022f3e4",
|
||||
"branch": "master",
|
||||
"path": "/prometheus",
|
||||
"notests": true
|
||||
|