mirror of
https://github.com/weaveworks/scope.git
synced 2026-03-03 02:00:43 +00:00
Update go-metrics library to latest
Signed-off-by: Bryan Boreham <bjboreham@gmail.com>
This commit is contained in:
71
vendor/github.com/armon/go-metrics/README.md
generated
vendored
71
vendor/github.com/armon/go-metrics/README.md
generated
vendored
@@ -1,71 +0,0 @@
|
||||
go-metrics
|
||||
==========
|
||||
|
||||
This library provides a `metrics` package which can be used to instrument code,
|
||||
expose application metrics, and profile runtime performance in a flexible manner.
|
||||
|
||||
Current API: [![GoDoc](https://godoc.org/github.com/armon/go-metrics?status.svg)](https://godoc.org/github.com/armon/go-metrics)
|
||||
|
||||
Sinks
|
||||
=====
|
||||
|
||||
The `metrics` package makes use of a `MetricSink` interface to support delivery
|
||||
to any type of backend. Currently the following sinks are provided:
|
||||
|
||||
* StatsiteSink : Sinks to a [statsite](https://github.com/armon/statsite/) instance (TCP)
|
||||
* StatsdSink: Sinks to a [StatsD](https://github.com/etsy/statsd/) / statsite instance (UDP)
|
||||
* PrometheusSink: Sinks to a [Prometheus](http://prometheus.io/) metrics endpoint (exposed via HTTP for scrapes)
|
||||
* InmemSink : Provides in-memory aggregation, can be used to export stats
|
||||
* FanoutSink : Sinks to multiple sinks. Enables writing to multiple statsite instances for example.
|
||||
* BlackholeSink : Sinks to nowhere
|
||||
|
||||
In addition to the sinks, the `InmemSignal` can be used to catch a signal,
|
||||
and dump a formatted output of recent metrics. For example, when a process gets
|
||||
a SIGUSR1, it can dump to stderr recent performance metrics for debugging.
|
||||
|
||||
Examples
|
||||
========
|
||||
|
||||
Here is an example of using the package:
|
||||
|
||||
func SlowMethod() {
|
||||
// Profiling the runtime of a method
|
||||
defer metrics.MeasureSince([]string{"SlowMethod"}, time.Now())
|
||||
}
|
||||
|
||||
// Configure a statsite sink as the global metrics sink
|
||||
sink, _ := metrics.NewStatsiteSink("statsite:8125")
|
||||
metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink)
|
||||
|
||||
// Emit a Key/Value pair
|
||||
metrics.EmitKey([]string{"questions", "meaning of life"}, 42)
|
||||
|
||||
|
||||
Here is an example of setting up an signal handler:
|
||||
|
||||
// Setup the inmem sink and signal handler
|
||||
inm := metrics.NewInmemSink(10*time.Second, time.Minute)
|
||||
sig := metrics.DefaultInmemSignal(inm)
|
||||
metrics.NewGlobal(metrics.DefaultConfig("service-name"), inm)
|
||||
|
||||
// Run some code
|
||||
inm.SetGauge([]string{"foo"}, 42)
|
||||
inm.EmitKey([]string{"bar"}, 30)
|
||||
|
||||
inm.IncrCounter([]string{"baz"}, 42)
|
||||
inm.IncrCounter([]string{"baz"}, 1)
|
||||
inm.IncrCounter([]string{"baz"}, 80)
|
||||
|
||||
inm.AddSample([]string{"method", "wow"}, 42)
|
||||
inm.AddSample([]string{"method", "wow"}, 100)
|
||||
inm.AddSample([]string{"method", "wow"}, 22)
|
||||
|
||||
....
|
||||
|
||||
When a signal comes in, output like the following will be dumped to stderr:
|
||||
|
||||
[2014-01-28 14:57:33.04 -0800 PST][G] 'foo': 42.000
|
||||
[2014-01-28 14:57:33.04 -0800 PST][P] 'bar': 30.000
|
||||
[2014-01-28 14:57:33.04 -0800 PST][C] 'baz': Count: 3 Min: 1.000 Mean: 41.000 Max: 80.000 Stddev: 39.509
|
||||
[2014-01-28 14:57:33.04 -0800 PST][S] 'method.wow': Count: 3 Min: 22.000 Mean: 54.667 Max: 100.000 Stddev: 40.513
|
||||
|
||||
119
vendor/github.com/armon/go-metrics/circonus/circonus.go
generated
vendored
Normal file
119
vendor/github.com/armon/go-metrics/circonus/circonus.go
generated
vendored
Normal file
@@ -0,0 +1,119 @@
|
||||
// Circonus Metrics Sink
|
||||
|
||||
package circonus
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/armon/go-metrics"
|
||||
cgm "github.com/circonus-labs/circonus-gometrics"
|
||||
)
|
||||
|
||||
// CirconusSink provides an interface to forward metrics to Circonus with
// automatic check creation and metric management
type CirconusSink struct {
	// metrics is the underlying circonus-gometrics client that performs
	// aggregation and submission; all sink methods delegate to it.
	metrics *cgm.CirconusMetrics
}

// Config options for CirconusSink
// See https://github.com/circonus-labs/circonus-gometrics for configuration options
type Config cgm.Config
|
||||
|
||||
// NewCirconusSink - create new metric sink for circonus
|
||||
//
|
||||
// one of the following must be supplied:
|
||||
// - API Token - search for an existing check or create a new check
|
||||
// - API Token + Check Id - the check identified by check id will be used
|
||||
// - API Token + Check Submission URL - the check identified by the submission url will be used
|
||||
// - Check Submission URL - the check identified by the submission url will be used
|
||||
// metric management will be *disabled*
|
||||
//
|
||||
// Note: If submission url is supplied w/o an api token, the public circonus ca cert will be used
|
||||
// to verify the broker for metrics submission.
|
||||
func NewCirconusSink(cc *Config) (*CirconusSink, error) {
|
||||
cfg := cgm.Config{}
|
||||
if cc != nil {
|
||||
cfg = cgm.Config(*cc)
|
||||
}
|
||||
|
||||
metrics, err := cgm.NewCirconusMetrics(&cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &CirconusSink{
|
||||
metrics: metrics,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Start submitting metrics to Circonus (flush every SubmitInterval)
func (s *CirconusSink) Start() {
	s.metrics.Start()
}
|
||||
|
||||
// Flush manually triggers metric submission to Circonus
func (s *CirconusSink) Flush() {
	s.metrics.Flush()
}
|
||||
|
||||
// SetGauge sets value for a gauge metric
// Note: the int64 conversion discards any fractional part of val.
func (s *CirconusSink) SetGauge(key []string, val float32) {
	flatKey := s.flattenKey(key)
	s.metrics.SetGauge(flatKey, int64(val))
}
|
||||
|
||||
// SetGaugeWithLabels sets value for a gauge metric with the given labels.
// Label values are folded into the flattened metric name (Circonus has no
// native tag support here); the int64 conversion discards any fraction.
func (s *CirconusSink) SetGaugeWithLabels(key []string, val float32, labels []metrics.Label) {
	flatKey := s.flattenKeyLabels(key, labels)
	s.metrics.SetGauge(flatKey, int64(val))
}
|
||||
|
||||
// EmitKey is not implemented in circonus
func (s *CirconusSink) EmitKey(key []string, val float32) {
	// NOP
}
|
||||
|
||||
// IncrCounter increments a counter metric
// NOTE(review): the uint64 conversion means a negative val would wrap to a
// huge increment — callers are presumably expected to pass non-negative values.
func (s *CirconusSink) IncrCounter(key []string, val float32) {
	flatKey := s.flattenKey(key)
	s.metrics.IncrementByValue(flatKey, uint64(val))
}
|
||||
|
||||
// IncrCounterWithLabels increments a counter metric with the given labels.
// Label values are folded into the flattened metric name; the uint64
// conversion assumes val is non-negative (negative values would wrap).
func (s *CirconusSink) IncrCounterWithLabels(key []string, val float32, labels []metrics.Label) {
	flatKey := s.flattenKeyLabels(key, labels)
	s.metrics.IncrementByValue(flatKey, uint64(val))
}
|
||||
|
||||
// AddSample adds a sample to a histogram metric
func (s *CirconusSink) AddSample(key []string, val float32) {
	flatKey := s.flattenKey(key)
	s.metrics.RecordValue(flatKey, float64(val))
}
|
||||
|
||||
// AddSampleWithLabels adds a sample to a histogram metric with the given
// labels. Label values are folded into the flattened metric name.
func (s *CirconusSink) AddSampleWithLabels(key []string, val float32, labels []metrics.Label) {
	flatKey := s.flattenKeyLabels(key, labels)
	s.metrics.RecordValue(flatKey, float64(val))
}
|
||||
|
||||
// Flattens key to Circonus metric name
|
||||
func (s *CirconusSink) flattenKey(parts []string) string {
|
||||
joined := strings.Join(parts, "`")
|
||||
return strings.Map(func(r rune) rune {
|
||||
switch r {
|
||||
case ' ':
|
||||
return '_'
|
||||
default:
|
||||
return r
|
||||
}
|
||||
}, joined)
|
||||
}
|
||||
|
||||
// Flattens the key along with labels for formatting, removes spaces
|
||||
func (s *CirconusSink) flattenKeyLabels(parts []string, labels []metrics.Label) string {
|
||||
for _, label := range labels {
|
||||
parts = append(parts, label.Value)
|
||||
}
|
||||
return s.flattenKey(parts)
|
||||
}
|
||||
154
vendor/github.com/armon/go-metrics/circonus/circonus_test.go
generated
vendored
Normal file
154
vendor/github.com/armon/go-metrics/circonus/circonus_test.go
generated
vendored
Normal file
@@ -0,0 +1,154 @@
|
||||
package circonus
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestNewCirconusSink verifies constructor validation: a nil config must be
// rejected, while a config carrying only a submission URL must succeed.
func TestNewCirconusSink(t *testing.T) {

	// test with invalid config (nil)
	expectedError := errors.New("invalid check manager configuration (no API token AND no submission url)")
	_, err := NewCirconusSink(nil)
	if err == nil || !strings.Contains(err.Error(), expectedError.Error()) {
		t.Errorf("Expected an '%#v' error, got '%#v'", expectedError, err)
	}

	// test w/submission url and w/o token
	cfg := &Config{}
	cfg.CheckManager.Check.SubmissionURL = "http://127.0.0.1:43191/"
	_, err = NewCirconusSink(cfg)
	if err != nil {
		t.Errorf("Expected no error, got '%v'", err)
	}

	// note: a test with a valid token is *not* done as it *will* create a
	// check resulting in testing the api more than the circonus sink
	// see circonus-gometrics/checkmgr/checkmgr_test.go for testing of api token
}
|
||||
|
||||
// TestFlattenKey checks flattenKey's table of cases: backtick joining,
// preservation of '-', '_', '/', and space-to-underscore conversion.
func TestFlattenKey(t *testing.T) {
	var testKeys = []struct {
		input    []string
		expected string
	}{
		{[]string{"a", "b", "c"}, "a`b`c"},
		{[]string{"a-a", "b_b", "c/c"}, "a-a`b_b`c/c"},
		{[]string{"spaces must", "flatten", "to", "underscores"}, "spaces_must`flatten`to`underscores"},
	}

	// flattenKey reads no sink state, so a zero-value sink suffices.
	c := &CirconusSink{}

	for _, test := range testKeys {
		if actual := c.flattenKey(test.input); actual != test.expected {
			t.Fatalf("Flattening %v failed, expected '%s' got '%s'", test.input, test.expected, actual)
		}
	}
}
|
||||
|
||||
func fakeBroker(q chan string) *httptest.Server {
|
||||
handler := func(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(200)
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
defer r.Body.Close()
|
||||
body, err := ioutil.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
q <- err.Error()
|
||||
fmt.Fprintln(w, err.Error())
|
||||
} else {
|
||||
q <- string(body)
|
||||
fmt.Fprintln(w, `{"stats":1}`)
|
||||
}
|
||||
}
|
||||
|
||||
return httptest.NewServer(http.HandlerFunc(handler))
|
||||
}
|
||||
|
||||
// TestSetGauge submits a gauge through a sink pointed at the fake broker and
// asserts the exact wire payload ("_type":"l" = signed 64-bit gauge).
func TestSetGauge(t *testing.T) {
	q := make(chan string)

	server := fakeBroker(q)
	defer server.Close()

	cfg := &Config{}
	cfg.CheckManager.Check.SubmissionURL = server.URL

	cs, err := NewCirconusSink(cfg)
	if err != nil {
		t.Errorf("Expected no error, got '%v'", err)
	}

	// Flush blocks on the unbuffered channel inside the broker handler,
	// so it must run in a goroutine while the test reads from q below.
	go func() {
		cs.SetGauge([]string{"foo", "bar"}, 1)
		cs.Flush()
	}()

	expect := "{\"foo`bar\":{\"_type\":\"l\",\"_value\":1}}"
	actual := <-q

	if actual != expect {
		t.Errorf("Expected '%s', got '%s'", expect, actual)
	}
}
|
||||
|
||||
// TestIncrCounter submits a counter through a sink pointed at the fake broker
// and asserts the exact wire payload ("_type":"L" = unsigned 64-bit counter).
func TestIncrCounter(t *testing.T) {
	q := make(chan string)

	server := fakeBroker(q)
	defer server.Close()

	cfg := &Config{}
	cfg.CheckManager.Check.SubmissionURL = server.URL

	cs, err := NewCirconusSink(cfg)
	if err != nil {
		t.Errorf("Expected no error, got '%v'", err)
	}

	// Flush blocks on the unbuffered channel inside the broker handler,
	// so it must run in a goroutine while the test reads from q below.
	go func() {
		cs.IncrCounter([]string{"foo", "bar"}, 1)
		cs.Flush()
	}()

	expect := "{\"foo`bar\":{\"_type\":\"L\",\"_value\":1}}"
	actual := <-q

	if actual != expect {
		t.Errorf("Expected '%s', got '%s'", expect, actual)
	}
}
|
||||
|
||||
// TestAddSample submits a histogram sample through a sink pointed at the fake
// broker and asserts the exact wire payload ("_type":"n" with a log-linear
// histogram bucket encoding).
func TestAddSample(t *testing.T) {
	q := make(chan string)

	server := fakeBroker(q)
	defer server.Close()

	cfg := &Config{}
	cfg.CheckManager.Check.SubmissionURL = server.URL

	cs, err := NewCirconusSink(cfg)
	if err != nil {
		t.Errorf("Expected no error, got '%v'", err)
	}

	// Flush blocks on the unbuffered channel inside the broker handler,
	// so it must run in a goroutine while the test reads from q below.
	go func() {
		cs.AddSample([]string{"foo", "bar"}, 1)
		cs.Flush()
	}()

	expect := "{\"foo`bar\":{\"_type\":\"n\",\"_value\":[\"H[1.0e+00]=1\"]}}"
	actual := <-q

	if actual != expect {
		t.Errorf("Expected '%s', got '%s'", expect, actual)
	}
}
|
||||
85
vendor/github.com/armon/go-metrics/datadog/dogstatsd.go
generated
vendored
85
vendor/github.com/armon/go-metrics/datadog/dogstatsd.go
generated
vendored
@@ -5,6 +5,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/DataDog/datadog-go/statsd"
|
||||
"github.com/armon/go-metrics"
|
||||
)
|
||||
|
||||
// DogStatsdSink provides a MetricSink that can be used
|
||||
@@ -45,54 +46,49 @@ func (s *DogStatsdSink) EnableHostNamePropagation() {
|
||||
|
||||
func (s *DogStatsdSink) flattenKey(parts []string) string {
|
||||
joined := strings.Join(parts, ".")
|
||||
return strings.Map(func(r rune) rune {
|
||||
switch r {
|
||||
case ':':
|
||||
fallthrough
|
||||
case ' ':
|
||||
return '_'
|
||||
default:
|
||||
return r
|
||||
}
|
||||
}, joined)
|
||||
return strings.Map(sanitize, joined)
|
||||
}
|
||||
|
||||
func (s *DogStatsdSink) parseKey(key []string) ([]string, []string) {
|
||||
func sanitize(r rune) rune {
|
||||
switch r {
|
||||
case ':':
|
||||
fallthrough
|
||||
case ' ':
|
||||
return '_'
|
||||
default:
|
||||
return r
|
||||
}
|
||||
}
|
||||
|
||||
func (s *DogStatsdSink) parseKey(key []string) ([]string, []metrics.Label) {
|
||||
// Since DogStatsd supports dimensionality via tags on metric keys, this sink's approach is to splice the hostname out of the key in favor of a `host` tag
|
||||
// The `host` tag is either forced here, or set downstream by the DogStatsd server
|
||||
|
||||
var tags []string
|
||||
var labels []metrics.Label
|
||||
hostName := s.hostName
|
||||
|
||||
//Splice the hostname out of the key
|
||||
// Splice the hostname out of the key
|
||||
for i, el := range key {
|
||||
if el == hostName {
|
||||
key = append(key[:i], key[i+1:]...)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if s.propagateHostname {
|
||||
tags = append(tags, fmt.Sprintf("host:%s", hostName))
|
||||
labels = append(labels, metrics.Label{"host", hostName})
|
||||
}
|
||||
return key, tags
|
||||
return key, labels
|
||||
}
|
||||
|
||||
// Implementation of methods in the MetricSink interface
|
||||
|
||||
func (s *DogStatsdSink) SetGauge(key []string, val float32) {
|
||||
key, tags := s.parseKey(key)
|
||||
flatKey := s.flattenKey(key)
|
||||
|
||||
rate := 1.0
|
||||
s.client.Gauge(flatKey, float64(val), tags, rate)
|
||||
s.SetGaugeWithLabels(key, val, nil)
|
||||
}
|
||||
|
||||
func (s *DogStatsdSink) IncrCounter(key []string, val float32) {
|
||||
key, tags := s.parseKey(key)
|
||||
flatKey := s.flattenKey(key)
|
||||
|
||||
rate := 1.0
|
||||
s.client.Count(flatKey, int64(val), tags, rate)
|
||||
s.IncrCounterWithLabels(key, val, nil)
|
||||
}
|
||||
|
||||
// EmitKey is not implemented since DogStatsd does not provide a metric type that holds an
|
||||
@@ -101,9 +97,44 @@ func (s *DogStatsdSink) EmitKey(key []string, val float32) {
|
||||
}
|
||||
|
||||
func (s *DogStatsdSink) AddSample(key []string, val float32) {
|
||||
key, tags := s.parseKey(key)
|
||||
flatKey := s.flattenKey(key)
|
||||
s.AddSampleWithLabels(key, val, nil)
|
||||
}
|
||||
|
||||
// The following ...WithLabels methods correspond to Datadog's Tag extension to Statsd.
|
||||
// http://docs.datadoghq.com/guides/dogstatsd/#tags
|
||||
func (s *DogStatsdSink) SetGaugeWithLabels(key []string, val float32, labels []metrics.Label) {
|
||||
flatKey, tags := s.getFlatkeyAndCombinedLabels(key, labels)
|
||||
rate := 1.0
|
||||
s.client.Gauge(flatKey, float64(val), tags, rate)
|
||||
}
|
||||
|
||||
func (s *DogStatsdSink) IncrCounterWithLabels(key []string, val float32, labels []metrics.Label) {
|
||||
flatKey, tags := s.getFlatkeyAndCombinedLabels(key, labels)
|
||||
rate := 1.0
|
||||
s.client.Count(flatKey, int64(val), tags, rate)
|
||||
}
|
||||
|
||||
func (s *DogStatsdSink) AddSampleWithLabels(key []string, val float32, labels []metrics.Label) {
|
||||
flatKey, tags := s.getFlatkeyAndCombinedLabels(key, labels)
|
||||
rate := 1.0
|
||||
s.client.TimeInMilliseconds(flatKey, float64(val), tags, rate)
|
||||
}
|
||||
|
||||
func (s *DogStatsdSink) getFlatkeyAndCombinedLabels(key []string, labels []metrics.Label) (string, []string) {
|
||||
key, parsedLabels := s.parseKey(key)
|
||||
flatKey := s.flattenKey(key)
|
||||
labels = append(labels, parsedLabels...)
|
||||
|
||||
var tags []string
|
||||
for _, label := range labels {
|
||||
label.Name = strings.Map(sanitize, label.Name)
|
||||
label.Value = strings.Map(sanitize, label.Value)
|
||||
if label.Value != "" {
|
||||
tags = append(tags, fmt.Sprintf("%s:%s", label.Name, label.Value))
|
||||
} else {
|
||||
tags = append(tags, label.Name)
|
||||
}
|
||||
}
|
||||
|
||||
return flatKey, tags
|
||||
}
|
||||
|
||||
117
vendor/github.com/armon/go-metrics/datadog/dogstatsd_test.go
generated
vendored
117
vendor/github.com/armon/go-metrics/datadog/dogstatsd_test.go
generated
vendored
@@ -1,13 +1,14 @@
|
||||
package datadog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/armon/go-metrics"
|
||||
)
|
||||
|
||||
var EmptyTags []string
|
||||
var EmptyTags []metrics.Label
|
||||
|
||||
const (
|
||||
DogStatsdAddr = "127.0.0.1:7254"
|
||||
@@ -22,14 +23,14 @@ func MockGetHostname() string {
|
||||
|
||||
var ParseKeyTests = []struct {
|
||||
KeyToParse []string
|
||||
Tags []string
|
||||
Tags []metrics.Label
|
||||
PropagateHostname bool
|
||||
ExpectedKey []string
|
||||
ExpectedTags []string
|
||||
ExpectedTags []metrics.Label
|
||||
}{
|
||||
{[]string{"a", MockGetHostname(), "b", "c"}, EmptyTags, HostnameDisabled, []string{"a", "b", "c"}, EmptyTags},
|
||||
{[]string{"a", "b", "c"}, EmptyTags, HostnameDisabled, []string{"a", "b", "c"}, EmptyTags},
|
||||
{[]string{"a", "b", "c"}, EmptyTags, HostnameEnabled, []string{"a", "b", "c"}, []string{fmt.Sprintf("host:%s", MockGetHostname())}},
|
||||
{[]string{"a", "b", "c"}, EmptyTags, HostnameEnabled, []string{"a", "b", "c"}, []metrics.Label{{"host", MockGetHostname()}}},
|
||||
}
|
||||
|
||||
var FlattenKeyTests = []struct {
|
||||
@@ -44,7 +45,7 @@ var MetricSinkTests = []struct {
|
||||
Method string
|
||||
Metric []string
|
||||
Value interface{}
|
||||
Tags []string
|
||||
Tags []metrics.Label
|
||||
PropagateHostname bool
|
||||
Expected string
|
||||
}{
|
||||
@@ -53,13 +54,15 @@ var MetricSinkTests = []struct {
|
||||
{"AddSample", []string{"sample", "thing"}, float32(4), EmptyTags, HostnameDisabled, "sample.thing:4.000000|ms"},
|
||||
{"IncrCounter", []string{"count", "me"}, float32(3), EmptyTags, HostnameDisabled, "count.me:3|c"},
|
||||
|
||||
{"SetGauge", []string{"foo", "baz"}, float32(42), []string{"my_tag:my_value"}, HostnameDisabled, "foo.baz:42.000000|g|#my_tag:my_value"},
|
||||
{"SetGauge", []string{"foo", "bar"}, float32(42), []string{"my_tag:my_value", "other_tag:other_value"}, HostnameDisabled, "foo.bar:42.000000|g|#my_tag:my_value,other_tag:other_value"},
|
||||
{"SetGauge", []string{"foo", "bar"}, float32(42), []string{"my_tag:my_value", "other_tag:other_value"}, HostnameEnabled, "foo.bar:42.000000|g|#my_tag:my_value,other_tag:other_value,host:test_hostname"},
|
||||
{"SetGauge", []string{"foo", "baz"}, float32(42), []metrics.Label{{"my_tag", ""}}, HostnameDisabled, "foo.baz:42.000000|g|#my_tag"},
|
||||
{"SetGauge", []string{"foo", "baz"}, float32(42), []metrics.Label{{"my tag", "my_value"}}, HostnameDisabled, "foo.baz:42.000000|g|#my_tag:my_value"},
|
||||
{"SetGauge", []string{"foo", "bar"}, float32(42), []metrics.Label{{"my_tag", "my_value"}, {"other_tag", "other_value"}}, HostnameDisabled, "foo.bar:42.000000|g|#my_tag:my_value,other_tag:other_value"},
|
||||
{"SetGauge", []string{"foo", "bar"}, float32(42), []metrics.Label{{"my_tag", "my_value"}, {"other_tag", "other_value"}}, HostnameEnabled, "foo.bar:42.000000|g|#my_tag:my_value,other_tag:other_value,host:test_hostname"},
|
||||
}
|
||||
|
||||
func MockNewDogStatsdSink(addr string, tags []string, tagWithHostname bool) *DogStatsdSink {
|
||||
func mockNewDogStatsdSink(addr string, labels []metrics.Label, tagWithHostname bool) *DogStatsdSink {
|
||||
dog, _ := NewDogStatsdSink(addr, MockGetHostname())
|
||||
_, tags := dog.getFlatkeyAndCombinedLabels(nil, labels)
|
||||
dog.SetTags(tags)
|
||||
if tagWithHostname {
|
||||
dog.EnableHostNamePropagation()
|
||||
@@ -68,31 +71,7 @@ func MockNewDogStatsdSink(addr string, tags []string, tagWithHostname bool) *Dog
|
||||
return dog
|
||||
}
|
||||
|
||||
func TestParseKey(t *testing.T) {
|
||||
for _, tt := range ParseKeyTests {
|
||||
dog := MockNewDogStatsdSink(DogStatsdAddr, tt.Tags, tt.PropagateHostname)
|
||||
key, tags := dog.parseKey(tt.KeyToParse)
|
||||
|
||||
if !reflect.DeepEqual(key, tt.ExpectedKey) {
|
||||
t.Fatalf("Key Parsing failed for %v", tt.KeyToParse)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(tags, tt.ExpectedTags) {
|
||||
t.Fatalf("Tag Parsing Failed for %v", tt.KeyToParse)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFlattenKey(t *testing.T) {
|
||||
dog := MockNewDogStatsdSink(DogStatsdAddr, EmptyTags, HostnameDisabled)
|
||||
for _, tt := range FlattenKeyTests {
|
||||
if !reflect.DeepEqual(dog.flattenKey(tt.KeyToFlatten), tt.Expected) {
|
||||
t.Fatalf("Flattening %v failed", tt.KeyToFlatten)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMetricSink(t *testing.T) {
|
||||
func setupTestServerAndBuffer(t *testing.T) (*net.UDPConn, []byte) {
|
||||
udpAddr, err := net.ResolveUDPAddr("udp", DogStatsdAddr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
@@ -101,21 +80,71 @@ func TestMetricSink(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
return server, make([]byte, 1024)
|
||||
}
|
||||
|
||||
func TestParseKey(t *testing.T) {
|
||||
for _, tt := range ParseKeyTests {
|
||||
dog := mockNewDogStatsdSink(DogStatsdAddr, tt.Tags, tt.PropagateHostname)
|
||||
key, tags := dog.parseKey(tt.KeyToParse)
|
||||
|
||||
if !reflect.DeepEqual(key, tt.ExpectedKey) {
|
||||
t.Fatalf("Key Parsing failed for %v", tt.KeyToParse)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(tags, tt.ExpectedTags) {
|
||||
t.Fatalf("Tag Parsing Failed for %v, %v != %v", tt.KeyToParse, tags, tt.ExpectedTags)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFlattenKey(t *testing.T) {
|
||||
dog := mockNewDogStatsdSink(DogStatsdAddr, EmptyTags, HostnameDisabled)
|
||||
for _, tt := range FlattenKeyTests {
|
||||
if !reflect.DeepEqual(dog.flattenKey(tt.KeyToFlatten), tt.Expected) {
|
||||
t.Fatalf("Flattening %v failed", tt.KeyToFlatten)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestMetricSink(t *testing.T) {
|
||||
server, buf := setupTestServerAndBuffer(t)
|
||||
defer server.Close()
|
||||
|
||||
buf := make([]byte, 1024)
|
||||
|
||||
for _, tt := range MetricSinkTests {
|
||||
dog := MockNewDogStatsdSink(DogStatsdAddr, tt.Tags, tt.PropagateHostname)
|
||||
dog := mockNewDogStatsdSink(DogStatsdAddr, tt.Tags, tt.PropagateHostname)
|
||||
method := reflect.ValueOf(dog).MethodByName(tt.Method)
|
||||
method.Call([]reflect.Value{
|
||||
reflect.ValueOf(tt.Metric),
|
||||
reflect.ValueOf(tt.Value)})
|
||||
|
||||
n, _ := server.Read(buf)
|
||||
msg := buf[:n]
|
||||
if string(msg) != tt.Expected {
|
||||
t.Fatalf("Line %s does not match expected: %s", string(msg), tt.Expected)
|
||||
}
|
||||
assertServerMatchesExpected(t, server, buf, tt.Expected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTaggableMetrics(t *testing.T) {
|
||||
server, buf := setupTestServerAndBuffer(t)
|
||||
defer server.Close()
|
||||
|
||||
dog := mockNewDogStatsdSink(DogStatsdAddr, EmptyTags, HostnameDisabled)
|
||||
|
||||
dog.AddSampleWithLabels([]string{"sample", "thing"}, float32(4), []metrics.Label{{"tagkey", "tagvalue"}})
|
||||
assertServerMatchesExpected(t, server, buf, "sample.thing:4.000000|ms|#tagkey:tagvalue")
|
||||
|
||||
dog.SetGaugeWithLabels([]string{"sample", "thing"}, float32(4), []metrics.Label{{"tagkey", "tagvalue"}})
|
||||
assertServerMatchesExpected(t, server, buf, "sample.thing:4.000000|g|#tagkey:tagvalue")
|
||||
|
||||
dog.IncrCounterWithLabels([]string{"sample", "thing"}, float32(4), []metrics.Label{{"tagkey", "tagvalue"}})
|
||||
assertServerMatchesExpected(t, server, buf, "sample.thing:4|c|#tagkey:tagvalue")
|
||||
|
||||
dog = mockNewDogStatsdSink(DogStatsdAddr, []metrics.Label{{Name: "global"}}, HostnameEnabled) // with hostname, global tags
|
||||
dog.IncrCounterWithLabels([]string{"sample", "thing"}, float32(4), []metrics.Label{{"tagkey", "tagvalue"}})
|
||||
assertServerMatchesExpected(t, server, buf, "sample.thing:4|c|#global,tagkey:tagvalue,host:test_hostname")
|
||||
}
|
||||
|
||||
func assertServerMatchesExpected(t *testing.T, server *net.UDPConn, buf []byte, expected string) {
|
||||
n, _ := server.Read(buf)
|
||||
msg := buf[:n]
|
||||
if string(msg) != expected {
|
||||
t.Fatalf("Line %s does not match expected: %s", string(msg), expected)
|
||||
}
|
||||
}
|
||||
|
||||
157
vendor/github.com/armon/go-metrics/inmem.go
generated
vendored
157
vendor/github.com/armon/go-metrics/inmem.go
generated
vendored
@@ -1,8 +1,10 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"math"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
@@ -25,6 +27,8 @@ type InmemSink struct {
|
||||
// intervals is a slice of the retained intervals
|
||||
intervals []*IntervalMetrics
|
||||
intervalLock sync.RWMutex
|
||||
|
||||
rateDenom float64
|
||||
}
|
||||
|
||||
// IntervalMetrics stores the aggregated metrics
|
||||
@@ -36,7 +40,7 @@ type IntervalMetrics struct {
|
||||
Interval time.Time
|
||||
|
||||
// Gauges maps the key to the last set value
|
||||
Gauges map[string]float32
|
||||
Gauges map[string]GaugeValue
|
||||
|
||||
// Points maps the string to the list of emitted values
|
||||
// from EmitKey
|
||||
@@ -44,21 +48,21 @@ type IntervalMetrics struct {
|
||||
|
||||
// Counters maps the string key to a sum of the counter
|
||||
// values
|
||||
Counters map[string]*AggregateSample
|
||||
Counters map[string]SampledValue
|
||||
|
||||
// Samples maps the key to an AggregateSample,
|
||||
// which has the rolled up view of a sample
|
||||
Samples map[string]*AggregateSample
|
||||
Samples map[string]SampledValue
|
||||
}
|
||||
|
||||
// NewIntervalMetrics creates a new IntervalMetrics for a given interval
|
||||
func NewIntervalMetrics(intv time.Time) *IntervalMetrics {
|
||||
return &IntervalMetrics{
|
||||
Interval: intv,
|
||||
Gauges: make(map[string]float32),
|
||||
Gauges: make(map[string]GaugeValue),
|
||||
Points: make(map[string][]float32),
|
||||
Counters: make(map[string]*AggregateSample),
|
||||
Samples: make(map[string]*AggregateSample),
|
||||
Counters: make(map[string]SampledValue),
|
||||
Samples: make(map[string]SampledValue),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -66,11 +70,12 @@ func NewIntervalMetrics(intv time.Time) *IntervalMetrics {
|
||||
// about a sample
|
||||
type AggregateSample struct {
|
||||
Count int // The count of emitted pairs
|
||||
Rate float64 // The values rate per time unit (usually 1 second)
|
||||
Sum float64 // The sum of values
|
||||
SumSq float64 // The sum of squared values
|
||||
SumSq float64 `json:"-"` // The sum of squared values
|
||||
Min float64 // Minimum value
|
||||
Max float64 // Maximum value
|
||||
LastUpdated time.Time // When value was last updated
|
||||
LastUpdated time.Time `json:"-"` // When value was last updated
|
||||
}
|
||||
|
||||
// Computes a Stddev of the values
|
||||
@@ -92,7 +97,7 @@ func (a *AggregateSample) Mean() float64 {
|
||||
}
|
||||
|
||||
// Ingest is used to update a sample
|
||||
func (a *AggregateSample) Ingest(v float64) {
|
||||
func (a *AggregateSample) Ingest(v float64, rateDenom float64) {
|
||||
a.Count++
|
||||
a.Sum += v
|
||||
a.SumSq += (v * v)
|
||||
@@ -102,6 +107,7 @@ func (a *AggregateSample) Ingest(v float64) {
|
||||
if v > a.Max || a.Count == 1 {
|
||||
a.Max = v
|
||||
}
|
||||
a.Rate = float64(a.Sum) / rateDenom
|
||||
a.LastUpdated = time.Now()
|
||||
}
|
||||
|
||||
@@ -116,25 +122,49 @@ func (a *AggregateSample) String() string {
|
||||
}
|
||||
}
|
||||
|
||||
// NewInmemSinkFromURL creates an InmemSink from a URL. It is used
|
||||
// (and tested) from NewMetricSinkFromURL.
|
||||
func NewInmemSinkFromURL(u *url.URL) (MetricSink, error) {
|
||||
params := u.Query()
|
||||
|
||||
interval, err := time.ParseDuration(params.Get("interval"))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Bad 'interval' param: %s", err)
|
||||
}
|
||||
|
||||
retain, err := time.ParseDuration(params.Get("retain"))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Bad 'retain' param: %s", err)
|
||||
}
|
||||
|
||||
return NewInmemSink(interval, retain), nil
|
||||
}
|
||||
|
||||
// NewInmemSink is used to construct a new in-memory sink.
|
||||
// Uses an aggregation interval and maximum retention period.
|
||||
func NewInmemSink(interval, retain time.Duration) *InmemSink {
|
||||
rateTimeUnit := time.Second
|
||||
i := &InmemSink{
|
||||
interval: interval,
|
||||
retain: retain,
|
||||
maxIntervals: int(retain / interval),
|
||||
rateDenom: float64(interval.Nanoseconds()) / float64(rateTimeUnit.Nanoseconds()),
|
||||
}
|
||||
i.intervals = make([]*IntervalMetrics, 0, i.maxIntervals)
|
||||
return i
|
||||
}
|
||||
|
||||
func (i *InmemSink) SetGauge(key []string, val float32) {
|
||||
k := i.flattenKey(key)
|
||||
i.SetGaugeWithLabels(key, val, nil)
|
||||
}
|
||||
|
||||
func (i *InmemSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
|
||||
k, name := i.flattenKeyLabels(key, labels)
|
||||
intv := i.getInterval()
|
||||
|
||||
intv.Lock()
|
||||
defer intv.Unlock()
|
||||
intv.Gauges[k] = val
|
||||
intv.Gauges[k] = GaugeValue{Name: name, Value: val, Labels: labels}
|
||||
}
|
||||
|
||||
func (i *InmemSink) EmitKey(key []string, val float32) {
|
||||
@@ -148,33 +178,49 @@ func (i *InmemSink) EmitKey(key []string, val float32) {
|
||||
}
|
||||
|
||||
func (i *InmemSink) IncrCounter(key []string, val float32) {
|
||||
k := i.flattenKey(key)
|
||||
i.IncrCounterWithLabels(key, val, nil)
|
||||
}
|
||||
|
||||
func (i *InmemSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
|
||||
k, name := i.flattenKeyLabels(key, labels)
|
||||
intv := i.getInterval()
|
||||
|
||||
intv.Lock()
|
||||
defer intv.Unlock()
|
||||
|
||||
agg := intv.Counters[k]
|
||||
if agg == nil {
|
||||
agg = &AggregateSample{}
|
||||
agg, ok := intv.Counters[k]
|
||||
if !ok {
|
||||
agg = SampledValue{
|
||||
Name: name,
|
||||
AggregateSample: &AggregateSample{},
|
||||
Labels: labels,
|
||||
}
|
||||
intv.Counters[k] = agg
|
||||
}
|
||||
agg.Ingest(float64(val))
|
||||
agg.Ingest(float64(val), i.rateDenom)
|
||||
}
|
||||
|
||||
func (i *InmemSink) AddSample(key []string, val float32) {
|
||||
k := i.flattenKey(key)
|
||||
i.AddSampleWithLabels(key, val, nil)
|
||||
}
|
||||
|
||||
func (i *InmemSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
|
||||
k, name := i.flattenKeyLabels(key, labels)
|
||||
intv := i.getInterval()
|
||||
|
||||
intv.Lock()
|
||||
defer intv.Unlock()
|
||||
|
||||
agg := intv.Samples[k]
|
||||
if agg == nil {
|
||||
agg = &AggregateSample{}
|
||||
agg, ok := intv.Samples[k]
|
||||
if !ok {
|
||||
agg = SampledValue{
|
||||
Name: name,
|
||||
AggregateSample: &AggregateSample{},
|
||||
Labels: labels,
|
||||
}
|
||||
intv.Samples[k] = agg
|
||||
}
|
||||
agg.Ingest(float64(val))
|
||||
agg.Ingest(float64(val), i.rateDenom)
|
||||
}
|
||||
|
||||
// Data is used to retrieve all the aggregated metrics
|
||||
@@ -186,8 +232,37 @@ func (i *InmemSink) Data() []*IntervalMetrics {
|
||||
i.intervalLock.RLock()
|
||||
defer i.intervalLock.RUnlock()
|
||||
|
||||
intervals := make([]*IntervalMetrics, len(i.intervals))
|
||||
copy(intervals, i.intervals)
|
||||
n := len(i.intervals)
|
||||
intervals := make([]*IntervalMetrics, n)
|
||||
|
||||
copy(intervals[:n-1], i.intervals[:n-1])
|
||||
current := i.intervals[n-1]
|
||||
|
||||
// make its own copy for current interval
|
||||
intervals[n-1] = &IntervalMetrics{}
|
||||
copyCurrent := intervals[n-1]
|
||||
current.RLock()
|
||||
*copyCurrent = *current
|
||||
|
||||
copyCurrent.Gauges = make(map[string]GaugeValue, len(current.Gauges))
|
||||
for k, v := range current.Gauges {
|
||||
copyCurrent.Gauges[k] = v
|
||||
}
|
||||
// saved values will be not change, just copy its link
|
||||
copyCurrent.Points = make(map[string][]float32, len(current.Points))
|
||||
for k, v := range current.Points {
|
||||
copyCurrent.Points[k] = v
|
||||
}
|
||||
copyCurrent.Counters = make(map[string]SampledValue, len(current.Counters))
|
||||
for k, v := range current.Counters {
|
||||
copyCurrent.Counters[k] = v.deepCopy()
|
||||
}
|
||||
copyCurrent.Samples = make(map[string]SampledValue, len(current.Samples))
|
||||
for k, v := range current.Samples {
|
||||
copyCurrent.Samples[k] = v.deepCopy()
|
||||
}
|
||||
current.RUnlock()
|
||||
|
||||
return intervals
|
||||
}
|
||||
|
||||
@@ -236,6 +311,38 @@ func (i *InmemSink) getInterval() *IntervalMetrics {
|
||||
|
||||
// Flattens the key for formatting, removes spaces
|
||||
func (i *InmemSink) flattenKey(parts []string) string {
|
||||
joined := strings.Join(parts, ".")
|
||||
return strings.Replace(joined, " ", "_", -1)
|
||||
buf := &bytes.Buffer{}
|
||||
replacer := strings.NewReplacer(" ", "_")
|
||||
|
||||
if len(parts) > 0 {
|
||||
replacer.WriteString(buf, parts[0])
|
||||
}
|
||||
for _, part := range parts[1:] {
|
||||
replacer.WriteString(buf, ".")
|
||||
replacer.WriteString(buf, part)
|
||||
}
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// Flattens the key for formatting along with its labels, removes spaces
|
||||
func (i *InmemSink) flattenKeyLabels(parts []string, labels []Label) (string, string) {
|
||||
buf := &bytes.Buffer{}
|
||||
replacer := strings.NewReplacer(" ", "_")
|
||||
|
||||
if len(parts) > 0 {
|
||||
replacer.WriteString(buf, parts[0])
|
||||
}
|
||||
for _, part := range parts[1:] {
|
||||
replacer.WriteString(buf, ".")
|
||||
replacer.WriteString(buf, part)
|
||||
}
|
||||
|
||||
key := buf.String()
|
||||
|
||||
for _, label := range labels {
|
||||
replacer.WriteString(buf, fmt.Sprintf(";%s=%s", label.Name, label.Value))
|
||||
}
|
||||
|
||||
return buf.String(), key
|
||||
}
|
||||
|
||||
131
vendor/github.com/armon/go-metrics/inmem_endpoint.go
generated
vendored
Normal file
131
vendor/github.com/armon/go-metrics/inmem_endpoint.go
generated
vendored
Normal file
@@ -0,0 +1,131 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sort"
|
||||
"time"
|
||||
)
|
||||
|
||||
// MetricsSummary holds a roll-up of metrics info for a given interval
|
||||
type MetricsSummary struct {
|
||||
Timestamp string
|
||||
Gauges []GaugeValue
|
||||
Points []PointValue
|
||||
Counters []SampledValue
|
||||
Samples []SampledValue
|
||||
}
|
||||
|
||||
type GaugeValue struct {
|
||||
Name string
|
||||
Hash string `json:"-"`
|
||||
Value float32
|
||||
|
||||
Labels []Label `json:"-"`
|
||||
DisplayLabels map[string]string `json:"Labels"`
|
||||
}
|
||||
|
||||
type PointValue struct {
|
||||
Name string
|
||||
Points []float32
|
||||
}
|
||||
|
||||
type SampledValue struct {
|
||||
Name string
|
||||
Hash string `json:"-"`
|
||||
*AggregateSample
|
||||
Mean float64
|
||||
Stddev float64
|
||||
|
||||
Labels []Label `json:"-"`
|
||||
DisplayLabels map[string]string `json:"Labels"`
|
||||
}
|
||||
|
||||
// deepCopy allocates a new instance of AggregateSample
|
||||
func (source *SampledValue) deepCopy() SampledValue {
|
||||
dest := *source
|
||||
if source.AggregateSample != nil {
|
||||
dest.AggregateSample = &AggregateSample{}
|
||||
*dest.AggregateSample = *source.AggregateSample
|
||||
}
|
||||
return dest
|
||||
}
|
||||
|
||||
// DisplayMetrics returns a summary of the metrics from the most recent finished interval.
|
||||
func (i *InmemSink) DisplayMetrics(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
|
||||
data := i.Data()
|
||||
|
||||
var interval *IntervalMetrics
|
||||
n := len(data)
|
||||
switch {
|
||||
case n == 0:
|
||||
return nil, fmt.Errorf("no metric intervals have been initialized yet")
|
||||
case n == 1:
|
||||
// Show the current interval if it's all we have
|
||||
interval = data[0]
|
||||
default:
|
||||
// Show the most recent finished interval if we have one
|
||||
interval = data[n-2]
|
||||
}
|
||||
|
||||
interval.RLock()
|
||||
defer interval.RUnlock()
|
||||
|
||||
summary := MetricsSummary{
|
||||
Timestamp: interval.Interval.Round(time.Second).UTC().String(),
|
||||
Gauges: make([]GaugeValue, 0, len(interval.Gauges)),
|
||||
Points: make([]PointValue, 0, len(interval.Points)),
|
||||
}
|
||||
|
||||
// Format and sort the output of each metric type, so it gets displayed in a
|
||||
// deterministic order.
|
||||
for name, points := range interval.Points {
|
||||
summary.Points = append(summary.Points, PointValue{name, points})
|
||||
}
|
||||
sort.Slice(summary.Points, func(i, j int) bool {
|
||||
return summary.Points[i].Name < summary.Points[j].Name
|
||||
})
|
||||
|
||||
for hash, value := range interval.Gauges {
|
||||
value.Hash = hash
|
||||
value.DisplayLabels = make(map[string]string)
|
||||
for _, label := range value.Labels {
|
||||
value.DisplayLabels[label.Name] = label.Value
|
||||
}
|
||||
value.Labels = nil
|
||||
|
||||
summary.Gauges = append(summary.Gauges, value)
|
||||
}
|
||||
sort.Slice(summary.Gauges, func(i, j int) bool {
|
||||
return summary.Gauges[i].Hash < summary.Gauges[j].Hash
|
||||
})
|
||||
|
||||
summary.Counters = formatSamples(interval.Counters)
|
||||
summary.Samples = formatSamples(interval.Samples)
|
||||
|
||||
return summary, nil
|
||||
}
|
||||
|
||||
func formatSamples(source map[string]SampledValue) []SampledValue {
|
||||
output := make([]SampledValue, 0, len(source))
|
||||
for hash, sample := range source {
|
||||
displayLabels := make(map[string]string)
|
||||
for _, label := range sample.Labels {
|
||||
displayLabels[label.Name] = label.Value
|
||||
}
|
||||
|
||||
output = append(output, SampledValue{
|
||||
Name: sample.Name,
|
||||
Hash: hash,
|
||||
AggregateSample: sample.AggregateSample,
|
||||
Mean: sample.AggregateSample.Mean(),
|
||||
Stddev: sample.AggregateSample.Stddev(),
|
||||
DisplayLabels: displayLabels,
|
||||
})
|
||||
}
|
||||
sort.Slice(output, func(i, j int) bool {
|
||||
return output[i].Hash < output[j].Hash
|
||||
})
|
||||
|
||||
return output
|
||||
}
|
||||
275
vendor/github.com/armon/go-metrics/inmem_endpoint_test.go
generated
vendored
Normal file
275
vendor/github.com/armon/go-metrics/inmem_endpoint_test.go
generated
vendored
Normal file
@@ -0,0 +1,275 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/pascaldekloe/goe/verify"
|
||||
)
|
||||
|
||||
func TestDisplayMetrics(t *testing.T) {
|
||||
interval := 10 * time.Millisecond
|
||||
inm := NewInmemSink(interval, 50*time.Millisecond)
|
||||
|
||||
// Add data points
|
||||
inm.SetGauge([]string{"foo", "bar"}, 42)
|
||||
inm.SetGaugeWithLabels([]string{"foo", "bar"}, 23, []Label{{"a", "b"}})
|
||||
inm.EmitKey([]string{"foo", "bar"}, 42)
|
||||
inm.IncrCounter([]string{"foo", "bar"}, 20)
|
||||
inm.IncrCounter([]string{"foo", "bar"}, 22)
|
||||
inm.IncrCounterWithLabels([]string{"foo", "bar"}, 20, []Label{{"a", "b"}})
|
||||
inm.IncrCounterWithLabels([]string{"foo", "bar"}, 40, []Label{{"a", "b"}})
|
||||
inm.AddSample([]string{"foo", "bar"}, 20)
|
||||
inm.AddSample([]string{"foo", "bar"}, 24)
|
||||
inm.AddSampleWithLabels([]string{"foo", "bar"}, 23, []Label{{"a", "b"}})
|
||||
inm.AddSampleWithLabels([]string{"foo", "bar"}, 33, []Label{{"a", "b"}})
|
||||
|
||||
data := inm.Data()
|
||||
if len(data) != 1 {
|
||||
t.Fatalf("bad: %v", data)
|
||||
}
|
||||
|
||||
expected := MetricsSummary{
|
||||
Timestamp: data[0].Interval.Round(time.Second).UTC().String(),
|
||||
Gauges: []GaugeValue{
|
||||
{
|
||||
Name: "foo.bar",
|
||||
Hash: "foo.bar",
|
||||
Value: float32(42),
|
||||
DisplayLabels: map[string]string{},
|
||||
},
|
||||
{
|
||||
Name: "foo.bar",
|
||||
Hash: "foo.bar;a=b",
|
||||
Value: float32(23),
|
||||
DisplayLabels: map[string]string{"a": "b"},
|
||||
},
|
||||
},
|
||||
Points: []PointValue{
|
||||
{
|
||||
Name: "foo.bar",
|
||||
Points: []float32{42},
|
||||
},
|
||||
},
|
||||
Counters: []SampledValue{
|
||||
{
|
||||
Name: "foo.bar",
|
||||
Hash: "foo.bar",
|
||||
AggregateSample: &AggregateSample{
|
||||
Count: 2,
|
||||
Min: 20,
|
||||
Max: 22,
|
||||
Sum: 42,
|
||||
SumSq: 884,
|
||||
Rate: 4200,
|
||||
},
|
||||
Mean: 21,
|
||||
Stddev: 1.4142135623730951,
|
||||
},
|
||||
{
|
||||
Name: "foo.bar",
|
||||
Hash: "foo.bar;a=b",
|
||||
AggregateSample: &AggregateSample{
|
||||
Count: 2,
|
||||
Min: 20,
|
||||
Max: 40,
|
||||
Sum: 60,
|
||||
SumSq: 2000,
|
||||
Rate: 6000,
|
||||
},
|
||||
Mean: 30,
|
||||
Stddev: 14.142135623730951,
|
||||
DisplayLabels: map[string]string{"a": "b"},
|
||||
},
|
||||
},
|
||||
Samples: []SampledValue{
|
||||
{
|
||||
Name: "foo.bar",
|
||||
Hash: "foo.bar",
|
||||
AggregateSample: &AggregateSample{
|
||||
Count: 2,
|
||||
Min: 20,
|
||||
Max: 24,
|
||||
Sum: 44,
|
||||
SumSq: 976,
|
||||
Rate: 4400,
|
||||
},
|
||||
Mean: 22,
|
||||
Stddev: 2.8284271247461903,
|
||||
},
|
||||
{
|
||||
Name: "foo.bar",
|
||||
Hash: "foo.bar;a=b",
|
||||
AggregateSample: &AggregateSample{
|
||||
Count: 2,
|
||||
Min: 23,
|
||||
Max: 33,
|
||||
Sum: 56,
|
||||
SumSq: 1618,
|
||||
Rate: 5600,
|
||||
},
|
||||
Mean: 28,
|
||||
Stddev: 7.0710678118654755,
|
||||
DisplayLabels: map[string]string{"a": "b"},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
raw, err := inm.DisplayMetrics(nil, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %v", err)
|
||||
}
|
||||
result := raw.(MetricsSummary)
|
||||
|
||||
// Ignore the LastUpdated field, we don't export that anyway
|
||||
for i, got := range result.Counters {
|
||||
expected.Counters[i].LastUpdated = got.LastUpdated
|
||||
}
|
||||
for i, got := range result.Samples {
|
||||
expected.Samples[i].LastUpdated = got.LastUpdated
|
||||
}
|
||||
|
||||
verify.Values(t, "all", result, expected)
|
||||
}
|
||||
|
||||
func TestDisplayMetrics_RaceSetGauge(t *testing.T) {
|
||||
interval := 200 * time.Millisecond
|
||||
inm := NewInmemSink(interval, 10*interval)
|
||||
result := make(chan float32)
|
||||
|
||||
go func() {
|
||||
for {
|
||||
time.Sleep(150 * time.Millisecond)
|
||||
inm.SetGauge([]string{"foo", "bar"}, float32(42))
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
start := time.Now()
|
||||
var summary MetricsSummary
|
||||
// test for twenty intervals
|
||||
for time.Now().Sub(start) < 20*interval {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
raw, _ := inm.DisplayMetrics(nil, nil)
|
||||
summary = raw.(MetricsSummary)
|
||||
}
|
||||
// save result
|
||||
for _, g := range summary.Gauges {
|
||||
if g.Name == "foo.bar" {
|
||||
result <- g.Value
|
||||
}
|
||||
}
|
||||
close(result)
|
||||
}()
|
||||
|
||||
got := <-result
|
||||
verify.Values(t, "all", got, float32(42))
|
||||
}
|
||||
|
||||
func TestDisplayMetrics_RaceAddSample(t *testing.T) {
|
||||
interval := 200 * time.Millisecond
|
||||
inm := NewInmemSink(interval, 10*interval)
|
||||
result := make(chan float32)
|
||||
|
||||
go func() {
|
||||
for {
|
||||
time.Sleep(75 * time.Millisecond)
|
||||
inm.AddSample([]string{"foo", "bar"}, float32(0.0))
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
start := time.Now()
|
||||
var summary MetricsSummary
|
||||
// test for twenty intervals
|
||||
for time.Now().Sub(start) < 20*interval {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
raw, _ := inm.DisplayMetrics(nil, nil)
|
||||
summary = raw.(MetricsSummary)
|
||||
}
|
||||
// save result
|
||||
for _, g := range summary.Gauges {
|
||||
if g.Name == "foo.bar" {
|
||||
result <- g.Value
|
||||
}
|
||||
}
|
||||
close(result)
|
||||
}()
|
||||
|
||||
got := <-result
|
||||
verify.Values(t, "all", got, float32(0.0))
|
||||
}
|
||||
|
||||
func TestDisplayMetrics_RaceIncrCounter(t *testing.T) {
|
||||
interval := 200 * time.Millisecond
|
||||
inm := NewInmemSink(interval, 10*interval)
|
||||
result := make(chan float32)
|
||||
|
||||
go func() {
|
||||
for {
|
||||
time.Sleep(75 * time.Millisecond)
|
||||
inm.IncrCounter([]string{"foo", "bar"}, float32(0.0))
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
start := time.Now()
|
||||
var summary MetricsSummary
|
||||
// test for twenty intervals
|
||||
for time.Now().Sub(start) < 20*interval {
|
||||
time.Sleep(30 * time.Millisecond)
|
||||
raw, _ := inm.DisplayMetrics(nil, nil)
|
||||
summary = raw.(MetricsSummary)
|
||||
}
|
||||
// save result for testing
|
||||
for _, g := range summary.Gauges {
|
||||
if g.Name == "foo.bar" {
|
||||
result <- g.Value
|
||||
}
|
||||
}
|
||||
close(result)
|
||||
}()
|
||||
|
||||
got := <-result
|
||||
verify.Values(t, "all", got, float32(0.0))
|
||||
}
|
||||
|
||||
func TestDisplayMetrics_RaceMetricsSetGauge(t *testing.T) {
|
||||
interval := 200 * time.Millisecond
|
||||
inm := NewInmemSink(interval, 10*interval)
|
||||
met := &Metrics{Config: Config{FilterDefault: true}, sink: inm}
|
||||
result := make(chan float32)
|
||||
labels := []Label{
|
||||
{"name1", "value1"},
|
||||
{"name2", "value2"},
|
||||
}
|
||||
|
||||
go func() {
|
||||
for {
|
||||
time.Sleep(75 * time.Millisecond)
|
||||
met.SetGaugeWithLabels([]string{"foo", "bar"}, float32(42), labels)
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
start := time.Now()
|
||||
var summary MetricsSummary
|
||||
// test for twenty intervals
|
||||
for time.Now().Sub(start) < 40*interval {
|
||||
time.Sleep(150 * time.Millisecond)
|
||||
raw, _ := inm.DisplayMetrics(nil, nil)
|
||||
summary = raw.(MetricsSummary)
|
||||
}
|
||||
// save result
|
||||
for _, g := range summary.Gauges {
|
||||
if g.Name == "foo.bar" {
|
||||
result <- g.Value
|
||||
}
|
||||
}
|
||||
close(result)
|
||||
}()
|
||||
|
||||
got := <-result
|
||||
verify.Values(t, "all", got, float32(42))
|
||||
}
|
||||
|
||||
33
vendor/github.com/armon/go-metrics/inmem_signal.go
generated
vendored
33
vendor/github.com/armon/go-metrics/inmem_signal.go
generated
vendored
@@ -6,6 +6,7 @@ import (
|
||||
"io"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
)
|
||||
@@ -75,22 +76,25 @@ func (i *InmemSignal) dumpStats() {
|
||||
|
||||
data := i.inm.Data()
|
||||
// Skip the last period which is still being aggregated
|
||||
for i := 0; i < len(data)-1; i++ {
|
||||
intv := data[i]
|
||||
for j := 0; j < len(data)-1; j++ {
|
||||
intv := data[j]
|
||||
intv.RLock()
|
||||
for name, val := range intv.Gauges {
|
||||
fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val)
|
||||
for _, val := range intv.Gauges {
|
||||
name := i.flattenLabels(val.Name, val.Labels)
|
||||
fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val.Value)
|
||||
}
|
||||
for name, vals := range intv.Points {
|
||||
for _, val := range vals {
|
||||
fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val)
|
||||
}
|
||||
}
|
||||
for name, agg := range intv.Counters {
|
||||
fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg)
|
||||
for _, agg := range intv.Counters {
|
||||
name := i.flattenLabels(agg.Name, agg.Labels)
|
||||
fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg.AggregateSample)
|
||||
}
|
||||
for name, agg := range intv.Samples {
|
||||
fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg)
|
||||
for _, agg := range intv.Samples {
|
||||
name := i.flattenLabels(agg.Name, agg.Labels)
|
||||
fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg.AggregateSample)
|
||||
}
|
||||
intv.RUnlock()
|
||||
}
|
||||
@@ -98,3 +102,16 @@ func (i *InmemSignal) dumpStats() {
|
||||
// Write out the bytes
|
||||
i.w.Write(buf.Bytes())
|
||||
}
|
||||
|
||||
// Flattens the key for formatting along with its labels, removes spaces
|
||||
func (i *InmemSignal) flattenLabels(name string, labels []Label) string {
|
||||
buf := bytes.NewBufferString(name)
|
||||
replacer := strings.NewReplacer(" ", "_", ":", "_")
|
||||
|
||||
for _, label := range labels {
|
||||
replacer.WriteString(buf, ".")
|
||||
replacer.WriteString(buf, label.Value)
|
||||
}
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
40
vendor/github.com/armon/go-metrics/inmem_signal_test.go
generated
vendored
40
vendor/github.com/armon/go-metrics/inmem_signal_test.go
generated
vendored
@@ -4,13 +4,14 @@ import (
|
||||
"bytes"
|
||||
"os"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestInmemSignal(t *testing.T) {
|
||||
buf := bytes.NewBuffer(nil)
|
||||
buf := newBuffer()
|
||||
inm := NewInmemSink(10*time.Millisecond, 50*time.Millisecond)
|
||||
sig := NewInmemSignal(inm, syscall.SIGUSR1, buf)
|
||||
defer sig.Stop()
|
||||
@@ -19,6 +20,9 @@ func TestInmemSignal(t *testing.T) {
|
||||
inm.EmitKey([]string{"bar"}, 42)
|
||||
inm.IncrCounter([]string{"baz"}, 42)
|
||||
inm.AddSample([]string{"wow"}, 42)
|
||||
inm.SetGaugeWithLabels([]string{"asdf"}, 42, []Label{{"a", "b"}})
|
||||
inm.IncrCounterWithLabels([]string{"qwer"}, 42, []Label{{"a", "b"}})
|
||||
inm.AddSampleWithLabels([]string{"zxcv"}, 42, []Label{{"a", "b"}})
|
||||
|
||||
// Wait for period to end
|
||||
time.Sleep(15 * time.Millisecond)
|
||||
@@ -30,7 +34,7 @@ func TestInmemSignal(t *testing.T) {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
|
||||
// Check the output
|
||||
out := string(buf.Bytes())
|
||||
out := buf.String()
|
||||
if !strings.Contains(out, "[G] 'foo': 42") {
|
||||
t.Fatalf("bad: %v", out)
|
||||
}
|
||||
@@ -43,4 +47,36 @@ func TestInmemSignal(t *testing.T) {
|
||||
if !strings.Contains(out, "[S] 'wow': Count: 1 Sum: 42") {
|
||||
t.Fatalf("bad: %v", out)
|
||||
}
|
||||
if !strings.Contains(out, "[G] 'asdf.b': 42") {
|
||||
t.Fatalf("bad: %v", out)
|
||||
}
|
||||
if !strings.Contains(out, "[C] 'qwer.b': Count: 1 Sum: 42") {
|
||||
t.Fatalf("bad: %v", out)
|
||||
}
|
||||
if !strings.Contains(out, "[S] 'zxcv.b': Count: 1 Sum: 42") {
|
||||
t.Fatalf("bad: %v", out)
|
||||
}
|
||||
}
|
||||
|
||||
func newBuffer() *syncBuffer {
|
||||
return &syncBuffer{buf: bytes.NewBuffer(nil)}
|
||||
}
|
||||
|
||||
type syncBuffer struct {
|
||||
buf *bytes.Buffer
|
||||
lock sync.Mutex
|
||||
}
|
||||
|
||||
func (s *syncBuffer) Write(p []byte) (int, error) {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
return s.buf.Write(p)
|
||||
}
|
||||
|
||||
func (s *syncBuffer) String() string {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
return s.buf.String()
|
||||
}
|
||||
|
||||
146
vendor/github.com/armon/go-metrics/inmem_test.go
generated
vendored
146
vendor/github.com/armon/go-metrics/inmem_test.go
generated
vendored
@@ -2,6 +2,8 @@ package metrics
|
||||
|
||||
import (
|
||||
"math"
|
||||
"net/url"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
@@ -16,11 +18,15 @@ func TestInmemSink(t *testing.T) {
|
||||
|
||||
// Add data points
|
||||
inm.SetGauge([]string{"foo", "bar"}, 42)
|
||||
inm.SetGaugeWithLabels([]string{"foo", "bar"}, 23, []Label{{"a", "b"}})
|
||||
inm.EmitKey([]string{"foo", "bar"}, 42)
|
||||
inm.IncrCounter([]string{"foo", "bar"}, 20)
|
||||
inm.IncrCounter([]string{"foo", "bar"}, 22)
|
||||
inm.IncrCounterWithLabels([]string{"foo", "bar"}, 20, []Label{{"a", "b"}})
|
||||
inm.IncrCounterWithLabels([]string{"foo", "bar"}, 22, []Label{{"a", "b"}})
|
||||
inm.AddSample([]string{"foo", "bar"}, 20)
|
||||
inm.AddSample([]string{"foo", "bar"}, 22)
|
||||
inm.AddSampleWithLabels([]string{"foo", "bar"}, 23, []Label{{"a", "b"}})
|
||||
|
||||
data = inm.Data()
|
||||
if len(data) != 1 {
|
||||
@@ -33,46 +39,57 @@ func TestInmemSink(t *testing.T) {
|
||||
if time.Now().Sub(intvM.Interval) > 10*time.Millisecond {
|
||||
t.Fatalf("interval too old")
|
||||
}
|
||||
if intvM.Gauges["foo.bar"] != 42 {
|
||||
if intvM.Gauges["foo.bar"].Value != 42 {
|
||||
t.Fatalf("bad val: %v", intvM.Gauges)
|
||||
}
|
||||
if intvM.Gauges["foo.bar;a=b"].Value != 23 {
|
||||
t.Fatalf("bad val: %v", intvM.Gauges)
|
||||
}
|
||||
if intvM.Points["foo.bar"][0] != 42 {
|
||||
t.Fatalf("bad val: %v", intvM.Points)
|
||||
}
|
||||
|
||||
agg := intvM.Counters["foo.bar"]
|
||||
if agg.Count != 2 {
|
||||
t.Fatalf("bad val: %v", agg)
|
||||
}
|
||||
if agg.Sum != 42 {
|
||||
t.Fatalf("bad val: %v", agg)
|
||||
}
|
||||
if agg.SumSq != 884 {
|
||||
t.Fatalf("bad val: %v", agg)
|
||||
}
|
||||
if agg.Min != 20 {
|
||||
t.Fatalf("bad val: %v", agg)
|
||||
}
|
||||
if agg.Max != 22 {
|
||||
t.Fatalf("bad val: %v", agg)
|
||||
}
|
||||
if agg.Mean() != 21 {
|
||||
t.Fatalf("bad val: %v", agg)
|
||||
}
|
||||
if agg.Stddev() != math.Sqrt(2) {
|
||||
t.Fatalf("bad val: %v", agg)
|
||||
for _, agg := range []SampledValue{intvM.Counters["foo.bar"], intvM.Counters["foo.bar;a=b"]} {
|
||||
if agg.Count != 2 {
|
||||
t.Fatalf("bad val: %v", agg)
|
||||
}
|
||||
if agg.Rate != 4200 {
|
||||
t.Fatalf("bad val: %v", agg.Rate)
|
||||
}
|
||||
if agg.Sum != 42 {
|
||||
t.Fatalf("bad val: %v", agg)
|
||||
}
|
||||
if agg.SumSq != 884 {
|
||||
t.Fatalf("bad val: %v", agg)
|
||||
}
|
||||
if agg.Min != 20 {
|
||||
t.Fatalf("bad val: %v", agg)
|
||||
}
|
||||
if agg.Max != 22 {
|
||||
t.Fatalf("bad val: %v", agg)
|
||||
}
|
||||
if agg.AggregateSample.Mean() != 21 {
|
||||
t.Fatalf("bad val: %v", agg)
|
||||
}
|
||||
if agg.AggregateSample.Stddev() != math.Sqrt(2) {
|
||||
t.Fatalf("bad val: %v", agg)
|
||||
}
|
||||
|
||||
if agg.LastUpdated.IsZero() {
|
||||
t.Fatalf("agg.LastUpdated is not set: %v", agg)
|
||||
}
|
||||
|
||||
diff := time.Now().Sub(agg.LastUpdated).Seconds()
|
||||
if diff > 1 {
|
||||
t.Fatalf("time diff too great: %f", diff)
|
||||
}
|
||||
}
|
||||
|
||||
if agg.LastUpdated.IsZero() {
|
||||
t.Fatalf("agg.LastUpdated is not set: %v", agg)
|
||||
if _, ok := intvM.Samples["foo.bar"]; !ok {
|
||||
t.Fatalf("missing sample")
|
||||
}
|
||||
|
||||
diff := time.Now().Sub(agg.LastUpdated).Seconds()
|
||||
if diff > 1 {
|
||||
t.Fatalf("time diff too great: %f", diff)
|
||||
}
|
||||
|
||||
if agg = intvM.Samples["foo.bar"]; agg == nil {
|
||||
if _, ok := intvM.Samples["foo.bar;a=b"]; !ok {
|
||||
t.Fatalf("missing sample")
|
||||
}
|
||||
|
||||
@@ -96,9 +113,78 @@ func TestInmemSink(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewInmemSinkFromURL(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
input string
|
||||
expectErr string
|
||||
expectInterval time.Duration
|
||||
expectRetain time.Duration
|
||||
}{
|
||||
{
|
||||
desc: "interval and duration are set via query params",
|
||||
input: "inmem://?interval=11s&retain=22s",
|
||||
expectInterval: duration(t, "11s"),
|
||||
expectRetain: duration(t, "22s"),
|
||||
},
|
||||
{
|
||||
desc: "interval is required",
|
||||
input: "inmem://?retain=22s",
|
||||
expectErr: "Bad 'interval' param",
|
||||
},
|
||||
{
|
||||
desc: "interval must be a duration",
|
||||
input: "inmem://?retain=30s&interval=HIYA",
|
||||
expectErr: "Bad 'interval' param",
|
||||
},
|
||||
{
|
||||
desc: "retain is required",
|
||||
input: "inmem://?interval=30s",
|
||||
expectErr: "Bad 'retain' param",
|
||||
},
|
||||
{
|
||||
desc: "retain must be a valid duration",
|
||||
input: "inmem://?interval=30s&retain=HELLO",
|
||||
expectErr: "Bad 'retain' param",
|
||||
},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
u, err := url.Parse(tc.input)
|
||||
if err != nil {
|
||||
t.Fatalf("error parsing URL: %s", err)
|
||||
}
|
||||
ms, err := NewInmemSinkFromURL(u)
|
||||
if tc.expectErr != "" {
|
||||
if !strings.Contains(err.Error(), tc.expectErr) {
|
||||
t.Fatalf("expected err: %q, to contain: %q", err, tc.expectErr)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected err: %s", err)
|
||||
}
|
||||
is := ms.(*InmemSink)
|
||||
if is.interval != tc.expectInterval {
|
||||
t.Fatalf("expected interval %s, got: %s", tc.expectInterval, is.interval)
|
||||
}
|
||||
if is.retain != tc.expectRetain {
|
||||
t.Fatalf("expected retain %s, got: %s", tc.expectRetain, is.retain)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func min(a, b int) int {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func duration(t *testing.T, s string) time.Duration {
|
||||
dur, err := time.ParseDuration(s)
|
||||
if err != nil {
|
||||
t.Fatalf("error parsing duration: %s", err)
|
||||
}
|
||||
return dur
|
||||
}
|
||||
|
||||
183
vendor/github.com/armon/go-metrics/metrics.go
generated
vendored
183
vendor/github.com/armon/go-metrics/metrics.go
generated
vendored
@@ -2,20 +2,44 @@ package metrics
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/go-immutable-radix"
|
||||
)
|
||||
|
||||
type Label struct {
|
||||
Name string
|
||||
Value string
|
||||
}
|
||||
|
||||
func (m *Metrics) SetGauge(key []string, val float32) {
|
||||
if m.HostName != "" && m.EnableHostname {
|
||||
key = insert(0, m.HostName, key)
|
||||
m.SetGaugeWithLabels(key, val, nil)
|
||||
}
|
||||
|
||||
func (m *Metrics) SetGaugeWithLabels(key []string, val float32, labels []Label) {
|
||||
if m.HostName != "" {
|
||||
if m.EnableHostnameLabel {
|
||||
labels = append(labels, Label{"host", m.HostName})
|
||||
} else if m.EnableHostname {
|
||||
key = insert(0, m.HostName, key)
|
||||
}
|
||||
}
|
||||
if m.EnableTypePrefix {
|
||||
key = insert(0, "gauge", key)
|
||||
}
|
||||
if m.ServiceName != "" {
|
||||
key = insert(0, m.ServiceName, key)
|
||||
if m.EnableServiceLabel {
|
||||
labels = append(labels, Label{"service", m.ServiceName})
|
||||
} else {
|
||||
key = insert(0, m.ServiceName, key)
|
||||
}
|
||||
}
|
||||
m.sink.SetGauge(key, val)
|
||||
allowed, labelsFiltered := m.allowMetric(key, labels)
|
||||
if !allowed {
|
||||
return
|
||||
}
|
||||
m.sink.SetGaugeWithLabels(key, val, labelsFiltered)
|
||||
}
|
||||
|
||||
func (m *Metrics) EmitKey(key []string, val float32) {
|
||||
@@ -25,40 +49,179 @@ func (m *Metrics) EmitKey(key []string, val float32) {
|
||||
if m.ServiceName != "" {
|
||||
key = insert(0, m.ServiceName, key)
|
||||
}
|
||||
allowed, _ := m.allowMetric(key, nil)
|
||||
if !allowed {
|
||||
return
|
||||
}
|
||||
m.sink.EmitKey(key, val)
|
||||
}
|
||||
|
||||
func (m *Metrics) IncrCounter(key []string, val float32) {
|
||||
m.IncrCounterWithLabels(key, val, nil)
|
||||
}
|
||||
|
||||
func (m *Metrics) IncrCounterWithLabels(key []string, val float32, labels []Label) {
|
||||
if m.HostName != "" && m.EnableHostnameLabel {
|
||||
labels = append(labels, Label{"host", m.HostName})
|
||||
}
|
||||
if m.EnableTypePrefix {
|
||||
key = insert(0, "counter", key)
|
||||
}
|
||||
if m.ServiceName != "" {
|
||||
key = insert(0, m.ServiceName, key)
|
||||
if m.EnableServiceLabel {
|
||||
labels = append(labels, Label{"service", m.ServiceName})
|
||||
} else {
|
||||
key = insert(0, m.ServiceName, key)
|
||||
}
|
||||
}
|
||||
m.sink.IncrCounter(key, val)
|
||||
allowed, labelsFiltered := m.allowMetric(key, labels)
|
||||
if !allowed {
|
||||
return
|
||||
}
|
||||
m.sink.IncrCounterWithLabels(key, val, labelsFiltered)
|
||||
}
|
||||
|
||||
func (m *Metrics) AddSample(key []string, val float32) {
|
||||
m.AddSampleWithLabels(key, val, nil)
|
||||
}
|
||||
|
||||
func (m *Metrics) AddSampleWithLabels(key []string, val float32, labels []Label) {
|
||||
if m.HostName != "" && m.EnableHostnameLabel {
|
||||
labels = append(labels, Label{"host", m.HostName})
|
||||
}
|
||||
if m.EnableTypePrefix {
|
||||
key = insert(0, "sample", key)
|
||||
}
|
||||
if m.ServiceName != "" {
|
||||
key = insert(0, m.ServiceName, key)
|
||||
if m.EnableServiceLabel {
|
||||
labels = append(labels, Label{"service", m.ServiceName})
|
||||
} else {
|
||||
key = insert(0, m.ServiceName, key)
|
||||
}
|
||||
}
|
||||
m.sink.AddSample(key, val)
|
||||
allowed, labelsFiltered := m.allowMetric(key, labels)
|
||||
if !allowed {
|
||||
return
|
||||
}
|
||||
m.sink.AddSampleWithLabels(key, val, labelsFiltered)
|
||||
}
|
||||
|
||||
func (m *Metrics) MeasureSince(key []string, start time.Time) {
|
||||
m.MeasureSinceWithLabels(key, start, nil)
|
||||
}
|
||||
|
||||
func (m *Metrics) MeasureSinceWithLabels(key []string, start time.Time, labels []Label) {
|
||||
if m.HostName != "" && m.EnableHostnameLabel {
|
||||
labels = append(labels, Label{"host", m.HostName})
|
||||
}
|
||||
if m.EnableTypePrefix {
|
||||
key = insert(0, "timer", key)
|
||||
}
|
||||
if m.ServiceName != "" {
|
||||
key = insert(0, m.ServiceName, key)
|
||||
if m.EnableServiceLabel {
|
||||
labels = append(labels, Label{"service", m.ServiceName})
|
||||
} else {
|
||||
key = insert(0, m.ServiceName, key)
|
||||
}
|
||||
}
|
||||
allowed, labelsFiltered := m.allowMetric(key, labels)
|
||||
if !allowed {
|
||||
return
|
||||
}
|
||||
now := time.Now()
|
||||
elapsed := now.Sub(start)
|
||||
msec := float32(elapsed.Nanoseconds()) / float32(m.TimerGranularity)
|
||||
m.sink.AddSample(key, msec)
|
||||
m.sink.AddSampleWithLabels(key, msec, labelsFiltered)
|
||||
}
|
||||
|
||||
// UpdateFilter overwrites the existing filter with the given rules.
|
||||
func (m *Metrics) UpdateFilter(allow, block []string) {
|
||||
m.UpdateFilterAndLabels(allow, block, m.AllowedLabels, m.BlockedLabels)
|
||||
}
|
||||
|
||||
// UpdateFilterAndLabels overwrites the existing filter with the given rules.
|
||||
func (m *Metrics) UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) {
|
||||
m.filterLock.Lock()
|
||||
defer m.filterLock.Unlock()
|
||||
|
||||
m.AllowedPrefixes = allow
|
||||
m.BlockedPrefixes = block
|
||||
|
||||
if allowedLabels == nil {
|
||||
// Having a white list means we take only elements from it
|
||||
m.allowedLabels = nil
|
||||
} else {
|
||||
m.allowedLabels = make(map[string]bool)
|
||||
for _, v := range allowedLabels {
|
||||
m.allowedLabels[v] = true
|
||||
}
|
||||
}
|
||||
m.blockedLabels = make(map[string]bool)
|
||||
for _, v := range blockedLabels {
|
||||
m.blockedLabels[v] = true
|
||||
}
|
||||
m.AllowedLabels = allowedLabels
|
||||
m.BlockedLabels = blockedLabels
|
||||
|
||||
m.filter = iradix.New()
|
||||
for _, prefix := range m.AllowedPrefixes {
|
||||
m.filter, _, _ = m.filter.Insert([]byte(prefix), true)
|
||||
}
|
||||
for _, prefix := range m.BlockedPrefixes {
|
||||
m.filter, _, _ = m.filter.Insert([]byte(prefix), false)
|
||||
}
|
||||
}
|
||||
|
||||
// labelIsAllowed return true if a should be included in metric
|
||||
// the caller should lock m.filterLock while calling this method
|
||||
func (m *Metrics) labelIsAllowed(label *Label) bool {
|
||||
labelName := (*label).Name
|
||||
if m.blockedLabels != nil {
|
||||
_, ok := m.blockedLabels[labelName]
|
||||
if ok {
|
||||
// If present, let's remove this label
|
||||
return false
|
||||
}
|
||||
}
|
||||
if m.allowedLabels != nil {
|
||||
_, ok := m.allowedLabels[labelName]
|
||||
return ok
|
||||
}
|
||||
// Allow by default
|
||||
return true
|
||||
}
|
||||
|
||||
// filterLabels return only allowed labels
|
||||
// the caller should lock m.filterLock while calling this method
|
||||
func (m *Metrics) filterLabels(labels []Label) []Label {
|
||||
if labels == nil {
|
||||
return nil
|
||||
}
|
||||
toReturn := []Label{}
|
||||
for _, label := range labels {
|
||||
if m.labelIsAllowed(&label) {
|
||||
toReturn = append(toReturn, label)
|
||||
}
|
||||
}
|
||||
return toReturn
|
||||
}
|
||||
|
||||
// Returns whether the metric should be allowed based on configured prefix filters
|
||||
// Also return the applicable labels
|
||||
func (m *Metrics) allowMetric(key []string, labels []Label) (bool, []Label) {
|
||||
m.filterLock.RLock()
|
||||
defer m.filterLock.RUnlock()
|
||||
|
||||
if m.filter == nil || m.filter.Len() == 0 {
|
||||
return m.Config.FilterDefault, m.filterLabels(labels)
|
||||
}
|
||||
|
||||
_, allowed, ok := m.filter.Root().LongestPrefix([]byte(strings.Join(key, ".")))
|
||||
if !ok {
|
||||
return m.Config.FilterDefault, m.filterLabels(labels)
|
||||
}
|
||||
|
||||
return allowed.(bool), m.filterLabels(labels)
|
||||
}
|
||||
|
||||
// Periodically collects runtime stats to publish
|
||||
|
||||
339
vendor/github.com/armon/go-metrics/metrics_test.go
generated
vendored
339
vendor/github.com/armon/go-metrics/metrics_test.go
generated
vendored
@@ -9,25 +9,38 @@ import (
|
||||
|
||||
func mockMetric() (*MockSink, *Metrics) {
|
||||
m := &MockSink{}
|
||||
met := &Metrics{sink: m}
|
||||
met := &Metrics{Config: Config{FilterDefault: true}, sink: m}
|
||||
return m, met
|
||||
}
|
||||
|
||||
func TestMetrics_SetGauge(t *testing.T) {
|
||||
m, met := mockMetric()
|
||||
met.SetGauge([]string{"key"}, float32(1))
|
||||
if m.keys[0][0] != "key" {
|
||||
if m.getKeys()[0][0] != "key" {
|
||||
t.Fatalf("")
|
||||
}
|
||||
if m.vals[0] != 1 {
|
||||
t.Fatalf("")
|
||||
}
|
||||
|
||||
m, met = mockMetric()
|
||||
labels := []Label{{"a", "b"}}
|
||||
met.SetGaugeWithLabels([]string{"key"}, float32(1), labels)
|
||||
if m.getKeys()[0][0] != "key" {
|
||||
t.Fatalf("")
|
||||
}
|
||||
if m.vals[0] != 1 {
|
||||
t.Fatalf("")
|
||||
}
|
||||
if !reflect.DeepEqual(m.labels[0], labels) {
|
||||
t.Fatalf("")
|
||||
}
|
||||
|
||||
m, met = mockMetric()
|
||||
met.HostName = "test"
|
||||
met.EnableHostname = true
|
||||
met.SetGauge([]string{"key"}, float32(1))
|
||||
if m.keys[0][0] != "test" || m.keys[0][1] != "key" {
|
||||
if m.getKeys()[0][0] != "test" || m.getKeys()[0][1] != "key" {
|
||||
t.Fatalf("")
|
||||
}
|
||||
if m.vals[0] != 1 {
|
||||
@@ -37,7 +50,7 @@ func TestMetrics_SetGauge(t *testing.T) {
|
||||
m, met = mockMetric()
|
||||
met.EnableTypePrefix = true
|
||||
met.SetGauge([]string{"key"}, float32(1))
|
||||
if m.keys[0][0] != "gauge" || m.keys[0][1] != "key" {
|
||||
if m.getKeys()[0][0] != "gauge" || m.getKeys()[0][1] != "key" {
|
||||
t.Fatalf("")
|
||||
}
|
||||
if m.vals[0] != 1 {
|
||||
@@ -47,7 +60,7 @@ func TestMetrics_SetGauge(t *testing.T) {
|
||||
m, met = mockMetric()
|
||||
met.ServiceName = "service"
|
||||
met.SetGauge([]string{"key"}, float32(1))
|
||||
if m.keys[0][0] != "service" || m.keys[0][1] != "key" {
|
||||
if m.getKeys()[0][0] != "service" || m.getKeys()[0][1] != "key" {
|
||||
t.Fatalf("")
|
||||
}
|
||||
if m.vals[0] != 1 {
|
||||
@@ -58,7 +71,7 @@ func TestMetrics_SetGauge(t *testing.T) {
|
||||
func TestMetrics_EmitKey(t *testing.T) {
|
||||
m, met := mockMetric()
|
||||
met.EmitKey([]string{"key"}, float32(1))
|
||||
if m.keys[0][0] != "key" {
|
||||
if m.getKeys()[0][0] != "key" {
|
||||
t.Fatalf("")
|
||||
}
|
||||
if m.vals[0] != 1 {
|
||||
@@ -68,7 +81,7 @@ func TestMetrics_EmitKey(t *testing.T) {
|
||||
m, met = mockMetric()
|
||||
met.EnableTypePrefix = true
|
||||
met.EmitKey([]string{"key"}, float32(1))
|
||||
if m.keys[0][0] != "kv" || m.keys[0][1] != "key" {
|
||||
if m.getKeys()[0][0] != "kv" || m.getKeys()[0][1] != "key" {
|
||||
t.Fatalf("")
|
||||
}
|
||||
if m.vals[0] != 1 {
|
||||
@@ -78,7 +91,7 @@ func TestMetrics_EmitKey(t *testing.T) {
|
||||
m, met = mockMetric()
|
||||
met.ServiceName = "service"
|
||||
met.EmitKey([]string{"key"}, float32(1))
|
||||
if m.keys[0][0] != "service" || m.keys[0][1] != "key" {
|
||||
if m.getKeys()[0][0] != "service" || m.getKeys()[0][1] != "key" {
|
||||
t.Fatalf("")
|
||||
}
|
||||
if m.vals[0] != 1 {
|
||||
@@ -89,17 +102,30 @@ func TestMetrics_EmitKey(t *testing.T) {
|
||||
func TestMetrics_IncrCounter(t *testing.T) {
|
||||
m, met := mockMetric()
|
||||
met.IncrCounter([]string{"key"}, float32(1))
|
||||
if m.keys[0][0] != "key" {
|
||||
if m.getKeys()[0][0] != "key" {
|
||||
t.Fatalf("")
|
||||
}
|
||||
if m.vals[0] != 1 {
|
||||
t.Fatalf("")
|
||||
}
|
||||
|
||||
m, met = mockMetric()
|
||||
labels := []Label{{"a", "b"}}
|
||||
met.IncrCounterWithLabels([]string{"key"}, float32(1), labels)
|
||||
if m.getKeys()[0][0] != "key" {
|
||||
t.Fatalf("")
|
||||
}
|
||||
if m.vals[0] != 1 {
|
||||
t.Fatalf("")
|
||||
}
|
||||
if !reflect.DeepEqual(m.labels[0], labels) {
|
||||
t.Fatalf("")
|
||||
}
|
||||
|
||||
m, met = mockMetric()
|
||||
met.EnableTypePrefix = true
|
||||
met.IncrCounter([]string{"key"}, float32(1))
|
||||
if m.keys[0][0] != "counter" || m.keys[0][1] != "key" {
|
||||
if m.getKeys()[0][0] != "counter" || m.getKeys()[0][1] != "key" {
|
||||
t.Fatalf("")
|
||||
}
|
||||
if m.vals[0] != 1 {
|
||||
@@ -109,7 +135,7 @@ func TestMetrics_IncrCounter(t *testing.T) {
|
||||
m, met = mockMetric()
|
||||
met.ServiceName = "service"
|
||||
met.IncrCounter([]string{"key"}, float32(1))
|
||||
if m.keys[0][0] != "service" || m.keys[0][1] != "key" {
|
||||
if m.getKeys()[0][0] != "service" || m.getKeys()[0][1] != "key" {
|
||||
t.Fatalf("")
|
||||
}
|
||||
if m.vals[0] != 1 {
|
||||
@@ -120,17 +146,30 @@ func TestMetrics_IncrCounter(t *testing.T) {
|
||||
func TestMetrics_AddSample(t *testing.T) {
|
||||
m, met := mockMetric()
|
||||
met.AddSample([]string{"key"}, float32(1))
|
||||
if m.keys[0][0] != "key" {
|
||||
if m.getKeys()[0][0] != "key" {
|
||||
t.Fatalf("")
|
||||
}
|
||||
if m.vals[0] != 1 {
|
||||
t.Fatalf("")
|
||||
}
|
||||
|
||||
m, met = mockMetric()
|
||||
labels := []Label{{"a", "b"}}
|
||||
met.AddSampleWithLabels([]string{"key"}, float32(1), labels)
|
||||
if m.getKeys()[0][0] != "key" {
|
||||
t.Fatalf("")
|
||||
}
|
||||
if m.vals[0] != 1 {
|
||||
t.Fatalf("")
|
||||
}
|
||||
if !reflect.DeepEqual(m.labels[0], labels) {
|
||||
t.Fatalf("")
|
||||
}
|
||||
|
||||
m, met = mockMetric()
|
||||
met.EnableTypePrefix = true
|
||||
met.AddSample([]string{"key"}, float32(1))
|
||||
if m.keys[0][0] != "sample" || m.keys[0][1] != "key" {
|
||||
if m.getKeys()[0][0] != "sample" || m.getKeys()[0][1] != "key" {
|
||||
t.Fatalf("")
|
||||
}
|
||||
if m.vals[0] != 1 {
|
||||
@@ -140,7 +179,7 @@ func TestMetrics_AddSample(t *testing.T) {
|
||||
m, met = mockMetric()
|
||||
met.ServiceName = "service"
|
||||
met.AddSample([]string{"key"}, float32(1))
|
||||
if m.keys[0][0] != "service" || m.keys[0][1] != "key" {
|
||||
if m.getKeys()[0][0] != "service" || m.getKeys()[0][1] != "key" {
|
||||
t.Fatalf("")
|
||||
}
|
||||
if m.vals[0] != 1 {
|
||||
@@ -153,18 +192,32 @@ func TestMetrics_MeasureSince(t *testing.T) {
|
||||
met.TimerGranularity = time.Millisecond
|
||||
n := time.Now()
|
||||
met.MeasureSince([]string{"key"}, n)
|
||||
if m.keys[0][0] != "key" {
|
||||
if m.getKeys()[0][0] != "key" {
|
||||
t.Fatalf("")
|
||||
}
|
||||
if m.vals[0] > 0.1 {
|
||||
t.Fatalf("")
|
||||
}
|
||||
|
||||
m, met = mockMetric()
|
||||
met.TimerGranularity = time.Millisecond
|
||||
labels := []Label{{"a", "b"}}
|
||||
met.MeasureSinceWithLabels([]string{"key"}, n, labels)
|
||||
if m.getKeys()[0][0] != "key" {
|
||||
t.Fatalf("")
|
||||
}
|
||||
if m.vals[0] > 0.1 {
|
||||
t.Fatalf("")
|
||||
}
|
||||
if !reflect.DeepEqual(m.labels[0], labels) {
|
||||
t.Fatalf("")
|
||||
}
|
||||
|
||||
m, met = mockMetric()
|
||||
met.TimerGranularity = time.Millisecond
|
||||
met.EnableTypePrefix = true
|
||||
met.MeasureSince([]string{"key"}, n)
|
||||
if m.keys[0][0] != "timer" || m.keys[0][1] != "key" {
|
||||
if m.getKeys()[0][0] != "timer" || m.getKeys()[0][1] != "key" {
|
||||
t.Fatalf("")
|
||||
}
|
||||
if m.vals[0] > 0.1 {
|
||||
@@ -175,7 +228,7 @@ func TestMetrics_MeasureSince(t *testing.T) {
|
||||
met.TimerGranularity = time.Millisecond
|
||||
met.ServiceName = "service"
|
||||
met.MeasureSince([]string{"key"}, n)
|
||||
if m.keys[0][0] != "service" || m.keys[0][1] != "key" {
|
||||
if m.getKeys()[0][0] != "service" || m.getKeys()[0][1] != "key" {
|
||||
t.Fatalf("")
|
||||
}
|
||||
if m.vals[0] > 0.1 {
|
||||
@@ -188,64 +241,64 @@ func TestMetrics_EmitRuntimeStats(t *testing.T) {
|
||||
m, met := mockMetric()
|
||||
met.emitRuntimeStats()
|
||||
|
||||
if m.keys[0][0] != "runtime" || m.keys[0][1] != "num_goroutines" {
|
||||
t.Fatalf("bad key %v", m.keys)
|
||||
if m.getKeys()[0][0] != "runtime" || m.getKeys()[0][1] != "num_goroutines" {
|
||||
t.Fatalf("bad key %v", m.getKeys())
|
||||
}
|
||||
if m.vals[0] <= 1 {
|
||||
t.Fatalf("bad val: %v", m.vals)
|
||||
}
|
||||
|
||||
if m.keys[1][0] != "runtime" || m.keys[1][1] != "alloc_bytes" {
|
||||
t.Fatalf("bad key %v", m.keys)
|
||||
if m.getKeys()[1][0] != "runtime" || m.getKeys()[1][1] != "alloc_bytes" {
|
||||
t.Fatalf("bad key %v", m.getKeys())
|
||||
}
|
||||
if m.vals[1] <= 40000 {
|
||||
t.Fatalf("bad val: %v", m.vals)
|
||||
}
|
||||
|
||||
if m.keys[2][0] != "runtime" || m.keys[2][1] != "sys_bytes" {
|
||||
t.Fatalf("bad key %v", m.keys)
|
||||
if m.getKeys()[2][0] != "runtime" || m.getKeys()[2][1] != "sys_bytes" {
|
||||
t.Fatalf("bad key %v", m.getKeys())
|
||||
}
|
||||
if m.vals[2] <= 100000 {
|
||||
t.Fatalf("bad val: %v", m.vals)
|
||||
}
|
||||
|
||||
if m.keys[3][0] != "runtime" || m.keys[3][1] != "malloc_count" {
|
||||
t.Fatalf("bad key %v", m.keys)
|
||||
if m.getKeys()[3][0] != "runtime" || m.getKeys()[3][1] != "malloc_count" {
|
||||
t.Fatalf("bad key %v", m.getKeys())
|
||||
}
|
||||
if m.vals[3] <= 100 {
|
||||
t.Fatalf("bad val: %v", m.vals)
|
||||
}
|
||||
|
||||
if m.keys[4][0] != "runtime" || m.keys[4][1] != "free_count" {
|
||||
t.Fatalf("bad key %v", m.keys)
|
||||
if m.getKeys()[4][0] != "runtime" || m.getKeys()[4][1] != "free_count" {
|
||||
t.Fatalf("bad key %v", m.getKeys())
|
||||
}
|
||||
if m.vals[4] <= 100 {
|
||||
t.Fatalf("bad val: %v", m.vals)
|
||||
}
|
||||
|
||||
if m.keys[5][0] != "runtime" || m.keys[5][1] != "heap_objects" {
|
||||
t.Fatalf("bad key %v", m.keys)
|
||||
if m.getKeys()[5][0] != "runtime" || m.getKeys()[5][1] != "heap_objects" {
|
||||
t.Fatalf("bad key %v", m.getKeys())
|
||||
}
|
||||
if m.vals[5] <= 100 {
|
||||
t.Fatalf("bad val: %v", m.vals)
|
||||
}
|
||||
|
||||
if m.keys[6][0] != "runtime" || m.keys[6][1] != "total_gc_pause_ns" {
|
||||
t.Fatalf("bad key %v", m.keys)
|
||||
if m.getKeys()[6][0] != "runtime" || m.getKeys()[6][1] != "total_gc_pause_ns" {
|
||||
t.Fatalf("bad key %v", m.getKeys())
|
||||
}
|
||||
if m.vals[6] <= 100000 {
|
||||
if m.vals[6] <= 100 {
|
||||
t.Fatalf("bad val: %v\nkeys: %v", m.vals, m.getKeys())
|
||||
}
|
||||
|
||||
if m.getKeys()[7][0] != "runtime" || m.getKeys()[7][1] != "total_gc_runs" {
|
||||
t.Fatalf("bad key %v", m.getKeys())
|
||||
}
|
||||
if m.vals[7] < 1 {
|
||||
t.Fatalf("bad val: %v", m.vals)
|
||||
}
|
||||
|
||||
if m.keys[7][0] != "runtime" || m.keys[7][1] != "total_gc_runs" {
|
||||
t.Fatalf("bad key %v", m.keys)
|
||||
}
|
||||
if m.vals[7] <= 1 {
|
||||
t.Fatalf("bad val: %v", m.vals)
|
||||
}
|
||||
|
||||
if m.keys[8][0] != "runtime" || m.keys[8][1] != "gc_pause_ns" {
|
||||
t.Fatalf("bad key %v", m.keys)
|
||||
if m.getKeys()[8][0] != "runtime" || m.getKeys()[8][1] != "gc_pause_ns" {
|
||||
t.Fatalf("bad key %v", m.getKeys())
|
||||
}
|
||||
if m.vals[8] <= 1000 {
|
||||
t.Fatalf("bad val: %v", m.vals)
|
||||
@@ -260,3 +313,207 @@ func TestInsert(t *testing.T) {
|
||||
t.Fatalf("bad insert %v %v", exp, out)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMetrics_Filter_Blacklist(t *testing.T) {
|
||||
m := &MockSink{}
|
||||
conf := DefaultConfig("")
|
||||
conf.AllowedPrefixes = []string{"service", "debug.thing"}
|
||||
conf.BlockedPrefixes = []string{"debug"}
|
||||
conf.EnableHostname = false
|
||||
met, err := New(conf, m)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Allowed by default
|
||||
key := []string{"thing"}
|
||||
met.SetGauge(key, 1)
|
||||
if !reflect.DeepEqual(m.getKeys()[0], key) {
|
||||
t.Fatalf("key doesn't exist %v, %v", m.getKeys()[0], key)
|
||||
}
|
||||
if m.vals[0] != 1 {
|
||||
t.Fatalf("bad val: %v", m.vals[0])
|
||||
}
|
||||
|
||||
// Allowed by filter
|
||||
key = []string{"service", "thing"}
|
||||
met.SetGauge(key, 2)
|
||||
if !reflect.DeepEqual(m.getKeys()[1], key) {
|
||||
t.Fatalf("key doesn't exist")
|
||||
}
|
||||
if m.vals[1] != 2 {
|
||||
t.Fatalf("bad val: %v", m.vals[1])
|
||||
}
|
||||
|
||||
// Allowed by filter, subtree of a blocked entry
|
||||
key = []string{"debug", "thing"}
|
||||
met.SetGauge(key, 3)
|
||||
if !reflect.DeepEqual(m.getKeys()[2], key) {
|
||||
t.Fatalf("key doesn't exist")
|
||||
}
|
||||
if m.vals[2] != 3 {
|
||||
t.Fatalf("bad val: %v", m.vals[2])
|
||||
}
|
||||
|
||||
// Blocked by filter
|
||||
key = []string{"debug", "other-thing"}
|
||||
met.SetGauge(key, 4)
|
||||
if len(m.getKeys()) != 3 {
|
||||
t.Fatalf("key shouldn't exist")
|
||||
}
|
||||
}
|
||||
|
||||
func HasElem(s interface{}, elem interface{}) bool {
|
||||
arrV := reflect.ValueOf(s)
|
||||
|
||||
if arrV.Kind() == reflect.Slice {
|
||||
for i := 0; i < arrV.Len(); i++ {
|
||||
if arrV.Index(i).Interface() == elem {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func TestMetrics_Filter_Whitelist(t *testing.T) {
|
||||
m := &MockSink{}
|
||||
conf := DefaultConfig("")
|
||||
conf.AllowedPrefixes = []string{"service", "debug.thing"}
|
||||
conf.BlockedPrefixes = []string{"debug"}
|
||||
conf.FilterDefault = false
|
||||
conf.EnableHostname = false
|
||||
conf.BlockedLabels = []string{"bad_label"}
|
||||
met, err := New(conf, m)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Blocked by default
|
||||
key := []string{"thing"}
|
||||
met.SetGauge(key, 1)
|
||||
if len(m.getKeys()) != 0 {
|
||||
t.Fatalf("key should not exist")
|
||||
}
|
||||
|
||||
// Allowed by filter
|
||||
key = []string{"service", "thing"}
|
||||
met.SetGauge(key, 2)
|
||||
if !reflect.DeepEqual(m.getKeys()[0], key) {
|
||||
t.Fatalf("key doesn't exist")
|
||||
}
|
||||
if m.vals[0] != 2 {
|
||||
t.Fatalf("bad val: %v", m.vals[0])
|
||||
}
|
||||
|
||||
// Allowed by filter, subtree of a blocked entry
|
||||
key = []string{"debug", "thing"}
|
||||
met.SetGauge(key, 3)
|
||||
if !reflect.DeepEqual(m.getKeys()[1], key) {
|
||||
t.Fatalf("key doesn't exist")
|
||||
}
|
||||
if m.vals[1] != 3 {
|
||||
t.Fatalf("bad val: %v", m.vals[1])
|
||||
}
|
||||
|
||||
// Blocked by filter
|
||||
key = []string{"debug", "other-thing"}
|
||||
met.SetGauge(key, 4)
|
||||
if len(m.getKeys()) != 2 {
|
||||
t.Fatalf("key shouldn't exist")
|
||||
}
|
||||
// Test blacklisting of labels
|
||||
key = []string{"debug", "thing"}
|
||||
goodLabel := Label{Name: "good", Value: "should be present"}
|
||||
badLabel := Label{Name: "bad_label", Value: "should not be there"}
|
||||
labels := []Label{badLabel, goodLabel}
|
||||
met.SetGaugeWithLabels(key, 3, labels)
|
||||
if !reflect.DeepEqual(m.getKeys()[1], key) {
|
||||
t.Fatalf("key doesn't exist")
|
||||
}
|
||||
if m.vals[2] != 3 {
|
||||
t.Fatalf("bad val: %v", m.vals[1])
|
||||
}
|
||||
if HasElem(m.labels[2], badLabel) {
|
||||
t.Fatalf("bad_label should not be present in %v", m.labels[2])
|
||||
}
|
||||
if !HasElem(m.labels[2], goodLabel) {
|
||||
t.Fatalf("good label is not present in %v", m.labels[2])
|
||||
}
|
||||
}
|
||||
|
||||
func TestMetrics_Filter_Labels_Whitelist(t *testing.T) {
|
||||
m := &MockSink{}
|
||||
conf := DefaultConfig("")
|
||||
conf.AllowedPrefixes = []string{"service", "debug.thing"}
|
||||
conf.BlockedPrefixes = []string{"debug"}
|
||||
conf.FilterDefault = false
|
||||
conf.EnableHostname = false
|
||||
conf.AllowedLabels = []string{"good_label"}
|
||||
conf.BlockedLabels = []string{"bad_label"}
|
||||
met, err := New(conf, m)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Blocked by default
|
||||
key := []string{"thing"}
|
||||
key = []string{"debug", "thing"}
|
||||
goodLabel := Label{Name: "good_label", Value: "should be present"}
|
||||
notReallyGoodLabel := Label{Name: "not_really_good_label", Value: "not whitelisted, but not blacklisted"}
|
||||
badLabel := Label{Name: "bad_label", Value: "should not be there"}
|
||||
labels := []Label{badLabel, notReallyGoodLabel, goodLabel}
|
||||
met.SetGaugeWithLabels(key, 1, labels)
|
||||
|
||||
if HasElem(m.labels[0], badLabel) {
|
||||
t.Fatalf("bad_label should not be present in %v", m.labels[0])
|
||||
}
|
||||
if HasElem(m.labels[0], notReallyGoodLabel) {
|
||||
t.Fatalf("not_really_good_label should not be present in %v", m.labels[0])
|
||||
}
|
||||
if !HasElem(m.labels[0], goodLabel) {
|
||||
t.Fatalf("good label is not present in %v", m.labels[0])
|
||||
}
|
||||
|
||||
conf.AllowedLabels = nil
|
||||
met.UpdateFilterAndLabels(conf.AllowedPrefixes, conf.BlockedLabels, conf.AllowedLabels, conf.BlockedLabels)
|
||||
met.SetGaugeWithLabels(key, 1, labels)
|
||||
|
||||
if HasElem(m.labels[1], badLabel) {
|
||||
t.Fatalf("bad_label should not be present in %v", m.labels[1])
|
||||
}
|
||||
// Since no whitelist, not_really_good_label should be there
|
||||
if !HasElem(m.labels[1], notReallyGoodLabel) {
|
||||
t.Fatalf("not_really_good_label is not present in %v", m.labels[1])
|
||||
}
|
||||
if !HasElem(m.labels[1], goodLabel) {
|
||||
t.Fatalf("good label is not present in %v", m.labels[1])
|
||||
}
|
||||
}
|
||||
|
||||
func TestMetrics_Filter_Labels_ModifyArgs(t *testing.T) {
|
||||
m := &MockSink{}
|
||||
conf := DefaultConfig("")
|
||||
conf.FilterDefault = false
|
||||
conf.EnableHostname = false
|
||||
conf.AllowedLabels = []string{"keep"}
|
||||
conf.BlockedLabels = []string{"delete"}
|
||||
met, err := New(conf, m)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Blocked by default
|
||||
key := []string{"thing"}
|
||||
key = []string{"debug", "thing"}
|
||||
goodLabel := Label{Name: "keep", Value: "should be kept"}
|
||||
badLabel := Label{Name: "delete", Value: "should be deleted"}
|
||||
argLabels := []Label{badLabel, goodLabel, badLabel, goodLabel, badLabel, goodLabel, badLabel}
|
||||
origLabels := append([]Label{}, argLabels...)
|
||||
met.SetGaugeWithLabels(key, 1, argLabels)
|
||||
|
||||
if !reflect.DeepEqual(argLabels, origLabels) {
|
||||
t.Fatalf("SetGaugeWithLabels modified the input argument")
|
||||
}
|
||||
}
|
||||
|
||||
174
vendor/github.com/armon/go-metrics/prometheus/prometheus.go
generated
vendored
174
vendor/github.com/armon/go-metrics/prometheus/prometheus.go
generated
vendored
@@ -1,68 +1,169 @@
|
||||
// +build go1.3
|
||||
|
||||
package prometheus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"regexp"
|
||||
|
||||
"github.com/armon/go-metrics"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
var (
|
||||
// DefaultPrometheusOpts is the default set of options used when creating a
|
||||
// PrometheusSink.
|
||||
DefaultPrometheusOpts = PrometheusOpts{
|
||||
Expiration: 60 * time.Second,
|
||||
}
|
||||
)
|
||||
|
||||
// PrometheusOpts is used to configure the Prometheus Sink
|
||||
type PrometheusOpts struct {
|
||||
// Expiration is the duration a metric is valid for, after which it will be
|
||||
// untracked. If the value is zero, a metric is never expired.
|
||||
Expiration time.Duration
|
||||
}
|
||||
|
||||
type PrometheusSink struct {
|
||||
mu sync.Mutex
|
||||
gauges map[string]prometheus.Gauge
|
||||
summaries map[string]prometheus.Summary
|
||||
counters map[string]prometheus.Counter
|
||||
mu sync.Mutex
|
||||
gauges map[string]prometheus.Gauge
|
||||
summaries map[string]prometheus.Summary
|
||||
counters map[string]prometheus.Counter
|
||||
updates map[string]time.Time
|
||||
expiration time.Duration
|
||||
}
|
||||
|
||||
// NewPrometheusSink creates a new PrometheusSink using the default options.
|
||||
func NewPrometheusSink() (*PrometheusSink, error) {
|
||||
return &PrometheusSink{
|
||||
gauges: make(map[string]prometheus.Gauge),
|
||||
summaries: make(map[string]prometheus.Summary),
|
||||
counters: make(map[string]prometheus.Counter),
|
||||
}, nil
|
||||
return NewPrometheusSinkFrom(DefaultPrometheusOpts)
|
||||
}
|
||||
|
||||
func (p *PrometheusSink) flattenKey(parts []string) string {
|
||||
joined := strings.Join(parts, "_")
|
||||
joined = strings.Replace(joined, " ", "_", -1)
|
||||
joined = strings.Replace(joined, ".", "_", -1)
|
||||
joined = strings.Replace(joined, "-", "_", -1)
|
||||
return joined
|
||||
// NewPrometheusSinkFrom creates a new PrometheusSink using the passed options.
|
||||
func NewPrometheusSinkFrom(opts PrometheusOpts) (*PrometheusSink, error) {
|
||||
sink := &PrometheusSink{
|
||||
gauges: make(map[string]prometheus.Gauge),
|
||||
summaries: make(map[string]prometheus.Summary),
|
||||
counters: make(map[string]prometheus.Counter),
|
||||
updates: make(map[string]time.Time),
|
||||
expiration: opts.Expiration,
|
||||
}
|
||||
|
||||
return sink, prometheus.Register(sink)
|
||||
}
|
||||
|
||||
// Describe is needed to meet the Collector interface.
|
||||
func (p *PrometheusSink) Describe(c chan<- *prometheus.Desc) {
|
||||
// We must emit some description otherwise an error is returned. This
|
||||
// description isn't shown to the user!
|
||||
prometheus.NewGauge(prometheus.GaugeOpts{Name: "Dummy", Help: "Dummy"}).Describe(c)
|
||||
}
|
||||
|
||||
// Collect meets the collection interface and allows us to enforce our expiration
|
||||
// logic to clean up ephemeral metrics if their value haven't been set for a
|
||||
// duration exceeding our allowed expiration time.
|
||||
func (p *PrometheusSink) Collect(c chan<- prometheus.Metric) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
|
||||
expire := p.expiration != 0
|
||||
now := time.Now()
|
||||
for k, v := range p.gauges {
|
||||
last := p.updates[k]
|
||||
if expire && last.Add(p.expiration).Before(now) {
|
||||
delete(p.updates, k)
|
||||
delete(p.gauges, k)
|
||||
} else {
|
||||
v.Collect(c)
|
||||
}
|
||||
}
|
||||
for k, v := range p.summaries {
|
||||
last := p.updates[k]
|
||||
if expire && last.Add(p.expiration).Before(now) {
|
||||
delete(p.updates, k)
|
||||
delete(p.summaries, k)
|
||||
} else {
|
||||
v.Collect(c)
|
||||
}
|
||||
}
|
||||
for k, v := range p.counters {
|
||||
last := p.updates[k]
|
||||
if expire && last.Add(p.expiration).Before(now) {
|
||||
delete(p.updates, k)
|
||||
delete(p.counters, k)
|
||||
} else {
|
||||
v.Collect(c)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var forbiddenChars = regexp.MustCompile("[ .=\\-/]")
|
||||
|
||||
func (p *PrometheusSink) flattenKey(parts []string, labels []metrics.Label) (string, string) {
|
||||
key := strings.Join(parts, "_")
|
||||
key = forbiddenChars.ReplaceAllString(key, "_")
|
||||
|
||||
hash := key
|
||||
for _, label := range labels {
|
||||
hash += fmt.Sprintf(";%s=%s", label.Name, label.Value)
|
||||
}
|
||||
|
||||
return key, hash
|
||||
}
|
||||
|
||||
func prometheusLabels(labels []metrics.Label) prometheus.Labels {
|
||||
l := make(prometheus.Labels)
|
||||
for _, label := range labels {
|
||||
l[label.Name] = label.Value
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
func (p *PrometheusSink) SetGauge(parts []string, val float32) {
|
||||
p.SetGaugeWithLabels(parts, val, nil)
|
||||
}
|
||||
|
||||
func (p *PrometheusSink) SetGaugeWithLabels(parts []string, val float32, labels []metrics.Label) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
key := p.flattenKey(parts)
|
||||
g, ok := p.gauges[key]
|
||||
key, hash := p.flattenKey(parts, labels)
|
||||
g, ok := p.gauges[hash]
|
||||
if !ok {
|
||||
g = prometheus.NewGauge(prometheus.GaugeOpts{
|
||||
Name: key,
|
||||
Help: key,
|
||||
Name: key,
|
||||
Help: key,
|
||||
ConstLabels: prometheusLabels(labels),
|
||||
})
|
||||
prometheus.MustRegister(g)
|
||||
p.gauges[key] = g
|
||||
p.gauges[hash] = g
|
||||
}
|
||||
g.Set(float64(val))
|
||||
p.updates[hash] = time.Now()
|
||||
}
|
||||
|
||||
func (p *PrometheusSink) AddSample(parts []string, val float32) {
|
||||
p.AddSampleWithLabels(parts, val, nil)
|
||||
}
|
||||
|
||||
func (p *PrometheusSink) AddSampleWithLabels(parts []string, val float32, labels []metrics.Label) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
key := p.flattenKey(parts)
|
||||
g, ok := p.summaries[key]
|
||||
key, hash := p.flattenKey(parts, labels)
|
||||
g, ok := p.summaries[hash]
|
||||
if !ok {
|
||||
g = prometheus.NewSummary(prometheus.SummaryOpts{
|
||||
Name: key,
|
||||
Help: key,
|
||||
MaxAge: 10 * time.Second,
|
||||
Name: key,
|
||||
Help: key,
|
||||
MaxAge: 10 * time.Second,
|
||||
ConstLabels: prometheusLabels(labels),
|
||||
})
|
||||
prometheus.MustRegister(g)
|
||||
p.summaries[key] = g
|
||||
p.summaries[hash] = g
|
||||
}
|
||||
g.Observe(float64(val))
|
||||
p.updates[hash] = time.Now()
|
||||
}
|
||||
|
||||
// EmitKey is not implemented. Prometheus doesn’t offer a type for which an
|
||||
@@ -72,17 +173,22 @@ func (p *PrometheusSink) EmitKey(key []string, val float32) {
|
||||
}
|
||||
|
||||
func (p *PrometheusSink) IncrCounter(parts []string, val float32) {
|
||||
p.IncrCounterWithLabels(parts, val, nil)
|
||||
}
|
||||
|
||||
func (p *PrometheusSink) IncrCounterWithLabels(parts []string, val float32, labels []metrics.Label) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
key := p.flattenKey(parts)
|
||||
g, ok := p.counters[key]
|
||||
key, hash := p.flattenKey(parts, labels)
|
||||
g, ok := p.counters[hash]
|
||||
if !ok {
|
||||
g = prometheus.NewCounter(prometheus.CounterOpts{
|
||||
Name: key,
|
||||
Help: key,
|
||||
Name: key,
|
||||
Help: key,
|
||||
ConstLabels: prometheusLabels(labels),
|
||||
})
|
||||
prometheus.MustRegister(g)
|
||||
p.counters[key] = g
|
||||
p.counters[hash] = g
|
||||
}
|
||||
g.Add(float64(val))
|
||||
p.updates[hash] = time.Now()
|
||||
}
|
||||
|
||||
77
vendor/github.com/armon/go-metrics/sink.go
generated
vendored
77
vendor/github.com/armon/go-metrics/sink.go
generated
vendored
@@ -1,35 +1,50 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
)
|
||||
|
||||
// The MetricSink interface is used to transmit metrics information
|
||||
// to an external system
|
||||
type MetricSink interface {
|
||||
// A Gauge should retain the last value it is set to
|
||||
SetGauge(key []string, val float32)
|
||||
SetGaugeWithLabels(key []string, val float32, labels []Label)
|
||||
|
||||
// Should emit a Key/Value pair for each call
|
||||
EmitKey(key []string, val float32)
|
||||
|
||||
// Counters should accumulate values
|
||||
IncrCounter(key []string, val float32)
|
||||
IncrCounterWithLabels(key []string, val float32, labels []Label)
|
||||
|
||||
// Samples are for timing information, where quantiles are used
|
||||
AddSample(key []string, val float32)
|
||||
AddSampleWithLabels(key []string, val float32, labels []Label)
|
||||
}
|
||||
|
||||
// BlackholeSink is used to just blackhole messages
|
||||
type BlackholeSink struct{}
|
||||
|
||||
func (*BlackholeSink) SetGauge(key []string, val float32) {}
|
||||
func (*BlackholeSink) EmitKey(key []string, val float32) {}
|
||||
func (*BlackholeSink) IncrCounter(key []string, val float32) {}
|
||||
func (*BlackholeSink) AddSample(key []string, val float32) {}
|
||||
func (*BlackholeSink) SetGauge(key []string, val float32) {}
|
||||
func (*BlackholeSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {}
|
||||
func (*BlackholeSink) EmitKey(key []string, val float32) {}
|
||||
func (*BlackholeSink) IncrCounter(key []string, val float32) {}
|
||||
func (*BlackholeSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {}
|
||||
func (*BlackholeSink) AddSample(key []string, val float32) {}
|
||||
func (*BlackholeSink) AddSampleWithLabels(key []string, val float32, labels []Label) {}
|
||||
|
||||
// FanoutSink is used to sink to fanout values to multiple sinks
|
||||
type FanoutSink []MetricSink
|
||||
|
||||
func (fh FanoutSink) SetGauge(key []string, val float32) {
|
||||
fh.SetGaugeWithLabels(key, val, nil)
|
||||
}
|
||||
|
||||
func (fh FanoutSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
|
||||
for _, s := range fh {
|
||||
s.SetGauge(key, val)
|
||||
s.SetGaugeWithLabels(key, val, labels)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -40,13 +55,61 @@ func (fh FanoutSink) EmitKey(key []string, val float32) {
|
||||
}
|
||||
|
||||
func (fh FanoutSink) IncrCounter(key []string, val float32) {
|
||||
fh.IncrCounterWithLabels(key, val, nil)
|
||||
}
|
||||
|
||||
func (fh FanoutSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
|
||||
for _, s := range fh {
|
||||
s.IncrCounter(key, val)
|
||||
s.IncrCounterWithLabels(key, val, labels)
|
||||
}
|
||||
}
|
||||
|
||||
func (fh FanoutSink) AddSample(key []string, val float32) {
|
||||
fh.AddSampleWithLabels(key, val, nil)
|
||||
}
|
||||
|
||||
func (fh FanoutSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
|
||||
for _, s := range fh {
|
||||
s.AddSample(key, val)
|
||||
s.AddSampleWithLabels(key, val, labels)
|
||||
}
|
||||
}
|
||||
|
||||
// sinkURLFactoryFunc is an generic interface around the *SinkFromURL() function provided
|
||||
// by each sink type
|
||||
type sinkURLFactoryFunc func(*url.URL) (MetricSink, error)
|
||||
|
||||
// sinkRegistry supports the generic NewMetricSink function by mapping URL
|
||||
// schemes to metric sink factory functions
|
||||
var sinkRegistry = map[string]sinkURLFactoryFunc{
|
||||
"statsd": NewStatsdSinkFromURL,
|
||||
"statsite": NewStatsiteSinkFromURL,
|
||||
"inmem": NewInmemSinkFromURL,
|
||||
}
|
||||
|
||||
// NewMetricSinkFromURL allows a generic URL input to configure any of the
|
||||
// supported sinks. The scheme of the URL identifies the type of the sink, the
|
||||
// and query parameters are used to set options.
|
||||
//
|
||||
// "statsd://" - Initializes a StatsdSink. The host and port are passed through
|
||||
// as the "addr" of the sink
|
||||
//
|
||||
// "statsite://" - Initializes a StatsiteSink. The host and port become the
|
||||
// "addr" of the sink
|
||||
//
|
||||
// "inmem://" - Initializes an InmemSink. The host and port are ignored. The
|
||||
// "interval" and "duration" query parameters must be specified with valid
|
||||
// durations, see NewInmemSink for details.
|
||||
func NewMetricSinkFromURL(urlStr string) (MetricSink, error) {
|
||||
u, err := url.Parse(urlStr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sinkURLFactoryFunc := sinkRegistry[u.Scheme]
|
||||
if sinkURLFactoryFunc == nil {
|
||||
return nil, fmt.Errorf(
|
||||
"cannot create metric sink, unrecognized sink name: %q", u.Scheme)
|
||||
}
|
||||
|
||||
return sinkURLFactoryFunc(u)
|
||||
}
|
||||
|
||||
178
vendor/github.com/armon/go-metrics/sink_test.go
generated
vendored
178
vendor/github.com/armon/go-metrics/sink_test.go
generated
vendored
@@ -2,29 +2,66 @@ package metrics
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type MockSink struct {
|
||||
keys [][]string
|
||||
vals []float32
|
||||
lock sync.Mutex
|
||||
|
||||
keys [][]string
|
||||
vals []float32
|
||||
labels [][]Label
|
||||
}
|
||||
|
||||
func (m *MockSink) getKeys() [][]string {
|
||||
m.lock.Lock()
|
||||
defer m.lock.Unlock()
|
||||
|
||||
return m.keys
|
||||
}
|
||||
|
||||
func (m *MockSink) SetGauge(key []string, val float32) {
|
||||
m.SetGaugeWithLabels(key, val, nil)
|
||||
}
|
||||
func (m *MockSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
|
||||
m.lock.Lock()
|
||||
defer m.lock.Unlock()
|
||||
|
||||
m.keys = append(m.keys, key)
|
||||
m.vals = append(m.vals, val)
|
||||
m.labels = append(m.labels, labels)
|
||||
}
|
||||
func (m *MockSink) EmitKey(key []string, val float32) {
|
||||
m.lock.Lock()
|
||||
defer m.lock.Unlock()
|
||||
|
||||
m.keys = append(m.keys, key)
|
||||
m.vals = append(m.vals, val)
|
||||
m.labels = append(m.labels, nil)
|
||||
}
|
||||
func (m *MockSink) IncrCounter(key []string, val float32) {
|
||||
m.IncrCounterWithLabels(key, val, nil)
|
||||
}
|
||||
func (m *MockSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
|
||||
m.lock.Lock()
|
||||
defer m.lock.Unlock()
|
||||
|
||||
m.keys = append(m.keys, key)
|
||||
m.vals = append(m.vals, val)
|
||||
m.labels = append(m.labels, labels)
|
||||
}
|
||||
func (m *MockSink) AddSample(key []string, val float32) {
|
||||
m.AddSampleWithLabels(key, val, nil)
|
||||
}
|
||||
func (m *MockSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
|
||||
m.lock.Lock()
|
||||
defer m.lock.Unlock()
|
||||
|
||||
m.keys = append(m.keys, key)
|
||||
m.vals = append(m.vals, val)
|
||||
m.labels = append(m.labels, labels)
|
||||
}
|
||||
|
||||
func TestFanoutSink_Gauge(t *testing.T) {
|
||||
@@ -50,6 +87,36 @@ func TestFanoutSink_Gauge(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestFanoutSink_Gauge_Labels(t *testing.T) {
|
||||
m1 := &MockSink{}
|
||||
m2 := &MockSink{}
|
||||
fh := &FanoutSink{m1, m2}
|
||||
|
||||
k := []string{"test"}
|
||||
v := float32(42.0)
|
||||
l := []Label{{"a", "b"}}
|
||||
fh.SetGaugeWithLabels(k, v, l)
|
||||
|
||||
if !reflect.DeepEqual(m1.keys[0], k) {
|
||||
t.Fatalf("key not equal")
|
||||
}
|
||||
if !reflect.DeepEqual(m2.keys[0], k) {
|
||||
t.Fatalf("key not equal")
|
||||
}
|
||||
if !reflect.DeepEqual(m1.vals[0], v) {
|
||||
t.Fatalf("val not equal")
|
||||
}
|
||||
if !reflect.DeepEqual(m2.vals[0], v) {
|
||||
t.Fatalf("val not equal")
|
||||
}
|
||||
if !reflect.DeepEqual(m1.labels[0], l) {
|
||||
t.Fatalf("labels not equal")
|
||||
}
|
||||
if !reflect.DeepEqual(m2.labels[0], l) {
|
||||
t.Fatalf("labels not equal")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFanoutSink_Key(t *testing.T) {
|
||||
m1 := &MockSink{}
|
||||
m2 := &MockSink{}
|
||||
@@ -96,6 +163,36 @@ func TestFanoutSink_Counter(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestFanoutSink_Counter_Labels(t *testing.T) {
|
||||
m1 := &MockSink{}
|
||||
m2 := &MockSink{}
|
||||
fh := &FanoutSink{m1, m2}
|
||||
|
||||
k := []string{"test"}
|
||||
v := float32(42.0)
|
||||
l := []Label{{"a", "b"}}
|
||||
fh.IncrCounterWithLabels(k, v, l)
|
||||
|
||||
if !reflect.DeepEqual(m1.keys[0], k) {
|
||||
t.Fatalf("key not equal")
|
||||
}
|
||||
if !reflect.DeepEqual(m2.keys[0], k) {
|
||||
t.Fatalf("key not equal")
|
||||
}
|
||||
if !reflect.DeepEqual(m1.vals[0], v) {
|
||||
t.Fatalf("val not equal")
|
||||
}
|
||||
if !reflect.DeepEqual(m2.vals[0], v) {
|
||||
t.Fatalf("val not equal")
|
||||
}
|
||||
if !reflect.DeepEqual(m1.labels[0], l) {
|
||||
t.Fatalf("labels not equal")
|
||||
}
|
||||
if !reflect.DeepEqual(m2.labels[0], l) {
|
||||
t.Fatalf("labels not equal")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFanoutSink_Sample(t *testing.T) {
|
||||
m1 := &MockSink{}
|
||||
m2 := &MockSink{}
|
||||
@@ -118,3 +215,80 @@ func TestFanoutSink_Sample(t *testing.T) {
|
||||
t.Fatalf("val not equal")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFanoutSink_Sample_Labels(t *testing.T) {
|
||||
m1 := &MockSink{}
|
||||
m2 := &MockSink{}
|
||||
fh := &FanoutSink{m1, m2}
|
||||
|
||||
k := []string{"test"}
|
||||
v := float32(42.0)
|
||||
l := []Label{{"a", "b"}}
|
||||
fh.AddSampleWithLabels(k, v, l)
|
||||
|
||||
if !reflect.DeepEqual(m1.keys[0], k) {
|
||||
t.Fatalf("key not equal")
|
||||
}
|
||||
if !reflect.DeepEqual(m2.keys[0], k) {
|
||||
t.Fatalf("key not equal")
|
||||
}
|
||||
if !reflect.DeepEqual(m1.vals[0], v) {
|
||||
t.Fatalf("val not equal")
|
||||
}
|
||||
if !reflect.DeepEqual(m2.vals[0], v) {
|
||||
t.Fatalf("val not equal")
|
||||
}
|
||||
if !reflect.DeepEqual(m1.labels[0], l) {
|
||||
t.Fatalf("labels not equal")
|
||||
}
|
||||
if !reflect.DeepEqual(m2.labels[0], l) {
|
||||
t.Fatalf("labels not equal")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewMetricSinkFromURL(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
input string
|
||||
expect reflect.Type
|
||||
expectErr string
|
||||
}{
|
||||
{
|
||||
desc: "statsd scheme yields a StatsdSink",
|
||||
input: "statsd://someserver:123",
|
||||
expect: reflect.TypeOf(&StatsdSink{}),
|
||||
},
|
||||
{
|
||||
desc: "statsite scheme yields a StatsiteSink",
|
||||
input: "statsite://someserver:123",
|
||||
expect: reflect.TypeOf(&StatsiteSink{}),
|
||||
},
|
||||
{
|
||||
desc: "inmem scheme yields an InmemSink",
|
||||
input: "inmem://?interval=30s&retain=30s",
|
||||
expect: reflect.TypeOf(&InmemSink{}),
|
||||
},
|
||||
{
|
||||
desc: "unknown scheme yields an error",
|
||||
input: "notasink://whatever",
|
||||
expectErr: "unrecognized sink name: \"notasink\"",
|
||||
},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
ms, err := NewMetricSinkFromURL(tc.input)
|
||||
if tc.expectErr != "" {
|
||||
if !strings.Contains(err.Error(), tc.expectErr) {
|
||||
t.Fatalf("expected err: %q to contain: %q", err, tc.expectErr)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected err: %s", err)
|
||||
}
|
||||
got := reflect.TypeOf(ms)
|
||||
if got != tc.expect {
|
||||
t.Fatalf("expected return type to be %v, got: %v", tc.expect, got)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
68
vendor/github.com/armon/go-metrics/start.go
generated
vendored
68
vendor/github.com/armon/go-metrics/start.go
generated
vendored
@@ -2,34 +2,50 @@ package metrics
|
||||
|
||||
import (
|
||||
"os"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/go-immutable-radix"
|
||||
)
|
||||
|
||||
// Config is used to configure metrics settings
|
||||
type Config struct {
|
||||
ServiceName string // Prefixed with keys to seperate services
|
||||
ServiceName string // Prefixed with keys to separate services
|
||||
HostName string // Hostname to use. If not provided and EnableHostname, it will be os.Hostname
|
||||
EnableHostname bool // Enable prefixing gauge values with hostname
|
||||
EnableHostnameLabel bool // Enable adding hostname to labels
|
||||
EnableServiceLabel bool // Enable adding service to labels
|
||||
EnableRuntimeMetrics bool // Enables profiling of runtime metrics (GC, Goroutines, Memory)
|
||||
EnableTypePrefix bool // Prefixes key with a type ("counter", "gauge", "timer")
|
||||
TimerGranularity time.Duration // Granularity of timers.
|
||||
ProfileInterval time.Duration // Interval to profile runtime metrics
|
||||
|
||||
AllowedPrefixes []string // A list of metric prefixes to allow, with '.' as the separator
|
||||
BlockedPrefixes []string // A list of metric prefixes to block, with '.' as the separator
|
||||
AllowedLabels []string // A list of metric labels to allow, with '.' as the separator
|
||||
BlockedLabels []string // A list of metric labels to block, with '.' as the separator
|
||||
FilterDefault bool // Whether to allow metrics by default
|
||||
}
|
||||
|
||||
// Metrics represents an instance of a metrics sink that can
|
||||
// be used to emit
|
||||
type Metrics struct {
|
||||
Config
|
||||
lastNumGC uint32
|
||||
sink MetricSink
|
||||
lastNumGC uint32
|
||||
sink MetricSink
|
||||
filter *iradix.Tree
|
||||
allowedLabels map[string]bool
|
||||
blockedLabels map[string]bool
|
||||
filterLock sync.RWMutex // Lock filters and allowedLabels/blockedLabels access
|
||||
}
|
||||
|
||||
// Shared global metrics instance
|
||||
var globalMetrics *Metrics
|
||||
var globalMetrics atomic.Value // *Metrics
|
||||
|
||||
func init() {
|
||||
// Initialize to a blackhole sink to avoid errors
|
||||
globalMetrics = &Metrics{sink: &BlackholeSink{}}
|
||||
globalMetrics.Store(&Metrics{sink: &BlackholeSink{}})
|
||||
}
|
||||
|
||||
// DefaultConfig provides a sane default configuration
|
||||
@@ -42,6 +58,7 @@ func DefaultConfig(serviceName string) *Config {
|
||||
EnableTypePrefix: false, // Disable type prefix
|
||||
TimerGranularity: time.Millisecond, // Timers are in milliseconds
|
||||
ProfileInterval: time.Second, // Poll runtime every second
|
||||
FilterDefault: true, // Don't filter metrics by default
|
||||
}
|
||||
|
||||
// Try to get the hostname
|
||||
@@ -55,6 +72,7 @@ func New(conf *Config, sink MetricSink) (*Metrics, error) {
|
||||
met := &Metrics{}
|
||||
met.Config = *conf
|
||||
met.sink = sink
|
||||
met.UpdateFilterAndLabels(conf.AllowedPrefixes, conf.BlockedPrefixes, conf.AllowedLabels, conf.BlockedLabels)
|
||||
|
||||
// Start the runtime collector
|
||||
if conf.EnableRuntimeMetrics {
|
||||
@@ -68,28 +86,56 @@ func New(conf *Config, sink MetricSink) (*Metrics, error) {
|
||||
func NewGlobal(conf *Config, sink MetricSink) (*Metrics, error) {
|
||||
metrics, err := New(conf, sink)
|
||||
if err == nil {
|
||||
globalMetrics = metrics
|
||||
globalMetrics.Store(metrics)
|
||||
}
|
||||
return metrics, err
|
||||
}
|
||||
|
||||
// Proxy all the methods to the globalMetrics instance
|
||||
func SetGauge(key []string, val float32) {
|
||||
globalMetrics.SetGauge(key, val)
|
||||
globalMetrics.Load().(*Metrics).SetGauge(key, val)
|
||||
}
|
||||
|
||||
func SetGaugeWithLabels(key []string, val float32, labels []Label) {
|
||||
globalMetrics.Load().(*Metrics).SetGaugeWithLabels(key, val, labels)
|
||||
}
|
||||
|
||||
func EmitKey(key []string, val float32) {
|
||||
globalMetrics.EmitKey(key, val)
|
||||
globalMetrics.Load().(*Metrics).EmitKey(key, val)
|
||||
}
|
||||
|
||||
func IncrCounter(key []string, val float32) {
|
||||
globalMetrics.IncrCounter(key, val)
|
||||
globalMetrics.Load().(*Metrics).IncrCounter(key, val)
|
||||
}
|
||||
|
||||
func IncrCounterWithLabels(key []string, val float32, labels []Label) {
|
||||
globalMetrics.Load().(*Metrics).IncrCounterWithLabels(key, val, labels)
|
||||
}
|
||||
|
||||
func AddSample(key []string, val float32) {
|
||||
globalMetrics.AddSample(key, val)
|
||||
globalMetrics.Load().(*Metrics).AddSample(key, val)
|
||||
}
|
||||
|
||||
func AddSampleWithLabels(key []string, val float32, labels []Label) {
|
||||
globalMetrics.Load().(*Metrics).AddSampleWithLabels(key, val, labels)
|
||||
}
|
||||
|
||||
func MeasureSince(key []string, start time.Time) {
|
||||
globalMetrics.MeasureSince(key, start)
|
||||
globalMetrics.Load().(*Metrics).MeasureSince(key, start)
|
||||
}
|
||||
|
||||
func MeasureSinceWithLabels(key []string, start time.Time, labels []Label) {
|
||||
globalMetrics.Load().(*Metrics).MeasureSinceWithLabels(key, start, labels)
|
||||
}
|
||||
|
||||
func UpdateFilter(allow, block []string) {
|
||||
globalMetrics.Load().(*Metrics).UpdateFilter(allow, block)
|
||||
}
|
||||
|
||||
// UpdateFilterAndLabels set allow/block prefixes of metrics while allowedLabels
|
||||
// and blockedLabels - when not nil - allow filtering of labels in order to
|
||||
// block/allow globally labels (especially useful when having large number of
|
||||
// values for a given label). See README.md for more information about usage.
|
||||
func UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) {
|
||||
globalMetrics.Load().(*Metrics).UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels)
|
||||
}
|
||||
|
||||
218
vendor/github.com/armon/go-metrics/start_test.go
generated
vendored
218
vendor/github.com/armon/go-metrics/start_test.go
generated
vendored
@@ -1,7 +1,10 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"reflect"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
@@ -28,83 +31,186 @@ func TestDefaultConfig(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func Test_GlobalMetrics_SetGauge(t *testing.T) {
|
||||
m := &MockSink{}
|
||||
globalMetrics = &Metrics{sink: m}
|
||||
|
||||
k := []string{"test"}
|
||||
v := float32(42.0)
|
||||
SetGauge(k, v)
|
||||
|
||||
if !reflect.DeepEqual(m.keys[0], k) {
|
||||
t.Fatalf("key not equal")
|
||||
func Test_GlobalMetrics(t *testing.T) {
|
||||
var tests = []struct {
|
||||
desc string
|
||||
key []string
|
||||
val float32
|
||||
fn func([]string, float32)
|
||||
}{
|
||||
{"SetGauge", []string{"test"}, 42, SetGauge},
|
||||
{"EmitKey", []string{"test"}, 42, EmitKey},
|
||||
{"IncrCounter", []string{"test"}, 42, IncrCounter},
|
||||
{"AddSample", []string{"test"}, 42, AddSample},
|
||||
}
|
||||
if !reflect.DeepEqual(m.vals[0], v) {
|
||||
t.Fatalf("val not equal")
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.desc, func(t *testing.T) {
|
||||
s := &MockSink{}
|
||||
globalMetrics.Store(&Metrics{Config: Config{FilterDefault: true}, sink: s})
|
||||
tt.fn(tt.key, tt.val)
|
||||
if got, want := s.keys[0], tt.key; !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("got key %s want %s", got, want)
|
||||
}
|
||||
if got, want := s.vals[0], tt.val; !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("got val %v want %v", got, want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_GlobalMetrics_EmitKey(t *testing.T) {
|
||||
m := &MockSink{}
|
||||
globalMetrics = &Metrics{sink: m}
|
||||
|
||||
k := []string{"test"}
|
||||
v := float32(42.0)
|
||||
EmitKey(k, v)
|
||||
|
||||
if !reflect.DeepEqual(m.keys[0], k) {
|
||||
t.Fatalf("key not equal")
|
||||
func Test_GlobalMetrics_Labels(t *testing.T) {
|
||||
labels := []Label{{"a", "b"}}
|
||||
var tests = []struct {
|
||||
desc string
|
||||
key []string
|
||||
val float32
|
||||
fn func([]string, float32, []Label)
|
||||
labels []Label
|
||||
}{
|
||||
{"SetGaugeWithLabels", []string{"test"}, 42, SetGaugeWithLabels, labels},
|
||||
{"IncrCounterWithLabels", []string{"test"}, 42, IncrCounterWithLabels, labels},
|
||||
{"AddSampleWithLabels", []string{"test"}, 42, AddSampleWithLabels, labels},
|
||||
}
|
||||
if !reflect.DeepEqual(m.vals[0], v) {
|
||||
t.Fatalf("val not equal")
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.desc, func(t *testing.T) {
|
||||
s := &MockSink{}
|
||||
globalMetrics.Store(&Metrics{Config: Config{FilterDefault: true}, sink: s})
|
||||
tt.fn(tt.key, tt.val, tt.labels)
|
||||
if got, want := s.keys[0], tt.key; !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("got key %s want %s", got, want)
|
||||
}
|
||||
if got, want := s.vals[0], tt.val; !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("got val %v want %v", got, want)
|
||||
}
|
||||
if got, want := s.labels[0], tt.labels; !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("got val %s want %s", got, want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_GlobalMetrics_IncrCounter(t *testing.T) {
|
||||
m := &MockSink{}
|
||||
globalMetrics = &Metrics{sink: m}
|
||||
|
||||
k := []string{"test"}
|
||||
v := float32(42.0)
|
||||
IncrCounter(k, v)
|
||||
|
||||
if !reflect.DeepEqual(m.keys[0], k) {
|
||||
t.Fatalf("key not equal")
|
||||
func Test_GlobalMetrics_DefaultLabels(t *testing.T) {
|
||||
config := Config{
|
||||
HostName: "host1",
|
||||
ServiceName: "redis",
|
||||
EnableHostnameLabel: true,
|
||||
EnableServiceLabel: true,
|
||||
FilterDefault: true,
|
||||
}
|
||||
if !reflect.DeepEqual(m.vals[0], v) {
|
||||
t.Fatalf("val not equal")
|
||||
labels := []Label{
|
||||
{"host", config.HostName},
|
||||
{"service", config.ServiceName},
|
||||
}
|
||||
}
|
||||
|
||||
func Test_GlobalMetrics_AddSample(t *testing.T) {
|
||||
m := &MockSink{}
|
||||
globalMetrics = &Metrics{sink: m}
|
||||
|
||||
k := []string{"test"}
|
||||
v := float32(42.0)
|
||||
AddSample(k, v)
|
||||
|
||||
if !reflect.DeepEqual(m.keys[0], k) {
|
||||
t.Fatalf("key not equal")
|
||||
var tests = []struct {
|
||||
desc string
|
||||
key []string
|
||||
val float32
|
||||
fn func([]string, float32, []Label)
|
||||
labels []Label
|
||||
}{
|
||||
{"SetGaugeWithLabels", []string{"test"}, 42, SetGaugeWithLabels, labels},
|
||||
{"IncrCounterWithLabels", []string{"test"}, 42, IncrCounterWithLabels, labels},
|
||||
{"AddSampleWithLabels", []string{"test"}, 42, AddSampleWithLabels, labels},
|
||||
}
|
||||
if !reflect.DeepEqual(m.vals[0], v) {
|
||||
t.Fatalf("val not equal")
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.desc, func(t *testing.T) {
|
||||
s := &MockSink{}
|
||||
globalMetrics.Store(&Metrics{Config: config, sink: s})
|
||||
tt.fn(tt.key, tt.val, nil)
|
||||
if got, want := s.keys[0], tt.key; !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("got key %s want %s", got, want)
|
||||
}
|
||||
if got, want := s.vals[0], tt.val; !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("got val %v want %v", got, want)
|
||||
}
|
||||
if got, want := s.labels[0], tt.labels; !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("got val %s want %s", got, want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_GlobalMetrics_MeasureSince(t *testing.T) {
|
||||
m := &MockSink{}
|
||||
globalMetrics = &Metrics{sink: m}
|
||||
globalMetrics.TimerGranularity = time.Millisecond
|
||||
s := &MockSink{}
|
||||
m := &Metrics{sink: s, Config: Config{TimerGranularity: time.Millisecond, FilterDefault: true}}
|
||||
globalMetrics.Store(m)
|
||||
|
||||
k := []string{"test"}
|
||||
now := time.Now()
|
||||
MeasureSince(k, now)
|
||||
|
||||
if !reflect.DeepEqual(m.keys[0], k) {
|
||||
if !reflect.DeepEqual(s.keys[0], k) {
|
||||
t.Fatalf("key not equal")
|
||||
}
|
||||
if m.vals[0] > 0.1 {
|
||||
t.Fatalf("val too large %v", m.vals[0])
|
||||
if s.vals[0] > 0.1 {
|
||||
t.Fatalf("val too large %v", s.vals[0])
|
||||
}
|
||||
|
||||
labels := []Label{{"a", "b"}}
|
||||
MeasureSinceWithLabels(k, now, labels)
|
||||
if got, want := s.keys[1], k; !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("got key %s want %s", got, want)
|
||||
}
|
||||
if s.vals[1] > 0.1 {
|
||||
t.Fatalf("val too large %v", s.vals[0])
|
||||
}
|
||||
if got, want := s.labels[1], labels; !reflect.DeepEqual(got, want) {
|
||||
t.Fatalf("got val %s want %s", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func Test_GlobalMetrics_UpdateFilter(t *testing.T) {
|
||||
globalMetrics.Store(&Metrics{Config: Config{
|
||||
AllowedPrefixes: []string{"a"},
|
||||
BlockedPrefixes: []string{"b"},
|
||||
AllowedLabels: []string{"1"},
|
||||
BlockedLabels: []string{"2"},
|
||||
}})
|
||||
UpdateFilterAndLabels([]string{"c"}, []string{"d"}, []string{"3"}, []string{"4"})
|
||||
|
||||
m := globalMetrics.Load().(*Metrics)
|
||||
if m.AllowedPrefixes[0] != "c" {
|
||||
t.Fatalf("bad: %v", m.AllowedPrefixes)
|
||||
}
|
||||
if m.BlockedPrefixes[0] != "d" {
|
||||
t.Fatalf("bad: %v", m.BlockedPrefixes)
|
||||
}
|
||||
if m.AllowedLabels[0] != "3" {
|
||||
t.Fatalf("bad: %v", m.AllowedPrefixes)
|
||||
}
|
||||
if m.BlockedLabels[0] != "4" {
|
||||
t.Fatalf("bad: %v", m.AllowedPrefixes)
|
||||
}
|
||||
if _, ok := m.allowedLabels["3"]; !ok {
|
||||
t.Fatalf("bad: %v", m.allowedLabels)
|
||||
}
|
||||
if _, ok := m.blockedLabels["4"]; !ok {
|
||||
t.Fatalf("bad: %v", m.blockedLabels)
|
||||
}
|
||||
}
|
||||
|
||||
// Benchmark_GlobalMetrics_Direct/direct-8 5000000 278 ns/op
|
||||
// Benchmark_GlobalMetrics_Direct/atomic.Value-8 5000000 235 ns/op
|
||||
func Benchmark_GlobalMetrics_Direct(b *testing.B) {
|
||||
log.SetOutput(ioutil.Discard)
|
||||
s := &MockSink{}
|
||||
m := &Metrics{sink: s}
|
||||
var v atomic.Value
|
||||
v.Store(m)
|
||||
k := []string{"test"}
|
||||
b.Run("direct", func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
m.IncrCounter(k, 1)
|
||||
}
|
||||
})
|
||||
b.Run("atomic.Value", func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
v.Load().(*Metrics).IncrCounter(k, 1)
|
||||
}
|
||||
})
|
||||
// do something with m so that the compiler does not optimize this away
|
||||
b.Logf("%d", m.lastNumGC)
|
||||
}
|
||||
|
||||
30
vendor/github.com/armon/go-metrics/statsd.go
generated
vendored
30
vendor/github.com/armon/go-metrics/statsd.go
generated
vendored
@@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
@@ -23,6 +24,12 @@ type StatsdSink struct {
|
||||
metricQueue chan string
|
||||
}
|
||||
|
||||
// NewStatsdSinkFromURL creates an StatsdSink from a URL. It is used
|
||||
// (and tested) from NewMetricSinkFromURL.
|
||||
func NewStatsdSinkFromURL(u *url.URL) (MetricSink, error) {
|
||||
return NewStatsdSink(u.Host)
|
||||
}
|
||||
|
||||
// NewStatsdSink is used to create a new StatsdSink
|
||||
func NewStatsdSink(addr string) (*StatsdSink, error) {
|
||||
s := &StatsdSink{
|
||||
@@ -43,6 +50,11 @@ func (s *StatsdSink) SetGauge(key []string, val float32) {
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
|
||||
}
|
||||
|
||||
func (s *StatsdSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
|
||||
flatKey := s.flattenKeyLabels(key, labels)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
|
||||
}
|
||||
|
||||
func (s *StatsdSink) EmitKey(key []string, val float32) {
|
||||
flatKey := s.flattenKey(key)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val))
|
||||
@@ -53,11 +65,21 @@ func (s *StatsdSink) IncrCounter(key []string, val float32) {
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
|
||||
}
|
||||
|
||||
func (s *StatsdSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
|
||||
flatKey := s.flattenKeyLabels(key, labels)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
|
||||
}
|
||||
|
||||
func (s *StatsdSink) AddSample(key []string, val float32) {
|
||||
flatKey := s.flattenKey(key)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
|
||||
}
|
||||
|
||||
func (s *StatsdSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
|
||||
flatKey := s.flattenKeyLabels(key, labels)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
|
||||
}
|
||||
|
||||
// Flattens the key for formatting, removes spaces
|
||||
func (s *StatsdSink) flattenKey(parts []string) string {
|
||||
joined := strings.Join(parts, ".")
|
||||
@@ -73,6 +95,14 @@ func (s *StatsdSink) flattenKey(parts []string) string {
|
||||
}, joined)
|
||||
}
|
||||
|
||||
// Flattens the key along with labels for formatting, removes spaces
|
||||
func (s *StatsdSink) flattenKeyLabels(parts []string, labels []Label) string {
|
||||
for _, label := range labels {
|
||||
parts = append(parts, label.Value)
|
||||
}
|
||||
return s.flattenKey(parts)
|
||||
}
|
||||
|
||||
// Does a non-blocking push to the metrics queue
|
||||
func (s *StatsdSink) pushMetric(m string) {
|
||||
select {
|
||||
|
||||
82
vendor/github.com/armon/go-metrics/statsd_test.go
generated
vendored
82
vendor/github.com/armon/go-metrics/statsd_test.go
generated
vendored
@@ -4,6 +4,8 @@ import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"net"
|
||||
"net/url"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
@@ -64,7 +66,7 @@ func TestStatsd_Conn(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected err %s", err)
|
||||
}
|
||||
if line != "key.other:2.000000|kv\n" {
|
||||
if line != "gauge_labels.val.label:2.000000|g\n" {
|
||||
t.Fatalf("bad line %s", line)
|
||||
}
|
||||
|
||||
@@ -72,7 +74,7 @@ func TestStatsd_Conn(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected err %s", err)
|
||||
}
|
||||
if line != "counter.me:3.000000|c\n" {
|
||||
if line != "key.other:3.000000|kv\n" {
|
||||
t.Fatalf("bad line %s", line)
|
||||
}
|
||||
|
||||
@@ -80,7 +82,31 @@ func TestStatsd_Conn(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected err %s", err)
|
||||
}
|
||||
if line != "sample.slow_thingy:4.000000|ms\n" {
|
||||
if line != "counter.me:4.000000|c\n" {
|
||||
t.Fatalf("bad line %s", line)
|
||||
}
|
||||
|
||||
line, err = reader.ReadString('\n')
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected err %s", err)
|
||||
}
|
||||
if line != "counter_labels.me.label:5.000000|c\n" {
|
||||
t.Fatalf("bad line %s", line)
|
||||
}
|
||||
|
||||
line, err = reader.ReadString('\n')
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected err %s", err)
|
||||
}
|
||||
if line != "sample.slow_thingy:6.000000|ms\n" {
|
||||
t.Fatalf("bad line %s", line)
|
||||
}
|
||||
|
||||
line, err = reader.ReadString('\n')
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected err %s", err)
|
||||
}
|
||||
if line != "sample_labels.slow_thingy.label:7.000000|ms\n" {
|
||||
t.Fatalf("bad line %s", line)
|
||||
}
|
||||
|
||||
@@ -92,9 +118,12 @@ func TestStatsd_Conn(t *testing.T) {
|
||||
}
|
||||
|
||||
s.SetGauge([]string{"gauge", "val"}, float32(1))
|
||||
s.EmitKey([]string{"key", "other"}, float32(2))
|
||||
s.IncrCounter([]string{"counter", "me"}, float32(3))
|
||||
s.AddSample([]string{"sample", "slow thingy"}, float32(4))
|
||||
s.SetGaugeWithLabels([]string{"gauge_labels", "val"}, float32(2), []Label{{"a", "label"}})
|
||||
s.EmitKey([]string{"key", "other"}, float32(3))
|
||||
s.IncrCounter([]string{"counter", "me"}, float32(4))
|
||||
s.IncrCounterWithLabels([]string{"counter_labels", "me"}, float32(5), []Label{{"a", "label"}})
|
||||
s.AddSample([]string{"sample", "slow thingy"}, float32(6))
|
||||
s.AddSampleWithLabels([]string{"sample_labels", "slow thingy"}, float32(7), []Label{{"a", "label"}})
|
||||
|
||||
select {
|
||||
case <-done:
|
||||
@@ -103,3 +132,44 @@ func TestStatsd_Conn(t *testing.T) {
|
||||
t.Fatalf("timeout")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewStatsdSinkFromURL(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
input string
|
||||
expectErr string
|
||||
expectAddr string
|
||||
}{
|
||||
{
|
||||
desc: "address is populated",
|
||||
input: "statsd://statsd.service.consul",
|
||||
expectAddr: "statsd.service.consul",
|
||||
},
|
||||
{
|
||||
desc: "address includes port",
|
||||
input: "statsd://statsd.service.consul:1234",
|
||||
expectAddr: "statsd.service.consul:1234",
|
||||
},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
u, err := url.Parse(tc.input)
|
||||
if err != nil {
|
||||
t.Fatalf("error parsing URL: %s", err)
|
||||
}
|
||||
ms, err := NewStatsdSinkFromURL(u)
|
||||
if tc.expectErr != "" {
|
||||
if !strings.Contains(err.Error(), tc.expectErr) {
|
||||
t.Fatalf("expected err: %q, to contain: %q", err, tc.expectErr)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected err: %s", err)
|
||||
}
|
||||
is := ms.(*StatsdSink)
|
||||
if is.addr != tc.expectAddr {
|
||||
t.Fatalf("expected addr %s, got: %s", tc.expectAddr, is.addr)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
30
vendor/github.com/armon/go-metrics/statsite.go
generated
vendored
30
vendor/github.com/armon/go-metrics/statsite.go
generated
vendored
@@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
@@ -16,6 +17,12 @@ const (
|
||||
flushInterval = 100 * time.Millisecond
|
||||
)
|
||||
|
||||
// NewStatsiteSinkFromURL creates an StatsiteSink from a URL. It is used
|
||||
// (and tested) from NewMetricSinkFromURL.
|
||||
func NewStatsiteSinkFromURL(u *url.URL) (MetricSink, error) {
|
||||
return NewStatsiteSink(u.Host)
|
||||
}
|
||||
|
||||
// StatsiteSink provides a MetricSink that can be used with a
|
||||
// statsite metrics server
|
||||
type StatsiteSink struct {
|
||||
@@ -43,6 +50,11 @@ func (s *StatsiteSink) SetGauge(key []string, val float32) {
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
|
||||
}
|
||||
|
||||
func (s *StatsiteSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
|
||||
flatKey := s.flattenKeyLabels(key, labels)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
|
||||
}
|
||||
|
||||
func (s *StatsiteSink) EmitKey(key []string, val float32) {
|
||||
flatKey := s.flattenKey(key)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val))
|
||||
@@ -53,11 +65,21 @@ func (s *StatsiteSink) IncrCounter(key []string, val float32) {
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
|
||||
}
|
||||
|
||||
func (s *StatsiteSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
|
||||
flatKey := s.flattenKeyLabels(key, labels)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
|
||||
}
|
||||
|
||||
func (s *StatsiteSink) AddSample(key []string, val float32) {
|
||||
flatKey := s.flattenKey(key)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
|
||||
}
|
||||
|
||||
func (s *StatsiteSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
|
||||
flatKey := s.flattenKeyLabels(key, labels)
|
||||
s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
|
||||
}
|
||||
|
||||
// Flattens the key for formatting, removes spaces
|
||||
func (s *StatsiteSink) flattenKey(parts []string) string {
|
||||
joined := strings.Join(parts, ".")
|
||||
@@ -73,6 +95,14 @@ func (s *StatsiteSink) flattenKey(parts []string) string {
|
||||
}, joined)
|
||||
}
|
||||
|
||||
// Flattens the key along with labels for formatting, removes spaces
|
||||
func (s *StatsiteSink) flattenKeyLabels(parts []string, labels []Label) string {
|
||||
for _, label := range labels {
|
||||
parts = append(parts, label.Value)
|
||||
}
|
||||
return s.flattenKey(parts)
|
||||
}
|
||||
|
||||
// Does a non-blocking push to the metrics queue
|
||||
func (s *StatsiteSink) pushMetric(m string) {
|
||||
select {
|
||||
|
||||
97
vendor/github.com/armon/go-metrics/statsite_test.go
generated
vendored
97
vendor/github.com/armon/go-metrics/statsite_test.go
generated
vendored
@@ -3,16 +3,12 @@ package metrics
|
||||
import (
|
||||
"bufio"
|
||||
"net"
|
||||
"net/url"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func acceptConn(addr string) net.Conn {
|
||||
ln, _ := net.Listen("tcp", addr)
|
||||
conn, _ := ln.Accept()
|
||||
return conn
|
||||
}
|
||||
|
||||
func TestStatsite_Flatten(t *testing.T) {
|
||||
s := &StatsiteSink{}
|
||||
flat := s.flattenKey([]string{"a", "b", "c", "d"})
|
||||
@@ -42,9 +38,16 @@ func TestStatsite_PushFullQueue(t *testing.T) {
|
||||
|
||||
func TestStatsite_Conn(t *testing.T) {
|
||||
addr := "localhost:7523"
|
||||
|
||||
ln, _ := net.Listen("tcp", addr)
|
||||
|
||||
done := make(chan bool)
|
||||
go func() {
|
||||
conn := acceptConn(addr)
|
||||
conn, err := ln.Accept()
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected err %s", err)
|
||||
}
|
||||
|
||||
reader := bufio.NewReader(conn)
|
||||
|
||||
line, err := reader.ReadString('\n')
|
||||
@@ -59,7 +62,7 @@ func TestStatsite_Conn(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected err %s", err)
|
||||
}
|
||||
if line != "key.other:2.000000|kv\n" {
|
||||
if line != "gauge_labels.val.label:2.000000|g\n" {
|
||||
t.Fatalf("bad line %s", line)
|
||||
}
|
||||
|
||||
@@ -67,7 +70,7 @@ func TestStatsite_Conn(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected err %s", err)
|
||||
}
|
||||
if line != "counter.me:3.000000|c\n" {
|
||||
if line != "key.other:3.000000|kv\n" {
|
||||
t.Fatalf("bad line %s", line)
|
||||
}
|
||||
|
||||
@@ -75,7 +78,31 @@ func TestStatsite_Conn(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected err %s", err)
|
||||
}
|
||||
if line != "sample.slow_thingy:4.000000|ms\n" {
|
||||
if line != "counter.me:4.000000|c\n" {
|
||||
t.Fatalf("bad line %s", line)
|
||||
}
|
||||
|
||||
line, err = reader.ReadString('\n')
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected err %s", err)
|
||||
}
|
||||
if line != "counter_labels.me.label:5.000000|c\n" {
|
||||
t.Fatalf("bad line %s", line)
|
||||
}
|
||||
|
||||
line, err = reader.ReadString('\n')
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected err %s", err)
|
||||
}
|
||||
if line != "sample.slow_thingy:6.000000|ms\n" {
|
||||
t.Fatalf("bad line %s", line)
|
||||
}
|
||||
|
||||
line, err = reader.ReadString('\n')
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected err %s", err)
|
||||
}
|
||||
if line != "sample_labels.slow_thingy.label:7.000000|ms\n" {
|
||||
t.Fatalf("bad line %s", line)
|
||||
}
|
||||
|
||||
@@ -88,9 +115,12 @@ func TestStatsite_Conn(t *testing.T) {
|
||||
}
|
||||
|
||||
s.SetGauge([]string{"gauge", "val"}, float32(1))
|
||||
s.EmitKey([]string{"key", "other"}, float32(2))
|
||||
s.IncrCounter([]string{"counter", "me"}, float32(3))
|
||||
s.AddSample([]string{"sample", "slow thingy"}, float32(4))
|
||||
s.SetGaugeWithLabels([]string{"gauge_labels", "val"}, float32(2), []Label{{"a", "label"}})
|
||||
s.EmitKey([]string{"key", "other"}, float32(3))
|
||||
s.IncrCounter([]string{"counter", "me"}, float32(4))
|
||||
s.IncrCounterWithLabels([]string{"counter_labels", "me"}, float32(5), []Label{{"a", "label"}})
|
||||
s.AddSample([]string{"sample", "slow thingy"}, float32(6))
|
||||
s.AddSampleWithLabels([]string{"sample_labels", "slow thingy"}, float32(7), []Label{{"a", "label"}})
|
||||
|
||||
select {
|
||||
case <-done:
|
||||
@@ -99,3 +129,44 @@ func TestStatsite_Conn(t *testing.T) {
|
||||
t.Fatalf("timeout")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewStatsiteSinkFromURL(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
input string
|
||||
expectErr string
|
||||
expectAddr string
|
||||
}{
|
||||
{
|
||||
desc: "address is populated",
|
||||
input: "statsd://statsd.service.consul",
|
||||
expectAddr: "statsd.service.consul",
|
||||
},
|
||||
{
|
||||
desc: "address includes port",
|
||||
input: "statsd://statsd.service.consul:1234",
|
||||
expectAddr: "statsd.service.consul:1234",
|
||||
},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
u, err := url.Parse(tc.input)
|
||||
if err != nil {
|
||||
t.Fatalf("error parsing URL: %s", err)
|
||||
}
|
||||
ms, err := NewStatsiteSinkFromURL(u)
|
||||
if tc.expectErr != "" {
|
||||
if !strings.Contains(err.Error(), tc.expectErr) {
|
||||
t.Fatalf("expected err: %q, to contain: %q", err, tc.expectErr)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected err: %s", err)
|
||||
}
|
||||
is := ms.(*StatsiteSink)
|
||||
if is.addr != tc.expectAddr {
|
||||
t.Fatalf("expected addr %s, got: %s", tc.expectAddr, is.addr)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
363
vendor/github.com/hashicorp/go-immutable-radix/LICENSE
generated
vendored
Normal file
363
vendor/github.com/hashicorp/go-immutable-radix/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,363 @@
|
||||
Mozilla Public License, version 2.0
|
||||
|
||||
1. Definitions
|
||||
|
||||
1.1. "Contributor"
|
||||
|
||||
means each individual or legal entity that creates, contributes to the
|
||||
creation of, or owns Covered Software.
|
||||
|
||||
1.2. "Contributor Version"
|
||||
|
||||
means the combination of the Contributions of others (if any) used by a
|
||||
Contributor and that particular Contributor's Contribution.
|
||||
|
||||
1.3. "Contribution"
|
||||
|
||||
means Covered Software of a particular Contributor.
|
||||
|
||||
1.4. "Covered Software"
|
||||
|
||||
means Source Code Form to which the initial Contributor has attached the
|
||||
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||
Modifications of such Source Code Form, in each case including portions
|
||||
thereof.
|
||||
|
||||
1.5. "Incompatible With Secondary Licenses"
|
||||
means
|
||||
|
||||
a. that the initial Contributor has attached the notice described in
|
||||
Exhibit B to the Covered Software; or
|
||||
|
||||
b. that the Covered Software was made available under the terms of
|
||||
version 1.1 or earlier of the License, but not also under the terms of
|
||||
a Secondary License.
|
||||
|
||||
1.6. "Executable Form"
|
||||
|
||||
means any form of the work other than Source Code Form.
|
||||
|
||||
1.7. "Larger Work"
|
||||
|
||||
means a work that combines Covered Software with other material, in a
|
||||
separate file or files, that is not Covered Software.
|
||||
|
||||
1.8. "License"
|
||||
|
||||
means this document.
|
||||
|
||||
1.9. "Licensable"
|
||||
|
||||
means having the right to grant, to the maximum extent possible, whether
|
||||
at the time of the initial grant or subsequently, any and all of the
|
||||
rights conveyed by this License.
|
||||
|
||||
1.10. "Modifications"
|
||||
|
||||
means any of the following:
|
||||
|
||||
a. any file in Source Code Form that results from an addition to,
|
||||
deletion from, or modification of the contents of Covered Software; or
|
||||
|
||||
b. any new file in Source Code Form that contains any Covered Software.
|
||||
|
||||
1.11. "Patent Claims" of a Contributor
|
||||
|
||||
means any patent claim(s), including without limitation, method,
|
||||
process, and apparatus claims, in any patent Licensable by such
|
||||
Contributor that would be infringed, but for the grant of the License,
|
||||
by the making, using, selling, offering for sale, having made, import,
|
||||
or transfer of either its Contributions or its Contributor Version.
|
||||
|
||||
1.12. "Secondary License"
|
||||
|
||||
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||
General Public License, Version 2.1, the GNU Affero General Public
|
||||
License, Version 3.0, or any later versions of those licenses.
|
||||
|
||||
1.13. "Source Code Form"
|
||||
|
||||
means the form of the work preferred for making modifications.
|
||||
|
||||
1.14. "You" (or "Your")
|
||||
|
||||
means an individual or a legal entity exercising rights under this
|
||||
License. For legal entities, "You" includes any entity that controls, is
|
||||
controlled by, or is under common control with You. For purposes of this
|
||||
definition, "control" means (a) the power, direct or indirect, to cause
|
||||
the direction or management of such entity, whether by contract or
|
||||
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||
outstanding shares or beneficial ownership of such entity.
|
||||
|
||||
|
||||
2. License Grants and Conditions
|
||||
|
||||
2.1. Grants
|
||||
|
||||
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||
non-exclusive license:
|
||||
|
||||
a. under intellectual property rights (other than patent or trademark)
|
||||
Licensable by such Contributor to use, reproduce, make available,
|
||||
modify, display, perform, distribute, and otherwise exploit its
|
||||
Contributions, either on an unmodified basis, with Modifications, or
|
||||
as part of a Larger Work; and
|
||||
|
||||
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||
sale, have made, import, and otherwise transfer either its
|
||||
Contributions or its Contributor Version.
|
||||
|
||||
2.2. Effective Date
|
||||
|
||||
The licenses granted in Section 2.1 with respect to any Contribution
|
||||
become effective for each Contribution on the date the Contributor first
|
||||
distributes such Contribution.
|
||||
|
||||
2.3. Limitations on Grant Scope
|
||||
|
||||
The licenses granted in this Section 2 are the only rights granted under
|
||||
this License. No additional rights or licenses will be implied from the
|
||||
distribution or licensing of Covered Software under this License.
|
||||
Notwithstanding Section 2.1(b) above, no patent license is granted by a
|
||||
Contributor:
|
||||
|
||||
a. for any code that a Contributor has removed from Covered Software; or
|
||||
|
||||
b. for infringements caused by: (i) Your and any other third party's
|
||||
modifications of Covered Software, or (ii) the combination of its
|
||||
Contributions with other software (except as part of its Contributor
|
||||
Version); or
|
||||
|
||||
c. under Patent Claims infringed by Covered Software in the absence of
|
||||
its Contributions.
|
||||
|
||||
This License does not grant any rights in the trademarks, service marks,
|
||||
or logos of any Contributor (except as may be necessary to comply with
|
||||
the notice requirements in Section 3.4).
|
||||
|
||||
2.4. Subsequent Licenses
|
||||
|
||||
No Contributor makes additional grants as a result of Your choice to
|
||||
distribute the Covered Software under a subsequent version of this
|
||||
License (see Section 10.2) or under the terms of a Secondary License (if
|
||||
permitted under the terms of Section 3.3).
|
||||
|
||||
2.5. Representation
|
||||
|
||||
Each Contributor represents that the Contributor believes its
|
||||
Contributions are its original creation(s) or it has sufficient rights to
|
||||
grant the rights to its Contributions conveyed by this License.
|
||||
|
||||
2.6. Fair Use
|
||||
|
||||
This License is not intended to limit any rights You have under
|
||||
applicable copyright doctrines of fair use, fair dealing, or other
|
||||
equivalents.
|
||||
|
||||
2.7. Conditions
|
||||
|
||||
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||
Section 2.1.
|
||||
|
||||
|
||||
3. Responsibilities
|
||||
|
||||
3.1. Distribution of Source Form
|
||||
|
||||
All distribution of Covered Software in Source Code Form, including any
|
||||
Modifications that You create or to which You contribute, must be under
|
||||
the terms of this License. You must inform recipients that the Source
|
||||
Code Form of the Covered Software is governed by the terms of this
|
||||
License, and how they can obtain a copy of this License. You may not
|
||||
attempt to alter or restrict the recipients' rights in the Source Code
|
||||
Form.
|
||||
|
||||
3.2. Distribution of Executable Form
|
||||
|
||||
If You distribute Covered Software in Executable Form then:
|
||||
|
||||
a. such Covered Software must also be made available in Source Code Form,
|
||||
as described in Section 3.1, and You must inform recipients of the
|
||||
Executable Form how they can obtain a copy of such Source Code Form by
|
||||
reasonable means in a timely manner, at a charge no more than the cost
|
||||
of distribution to the recipient; and
|
||||
|
||||
b. You may distribute such Executable Form under the terms of this
|
||||
License, or sublicense it under different terms, provided that the
|
||||
license for the Executable Form does not attempt to limit or alter the
|
||||
recipients' rights in the Source Code Form under this License.
|
||||
|
||||
3.3. Distribution of a Larger Work
|
||||
|
||||
You may create and distribute a Larger Work under terms of Your choice,
|
||||
provided that You also comply with the requirements of this License for
|
||||
the Covered Software. If the Larger Work is a combination of Covered
|
||||
Software with a work governed by one or more Secondary Licenses, and the
|
||||
Covered Software is not Incompatible With Secondary Licenses, this
|
||||
License permits You to additionally distribute such Covered Software
|
||||
under the terms of such Secondary License(s), so that the recipient of
|
||||
the Larger Work may, at their option, further distribute the Covered
|
||||
Software under the terms of either this License or such Secondary
|
||||
License(s).
|
||||
|
||||
3.4. Notices
|
||||
|
||||
You may not remove or alter the substance of any license notices
|
||||
(including copyright notices, patent notices, disclaimers of warranty, or
|
||||
limitations of liability) contained within the Source Code Form of the
|
||||
Covered Software, except that You may alter any license notices to the
|
||||
extent required to remedy known factual inaccuracies.
|
||||
|
||||
3.5. Application of Additional Terms
|
||||
|
||||
You may choose to offer, and to charge a fee for, warranty, support,
|
||||
indemnity or liability obligations to one or more recipients of Covered
|
||||
Software. However, You may do so only on Your own behalf, and not on
|
||||
behalf of any Contributor. You must make it absolutely clear that any
|
||||
such warranty, support, indemnity, or liability obligation is offered by
|
||||
You alone, and You hereby agree to indemnify every Contributor for any
|
||||
liability incurred by such Contributor as a result of warranty, support,
|
||||
indemnity or liability terms You offer. You may include additional
|
||||
disclaimers of warranty and limitations of liability specific to any
|
||||
jurisdiction.
|
||||
|
||||
4. Inability to Comply Due to Statute or Regulation
|
||||
|
||||
If it is impossible for You to comply with any of the terms of this License
|
||||
with respect to some or all of the Covered Software due to statute,
|
||||
judicial order, or regulation then You must: (a) comply with the terms of
|
||||
this License to the maximum extent possible; and (b) describe the
|
||||
limitations and the code they affect. Such description must be placed in a
|
||||
text file included with all distributions of the Covered Software under
|
||||
this License. Except to the extent prohibited by statute or regulation,
|
||||
such description must be sufficiently detailed for a recipient of ordinary
|
||||
skill to be able to understand it.
|
||||
|
||||
5. Termination
|
||||
|
||||
5.1. The rights granted under this License will terminate automatically if You
|
||||
fail to comply with any of its terms. However, if You become compliant,
|
||||
then the rights granted under this License from a particular Contributor
|
||||
are reinstated (a) provisionally, unless and until such Contributor
|
||||
explicitly and finally terminates Your grants, and (b) on an ongoing
|
||||
basis, if such Contributor fails to notify You of the non-compliance by
|
||||
some reasonable means prior to 60 days after You have come back into
|
||||
compliance. Moreover, Your grants from a particular Contributor are
|
||||
reinstated on an ongoing basis if such Contributor notifies You of the
|
||||
non-compliance by some reasonable means, this is the first time You have
|
||||
received notice of non-compliance with this License from such
|
||||
Contributor, and You become compliant prior to 30 days after Your receipt
|
||||
of the notice.
|
||||
|
||||
5.2. If You initiate litigation against any entity by asserting a patent
|
||||
infringement claim (excluding declaratory judgment actions,
|
||||
counter-claims, and cross-claims) alleging that a Contributor Version
|
||||
directly or indirectly infringes any patent, then the rights granted to
|
||||
You by any and all Contributors for the Covered Software under Section
|
||||
2.1 of this License shall terminate.
|
||||
|
||||
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||
license agreements (excluding distributors and resellers) which have been
|
||||
validly granted by You or Your distributors under this License prior to
|
||||
termination shall survive termination.
|
||||
|
||||
6. Disclaimer of Warranty
|
||||
|
||||
Covered Software is provided under this License on an "as is" basis,
|
||||
without warranty of any kind, either expressed, implied, or statutory,
|
||||
including, without limitation, warranties that the Covered Software is free
|
||||
of defects, merchantable, fit for a particular purpose or non-infringing.
|
||||
The entire risk as to the quality and performance of the Covered Software
|
||||
is with You. Should any Covered Software prove defective in any respect,
|
||||
You (not any Contributor) assume the cost of any necessary servicing,
|
||||
repair, or correction. This disclaimer of warranty constitutes an essential
|
||||
part of this License. No use of any Covered Software is authorized under
|
||||
this License except under this disclaimer.
|
||||
|
||||
7. Limitation of Liability
|
||||
|
||||
Under no circumstances and under no legal theory, whether tort (including
|
||||
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||
distributes Covered Software as permitted above, be liable to You for any
|
||||
direct, indirect, special, incidental, or consequential damages of any
|
||||
character including, without limitation, damages for lost profits, loss of
|
||||
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses, even if such party shall have been
|
||||
informed of the possibility of such damages. This limitation of liability
|
||||
shall not apply to liability for death or personal injury resulting from
|
||||
such party's negligence to the extent applicable law prohibits such
|
||||
limitation. Some jurisdictions do not allow the exclusion or limitation of
|
||||
incidental or consequential damages, so this exclusion and limitation may
|
||||
not apply to You.
|
||||
|
||||
8. Litigation
|
||||
|
||||
Any litigation relating to this License may be brought only in the courts
|
||||
of a jurisdiction where the defendant maintains its principal place of
|
||||
business and such litigation shall be governed by laws of that
|
||||
jurisdiction, without reference to its conflict-of-law provisions. Nothing
|
||||
in this Section shall prevent a party's ability to bring cross-claims or
|
||||
counter-claims.
|
||||
|
||||
9. Miscellaneous
|
||||
|
||||
This License represents the complete agreement concerning the subject
|
||||
matter hereof. If any provision of this License is held to be
|
||||
unenforceable, such provision shall be reformed only to the extent
|
||||
necessary to make it enforceable. Any law or regulation which provides that
|
||||
the language of a contract shall be construed against the drafter shall not
|
||||
be used to construe this License against a Contributor.
|
||||
|
||||
|
||||
10. Versions of the License
|
||||
|
||||
10.1. New Versions
|
||||
|
||||
Mozilla Foundation is the license steward. Except as provided in Section
|
||||
10.3, no one other than the license steward has the right to modify or
|
||||
publish new versions of this License. Each version will be given a
|
||||
distinguishing version number.
|
||||
|
||||
10.2. Effect of New Versions
|
||||
|
||||
You may distribute the Covered Software under the terms of the version
|
||||
of the License under which You originally received the Covered Software,
|
||||
or under the terms of any subsequent version published by the license
|
||||
steward.
|
||||
|
||||
10.3. Modified Versions
|
||||
|
||||
If you create software not governed by this License, and you want to
|
||||
create a new license for such software, you may create and use a
|
||||
modified version of this License if you rename the license and remove
|
||||
any references to the name of the license steward (except to note that
|
||||
such modified license differs from this License).
|
||||
|
||||
10.4. Distributing Source Code Form that is Incompatible With Secondary
|
||||
Licenses If You choose to distribute Source Code Form that is
|
||||
Incompatible With Secondary Licenses under the terms of this version of
|
||||
the License, the notice described in Exhibit B of this License must be
|
||||
attached.
|
||||
|
||||
Exhibit A - Source Code Form License Notice
|
||||
|
||||
This Source Code Form is subject to the
|
||||
terms of the Mozilla Public License, v.
|
||||
2.0. If a copy of the MPL was not
|
||||
distributed with this file, You can
|
||||
obtain one at
|
||||
http://mozilla.org/MPL/2.0/.
|
||||
|
||||
If it is not possible or desirable to put the notice in a particular file,
|
||||
then You may include the notice in a location (such as a LICENSE file in a
|
||||
relevant directory) where a recipient would be likely to look for such a
|
||||
notice.
|
||||
|
||||
You may add additional accurate notices of copyright ownership.
|
||||
|
||||
Exhibit B - "Incompatible With Secondary Licenses" Notice
|
||||
|
||||
This Source Code Form is "Incompatible
|
||||
With Secondary Licenses", as defined by
|
||||
the Mozilla Public License, v. 2.0.
|
||||
|
||||
21
vendor/github.com/hashicorp/go-immutable-radix/edges.go
generated
vendored
Normal file
21
vendor/github.com/hashicorp/go-immutable-radix/edges.go
generated
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
package iradix
|
||||
|
||||
import "sort"
|
||||
|
||||
type edges []edge
|
||||
|
||||
func (e edges) Len() int {
|
||||
return len(e)
|
||||
}
|
||||
|
||||
func (e edges) Less(i, j int) bool {
|
||||
return e[i].label < e[j].label
|
||||
}
|
||||
|
||||
func (e edges) Swap(i, j int) {
|
||||
e[i], e[j] = e[j], e[i]
|
||||
}
|
||||
|
||||
func (e edges) Sort() {
|
||||
sort.Sort(e)
|
||||
}
|
||||
662
vendor/github.com/hashicorp/go-immutable-radix/iradix.go
generated
vendored
Normal file
662
vendor/github.com/hashicorp/go-immutable-radix/iradix.go
generated
vendored
Normal file
@@ -0,0 +1,662 @@
|
||||
package iradix
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/golang-lru/simplelru"
|
||||
)
|
||||
|
||||
const (
|
||||
// defaultModifiedCache is the default size of the modified node
|
||||
// cache used per transaction. This is used to cache the updates
|
||||
// to the nodes near the root, while the leaves do not need to be
|
||||
// cached. This is important for very large transactions to prevent
|
||||
// the modified cache from growing to be enormous. This is also used
|
||||
// to set the max size of the mutation notify maps since those should
|
||||
// also be bounded in a similar way.
|
||||
defaultModifiedCache = 8192
|
||||
)
|
||||
|
||||
// Tree implements an immutable radix tree. This can be treated as a
|
||||
// Dictionary abstract data type. The main advantage over a standard
|
||||
// hash map is prefix-based lookups and ordered iteration. The immutability
|
||||
// means that it is safe to concurrently read from a Tree without any
|
||||
// coordination.
|
||||
type Tree struct {
|
||||
root *Node
|
||||
size int
|
||||
}
|
||||
|
||||
// New returns an empty Tree
|
||||
func New() *Tree {
|
||||
t := &Tree{
|
||||
root: &Node{
|
||||
mutateCh: make(chan struct{}),
|
||||
},
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// Len is used to return the number of elements in the tree
|
||||
func (t *Tree) Len() int {
|
||||
return t.size
|
||||
}
|
||||
|
||||
// Txn is a transaction on the tree. This transaction is applied
|
||||
// atomically and returns a new tree when committed. A transaction
|
||||
// is not thread safe, and should only be used by a single goroutine.
|
||||
type Txn struct {
|
||||
// root is the modified root for the transaction.
|
||||
root *Node
|
||||
|
||||
// snap is a snapshot of the root node for use if we have to run the
|
||||
// slow notify algorithm.
|
||||
snap *Node
|
||||
|
||||
// size tracks the size of the tree as it is modified during the
|
||||
// transaction.
|
||||
size int
|
||||
|
||||
// writable is a cache of writable nodes that have been created during
|
||||
// the course of the transaction. This allows us to re-use the same
|
||||
// nodes for further writes and avoid unnecessary copies of nodes that
|
||||
// have never been exposed outside the transaction. This will only hold
|
||||
// up to defaultModifiedCache number of entries.
|
||||
writable *simplelru.LRU
|
||||
|
||||
// trackChannels is used to hold channels that need to be notified to
|
||||
// signal mutation of the tree. This will only hold up to
|
||||
// defaultModifiedCache number of entries, after which we will set the
|
||||
// trackOverflow flag, which will cause us to use a more expensive
|
||||
// algorithm to perform the notifications. Mutation tracking is only
|
||||
// performed if trackMutate is true.
|
||||
trackChannels map[chan struct{}]struct{}
|
||||
trackOverflow bool
|
||||
trackMutate bool
|
||||
}
|
||||
|
||||
// Txn starts a new transaction that can be used to mutate the tree
|
||||
func (t *Tree) Txn() *Txn {
|
||||
txn := &Txn{
|
||||
root: t.root,
|
||||
snap: t.root,
|
||||
size: t.size,
|
||||
}
|
||||
return txn
|
||||
}
|
||||
|
||||
// TrackMutate can be used to toggle if mutations are tracked. If this is enabled
|
||||
// then notifications will be issued for affected internal nodes and leaves when
|
||||
// the transaction is committed.
|
||||
func (t *Txn) TrackMutate(track bool) {
|
||||
t.trackMutate = track
|
||||
}
|
||||
|
||||
// trackChannel safely attempts to track the given mutation channel, setting the
|
||||
// overflow flag if we can no longer track any more. This limits the amount of
|
||||
// state that will accumulate during a transaction and we have a slower algorithm
|
||||
// to switch to if we overflow.
|
||||
func (t *Txn) trackChannel(ch chan struct{}) {
|
||||
// In overflow, make sure we don't store any more objects.
|
||||
if t.trackOverflow {
|
||||
return
|
||||
}
|
||||
|
||||
// If this would overflow the state we reject it and set the flag (since
|
||||
// we aren't tracking everything that's required any longer).
|
||||
if len(t.trackChannels) >= defaultModifiedCache {
|
||||
// Mark that we are in the overflow state
|
||||
t.trackOverflow = true
|
||||
|
||||
// Clear the map so that the channels can be garbage collected. It is
|
||||
// safe to do this since we have already overflowed and will be using
|
||||
// the slow notify algorithm.
|
||||
t.trackChannels = nil
|
||||
return
|
||||
}
|
||||
|
||||
// Create the map on the fly when we need it.
|
||||
if t.trackChannels == nil {
|
||||
t.trackChannels = make(map[chan struct{}]struct{})
|
||||
}
|
||||
|
||||
// Otherwise we are good to track it.
|
||||
t.trackChannels[ch] = struct{}{}
|
||||
}
|
||||
|
||||
// writeNode returns a node to be modified, if the current node has already been
|
||||
// modified during the course of the transaction, it is used in-place. Set
|
||||
// forLeafUpdate to true if you are getting a write node to update the leaf,
|
||||
// which will set leaf mutation tracking appropriately as well.
|
||||
func (t *Txn) writeNode(n *Node, forLeafUpdate bool) *Node {
|
||||
// Ensure the writable set exists.
|
||||
if t.writable == nil {
|
||||
lru, err := simplelru.NewLRU(defaultModifiedCache, nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
t.writable = lru
|
||||
}
|
||||
|
||||
// If this node has already been modified, we can continue to use it
|
||||
// during this transaction. We know that we don't need to track it for
|
||||
// a node update since the node is writable, but if this is for a leaf
|
||||
// update we track it, in case the initial write to this node didn't
|
||||
// update the leaf.
|
||||
if _, ok := t.writable.Get(n); ok {
|
||||
if t.trackMutate && forLeafUpdate && n.leaf != nil {
|
||||
t.trackChannel(n.leaf.mutateCh)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// Mark this node as being mutated.
|
||||
if t.trackMutate {
|
||||
t.trackChannel(n.mutateCh)
|
||||
}
|
||||
|
||||
// Mark its leaf as being mutated, if appropriate.
|
||||
if t.trackMutate && forLeafUpdate && n.leaf != nil {
|
||||
t.trackChannel(n.leaf.mutateCh)
|
||||
}
|
||||
|
||||
// Copy the existing node. If you have set forLeafUpdate it will be
|
||||
// safe to replace this leaf with another after you get your node for
|
||||
// writing. You MUST replace it, because the channel associated with
|
||||
// this leaf will be closed when this transaction is committed.
|
||||
nc := &Node{
|
||||
mutateCh: make(chan struct{}),
|
||||
leaf: n.leaf,
|
||||
}
|
||||
if n.prefix != nil {
|
||||
nc.prefix = make([]byte, len(n.prefix))
|
||||
copy(nc.prefix, n.prefix)
|
||||
}
|
||||
if len(n.edges) != 0 {
|
||||
nc.edges = make([]edge, len(n.edges))
|
||||
copy(nc.edges, n.edges)
|
||||
}
|
||||
|
||||
// Mark this node as writable.
|
||||
t.writable.Add(nc, nil)
|
||||
return nc
|
||||
}
|
||||
|
||||
// Visit all the nodes in the tree under n, and add their mutateChannels to the transaction
|
||||
// Returns the size of the subtree visited
|
||||
func (t *Txn) trackChannelsAndCount(n *Node) int {
|
||||
// Count only leaf nodes
|
||||
leaves := 0
|
||||
if n.leaf != nil {
|
||||
leaves = 1
|
||||
}
|
||||
// Mark this node as being mutated.
|
||||
if t.trackMutate {
|
||||
t.trackChannel(n.mutateCh)
|
||||
}
|
||||
|
||||
// Mark its leaf as being mutated, if appropriate.
|
||||
if t.trackMutate && n.leaf != nil {
|
||||
t.trackChannel(n.leaf.mutateCh)
|
||||
}
|
||||
|
||||
// Recurse on the children
|
||||
for _, e := range n.edges {
|
||||
leaves += t.trackChannelsAndCount(e.node)
|
||||
}
|
||||
return leaves
|
||||
}
|
||||
|
||||
// mergeChild is called to collapse the given node with its child. This is only
|
||||
// called when the given node is not a leaf and has a single edge.
|
||||
func (t *Txn) mergeChild(n *Node) {
|
||||
// Mark the child node as being mutated since we are about to abandon
|
||||
// it. We don't need to mark the leaf since we are retaining it if it
|
||||
// is there.
|
||||
e := n.edges[0]
|
||||
child := e.node
|
||||
if t.trackMutate {
|
||||
t.trackChannel(child.mutateCh)
|
||||
}
|
||||
|
||||
// Merge the nodes.
|
||||
n.prefix = concat(n.prefix, child.prefix)
|
||||
n.leaf = child.leaf
|
||||
if len(child.edges) != 0 {
|
||||
n.edges = make([]edge, len(child.edges))
|
||||
copy(n.edges, child.edges)
|
||||
} else {
|
||||
n.edges = nil
|
||||
}
|
||||
}
|
||||
|
||||
// insert does a recursive insertion
|
||||
func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface{}, bool) {
|
||||
// Handle key exhaustion
|
||||
if len(search) == 0 {
|
||||
var oldVal interface{}
|
||||
didUpdate := false
|
||||
if n.isLeaf() {
|
||||
oldVal = n.leaf.val
|
||||
didUpdate = true
|
||||
}
|
||||
|
||||
nc := t.writeNode(n, true)
|
||||
nc.leaf = &leafNode{
|
||||
mutateCh: make(chan struct{}),
|
||||
key: k,
|
||||
val: v,
|
||||
}
|
||||
return nc, oldVal, didUpdate
|
||||
}
|
||||
|
||||
// Look for the edge
|
||||
idx, child := n.getEdge(search[0])
|
||||
|
||||
// No edge, create one
|
||||
if child == nil {
|
||||
e := edge{
|
||||
label: search[0],
|
||||
node: &Node{
|
||||
mutateCh: make(chan struct{}),
|
||||
leaf: &leafNode{
|
||||
mutateCh: make(chan struct{}),
|
||||
key: k,
|
||||
val: v,
|
||||
},
|
||||
prefix: search,
|
||||
},
|
||||
}
|
||||
nc := t.writeNode(n, false)
|
||||
nc.addEdge(e)
|
||||
return nc, nil, false
|
||||
}
|
||||
|
||||
// Determine longest prefix of the search key on match
|
||||
commonPrefix := longestPrefix(search, child.prefix)
|
||||
if commonPrefix == len(child.prefix) {
|
||||
search = search[commonPrefix:]
|
||||
newChild, oldVal, didUpdate := t.insert(child, k, search, v)
|
||||
if newChild != nil {
|
||||
nc := t.writeNode(n, false)
|
||||
nc.edges[idx].node = newChild
|
||||
return nc, oldVal, didUpdate
|
||||
}
|
||||
return nil, oldVal, didUpdate
|
||||
}
|
||||
|
||||
// Split the node
|
||||
nc := t.writeNode(n, false)
|
||||
splitNode := &Node{
|
||||
mutateCh: make(chan struct{}),
|
||||
prefix: search[:commonPrefix],
|
||||
}
|
||||
nc.replaceEdge(edge{
|
||||
label: search[0],
|
||||
node: splitNode,
|
||||
})
|
||||
|
||||
// Restore the existing child node
|
||||
modChild := t.writeNode(child, false)
|
||||
splitNode.addEdge(edge{
|
||||
label: modChild.prefix[commonPrefix],
|
||||
node: modChild,
|
||||
})
|
||||
modChild.prefix = modChild.prefix[commonPrefix:]
|
||||
|
||||
// Create a new leaf node
|
||||
leaf := &leafNode{
|
||||
mutateCh: make(chan struct{}),
|
||||
key: k,
|
||||
val: v,
|
||||
}
|
||||
|
||||
// If the new key is a subset, add to to this node
|
||||
search = search[commonPrefix:]
|
||||
if len(search) == 0 {
|
||||
splitNode.leaf = leaf
|
||||
return nc, nil, false
|
||||
}
|
||||
|
||||
// Create a new edge for the node
|
||||
splitNode.addEdge(edge{
|
||||
label: search[0],
|
||||
node: &Node{
|
||||
mutateCh: make(chan struct{}),
|
||||
leaf: leaf,
|
||||
prefix: search,
|
||||
},
|
||||
})
|
||||
return nc, nil, false
|
||||
}
|
||||
|
||||
// delete does a recursive deletion of the given key, returning the new
// (possibly copied) node for this position and the removed leaf, or
// (nil, nil) when the key is not present under n.
func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) {
	// Check for key exhaustion
	if len(search) == 0 {
		if !n.isLeaf() {
			return nil, nil
		}
		// Copy the pointer in case we are in a transaction that already
		// modified this node since the node will be reused. Any changes
		// made to the node will not affect returning the original leaf
		// value.
		oldLeaf := n.leaf

		// Remove the leaf node
		nc := t.writeNode(n, true)
		nc.leaf = nil

		// Check if this node should be merged
		if n != t.root && len(nc.edges) == 1 {
			t.mergeChild(nc)
		}
		return nc, oldLeaf
	}

	// Look for an edge
	label := search[0]
	idx, child := n.getEdge(label)
	if child == nil || !bytes.HasPrefix(search, child.prefix) {
		return nil, nil
	}

	// Consume the search prefix
	search = search[len(child.prefix):]
	newChild, leaf := t.delete(n, child, search)
	if newChild == nil {
		return nil, nil
	}

	// Copy this node. WATCH OUT - it's safe to pass "false" here because we
	// will only ADD a leaf via nc.mergeChild() if there isn't one due to
	// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
	// so be careful if you change any of the logic here.
	nc := t.writeNode(n, false)

	// Delete the edge if the node has no edges
	if newChild.leaf == nil && len(newChild.edges) == 0 {
		nc.delEdge(label)
		if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
			t.mergeChild(nc)
		}
	} else {
		nc.edges[idx].node = newChild
	}
	return nc, leaf
}
|
||||
|
||||
// deletePrefix does a recursive deletion of everything at or below the
// given prefix, returning the new (possibly copied) node and the number
// of leaves removed. Returns (nil, 0) when nothing under n matched.
func (t *Txn) deletePrefix(parent, n *Node, search []byte) (*Node, int) {
	// Check for key exhaustion
	if len(search) == 0 {
		nc := t.writeNode(n, true)
		if n.isLeaf() {
			nc.leaf = nil
		}
		nc.edges = nil
		return nc, t.trackChannelsAndCount(n)
	}

	// Look for an edge
	label := search[0]
	idx, child := n.getEdge(label)
	// We make sure that either the child node's prefix starts with the search term, or the search term starts with the child node's prefix
	// Need to do both so that we can delete prefixes that don't correspond to any node in the tree
	if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) {
		return nil, 0
	}

	// Consume the search prefix
	if len(child.prefix) > len(search) {
		search = []byte("")
	} else {
		search = search[len(child.prefix):]
	}
	newChild, numDeletions := t.deletePrefix(n, child, search)
	if newChild == nil {
		return nil, 0
	}
	// Copy this node. WATCH OUT - it's safe to pass "false" here because we
	// will only ADD a leaf via nc.mergeChild() if there isn't one due to
	// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
	// so be careful if you change any of the logic here.

	nc := t.writeNode(n, false)

	// Delete the edge if the node has no edges
	if newChild.leaf == nil && len(newChild.edges) == 0 {
		nc.delEdge(label)
		if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
			t.mergeChild(nc)
		}
	} else {
		nc.edges[idx].node = newChild
	}
	return nc, numDeletions
}
|
||||
|
||||
// Insert is used to add or update a given key. The return provides
|
||||
// the previous value and a bool indicating if any was set.
|
||||
func (t *Txn) Insert(k []byte, v interface{}) (interface{}, bool) {
|
||||
newRoot, oldVal, didUpdate := t.insert(t.root, k, k, v)
|
||||
if newRoot != nil {
|
||||
t.root = newRoot
|
||||
}
|
||||
if !didUpdate {
|
||||
t.size++
|
||||
}
|
||||
return oldVal, didUpdate
|
||||
}
|
||||
|
||||
// Delete is used to delete a given key. Returns the old value if any,
|
||||
// and a bool indicating if the key was set.
|
||||
func (t *Txn) Delete(k []byte) (interface{}, bool) {
|
||||
newRoot, leaf := t.delete(nil, t.root, k)
|
||||
if newRoot != nil {
|
||||
t.root = newRoot
|
||||
}
|
||||
if leaf != nil {
|
||||
t.size--
|
||||
return leaf.val, true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// DeletePrefix is used to delete an entire subtree that matches the prefix
|
||||
// This will delete all nodes under that prefix
|
||||
func (t *Txn) DeletePrefix(prefix []byte) bool {
|
||||
newRoot, numDeletions := t.deletePrefix(nil, t.root, prefix)
|
||||
if newRoot != nil {
|
||||
t.root = newRoot
|
||||
t.size = t.size - numDeletions
|
||||
return true
|
||||
}
|
||||
return false
|
||||
|
||||
}
|
||||
|
||||
// Root returns the current root of the radix tree within this
// transaction. The root is not safe across insert and delete operations,
// but can be used to read the current state during a transaction.
func (t *Txn) Root() *Node {
	return t.root
}

// Get is used to lookup a specific key, returning
// the value and if it was found. Lookups within a transaction see the
// transaction's uncommitted modifications.
func (t *Txn) Get(k []byte) (interface{}, bool) {
	return t.root.Get(k)
}

// GetWatch is used to lookup a specific key, returning
// the watch channel, value and if it was found
func (t *Txn) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) {
	return t.root.GetWatch(k)
}
|
||||
|
||||
// Commit is used to finalize the transaction and return a new tree. If mutation
|
||||
// tracking is turned on then notifications will also be issued.
|
||||
func (t *Txn) Commit() *Tree {
|
||||
nt := t.CommitOnly()
|
||||
if t.trackMutate {
|
||||
t.Notify()
|
||||
}
|
||||
return nt
|
||||
}
|
||||
|
||||
// CommitOnly is used to finalize the transaction and return a new tree, but
// does not issue any notifications until Notify is called.
func (t *Txn) CommitOnly() *Tree {
	nt := &Tree{t.root, t.size}
	// Drop the writable-node cache: any further mutation through this Txn
	// must copy nodes again rather than touching the committed tree.
	t.writable = nil
	return nt
}
|
||||
|
||||
// slowNotify does a complete comparison of the before and after trees in order
// to trigger notifications. This doesn't require any additional state but it
// is very expensive to compute. Both raw iterators yield nodes in path order,
// so this is an ordered merge walk of the two trees.
func (t *Txn) slowNotify() {
	snapIter := t.snap.rawIterator()
	rootIter := t.root.rawIterator()
	for snapIter.Front() != nil || rootIter.Front() != nil {
		// If we've exhausted the nodes in the old snapshot, we know
		// there's nothing remaining to notify.
		if snapIter.Front() == nil {
			return
		}
		snapElem := snapIter.Front()

		// If we've exhausted the nodes in the new root, we know we need
		// to invalidate everything that remains in the old snapshot. We
		// know from the loop condition there's something in the old
		// snapshot.
		if rootIter.Front() == nil {
			close(snapElem.mutateCh)
			if snapElem.isLeaf() {
				close(snapElem.leaf.mutateCh)
			}
			snapIter.Next()
			continue
		}

		// Do one string compare so we can check the various conditions
		// below without repeating the compare.
		cmp := strings.Compare(snapIter.Path(), rootIter.Path())

		// If the snapshot is behind the root, then we must have deleted
		// this node during the transaction.
		if cmp < 0 {
			close(snapElem.mutateCh)
			if snapElem.isLeaf() {
				close(snapElem.leaf.mutateCh)
			}
			snapIter.Next()
			continue
		}

		// If the snapshot is ahead of the root, then we must have added
		// this node during the transaction.
		if cmp > 0 {
			rootIter.Next()
			continue
		}

		// If we have the same path, then we need to see if we mutated a
		// node and possibly the leaf.
		rootElem := rootIter.Front()
		if snapElem != rootElem {
			close(snapElem.mutateCh)
			if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) {
				close(snapElem.leaf.mutateCh)
			}
		}
		snapIter.Next()
		rootIter.Next()
	}
}
|
||||
|
||||
// Notify is used along with TrackMutate to trigger notifications. This must
// only be done once a transaction is committed via CommitOnly, and it is called
// automatically by Commit.
func (t *Txn) Notify() {
	if !t.trackMutate {
		return
	}

	// If we've overflowed the tracking state we can't use it in any way and
	// need to do a full tree compare.
	if t.trackOverflow {
		t.slowNotify()
	} else {
		// Fast path: close exactly the channels recorded during mutation.
		for ch := range t.trackChannels {
			close(ch)
		}
	}

	// Clean up the tracking state so that a re-notify is safe (will trigger
	// the else clause above which will be a no-op).
	t.trackChannels = nil
	t.trackOverflow = false
}
|
||||
|
||||
// Insert is used to add or update a given key. The return provides
|
||||
// the new tree, previous value and a bool indicating if any was set.
|
||||
func (t *Tree) Insert(k []byte, v interface{}) (*Tree, interface{}, bool) {
|
||||
txn := t.Txn()
|
||||
old, ok := txn.Insert(k, v)
|
||||
return txn.Commit(), old, ok
|
||||
}
|
||||
|
||||
// Delete is used to delete a given key. Returns the new tree,
|
||||
// old value if any, and a bool indicating if the key was set.
|
||||
func (t *Tree) Delete(k []byte) (*Tree, interface{}, bool) {
|
||||
txn := t.Txn()
|
||||
old, ok := txn.Delete(k)
|
||||
return txn.Commit(), old, ok
|
||||
}
|
||||
|
||||
// DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree,
|
||||
// and a bool indicating if the prefix matched any nodes
|
||||
func (t *Tree) DeletePrefix(k []byte) (*Tree, bool) {
|
||||
txn := t.Txn()
|
||||
ok := txn.DeletePrefix(k)
|
||||
return txn.Commit(), ok
|
||||
}
|
||||
|
||||
// Root returns the root node of the tree which can be used for richer
// query operations.
func (t *Tree) Root() *Node {
	return t.root
}

// Get is used to lookup a specific key, returning
// the value and if it was found
func (t *Tree) Get(k []byte) (interface{}, bool) {
	return t.root.Get(k)
}
|
||||
|
||||
// longestPrefix finds the length of the shared prefix
// of two strings
func longestPrefix(k1, k2 []byte) int {
	// Only compare up to the shorter of the two keys.
	limit := len(k1)
	if len(k2) < limit {
		limit = len(k2)
	}
	for i := 0; i < limit; i++ {
		if k1[i] != k2[i] {
			return i
		}
	}
	return limit
}
|
||||
|
||||
// concat two byte slices, returning a third new copy
func concat(a, b []byte) []byte {
	// Allocate once with the exact capacity, then append both halves.
	out := make([]byte, 0, len(a)+len(b))
	out = append(out, a...)
	return append(out, b...)
}
|
||||
91
vendor/github.com/hashicorp/go-immutable-radix/iter.go
generated
vendored
Normal file
91
vendor/github.com/hashicorp/go-immutable-radix/iter.go
generated
vendored
Normal file
@@ -0,0 +1,91 @@
|
||||
package iradix
|
||||
|
||||
import "bytes"
|
||||
|
||||
// Iterator is used to iterate over a set of nodes
// in pre-order
type Iterator struct {
	// node is the subtree root the iteration starts from
	node *Node
	// stack holds the DFS frontier as slices of pending edges
	stack []edges
}
|
||||
|
||||
// SeekPrefixWatch is used to seek the iterator to a given prefix
// and returns the watch channel of the finest granularity
func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) {
	// Wipe the stack
	i.stack = nil
	n := i.node
	watch = n.mutateCh
	search := prefix
	for {
		// Check for key exhaustion
		if len(search) == 0 {
			i.node = n
			return
		}

		// Look for an edge
		_, n = n.getEdge(search[0])
		if n == nil {
			i.node = nil
			return
		}

		// Update to the finest granularity as the search makes progress
		watch = n.mutateCh

		// Consume the search prefix
		if bytes.HasPrefix(search, n.prefix) {
			search = search[len(n.prefix):]

		} else if bytes.HasPrefix(n.prefix, search) {
			// The prefix ends inside this node; everything under n matches.
			i.node = n
			return
		} else {
			// Diverged: nothing in the tree carries this prefix.
			i.node = nil
			return
		}
	}
}
|
||||
|
||||
// SeekPrefix is used to seek the iterator to a given prefix
// (as SeekPrefixWatch, but discarding the watch channel).
func (i *Iterator) SeekPrefix(prefix []byte) {
	i.SeekPrefixWatch(prefix)
}
|
||||
|
||||
// Next returns the next node in order. It performs a pre-order DFS using
// an explicit stack of edge slices; keys come out in sorted order.
func (i *Iterator) Next() ([]byte, interface{}, bool) {
	// Initialize our stack if needed
	if i.stack == nil && i.node != nil {
		i.stack = []edges{
			edges{
				edge{node: i.node},
			},
		}
	}

	for len(i.stack) > 0 {
		// Inspect the last element of the stack
		n := len(i.stack)
		last := i.stack[n-1]
		elem := last[0].node

		// Update the stack
		if len(last) > 1 {
			i.stack[n-1] = last[1:]
		} else {
			i.stack = i.stack[:n-1]
		}

		// Push the edges onto the frontier
		if len(elem.edges) > 0 {
			i.stack = append(i.stack, elem.edges)
		}

		// Return the leaf values if any
		if elem.leaf != nil {
			return elem.leaf.key, elem.leaf.val, true
		}
	}
	// Iteration exhausted.
	return nil, nil, false
}
|
||||
292
vendor/github.com/hashicorp/go-immutable-radix/node.go
generated
vendored
Normal file
292
vendor/github.com/hashicorp/go-immutable-radix/node.go
generated
vendored
Normal file
@@ -0,0 +1,292 @@
|
||||
package iradix
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// WalkFn is used when walking the tree. Takes a
// key and value, returning if iteration should
// be terminated.
type WalkFn func(k []byte, v interface{}) bool

// leafNode is used to represent a value
type leafNode struct {
	// mutateCh is closed when this leaf is mutated or removed
	mutateCh chan struct{}
	// key is the full key this leaf was stored under
	key []byte
	// val is the stored value
	val interface{}
}

// edge is used to represent an edge node
type edge struct {
	// label is the first byte of the child's prefix
	label byte
	// node is the child reached through this edge
	node *Node
}

// Node is an immutable node in the radix tree
type Node struct {
	// mutateCh is closed if this node is modified
	mutateCh chan struct{}

	// leaf is used to store possible leaf
	leaf *leafNode

	// prefix is the common prefix we ignore
	prefix []byte

	// Edges should be stored in-order for iteration.
	// We avoid a fully materialized slice to save memory,
	// since in most cases we expect to be sparse
	edges edges
}
|
||||
|
||||
// isLeaf reports whether this node stores a value.
func (n *Node) isLeaf() bool {
	return n.leaf != nil
}
|
||||
|
||||
func (n *Node) addEdge(e edge) {
|
||||
num := len(n.edges)
|
||||
idx := sort.Search(num, func(i int) bool {
|
||||
return n.edges[i].label >= e.label
|
||||
})
|
||||
n.edges = append(n.edges, e)
|
||||
if idx != num {
|
||||
copy(n.edges[idx+1:], n.edges[idx:num])
|
||||
n.edges[idx] = e
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Node) replaceEdge(e edge) {
|
||||
num := len(n.edges)
|
||||
idx := sort.Search(num, func(i int) bool {
|
||||
return n.edges[i].label >= e.label
|
||||
})
|
||||
if idx < num && n.edges[idx].label == e.label {
|
||||
n.edges[idx].node = e.node
|
||||
return
|
||||
}
|
||||
panic("replacing missing edge")
|
||||
}
|
||||
|
||||
func (n *Node) getEdge(label byte) (int, *Node) {
|
||||
num := len(n.edges)
|
||||
idx := sort.Search(num, func(i int) bool {
|
||||
return n.edges[i].label >= label
|
||||
})
|
||||
if idx < num && n.edges[idx].label == label {
|
||||
return idx, n.edges[idx].node
|
||||
}
|
||||
return -1, nil
|
||||
}
|
||||
|
||||
func (n *Node) delEdge(label byte) {
|
||||
num := len(n.edges)
|
||||
idx := sort.Search(num, func(i int) bool {
|
||||
return n.edges[i].label >= label
|
||||
})
|
||||
if idx < num && n.edges[idx].label == label {
|
||||
copy(n.edges[idx:], n.edges[idx+1:])
|
||||
n.edges[len(n.edges)-1] = edge{}
|
||||
n.edges = n.edges[:len(n.edges)-1]
|
||||
}
|
||||
}
|
||||
|
||||
// GetWatch looks up k, returning the finest-granularity watch channel, the
// value, and whether the key was found. The returned channel is closed when
// the relevant region of the tree is next mutated.
func (n *Node) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) {
	search := k
	watch := n.mutateCh
	for {
		// Check for key exhaustion
		if len(search) == 0 {
			if n.isLeaf() {
				return n.leaf.mutateCh, n.leaf.val, true
			}
			break
		}

		// Look for an edge
		_, n = n.getEdge(search[0])
		if n == nil {
			break
		}

		// Update to the finest granularity as the search makes progress
		watch = n.mutateCh

		// Consume the search prefix
		if bytes.HasPrefix(search, n.prefix) {
			search = search[len(n.prefix):]
		} else {
			break
		}
	}
	// Not found: return the deepest watch channel reached.
	return watch, nil, false
}
|
||||
|
||||
// Get is used to lookup a specific key, returning
// the value and if it was found (GetWatch with the channel discarded).
func (n *Node) Get(k []byte) (interface{}, bool) {
	_, val, ok := n.GetWatch(k)
	return val, ok
}
|
||||
|
||||
// LongestPrefix is like Get, but instead of an
// exact match, it will return the longest prefix match.
func (n *Node) LongestPrefix(k []byte) ([]byte, interface{}, bool) {
	// last remembers the deepest leaf seen on the way down.
	var last *leafNode
	search := k
	for {
		// Look for a leaf node
		if n.isLeaf() {
			last = n.leaf
		}

		// Check for key exhaustion
		if len(search) == 0 {
			break
		}

		// Look for an edge
		_, n = n.getEdge(search[0])
		if n == nil {
			break
		}

		// Consume the search prefix
		if bytes.HasPrefix(search, n.prefix) {
			search = search[len(n.prefix):]
		} else {
			break
		}
	}
	if last != nil {
		return last.key, last.val, true
	}
	return nil, nil, false
}
|
||||
|
||||
// Minimum is used to return the minimum value in the tree
|
||||
func (n *Node) Minimum() ([]byte, interface{}, bool) {
|
||||
for {
|
||||
if n.isLeaf() {
|
||||
return n.leaf.key, n.leaf.val, true
|
||||
}
|
||||
if len(n.edges) > 0 {
|
||||
n = n.edges[0].node
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
// Maximum is used to return the maximum value in the tree
|
||||
func (n *Node) Maximum() ([]byte, interface{}, bool) {
|
||||
for {
|
||||
if num := len(n.edges); num > 0 {
|
||||
n = n.edges[num-1].node
|
||||
continue
|
||||
}
|
||||
if n.isLeaf() {
|
||||
return n.leaf.key, n.leaf.val, true
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
return nil, nil, false
|
||||
}
|
||||
|
||||
// Iterator is used to return an iterator at
// the given node to walk the tree
func (n *Node) Iterator() *Iterator {
	return &Iterator{node: n}
}
|
||||
|
||||
// rawIterator is used to return a raw iterator at the given node to walk the
// tree.
func (n *Node) rawIterator() *rawIterator {
	iter := &rawIterator{node: n}
	// Prime the iterator so Front() immediately yields the first node.
	iter.Next()
	return iter
}
|
||||
|
||||
// Walk is used to walk the tree in pre-order, stopping early if fn
// returns true.
func (n *Node) Walk(fn WalkFn) {
	recursiveWalk(n, fn)
}
|
||||
|
||||
// WalkPrefix is used to walk the tree under a prefix
func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) {
	search := prefix
	for {
		// Check for key exhaustion
		if len(search) == 0 {
			recursiveWalk(n, fn)
			return
		}

		// Look for an edge
		_, n = n.getEdge(search[0])
		if n == nil {
			break
		}

		// Consume the search prefix
		if bytes.HasPrefix(search, n.prefix) {
			search = search[len(n.prefix):]

		} else if bytes.HasPrefix(n.prefix, search) {
			// Child may be under our search prefix
			recursiveWalk(n, fn)
			return
		} else {
			// Diverged: no keys carry this prefix.
			break
		}
	}
}
|
||||
|
||||
// WalkPath is used to walk the tree, but only visiting nodes
// from the root down to a given leaf. Where WalkPrefix walks
// all the entries *under* the given prefix, this walks the
// entries *above* the given prefix.
func (n *Node) WalkPath(path []byte, fn WalkFn) {
	search := path
	for {
		// Visit the leaf values if any
		if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
			return
		}

		// Check for key exhaustion
		if len(search) == 0 {
			return
		}

		// Look for an edge
		_, n = n.getEdge(search[0])
		if n == nil {
			return
		}

		// Consume the search prefix
		if bytes.HasPrefix(search, n.prefix) {
			search = search[len(n.prefix):]
		} else {
			break
		}
	}
}
|
||||
|
||||
// recursiveWalk is used to do a pre-order walk of a node
|
||||
// recursively. Returns true if the walk should be aborted
|
||||
func recursiveWalk(n *Node, fn WalkFn) bool {
|
||||
// Visit the leaf values if any
|
||||
if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
|
||||
return true
|
||||
}
|
||||
|
||||
// Recurse on the children
|
||||
for _, e := range n.edges {
|
||||
if recursiveWalk(e.node, fn) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
78
vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go
generated
vendored
Normal file
78
vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go
generated
vendored
Normal file
@@ -0,0 +1,78 @@
|
||||
package iradix
|
||||
|
||||
// rawIterator visits each of the nodes in the tree, even the ones that are not
// leaves. It keeps track of the effective path (what a leaf at a given node
// would be called), which is useful for comparing trees.
type rawIterator struct {
	// node is the starting node in the tree for the iterator.
	node *Node

	// stack keeps track of edges in the frontier.
	stack []rawStackEntry

	// pos is the current position of the iterator.
	pos *Node

	// path is the effective path of the current iterator position,
	// regardless of whether the current node is a leaf.
	path string
}

// rawStackEntry is used to keep track of the cumulative common path as well as
// its associated edges in the frontier.
type rawStackEntry struct {
	// path is the prefix accumulated down to (but not including) these edges.
	path string
	// edges are the children still to be visited at this level.
	edges edges
}
|
||||
|
||||
// Front returns the current node that has been iterated to.
func (i *rawIterator) Front() *Node {
	return i.pos
}

// Path returns the effective path of the current node, even if it's not actually
// a leaf.
func (i *rawIterator) Path() string {
	return i.path
}
|
||||
|
||||
// Next advances the iterator to the next node.
|
||||
func (i *rawIterator) Next() {
|
||||
// Initialize our stack if needed.
|
||||
if i.stack == nil && i.node != nil {
|
||||
i.stack = []rawStackEntry{
|
||||
rawStackEntry{
|
||||
edges: edges{
|
||||
edge{node: i.node},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
for len(i.stack) > 0 {
|
||||
// Inspect the last element of the stack.
|
||||
n := len(i.stack)
|
||||
last := i.stack[n-1]
|
||||
elem := last.edges[0].node
|
||||
|
||||
// Update the stack.
|
||||
if len(last.edges) > 1 {
|
||||
i.stack[n-1].edges = last.edges[1:]
|
||||
} else {
|
||||
i.stack = i.stack[:n-1]
|
||||
}
|
||||
|
||||
// Push the edges onto the frontier.
|
||||
if len(elem.edges) > 0 {
|
||||
path := last.path + string(elem.prefix)
|
||||
i.stack = append(i.stack, rawStackEntry{path, elem.edges})
|
||||
}
|
||||
|
||||
i.pos = elem
|
||||
i.path = last.path + string(elem.prefix)
|
||||
return
|
||||
}
|
||||
|
||||
i.pos = nil
|
||||
i.path = ""
|
||||
}
|
||||
12
vendor/manifest
vendored
12
vendor/manifest
vendored
@@ -101,8 +101,8 @@
|
||||
{
|
||||
"importpath": "github.com/armon/go-metrics",
|
||||
"repository": "https://github.com/armon/go-metrics",
|
||||
"vcs": "",
|
||||
"revision": "6c5fa0d8f48f4661c9ba8709799c88d425ad20f0",
|
||||
"vcs": "git",
|
||||
"revision": "ec5e00d3c878b2a97bbe0884ef45ffd1b4f669f5",
|
||||
"branch": "master"
|
||||
},
|
||||
{
|
||||
@@ -1094,6 +1094,14 @@
|
||||
"revision": "ce617e79981a8fff618bb643d155133a8f38db96",
|
||||
"branch": "master"
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/hashicorp/go-immutable-radix",
|
||||
"repository": "https://github.com/hashicorp/go-immutable-radix",
|
||||
"vcs": "git",
|
||||
"revision": "27df80928bb34bb1b0d6d0e01b9e679902e7a6b5",
|
||||
"branch": "master",
|
||||
"notests": true
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/hashicorp/go-version",
|
||||
"repository": "https://github.com/hashicorp/go-version",
|
||||
|
||||
Reference in New Issue
Block a user