chore(backend): drop support for alertmanager <0.19.0
committed by Łukasz Mierzwa
parent 58ca6f8de7
commit 4059f8bb04

@@ -5,10 +5,7 @@ Alert dashboard for

 ---

 Starting with karma v0.56 only Alertmanager V2 API is supported, which
 requires Alertmanager `>=0.17.0`.

-Alertmanager `>=0.19.0` is recommended as older versions might not show all
+Alertmanager `>=0.19.0` is required as older versions might not show all
 receivers in karma, see
 [issue #812](https://github.com/prymitive/karma/issues/812) for details.

@@ -8,7 +8,7 @@ import (

 func TestGetAlertMapper(t *testing.T) {
 	versions := []string{
-		"0.17.0",
+		"0.19.0",
 		"0.20.0-rc.0",
 		"0.20.0-rc.0-2",
 	}

@@ -88,10 +88,6 @@ func (am *Alertmanager) probeVersion() string {
 	}
 	log.Infof("[%s] Upstream version: %s", am.Name, version)

-	if version == "0.17.0" || version == "0.18.0" {
-		log.Warningf("Alertmanager %s might return incomplete list of alert groups in the API, please upgrade to >=0.19.0, see https://github.com/prymitive/karma/issues/812", version)
-	}
-
 	return version
 }

@@ -22,13 +22,13 @@ var testCases = []testCaseType{
 	{requestedVersion: "0.6.6", hadError: true},
 	{requestedVersion: "0.15.0", hadError: true},
 	{requestedVersion: "0.16.0", hadError: true},
-	{requestedVersion: "0.17.0"},
-	{requestedVersion: "0.17.0-rc-1"},
-	{requestedVersion: "0.17.0-beta.1"},
-	{requestedVersion: "0.17.99-beta.1"},
-	{requestedVersion: "0.18-beta.1"},
-	{requestedVersion: "0.18"},
-	{requestedVersion: "0.18.1"},
+	{requestedVersion: "0.19.0"},
+	{requestedVersion: "0.19.0-rc-1"},
+	{requestedVersion: "0.19.0-beta.1"},
+	{requestedVersion: "0.19.99-beta.1"},
+	{requestedVersion: "0.20-beta.1"},
+	{requestedVersion: "0.20"},
+	{requestedVersion: "0.20.1"},
 }

 func TestGetAlertMapper(t *testing.T) {

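Note: the table above feeds the table-driven test that follows it; a minimal sketch of such a loop is shown below, assuming a GetAlertMapper helper that returns an error for versions the mappers no longer support. The helper name, import path and assertions are illustrative guesses, not code from this commit.

// Hedged sketch of the table-driven loop implied by testCases; the
// GetAlertMapper signature and the exact assertion are assumptions.
package mapper_test

import (
	"testing"

	"github.com/prymitive/karma/internal/mapper"
)

func TestGetAlertMapper(t *testing.T) {
	for _, tc := range testCases {
		// Expect an error exactly for the versions flagged with hadError.
		_, err := mapper.GetAlertMapper(tc.requestedVersion)
		if (err != nil) != tc.hadError {
			t.Errorf("GetAlertMapper(%q): err=%v, want hadError=%v", tc.requestedVersion, err, tc.hadError)
		}
	}
}
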
@@ -1,4 +1,4 @@
-ALERTMANAGER_VERSION := v0.17.0
+ALERTMANAGER_VERSION := v0.21.0
 API_VERSION := v2
 PACKAGE := v017
 TARGET_DIR := /go/src/github.com/prymitive/karma/internal/mapper/$(PACKAGE)

@@ -18,7 +18,7 @@ type AlertMapper struct {

 // IsSupported returns true if given version string is supported
 func (m AlertMapper) IsSupported(version string) bool {
 	// no need to check for errors as we pass static value
-	versionRange, _ := semver.NewConstraint(">=0.17.0")
+	versionRange, _ := semver.NewConstraint(">=0.19.0")
 	return versionRange.Check(semver.MustParse(version))
 }

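The constraint bump from ">=0.17.0" to ">=0.19.0" in the three IsSupported methods is the functional core of this commit. A standalone sketch of how that check behaves follows, assuming the github.com/Masterminds/semver package that the mapper code above appears to use; it is an illustration, not part of the commit.

// Standalone sketch, not part of the commit: assumes the
// github.com/Masterminds/semver package behind the IsSupported methods.
package main

import (
	"fmt"

	"github.com/Masterminds/semver"
)

func main() {
	// Same constraint string as the updated mappers.
	versionRange, _ := semver.NewConstraint(">=0.19.0")
	for _, v := range []string{"0.17.0", "0.18.1", "0.19.0", "0.21.0"} {
		// Check reports whether the version satisfies the constraint,
		// so 0.17.x and 0.18.x now count as unsupported.
		fmt.Println(v, versionRange.Check(semver.MustParse(v)))
	}
}
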
@@ -17,7 +17,7 @@ type SilenceMapper struct {

 // IsSupported returns true if given version string is supported
 func (m SilenceMapper) IsSupported(version string) bool {
 	// no need to check for errors as we pass static value
-	versionRange, _ := semver.NewConstraint(">=0.17.0")
+	versionRange, _ := semver.NewConstraint(">=0.19.0")
 	return versionRange.Check(semver.MustParse(version))
 }

@@ -17,7 +17,7 @@ type StatusMapper struct {

 // IsSupported returns true if given version string is supported
 func (s StatusMapper) IsSupported(version string) bool {
 	// no need to check for errors as we pass static value
-	versionRange, _ := semver.NewConstraint(">=0.17.0")
+	versionRange, _ := semver.NewConstraint(">=0.19.0")
 	return versionRange.Check(semver.MustParse(version))
 }

@@ -1,850 +0,0 @@
(deleted file: 850-line JSON test fixture of Alertmanager alert groups: Free_Disk_Space_Too_Low, HTTP_Probe_Failed, Host_Down and Memory_Usage_Too_High alerts across the dev, staging and prod clusters, grouped under the by-name and by-cluster-service receivers, each carrying annotations, labels, silencedBy/inhibitedBy status and timestamps)

@@ -1,61 +0,0 @@
[
  {
    "comment": "Silenced instance",
    "createdBy": "john@example.com",
    "endsAt": "2063-01-01T00:00:00.000Z",
    "id": "b0dbc7c2-8238-4a0b-907c-5748d5e68710",
    "matchers": [
      {
        "isRegex": false,
        "name": "instance",
        "value": "web1"
      }
    ],
    "startsAt": "2019-08-05T23:04:06.749Z",
    "status": {
      "state": "active"
    },
    "updatedAt": "2019-08-05T23:04:06.749Z"
  },
  {
    "comment": "Silenced Host_Down alerts in the dev cluster",
    "createdBy": "john@example.com",
    "endsAt": "2063-01-01T00:00:00.000Z",
    "id": "ee1e81a3-89c1-4f8a-a823-85b0c9f8a4bb",
    "matchers": [
      {
        "isRegex": false,
        "name": "alertname",
        "value": "Host_Down"
      },
      {
        "isRegex": false,
        "name": "cluster",
        "value": "dev"
      }
    ],
    "startsAt": "2019-08-05T23:04:06.755Z",
    "status": {
      "state": "active"
    },
    "updatedAt": "2019-08-05T23:04:06.755Z"
  },
  {
    "comment": "Silenced server7",
    "createdBy": "john@example.com",
    "endsAt": "2063-01-01T00:00:00.000Z",
    "id": "4660e240-e340-4ded-8400-9728066a8b63",
    "matchers": [
      {
        "isRegex": false,
        "name": "instance",
        "value": "server7"
      }
    ],
    "startsAt": "2019-08-05T23:04:06.760Z",
    "status": {
      "state": "active"
    },
    "updatedAt": "2019-08-05T23:04:06.760Z"
  }
]

@@ -1,24 +0,0 @@
{
  "cluster": {
    "name": "01DHJ0CMR1KQ19ZA4M2X9QDZG8",
    "peers": [
      {
        "address": "172.17.0.2:9094",
        "name": "01DHJ0CMR1KQ19ZA4M2X9QDZG8"
      }
    ],
    "status": "ready"
  },
  "config": {
    "original": "global:\n resolve_timeout: 5m\n http_config: {}\n smtp_hello: localhost\n smtp_require_tls: true\n pagerduty_url: https://events.pagerduty.com/v2/enqueue\n hipchat_api_url: https://api.hipchat.com/\n opsgenie_api_url: https://api.opsgenie.com/\n wechat_api_url: https://qyapi.weixin.qq.com/cgi-bin/\n victorops_api_url: https://alert.victorops.com/integrations/generic/20131114/alert/\nroute:\n receiver: default\n group_by:\n - alertname\n routes:\n - receiver: by-cluster-service\n group_by:\n - alertname\n - cluster\n - service\n match_re:\n alertname: .*\n continue: true\n - receiver: by-name\n group_by:\n - alertname\n match_re:\n alertname: .*\n continue: true\n group_wait: 15s\n group_interval: 35s\n repeat_interval: 999h\ninhibit_rules:\n- source_match:\n severity: critical\n target_match:\n severity: warning\n equal:\n - alertname\n - cluster\n - service\nreceivers:\n- name: default\n- name: by-cluster-service\n- name: by-name\ntemplates: []\n"
  },
  "uptime": "2019-08-05T23:03:51.812Z",
  "versionInfo": {
    "branch": "HEAD",
    "buildDate": "20190503-09:10:07",
    "buildUser": "root@932a86a52b76",
    "goVersion": "go1.12.4",
    "revision": "c7551cd75c414dc81df027f691e2eb21d4fd85b2",
    "version": "0.17.0"
  }
}

@@ -1,480 +0,0 @@
|
||||
# HELP alertmanager_alerts How many alerts by state.
|
||||
# TYPE alertmanager_alerts gauge
|
||||
alertmanager_alerts{state="active"} 8
|
||||
alertmanager_alerts{state="suppressed"} 4
|
||||
# HELP alertmanager_alerts_invalid_total The total number of received alerts that were invalid.
|
||||
# TYPE alertmanager_alerts_invalid_total counter
|
||||
alertmanager_alerts_invalid_total 0
|
||||
# HELP alertmanager_alerts_received_total The total number of received alerts.
|
||||
# TYPE alertmanager_alerts_received_total counter
|
||||
alertmanager_alerts_received_total{status="firing"} 60
|
||||
alertmanager_alerts_received_total{status="resolved"} 0
|
||||
# HELP alertmanager_build_info A metric with a constant '1' value labeled by version, revision, branch, and goversion from which alertmanager was built.
|
||||
# TYPE alertmanager_build_info gauge
|
||||
alertmanager_build_info{branch="HEAD",goversion="go1.12.4",revision="c7551cd75c414dc81df027f691e2eb21d4fd85b2",version="0.17.0"} 1
|
||||
# HELP alertmanager_cluster_failed_peers Number indicating the current number of failed peers in the cluster.
|
||||
# TYPE alertmanager_cluster_failed_peers gauge
|
||||
alertmanager_cluster_failed_peers 0
|
||||
# HELP alertmanager_cluster_health_score Health score of the cluster. Lower values are better and zero means 'totally healthy'.
|
||||
# TYPE alertmanager_cluster_health_score gauge
|
||||
alertmanager_cluster_health_score 0
|
||||
# HELP alertmanager_cluster_members Number indicating current number of members in cluster.
|
||||
# TYPE alertmanager_cluster_members gauge
|
||||
alertmanager_cluster_members 1
|
||||
# HELP alertmanager_cluster_messages_pruned_total Total number of cluster messages pruned.
|
||||
# TYPE alertmanager_cluster_messages_pruned_total counter
|
||||
alertmanager_cluster_messages_pruned_total 0
|
||||
# HELP alertmanager_cluster_messages_queued Number of cluster messages which are queued.
|
||||
# TYPE alertmanager_cluster_messages_queued gauge
|
||||
alertmanager_cluster_messages_queued 3
|
||||
# HELP alertmanager_cluster_messages_received_size_total Total size of cluster messages received.
|
||||
# TYPE alertmanager_cluster_messages_received_size_total counter
|
||||
alertmanager_cluster_messages_received_size_total{msg_type="full_state"} 0
|
||||
alertmanager_cluster_messages_received_size_total{msg_type="update"} 0
|
||||
# HELP alertmanager_cluster_messages_received_total Total number of cluster messages received.
|
||||
# TYPE alertmanager_cluster_messages_received_total counter
|
||||
alertmanager_cluster_messages_received_total{msg_type="full_state"} 0
|
||||
alertmanager_cluster_messages_received_total{msg_type="update"} 0
|
||||
# HELP alertmanager_cluster_messages_sent_size_total Total size of cluster messages sent.
|
||||
# TYPE alertmanager_cluster_messages_sent_size_total counter
|
||||
alertmanager_cluster_messages_sent_size_total{msg_type="full_state"} 0
|
||||
alertmanager_cluster_messages_sent_size_total{msg_type="update"} 0
|
||||
# HELP alertmanager_cluster_messages_sent_total Total number of cluster messages sent.
|
||||
# TYPE alertmanager_cluster_messages_sent_total counter
|
||||
alertmanager_cluster_messages_sent_total{msg_type="full_state"} 0
|
||||
alertmanager_cluster_messages_sent_total{msg_type="update"} 0
|
||||
# HELP alertmanager_cluster_peers_joined_total A counter of the number of peers that have joined.
|
||||
# TYPE alertmanager_cluster_peers_joined_total counter
|
||||
alertmanager_cluster_peers_joined_total 1
|
||||
# HELP alertmanager_cluster_peers_left_total A counter of the number of peers that have left.
|
||||
# TYPE alertmanager_cluster_peers_left_total counter
|
||||
alertmanager_cluster_peers_left_total 0
|
||||
# HELP alertmanager_cluster_peers_update_total A counter of the number of peers that have updated metadata.
|
||||
# TYPE alertmanager_cluster_peers_update_total counter
|
||||
alertmanager_cluster_peers_update_total 0
|
||||
# HELP alertmanager_cluster_reconnections_failed_total A counter of the number of failed cluster peer reconnection attempts.
|
||||
# TYPE alertmanager_cluster_reconnections_failed_total counter
|
||||
alertmanager_cluster_reconnections_failed_total 0
|
||||
# HELP alertmanager_cluster_reconnections_total A counter of the number of cluster peer reconnections.
|
||||
# TYPE alertmanager_cluster_reconnections_total counter
|
||||
alertmanager_cluster_reconnections_total 0
|
||||
# HELP alertmanager_cluster_refresh_join_failed_total A counter of the number of failed cluster peer joined attempts via refresh.
|
||||
# TYPE alertmanager_cluster_refresh_join_failed_total counter
|
||||
alertmanager_cluster_refresh_join_failed_total 0
|
||||
# HELP alertmanager_cluster_refresh_join_total A counter of the number of cluster peer joined via refresh.
|
||||
# TYPE alertmanager_cluster_refresh_join_total counter
|
||||
alertmanager_cluster_refresh_join_total 0
|
||||
# HELP alertmanager_config_hash Hash of the currently loaded alertmanager configuration.
|
||||
# TYPE alertmanager_config_hash gauge
|
||||
alertmanager_config_hash 6.2645753076152e+13
|
||||
# HELP alertmanager_config_last_reload_success_timestamp_seconds Timestamp of the last successful configuration reload.
|
||||
# TYPE alertmanager_config_last_reload_success_timestamp_seconds gauge
|
||||
alertmanager_config_last_reload_success_timestamp_seconds 1.565046231e+09
|
||||
# HELP alertmanager_config_last_reload_successful Whether the last configuration reload attempt was successful.
|
||||
# TYPE alertmanager_config_last_reload_successful gauge
|
||||
alertmanager_config_last_reload_successful 1
|
||||
# HELP alertmanager_http_concurrency_limit_exceeded_total Total number of times an HTTP request failed because the concurrency limit was reached.
|
||||
# TYPE alertmanager_http_concurrency_limit_exceeded_total counter
|
||||
alertmanager_http_concurrency_limit_exceeded_total{method="get"} 0
|
||||
# HELP alertmanager_http_request_duration_seconds Histogram of latencies for HTTP requests.
|
||||
# TYPE alertmanager_http_request_duration_seconds histogram
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="0.05"} 5
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="0.1"} 5
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="0.25"} 5
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="0.5"} 5
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="0.75"} 5
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="1"} 5
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="2"} 5
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="5"} 5
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="20"} 5
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="60"} 5
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="+Inf"} 5
|
||||
alertmanager_http_request_duration_seconds_sum{handler="/alerts",method="post"} 0.0073848
|
||||
alertmanager_http_request_duration_seconds_count{handler="/alerts",method="post"} 5
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="0.05"} 3
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="0.1"} 3
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="0.25"} 3
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="0.5"} 3
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="0.75"} 3
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="1"} 3
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="2"} 3
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="5"} 3
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="20"} 3
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="60"} 3
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="+Inf"} 3
|
||||
alertmanager_http_request_duration_seconds_sum{handler="/silences",method="post"} 0.0051026000000000005
|
||||
alertmanager_http_request_duration_seconds_count{handler="/silences",method="post"} 3
|
||||
# HELP alertmanager_http_requests_in_flight Current number of HTTP requests being processed.
|
||||
# TYPE alertmanager_http_requests_in_flight gauge
|
||||
alertmanager_http_requests_in_flight{method="get"} 1
|
||||
# HELP alertmanager_http_response_size_bytes Histogram of response size for HTTP requests.
|
||||
# TYPE alertmanager_http_response_size_bytes histogram
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="100"} 5
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="1000"} 5
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="10000"} 5
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="100000"} 5
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="1e+06"} 5
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="1e+07"} 5
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="1e+08"} 5
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="+Inf"} 5
|
||||
alertmanager_http_response_size_bytes_sum{handler="/alerts",method="post"} 100
|
||||
alertmanager_http_response_size_bytes_count{handler="/alerts",method="post"} 5
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="100"} 3
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="1000"} 3
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="10000"} 3
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="100000"} 3
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="1e+06"} 3
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="1e+07"} 3
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="1e+08"} 3
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="+Inf"} 3
|
||||
alertmanager_http_response_size_bytes_sum{handler="/silences",method="post"} 240
|
||||
alertmanager_http_response_size_bytes_count{handler="/silences",method="post"} 3
|
||||
# HELP alertmanager_nflog_gc_duration_seconds Duration of the last notification log garbage collection cycle.
|
||||
# TYPE alertmanager_nflog_gc_duration_seconds summary
|
||||
alertmanager_nflog_gc_duration_seconds{quantile="0.5"} NaN
|
||||
alertmanager_nflog_gc_duration_seconds{quantile="0.9"} NaN
|
||||
alertmanager_nflog_gc_duration_seconds{quantile="0.99"} NaN
|
||||
alertmanager_nflog_gc_duration_seconds_sum 0
|
||||
alertmanager_nflog_gc_duration_seconds_count 0
|
||||
# HELP alertmanager_nflog_gossip_messages_propagated_total Number of received gossip messages that have been further gossiped.
|
||||
# TYPE alertmanager_nflog_gossip_messages_propagated_total counter
|
||||
alertmanager_nflog_gossip_messages_propagated_total 0
|
||||
# HELP alertmanager_nflog_queries_total Number of notification log queries were received.
|
||||
# TYPE alertmanager_nflog_queries_total counter
|
||||
alertmanager_nflog_queries_total 0
|
||||
# HELP alertmanager_nflog_query_duration_seconds Duration of notification log query evaluation.
|
||||
# TYPE alertmanager_nflog_query_duration_seconds histogram
|
||||
alertmanager_nflog_query_duration_seconds_bucket{le="0.005"} 0
|
||||
alertmanager_nflog_query_duration_seconds_bucket{le="0.01"} 0
|
||||
alertmanager_nflog_query_duration_seconds_bucket{le="0.025"} 0
|
||||
alertmanager_nflog_query_duration_seconds_bucket{le="0.05"} 0
|
||||
alertmanager_nflog_query_duration_seconds_bucket{le="0.1"} 0
|
||||
alertmanager_nflog_query_duration_seconds_bucket{le="0.25"} 0
|
||||
alertmanager_nflog_query_duration_seconds_bucket{le="0.5"} 0
|
||||
alertmanager_nflog_query_duration_seconds_bucket{le="1"} 0
|
||||
alertmanager_nflog_query_duration_seconds_bucket{le="2.5"} 0
|
||||
alertmanager_nflog_query_duration_seconds_bucket{le="5"} 0
|
||||
alertmanager_nflog_query_duration_seconds_bucket{le="10"} 0
|
||||
alertmanager_nflog_query_duration_seconds_bucket{le="+Inf"} 0
|
||||
alertmanager_nflog_query_duration_seconds_sum 0
|
||||
alertmanager_nflog_query_duration_seconds_count 0
|
||||
# HELP alertmanager_nflog_query_errors_total Number notification log received queries that failed.
|
||||
# TYPE alertmanager_nflog_query_errors_total counter
|
||||
alertmanager_nflog_query_errors_total 0
|
||||
# HELP alertmanager_nflog_snapshot_duration_seconds Duration of the last notification log snapshot.
|
||||
# TYPE alertmanager_nflog_snapshot_duration_seconds summary
|
||||
alertmanager_nflog_snapshot_duration_seconds{quantile="0.5"} NaN
|
||||
alertmanager_nflog_snapshot_duration_seconds{quantile="0.9"} NaN
|
||||
alertmanager_nflog_snapshot_duration_seconds{quantile="0.99"} NaN
|
||||
alertmanager_nflog_snapshot_duration_seconds_sum 0
|
||||
alertmanager_nflog_snapshot_duration_seconds_count 0
|
||||
# HELP alertmanager_nflog_snapshot_size_bytes Size of the last notification log snapshot in bytes.
|
||||
# TYPE alertmanager_nflog_snapshot_size_bytes gauge
|
||||
alertmanager_nflog_snapshot_size_bytes 0
|
||||
# HELP alertmanager_notification_latency_seconds The latency of notifications in seconds.
|
||||
# TYPE alertmanager_notification_latency_seconds histogram
|
||||
alertmanager_notification_latency_seconds_bucket{integration="email",le="1"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="email",le="5"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="email",le="10"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="email",le="15"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="email",le="20"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="email",le="+Inf"} 0
|
||||
alertmanager_notification_latency_seconds_sum{integration="email"} 0
|
||||
alertmanager_notification_latency_seconds_count{integration="email"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="hipchat",le="1"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="hipchat",le="5"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="hipchat",le="10"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="hipchat",le="15"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="hipchat",le="20"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="hipchat",le="+Inf"} 0
|
||||
alertmanager_notification_latency_seconds_sum{integration="hipchat"} 0
|
||||
alertmanager_notification_latency_seconds_count{integration="hipchat"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="1"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="5"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="10"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="15"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="20"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="+Inf"} 0
|
||||
alertmanager_notification_latency_seconds_sum{integration="opsgenie"} 0
|
||||
alertmanager_notification_latency_seconds_count{integration="opsgenie"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="1"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="5"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="10"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="15"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="20"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="+Inf"} 0
|
||||
alertmanager_notification_latency_seconds_sum{integration="pagerduty"} 0
|
||||
alertmanager_notification_latency_seconds_count{integration="pagerduty"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="1"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="5"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="10"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="15"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="20"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="+Inf"} 0
|
||||
alertmanager_notification_latency_seconds_sum{integration="pushover"} 0
|
||||
alertmanager_notification_latency_seconds_count{integration="pushover"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="slack",le="1"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="slack",le="5"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="slack",le="10"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="slack",le="15"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="slack",le="20"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="slack",le="+Inf"} 0
|
||||
alertmanager_notification_latency_seconds_sum{integration="slack"} 0
|
||||
alertmanager_notification_latency_seconds_count{integration="slack"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="1"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="5"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="10"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="15"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="20"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="+Inf"} 0
|
||||
alertmanager_notification_latency_seconds_sum{integration="victorops"} 0
|
||||
alertmanager_notification_latency_seconds_count{integration="victorops"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="1"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="5"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="10"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="15"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="20"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="+Inf"} 0
|
||||
alertmanager_notification_latency_seconds_sum{integration="webhook"} 0
|
||||
alertmanager_notification_latency_seconds_count{integration="webhook"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="1"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="5"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="10"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="15"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="20"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="+Inf"} 0
|
||||
alertmanager_notification_latency_seconds_sum{integration="wechat"} 0
|
||||
alertmanager_notification_latency_seconds_count{integration="wechat"} 0
|
||||
# HELP alertmanager_notifications_failed_total The total number of failed notifications.
|
||||
# TYPE alertmanager_notifications_failed_total counter
|
||||
alertmanager_notifications_failed_total{integration="email"} 0
|
||||
alertmanager_notifications_failed_total{integration="hipchat"} 0
|
||||
alertmanager_notifications_failed_total{integration="opsgenie"} 0
|
||||
alertmanager_notifications_failed_total{integration="pagerduty"} 0
|
||||
alertmanager_notifications_failed_total{integration="pushover"} 0
|
||||
alertmanager_notifications_failed_total{integration="slack"} 0
|
||||
alertmanager_notifications_failed_total{integration="victorops"} 0
|
||||
alertmanager_notifications_failed_total{integration="webhook"} 0
|
||||
alertmanager_notifications_failed_total{integration="wechat"} 0
|
||||
# HELP alertmanager_notifications_total The total number of attempted notifications.
|
||||
# TYPE alertmanager_notifications_total counter
|
||||
alertmanager_notifications_total{integration="email"} 0
|
||||
alertmanager_notifications_total{integration="hipchat"} 0
|
||||
alertmanager_notifications_total{integration="opsgenie"} 0
|
||||
alertmanager_notifications_total{integration="pagerduty"} 0
|
||||
alertmanager_notifications_total{integration="pushover"} 0
|
||||
alertmanager_notifications_total{integration="slack"} 0
|
||||
alertmanager_notifications_total{integration="victorops"} 0
|
||||
alertmanager_notifications_total{integration="webhook"} 0
|
||||
alertmanager_notifications_total{integration="wechat"} 0
|
||||
# HELP alertmanager_oversize_gossip_message_duration_seconds Duration of oversized gossip message requests.
|
||||
# TYPE alertmanager_oversize_gossip_message_duration_seconds histogram
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.005"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.01"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.025"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.05"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.1"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.25"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.5"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="1"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="2.5"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="5"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="10"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="+Inf"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_sum{key="nfl"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_count{key="nfl"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.005"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.01"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.025"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.05"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.1"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.25"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.5"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="1"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="2.5"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="5"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="10"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="+Inf"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_sum{key="sil"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_count{key="sil"} 0
|
||||
# HELP alertmanager_oversized_gossip_message_dropped_total Number of oversized gossip messages that were dropped due to a full message queue.
|
||||
# TYPE alertmanager_oversized_gossip_message_dropped_total counter
|
||||
alertmanager_oversized_gossip_message_dropped_total{key="nfl"} 0
|
||||
alertmanager_oversized_gossip_message_dropped_total{key="sil"} 0
|
||||
# HELP alertmanager_oversized_gossip_message_failure_total Number of oversized gossip message sends that failed.
|
||||
# TYPE alertmanager_oversized_gossip_message_failure_total counter
|
||||
alertmanager_oversized_gossip_message_failure_total{key="nfl"} 0
|
||||
alertmanager_oversized_gossip_message_failure_total{key="sil"} 0
|
||||
# HELP alertmanager_oversized_gossip_message_sent_total Number of oversized gossip message sent.
|
||||
# TYPE alertmanager_oversized_gossip_message_sent_total counter
|
||||
alertmanager_oversized_gossip_message_sent_total{key="nfl"} 0
|
||||
alertmanager_oversized_gossip_message_sent_total{key="sil"} 0
|
||||
# HELP alertmanager_peer_position Position the Alertmanager instance believes it's in. The position determines a peer's behavior in the cluster.
|
||||
# TYPE alertmanager_peer_position gauge
|
||||
alertmanager_peer_position 0
|
||||
# HELP alertmanager_silences How many silences by state.
|
||||
# TYPE alertmanager_silences gauge
|
||||
alertmanager_silences{state="active"} 3
|
||||
alertmanager_silences{state="expired"} 0
|
||||
alertmanager_silences{state="pending"} 0
|
||||
# HELP alertmanager_silences_gc_duration_seconds Duration of the last silence garbage collection cycle.
# TYPE alertmanager_silences_gc_duration_seconds summary
alertmanager_silences_gc_duration_seconds{quantile="0.5"} NaN
alertmanager_silences_gc_duration_seconds{quantile="0.9"} NaN
alertmanager_silences_gc_duration_seconds{quantile="0.99"} NaN
alertmanager_silences_gc_duration_seconds_sum 0
alertmanager_silences_gc_duration_seconds_count 0
# HELP alertmanager_silences_gossip_messages_propagated_total Number of received gossip messages that have been further gossiped.
# TYPE alertmanager_silences_gossip_messages_propagated_total counter
alertmanager_silences_gossip_messages_propagated_total 0
# HELP alertmanager_silences_queries_total How many silence queries were received.
# TYPE alertmanager_silences_queries_total counter
alertmanager_silences_queries_total 26
# HELP alertmanager_silences_query_duration_seconds Duration of silence query evaluation.
# TYPE alertmanager_silences_query_duration_seconds histogram
alertmanager_silences_query_duration_seconds_bucket{le="0.005"} 26
alertmanager_silences_query_duration_seconds_bucket{le="0.01"} 26
alertmanager_silences_query_duration_seconds_bucket{le="0.025"} 26
alertmanager_silences_query_duration_seconds_bucket{le="0.05"} 26
alertmanager_silences_query_duration_seconds_bucket{le="0.1"} 26
alertmanager_silences_query_duration_seconds_bucket{le="0.25"} 26
alertmanager_silences_query_duration_seconds_bucket{le="0.5"} 26
alertmanager_silences_query_duration_seconds_bucket{le="1"} 26
alertmanager_silences_query_duration_seconds_bucket{le="2.5"} 26
alertmanager_silences_query_duration_seconds_bucket{le="5"} 26
alertmanager_silences_query_duration_seconds_bucket{le="10"} 26
alertmanager_silences_query_duration_seconds_bucket{le="+Inf"} 26
alertmanager_silences_query_duration_seconds_sum 0.0009475999999999999
alertmanager_silences_query_duration_seconds_count 26
# HELP alertmanager_silences_query_errors_total How many silence received queries did not succeed.
# TYPE alertmanager_silences_query_errors_total counter
alertmanager_silences_query_errors_total 0
# HELP alertmanager_silences_snapshot_duration_seconds Duration of the last silence snapshot.
# TYPE alertmanager_silences_snapshot_duration_seconds summary
alertmanager_silences_snapshot_duration_seconds{quantile="0.5"} NaN
alertmanager_silences_snapshot_duration_seconds{quantile="0.9"} NaN
alertmanager_silences_snapshot_duration_seconds{quantile="0.99"} NaN
alertmanager_silences_snapshot_duration_seconds_sum 0
alertmanager_silences_snapshot_duration_seconds_count 0
# HELP alertmanager_silences_snapshot_size_bytes Size of the last silence snapshot in bytes.
# TYPE alertmanager_silences_snapshot_size_bytes gauge
alertmanager_silences_snapshot_size_bytes 0
# HELP go_gc_duration_seconds A summary of the GC invocation durations.
# TYPE go_gc_duration_seconds summary
go_gc_duration_seconds{quantile="0"} 6.12e-05
go_gc_duration_seconds{quantile="0.25"} 0.0001221
go_gc_duration_seconds{quantile="0.5"} 0.0002244
go_gc_duration_seconds{quantile="0.75"} 0.0004771
go_gc_duration_seconds{quantile="1"} 0.0006216
go_gc_duration_seconds_sum 0.0015064
go_gc_duration_seconds_count 5
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
go_goroutines 50
# HELP go_info Information about the Go environment.
# TYPE go_info gauge
go_info{version="go1.12.4"} 1
# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
# TYPE go_memstats_alloc_bytes gauge
go_memstats_alloc_bytes 7.028352e+06
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
# TYPE go_memstats_alloc_bytes_total counter
go_memstats_alloc_bytes_total 1.5461424e+07
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
# TYPE go_memstats_buck_hash_sys_bytes gauge
go_memstats_buck_hash_sys_bytes 1.447826e+06
# HELP go_memstats_frees_total Total number of frees.
# TYPE go_memstats_frees_total counter
go_memstats_frees_total 89957
# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started.
# TYPE go_memstats_gc_cpu_fraction gauge
go_memstats_gc_cpu_fraction 0.02225053057301886
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
# TYPE go_memstats_gc_sys_bytes gauge
go_memstats_gc_sys_bytes 2.38592e+06
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
# TYPE go_memstats_heap_alloc_bytes gauge
go_memstats_heap_alloc_bytes 7.028352e+06
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
# TYPE go_memstats_heap_idle_bytes gauge
go_memstats_heap_idle_bytes 5.7679872e+07
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
# TYPE go_memstats_heap_inuse_bytes gauge
go_memstats_heap_inuse_bytes 8.609792e+06
# HELP go_memstats_heap_objects Number of allocated objects.
# TYPE go_memstats_heap_objects gauge
go_memstats_heap_objects 38820
# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.
# TYPE go_memstats_heap_released_bytes gauge
go_memstats_heap_released_bytes 0
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
# TYPE go_memstats_heap_sys_bytes gauge
go_memstats_heap_sys_bytes 6.6289664e+07
# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
# TYPE go_memstats_last_gc_time_seconds gauge
go_memstats_last_gc_time_seconds 1.5650462318443272e+09
# HELP go_memstats_lookups_total Total number of pointer lookups.
# TYPE go_memstats_lookups_total counter
go_memstats_lookups_total 0
# HELP go_memstats_mallocs_total Total number of mallocs.
# TYPE go_memstats_mallocs_total counter
go_memstats_mallocs_total 128777
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
# TYPE go_memstats_mcache_inuse_bytes gauge
go_memstats_mcache_inuse_bytes 3472
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
# TYPE go_memstats_mcache_sys_bytes gauge
go_memstats_mcache_sys_bytes 16384
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
# TYPE go_memstats_mspan_inuse_bytes gauge
go_memstats_mspan_inuse_bytes 106992
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
# TYPE go_memstats_mspan_sys_bytes gauge
go_memstats_mspan_sys_bytes 114688
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
# TYPE go_memstats_next_gc_bytes gauge
go_memstats_next_gc_bytes 9.03216e+06
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
# TYPE go_memstats_other_sys_bytes gauge
go_memstats_other_sys_bytes 688486
# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
# TYPE go_memstats_stack_inuse_bytes gauge
go_memstats_stack_inuse_bytes 819200
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
# TYPE go_memstats_stack_sys_bytes gauge
go_memstats_stack_sys_bytes 819200
# HELP go_memstats_sys_bytes Number of bytes obtained from system.
# TYPE go_memstats_sys_bytes gauge
go_memstats_sys_bytes 7.1762168e+07
# HELP go_threads Number of OS threads created.
# TYPE go_threads gauge
go_threads 9
# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
# TYPE process_cpu_seconds_total counter
process_cpu_seconds_total 0.39
# HELP process_max_fds Maximum number of open file descriptors.
# TYPE process_max_fds gauge
process_max_fds 1.048576e+06
# HELP process_open_fds Number of open file descriptors.
# TYPE process_open_fds gauge
process_open_fds 9
# HELP process_resident_memory_bytes Resident memory size in bytes.
# TYPE process_resident_memory_bytes gauge
process_resident_memory_bytes 2.371584e+07
# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
# TYPE process_start_time_seconds gauge
process_start_time_seconds 1.56504623054e+09
# HELP process_virtual_memory_bytes Virtual memory size in bytes.
# TYPE process_virtual_memory_bytes gauge
process_virtual_memory_bytes 1.2558336e+08
# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.
# TYPE process_virtual_memory_max_bytes gauge
process_virtual_memory_max_bytes -1
# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.
# TYPE promhttp_metric_handler_requests_in_flight gauge
promhttp_metric_handler_requests_in_flight 1
# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.
# TYPE promhttp_metric_handler_requests_total counter
promhttp_metric_handler_requests_total{code="200"} 0
promhttp_metric_handler_requests_total{code="500"} 0
promhttp_metric_handler_requests_total{code="503"} 0
@@ -1,850 +0,0 @@
[
|
||||
{
|
||||
"alerts": [
|
||||
{
|
||||
"annotations": {
|
||||
"alert": "Less than 10% disk space is free",
|
||||
"dashboard": "http://localhost/dashboard.html"
|
||||
},
|
||||
"endsAt": "2019-08-06T20:35:34.816Z",
|
||||
"fingerprint": "aae7a1432b5d2f1b",
|
||||
"generatorURL": "localhost/prometheus",
|
||||
"labels": {
|
||||
"alertname": "Free_Disk_Space_Too_Low",
|
||||
"cluster": "staging",
|
||||
"disk": "sda",
|
||||
"instance": "server5",
|
||||
"job": "node_exporter"
|
||||
},
|
||||
"receivers": [
|
||||
{
|
||||
"name": "by-cluster-service"
|
||||
},
|
||||
{
|
||||
"name": "by-name"
|
||||
}
|
||||
],
|
||||
"startsAt": "2019-08-06T20:29:54.772Z",
|
||||
"status": {
|
||||
"inhibitedBy": [],
|
||||
"silencedBy": [],
|
||||
"state": "active"
|
||||
},
|
||||
"updatedAt": "2019-08-06T20:30:34.816Z"
|
||||
}
|
||||
],
|
||||
"labels": {
|
||||
"alertname": "Free_Disk_Space_Too_Low"
|
||||
},
|
||||
"receiver": {
|
||||
"name": "by-name"
|
||||
}
|
||||
},
|
||||
{
|
||||
"alerts": [
|
||||
{
|
||||
"annotations": {
|
||||
"help": "Example help annotation",
|
||||
"summary": "Example summary",
|
||||
"url": "http://localhost/example.html"
|
||||
},
|
||||
"endsAt": "2019-08-06T20:35:34.816Z",
|
||||
"fingerprint": "54c2f185e49cfccb",
|
||||
"generatorURL": "localhost/prometheus",
|
||||
"labels": {
|
||||
"alertname": "HTTP_Probe_Failed",
|
||||
"cluster": "dev",
|
||||
"instance": "web1",
|
||||
"job": "node_exporter"
|
||||
},
|
||||
"receivers": [
|
||||
{
|
||||
"name": "by-cluster-service"
|
||||
},
|
||||
{
|
||||
"name": "by-name"
|
||||
}
|
||||
],
|
||||
"startsAt": "2019-08-06T20:29:54.772Z",
|
||||
"status": {
|
||||
"inhibitedBy": [],
|
||||
"silencedBy": [
|
||||
"dba0c327-c0f6-4605-8669-e4252921fbfe"
|
||||
],
|
||||
"state": "suppressed"
|
||||
},
|
||||
"updatedAt": "2019-08-06T20:30:34.816Z"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"summary": "Example summary"
|
||||
},
|
||||
"endsAt": "2019-08-06T20:35:34.816Z",
|
||||
"fingerprint": "5cb0dd95e7f3d9c0",
|
||||
"generatorURL": "localhost/prometheus",
|
||||
"labels": {
|
||||
"alertname": "HTTP_Probe_Failed",
|
||||
"cluster": "dev",
|
||||
"instance": "web2",
|
||||
"job": "node_exporter"
|
||||
},
|
||||
"receivers": [
|
||||
{
|
||||
"name": "by-cluster-service"
|
||||
},
|
||||
{
|
||||
"name": "by-name"
|
||||
}
|
||||
],
|
||||
"startsAt": "2019-08-06T20:29:54.772Z",
|
||||
"status": {
|
||||
"inhibitedBy": [],
|
||||
"silencedBy": [],
|
||||
"state": "active"
|
||||
},
|
||||
"updatedAt": "2019-08-06T20:30:34.816Z"
|
||||
}
|
||||
],
|
||||
"labels": {
|
||||
"alertname": "HTTP_Probe_Failed"
|
||||
},
|
||||
"receiver": {
|
||||
"name": "by-name"
|
||||
}
|
||||
},
|
||||
{
|
||||
"alerts": [
|
||||
{
|
||||
"annotations": {
|
||||
"summary": "Example summary",
|
||||
"url": "http://localhost/example.html"
|
||||
},
|
||||
"endsAt": "2019-08-06T20:35:34.816Z",
|
||||
"fingerprint": "7013294faf5f854d",
|
||||
"generatorURL": "localhost/prometheus",
|
||||
"labels": {
|
||||
"alertname": "Host_Down",
|
||||
"cluster": "prod",
|
||||
"instance": "server1",
|
||||
"ip": "127.0.0.1",
|
||||
"job": "node_ping"
|
||||
},
|
||||
"receivers": [
|
||||
{
|
||||
"name": "by-cluster-service"
|
||||
},
|
||||
{
|
||||
"name": "by-name"
|
||||
}
|
||||
],
|
||||
"startsAt": "2019-08-06T20:29:54.772Z",
|
||||
"status": {
|
||||
"inhibitedBy": [],
|
||||
"silencedBy": [],
|
||||
"state": "active"
|
||||
},
|
||||
"updatedAt": "2019-08-06T20:30:34.816Z"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"summary": "Example summary"
|
||||
},
|
||||
"endsAt": "2019-08-06T20:35:34.816Z",
|
||||
"fingerprint": "fa959d3911d1978b",
|
||||
"generatorURL": "localhost/prometheus",
|
||||
"labels": {
|
||||
"alertname": "Host_Down",
|
||||
"cluster": "prod",
|
||||
"instance": "server2",
|
||||
"ip": "127.0.0.2",
|
||||
"job": "node_ping"
|
||||
},
|
||||
"receivers": [
|
||||
{
|
||||
"name": "by-cluster-service"
|
||||
},
|
||||
{
|
||||
"name": "by-name"
|
||||
}
|
||||
],
|
||||
"startsAt": "2019-08-06T20:29:54.772Z",
|
||||
"status": {
|
||||
"inhibitedBy": [],
|
||||
"silencedBy": [],
|
||||
"state": "active"
|
||||
},
|
||||
"updatedAt": "2019-08-06T20:30:34.816Z"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"summary": "Example summary"
|
||||
},
|
||||
"endsAt": "2019-08-06T20:35:34.816Z",
|
||||
"fingerprint": "7f3a53482c303b65",
|
||||
"generatorURL": "localhost/prometheus",
|
||||
"labels": {
|
||||
"alertname": "Host_Down",
|
||||
"cluster": "staging",
|
||||
"instance": "server3",
|
||||
"ip": "127.0.0.3",
|
||||
"job": "node_ping"
|
||||
},
|
||||
"receivers": [
|
||||
{
|
||||
"name": "by-cluster-service"
|
||||
},
|
||||
{
|
||||
"name": "by-name"
|
||||
}
|
||||
],
|
||||
"startsAt": "2019-08-06T20:29:54.772Z",
|
||||
"status": {
|
||||
"inhibitedBy": [],
|
||||
"silencedBy": [],
|
||||
"state": "active"
|
||||
},
|
||||
"updatedAt": "2019-08-06T20:30:34.816Z"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"summary": "Example summary"
|
||||
},
|
||||
"endsAt": "2019-08-06T20:35:34.816Z",
|
||||
"fingerprint": "7f5c6e647877b3df",
|
||||
"generatorURL": "localhost/prometheus",
|
||||
"labels": {
|
||||
"alertname": "Host_Down",
|
||||
"cluster": "staging",
|
||||
"instance": "server4",
|
||||
"ip": "127.0.0.4",
|
||||
"job": "node_ping"
|
||||
},
|
||||
"receivers": [
|
||||
{
|
||||
"name": "by-cluster-service"
|
||||
},
|
||||
{
|
||||
"name": "by-name"
|
||||
}
|
||||
],
|
||||
"startsAt": "2019-08-06T20:29:54.772Z",
|
||||
"status": {
|
||||
"inhibitedBy": [],
|
||||
"silencedBy": [],
|
||||
"state": "active"
|
||||
},
|
||||
"updatedAt": "2019-08-06T20:30:34.816Z"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"summary": "Example summary"
|
||||
},
|
||||
"endsAt": "2019-08-06T20:35:34.816Z",
|
||||
"fingerprint": "b5dcc9c573def911",
|
||||
"generatorURL": "localhost/prometheus",
|
||||
"labels": {
|
||||
"alertname": "Host_Down",
|
||||
"cluster": "staging",
|
||||
"instance": "server5",
|
||||
"ip": "127.0.0.5",
|
||||
"job": "node_ping"
|
||||
},
|
||||
"receivers": [
|
||||
{
|
||||
"name": "by-cluster-service"
|
||||
},
|
||||
{
|
||||
"name": "by-name"
|
||||
}
|
||||
],
|
||||
"startsAt": "2019-08-06T20:29:54.772Z",
|
||||
"status": {
|
||||
"inhibitedBy": [],
|
||||
"silencedBy": [],
|
||||
"state": "active"
|
||||
},
|
||||
"updatedAt": "2019-08-06T20:30:34.816Z"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"summary": "Example summary"
|
||||
},
|
||||
"endsAt": "2019-08-06T20:35:34.816Z",
|
||||
"fingerprint": "588a14f7b4613621",
|
||||
"generatorURL": "localhost/prometheus",
|
||||
"labels": {
|
||||
"alertname": "Host_Down",
|
||||
"cluster": "dev",
|
||||
"instance": "server6",
|
||||
"ip": "127.0.0.6",
|
||||
"job": "node_ping"
|
||||
},
|
||||
"receivers": [
|
||||
{
|
||||
"name": "by-cluster-service"
|
||||
},
|
||||
{
|
||||
"name": "by-name"
|
||||
}
|
||||
],
|
||||
"startsAt": "2019-08-06T20:29:54.772Z",
|
||||
"status": {
|
||||
"inhibitedBy": [],
|
||||
"silencedBy": [
|
||||
"aa878af5-ce35-4d00-b56b-c46c28802436"
|
||||
],
|
||||
"state": "suppressed"
|
||||
},
|
||||
"updatedAt": "2019-08-06T20:30:34.816Z"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"summary": "Example summary"
|
||||
},
|
||||
"endsAt": "2019-08-06T20:35:34.816Z",
|
||||
"fingerprint": "cc58f2ac8260fb97",
|
||||
"generatorURL": "localhost/prometheus",
|
||||
"labels": {
|
||||
"alertname": "Host_Down",
|
||||
"cluster": "dev",
|
||||
"instance": "server7",
|
||||
"ip": "127.0.0.7",
|
||||
"job": "node_ping"
|
||||
},
|
||||
"receivers": [
|
||||
{
|
||||
"name": "by-cluster-service"
|
||||
},
|
||||
{
|
||||
"name": "by-name"
|
||||
}
|
||||
],
|
||||
"startsAt": "2019-08-06T20:29:54.772Z",
|
||||
"status": {
|
||||
"inhibitedBy": [],
|
||||
"silencedBy": [
|
||||
"aa878af5-ce35-4d00-b56b-c46c28802436",
|
||||
"f0659e37-afae-4c0c-9b14-1f7a26fde82e"
|
||||
],
|
||||
"state": "suppressed"
|
||||
},
|
||||
"updatedAt": "2019-08-06T20:30:34.816Z"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"summary": "Example summary"
|
||||
},
|
||||
"endsAt": "2019-08-06T20:35:34.816Z",
|
||||
"fingerprint": "ffbb5f178eb27f11",
|
||||
"generatorURL": "localhost/prometheus",
|
||||
"labels": {
|
||||
"alertname": "Host_Down",
|
||||
"cluster": "dev",
|
||||
"instance": "server8",
|
||||
"ip": "127.0.0.8",
|
||||
"job": "node_ping"
|
||||
},
|
||||
"receivers": [
|
||||
{
|
||||
"name": "by-cluster-service"
|
||||
},
|
||||
{
|
||||
"name": "by-name"
|
||||
}
|
||||
],
|
||||
"startsAt": "2019-08-06T20:29:54.772Z",
|
||||
"status": {
|
||||
"inhibitedBy": [],
|
||||
"silencedBy": [
|
||||
"aa878af5-ce35-4d00-b56b-c46c28802436"
|
||||
],
|
||||
"state": "suppressed"
|
||||
},
|
||||
"updatedAt": "2019-08-06T20:30:34.816Z"
|
||||
}
|
||||
],
|
||||
"labels": {
|
||||
"alertname": "Host_Down"
|
||||
},
|
||||
"receiver": {
|
||||
"name": "by-name"
|
||||
}
|
||||
},
|
||||
{
|
||||
"alerts": [
|
||||
{
|
||||
"annotations": {
|
||||
"alert": "Memory usage exceeding threshold",
|
||||
"dashboard": "http://localhost/dashboard.html"
|
||||
},
|
||||
"endsAt": "2019-08-06T20:35:34.816Z",
|
||||
"fingerprint": "7d0b114ebf24f857",
|
||||
"generatorURL": "localhost/prometheus",
|
||||
"labels": {
|
||||
"alertname": "Memory_Usage_Too_High",
|
||||
"cluster": "prod",
|
||||
"instance": "server2",
|
||||
"job": "node_exporter"
|
||||
},
|
||||
"receivers": [
|
||||
{
|
||||
"name": "by-cluster-service"
|
||||
},
|
||||
{
|
||||
"name": "by-name"
|
||||
}
|
||||
],
|
||||
"startsAt": "2019-08-06T20:29:54.772Z",
|
||||
"status": {
|
||||
"inhibitedBy": [],
|
||||
"silencedBy": [],
|
||||
"state": "active"
|
||||
},
|
||||
"updatedAt": "2019-08-06T20:30:34.816Z"
|
||||
}
|
||||
],
|
||||
"labels": {
|
||||
"alertname": "Memory_Usage_Too_High"
|
||||
},
|
||||
"receiver": {
|
||||
"name": "by-name"
|
||||
}
|
||||
},
|
||||
{
|
||||
"alerts": [
|
||||
{
|
||||
"annotations": {
|
||||
"alert": "Less than 10% disk space is free",
|
||||
"dashboard": "http://localhost/dashboard.html"
|
||||
},
|
||||
"endsAt": "2019-08-06T20:35:34.816Z",
|
||||
"fingerprint": "aae7a1432b5d2f1b",
|
||||
"generatorURL": "localhost/prometheus",
|
||||
"labels": {
|
||||
"alertname": "Free_Disk_Space_Too_Low",
|
||||
"cluster": "staging",
|
||||
"disk": "sda",
|
||||
"instance": "server5",
|
||||
"job": "node_exporter"
|
||||
},
|
||||
"receivers": [
|
||||
{
|
||||
"name": "by-cluster-service"
|
||||
},
|
||||
{
|
||||
"name": "by-name"
|
||||
}
|
||||
],
|
||||
"startsAt": "2019-08-06T20:29:54.772Z",
|
||||
"status": {
|
||||
"inhibitedBy": [],
|
||||
"silencedBy": [],
|
||||
"state": "active"
|
||||
},
|
||||
"updatedAt": "2019-08-06T20:30:34.816Z"
|
||||
}
|
||||
],
|
||||
"labels": {
|
||||
"alertname": "Free_Disk_Space_Too_Low",
|
||||
"cluster": "staging"
|
||||
},
|
||||
"receiver": {
|
||||
"name": "by-cluster-service"
|
||||
}
|
||||
},
|
||||
{
|
||||
"alerts": [
|
||||
{
|
||||
"annotations": {
|
||||
"help": "Example help annotation",
|
||||
"summary": "Example summary",
|
||||
"url": "http://localhost/example.html"
|
||||
},
|
||||
"endsAt": "2019-08-06T20:35:34.816Z",
|
||||
"fingerprint": "54c2f185e49cfccb",
|
||||
"generatorURL": "localhost/prometheus",
|
||||
"labels": {
|
||||
"alertname": "HTTP_Probe_Failed",
|
||||
"cluster": "dev",
|
||||
"instance": "web1",
|
||||
"job": "node_exporter"
|
||||
},
|
||||
"receivers": [
|
||||
{
|
||||
"name": "by-cluster-service"
|
||||
},
|
||||
{
|
||||
"name": "by-name"
|
||||
}
|
||||
],
|
||||
"startsAt": "2019-08-06T20:29:54.772Z",
|
||||
"status": {
|
||||
"inhibitedBy": [],
|
||||
"silencedBy": [
|
||||
"dba0c327-c0f6-4605-8669-e4252921fbfe"
|
||||
],
|
||||
"state": "suppressed"
|
||||
},
|
||||
"updatedAt": "2019-08-06T20:30:34.816Z"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"summary": "Example summary"
|
||||
},
|
||||
"endsAt": "2019-08-06T20:35:34.816Z",
|
||||
"fingerprint": "5cb0dd95e7f3d9c0",
|
||||
"generatorURL": "localhost/prometheus",
|
||||
"labels": {
|
||||
"alertname": "HTTP_Probe_Failed",
|
||||
"cluster": "dev",
|
||||
"instance": "web2",
|
||||
"job": "node_exporter"
|
||||
},
|
||||
"receivers": [
|
||||
{
|
||||
"name": "by-cluster-service"
|
||||
},
|
||||
{
|
||||
"name": "by-name"
|
||||
}
|
||||
],
|
||||
"startsAt": "2019-08-06T20:29:54.772Z",
|
||||
"status": {
|
||||
"inhibitedBy": [],
|
||||
"silencedBy": [],
|
||||
"state": "active"
|
||||
},
|
||||
"updatedAt": "2019-08-06T20:30:34.816Z"
|
||||
}
|
||||
],
|
||||
"labels": {
|
||||
"alertname": "HTTP_Probe_Failed",
|
||||
"cluster": "dev"
|
||||
},
|
||||
"receiver": {
|
||||
"name": "by-cluster-service"
|
||||
}
|
||||
},
|
||||
{
|
||||
"alerts": [
|
||||
{
|
||||
"annotations": {
|
||||
"summary": "Example summary"
|
||||
},
|
||||
"endsAt": "2019-08-06T20:35:34.816Z",
|
||||
"fingerprint": "cc58f2ac8260fb97",
|
||||
"generatorURL": "localhost/prometheus",
|
||||
"labels": {
|
||||
"alertname": "Host_Down",
|
||||
"cluster": "dev",
|
||||
"instance": "server7",
|
||||
"ip": "127.0.0.7",
|
||||
"job": "node_ping"
|
||||
},
|
||||
"receivers": [
|
||||
{
|
||||
"name": "by-cluster-service"
|
||||
},
|
||||
{
|
||||
"name": "by-name"
|
||||
}
|
||||
],
|
||||
"startsAt": "2019-08-06T20:29:54.772Z",
|
||||
"status": {
|
||||
"inhibitedBy": [],
|
||||
"silencedBy": [
|
||||
"aa878af5-ce35-4d00-b56b-c46c28802436",
|
||||
"f0659e37-afae-4c0c-9b14-1f7a26fde82e"
|
||||
],
|
||||
"state": "suppressed"
|
||||
},
|
||||
"updatedAt": "2019-08-06T20:30:34.816Z"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"summary": "Example summary"
|
||||
},
|
||||
"endsAt": "2019-08-06T20:35:34.816Z",
|
||||
"fingerprint": "ffbb5f178eb27f11",
|
||||
"generatorURL": "localhost/prometheus",
|
||||
"labels": {
|
||||
"alertname": "Host_Down",
|
||||
"cluster": "dev",
|
||||
"instance": "server8",
|
||||
"ip": "127.0.0.8",
|
||||
"job": "node_ping"
|
||||
},
|
||||
"receivers": [
|
||||
{
|
||||
"name": "by-cluster-service"
|
||||
},
|
||||
{
|
||||
"name": "by-name"
|
||||
}
|
||||
],
|
||||
"startsAt": "2019-08-06T20:29:54.772Z",
|
||||
"status": {
|
||||
"inhibitedBy": [],
|
||||
"silencedBy": [
|
||||
"aa878af5-ce35-4d00-b56b-c46c28802436"
|
||||
],
|
||||
"state": "suppressed"
|
||||
},
|
||||
"updatedAt": "2019-08-06T20:30:34.816Z"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"summary": "Example summary"
|
||||
},
|
||||
"endsAt": "2019-08-06T20:35:34.816Z",
|
||||
"fingerprint": "588a14f7b4613621",
|
||||
"generatorURL": "localhost/prometheus",
|
||||
"labels": {
|
||||
"alertname": "Host_Down",
|
||||
"cluster": "dev",
|
||||
"instance": "server6",
|
||||
"ip": "127.0.0.6",
|
||||
"job": "node_ping"
|
||||
},
|
||||
"receivers": [
|
||||
{
|
||||
"name": "by-cluster-service"
|
||||
},
|
||||
{
|
||||
"name": "by-name"
|
||||
}
|
||||
],
|
||||
"startsAt": "2019-08-06T20:29:54.772Z",
|
||||
"status": {
|
||||
"inhibitedBy": [],
|
||||
"silencedBy": [
|
||||
"aa878af5-ce35-4d00-b56b-c46c28802436"
|
||||
],
|
||||
"state": "suppressed"
|
||||
},
|
||||
"updatedAt": "2019-08-06T20:30:34.816Z"
|
||||
}
|
||||
],
|
||||
"labels": {
|
||||
"alertname": "Host_Down",
|
||||
"cluster": "dev"
|
||||
},
|
||||
"receiver": {
|
||||
"name": "by-cluster-service"
|
||||
}
|
||||
},
|
||||
{
|
||||
"alerts": [
|
||||
{
|
||||
"annotations": {
|
||||
"summary": "Example summary",
|
||||
"url": "http://localhost/example.html"
|
||||
},
|
||||
"endsAt": "2019-08-06T20:35:34.816Z",
|
||||
"fingerprint": "7013294faf5f854d",
|
||||
"generatorURL": "localhost/prometheus",
|
||||
"labels": {
|
||||
"alertname": "Host_Down",
|
||||
"cluster": "prod",
|
||||
"instance": "server1",
|
||||
"ip": "127.0.0.1",
|
||||
"job": "node_ping"
|
||||
},
|
||||
"receivers": [
|
||||
{
|
||||
"name": "by-cluster-service"
|
||||
},
|
||||
{
|
||||
"name": "by-name"
|
||||
}
|
||||
],
|
||||
"startsAt": "2019-08-06T20:29:54.772Z",
|
||||
"status": {
|
||||
"inhibitedBy": [],
|
||||
"silencedBy": [],
|
||||
"state": "active"
|
||||
},
|
||||
"updatedAt": "2019-08-06T20:30:34.816Z"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"summary": "Example summary"
|
||||
},
|
||||
"endsAt": "2019-08-06T20:35:34.816Z",
|
||||
"fingerprint": "fa959d3911d1978b",
|
||||
"generatorURL": "localhost/prometheus",
|
||||
"labels": {
|
||||
"alertname": "Host_Down",
|
||||
"cluster": "prod",
|
||||
"instance": "server2",
|
||||
"ip": "127.0.0.2",
|
||||
"job": "node_ping"
|
||||
},
|
||||
"receivers": [
|
||||
{
|
||||
"name": "by-cluster-service"
|
||||
},
|
||||
{
|
||||
"name": "by-name"
|
||||
}
|
||||
],
|
||||
"startsAt": "2019-08-06T20:29:54.772Z",
|
||||
"status": {
|
||||
"inhibitedBy": [],
|
||||
"silencedBy": [],
|
||||
"state": "active"
|
||||
},
|
||||
"updatedAt": "2019-08-06T20:30:34.816Z"
|
||||
}
|
||||
],
|
||||
"labels": {
|
||||
"alertname": "Host_Down",
|
||||
"cluster": "prod"
|
||||
},
|
||||
"receiver": {
|
||||
"name": "by-cluster-service"
|
||||
}
|
||||
},
|
||||
{
|
||||
"alerts": [
|
||||
{
|
||||
"annotations": {
|
||||
"summary": "Example summary"
|
||||
},
|
||||
"endsAt": "2019-08-06T20:35:34.816Z",
|
||||
"fingerprint": "7f3a53482c303b65",
|
||||
"generatorURL": "localhost/prometheus",
|
||||
"labels": {
|
||||
"alertname": "Host_Down",
|
||||
"cluster": "staging",
|
||||
"instance": "server3",
|
||||
"ip": "127.0.0.3",
|
||||
"job": "node_ping"
|
||||
},
|
||||
"receivers": [
|
||||
{
|
||||
"name": "by-cluster-service"
|
||||
},
|
||||
{
|
||||
"name": "by-name"
|
||||
}
|
||||
],
|
||||
"startsAt": "2019-08-06T20:29:54.772Z",
|
||||
"status": {
|
||||
"inhibitedBy": [],
|
||||
"silencedBy": [],
|
||||
"state": "active"
|
||||
},
|
||||
"updatedAt": "2019-08-06T20:30:34.816Z"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"summary": "Example summary"
|
||||
},
|
||||
"endsAt": "2019-08-06T20:35:34.816Z",
|
||||
"fingerprint": "7f5c6e647877b3df",
|
||||
"generatorURL": "localhost/prometheus",
|
||||
"labels": {
|
||||
"alertname": "Host_Down",
|
||||
"cluster": "staging",
|
||||
"instance": "server4",
|
||||
"ip": "127.0.0.4",
|
||||
"job": "node_ping"
|
||||
},
|
||||
"receivers": [
|
||||
{
|
||||
"name": "by-cluster-service"
|
||||
},
|
||||
{
|
||||
"name": "by-name"
|
||||
}
|
||||
],
|
||||
"startsAt": "2019-08-06T20:29:54.772Z",
|
||||
"status": {
|
||||
"inhibitedBy": [],
|
||||
"silencedBy": [],
|
||||
"state": "active"
|
||||
},
|
||||
"updatedAt": "2019-08-06T20:30:34.816Z"
|
||||
},
|
||||
{
|
||||
"annotations": {
|
||||
"summary": "Example summary"
|
||||
},
|
||||
"endsAt": "2019-08-06T20:35:34.816Z",
|
||||
"fingerprint": "b5dcc9c573def911",
|
||||
"generatorURL": "localhost/prometheus",
|
||||
"labels": {
|
||||
"alertname": "Host_Down",
|
||||
"cluster": "staging",
|
||||
"instance": "server5",
|
||||
"ip": "127.0.0.5",
|
||||
"job": "node_ping"
|
||||
},
|
||||
"receivers": [
|
||||
{
|
||||
"name": "by-cluster-service"
|
||||
},
|
||||
{
|
||||
"name": "by-name"
|
||||
}
|
||||
],
|
||||
"startsAt": "2019-08-06T20:29:54.772Z",
|
||||
"status": {
|
||||
"inhibitedBy": [],
|
||||
"silencedBy": [],
|
||||
"state": "active"
|
||||
},
|
||||
"updatedAt": "2019-08-06T20:30:34.816Z"
|
||||
}
|
||||
],
|
||||
"labels": {
|
||||
"alertname": "Host_Down",
|
||||
"cluster": "staging"
|
||||
},
|
||||
"receiver": {
|
||||
"name": "by-cluster-service"
|
||||
}
|
||||
},
|
||||
{
|
||||
"alerts": [
|
||||
{
|
||||
"annotations": {
|
||||
"alert": "Memory usage exceeding threshold",
|
||||
"dashboard": "http://localhost/dashboard.html"
|
||||
},
|
||||
"endsAt": "2019-08-06T20:35:34.816Z",
|
||||
"fingerprint": "7d0b114ebf24f857",
|
||||
"generatorURL": "localhost/prometheus",
|
||||
"labels": {
|
||||
"alertname": "Memory_Usage_Too_High",
|
||||
"cluster": "prod",
|
||||
"instance": "server2",
|
||||
"job": "node_exporter"
|
||||
},
|
||||
"receivers": [
|
||||
{
|
||||
"name": "by-cluster-service"
|
||||
},
|
||||
{
|
||||
"name": "by-name"
|
||||
}
|
||||
],
|
||||
"startsAt": "2019-08-06T20:29:54.772Z",
|
||||
"status": {
|
||||
"inhibitedBy": [],
|
||||
"silencedBy": [],
|
||||
"state": "active"
|
||||
},
|
||||
"updatedAt": "2019-08-06T20:30:34.816Z"
|
||||
}
|
||||
],
|
||||
"labels": {
|
||||
"alertname": "Memory_Usage_Too_High",
|
||||
"cluster": "prod"
|
||||
},
|
||||
"receiver": {
|
||||
"name": "by-cluster-service"
|
||||
}
|
||||
}
|
||||
]
@@ -1,61 +0,0 @@
[
{
"comment": "Silenced instance",
"createdBy": "john@example.com",
"endsAt": "2063-01-01T00:00:00.000Z",
"id": "dba0c327-c0f6-4605-8669-e4252921fbfe",
"matchers": [
{
"isRegex": false,
"name": "instance",
"value": "web1"
}
],
"startsAt": "2019-08-06T20:29:54.759Z",
"status": {
"state": "active"
},
"updatedAt": "2019-08-06T20:29:54.759Z"
},
{
"comment": "Silenced Host_Down alerts in the dev cluster",
"createdBy": "john@example.com",
"endsAt": "2063-01-01T00:00:00.000Z",
"id": "aa878af5-ce35-4d00-b56b-c46c28802436",
"matchers": [
{
"isRegex": false,
"name": "alertname",
"value": "Host_Down"
},
{
"isRegex": false,
"name": "cluster",
"value": "dev"
}
],
"startsAt": "2019-08-06T20:29:54.764Z",
"status": {
"state": "active"
},
"updatedAt": "2019-08-06T20:29:54.764Z"
},
{
"comment": "Silenced server7",
"createdBy": "john@example.com",
"endsAt": "2063-01-01T00:00:00.000Z",
"id": "f0659e37-afae-4c0c-9b14-1f7a26fde82e",
"matchers": [
{
"isRegex": false,
"name": "instance",
"value": "server7"
}
],
"startsAt": "2019-08-06T20:29:54.768Z",
"status": {
"state": "active"
},
"updatedAt": "2019-08-06T20:29:54.768Z"
}
]
@@ -1,24 +0,0 @@
{
"cluster": {
"name": "01DHM9Z0B8F4M1RPDZPXJSX0NX",
"peers": [
{
"address": "172.17.0.2:9094",
"name": "01DHM9Z0B8F4M1RPDZPXJSX0NX"
}
],
"status": "ready"
},
"config": {
"original": "global:\n resolve_timeout: 5m\n http_config: {}\n smtp_hello: localhost\n smtp_require_tls: true\n pagerduty_url: https://events.pagerduty.com/v2/enqueue\n hipchat_api_url: https://api.hipchat.com/\n opsgenie_api_url: https://api.opsgenie.com/\n wechat_api_url: https://qyapi.weixin.qq.com/cgi-bin/\n victorops_api_url: https://alert.victorops.com/integrations/generic/20131114/alert/\nroute:\n receiver: default\n group_by:\n - alertname\n routes:\n - receiver: by-cluster-service\n group_by:\n - alertname\n - cluster\n - service\n match_re:\n alertname: .*\n continue: true\n - receiver: by-name\n group_by:\n - alertname\n match_re:\n alertname: .*\n continue: true\n group_wait: 15s\n group_interval: 35s\n repeat_interval: 999h\ninhibit_rules:\n- source_match:\n severity: critical\n target_match:\n severity: warning\n equal:\n - alertname\n - cluster\n - service\nreceivers:\n- name: default\n- name: by-cluster-service\n- name: by-name\ntemplates: []\n"
},
"uptime": "2019-08-06T20:29:39.563Z",
"versionInfo": {
"branch": "HEAD",
"buildDate": "20190708-14:31:49",
"buildUser": "root@868685ed3ed0",
"goVersion": "go1.12.6",
"revision": "1ace0f76b7101cccc149d7298022df36039858ca",
"version": "0.18.0"
}
}
@@ -1,477 +0,0 @@
# HELP alertmanager_alerts How many alerts by state.
|
||||
# TYPE alertmanager_alerts gauge
|
||||
alertmanager_alerts{state="active"} 8
|
||||
alertmanager_alerts{state="suppressed"} 4
|
||||
# HELP alertmanager_alerts_invalid_total The total number of received alerts that were invalid.
|
||||
# TYPE alertmanager_alerts_invalid_total counter
|
||||
alertmanager_alerts_invalid_total{version="v1"} 0
|
||||
alertmanager_alerts_invalid_total{version="v2"} 0
|
||||
# HELP alertmanager_alerts_received_total The total number of received alerts.
|
||||
# TYPE alertmanager_alerts_received_total counter
|
||||
alertmanager_alerts_received_total{status="firing",version="v1"} 60
|
||||
alertmanager_alerts_received_total{status="firing",version="v2"} 0
|
||||
alertmanager_alerts_received_total{status="resolved",version="v1"} 0
|
||||
alertmanager_alerts_received_total{status="resolved",version="v2"} 0
|
||||
# HELP alertmanager_build_info A metric with a constant '1' value labeled by version, revision, branch, and goversion from which alertmanager was built.
|
||||
# TYPE alertmanager_build_info gauge
|
||||
alertmanager_build_info{branch="HEAD",goversion="go1.12.6",revision="1ace0f76b7101cccc149d7298022df36039858ca",version="0.18.0"} 1
|
||||
# HELP alertmanager_cluster_alive_messages_total Total number of received alive messages.
|
||||
# TYPE alertmanager_cluster_alive_messages_total counter
|
||||
alertmanager_cluster_alive_messages_total{peer="01DHM9Z0B8F4M1RPDZPXJSX0NX"} 1
|
||||
# HELP alertmanager_cluster_failed_peers Number indicating the current number of failed peers in the cluster.
|
||||
# TYPE alertmanager_cluster_failed_peers gauge
|
||||
alertmanager_cluster_failed_peers 0
|
||||
# HELP alertmanager_cluster_health_score Health score of the cluster. Lower values are better and zero means 'totally healthy'.
|
||||
# TYPE alertmanager_cluster_health_score gauge
|
||||
alertmanager_cluster_health_score 0
|
||||
# HELP alertmanager_cluster_members Number indicating current number of members in cluster.
|
||||
# TYPE alertmanager_cluster_members gauge
|
||||
alertmanager_cluster_members 1
|
||||
# HELP alertmanager_cluster_messages_pruned_total Total number of cluster messages pruned.
|
||||
# TYPE alertmanager_cluster_messages_pruned_total counter
|
||||
alertmanager_cluster_messages_pruned_total 0
|
||||
# HELP alertmanager_cluster_messages_queued Number of cluster messages which are queued.
|
||||
# TYPE alertmanager_cluster_messages_queued gauge
|
||||
alertmanager_cluster_messages_queued 3
|
||||
# HELP alertmanager_cluster_messages_received_size_total Total size of cluster messages received.
|
||||
# TYPE alertmanager_cluster_messages_received_size_total counter
|
||||
alertmanager_cluster_messages_received_size_total{msg_type="full_state"} 0
|
||||
alertmanager_cluster_messages_received_size_total{msg_type="update"} 0
|
||||
# HELP alertmanager_cluster_messages_received_total Total number of cluster messages received.
|
||||
# TYPE alertmanager_cluster_messages_received_total counter
|
||||
alertmanager_cluster_messages_received_total{msg_type="full_state"} 0
|
||||
alertmanager_cluster_messages_received_total{msg_type="update"} 0
|
||||
# HELP alertmanager_cluster_messages_sent_size_total Total size of cluster messages sent.
|
||||
# TYPE alertmanager_cluster_messages_sent_size_total counter
|
||||
alertmanager_cluster_messages_sent_size_total{msg_type="full_state"} 0
|
||||
alertmanager_cluster_messages_sent_size_total{msg_type="update"} 0
|
||||
# HELP alertmanager_cluster_messages_sent_total Total number of cluster messages sent.
|
||||
# TYPE alertmanager_cluster_messages_sent_total counter
|
||||
alertmanager_cluster_messages_sent_total{msg_type="full_state"} 0
|
||||
alertmanager_cluster_messages_sent_total{msg_type="update"} 0
|
||||
# HELP alertmanager_cluster_peer_info A metric with a constant '1' value labeled by peer name.
|
||||
# TYPE alertmanager_cluster_peer_info gauge
|
||||
alertmanager_cluster_peer_info{peer="01DHM9Z0B8F4M1RPDZPXJSX0NX"} 1
|
||||
# HELP alertmanager_cluster_peers_joined_total A counter of the number of peers that have joined.
|
||||
# TYPE alertmanager_cluster_peers_joined_total counter
|
||||
alertmanager_cluster_peers_joined_total 1
|
||||
# HELP alertmanager_cluster_peers_left_total A counter of the number of peers that have left.
|
||||
# TYPE alertmanager_cluster_peers_left_total counter
|
||||
alertmanager_cluster_peers_left_total 0
|
||||
# HELP alertmanager_cluster_peers_update_total A counter of the number of peers that have updated metadata.
|
||||
# TYPE alertmanager_cluster_peers_update_total counter
|
||||
alertmanager_cluster_peers_update_total 0
|
||||
# HELP alertmanager_cluster_reconnections_failed_total A counter of the number of failed cluster peer reconnection attempts.
|
||||
# TYPE alertmanager_cluster_reconnections_failed_total counter
|
||||
alertmanager_cluster_reconnections_failed_total 0
|
||||
# HELP alertmanager_cluster_reconnections_total A counter of the number of cluster peer reconnections.
|
||||
# TYPE alertmanager_cluster_reconnections_total counter
|
||||
alertmanager_cluster_reconnections_total 0
|
||||
# HELP alertmanager_cluster_refresh_join_failed_total A counter of the number of failed cluster peer joined attempts via refresh.
|
||||
# TYPE alertmanager_cluster_refresh_join_failed_total counter
|
||||
alertmanager_cluster_refresh_join_failed_total 0
|
||||
# HELP alertmanager_cluster_refresh_join_total A counter of the number of cluster peer joined via refresh.
|
||||
# TYPE alertmanager_cluster_refresh_join_total counter
|
||||
alertmanager_cluster_refresh_join_total 0
|
||||
# HELP alertmanager_config_hash Hash of the currently loaded alertmanager configuration.
|
||||
# TYPE alertmanager_config_hash gauge
|
||||
alertmanager_config_hash 6.2645753076152e+13
|
||||
# HELP alertmanager_config_last_reload_success_timestamp_seconds Timestamp of the last successful configuration reload.
|
||||
# TYPE alertmanager_config_last_reload_success_timestamp_seconds gauge
|
||||
alertmanager_config_last_reload_success_timestamp_seconds 1.565123379e+09
|
||||
# HELP alertmanager_config_last_reload_successful Whether the last configuration reload attempt was successful.
|
||||
# TYPE alertmanager_config_last_reload_successful gauge
|
||||
alertmanager_config_last_reload_successful 1
|
||||
# HELP alertmanager_http_concurrency_limit_exceeded_total Total number of times an HTTP request failed because the concurrency limit was reached.
|
||||
# TYPE alertmanager_http_concurrency_limit_exceeded_total counter
|
||||
alertmanager_http_concurrency_limit_exceeded_total{method="get"} 0
|
||||
# HELP alertmanager_http_request_duration_seconds Histogram of latencies for HTTP requests.
|
||||
# TYPE alertmanager_http_request_duration_seconds histogram
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="0.05"} 5
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="0.1"} 5
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="0.25"} 5
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="0.5"} 5
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="0.75"} 5
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="1"} 5
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="2"} 5
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="5"} 5
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="20"} 5
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="60"} 5
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="+Inf"} 5
|
||||
alertmanager_http_request_duration_seconds_sum{handler="/alerts",method="post"} 0.0056321999999999995
|
||||
alertmanager_http_request_duration_seconds_count{handler="/alerts",method="post"} 5
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="0.05"} 3
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="0.1"} 3
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="0.25"} 3
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="0.5"} 3
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="0.75"} 3
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="1"} 3
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="2"} 3
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="5"} 3
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="20"} 3
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="60"} 3
|
||||
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="+Inf"} 3
|
||||
alertmanager_http_request_duration_seconds_sum{handler="/silences",method="post"} 0.0022254
|
||||
alertmanager_http_request_duration_seconds_count{handler="/silences",method="post"} 3
|
||||
# HELP alertmanager_http_requests_in_flight Current number of HTTP requests being processed.
|
||||
# TYPE alertmanager_http_requests_in_flight gauge
|
||||
alertmanager_http_requests_in_flight{method="get"} 1
|
||||
# HELP alertmanager_http_response_size_bytes Histogram of response size for HTTP requests.
|
||||
# TYPE alertmanager_http_response_size_bytes histogram
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="100"} 5
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="1000"} 5
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="10000"} 5
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="100000"} 5
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="1e+06"} 5
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="1e+07"} 5
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="1e+08"} 5
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="+Inf"} 5
|
||||
alertmanager_http_response_size_bytes_sum{handler="/alerts",method="post"} 100
|
||||
alertmanager_http_response_size_bytes_count{handler="/alerts",method="post"} 5
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="100"} 3
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="1000"} 3
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="10000"} 3
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="100000"} 3
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="1e+06"} 3
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="1e+07"} 3
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="1e+08"} 3
|
||||
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="+Inf"} 3
|
||||
alertmanager_http_response_size_bytes_sum{handler="/silences",method="post"} 240
|
||||
alertmanager_http_response_size_bytes_count{handler="/silences",method="post"} 3
|
||||
# HELP alertmanager_nflog_gc_duration_seconds Duration of the last notification log garbage collection cycle.
|
||||
# TYPE alertmanager_nflog_gc_duration_seconds summary
|
||||
alertmanager_nflog_gc_duration_seconds_sum 0
|
||||
alertmanager_nflog_gc_duration_seconds_count 0
|
||||
# HELP alertmanager_nflog_gossip_messages_propagated_total Number of received gossip messages that have been further gossiped.
|
||||
# TYPE alertmanager_nflog_gossip_messages_propagated_total counter
|
||||
alertmanager_nflog_gossip_messages_propagated_total 0
|
||||
# HELP alertmanager_nflog_queries_total Number of notification log queries were received.
|
||||
# TYPE alertmanager_nflog_queries_total counter
|
||||
alertmanager_nflog_queries_total 0
|
||||
# HELP alertmanager_nflog_query_duration_seconds Duration of notification log query evaluation.
|
||||
# TYPE alertmanager_nflog_query_duration_seconds histogram
|
||||
alertmanager_nflog_query_duration_seconds_bucket{le="0.005"} 0
|
||||
alertmanager_nflog_query_duration_seconds_bucket{le="0.01"} 0
|
||||
alertmanager_nflog_query_duration_seconds_bucket{le="0.025"} 0
|
||||
alertmanager_nflog_query_duration_seconds_bucket{le="0.05"} 0
|
||||
alertmanager_nflog_query_duration_seconds_bucket{le="0.1"} 0
|
||||
alertmanager_nflog_query_duration_seconds_bucket{le="0.25"} 0
|
||||
alertmanager_nflog_query_duration_seconds_bucket{le="0.5"} 0
|
||||
alertmanager_nflog_query_duration_seconds_bucket{le="1"} 0
|
||||
alertmanager_nflog_query_duration_seconds_bucket{le="2.5"} 0
|
||||
alertmanager_nflog_query_duration_seconds_bucket{le="5"} 0
|
||||
alertmanager_nflog_query_duration_seconds_bucket{le="10"} 0
|
||||
alertmanager_nflog_query_duration_seconds_bucket{le="+Inf"} 0
|
||||
alertmanager_nflog_query_duration_seconds_sum 0
|
||||
alertmanager_nflog_query_duration_seconds_count 0
|
||||
# HELP alertmanager_nflog_query_errors_total Number notification log received queries that failed.
|
||||
# TYPE alertmanager_nflog_query_errors_total counter
|
||||
alertmanager_nflog_query_errors_total 0
|
||||
# HELP alertmanager_nflog_snapshot_duration_seconds Duration of the last notification log snapshot.
|
||||
# TYPE alertmanager_nflog_snapshot_duration_seconds summary
|
||||
alertmanager_nflog_snapshot_duration_seconds_sum 0
|
||||
alertmanager_nflog_snapshot_duration_seconds_count 0
|
||||
# HELP alertmanager_nflog_snapshot_size_bytes Size of the last notification log snapshot in bytes.
|
||||
# TYPE alertmanager_nflog_snapshot_size_bytes gauge
|
||||
alertmanager_nflog_snapshot_size_bytes 0
|
||||
# HELP alertmanager_notification_latency_seconds The latency of notifications in seconds.
|
||||
# TYPE alertmanager_notification_latency_seconds histogram
|
||||
alertmanager_notification_latency_seconds_bucket{integration="email",le="1"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="email",le="5"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="email",le="10"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="email",le="15"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="email",le="20"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="email",le="+Inf"} 0
|
||||
alertmanager_notification_latency_seconds_sum{integration="email"} 0
|
||||
alertmanager_notification_latency_seconds_count{integration="email"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="hipchat",le="1"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="hipchat",le="5"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="hipchat",le="10"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="hipchat",le="15"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="hipchat",le="20"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="hipchat",le="+Inf"} 0
|
||||
alertmanager_notification_latency_seconds_sum{integration="hipchat"} 0
|
||||
alertmanager_notification_latency_seconds_count{integration="hipchat"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="1"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="5"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="10"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="15"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="20"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="+Inf"} 0
|
||||
alertmanager_notification_latency_seconds_sum{integration="opsgenie"} 0
|
||||
alertmanager_notification_latency_seconds_count{integration="opsgenie"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="1"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="5"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="10"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="15"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="20"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="+Inf"} 0
|
||||
alertmanager_notification_latency_seconds_sum{integration="pagerduty"} 0
|
||||
alertmanager_notification_latency_seconds_count{integration="pagerduty"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="1"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="5"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="10"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="15"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="20"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="+Inf"} 0
|
||||
alertmanager_notification_latency_seconds_sum{integration="pushover"} 0
|
||||
alertmanager_notification_latency_seconds_count{integration="pushover"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="slack",le="1"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="slack",le="5"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="slack",le="10"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="slack",le="15"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="slack",le="20"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="slack",le="+Inf"} 0
|
||||
alertmanager_notification_latency_seconds_sum{integration="slack"} 0
|
||||
alertmanager_notification_latency_seconds_count{integration="slack"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="1"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="5"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="10"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="15"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="20"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="+Inf"} 0
|
||||
alertmanager_notification_latency_seconds_sum{integration="victorops"} 0
|
||||
alertmanager_notification_latency_seconds_count{integration="victorops"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="1"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="5"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="10"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="15"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="20"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="+Inf"} 0
|
||||
alertmanager_notification_latency_seconds_sum{integration="webhook"} 0
|
||||
alertmanager_notification_latency_seconds_count{integration="webhook"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="1"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="5"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="10"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="15"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="20"} 0
|
||||
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="+Inf"} 0
|
||||
alertmanager_notification_latency_seconds_sum{integration="wechat"} 0
|
||||
alertmanager_notification_latency_seconds_count{integration="wechat"} 0
|
||||
# HELP alertmanager_notifications_failed_total The total number of failed notifications.
|
||||
# TYPE alertmanager_notifications_failed_total counter
|
||||
alertmanager_notifications_failed_total{integration="email"} 0
|
||||
alertmanager_notifications_failed_total{integration="hipchat"} 0
|
||||
alertmanager_notifications_failed_total{integration="opsgenie"} 0
|
||||
alertmanager_notifications_failed_total{integration="pagerduty"} 0
|
||||
alertmanager_notifications_failed_total{integration="pushover"} 0
|
||||
alertmanager_notifications_failed_total{integration="slack"} 0
|
||||
alertmanager_notifications_failed_total{integration="victorops"} 0
|
||||
alertmanager_notifications_failed_total{integration="webhook"} 0
|
||||
alertmanager_notifications_failed_total{integration="wechat"} 0
|
||||
# HELP alertmanager_notifications_total The total number of attempted notifications.
|
||||
# TYPE alertmanager_notifications_total counter
|
||||
alertmanager_notifications_total{integration="email"} 0
|
||||
alertmanager_notifications_total{integration="hipchat"} 0
|
||||
alertmanager_notifications_total{integration="opsgenie"} 0
|
||||
alertmanager_notifications_total{integration="pagerduty"} 0
|
||||
alertmanager_notifications_total{integration="pushover"} 0
|
||||
alertmanager_notifications_total{integration="slack"} 0
|
||||
alertmanager_notifications_total{integration="victorops"} 0
|
||||
alertmanager_notifications_total{integration="webhook"} 0
|
||||
alertmanager_notifications_total{integration="wechat"} 0
|
||||
# HELP alertmanager_oversize_gossip_message_duration_seconds Duration of oversized gossip message requests.
|
||||
# TYPE alertmanager_oversize_gossip_message_duration_seconds histogram
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.005"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.01"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.025"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.05"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.1"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.25"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.5"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="1"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="2.5"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="5"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="10"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="+Inf"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_sum{key="nfl"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_count{key="nfl"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.005"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.01"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.025"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.05"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.1"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.25"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.5"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="1"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="2.5"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="5"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="10"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="+Inf"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_sum{key="sil"} 0
|
||||
alertmanager_oversize_gossip_message_duration_seconds_count{key="sil"} 0
|
||||
# HELP alertmanager_oversized_gossip_message_dropped_total Number of oversized gossip messages that were dropped due to a full message queue.
|
||||
# TYPE alertmanager_oversized_gossip_message_dropped_total counter
|
||||
alertmanager_oversized_gossip_message_dropped_total{key="nfl"} 0
|
||||
alertmanager_oversized_gossip_message_dropped_total{key="sil"} 0
|
||||
# HELP alertmanager_oversized_gossip_message_failure_total Number of oversized gossip message sends that failed.
|
||||
# TYPE alertmanager_oversized_gossip_message_failure_total counter
|
||||
alertmanager_oversized_gossip_message_failure_total{key="nfl"} 0
|
||||
alertmanager_oversized_gossip_message_failure_total{key="sil"} 0
|
||||
# HELP alertmanager_oversized_gossip_message_sent_total Number of oversized gossip message sent.
|
||||
# TYPE alertmanager_oversized_gossip_message_sent_total counter
|
||||
alertmanager_oversized_gossip_message_sent_total{key="nfl"} 0
|
||||
alertmanager_oversized_gossip_message_sent_total{key="sil"} 0
|
||||
# HELP alertmanager_peer_position Position the Alertmanager instance believes it's in. The position determines a peer's behavior in the cluster.
|
||||
# TYPE alertmanager_peer_position gauge
|
||||
alertmanager_peer_position 0
|
||||
# HELP alertmanager_silences How many silences by state.
|
||||
# TYPE alertmanager_silences gauge
|
||||
alertmanager_silences{state="active"} 3
|
||||
alertmanager_silences{state="expired"} 0
|
||||
alertmanager_silences{state="pending"} 0
|
||||
# HELP alertmanager_silences_gc_duration_seconds Duration of the last silence garbage collection cycle.
|
||||
# TYPE alertmanager_silences_gc_duration_seconds summary
|
||||
alertmanager_silences_gc_duration_seconds_sum 0
|
||||
alertmanager_silences_gc_duration_seconds_count 0
|
||||
# HELP alertmanager_silences_gossip_messages_propagated_total Number of received gossip messages that have been further gossiped.
|
||||
# TYPE alertmanager_silences_gossip_messages_propagated_total counter
|
||||
alertmanager_silences_gossip_messages_propagated_total 0
|
||||
# HELP alertmanager_silences_queries_total How many silence queries were received.
|
||||
# TYPE alertmanager_silences_queries_total counter
|
||||
alertmanager_silences_queries_total 25
|
||||
# HELP alertmanager_silences_query_duration_seconds Duration of silence query evaluation.
|
||||
# TYPE alertmanager_silences_query_duration_seconds histogram
|
||||
alertmanager_silences_query_duration_seconds_bucket{le="0.005"} 24
|
||||
alertmanager_silences_query_duration_seconds_bucket{le="0.01"} 24
|
||||
alertmanager_silences_query_duration_seconds_bucket{le="0.025"} 24
|
||||
alertmanager_silences_query_duration_seconds_bucket{le="0.05"} 24
|
||||
alertmanager_silences_query_duration_seconds_bucket{le="0.1"} 24
|
||||
alertmanager_silences_query_duration_seconds_bucket{le="0.25"} 24
|
||||
alertmanager_silences_query_duration_seconds_bucket{le="0.5"} 24
|
||||
alertmanager_silences_query_duration_seconds_bucket{le="1"} 24
|
||||
alertmanager_silences_query_duration_seconds_bucket{le="2.5"} 24
|
||||
alertmanager_silences_query_duration_seconds_bucket{le="5"} 24
|
||||
alertmanager_silences_query_duration_seconds_bucket{le="10"} 24
|
||||
alertmanager_silences_query_duration_seconds_bucket{le="+Inf"} 24
|
||||
alertmanager_silences_query_duration_seconds_sum 0.0010285
|
||||
alertmanager_silences_query_duration_seconds_count 24
|
||||
# HELP alertmanager_silences_query_errors_total How many silence received queries did not succeed.
|
||||
# TYPE alertmanager_silences_query_errors_total counter
|
||||
alertmanager_silences_query_errors_total 0
|
||||
# HELP alertmanager_silences_snapshot_duration_seconds Duration of the last silence snapshot.
|
||||
# TYPE alertmanager_silences_snapshot_duration_seconds summary
|
||||
alertmanager_silences_snapshot_duration_seconds_sum 0
|
||||
alertmanager_silences_snapshot_duration_seconds_count 0
|
||||
# HELP alertmanager_silences_snapshot_size_bytes Size of the last silence snapshot in bytes.
|
||||
# TYPE alertmanager_silences_snapshot_size_bytes gauge
|
||||
alertmanager_silences_snapshot_size_bytes 0
|
||||
# HELP go_gc_duration_seconds A summary of the GC invocation durations.
|
||||
# TYPE go_gc_duration_seconds summary
|
||||
go_gc_duration_seconds{quantile="0"} 5.79e-05
|
||||
go_gc_duration_seconds{quantile="0.25"} 6.12e-05
|
||||
go_gc_duration_seconds{quantile="0.5"} 0.0001188
|
||||
go_gc_duration_seconds{quantile="0.75"} 0.0001755
|
||||
go_gc_duration_seconds{quantile="1"} 0.0004936
|
||||
go_gc_duration_seconds_sum 0.000907
|
||||
go_gc_duration_seconds_count 5
|
||||
# HELP go_goroutines Number of goroutines that currently exist.
|
||||
# TYPE go_goroutines gauge
|
||||
go_goroutines 51
|
||||
# HELP go_info Information about the Go environment.
|
||||
# TYPE go_info gauge
|
||||
go_info{version="go1.12.6"} 1
|
||||
# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
|
||||
# TYPE go_memstats_alloc_bytes gauge
|
||||
go_memstats_alloc_bytes 6.846952e+06
|
||||
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
|
||||
# TYPE go_memstats_alloc_bytes_total counter
|
||||
go_memstats_alloc_bytes_total 1.5217664e+07
|
||||
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
|
||||
# TYPE go_memstats_buck_hash_sys_bytes gauge
|
||||
go_memstats_buck_hash_sys_bytes 1.449538e+06
|
||||
# HELP go_memstats_frees_total Total number of frees.
|
||||
# TYPE go_memstats_frees_total counter
|
||||
go_memstats_frees_total 89084
|
||||
# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started.
|
||||
# TYPE go_memstats_gc_cpu_fraction gauge
|
||||
go_memstats_gc_cpu_fraction 0.037543780660893165
|
||||
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
|
||||
# TYPE go_memstats_gc_sys_bytes gauge
|
||||
go_memstats_gc_sys_bytes 2.38592e+06
|
||||
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
|
||||
# TYPE go_memstats_heap_alloc_bytes gauge
|
||||
go_memstats_heap_alloc_bytes 6.846952e+06
|
||||
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
|
||||
# TYPE go_memstats_heap_idle_bytes gauge
|
||||
go_memstats_heap_idle_bytes 5.7909248e+07
|
||||
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
|
||||
# TYPE go_memstats_heap_inuse_bytes gauge
|
||||
go_memstats_heap_inuse_bytes 8.380416e+06
|
||||
# HELP go_memstats_heap_objects Number of allocated objects.
|
||||
# TYPE go_memstats_heap_objects gauge
|
||||
go_memstats_heap_objects 39678
|
||||
# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.
|
||||
# TYPE go_memstats_heap_released_bytes gauge
|
||||
go_memstats_heap_released_bytes 0
|
||||
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
|
||||
# TYPE go_memstats_heap_sys_bytes gauge
|
||||
go_memstats_heap_sys_bytes 6.6289664e+07
|
||||
# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
|
||||
# TYPE go_memstats_last_gc_time_seconds gauge
|
||||
go_memstats_last_gc_time_seconds 1.5651233796129203e+09
|
||||
# HELP go_memstats_lookups_total Total number of pointer lookups.
|
||||
# TYPE go_memstats_lookups_total counter
|
||||
go_memstats_lookups_total 0
|
||||
# HELP go_memstats_mallocs_total Total number of mallocs.
|
||||
# TYPE go_memstats_mallocs_total counter
|
||||
go_memstats_mallocs_total 128762
|
||||
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
|
||||
# TYPE go_memstats_mcache_inuse_bytes gauge
|
||||
go_memstats_mcache_inuse_bytes 3472
|
||||
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
|
||||
# TYPE go_memstats_mcache_sys_bytes gauge
|
||||
go_memstats_mcache_sys_bytes 16384
|
||||
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
|
||||
# TYPE go_memstats_mspan_inuse_bytes gauge
|
||||
go_memstats_mspan_inuse_bytes 105552
|
||||
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
|
||||
# TYPE go_memstats_mspan_sys_bytes gauge
|
||||
go_memstats_mspan_sys_bytes 114688
|
||||
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
|
||||
# TYPE go_memstats_next_gc_bytes gauge
|
||||
go_memstats_next_gc_bytes 8.537216e+06
|
||||
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
|
||||
# TYPE go_memstats_other_sys_bytes gauge
|
||||
go_memstats_other_sys_bytes 686774
|
||||
# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
|
||||
# TYPE go_memstats_stack_inuse_bytes gauge
|
||||
go_memstats_stack_inuse_bytes 819200
|
||||
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
|
||||
# TYPE go_memstats_stack_sys_bytes gauge
|
||||
go_memstats_stack_sys_bytes 819200
|
||||
# HELP go_memstats_sys_bytes Number of bytes obtained from system.
|
||||
# TYPE go_memstats_sys_bytes gauge
|
||||
go_memstats_sys_bytes 7.1762168e+07
|
||||
# HELP go_threads Number of OS threads created.
|
||||
# TYPE go_threads gauge
|
||||
go_threads 9
|
||||
# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
|
||||
# TYPE process_cpu_seconds_total counter
|
||||
process_cpu_seconds_total 0.36
|
||||
# HELP process_max_fds Maximum number of open file descriptors.
|
||||
# TYPE process_max_fds gauge
|
||||
process_max_fds 1.048576e+06
|
||||
# HELP process_open_fds Number of open file descriptors.
|
||||
# TYPE process_open_fds gauge
|
||||
process_open_fds 9
|
||||
# HELP process_resident_memory_bytes Resident memory size in bytes.
|
||||
# TYPE process_resident_memory_bytes gauge
|
||||
process_resident_memory_bytes 2.2933504e+07
|
||||
# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
|
||||
# TYPE process_start_time_seconds gauge
|
||||
process_start_time_seconds 1.56512337842e+09
|
||||
# HELP process_virtual_memory_bytes Virtual memory size in bytes.
|
||||
# TYPE process_virtual_memory_bytes gauge
|
||||
process_virtual_memory_bytes 1.25607936e+08
|
||||
# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.
|
||||
# TYPE process_virtual_memory_max_bytes gauge
|
||||
process_virtual_memory_max_bytes -1
|
||||
# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.
|
||||
# TYPE promhttp_metric_handler_requests_in_flight gauge
|
||||
promhttp_metric_handler_requests_in_flight 1
|
||||
# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.
|
||||
# TYPE promhttp_metric_handler_requests_total counter
|
||||
promhttp_metric_handler_requests_total{code="200"} 0
|
||||
promhttp_metric_handler_requests_total{code="500"} 0
|
||||
promhttp_metric_handler_requests_total{code="503"} 0
|
||||
@@ -5,7 +5,7 @@ DOCKER_ARGS := --name $(DOCKER_NAME) --rm -d -p 9093:9093 \
	-v $(CURDIR)/alertmanager.yml:/etc/alertmanager/alertmanager.yml

# list of Alertmanager versions to generate mock files for
VERSIONS := 0.17.0 0.18.0 0.19.0 0.20.0 0.21.0
VERSIONS := 0.19.0 0.20.0 0.21.0

%/.ok: livemock.py
	$(eval VERSION := $(word 1, $(subst /, ,$@)))