Merge pull request #704 from prymitive/am-0.17.0

feat(backend): add alertmanager v0.17.0 mock files for testing
Łukasz Mierzwa, 2019-05-10 21:49:15 +01:00 (committed by GitHub)

9 changed files with 1521 additions and 11 deletions

@@ -6,6 +6,8 @@ import (
 	"testing"
 	"time"
+	"github.com/jarcoal/httpmock"
 	"github.com/prymitive/karma/internal/alertmanager"
 	"github.com/prymitive/karma/internal/config"
 	"github.com/prymitive/karma/internal/mock"
@@ -15,8 +17,10 @@ import (
 func init() {
 	log.SetLevel(log.ErrorLevel)
-	for i, uri := range mock.ListAllMockURIs() {
-		name := fmt.Sprintf("dedup-mock-%d", i)
+	httpmock.Activate()
+	for _, version := range mock.ListAllMocks() {
+		name := fmt.Sprintf("dedup-mock-%s", version)
+		uri := fmt.Sprintf("http://%s.localhost", version)
 		am, err := alertmanager.NewAlertmanager(name, uri, alertmanager.WithRequestTimeout(time.Second))
 		if err != nil {
 			log.Fatal(err)
@@ -25,6 +29,13 @@ func init() {
 		if err != nil {
 			panic(fmt.Sprintf("Failed to register Alertmanager instance %s: %s", am.Name, err))
 		}
+		mock.RegisterURL(fmt.Sprintf("%s/metrics", uri), version, "metrics")
+		mock.RegisterURL(fmt.Sprintf("%s/api/v1/status", uri), version, "api/v1/status")
+		mock.RegisterURL(fmt.Sprintf("%s/api/v1/silences", uri), version, "api/v1/silences")
+		mock.RegisterURL(fmt.Sprintf("%s/api/v2/silences", uri), version, "api/v2/silences")
+		mock.RegisterURL(fmt.Sprintf("%s/api/v1/alerts/groups", uri), version, "api/v1/alerts/groups")
+		mock.RegisterURL(fmt.Sprintf("%s/api/v2/alerts/groups", uri), version, "api/v2/alerts/groups")
 	}
 }
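
The rewritten init() keys every mock instance off a version string and relies on httpmock having replaced Go's default HTTP transport, so per-version URIs like http://0.17.0.localhost never touch the network. A minimal standalone sketch of that mechanism (the endpoint and body here are illustrative, not karma code):

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"

	"github.com/jarcoal/httpmock"
)

func main() {
	// Activate swaps out http.DefaultTransport, so plain http.Get is intercepted.
	httpmock.Activate()
	defer httpmock.DeactivateAndReset()

	uri := "http://0.17.0.localhost"
	// Serve a canned body for one endpoint, as RegisterURL does with fixture files.
	httpmock.RegisterResponder("GET", uri+"/api/v2/silences",
		httpmock.NewStringResponder(200, "[]"))

	resp, err := http.Get(uri + "/api/v2/silences")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body)) // 200 []
}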

internal/mock/0.17.0/.ok (new empty file)

internal/mock/0.17.0/api/v1/status (new file)

@@ -0,0 +1,110 @@
{
"data": {
"clusterStatus": {
"name": "01DAHM8SXN8TV81ZEREK68P5C5",
"peers": [
{
"address": "172.17.0.2:9094",
"name": "01DAHM8SXN8TV81ZEREK68P5C5"
}
],
"status": "ready"
},
"configJSON": {
"global": {
"hipchat_api_url": "https://api.hipchat.com/",
"http_config": {
"BasicAuth": null,
"BearerToken": "",
"BearerTokenFile": "",
"ProxyURL": {},
"TLSConfig": {
"CAFile": "",
"CertFile": "",
"InsecureSkipVerify": false,
"KeyFile": "",
"ServerName": ""
}
},
"opsgenie_api_url": "https://api.opsgenie.com/",
"pagerduty_url": "https://events.pagerduty.com/v2/enqueue",
"resolve_timeout": 300000000000,
"smtp_hello": "localhost",
"smtp_require_tls": true,
"victorops_api_url": "https://alert.victorops.com/integrations/generic/20131114/alert/",
"wechat_api_url": "https://qyapi.weixin.qq.com/cgi-bin/"
},
"inhibit_rules": [
{
"equal": [
"alertname",
"cluster",
"service"
],
"source_match": {
"severity": "critical"
},
"target_match": {
"severity": "warning"
}
}
],
"receivers": [
{
"name": "default"
},
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"route": {
"group_by": [
"alertname"
],
"group_interval": 35000000000,
"group_wait": 15000000000,
"receiver": "default",
"repeat_interval": 3596400000000000,
"routes": [
{
"continue": true,
"group_by": [
"alertname",
"cluster",
"service"
],
"match_re": {
"alertname": ".*"
},
"receiver": "by-cluster-service"
},
{
"continue": true,
"group_by": [
"alertname"
],
"match_re": {
"alertname": ".*"
},
"receiver": "by-name"
}
]
},
"templates": null
},
"configYAML": "global:\n resolve_timeout: 5m\n http_config: {}\n smtp_hello: localhost\n smtp_require_tls: true\n pagerduty_url: https://events.pagerduty.com/v2/enqueue\n hipchat_api_url: https://api.hipchat.com/\n opsgenie_api_url: https://api.opsgenie.com/\n wechat_api_url: https://qyapi.weixin.qq.com/cgi-bin/\n victorops_api_url: https://alert.victorops.com/integrations/generic/20131114/alert/\nroute:\n receiver: default\n group_by:\n - alertname\n routes:\n - receiver: by-cluster-service\n group_by:\n - alertname\n - cluster\n - service\n match_re:\n alertname: .*\n continue: true\n - receiver: by-name\n group_by:\n - alertname\n match_re:\n alertname: .*\n continue: true\n group_wait: 15s\n group_interval: 35s\n repeat_interval: 999h\ninhibit_rules:\n- source_match:\n severity: critical\n target_match:\n severity: warning\n equal:\n - alertname\n - cluster\n - service\nreceivers:\n- name: default\n- name: by-cluster-service\n- name: by-name\ntemplates: []\n",
"uptime": "2019-05-10T19:42:10.360492Z",
"versionInfo": {
"branch": "HEAD",
"buildDate": "20190503-09:10:07",
"buildUser": "root@932a86a52b76",
"goVersion": "go1.12.4",
"revision": "c7551cd75c414dc81df027f691e2eb21d4fd85b2",
"version": "0.17.0"
}
},
"status": "success"
}
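
The v1 status payload above is where a client can discover the running Alertmanager version (data.versionInfo.version). A sketch of decoding just that field, using an ad hoc struct rather than karma's real types:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Trimmed copy of the fixture above, keeping only the fields we decode.
	payload := []byte(`{"status":"success","data":{"versionInfo":{"version":"0.17.0"}}}`)

	var status struct {
		Status string `json:"status"`
		Data   struct {
			VersionInfo struct {
				Version string `json:"version"`
			} `json:"versionInfo"`
		} `json:"data"`
	}
	if err := json.Unmarshal(payload, &status); err != nil {
		panic(err)
	}
	fmt.Println(status.Data.VersionInfo.Version) // 0.17.0
}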

internal/mock/0.17.0/api/v2/alerts/groups (new file)

@@ -0,0 +1,832 @@
[
{
"alerts": [
{
"annotations": {
"alert": "Less than 10% disk space is free",
"dashboard": "http://localhost/dashboard.html"
},
"endsAt": "2019-05-10T19:48:05.658Z",
"fingerprint": "f87343c11c74a3f4",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Free_Disk_Space_Too_Low",
"cluster": "staging",
"instance": "server5",
"job": "node_exporter"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-cluster-service"
}
],
"startsAt": "2019-05-10T19:42:25.615Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2019-05-10T19:43:05.658Z"
}
],
"labels": {
"alertname": "Free_Disk_Space_Too_Low"
},
"receiver": {
"name": "by-name"
}
},
{
"alerts": [
{
"annotations": {
"help": "Example help annotation",
"summary": "Example summary",
"url": "http://localhost/example.html"
},
"endsAt": "2019-05-10T19:48:05.658Z",
"fingerprint": "54c2f185e49cfccb",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "HTTP_Probe_Failed",
"cluster": "dev",
"instance": "web1",
"job": "node_exporter"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-cluster-service"
}
],
"startsAt": "2019-05-10T19:42:25.615Z",
"status": {
"inhibitedBy": [],
"silencedBy": [
"f363523b-128c-46d8-a8ab-fe9d97547741"
],
"state": "suppressed"
},
"updatedAt": "2019-05-10T19:43:05.658Z"
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2019-05-10T19:48:05.658Z",
"fingerprint": "5cb0dd95e7f3d9c0",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "HTTP_Probe_Failed",
"cluster": "dev",
"instance": "web2",
"job": "node_exporter"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-cluster-service"
}
],
"startsAt": "2019-05-10T19:42:25.615Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2019-05-10T19:43:05.658Z"
}
],
"labels": {
"alertname": "HTTP_Probe_Failed"
},
"receiver": {
"name": "by-name"
}
},
{
"alerts": [
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2019-05-10T19:48:05.658Z",
"fingerprint": "af9d8f2f0ccb3970",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "dev",
"instance": "server8",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-cluster-service"
}
],
"startsAt": "2019-05-10T19:42:25.615Z",
"status": {
"inhibitedBy": [],
"silencedBy": [
"01937075-4bf7-4e9f-ae3a-3f539e3040e3"
],
"state": "suppressed"
},
"updatedAt": "2019-05-10T19:43:05.658Z"
},
{
"annotations": {
"summary": "Example summary",
"url": "http://localhost/example.html"
},
"endsAt": "2019-05-10T19:48:05.658Z",
"fingerprint": "d0aee2649e71388b",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "prod",
"instance": "server1",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-cluster-service"
}
],
"startsAt": "2019-05-10T19:42:25.615Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2019-05-10T19:43:05.658Z"
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2019-05-10T19:48:05.658Z",
"fingerprint": "3bdb8b68bdce2ae0",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "prod",
"instance": "server2",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-cluster-service"
}
],
"startsAt": "2019-05-10T19:42:25.615Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2019-05-10T19:43:05.658Z"
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2019-05-10T19:48:05.658Z",
"fingerprint": "24e4121619386f95",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "staging",
"instance": "server3",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-cluster-service"
}
],
"startsAt": "2019-05-10T19:42:25.615Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2019-05-10T19:43:05.658Z"
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2019-05-10T19:48:05.658Z",
"fingerprint": "d9067fcc9686d942",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "staging",
"instance": "server4",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-cluster-service"
}
],
"startsAt": "2019-05-10T19:42:25.615Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2019-05-10T19:43:05.658Z"
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2019-05-10T19:48:05.658Z",
"fingerprint": "5f1306dab6671183",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "staging",
"instance": "server5",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-cluster-service"
}
],
"startsAt": "2019-05-10T19:42:25.615Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2019-05-10T19:43:05.658Z"
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2019-05-10T19:48:05.658Z",
"fingerprint": "0967807e4073b606",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "dev",
"instance": "server6",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-cluster-service"
}
],
"startsAt": "2019-05-10T19:42:25.615Z",
"status": {
"inhibitedBy": [],
"silencedBy": [
"01937075-4bf7-4e9f-ae3a-3f539e3040e3"
],
"state": "suppressed"
},
"updatedAt": "2019-05-10T19:43:05.658Z"
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2019-05-10T19:48:05.658Z",
"fingerprint": "44497481566cd3c7",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "dev",
"instance": "server7",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-cluster-service"
}
],
"startsAt": "2019-05-10T19:42:25.615Z",
"status": {
"inhibitedBy": [],
"silencedBy": [
"01937075-4bf7-4e9f-ae3a-3f539e3040e3",
"c805d071-b605-4164-b84b-ef24bd8c595e"
],
"state": "suppressed"
},
"updatedAt": "2019-05-10T19:43:05.658Z"
}
],
"labels": {
"alertname": "Host_Down"
},
"receiver": {
"name": "by-name"
}
},
{
"alerts": [
{
"annotations": {
"alert": "Memory usage exceeding threshold",
"dashboard": "http://localhost/dashboard.html"
},
"endsAt": "2019-05-10T19:48:05.658Z",
"fingerprint": "7d0b114ebf24f857",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Memory_Usage_Too_High",
"cluster": "prod",
"instance": "server2",
"job": "node_exporter"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-cluster-service"
}
],
"startsAt": "2019-05-10T19:42:25.615Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2019-05-10T19:43:05.658Z"
}
],
"labels": {
"alertname": "Memory_Usage_Too_High"
},
"receiver": {
"name": "by-name"
}
},
{
"alerts": [
{
"annotations": {
"alert": "Less than 10% disk space is free",
"dashboard": "http://localhost/dashboard.html"
},
"endsAt": "2019-05-10T19:48:05.658Z",
"fingerprint": "f87343c11c74a3f4",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Free_Disk_Space_Too_Low",
"cluster": "staging",
"instance": "server5",
"job": "node_exporter"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-cluster-service"
}
],
"startsAt": "2019-05-10T19:42:25.615Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2019-05-10T19:43:05.658Z"
}
],
"labels": {
"alertname": "Free_Disk_Space_Too_Low",
"cluster": "staging"
},
"receiver": {
"name": "by-cluster-service"
}
},
{
"alerts": [
{
"annotations": {
"help": "Example help annotation",
"summary": "Example summary",
"url": "http://localhost/example.html"
},
"endsAt": "2019-05-10T19:48:05.658Z",
"fingerprint": "54c2f185e49cfccb",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "HTTP_Probe_Failed",
"cluster": "dev",
"instance": "web1",
"job": "node_exporter"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-cluster-service"
}
],
"startsAt": "2019-05-10T19:42:25.615Z",
"status": {
"inhibitedBy": [],
"silencedBy": [
"f363523b-128c-46d8-a8ab-fe9d97547741"
],
"state": "suppressed"
},
"updatedAt": "2019-05-10T19:43:05.658Z"
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2019-05-10T19:48:05.658Z",
"fingerprint": "5cb0dd95e7f3d9c0",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "HTTP_Probe_Failed",
"cluster": "dev",
"instance": "web2",
"job": "node_exporter"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-cluster-service"
}
],
"startsAt": "2019-05-10T19:42:25.615Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2019-05-10T19:43:05.658Z"
}
],
"labels": {
"alertname": "HTTP_Probe_Failed",
"cluster": "dev"
},
"receiver": {
"name": "by-cluster-service"
}
},
{
"alerts": [
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2019-05-10T19:48:05.658Z",
"fingerprint": "0967807e4073b606",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "dev",
"instance": "server6",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-cluster-service"
}
],
"startsAt": "2019-05-10T19:42:25.615Z",
"status": {
"inhibitedBy": [],
"silencedBy": [
"01937075-4bf7-4e9f-ae3a-3f539e3040e3"
],
"state": "suppressed"
},
"updatedAt": "2019-05-10T19:43:05.658Z"
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2019-05-10T19:48:05.658Z",
"fingerprint": "44497481566cd3c7",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "dev",
"instance": "server7",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-cluster-service"
}
],
"startsAt": "2019-05-10T19:42:25.615Z",
"status": {
"inhibitedBy": [],
"silencedBy": [
"01937075-4bf7-4e9f-ae3a-3f539e3040e3",
"c805d071-b605-4164-b84b-ef24bd8c595e"
],
"state": "suppressed"
},
"updatedAt": "2019-05-10T19:43:05.658Z"
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2019-05-10T19:48:05.658Z",
"fingerprint": "af9d8f2f0ccb3970",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "dev",
"instance": "server8",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-cluster-service"
}
],
"startsAt": "2019-05-10T19:42:25.615Z",
"status": {
"inhibitedBy": [],
"silencedBy": [
"01937075-4bf7-4e9f-ae3a-3f539e3040e3"
],
"state": "suppressed"
},
"updatedAt": "2019-05-10T19:43:05.658Z"
}
],
"labels": {
"alertname": "Host_Down",
"cluster": "dev"
},
"receiver": {
"name": "by-cluster-service"
}
},
{
"alerts": [
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2019-05-10T19:48:05.658Z",
"fingerprint": "3bdb8b68bdce2ae0",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "prod",
"instance": "server2",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-cluster-service"
}
],
"startsAt": "2019-05-10T19:42:25.615Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2019-05-10T19:43:05.658Z"
},
{
"annotations": {
"summary": "Example summary",
"url": "http://localhost/example.html"
},
"endsAt": "2019-05-10T19:48:05.658Z",
"fingerprint": "d0aee2649e71388b",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "prod",
"instance": "server1",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-cluster-service"
}
],
"startsAt": "2019-05-10T19:42:25.615Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2019-05-10T19:43:05.658Z"
}
],
"labels": {
"alertname": "Host_Down",
"cluster": "prod"
},
"receiver": {
"name": "by-cluster-service"
}
},
{
"alerts": [
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2019-05-10T19:48:05.658Z",
"fingerprint": "24e4121619386f95",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "staging",
"instance": "server3",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-cluster-service"
}
],
"startsAt": "2019-05-10T19:42:25.615Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2019-05-10T19:43:05.658Z"
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2019-05-10T19:48:05.658Z",
"fingerprint": "d9067fcc9686d942",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "staging",
"instance": "server4",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-cluster-service"
}
],
"startsAt": "2019-05-10T19:42:25.615Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2019-05-10T19:43:05.658Z"
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2019-05-10T19:48:05.658Z",
"fingerprint": "5f1306dab6671183",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "staging",
"instance": "server5",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-cluster-service"
}
],
"startsAt": "2019-05-10T19:42:25.615Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2019-05-10T19:43:05.658Z"
}
],
"labels": {
"alertname": "Host_Down",
"cluster": "staging"
},
"receiver": {
"name": "by-cluster-service"
}
},
{
"alerts": [
{
"annotations": {
"alert": "Memory usage exceeding threshold",
"dashboard": "http://localhost/dashboard.html"
},
"endsAt": "2019-05-10T19:48:05.658Z",
"fingerprint": "7d0b114ebf24f857",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Memory_Usage_Too_High",
"cluster": "prod",
"instance": "server2",
"job": "node_exporter"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-cluster-service"
}
],
"startsAt": "2019-05-10T19:42:25.615Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2019-05-10T19:43:05.658Z"
}
],
"labels": {
"alertname": "Memory_Usage_Too_High",
"cluster": "prod"
},
"receiver": {
"name": "by-cluster-service"
}
}
]
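
The groups fixture is a bare JSON array, the v2 alerts/groups shape: each group carries its grouping labels, the receiver it was routed to, and member alerts whose status lists silencedBy/inhibitedBy silence IDs. A hedged decoding sketch with ad hoc structs (not karma's models), run against a one-group excerpt:

package main

import (
	"encoding/json"
	"fmt"
)

// alertGroup is an ad hoc mirror of the fixture's shape, not karma's model.
type alertGroup struct {
	Labels   map[string]string `json:"labels"`
	Receiver struct {
		Name string `json:"name"`
	} `json:"receiver"`
	Alerts []struct {
		Fingerprint string            `json:"fingerprint"`
		Labels      map[string]string `json:"labels"`
		Status      struct {
			State      string   `json:"state"`
			SilencedBy []string `json:"silencedBy"`
		} `json:"status"`
	} `json:"alerts"`
}

func main() {
	// One-group excerpt of the fixture above.
	payload := []byte(`[{"labels":{"alertname":"Host_Down","cluster":"dev"},
		"receiver":{"name":"by-cluster-service"},
		"alerts":[{"fingerprint":"0967807e4073b606",
			"labels":{"alertname":"Host_Down","instance":"server6"},
			"status":{"state":"suppressed","silencedBy":["01937075-4bf7-4e9f-ae3a-3f539e3040e3"]}}]}]`)

	var groups []alertGroup
	if err := json.Unmarshal(payload, &groups); err != nil {
		panic(err)
	}
	g := groups[0]
	fmt.Println(g.Labels["alertname"], g.Receiver.Name, g.Alerts[0].Status.State)
	// Host_Down by-cluster-service suppressed
}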

internal/mock/0.17.0/api/v2/silences (new file)

@@ -0,0 +1,61 @@
[
{
"comment": "Silenced Host_Down alerts in the dev cluster",
"createdBy": "john@example.com",
"endsAt": "2063-01-01T00:00:00.000Z",
"id": "01937075-4bf7-4e9f-ae3a-3f539e3040e3",
"matchers": [
{
"isRegex": false,
"name": "alertname",
"value": "Host_Down"
},
{
"isRegex": false,
"name": "cluster",
"value": "dev"
}
],
"startsAt": "2019-05-10T19:42:25.599Z",
"status": {
"state": "active"
},
"updatedAt": "2019-05-10T19:42:25.599Z"
},
{
"comment": "Silenced server7",
"createdBy": "john@example.com",
"endsAt": "2063-01-01T00:00:00.000Z",
"id": "c805d071-b605-4164-b84b-ef24bd8c595e",
"matchers": [
{
"isRegex": false,
"name": "instance",
"value": "server7"
}
],
"startsAt": "2019-05-10T19:42:25.603Z",
"status": {
"state": "active"
},
"updatedAt": "2019-05-10T19:42:25.603Z"
},
{
"comment": "Silenced instance",
"createdBy": "john@example.com",
"endsAt": "2063-01-01T00:00:00.000Z",
"id": "f363523b-128c-46d8-a8ab-fe9d97547741",
"matchers": [
{
"isRegex": false,
"name": "instance",
"value": "web1"
}
],
"startsAt": "2019-05-10T19:42:25.587Z",
"status": {
"state": "active"
},
"updatedAt": "2019-05-10T19:42:25.587Z"
}
]
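
These silence IDs are what the silencedBy arrays in the groups fixture point back to; an alert is suppressed when every matcher of some active silence matches its labels. A rough sketch of that check for the plain (non-regex) matchers used here, with illustrative types:

package main

import "fmt"

// matcher mirrors the silence matcher objects above; illustrative type only.
type matcher struct {
	Name    string
	Value   string
	IsRegex bool
}

// matches reports whether every plain matcher agrees with the alert's labels.
func matches(labels map[string]string, matchers []matcher) bool {
	for _, m := range matchers {
		if m.IsRegex {
			continue // regex matchers are out of scope for this sketch
		}
		if labels[m.Name] != m.Value {
			return false
		}
	}
	return true
}

func main() {
	// The first silence above: alertname=Host_Down, cluster=dev.
	silence := []matcher{
		{Name: "alertname", Value: "Host_Down"},
		{Name: "cluster", Value: "dev"},
	}
	alert := map[string]string{
		"alertname": "Host_Down",
		"cluster":   "dev",
		"instance":  "server6",
		"job":       "node_ping",
	}
	fmt.Println(matches(alert, silence)) // true: the alert is suppressed
}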

internal/mock/0.17.0/metrics (new file)

@@ -0,0 +1,480 @@
# HELP alertmanager_alerts How many alerts by state.
# TYPE alertmanager_alerts gauge
alertmanager_alerts{state="active"} 8
alertmanager_alerts{state="suppressed"} 4
# HELP alertmanager_alerts_invalid_total The total number of received alerts that were invalid.
# TYPE alertmanager_alerts_invalid_total counter
alertmanager_alerts_invalid_total 0
# HELP alertmanager_alerts_received_total The total number of received alerts.
# TYPE alertmanager_alerts_received_total counter
alertmanager_alerts_received_total{status="firing"} 60
alertmanager_alerts_received_total{status="resolved"} 0
# HELP alertmanager_build_info A metric with a constant '1' value labeled by version, revision, branch, and goversion from which alertmanager was built.
# TYPE alertmanager_build_info gauge
alertmanager_build_info{branch="HEAD",goversion="go1.12.4",revision="c7551cd75c414dc81df027f691e2eb21d4fd85b2",version="0.17.0"} 1
# HELP alertmanager_cluster_failed_peers Number indicating the current number of failed peers in the cluster.
# TYPE alertmanager_cluster_failed_peers gauge
alertmanager_cluster_failed_peers 0
# HELP alertmanager_cluster_health_score Health score of the cluster. Lower values are better and zero means 'totally healthy'.
# TYPE alertmanager_cluster_health_score gauge
alertmanager_cluster_health_score 0
# HELP alertmanager_cluster_members Number indicating current number of members in cluster.
# TYPE alertmanager_cluster_members gauge
alertmanager_cluster_members 1
# HELP alertmanager_cluster_messages_pruned_total Total number of cluster messages pruned.
# TYPE alertmanager_cluster_messages_pruned_total counter
alertmanager_cluster_messages_pruned_total 0
# HELP alertmanager_cluster_messages_queued Number of cluster messages which are queued.
# TYPE alertmanager_cluster_messages_queued gauge
alertmanager_cluster_messages_queued 3
# HELP alertmanager_cluster_messages_received_size_total Total size of cluster messages received.
# TYPE alertmanager_cluster_messages_received_size_total counter
alertmanager_cluster_messages_received_size_total{msg_type="full_state"} 0
alertmanager_cluster_messages_received_size_total{msg_type="update"} 0
# HELP alertmanager_cluster_messages_received_total Total number of cluster messages received.
# TYPE alertmanager_cluster_messages_received_total counter
alertmanager_cluster_messages_received_total{msg_type="full_state"} 0
alertmanager_cluster_messages_received_total{msg_type="update"} 0
# HELP alertmanager_cluster_messages_sent_size_total Total size of cluster messages sent.
# TYPE alertmanager_cluster_messages_sent_size_total counter
alertmanager_cluster_messages_sent_size_total{msg_type="full_state"} 0
alertmanager_cluster_messages_sent_size_total{msg_type="update"} 0
# HELP alertmanager_cluster_messages_sent_total Total number of cluster messages sent.
# TYPE alertmanager_cluster_messages_sent_total counter
alertmanager_cluster_messages_sent_total{msg_type="full_state"} 0
alertmanager_cluster_messages_sent_total{msg_type="update"} 0
# HELP alertmanager_cluster_peers_joined_total A counter of the number of peers that have joined.
# TYPE alertmanager_cluster_peers_joined_total counter
alertmanager_cluster_peers_joined_total 1
# HELP alertmanager_cluster_peers_left_total A counter of the number of peers that have left.
# TYPE alertmanager_cluster_peers_left_total counter
alertmanager_cluster_peers_left_total 0
# HELP alertmanager_cluster_peers_update_total A counter of the number of peers that have updated metadata.
# TYPE alertmanager_cluster_peers_update_total counter
alertmanager_cluster_peers_update_total 0
# HELP alertmanager_cluster_reconnections_failed_total A counter of the number of failed cluster peer reconnection attempts.
# TYPE alertmanager_cluster_reconnections_failed_total counter
alertmanager_cluster_reconnections_failed_total 0
# HELP alertmanager_cluster_reconnections_total A counter of the number of cluster peer reconnections.
# TYPE alertmanager_cluster_reconnections_total counter
alertmanager_cluster_reconnections_total 0
# HELP alertmanager_cluster_refresh_join_failed_total A counter of the number of failed cluster peer joined attempts via refresh.
# TYPE alertmanager_cluster_refresh_join_failed_total counter
alertmanager_cluster_refresh_join_failed_total 0
# HELP alertmanager_cluster_refresh_join_total A counter of the number of cluster peer joined via refresh.
# TYPE alertmanager_cluster_refresh_join_total counter
alertmanager_cluster_refresh_join_total 0
# HELP alertmanager_config_hash Hash of the currently loaded alertmanager configuration.
# TYPE alertmanager_config_hash gauge
alertmanager_config_hash 6.2645753076152e+13
# HELP alertmanager_config_last_reload_success_timestamp_seconds Timestamp of the last successful configuration reload.
# TYPE alertmanager_config_last_reload_success_timestamp_seconds gauge
alertmanager_config_last_reload_success_timestamp_seconds 1.55751733e+09
# HELP alertmanager_config_last_reload_successful Whether the last configuration reload attempt was successful.
# TYPE alertmanager_config_last_reload_successful gauge
alertmanager_config_last_reload_successful 1
# HELP alertmanager_http_concurrency_limit_exceeded_total Total number of times an HTTP request failed because the concurrency limit was reached.
# TYPE alertmanager_http_concurrency_limit_exceeded_total counter
alertmanager_http_concurrency_limit_exceeded_total{method="get"} 0
# HELP alertmanager_http_request_duration_seconds Histogram of latencies for HTTP requests.
# TYPE alertmanager_http_request_duration_seconds histogram
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="0.05"} 5
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="0.1"} 5
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="0.25"} 5
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="0.5"} 5
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="0.75"} 5
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="1"} 5
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="2"} 5
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="5"} 5
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="20"} 5
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="60"} 5
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="+Inf"} 5
alertmanager_http_request_duration_seconds_sum{handler="/alerts",method="post"} 0.0054434999999999996
alertmanager_http_request_duration_seconds_count{handler="/alerts",method="post"} 5
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="0.05"} 3
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="0.1"} 3
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="0.25"} 3
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="0.5"} 3
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="0.75"} 3
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="1"} 3
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="2"} 3
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="5"} 3
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="20"} 3
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="60"} 3
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="+Inf"} 3
alertmanager_http_request_duration_seconds_sum{handler="/silences",method="post"} 0.0036524000000000005
alertmanager_http_request_duration_seconds_count{handler="/silences",method="post"} 3
# HELP alertmanager_http_requests_in_flight Current number of HTTP requests being processed.
# TYPE alertmanager_http_requests_in_flight gauge
alertmanager_http_requests_in_flight{method="get"} 1
# HELP alertmanager_http_response_size_bytes Histogram of response size for HTTP requests.
# TYPE alertmanager_http_response_size_bytes histogram
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="100"} 5
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="1000"} 5
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="10000"} 5
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="100000"} 5
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="1e+06"} 5
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="1e+07"} 5
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="1e+08"} 5
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="+Inf"} 5
alertmanager_http_response_size_bytes_sum{handler="/alerts",method="post"} 100
alertmanager_http_response_size_bytes_count{handler="/alerts",method="post"} 5
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="100"} 3
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="1000"} 3
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="10000"} 3
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="100000"} 3
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="1e+06"} 3
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="1e+07"} 3
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="1e+08"} 3
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="+Inf"} 3
alertmanager_http_response_size_bytes_sum{handler="/silences",method="post"} 240
alertmanager_http_response_size_bytes_count{handler="/silences",method="post"} 3
# HELP alertmanager_nflog_gc_duration_seconds Duration of the last notification log garbage collection cycle.
# TYPE alertmanager_nflog_gc_duration_seconds summary
alertmanager_nflog_gc_duration_seconds{quantile="0.5"} NaN
alertmanager_nflog_gc_duration_seconds{quantile="0.9"} NaN
alertmanager_nflog_gc_duration_seconds{quantile="0.99"} NaN
alertmanager_nflog_gc_duration_seconds_sum 0
alertmanager_nflog_gc_duration_seconds_count 0
# HELP alertmanager_nflog_gossip_messages_propagated_total Number of received gossip messages that have been further gossiped.
# TYPE alertmanager_nflog_gossip_messages_propagated_total counter
alertmanager_nflog_gossip_messages_propagated_total 0
# HELP alertmanager_nflog_queries_total Number of notification log queries were received.
# TYPE alertmanager_nflog_queries_total counter
alertmanager_nflog_queries_total 0
# HELP alertmanager_nflog_query_duration_seconds Duration of notification log query evaluation.
# TYPE alertmanager_nflog_query_duration_seconds histogram
alertmanager_nflog_query_duration_seconds_bucket{le="0.005"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="0.01"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="0.025"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="0.05"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="0.1"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="0.25"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="0.5"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="1"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="2.5"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="5"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="10"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="+Inf"} 0
alertmanager_nflog_query_duration_seconds_sum 0
alertmanager_nflog_query_duration_seconds_count 0
# HELP alertmanager_nflog_query_errors_total Number notification log received queries that failed.
# TYPE alertmanager_nflog_query_errors_total counter
alertmanager_nflog_query_errors_total 0
# HELP alertmanager_nflog_snapshot_duration_seconds Duration of the last notification log snapshot.
# TYPE alertmanager_nflog_snapshot_duration_seconds summary
alertmanager_nflog_snapshot_duration_seconds{quantile="0.5"} NaN
alertmanager_nflog_snapshot_duration_seconds{quantile="0.9"} NaN
alertmanager_nflog_snapshot_duration_seconds{quantile="0.99"} NaN
alertmanager_nflog_snapshot_duration_seconds_sum 0
alertmanager_nflog_snapshot_duration_seconds_count 0
# HELP alertmanager_nflog_snapshot_size_bytes Size of the last notification log snapshot in bytes.
# TYPE alertmanager_nflog_snapshot_size_bytes gauge
alertmanager_nflog_snapshot_size_bytes 0
# HELP alertmanager_notification_latency_seconds The latency of notifications in seconds.
# TYPE alertmanager_notification_latency_seconds histogram
alertmanager_notification_latency_seconds_bucket{integration="email",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="email",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="email",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="email",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="email",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="email",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="email"} 0
alertmanager_notification_latency_seconds_count{integration="email"} 0
alertmanager_notification_latency_seconds_bucket{integration="hipchat",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="hipchat",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="hipchat",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="hipchat",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="hipchat",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="hipchat",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="hipchat"} 0
alertmanager_notification_latency_seconds_count{integration="hipchat"} 0
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="opsgenie"} 0
alertmanager_notification_latency_seconds_count{integration="opsgenie"} 0
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="pagerduty"} 0
alertmanager_notification_latency_seconds_count{integration="pagerduty"} 0
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="pushover"} 0
alertmanager_notification_latency_seconds_count{integration="pushover"} 0
alertmanager_notification_latency_seconds_bucket{integration="slack",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="slack",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="slack",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="slack",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="slack",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="slack",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="slack"} 0
alertmanager_notification_latency_seconds_count{integration="slack"} 0
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="victorops"} 0
alertmanager_notification_latency_seconds_count{integration="victorops"} 0
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="webhook"} 0
alertmanager_notification_latency_seconds_count{integration="webhook"} 0
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="wechat"} 0
alertmanager_notification_latency_seconds_count{integration="wechat"} 0
# HELP alertmanager_notifications_failed_total The total number of failed notifications.
# TYPE alertmanager_notifications_failed_total counter
alertmanager_notifications_failed_total{integration="email"} 0
alertmanager_notifications_failed_total{integration="hipchat"} 0
alertmanager_notifications_failed_total{integration="opsgenie"} 0
alertmanager_notifications_failed_total{integration="pagerduty"} 0
alertmanager_notifications_failed_total{integration="pushover"} 0
alertmanager_notifications_failed_total{integration="slack"} 0
alertmanager_notifications_failed_total{integration="victorops"} 0
alertmanager_notifications_failed_total{integration="webhook"} 0
alertmanager_notifications_failed_total{integration="wechat"} 0
# HELP alertmanager_notifications_total The total number of attempted notifications.
# TYPE alertmanager_notifications_total counter
alertmanager_notifications_total{integration="email"} 0
alertmanager_notifications_total{integration="hipchat"} 0
alertmanager_notifications_total{integration="opsgenie"} 0
alertmanager_notifications_total{integration="pagerduty"} 0
alertmanager_notifications_total{integration="pushover"} 0
alertmanager_notifications_total{integration="slack"} 0
alertmanager_notifications_total{integration="victorops"} 0
alertmanager_notifications_total{integration="webhook"} 0
alertmanager_notifications_total{integration="wechat"} 0
# HELP alertmanager_oversize_gossip_message_duration_seconds Duration of oversized gossip message requests.
# TYPE alertmanager_oversize_gossip_message_duration_seconds histogram
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.005"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.01"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.025"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.05"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.1"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.25"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.5"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="1"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="2.5"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="5"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="10"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="+Inf"} 0
alertmanager_oversize_gossip_message_duration_seconds_sum{key="nfl"} 0
alertmanager_oversize_gossip_message_duration_seconds_count{key="nfl"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.005"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.01"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.025"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.05"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.1"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.25"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.5"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="1"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="2.5"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="5"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="10"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="+Inf"} 0
alertmanager_oversize_gossip_message_duration_seconds_sum{key="sil"} 0
alertmanager_oversize_gossip_message_duration_seconds_count{key="sil"} 0
# HELP alertmanager_oversized_gossip_message_dropped_total Number of oversized gossip messages that were dropped due to a full message queue.
# TYPE alertmanager_oversized_gossip_message_dropped_total counter
alertmanager_oversized_gossip_message_dropped_total{key="nfl"} 0
alertmanager_oversized_gossip_message_dropped_total{key="sil"} 0
# HELP alertmanager_oversized_gossip_message_failure_total Number of oversized gossip message sends that failed.
# TYPE alertmanager_oversized_gossip_message_failure_total counter
alertmanager_oversized_gossip_message_failure_total{key="nfl"} 0
alertmanager_oversized_gossip_message_failure_total{key="sil"} 0
# HELP alertmanager_oversized_gossip_message_sent_total Number of oversized gossip message sent.
# TYPE alertmanager_oversized_gossip_message_sent_total counter
alertmanager_oversized_gossip_message_sent_total{key="nfl"} 0
alertmanager_oversized_gossip_message_sent_total{key="sil"} 0
# HELP alertmanager_peer_position Position the Alertmanager instance believes it's in. The position determines a peer's behavior in the cluster.
# TYPE alertmanager_peer_position gauge
alertmanager_peer_position 0
# HELP alertmanager_silences How many silences by state.
# TYPE alertmanager_silences gauge
alertmanager_silences{state="active"} 3
alertmanager_silences{state="expired"} 0
alertmanager_silences{state="pending"} 0
# HELP alertmanager_silences_gc_duration_seconds Duration of the last silence garbage collection cycle.
# TYPE alertmanager_silences_gc_duration_seconds summary
alertmanager_silences_gc_duration_seconds{quantile="0.5"} NaN
alertmanager_silences_gc_duration_seconds{quantile="0.9"} NaN
alertmanager_silences_gc_duration_seconds{quantile="0.99"} NaN
alertmanager_silences_gc_duration_seconds_sum 0
alertmanager_silences_gc_duration_seconds_count 0
# HELP alertmanager_silences_gossip_messages_propagated_total Number of received gossip messages that have been further gossiped.
# TYPE alertmanager_silences_gossip_messages_propagated_total counter
alertmanager_silences_gossip_messages_propagated_total 0
# HELP alertmanager_silences_queries_total How many silence queries were received.
# TYPE alertmanager_silences_queries_total counter
alertmanager_silences_queries_total 25
# HELP alertmanager_silences_query_duration_seconds Duration of silence query evaluation.
# TYPE alertmanager_silences_query_duration_seconds histogram
alertmanager_silences_query_duration_seconds_bucket{le="0.005"} 24
alertmanager_silences_query_duration_seconds_bucket{le="0.01"} 24
alertmanager_silences_query_duration_seconds_bucket{le="0.025"} 24
alertmanager_silences_query_duration_seconds_bucket{le="0.05"} 24
alertmanager_silences_query_duration_seconds_bucket{le="0.1"} 24
alertmanager_silences_query_duration_seconds_bucket{le="0.25"} 24
alertmanager_silences_query_duration_seconds_bucket{le="0.5"} 24
alertmanager_silences_query_duration_seconds_bucket{le="1"} 24
alertmanager_silences_query_duration_seconds_bucket{le="2.5"} 24
alertmanager_silences_query_duration_seconds_bucket{le="5"} 24
alertmanager_silences_query_duration_seconds_bucket{le="10"} 24
alertmanager_silences_query_duration_seconds_bucket{le="+Inf"} 24
alertmanager_silences_query_duration_seconds_sum 0.0009598
alertmanager_silences_query_duration_seconds_count 24
# HELP alertmanager_silences_query_errors_total How many silence received queries did not succeed.
# TYPE alertmanager_silences_query_errors_total counter
alertmanager_silences_query_errors_total 0
# HELP alertmanager_silences_snapshot_duration_seconds Duration of the last silence snapshot.
# TYPE alertmanager_silences_snapshot_duration_seconds summary
alertmanager_silences_snapshot_duration_seconds{quantile="0.5"} NaN
alertmanager_silences_snapshot_duration_seconds{quantile="0.9"} NaN
alertmanager_silences_snapshot_duration_seconds{quantile="0.99"} NaN
alertmanager_silences_snapshot_duration_seconds_sum 0
alertmanager_silences_snapshot_duration_seconds_count 0
# HELP alertmanager_silences_snapshot_size_bytes Size of the last silence snapshot in bytes.
# TYPE alertmanager_silences_snapshot_size_bytes gauge
alertmanager_silences_snapshot_size_bytes 0
# HELP go_gc_duration_seconds A summary of the GC invocation durations.
# TYPE go_gc_duration_seconds summary
go_gc_duration_seconds{quantile="0"} 6.85e-05
go_gc_duration_seconds{quantile="0.25"} 0.0001263
go_gc_duration_seconds{quantile="0.5"} 0.0001508
go_gc_duration_seconds{quantile="0.75"} 0.0001825
go_gc_duration_seconds{quantile="1"} 0.0005197
go_gc_duration_seconds_sum 0.0010478
go_gc_duration_seconds_count 5
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
go_goroutines 50
# HELP go_info Information about the Go environment.
# TYPE go_info gauge
go_info{version="go1.12.4"} 1
# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
# TYPE go_memstats_alloc_bytes gauge
go_memstats_alloc_bytes 7.003e+06
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
# TYPE go_memstats_alloc_bytes_total counter
go_memstats_alloc_bytes_total 1.5485728e+07
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
# TYPE go_memstats_buck_hash_sys_bytes gauge
go_memstats_buck_hash_sys_bytes 1.448834e+06
# HELP go_memstats_frees_total Total number of frees.
# TYPE go_memstats_frees_total counter
go_memstats_frees_total 89977
# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started.
# TYPE go_memstats_gc_cpu_fraction gauge
go_memstats_gc_cpu_fraction 0.023123785563213525
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
# TYPE go_memstats_gc_sys_bytes gauge
go_memstats_gc_sys_bytes 2.38592e+06
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
# TYPE go_memstats_heap_alloc_bytes gauge
go_memstats_heap_alloc_bytes 7.003e+06
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
# TYPE go_memstats_heap_idle_bytes gauge
go_memstats_heap_idle_bytes 5.7597952e+07
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
# TYPE go_memstats_heap_inuse_bytes gauge
go_memstats_heap_inuse_bytes 8.691712e+06
# HELP go_memstats_heap_objects Number of allocated objects.
# TYPE go_memstats_heap_objects gauge
go_memstats_heap_objects 38573
# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.
# TYPE go_memstats_heap_released_bytes gauge
go_memstats_heap_released_bytes 0
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
# TYPE go_memstats_heap_sys_bytes gauge
go_memstats_heap_sys_bytes 6.6289664e+07
# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
# TYPE go_memstats_last_gc_time_seconds gauge
go_memstats_last_gc_time_seconds 1.5575173303955498e+09
# HELP go_memstats_lookups_total Total number of pointer lookups.
# TYPE go_memstats_lookups_total counter
go_memstats_lookups_total 0
# HELP go_memstats_mallocs_total Total number of mallocs.
# TYPE go_memstats_mallocs_total counter
go_memstats_mallocs_total 128550
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
# TYPE go_memstats_mcache_inuse_bytes gauge
go_memstats_mcache_inuse_bytes 3472
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
# TYPE go_memstats_mcache_sys_bytes gauge
go_memstats_mcache_sys_bytes 16384
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
# TYPE go_memstats_mspan_inuse_bytes gauge
go_memstats_mspan_inuse_bytes 109008
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
# TYPE go_memstats_mspan_sys_bytes gauge
go_memstats_mspan_sys_bytes 114688
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
# TYPE go_memstats_next_gc_bytes gauge
go_memstats_next_gc_bytes 9.28288e+06
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
# TYPE go_memstats_other_sys_bytes gauge
go_memstats_other_sys_bytes 687478
# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
# TYPE go_memstats_stack_inuse_bytes gauge
go_memstats_stack_inuse_bytes 819200
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
# TYPE go_memstats_stack_sys_bytes gauge
go_memstats_stack_sys_bytes 819200
# HELP go_memstats_sys_bytes Number of bytes obtained from system.
# TYPE go_memstats_sys_bytes gauge
go_memstats_sys_bytes 7.1762168e+07
# HELP go_threads Number of OS threads created.
# TYPE go_threads gauge
go_threads 9
# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
# TYPE process_cpu_seconds_total counter
process_cpu_seconds_total 0.4
# HELP process_max_fds Maximum number of open file descriptors.
# TYPE process_max_fds gauge
process_max_fds 1.048576e+06
# HELP process_open_fds Number of open file descriptors.
# TYPE process_open_fds gauge
process_open_fds 9
# HELP process_resident_memory_bytes Resident memory size in bytes.
# TYPE process_resident_memory_bytes gauge
process_resident_memory_bytes 2.4010752e+07
# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
# TYPE process_start_time_seconds gauge
process_start_time_seconds 1.55751732955e+09
# HELP process_virtual_memory_bytes Virtual memory size in bytes.
# TYPE process_virtual_memory_bytes gauge
process_virtual_memory_bytes 1.2558336e+08
# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.
# TYPE process_virtual_memory_max_bytes gauge
process_virtual_memory_max_bytes -1
# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.
# TYPE promhttp_metric_handler_requests_in_flight gauge
promhttp_metric_handler_requests_in_flight 1
# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.
# TYPE promhttp_metric_handler_requests_total counter
promhttp_metric_handler_requests_total{code="200"} 0
promhttp_metric_handler_requests_total{code="500"} 0
promhttp_metric_handler_requests_total{code="503"} 0
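
Besides the API fixtures, the metrics dump carries alertmanager_build_info, which is one more place a client could read the server version from. A standalone sketch (not karma's actual parser) that scans for it:

package main

import (
	"bufio"
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Two-line excerpt of the metrics fixture above.
	metrics := `# TYPE alertmanager_build_info gauge
alertmanager_build_info{branch="HEAD",goversion="go1.12.4",revision="c7551cd75c414dc81df027f691e2eb21d4fd85b2",version="0.17.0"} 1`

	re := regexp.MustCompile(`^alertmanager_build_info\{.*version="([^"]+)"`)
	scanner := bufio.NewScanner(strings.NewReader(metrics))
	for scanner.Scan() {
		if m := re.FindStringSubmatch(scanner.Text()); m != nil {
			fmt.Println(m[1]) // 0.17.0
		}
	}
}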

internal/mock/Makefile

@@ -5,26 +5,33 @@ DOCKER_ARGS := --name $(DOCKER_NAME) --rm -d -p 9093:9093 \
 	-v $(CURDIR)/alertmanager.yml:/etc/alertmanager/alertmanager.yml

 # list of Alertmanager versions to generate mock files for
-VERSIONS := 0.4.0 0.4.1 0.4.2 0.5.0 0.5.1 0.6.0 0.6.2 0.7.0 0.7.1 0.8.0 0.9.0 0.9.1 0.10.0 0.11.0 0.12.0 0.13.0 0.14.0 0.15.0 0.15.1 0.15.2 0.15.3
+VERSIONS := 0.4.0 0.4.1 0.4.2 0.5.0 0.5.1 0.6.0 0.6.2 0.7.0 0.7.1 0.8.0 0.9.0 0.9.1 0.10.0 0.11.0 0.12.0 0.13.0 0.14.0 0.15.0 0.15.1 0.15.2 0.15.3 0.17.0

 %/.ok: livemock.py
 	$(eval VERSION := $(word 1, $(subst /, ,$@)))
-	@echo "Generating mock files for Alertmanager $(VERSION)"
+	$(eval VERSION_MAJOR := $(word 2,$(subst ., ,$(VERSION))))
+	$(eval API := $(shell (test $(VERSION_MAJOR) -gt 16 && echo 2) || echo 1))
+	@echo "+ Generating mock files for Alertmanager $(VERSION) with API $(API)"
 	docker pull $(DOCKER_IMAGE):v$(VERSION)
 	@docker rm -f $(DOCKER_NAME) > /dev/null 2>&1 || true
-	@echo "Starting Alertmanager"
+	@echo "+ Starting Alertmanager"
 	docker run $(DOCKER_ARGS) $(DOCKER_IMAGE):v$(VERSION)
 	@sleep 15
-	@echo "Sending mock alerts and silences"
+	@echo "+ Sending mock alerts and silences"
 	@python livemock.py
-	@mkdir -p $(CURDIR)/$(VERSION)/api/v1 $(CURDIR)/$(VERSION)/api/v1/alerts
-	@echo "Collecting API responses"
+	@echo "+ Collecting API responses"
+	@mkdir -p $(CURDIR)/$(VERSION)/api/v{1..2} $(CURDIR)/$(VERSION)/api/v$(API)/alerts
 	@curl --fail -s localhost:9093/metrics > $(CURDIR)/$(VERSION)/metrics
 	@curl --fail -s localhost:9093/api/v1/status | python -m json.tool > $(CURDIR)/$(VERSION)/api/v1/status
-	@curl --fail -s localhost:9093/api/v1/silences | python -m json.tool > $(CURDIR)/$(VERSION)/api/v1/silences
-	@curl --fail -s localhost:9093/api/v1/alerts/groups | python -m json.tool > $(CURDIR)/$(VERSION)/api/v1/alerts/groups
+	@curl --fail -s localhost:9093/api/v$(API)/silences | python -m json.tool > $(CURDIR)/$(VERSION)/api/v$(API)/silences
+	@curl --fail -s localhost:9093/api/v$(API)/alerts/groups | python -m json.tool > $(CURDIR)/$(VERSION)/api/v$(API)/alerts/groups
 	@touch $(VERSION)/.ok
-	@echo "Done"
+	@echo "+ Done"

 .PHONY: all
 all: $(foreach version, $(VERSIONS), $(version)/.ok)
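
The new API variable derives the API generation from the minor version field; note that $(word 2,$(subst ., ,0.17.0)) yields 17, so the variable named VERSION_MAJOR actually holds the minor component. The same rule as a Go sketch (apiVersion is illustrative only): anything newer than 0.16.x is collected via API v2.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// apiVersion returns 2 for Alertmanager releases newer than 0.16.x, else 1.
func apiVersion(version string) int {
	minor, err := strconv.Atoi(strings.Split(version, ".")[1])
	if err != nil {
		panic(err)
	}
	if minor > 16 {
		return 2
	}
	return 1
}

func main() {
	fmt.Println(apiVersion("0.15.3")) // 1
	fmt.Println(apiVersion("0.17.0")) // 2
}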

@@ -21,6 +21,13 @@ func GetAbsoluteMockPath(filename string, version string) string {
 // RegisterURL for given url and return 200 status register mock http responder
 func RegisterURL(url string, version string, filename string) {
 	fullPath := GetAbsoluteMockPath(filename, version)
+	if _, err := os.Stat(fullPath); err != nil {
+		if os.IsNotExist(err) {
+			return
+		}
+	}
 	mockJSON, err := ioutil.ReadFile(fullPath)
 	if err != nil {
 		panic(err)
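
Since callers now register api/v1 and api/v2 URLs for every version, this early return quietly skips endpoints whose fixture file was never generated (e.g. no v2 files for pre-0.17.0 mocks) instead of panicking in ioutil.ReadFile. The guard in isolation, as a runnable sketch (fixtureExists and the path are illustrative):

package main

import (
	"fmt"
	"os"
)

// fixtureExists mirrors the guard above: report whether a mock file exists,
// letting any other Stat error surface later when the file is actually read.
func fixtureExists(path string) bool {
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			return false
		}
	}
	return true
}

func main() {
	// Illustrative path; prints false unless run inside a checkout that has it.
	fmt.Println(fixtureExists("internal/mock/0.15.3/api/v2/silences"))
}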

@@ -80,7 +80,9 @@ func mockAlerts(version string) {
 	mock.RegisterURL("http://localhost/metrics", version, "metrics")
 	mock.RegisterURL("http://localhost/api/v1/status", version, "api/v1/status")
 	mock.RegisterURL("http://localhost/api/v1/silences", version, "api/v1/silences")
+	mock.RegisterURL("http://localhost/api/v2/silences", version, "api/v2/silences")
 	mock.RegisterURL("http://localhost/api/v1/alerts/groups", version, "api/v1/alerts/groups")
+	mock.RegisterURL("http://localhost/api/v2/alerts/groups", version, "api/v2/alerts/groups")
 	pullFromAlertmanager()
 }