chore(tests): add alertmanager 0.21.0 mock files

Author: Łukasz Mierzwa
Date: 2020-06-19 13:33:17 +01:00
Committed by: Łukasz Mierzwa
Parent: 6becbee0fa
Commit: 20dfcd1b6a
6 changed files with 1419 additions and 1 deletion

internal/mock/0.21.0/.ok (new, empty file)

@@ -0,0 +1,850 @@
[
{
"alerts": [
{
"annotations": {
"alert": "Less than 10% disk space is free",
"dashboard": "http://localhost/dashboard.html"
},
"endsAt": "2020-06-19T12:18:29.798Z",
"fingerprint": "aae7a1432b5d2f1b",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Free_Disk_Space_Too_Low",
"cluster": "staging",
"disk": "sda",
"instance": "server5",
"job": "node_exporter"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2020-06-19T12:12:49.754Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2020-06-19T12:13:29.798Z"
}
],
"labels": {
"alertname": "Free_Disk_Space_Too_Low"
},
"receiver": {
"name": "by-name"
}
},
{
"alerts": [
{
"annotations": {
"help": "Example help annotation",
"summary": "Example summary",
"url": "http://localhost/example.html"
},
"endsAt": "2020-06-19T12:18:29.798Z",
"fingerprint": "54c2f185e49cfccb",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "HTTP_Probe_Failed",
"cluster": "dev",
"instance": "web1",
"job": "node_exporter"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2020-06-19T12:12:49.754Z",
"status": {
"inhibitedBy": [],
"silencedBy": [
"65a786b3-4f9f-4152-b32d-43cbcec7c3bf"
],
"state": "suppressed"
},
"updatedAt": "2020-06-19T12:13:29.798Z"
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2020-06-19T12:18:29.798Z",
"fingerprint": "5cb0dd95e7f3d9c0",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "HTTP_Probe_Failed",
"cluster": "dev",
"instance": "web2",
"job": "node_exporter"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2020-06-19T12:12:49.754Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2020-06-19T12:13:29.798Z"
}
],
"labels": {
"alertname": "HTTP_Probe_Failed"
},
"receiver": {
"name": "by-name"
}
},
{
"alerts": [
{
"annotations": {
"summary": "Example summary",
"url": "http://localhost/example.html"
},
"endsAt": "2020-06-19T12:18:29.798Z",
"fingerprint": "7013294faf5f854d",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "prod",
"instance": "server1",
"ip": "127.0.0.1",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2020-06-19T12:12:49.754Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2020-06-19T12:13:29.798Z"
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2020-06-19T12:18:29.798Z",
"fingerprint": "fa959d3911d1978b",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "prod",
"instance": "server2",
"ip": "127.0.0.2",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2020-06-19T12:12:49.754Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2020-06-19T12:13:29.798Z"
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2020-06-19T12:18:29.798Z",
"fingerprint": "7f3a53482c303b65",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "staging",
"instance": "server3",
"ip": "127.0.0.3",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2020-06-19T12:12:49.754Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2020-06-19T12:13:29.798Z"
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2020-06-19T12:18:29.798Z",
"fingerprint": "7f5c6e647877b3df",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "staging",
"instance": "server4",
"ip": "127.0.0.4",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2020-06-19T12:12:49.754Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2020-06-19T12:13:29.798Z"
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2020-06-19T12:18:29.798Z",
"fingerprint": "b5dcc9c573def911",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "staging",
"instance": "server5",
"ip": "127.0.0.5",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2020-06-19T12:12:49.754Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2020-06-19T12:13:29.798Z"
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2020-06-19T12:18:29.798Z",
"fingerprint": "588a14f7b4613621",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "dev",
"instance": "server6",
"ip": "127.0.0.6",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2020-06-19T12:12:49.754Z",
"status": {
"inhibitedBy": [],
"silencedBy": [
"b8e50246-5b97-45ac-b440-7dd5b7dcb2a8"
],
"state": "suppressed"
},
"updatedAt": "2020-06-19T12:13:29.798Z"
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2020-06-19T12:18:29.798Z",
"fingerprint": "cc58f2ac8260fb97",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "dev",
"instance": "server7",
"ip": "127.0.0.7",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2020-06-19T12:12:49.754Z",
"status": {
"inhibitedBy": [],
"silencedBy": [
"383f8f76-722c-44f6-81b5-d5550059baaf",
"b8e50246-5b97-45ac-b440-7dd5b7dcb2a8"
],
"state": "suppressed"
},
"updatedAt": "2020-06-19T12:13:29.798Z"
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2020-06-19T12:18:29.798Z",
"fingerprint": "ffbb5f178eb27f11",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "dev",
"instance": "server8",
"ip": "127.0.0.8",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2020-06-19T12:12:49.754Z",
"status": {
"inhibitedBy": [],
"silencedBy": [
"b8e50246-5b97-45ac-b440-7dd5b7dcb2a8"
],
"state": "suppressed"
},
"updatedAt": "2020-06-19T12:13:29.798Z"
}
],
"labels": {
"alertname": "Host_Down"
},
"receiver": {
"name": "by-name"
}
},
{
"alerts": [
{
"annotations": {
"alert": "Memory usage exceeding threshold",
"dashboard": "http://localhost/dashboard.html"
},
"endsAt": "2020-06-19T12:18:29.798Z",
"fingerprint": "7d0b114ebf24f857",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Memory_Usage_Too_High",
"cluster": "prod",
"instance": "server2",
"job": "node_exporter"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2020-06-19T12:12:49.754Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2020-06-19T12:13:29.798Z"
}
],
"labels": {
"alertname": "Memory_Usage_Too_High"
},
"receiver": {
"name": "by-name"
}
},
{
"alerts": [
{
"annotations": {
"alert": "Less than 10% disk space is free",
"dashboard": "http://localhost/dashboard.html"
},
"endsAt": "2020-06-19T12:18:29.798Z",
"fingerprint": "aae7a1432b5d2f1b",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Free_Disk_Space_Too_Low",
"cluster": "staging",
"disk": "sda",
"instance": "server5",
"job": "node_exporter"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2020-06-19T12:12:49.754Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2020-06-19T12:13:29.798Z"
}
],
"labels": {
"alertname": "Free_Disk_Space_Too_Low",
"cluster": "staging"
},
"receiver": {
"name": "by-cluster-service"
}
},
{
"alerts": [
{
"annotations": {
"help": "Example help annotation",
"summary": "Example summary",
"url": "http://localhost/example.html"
},
"endsAt": "2020-06-19T12:18:29.798Z",
"fingerprint": "54c2f185e49cfccb",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "HTTP_Probe_Failed",
"cluster": "dev",
"instance": "web1",
"job": "node_exporter"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2020-06-19T12:12:49.754Z",
"status": {
"inhibitedBy": [],
"silencedBy": [
"65a786b3-4f9f-4152-b32d-43cbcec7c3bf"
],
"state": "suppressed"
},
"updatedAt": "2020-06-19T12:13:29.798Z"
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2020-06-19T12:18:29.798Z",
"fingerprint": "5cb0dd95e7f3d9c0",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "HTTP_Probe_Failed",
"cluster": "dev",
"instance": "web2",
"job": "node_exporter"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2020-06-19T12:12:49.754Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2020-06-19T12:13:29.798Z"
}
],
"labels": {
"alertname": "HTTP_Probe_Failed",
"cluster": "dev"
},
"receiver": {
"name": "by-cluster-service"
}
},
{
"alerts": [
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2020-06-19T12:18:29.798Z",
"fingerprint": "588a14f7b4613621",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "dev",
"instance": "server6",
"ip": "127.0.0.6",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2020-06-19T12:12:49.754Z",
"status": {
"inhibitedBy": [],
"silencedBy": [
"b8e50246-5b97-45ac-b440-7dd5b7dcb2a8"
],
"state": "suppressed"
},
"updatedAt": "2020-06-19T12:13:29.798Z"
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2020-06-19T12:18:29.798Z",
"fingerprint": "cc58f2ac8260fb97",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "dev",
"instance": "server7",
"ip": "127.0.0.7",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2020-06-19T12:12:49.754Z",
"status": {
"inhibitedBy": [],
"silencedBy": [
"383f8f76-722c-44f6-81b5-d5550059baaf",
"b8e50246-5b97-45ac-b440-7dd5b7dcb2a8"
],
"state": "suppressed"
},
"updatedAt": "2020-06-19T12:13:29.798Z"
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2020-06-19T12:18:29.798Z",
"fingerprint": "ffbb5f178eb27f11",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "dev",
"instance": "server8",
"ip": "127.0.0.8",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2020-06-19T12:12:49.754Z",
"status": {
"inhibitedBy": [],
"silencedBy": [
"b8e50246-5b97-45ac-b440-7dd5b7dcb2a8"
],
"state": "suppressed"
},
"updatedAt": "2020-06-19T12:13:29.798Z"
}
],
"labels": {
"alertname": "Host_Down",
"cluster": "dev"
},
"receiver": {
"name": "by-cluster-service"
}
},
{
"alerts": [
{
"annotations": {
"summary": "Example summary",
"url": "http://localhost/example.html"
},
"endsAt": "2020-06-19T12:18:29.798Z",
"fingerprint": "7013294faf5f854d",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "prod",
"instance": "server1",
"ip": "127.0.0.1",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2020-06-19T12:12:49.754Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2020-06-19T12:13:29.798Z"
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2020-06-19T12:18:29.798Z",
"fingerprint": "fa959d3911d1978b",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "prod",
"instance": "server2",
"ip": "127.0.0.2",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2020-06-19T12:12:49.754Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2020-06-19T12:13:29.798Z"
}
],
"labels": {
"alertname": "Host_Down",
"cluster": "prod"
},
"receiver": {
"name": "by-cluster-service"
}
},
{
"alerts": [
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2020-06-19T12:18:29.798Z",
"fingerprint": "7f3a53482c303b65",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "staging",
"instance": "server3",
"ip": "127.0.0.3",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2020-06-19T12:12:49.754Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2020-06-19T12:13:29.798Z"
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2020-06-19T12:18:29.798Z",
"fingerprint": "7f5c6e647877b3df",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "staging",
"instance": "server4",
"ip": "127.0.0.4",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2020-06-19T12:12:49.754Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2020-06-19T12:13:29.798Z"
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2020-06-19T12:18:29.798Z",
"fingerprint": "b5dcc9c573def911",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "staging",
"instance": "server5",
"ip": "127.0.0.5",
"job": "node_ping"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2020-06-19T12:12:49.754Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2020-06-19T12:13:29.798Z"
}
],
"labels": {
"alertname": "Host_Down",
"cluster": "staging"
},
"receiver": {
"name": "by-cluster-service"
}
},
{
"alerts": [
{
"annotations": {
"alert": "Memory usage exceeding threshold",
"dashboard": "http://localhost/dashboard.html"
},
"endsAt": "2020-06-19T12:18:29.798Z",
"fingerprint": "7d0b114ebf24f857",
"generatorURL": "localhost/prometheus",
"labels": {
"alertname": "Memory_Usage_Too_High",
"cluster": "prod",
"instance": "server2",
"job": "node_exporter"
},
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2020-06-19T12:12:49.754Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2020-06-19T12:13:29.798Z"
}
],
"labels": {
"alertname": "Memory_Usage_Too_High",
"cluster": "prod"
},
"receiver": {
"name": "by-cluster-service"
}
}
]
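
The payload above matches the shape of Alertmanager's `GET /api/v2/alerts/groups` response: one entry per grouping-label set and receiver, each carrying its grouped alerts. A minimal Go sketch for decoding it; the struct and the file path are illustrative, since the diff elides the actual mock file names:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// alertGroup covers only the fields present in the mock payload;
// the repo's own types may differ.
type alertGroup struct {
	Labels   map[string]string `json:"labels"`
	Receiver struct {
		Name string `json:"name"`
	} `json:"receiver"`
	Alerts []struct {
		Fingerprint string            `json:"fingerprint"`
		Labels      map[string]string `json:"labels"`
		Annotations map[string]string `json:"annotations"`
		Status      struct {
			State      string   `json:"state"`
			SilencedBy []string `json:"silencedBy"`
		} `json:"status"`
	} `json:"alerts"`
}

func main() {
	// Hypothetical path; the real mock file name is elided in the diff above.
	data, err := os.ReadFile("internal/mock/0.21.0/groups.json")
	if err != nil {
		panic(err)
	}
	var groups []alertGroup
	if err := json.Unmarshal(data, &groups); err != nil {
		panic(err)
	}
	for _, g := range groups {
		fmt.Printf("%s -> %s: %d alert(s)\n",
			g.Labels["alertname"], g.Receiver.Name, len(g.Alerts))
	}
}
```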

@@ -0,0 +1,61 @@
[
{
"comment": "Silenced instance",
"createdBy": "john@example.com",
"endsAt": "2063-01-01T00:00:00.000Z",
"id": "65a786b3-4f9f-4152-b32d-43cbcec7c3bf",
"matchers": [
{
"isRegex": false,
"name": "instance",
"value": "web1"
}
],
"startsAt": "2020-06-19T12:12:49.727Z",
"status": {
"state": "active"
},
"updatedAt": "2020-06-19T12:12:49.727Z"
},
{
"comment": "Silenced Host_Down alerts in the dev cluster",
"createdBy": "john@example.com",
"endsAt": "2063-01-01T00:00:00.000Z",
"id": "b8e50246-5b97-45ac-b440-7dd5b7dcb2a8",
"matchers": [
{
"isRegex": false,
"name": "alertname",
"value": "Host_Down"
},
{
"isRegex": false,
"name": "cluster",
"value": "dev"
}
],
"startsAt": "2020-06-19T12:12:49.740Z",
"status": {
"state": "active"
},
"updatedAt": "2020-06-19T12:12:49.740Z"
},
{
"comment": "Silenced server7",
"createdBy": "john@example.com",
"endsAt": "2063-01-01T00:00:00.000Z",
"id": "383f8f76-722c-44f6-81b5-d5550059baaf",
"matchers": [
{
"isRegex": false,
"name": "instance",
"value": "server7"
}
],
"startsAt": "2020-06-19T12:12:49.745Z",
"status": {
"state": "active"
},
"updatedAt": "2020-06-19T12:12:49.745Z"
}
]
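
These silence objects have the shape returned by `GET /api/v2/silences`, and a silence with the same fields can be created through the v2 API. A minimal sketch, assuming an Alertmanager reachable on localhost:9093:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"time"
)

// matcher and silence mirror the fields seen in the mock payload above.
type matcher struct {
	IsRegex bool   `json:"isRegex"`
	Name    string `json:"name"`
	Value   string `json:"value"`
}

type silence struct {
	Comment   string    `json:"comment"`
	CreatedBy string    `json:"createdBy"`
	StartsAt  time.Time `json:"startsAt"`
	EndsAt    time.Time `json:"endsAt"`
	Matchers  []matcher `json:"matchers"`
}

func main() {
	s := silence{
		Comment:   "Silenced instance",
		CreatedBy: "john@example.com",
		StartsAt:  time.Now(),
		EndsAt:    time.Now().Add(time.Hour),
		Matchers:  []matcher{{Name: "instance", Value: "web1"}},
	}
	body, err := json.Marshal(s)
	if err != nil {
		panic(err)
	}
	// Assumes an Alertmanager listening on localhost:9093.
	resp, err := http.Post("http://localhost:9093/api/v2/silences",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(out))
}
```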

@@ -0,0 +1,24 @@
{
"cluster": {
"name": "01EB67XCFES27NAFAGSW48NAHC",
"peers": [
{
"address": "172.17.0.2:9094",
"name": "01EB67XCFES27NAFAGSW48NAHC"
}
],
"status": "ready"
},
"config": {
"original": "global:\n resolve_timeout: 5m\n http_config: {}\n smtp_hello: localhost\n smtp_require_tls: true\n pagerduty_url: https://events.pagerduty.com/v2/enqueue\n opsgenie_api_url: https://api.opsgenie.com/\n wechat_api_url: https://qyapi.weixin.qq.com/cgi-bin/\n victorops_api_url: https://alert.victorops.com/integrations/generic/20131114/alert/\nroute:\n receiver: default\n group_by:\n - alertname\n routes:\n - receiver: by-cluster-service\n group_by:\n - alertname\n - cluster\n - service\n match_re:\n alertname: .*\n continue: true\n - receiver: by-name\n group_by:\n - alertname\n match_re:\n alertname: .*\n continue: true\n group_wait: 15s\n group_interval: 35s\n repeat_interval: 999h\ninhibit_rules:\n- source_match:\n severity: critical\n target_match:\n severity: warning\n equal:\n - alertname\n - cluster\n - service\nreceivers:\n- name: default\n- name: by-cluster-service\n- name: by-name\ntemplates: []\n"
},
"uptime": "2020-06-19T12:12:34.674Z",
"versionInfo": {
"branch": "HEAD",
"buildDate": "20200617-08:54:02",
"buildUser": "root@dee35927357f",
"goVersion": "go1.14.4",
"revision": "4c6c03ebfe21009c546e4d1e9b92c371d67c021d",
"version": "0.21.0"
}
}

@@ -0,0 +1,483 @@
# HELP alertmanager_alerts How many alerts by state.
# TYPE alertmanager_alerts gauge
alertmanager_alerts{state="active"} 8
alertmanager_alerts{state="suppressed"} 4
# HELP alertmanager_alerts_invalid_total The total number of received alerts that were invalid.
# TYPE alertmanager_alerts_invalid_total counter
alertmanager_alerts_invalid_total{version="v1"} 0
alertmanager_alerts_invalid_total{version="v2"} 0
# HELP alertmanager_alerts_received_total The total number of received alerts.
# TYPE alertmanager_alerts_received_total counter
alertmanager_alerts_received_total{status="firing",version="v1"} 60
alertmanager_alerts_received_total{status="firing",version="v2"} 0
alertmanager_alerts_received_total{status="resolved",version="v1"} 0
alertmanager_alerts_received_total{status="resolved",version="v2"} 0
# HELP alertmanager_build_info A metric with a constant '1' value labeled by version, revision, branch, and goversion from which alertmanager was built.
# TYPE alertmanager_build_info gauge
alertmanager_build_info{branch="HEAD",goversion="go1.14.4",revision="4c6c03ebfe21009c546e4d1e9b92c371d67c021d",version="0.21.0"} 1
# HELP alertmanager_cluster_alive_messages_total Total number of received alive messages.
# TYPE alertmanager_cluster_alive_messages_total counter
alertmanager_cluster_alive_messages_total{peer="01EB67XCFES27NAFAGSW48NAHC"} 1
# HELP alertmanager_cluster_enabled Indicates whether the clustering is enabled or not.
# TYPE alertmanager_cluster_enabled gauge
alertmanager_cluster_enabled 1
# HELP alertmanager_cluster_failed_peers Number indicating the current number of failed peers in the cluster.
# TYPE alertmanager_cluster_failed_peers gauge
alertmanager_cluster_failed_peers 0
# HELP alertmanager_cluster_health_score Health score of the cluster. Lower values are better and zero means 'totally healthy'.
# TYPE alertmanager_cluster_health_score gauge
alertmanager_cluster_health_score 0
# HELP alertmanager_cluster_members Number indicating current number of members in cluster.
# TYPE alertmanager_cluster_members gauge
alertmanager_cluster_members 1
# HELP alertmanager_cluster_messages_pruned_total Total number of cluster messages pruned.
# TYPE alertmanager_cluster_messages_pruned_total counter
alertmanager_cluster_messages_pruned_total 0
# HELP alertmanager_cluster_messages_queued Number of cluster messages which are queued.
# TYPE alertmanager_cluster_messages_queued gauge
alertmanager_cluster_messages_queued 3
# HELP alertmanager_cluster_messages_received_size_total Total size of cluster messages received.
# TYPE alertmanager_cluster_messages_received_size_total counter
alertmanager_cluster_messages_received_size_total{msg_type="full_state"} 0
alertmanager_cluster_messages_received_size_total{msg_type="update"} 0
# HELP alertmanager_cluster_messages_received_total Total number of cluster messages received.
# TYPE alertmanager_cluster_messages_received_total counter
alertmanager_cluster_messages_received_total{msg_type="full_state"} 0
alertmanager_cluster_messages_received_total{msg_type="update"} 0
# HELP alertmanager_cluster_messages_sent_size_total Total size of cluster messages sent.
# TYPE alertmanager_cluster_messages_sent_size_total counter
alertmanager_cluster_messages_sent_size_total{msg_type="full_state"} 0
alertmanager_cluster_messages_sent_size_total{msg_type="update"} 0
# HELP alertmanager_cluster_messages_sent_total Total number of cluster messages sent.
# TYPE alertmanager_cluster_messages_sent_total counter
alertmanager_cluster_messages_sent_total{msg_type="full_state"} 0
alertmanager_cluster_messages_sent_total{msg_type="update"} 0
# HELP alertmanager_cluster_peer_info A metric with a constant '1' value labeled by peer name.
# TYPE alertmanager_cluster_peer_info gauge
alertmanager_cluster_peer_info{peer="01EB67XCFES27NAFAGSW48NAHC"} 1
# HELP alertmanager_cluster_peers_joined_total A counter of the number of peers that have joined.
# TYPE alertmanager_cluster_peers_joined_total counter
alertmanager_cluster_peers_joined_total 1
# HELP alertmanager_cluster_peers_left_total A counter of the number of peers that have left.
# TYPE alertmanager_cluster_peers_left_total counter
alertmanager_cluster_peers_left_total 0
# HELP alertmanager_cluster_peers_update_total A counter of the number of peers that have updated metadata.
# TYPE alertmanager_cluster_peers_update_total counter
alertmanager_cluster_peers_update_total 0
# HELP alertmanager_cluster_reconnections_failed_total A counter of the number of failed cluster peer reconnection attempts.
# TYPE alertmanager_cluster_reconnections_failed_total counter
alertmanager_cluster_reconnections_failed_total 0
# HELP alertmanager_cluster_reconnections_total A counter of the number of cluster peer reconnections.
# TYPE alertmanager_cluster_reconnections_total counter
alertmanager_cluster_reconnections_total 0
# HELP alertmanager_cluster_refresh_join_failed_total A counter of the number of failed cluster peer joined attempts via refresh.
# TYPE alertmanager_cluster_refresh_join_failed_total counter
alertmanager_cluster_refresh_join_failed_total 0
# HELP alertmanager_cluster_refresh_join_total A counter of the number of cluster peer joined via refresh.
# TYPE alertmanager_cluster_refresh_join_total counter
alertmanager_cluster_refresh_join_total 0
# HELP alertmanager_config_hash Hash of the currently loaded alertmanager configuration.
# TYPE alertmanager_config_hash gauge
alertmanager_config_hash 6.2645753076152e+13
# HELP alertmanager_config_last_reload_success_timestamp_seconds Timestamp of the last successful configuration reload.
# TYPE alertmanager_config_last_reload_success_timestamp_seconds gauge
alertmanager_config_last_reload_success_timestamp_seconds 1.592568754e+09
# HELP alertmanager_config_last_reload_successful Whether the last configuration reload attempt was successful.
# TYPE alertmanager_config_last_reload_successful gauge
alertmanager_config_last_reload_successful 1
# HELP alertmanager_dispatcher_aggregation_groups Number of active aggregation groups
# TYPE alertmanager_dispatcher_aggregation_groups gauge
alertmanager_dispatcher_aggregation_groups 10
# HELP alertmanager_dispatcher_alert_processing_duration_seconds Summary of latencies for the processing of alerts.
# TYPE alertmanager_dispatcher_alert_processing_duration_seconds summary
alertmanager_dispatcher_alert_processing_duration_seconds_sum 0.0010157679999999994
alertmanager_dispatcher_alert_processing_duration_seconds_count 60
# HELP alertmanager_http_concurrency_limit_exceeded_total Total number of times an HTTP request failed because the concurrency limit was reached.
# TYPE alertmanager_http_concurrency_limit_exceeded_total counter
alertmanager_http_concurrency_limit_exceeded_total{method="get"} 0
# HELP alertmanager_http_request_duration_seconds Histogram of latencies for HTTP requests.
# TYPE alertmanager_http_request_duration_seconds histogram
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="0.05"} 5
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="0.1"} 5
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="0.25"} 5
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="0.5"} 5
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="0.75"} 5
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="1"} 5
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="2"} 5
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="5"} 5
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="20"} 5
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="60"} 5
alertmanager_http_request_duration_seconds_bucket{handler="/alerts",method="post",le="+Inf"} 5
alertmanager_http_request_duration_seconds_sum{handler="/alerts",method="post"} 0.0036524789999999997
alertmanager_http_request_duration_seconds_count{handler="/alerts",method="post"} 5
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="0.05"} 3
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="0.1"} 3
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="0.25"} 3
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="0.5"} 3
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="0.75"} 3
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="1"} 3
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="2"} 3
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="5"} 3
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="20"} 3
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="60"} 3
alertmanager_http_request_duration_seconds_bucket{handler="/silences",method="post",le="+Inf"} 3
alertmanager_http_request_duration_seconds_sum{handler="/silences",method="post"} 0.003017815
alertmanager_http_request_duration_seconds_count{handler="/silences",method="post"} 3
# HELP alertmanager_http_requests_in_flight Current number of HTTP requests being processed.
# TYPE alertmanager_http_requests_in_flight gauge
alertmanager_http_requests_in_flight{method="get"} 1
# HELP alertmanager_http_response_size_bytes Histogram of response size for HTTP requests.
# TYPE alertmanager_http_response_size_bytes histogram
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="100"} 5
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="1000"} 5
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="10000"} 5
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="100000"} 5
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="1e+06"} 5
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="1e+07"} 5
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="1e+08"} 5
alertmanager_http_response_size_bytes_bucket{handler="/alerts",method="post",le="+Inf"} 5
alertmanager_http_response_size_bytes_sum{handler="/alerts",method="post"} 100
alertmanager_http_response_size_bytes_count{handler="/alerts",method="post"} 5
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="100"} 3
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="1000"} 3
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="10000"} 3
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="100000"} 3
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="1e+06"} 3
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="1e+07"} 3
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="1e+08"} 3
alertmanager_http_response_size_bytes_bucket{handler="/silences",method="post",le="+Inf"} 3
alertmanager_http_response_size_bytes_sum{handler="/silences",method="post"} 240
alertmanager_http_response_size_bytes_count{handler="/silences",method="post"} 3
# HELP alertmanager_integrations Number of configured integrations.
# TYPE alertmanager_integrations gauge
alertmanager_integrations 0
# HELP alertmanager_nflog_gc_duration_seconds Duration of the last notification log garbage collection cycle.
# TYPE alertmanager_nflog_gc_duration_seconds summary
alertmanager_nflog_gc_duration_seconds_sum 0
alertmanager_nflog_gc_duration_seconds_count 0
# HELP alertmanager_nflog_gossip_messages_propagated_total Number of received gossip messages that have been further gossiped.
# TYPE alertmanager_nflog_gossip_messages_propagated_total counter
alertmanager_nflog_gossip_messages_propagated_total 0
# HELP alertmanager_nflog_queries_total Number of notification log queries were received.
# TYPE alertmanager_nflog_queries_total counter
alertmanager_nflog_queries_total 0
# HELP alertmanager_nflog_query_duration_seconds Duration of notification log query evaluation.
# TYPE alertmanager_nflog_query_duration_seconds histogram
alertmanager_nflog_query_duration_seconds_bucket{le="0.005"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="0.01"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="0.025"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="0.05"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="0.1"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="0.25"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="0.5"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="1"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="2.5"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="5"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="10"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="+Inf"} 0
alertmanager_nflog_query_duration_seconds_sum 0
alertmanager_nflog_query_duration_seconds_count 0
# HELP alertmanager_nflog_query_errors_total Number notification log received queries that failed.
# TYPE alertmanager_nflog_query_errors_total counter
alertmanager_nflog_query_errors_total 0
# HELP alertmanager_nflog_snapshot_duration_seconds Duration of the last notification log snapshot.
# TYPE alertmanager_nflog_snapshot_duration_seconds summary
alertmanager_nflog_snapshot_duration_seconds_sum 0
alertmanager_nflog_snapshot_duration_seconds_count 0
# HELP alertmanager_nflog_snapshot_size_bytes Size of the last notification log snapshot in bytes.
# TYPE alertmanager_nflog_snapshot_size_bytes gauge
alertmanager_nflog_snapshot_size_bytes 0
# HELP alertmanager_notification_latency_seconds The latency of notifications in seconds.
# TYPE alertmanager_notification_latency_seconds histogram
alertmanager_notification_latency_seconds_bucket{integration="email",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="email",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="email",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="email",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="email",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="email",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="email"} 0
alertmanager_notification_latency_seconds_count{integration="email"} 0
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="opsgenie"} 0
alertmanager_notification_latency_seconds_count{integration="opsgenie"} 0
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="pagerduty"} 0
alertmanager_notification_latency_seconds_count{integration="pagerduty"} 0
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="pushover"} 0
alertmanager_notification_latency_seconds_count{integration="pushover"} 0
alertmanager_notification_latency_seconds_bucket{integration="slack",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="slack",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="slack",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="slack",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="slack",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="slack",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="slack"} 0
alertmanager_notification_latency_seconds_count{integration="slack"} 0
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="victorops"} 0
alertmanager_notification_latency_seconds_count{integration="victorops"} 0
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="webhook"} 0
alertmanager_notification_latency_seconds_count{integration="webhook"} 0
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="wechat"} 0
alertmanager_notification_latency_seconds_count{integration="wechat"} 0
# HELP alertmanager_notifications_failed_total The total number of failed notifications.
# TYPE alertmanager_notifications_failed_total counter
alertmanager_notifications_failed_total{integration="email"} 0
alertmanager_notifications_failed_total{integration="opsgenie"} 0
alertmanager_notifications_failed_total{integration="pagerduty"} 0
alertmanager_notifications_failed_total{integration="pushover"} 0
alertmanager_notifications_failed_total{integration="slack"} 0
alertmanager_notifications_failed_total{integration="victorops"} 0
alertmanager_notifications_failed_total{integration="webhook"} 0
alertmanager_notifications_failed_total{integration="wechat"} 0
# HELP alertmanager_notifications_total The total number of attempted notifications.
# TYPE alertmanager_notifications_total counter
alertmanager_notifications_total{integration="email"} 0
alertmanager_notifications_total{integration="opsgenie"} 0
alertmanager_notifications_total{integration="pagerduty"} 0
alertmanager_notifications_total{integration="pushover"} 0
alertmanager_notifications_total{integration="slack"} 0
alertmanager_notifications_total{integration="victorops"} 0
alertmanager_notifications_total{integration="webhook"} 0
alertmanager_notifications_total{integration="wechat"} 0
# HELP alertmanager_oversize_gossip_message_duration_seconds Duration of oversized gossip message requests.
# TYPE alertmanager_oversize_gossip_message_duration_seconds histogram
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.005"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.01"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.025"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.05"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.1"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.25"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.5"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="1"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="2.5"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="5"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="10"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="+Inf"} 0
alertmanager_oversize_gossip_message_duration_seconds_sum{key="nfl"} 0
alertmanager_oversize_gossip_message_duration_seconds_count{key="nfl"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.005"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.01"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.025"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.05"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.1"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.25"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.5"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="1"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="2.5"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="5"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="10"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="+Inf"} 0
alertmanager_oversize_gossip_message_duration_seconds_sum{key="sil"} 0
alertmanager_oversize_gossip_message_duration_seconds_count{key="sil"} 0
# HELP alertmanager_oversized_gossip_message_dropped_total Number of oversized gossip messages that were dropped due to a full message queue.
# TYPE alertmanager_oversized_gossip_message_dropped_total counter
alertmanager_oversized_gossip_message_dropped_total{key="nfl"} 0
alertmanager_oversized_gossip_message_dropped_total{key="sil"} 0
# HELP alertmanager_oversized_gossip_message_failure_total Number of oversized gossip message sends that failed.
# TYPE alertmanager_oversized_gossip_message_failure_total counter
alertmanager_oversized_gossip_message_failure_total{key="nfl"} 0
alertmanager_oversized_gossip_message_failure_total{key="sil"} 0
# HELP alertmanager_oversized_gossip_message_sent_total Number of oversized gossip message sent.
# TYPE alertmanager_oversized_gossip_message_sent_total counter
alertmanager_oversized_gossip_message_sent_total{key="nfl"} 0
alertmanager_oversized_gossip_message_sent_total{key="sil"} 0
# HELP alertmanager_peer_position Position the Alertmanager instance believes it's in. The position determines a peer's behavior in the cluster.
# TYPE alertmanager_peer_position gauge
alertmanager_peer_position 0
# HELP alertmanager_receivers Number of configured receivers.
# TYPE alertmanager_receivers gauge
alertmanager_receivers 3
# HELP alertmanager_silences How many silences by state.
# TYPE alertmanager_silences gauge
alertmanager_silences{state="active"} 3
alertmanager_silences{state="expired"} 0
alertmanager_silences{state="pending"} 0
# HELP alertmanager_silences_gc_duration_seconds Duration of the last silence garbage collection cycle.
# TYPE alertmanager_silences_gc_duration_seconds summary
alertmanager_silences_gc_duration_seconds_sum 0
alertmanager_silences_gc_duration_seconds_count 0
# HELP alertmanager_silences_gossip_messages_propagated_total Number of received gossip messages that have been further gossiped.
# TYPE alertmanager_silences_gossip_messages_propagated_total counter
alertmanager_silences_gossip_messages_propagated_total 0
# HELP alertmanager_silences_queries_total How many silence queries were received.
# TYPE alertmanager_silences_queries_total counter
alertmanager_silences_queries_total 24
# HELP alertmanager_silences_query_duration_seconds Duration of silence query evaluation.
# TYPE alertmanager_silences_query_duration_seconds histogram
alertmanager_silences_query_duration_seconds_bucket{le="0.005"} 25
alertmanager_silences_query_duration_seconds_bucket{le="0.01"} 25
alertmanager_silences_query_duration_seconds_bucket{le="0.025"} 25
alertmanager_silences_query_duration_seconds_bucket{le="0.05"} 25
alertmanager_silences_query_duration_seconds_bucket{le="0.1"} 25
alertmanager_silences_query_duration_seconds_bucket{le="0.25"} 25
alertmanager_silences_query_duration_seconds_bucket{le="0.5"} 25
alertmanager_silences_query_duration_seconds_bucket{le="1"} 25
alertmanager_silences_query_duration_seconds_bucket{le="2.5"} 25
alertmanager_silences_query_duration_seconds_bucket{le="5"} 25
alertmanager_silences_query_duration_seconds_bucket{le="10"} 25
alertmanager_silences_query_duration_seconds_bucket{le="+Inf"} 25
alertmanager_silences_query_duration_seconds_sum 5.861099999999999e-05
alertmanager_silences_query_duration_seconds_count 25
# HELP alertmanager_silences_query_errors_total How many silence received queries did not succeed.
# TYPE alertmanager_silences_query_errors_total counter
alertmanager_silences_query_errors_total 0
# HELP alertmanager_silences_snapshot_duration_seconds Duration of the last silence snapshot.
# TYPE alertmanager_silences_snapshot_duration_seconds summary
alertmanager_silences_snapshot_duration_seconds_sum 0
alertmanager_silences_snapshot_duration_seconds_count 0
# HELP alertmanager_silences_snapshot_size_bytes Size of the last silence snapshot in bytes.
# TYPE alertmanager_silences_snapshot_size_bytes gauge
alertmanager_silences_snapshot_size_bytes 0
# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.
# TYPE go_gc_duration_seconds summary
go_gc_duration_seconds{quantile="0"} 5.0358e-05
go_gc_duration_seconds{quantile="0.25"} 7.322e-05
go_gc_duration_seconds{quantile="0.5"} 0.000134004
go_gc_duration_seconds{quantile="0.75"} 0.000298027
go_gc_duration_seconds{quantile="1"} 0.000298427
go_gc_duration_seconds_sum 0.00093139
go_gc_duration_seconds_count 6
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
go_goroutines 42
# HELP go_info Information about the Go environment.
# TYPE go_info gauge
go_info{version="go1.14.4"} 1
# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
# TYPE go_memstats_alloc_bytes gauge
go_memstats_alloc_bytes 5.970872e+06
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
# TYPE go_memstats_alloc_bytes_total counter
go_memstats_alloc_bytes_total 1.7212624e+07
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
# TYPE go_memstats_buck_hash_sys_bytes gauge
go_memstats_buck_hash_sys_bytes 1.452402e+06
# HELP go_memstats_frees_total Total number of frees.
# TYPE go_memstats_frees_total counter
go_memstats_frees_total 115787
# HELP go_memstats_gc_cpu_fraction The fraction of this program's available CPU time used by the GC since the program started.
# TYPE go_memstats_gc_cpu_fraction gauge
go_memstats_gc_cpu_fraction 0.017100866777820436
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
# TYPE go_memstats_gc_sys_bytes gauge
go_memstats_gc_sys_bytes 3.582216e+06
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
# TYPE go_memstats_heap_alloc_bytes gauge
go_memstats_heap_alloc_bytes 5.970872e+06
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
# TYPE go_memstats_heap_idle_bytes gauge
go_memstats_heap_idle_bytes 5.8335232e+07
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
# TYPE go_memstats_heap_inuse_bytes gauge
go_memstats_heap_inuse_bytes 8.019968e+06
# HELP go_memstats_heap_objects Number of allocated objects.
# TYPE go_memstats_heap_objects gauge
go_memstats_heap_objects 42240
# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.
# TYPE go_memstats_heap_released_bytes gauge
go_memstats_heap_released_bytes 5.81632e+07
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
# TYPE go_memstats_heap_sys_bytes gauge
go_memstats_heap_sys_bytes 6.63552e+07
# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
# TYPE go_memstats_last_gc_time_seconds gauge
go_memstats_last_gc_time_seconds 1.5925687547118459e+09
# HELP go_memstats_lookups_total Total number of pointer lookups.
# TYPE go_memstats_lookups_total counter
go_memstats_lookups_total 0
# HELP go_memstats_mallocs_total Total number of mallocs.
# TYPE go_memstats_mallocs_total counter
go_memstats_mallocs_total 158027
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
# TYPE go_memstats_mcache_inuse_bytes gauge
go_memstats_mcache_inuse_bytes 3472
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
# TYPE go_memstats_mcache_sys_bytes gauge
go_memstats_mcache_sys_bytes 16384
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
# TYPE go_memstats_mspan_inuse_bytes gauge
go_memstats_mspan_inuse_bytes 113016
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
# TYPE go_memstats_mspan_sys_bytes gauge
go_memstats_mspan_sys_bytes 131072
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
# TYPE go_memstats_next_gc_bytes gauge
go_memstats_next_gc_bytes 9.532608e+06
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
# TYPE go_memstats_other_sys_bytes gauge
go_memstats_other_sys_bytes 667270
# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
# TYPE go_memstats_stack_inuse_bytes gauge
go_memstats_stack_inuse_bytes 753664
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
# TYPE go_memstats_stack_sys_bytes gauge
go_memstats_stack_sys_bytes 753664
# HELP go_memstats_sys_bytes Number of bytes obtained from system.
# TYPE go_memstats_sys_bytes gauge
go_memstats_sys_bytes 7.2958208e+07
# HELP go_threads Number of OS threads created.
# TYPE go_threads gauge
go_threads 8
# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
# TYPE process_cpu_seconds_total counter
process_cpu_seconds_total 0.28
# HELP process_max_fds Maximum number of open file descriptors.
# TYPE process_max_fds gauge
process_max_fds 1.048576e+06
# HELP process_open_fds Number of open file descriptors.
# TYPE process_open_fds gauge
process_open_fds 11
# HELP process_resident_memory_bytes Resident memory size in bytes.
# TYPE process_resident_memory_bytes gauge
process_resident_memory_bytes 2.564096e+07
# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
# TYPE process_start_time_seconds gauge
process_start_time_seconds 1.59256875357e+09
# HELP process_virtual_memory_bytes Virtual memory size in bytes.
# TYPE process_virtual_memory_bytes gauge
process_virtual_memory_bytes 7.41470208e+08
# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.
# TYPE process_virtual_memory_max_bytes gauge
process_virtual_memory_max_bytes -1
# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.
# TYPE promhttp_metric_handler_requests_in_flight gauge
promhttp_metric_handler_requests_in_flight 1
# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.
# TYPE promhttp_metric_handler_requests_total counter
promhttp_metric_handler_requests_total{code="200"} 0
promhttp_metric_handler_requests_total{code="500"} 0
promhttp_metric_handler_requests_total{code="503"} 0

@@ -5,7 +5,7 @@ DOCKER_ARGS := --name $(DOCKER_NAME) --rm -d -p 9093:9093 \
-v $(CURDIR)/alertmanager.yml:/etc/alertmanager/alertmanager.yml
# list of Alertmanager versions to generate mock files for
-VERSIONS := 0.17.0 0.18.0 0.19.0 0.20.0
+VERSIONS := 0.17.0 0.18.0 0.19.0 0.20.0 0.21.0
%/.ok: livemock.py
$(eval VERSION := $(word 1, $(subst /, ,$@)))
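
The `%/.ok` pattern rule means each version's mocks are regenerated with e.g. `make 0.21.0/.ok`, with `$(word 1, $(subst /, ,$@))` extracting the version from the target path; adding `0.21.0` to `VERSIONS` presumably makes the default target depend on `0.21.0/.ok`. Tests can then point an HTTP client at the captured files instead of a live Alertmanager. A minimal sketch; the URL-to-file mapping is an assumption, since the diff elides the actual mock file names:

```go
package mock_test

import (
	"io"
	"net/http"
	"net/http/httptest"
	"os"
	"path/filepath"
	"testing"
)

// serveMocks answers requests from files under dir, standing in for a
// live Alertmanager of whichever version was captured there.
func serveMocks(t *testing.T, dir string) *httptest.Server {
	t.Helper()
	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		f, err := os.Open(filepath.Join(dir, filepath.FromSlash(r.URL.Path)))
		if err != nil {
			http.NotFound(w, r)
			return
		}
		defer f.Close()
		io.Copy(w, f)
	}))
}

func TestMockStatus(t *testing.T) {
	srv := serveMocks(t, "internal/mock/0.21.0")
	defer srv.Close()

	resp, err := http.Get(srv.URL + "/api/v2/status")
	if err != nil {
		t.Fatal(err)
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("unexpected status: %s", resp.Status)
	}
}
```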