fix(ci): add test files for alertmanager v0.25.0

Łukasz Mierzwa
2022-12-23 14:54:00 +00:00
committed by Łukasz Mierzwa
parent aa0c997e0d
commit c729029748
9 changed files with 1427 additions and 4 deletions


@@ -3,7 +3,11 @@ updates:
- package-ecosystem: "docker"
directory: "/"
schedule:
interval: "weekly"
interval: "daily"
- package-ecosystem: "docker"
directory: "/demo"
schedule:
interval: "daily"
- package-ecosystem: "gomod"
directory: "/"
schedule:


@@ -1,4 +1,4 @@
-ALERTMANAGER_VERSION := v0.25.0
+ALERTMANAGER_VERSION := v0.25.0
 API_VERSION := v2
 PACKAGE := v017
 TARGET_DIR := /go/src/github.com/prymitive/karma/internal/mapper/$(PACKAGE)


@@ -26,7 +26,7 @@ const (
DefaultHost string = "localhost"
// DefaultBasePath is the default BasePath
// found in Meta (info) section of spec file
DefaultBasePath string = "/"
DefaultBasePath string = "/api/v2/"
)
// DefaultSchemes are the default schemes found in Meta (info) section of spec file
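For reference, go-swagger generated clients fall back to these constants when no explicit host or base path is supplied, so the regenerated client now defaults to the v2 API prefix. A minimal, hypothetical sketch of wiring a transport against that default (the go-openapi runtime package is real; everything else is illustrative):

// Sketch only: a go-swagger style transport pointed at the base path the
// regenerated client now defaults to; not the project's actual wiring.
package main

import (
	"fmt"

	httptransport "github.com/go-openapi/runtime/client"
)

func main() {
	// Equivalent to relying on DefaultHost/DefaultBasePath/DefaultSchemes:
	// requests target http://localhost/api/v2/... unless overridden.
	rt := httptransport.New("localhost", "/api/v2/", []string{"http"})
	fmt.Printf("transport ready: %T\n", rt)
}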

internal/mock/0.25.0/.ok (new file, 0 lines)

@@ -0,0 +1,850 @@
[
{
"alerts": [
{
"annotations": {
"alert": "Less than 10% disk space is free",
"dashboard": "http://localhost/dashboard.html"
},
"endsAt": "2022-12-23T14:51:30.578Z",
"fingerprint": "aae7a1432b5d2f1b",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2022-12-23T14:45:50.528Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2022-12-23T14:46:30.578Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Free_Disk_Space_Too_Low",
"cluster": "staging",
"disk": "sda",
"instance": "server5",
"job": "node_exporter"
}
}
],
"labels": {
"alertname": "Free_Disk_Space_Too_Low"
},
"receiver": {
"name": "by-name"
}
},
{
"alerts": [
{
"annotations": {
"help": "Example help annotation",
"summary": "Example summary",
"url": "http://localhost/example.html"
},
"endsAt": "2022-12-23T14:51:30.578Z",
"fingerprint": "54c2f185e49cfccb",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2022-12-23T14:45:50.528Z",
"status": {
"inhibitedBy": [],
"silencedBy": [
"a58d1a5d-2cec-4430-a27e-f3adcb9727d2"
],
"state": "suppressed"
},
"updatedAt": "2022-12-23T14:46:30.578Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "HTTP_Probe_Failed",
"cluster": "dev",
"instance": "web1",
"job": "node_exporter"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2022-12-23T14:51:30.578Z",
"fingerprint": "5cb0dd95e7f3d9c0",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2022-12-23T14:45:50.528Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2022-12-23T14:46:30.578Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "HTTP_Probe_Failed",
"cluster": "dev",
"instance": "web2",
"job": "node_exporter"
}
}
],
"labels": {
"alertname": "HTTP_Probe_Failed"
},
"receiver": {
"name": "by-name"
}
},
{
"alerts": [
{
"annotations": {
"summary": "Example summary",
"url": "http://localhost/example.html"
},
"endsAt": "2022-12-23T14:51:30.578Z",
"fingerprint": "7013294faf5f854d",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2022-12-23T14:45:50.528Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2022-12-23T14:46:30.578Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "prod",
"instance": "server1",
"ip": "127.0.0.1",
"job": "node_ping"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2022-12-23T14:51:30.578Z",
"fingerprint": "fa959d3911d1978b",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2022-12-23T14:45:50.528Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2022-12-23T14:46:30.578Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "prod",
"instance": "server2",
"ip": "127.0.0.2",
"job": "node_ping"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2022-12-23T14:51:30.578Z",
"fingerprint": "7f3a53482c303b65",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2022-12-23T14:45:50.528Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2022-12-23T14:46:30.578Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "staging",
"instance": "server3",
"ip": "127.0.0.3",
"job": "node_ping"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2022-12-23T14:51:30.578Z",
"fingerprint": "7f5c6e647877b3df",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2022-12-23T14:45:50.528Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2022-12-23T14:46:30.578Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "staging",
"instance": "server4",
"ip": "127.0.0.4",
"job": "node_ping"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2022-12-23T14:51:30.578Z",
"fingerprint": "b5dcc9c573def911",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2022-12-23T14:45:50.528Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2022-12-23T14:46:30.578Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "staging",
"instance": "server5",
"ip": "127.0.0.5",
"job": "node_ping"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2022-12-23T14:51:30.578Z",
"fingerprint": "588a14f7b4613621",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2022-12-23T14:45:50.528Z",
"status": {
"inhibitedBy": [],
"silencedBy": [
"fb79710a-ad92-4dcd-959f-10c180454df2"
],
"state": "suppressed"
},
"updatedAt": "2022-12-23T14:46:30.578Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "dev",
"instance": "server6",
"ip": "127.0.0.6",
"job": "node_ping"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2022-12-23T14:51:30.578Z",
"fingerprint": "cc58f2ac8260fb97",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2022-12-23T14:45:50.528Z",
"status": {
"inhibitedBy": [],
"silencedBy": [
"20972742-87df-4ef9-8535-48a62d7b9cbb",
"fb79710a-ad92-4dcd-959f-10c180454df2"
],
"state": "suppressed"
},
"updatedAt": "2022-12-23T14:46:30.578Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "dev",
"instance": "server7",
"ip": "127.0.0.7",
"job": "node_ping"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2022-12-23T14:51:30.578Z",
"fingerprint": "ffbb5f178eb27f11",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2022-12-23T14:45:50.528Z",
"status": {
"inhibitedBy": [],
"silencedBy": [
"fb79710a-ad92-4dcd-959f-10c180454df2"
],
"state": "suppressed"
},
"updatedAt": "2022-12-23T14:46:30.578Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "dev",
"instance": "server8",
"ip": "127.0.0.8",
"job": "node_ping"
}
}
],
"labels": {
"alertname": "Host_Down"
},
"receiver": {
"name": "by-name"
}
},
{
"alerts": [
{
"annotations": {
"alert": "Memory usage exceeding threshold",
"dashboard": "http://localhost/dashboard.html"
},
"endsAt": "2022-12-23T14:51:30.578Z",
"fingerprint": "7d0b114ebf24f857",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2022-12-23T14:45:50.528Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2022-12-23T14:46:30.578Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Memory_Usage_Too_High",
"cluster": "prod",
"instance": "server2",
"job": "node_exporter"
}
}
],
"labels": {
"alertname": "Memory_Usage_Too_High"
},
"receiver": {
"name": "by-name"
}
},
{
"alerts": [
{
"annotations": {
"alert": "Less than 10% disk space is free",
"dashboard": "http://localhost/dashboard.html"
},
"endsAt": "2022-12-23T14:51:30.578Z",
"fingerprint": "aae7a1432b5d2f1b",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2022-12-23T14:45:50.528Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2022-12-23T14:46:30.578Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Free_Disk_Space_Too_Low",
"cluster": "staging",
"disk": "sda",
"instance": "server5",
"job": "node_exporter"
}
}
],
"labels": {
"alertname": "Free_Disk_Space_Too_Low",
"cluster": "staging"
},
"receiver": {
"name": "by-cluster-service"
}
},
{
"alerts": [
{
"annotations": {
"help": "Example help annotation",
"summary": "Example summary",
"url": "http://localhost/example.html"
},
"endsAt": "2022-12-23T14:51:30.578Z",
"fingerprint": "54c2f185e49cfccb",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2022-12-23T14:45:50.528Z",
"status": {
"inhibitedBy": [],
"silencedBy": [
"a58d1a5d-2cec-4430-a27e-f3adcb9727d2"
],
"state": "suppressed"
},
"updatedAt": "2022-12-23T14:46:30.578Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "HTTP_Probe_Failed",
"cluster": "dev",
"instance": "web1",
"job": "node_exporter"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2022-12-23T14:51:30.578Z",
"fingerprint": "5cb0dd95e7f3d9c0",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2022-12-23T14:45:50.528Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2022-12-23T14:46:30.578Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "HTTP_Probe_Failed",
"cluster": "dev",
"instance": "web2",
"job": "node_exporter"
}
}
],
"labels": {
"alertname": "HTTP_Probe_Failed",
"cluster": "dev"
},
"receiver": {
"name": "by-cluster-service"
}
},
{
"alerts": [
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2022-12-23T14:51:30.578Z",
"fingerprint": "588a14f7b4613621",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2022-12-23T14:45:50.528Z",
"status": {
"inhibitedBy": [],
"silencedBy": [
"fb79710a-ad92-4dcd-959f-10c180454df2"
],
"state": "suppressed"
},
"updatedAt": "2022-12-23T14:46:30.578Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "dev",
"instance": "server6",
"ip": "127.0.0.6",
"job": "node_ping"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2022-12-23T14:51:30.578Z",
"fingerprint": "cc58f2ac8260fb97",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2022-12-23T14:45:50.528Z",
"status": {
"inhibitedBy": [],
"silencedBy": [
"20972742-87df-4ef9-8535-48a62d7b9cbb",
"fb79710a-ad92-4dcd-959f-10c180454df2"
],
"state": "suppressed"
},
"updatedAt": "2022-12-23T14:46:30.578Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "dev",
"instance": "server7",
"ip": "127.0.0.7",
"job": "node_ping"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2022-12-23T14:51:30.578Z",
"fingerprint": "ffbb5f178eb27f11",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2022-12-23T14:45:50.528Z",
"status": {
"inhibitedBy": [],
"silencedBy": [
"fb79710a-ad92-4dcd-959f-10c180454df2"
],
"state": "suppressed"
},
"updatedAt": "2022-12-23T14:46:30.578Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "dev",
"instance": "server8",
"ip": "127.0.0.8",
"job": "node_ping"
}
}
],
"labels": {
"alertname": "Host_Down",
"cluster": "dev"
},
"receiver": {
"name": "by-cluster-service"
}
},
{
"alerts": [
{
"annotations": {
"summary": "Example summary",
"url": "http://localhost/example.html"
},
"endsAt": "2022-12-23T14:51:30.578Z",
"fingerprint": "7013294faf5f854d",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2022-12-23T14:45:50.528Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2022-12-23T14:46:30.578Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "prod",
"instance": "server1",
"ip": "127.0.0.1",
"job": "node_ping"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2022-12-23T14:51:30.578Z",
"fingerprint": "fa959d3911d1978b",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2022-12-23T14:45:50.528Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2022-12-23T14:46:30.578Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "prod",
"instance": "server2",
"ip": "127.0.0.2",
"job": "node_ping"
}
}
],
"labels": {
"alertname": "Host_Down",
"cluster": "prod"
},
"receiver": {
"name": "by-cluster-service"
}
},
{
"alerts": [
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2022-12-23T14:51:30.578Z",
"fingerprint": "7f3a53482c303b65",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2022-12-23T14:45:50.528Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2022-12-23T14:46:30.578Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "staging",
"instance": "server3",
"ip": "127.0.0.3",
"job": "node_ping"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2022-12-23T14:51:30.578Z",
"fingerprint": "7f5c6e647877b3df",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2022-12-23T14:45:50.528Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2022-12-23T14:46:30.578Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "staging",
"instance": "server4",
"ip": "127.0.0.4",
"job": "node_ping"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2022-12-23T14:51:30.578Z",
"fingerprint": "b5dcc9c573def911",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2022-12-23T14:45:50.528Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2022-12-23T14:46:30.578Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "staging",
"instance": "server5",
"ip": "127.0.0.5",
"job": "node_ping"
}
}
],
"labels": {
"alertname": "Host_Down",
"cluster": "staging"
},
"receiver": {
"name": "by-cluster-service"
}
},
{
"alerts": [
{
"annotations": {
"alert": "Memory usage exceeding threshold",
"dashboard": "http://localhost/dashboard.html"
},
"endsAt": "2022-12-23T14:51:30.578Z",
"fingerprint": "7d0b114ebf24f857",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2022-12-23T14:45:50.528Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2022-12-23T14:46:30.578Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Memory_Usage_Too_High",
"cluster": "prod",
"instance": "server2",
"job": "node_exporter"
}
}
],
"labels": {
"alertname": "Memory_Usage_Too_High",
"cluster": "prod"
},
"receiver": {
"name": "by-cluster-service"
}
}
]
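These grouped-alert fixtures match the shape returned by Alertmanager's /api/v2/alerts/groups endpoint. As a rough illustration of how a test might consume the fixture (the file path and the trimmed-down struct below are assumptions, not the project's actual helpers):

// Illustrative sketch: serve the 0.25.0 alert-groups fixture over httptest
// and decode it. The fixture path and struct shape are assumptions.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
	"os"
)

type alertGroup struct {
	Labels   map[string]string `json:"labels"`
	Receiver struct {
		Name string `json:"name"`
	} `json:"receiver"`
	Alerts []struct {
		Fingerprint string            `json:"fingerprint"`
		Labels      map[string]string `json:"labels"`
		Status      struct {
			State      string   `json:"state"`
			SilencedBy []string `json:"silencedBy"`
		} `json:"status"`
	} `json:"alerts"`
}

func main() {
	body, err := os.ReadFile("internal/mock/0.25.0/api/v2/alerts/groups") // assumed path
	if err != nil {
		panic(err)
	}
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		w.Write(body)
	}))
	defer srv.Close()

	resp, err := http.Get(srv.URL + "/api/v2/alerts/groups")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var groups []alertGroup
	if err := json.NewDecoder(resp.Body).Decode(&groups); err != nil {
		panic(err)
	}
	fmt.Println("groups:", len(groups)) // this fixture contains 10 groups
}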


@@ -0,0 +1,65 @@
[
{
"id": "a58d1a5d-2cec-4430-a27e-f3adcb9727d2",
"status": {
"state": "active"
},
"updatedAt": "2022-12-23T14:45:50.517Z",
"comment": "Silenced instance",
"createdBy": "john@example.com",
"endsAt": "2063-01-01T00:00:00.000Z",
"matchers": [
{
"isEqual": true,
"isRegex": false,
"name": "instance",
"value": "web1"
}
],
"startsAt": "2022-12-23T14:45:50.517Z"
},
{
"id": "fb79710a-ad92-4dcd-959f-10c180454df2",
"status": {
"state": "active"
},
"updatedAt": "2022-12-23T14:45:50.521Z",
"comment": "Silenced Host_Down alerts in the dev cluster",
"createdBy": "john@example.com",
"endsAt": "2063-01-01T00:00:00.000Z",
"matchers": [
{
"isEqual": true,
"isRegex": false,
"name": "alertname",
"value": "Host_Down"
},
{
"isEqual": true,
"isRegex": false,
"name": "cluster",
"value": "dev"
}
],
"startsAt": "2022-12-23T14:45:50.521Z"
},
{
"id": "20972742-87df-4ef9-8535-48a62d7b9cbb",
"status": {
"state": "active"
},
"updatedAt": "2022-12-23T14:45:50.524Z",
"comment": "Silenced server7",
"createdBy": "john@example.com",
"endsAt": "2063-01-01T00:00:00.000Z",
"matchers": [
{
"isEqual": true,
"isRegex": false,
"name": "instance",
"value": "server7"
}
],
"startsAt": "2022-12-23T14:45:50.524Z"
}
]
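Each silence suppresses alerts whose labels satisfy all of its matchers; for example, fb79710a-ad92-4dcd-959f-10c180454df2 matches alertname=Host_Down plus cluster=dev, which is why the dev-cluster Host_Down alerts above list it in silencedBy. A minimal sketch of that matching logic (an illustration of the semantics only, not Alertmanager's own code):

// Sketch of silence-matcher semantics as exercised by the mock silences:
// a silence applies only if every matcher is satisfied by the alert's labels.
package main

import (
	"fmt"
	"regexp"
)

type matcher struct {
	Name    string
	Value   string
	IsRegex bool
	IsEqual bool
}

func matches(m matcher, labels map[string]string) bool {
	v := labels[m.Name]
	var ok bool
	if m.IsRegex {
		// regex matchers are anchored to the full label value
		ok = regexp.MustCompile("^(?:" + m.Value + ")$").MatchString(v)
	} else {
		ok = v == m.Value
	}
	if !m.IsEqual { // negative matcher (!= or !~)
		ok = !ok
	}
	return ok
}

func silenced(ms []matcher, labels map[string]string) bool {
	for _, m := range ms {
		if !matches(m, labels) {
			return false
		}
	}
	return true
}

func main() {
	hostDownDev := []matcher{
		{Name: "alertname", Value: "Host_Down", IsRegex: false, IsEqual: true},
		{Name: "cluster", Value: "dev", IsRegex: false, IsEqual: true},
	}
	alert := map[string]string{"alertname": "Host_Down", "cluster": "dev", "instance": "server6"}
	fmt.Println(silenced(hostDownDev, alert)) // true
}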


@@ -0,0 +1,24 @@
{
"cluster": {
"name": "01GMZQ4MTNBV8TSKEKHNG6F9BR",
"peers": [
{
"address": "172.17.0.2:9094",
"name": "01GMZQ4MTNBV8TSKEKHNG6F9BR"
}
],
"status": "ready"
},
"config": {
"original": "global:\n resolve_timeout: 5m\n http_config:\n follow_redirects: true\n enable_http2: true\n smtp_hello: localhost\n smtp_require_tls: true\n pagerduty_url: https://events.pagerduty.com/v2/enqueue\n opsgenie_api_url: https://api.opsgenie.com/\n wechat_api_url: https://qyapi.weixin.qq.com/cgi-bin/\n victorops_api_url: https://alert.victorops.com/integrations/generic/20131114/alert/\n telegram_api_url: https://api.telegram.org\n webex_api_url: https://webexapis.com/v1/messages\nroute:\n receiver: default\n group_by:\n - alertname\n continue: false\n routes:\n - receiver: by-cluster-service\n group_by:\n - alertname\n - cluster\n - service\n match_re:\n alertname: .*\n continue: true\n - receiver: by-name\n group_by:\n - alertname\n match_re:\n alertname: .*\n continue: true\n group_wait: 15s\n group_interval: 35s\n repeat_interval: 41d15h\ninhibit_rules:\n- source_match:\n severity: critical\n target_match:\n severity: warning\n equal:\n - alertname\n - cluster\n - service\nreceivers:\n- name: default\n- name: by-cluster-service\n- name: by-name\ntemplates: []\n"
},
"uptime": "2022-12-23T14:45:35.190Z",
"versionInfo": {
"branch": "HEAD",
"buildDate": "20221222-14:48:36",
"buildUser": "root@521a2d62cff8",
"goVersion": "go1.19.4",
"revision": "258fab7cdd551f2cf251ed0348f0ad7289aee789",
"version": "0.25.0"
}
}
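One way a test might sanity-check this status fixture is to decode only the fields it cares about, such as versionInfo.version; a minimal sketch (the struct shape and fixture path are assumptions for illustration):

// Minimal sketch: decode the mocked /api/v2/status payload and check the version.
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

type status struct {
	Cluster struct {
		Name   string `json:"name"`
		Status string `json:"status"`
	} `json:"cluster"`
	VersionInfo struct {
		Version string `json:"version"`
	} `json:"versionInfo"`
}

func main() {
	body, err := os.ReadFile("internal/mock/0.25.0/api/v2/status") // assumed path
	if err != nil {
		panic(err)
	}
	var s status
	if err := json.Unmarshal(body, &s); err != nil {
		panic(err)
	}
	fmt.Println(s.VersionInfo.Version, s.Cluster.Status) // "0.25.0 ready"
}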


@@ -0,0 +1,480 @@
# HELP alertmanager_alerts How many alerts by state.
# TYPE alertmanager_alerts gauge
alertmanager_alerts{state="active"} 8
alertmanager_alerts{state="suppressed"} 4
alertmanager_alerts{state="unprocessed"} 0
# HELP alertmanager_alerts_invalid_total The total number of received alerts that were invalid.
# TYPE alertmanager_alerts_invalid_total counter
alertmanager_alerts_invalid_total{version="v1"} 0
alertmanager_alerts_invalid_total{version="v2"} 0
# HELP alertmanager_alerts_received_total The total number of received alerts.
# TYPE alertmanager_alerts_received_total counter
alertmanager_alerts_received_total{status="firing",version="v1"} 0
alertmanager_alerts_received_total{status="firing",version="v2"} 60
alertmanager_alerts_received_total{status="resolved",version="v1"} 0
alertmanager_alerts_received_total{status="resolved",version="v2"} 0
# HELP alertmanager_build_info A metric with a constant '1' value labeled by version, revision, branch, and goversion from which alertmanager was built.
# TYPE alertmanager_build_info gauge
alertmanager_build_info{branch="HEAD",goversion="go1.19.4",revision="258fab7cdd551f2cf251ed0348f0ad7289aee789",version="0.25.0"} 1
# HELP alertmanager_cluster_alive_messages_total Total number of received alive messages.
# TYPE alertmanager_cluster_alive_messages_total counter
alertmanager_cluster_alive_messages_total{peer="01GMZQ4MTNBV8TSKEKHNG6F9BR"} 1
# HELP alertmanager_cluster_enabled Indicates whether the clustering is enabled or not.
# TYPE alertmanager_cluster_enabled gauge
alertmanager_cluster_enabled 1
# HELP alertmanager_cluster_failed_peers Number indicating the current number of failed peers in the cluster.
# TYPE alertmanager_cluster_failed_peers gauge
alertmanager_cluster_failed_peers 0
# HELP alertmanager_cluster_health_score Health score of the cluster. Lower values are better and zero means 'totally healthy'.
# TYPE alertmanager_cluster_health_score gauge
alertmanager_cluster_health_score 0
# HELP alertmanager_cluster_members Number indicating current number of members in cluster.
# TYPE alertmanager_cluster_members gauge
alertmanager_cluster_members 1
# HELP alertmanager_cluster_messages_pruned_total Total number of cluster messages pruned.
# TYPE alertmanager_cluster_messages_pruned_total counter
alertmanager_cluster_messages_pruned_total 0
# HELP alertmanager_cluster_messages_queued Number of cluster messages which are queued.
# TYPE alertmanager_cluster_messages_queued gauge
alertmanager_cluster_messages_queued 3
# HELP alertmanager_cluster_messages_received_size_total Total size of cluster messages received.
# TYPE alertmanager_cluster_messages_received_size_total counter
alertmanager_cluster_messages_received_size_total{msg_type="full_state"} 0
alertmanager_cluster_messages_received_size_total{msg_type="update"} 0
# HELP alertmanager_cluster_messages_received_total Total number of cluster messages received.
# TYPE alertmanager_cluster_messages_received_total counter
alertmanager_cluster_messages_received_total{msg_type="full_state"} 0
alertmanager_cluster_messages_received_total{msg_type="update"} 0
# HELP alertmanager_cluster_messages_sent_size_total Total size of cluster messages sent.
# TYPE alertmanager_cluster_messages_sent_size_total counter
alertmanager_cluster_messages_sent_size_total{msg_type="full_state"} 0
alertmanager_cluster_messages_sent_size_total{msg_type="update"} 0
# HELP alertmanager_cluster_messages_sent_total Total number of cluster messages sent.
# TYPE alertmanager_cluster_messages_sent_total counter
alertmanager_cluster_messages_sent_total{msg_type="full_state"} 0
alertmanager_cluster_messages_sent_total{msg_type="update"} 0
# HELP alertmanager_cluster_peer_info A metric with a constant '1' value labeled by peer name.
# TYPE alertmanager_cluster_peer_info gauge
alertmanager_cluster_peer_info{peer="01GMZQ4MTNBV8TSKEKHNG6F9BR"} 1
# HELP alertmanager_cluster_peers_joined_total A counter of the number of peers that have joined.
# TYPE alertmanager_cluster_peers_joined_total counter
alertmanager_cluster_peers_joined_total 1
# HELP alertmanager_cluster_peers_left_total A counter of the number of peers that have left.
# TYPE alertmanager_cluster_peers_left_total counter
alertmanager_cluster_peers_left_total 0
# HELP alertmanager_cluster_peers_update_total A counter of the number of peers that have updated metadata.
# TYPE alertmanager_cluster_peers_update_total counter
alertmanager_cluster_peers_update_total 0
# HELP alertmanager_cluster_reconnections_failed_total A counter of the number of failed cluster peer reconnection attempts.
# TYPE alertmanager_cluster_reconnections_failed_total counter
alertmanager_cluster_reconnections_failed_total 0
# HELP alertmanager_cluster_reconnections_total A counter of the number of cluster peer reconnections.
# TYPE alertmanager_cluster_reconnections_total counter
alertmanager_cluster_reconnections_total 0
# HELP alertmanager_cluster_refresh_join_failed_total A counter of the number of failed cluster peer joined attempts via refresh.
# TYPE alertmanager_cluster_refresh_join_failed_total counter
alertmanager_cluster_refresh_join_failed_total 0
# HELP alertmanager_cluster_refresh_join_total A counter of the number of cluster peer joined via refresh.
# TYPE alertmanager_cluster_refresh_join_total counter
alertmanager_cluster_refresh_join_total 0
# HELP alertmanager_config_hash Hash of the currently loaded alertmanager configuration.
# TYPE alertmanager_config_hash gauge
alertmanager_config_hash 6.2645753076152e+13
# HELP alertmanager_config_last_reload_success_timestamp_seconds Timestamp of the last successful configuration reload.
# TYPE alertmanager_config_last_reload_success_timestamp_seconds gauge
alertmanager_config_last_reload_success_timestamp_seconds 1.6718067352273526e+09
# HELP alertmanager_config_last_reload_successful Whether the last configuration reload attempt was successful.
# TYPE alertmanager_config_last_reload_successful gauge
alertmanager_config_last_reload_successful 1
# HELP alertmanager_dispatcher_aggregation_groups Number of active aggregation groups
# TYPE alertmanager_dispatcher_aggregation_groups gauge
alertmanager_dispatcher_aggregation_groups 10
# HELP alertmanager_dispatcher_alert_processing_duration_seconds Summary of latencies for the processing of alerts.
# TYPE alertmanager_dispatcher_alert_processing_duration_seconds summary
alertmanager_dispatcher_alert_processing_duration_seconds_sum 0.0007815850000000002
alertmanager_dispatcher_alert_processing_duration_seconds_count 60
# HELP alertmanager_http_concurrency_limit_exceeded_total Total number of times an HTTP request failed because the concurrency limit was reached.
# TYPE alertmanager_http_concurrency_limit_exceeded_total counter
alertmanager_http_concurrency_limit_exceeded_total{method="get"} 0
# HELP alertmanager_http_requests_in_flight Current number of HTTP requests being processed.
# TYPE alertmanager_http_requests_in_flight gauge
alertmanager_http_requests_in_flight{method="get"} 1
# HELP alertmanager_integrations Number of configured integrations.
# TYPE alertmanager_integrations gauge
alertmanager_integrations 0
# HELP alertmanager_marked_alerts How many alerts by state are currently marked in the Alertmanager regardless of their expiry.
# TYPE alertmanager_marked_alerts gauge
alertmanager_marked_alerts{state="active"} 8
alertmanager_marked_alerts{state="suppressed"} 4
alertmanager_marked_alerts{state="unprocessed"} 0
# HELP alertmanager_nflog_gc_duration_seconds Duration of the last notification log garbage collection cycle.
# TYPE alertmanager_nflog_gc_duration_seconds summary
alertmanager_nflog_gc_duration_seconds_sum 0
alertmanager_nflog_gc_duration_seconds_count 0
# HELP alertmanager_nflog_gossip_messages_propagated_total Number of received gossip messages that have been further gossiped.
# TYPE alertmanager_nflog_gossip_messages_propagated_total counter
alertmanager_nflog_gossip_messages_propagated_total 0
# HELP alertmanager_nflog_queries_total Number of notification log queries were received.
# TYPE alertmanager_nflog_queries_total counter
alertmanager_nflog_queries_total 0
# HELP alertmanager_nflog_query_duration_seconds Duration of notification log query evaluation.
# TYPE alertmanager_nflog_query_duration_seconds histogram
alertmanager_nflog_query_duration_seconds_bucket{le="0.005"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="0.01"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="0.025"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="0.05"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="0.1"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="0.25"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="0.5"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="1"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="2.5"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="5"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="10"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="+Inf"} 0
alertmanager_nflog_query_duration_seconds_sum 0
alertmanager_nflog_query_duration_seconds_count 0
# HELP alertmanager_nflog_query_errors_total Number notification log received queries that failed.
# TYPE alertmanager_nflog_query_errors_total counter
alertmanager_nflog_query_errors_total 0
# HELP alertmanager_nflog_snapshot_duration_seconds Duration of the last notification log snapshot.
# TYPE alertmanager_nflog_snapshot_duration_seconds summary
alertmanager_nflog_snapshot_duration_seconds_sum 0
alertmanager_nflog_snapshot_duration_seconds_count 0
# HELP alertmanager_nflog_snapshot_size_bytes Size of the last notification log snapshot in bytes.
# TYPE alertmanager_nflog_snapshot_size_bytes gauge
alertmanager_nflog_snapshot_size_bytes 0
# HELP alertmanager_notification_latency_seconds The latency of notifications in seconds.
# TYPE alertmanager_notification_latency_seconds histogram
alertmanager_notification_latency_seconds_bucket{integration="email",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="email",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="email",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="email",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="email",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="email",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="email"} 0
alertmanager_notification_latency_seconds_count{integration="email"} 0
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="opsgenie"} 0
alertmanager_notification_latency_seconds_count{integration="opsgenie"} 0
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="pagerduty"} 0
alertmanager_notification_latency_seconds_count{integration="pagerduty"} 0
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="pushover"} 0
alertmanager_notification_latency_seconds_count{integration="pushover"} 0
alertmanager_notification_latency_seconds_bucket{integration="slack",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="slack",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="slack",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="slack",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="slack",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="slack",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="slack"} 0
alertmanager_notification_latency_seconds_count{integration="slack"} 0
alertmanager_notification_latency_seconds_bucket{integration="sns",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="sns",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="sns",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="sns",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="sns",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="sns",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="sns"} 0
alertmanager_notification_latency_seconds_count{integration="sns"} 0
alertmanager_notification_latency_seconds_bucket{integration="telegram",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="telegram",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="telegram",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="telegram",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="telegram",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="telegram",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="telegram"} 0
alertmanager_notification_latency_seconds_count{integration="telegram"} 0
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="victorops"} 0
alertmanager_notification_latency_seconds_count{integration="victorops"} 0
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="webhook"} 0
alertmanager_notification_latency_seconds_count{integration="webhook"} 0
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="wechat"} 0
alertmanager_notification_latency_seconds_count{integration="wechat"} 0
# HELP alertmanager_notification_requests_failed_total The total number of failed notification requests.
# TYPE alertmanager_notification_requests_failed_total counter
alertmanager_notification_requests_failed_total{integration="email"} 0
alertmanager_notification_requests_failed_total{integration="opsgenie"} 0
alertmanager_notification_requests_failed_total{integration="pagerduty"} 0
alertmanager_notification_requests_failed_total{integration="pushover"} 0
alertmanager_notification_requests_failed_total{integration="slack"} 0
alertmanager_notification_requests_failed_total{integration="sns"} 0
alertmanager_notification_requests_failed_total{integration="telegram"} 0
alertmanager_notification_requests_failed_total{integration="victorops"} 0
alertmanager_notification_requests_failed_total{integration="webhook"} 0
alertmanager_notification_requests_failed_total{integration="wechat"} 0
# HELP alertmanager_notification_requests_total The total number of attempted notification requests.
# TYPE alertmanager_notification_requests_total counter
alertmanager_notification_requests_total{integration="email"} 0
alertmanager_notification_requests_total{integration="opsgenie"} 0
alertmanager_notification_requests_total{integration="pagerduty"} 0
alertmanager_notification_requests_total{integration="pushover"} 0
alertmanager_notification_requests_total{integration="slack"} 0
alertmanager_notification_requests_total{integration="sns"} 0
alertmanager_notification_requests_total{integration="telegram"} 0
alertmanager_notification_requests_total{integration="victorops"} 0
alertmanager_notification_requests_total{integration="webhook"} 0
alertmanager_notification_requests_total{integration="wechat"} 0
# HELP alertmanager_notifications_failed_total The total number of failed notifications.
# TYPE alertmanager_notifications_failed_total counter
alertmanager_notifications_failed_total{integration="email"} 0
alertmanager_notifications_failed_total{integration="opsgenie"} 0
alertmanager_notifications_failed_total{integration="pagerduty"} 0
alertmanager_notifications_failed_total{integration="pushover"} 0
alertmanager_notifications_failed_total{integration="slack"} 0
alertmanager_notifications_failed_total{integration="sns"} 0
alertmanager_notifications_failed_total{integration="telegram"} 0
alertmanager_notifications_failed_total{integration="victorops"} 0
alertmanager_notifications_failed_total{integration="webhook"} 0
alertmanager_notifications_failed_total{integration="wechat"} 0
# HELP alertmanager_notifications_total The total number of attempted notifications.
# TYPE alertmanager_notifications_total counter
alertmanager_notifications_total{integration="email"} 0
alertmanager_notifications_total{integration="opsgenie"} 0
alertmanager_notifications_total{integration="pagerduty"} 0
alertmanager_notifications_total{integration="pushover"} 0
alertmanager_notifications_total{integration="slack"} 0
alertmanager_notifications_total{integration="sns"} 0
alertmanager_notifications_total{integration="telegram"} 0
alertmanager_notifications_total{integration="victorops"} 0
alertmanager_notifications_total{integration="webhook"} 0
alertmanager_notifications_total{integration="wechat"} 0
# HELP alertmanager_oversize_gossip_message_duration_seconds Duration of oversized gossip message requests.
# TYPE alertmanager_oversize_gossip_message_duration_seconds histogram
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.005"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.01"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.025"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.05"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.1"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.25"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.5"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="1"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="2.5"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="5"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="10"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="+Inf"} 0
alertmanager_oversize_gossip_message_duration_seconds_sum{key="nfl"} 0
alertmanager_oversize_gossip_message_duration_seconds_count{key="nfl"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.005"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.01"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.025"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.05"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.1"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.25"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.5"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="1"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="2.5"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="5"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="10"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="+Inf"} 0
alertmanager_oversize_gossip_message_duration_seconds_sum{key="sil"} 0
alertmanager_oversize_gossip_message_duration_seconds_count{key="sil"} 0
# HELP alertmanager_oversized_gossip_message_dropped_total Number of oversized gossip messages that were dropped due to a full message queue.
# TYPE alertmanager_oversized_gossip_message_dropped_total counter
alertmanager_oversized_gossip_message_dropped_total{key="nfl"} 0
alertmanager_oversized_gossip_message_dropped_total{key="sil"} 0
# HELP alertmanager_oversized_gossip_message_failure_total Number of oversized gossip message sends that failed.
# TYPE alertmanager_oversized_gossip_message_failure_total counter
alertmanager_oversized_gossip_message_failure_total{key="nfl"} 0
alertmanager_oversized_gossip_message_failure_total{key="sil"} 0
# HELP alertmanager_oversized_gossip_message_sent_total Number of oversized gossip message sent.
# TYPE alertmanager_oversized_gossip_message_sent_total counter
alertmanager_oversized_gossip_message_sent_total{key="nfl"} 0
alertmanager_oversized_gossip_message_sent_total{key="sil"} 0
# HELP alertmanager_peer_position Position the Alertmanager instance believes it's in. The position determines a peer's behavior in the cluster.
# TYPE alertmanager_peer_position gauge
alertmanager_peer_position 0
# HELP alertmanager_receivers Number of configured receivers.
# TYPE alertmanager_receivers gauge
alertmanager_receivers 3
# HELP alertmanager_silences How many silences by state.
# TYPE alertmanager_silences gauge
alertmanager_silences{state="active"} 3
alertmanager_silences{state="expired"} 0
alertmanager_silences{state="pending"} 0
# HELP alertmanager_silences_gc_duration_seconds Duration of the last silence garbage collection cycle.
# TYPE alertmanager_silences_gc_duration_seconds summary
alertmanager_silences_gc_duration_seconds_sum 0
alertmanager_silences_gc_duration_seconds_count 0
# HELP alertmanager_silences_gossip_messages_propagated_total Number of received gossip messages that have been further gossiped.
# TYPE alertmanager_silences_gossip_messages_propagated_total counter
alertmanager_silences_gossip_messages_propagated_total 0
# HELP alertmanager_silences_queries_total How many silence queries were received.
# TYPE alertmanager_silences_queries_total counter
alertmanager_silences_queries_total 24
# HELP alertmanager_silences_query_duration_seconds Duration of silence query evaluation.
# TYPE alertmanager_silences_query_duration_seconds histogram
alertmanager_silences_query_duration_seconds_bucket{le="0.005"} 25
alertmanager_silences_query_duration_seconds_bucket{le="0.01"} 25
alertmanager_silences_query_duration_seconds_bucket{le="0.025"} 25
alertmanager_silences_query_duration_seconds_bucket{le="0.05"} 25
alertmanager_silences_query_duration_seconds_bucket{le="0.1"} 25
alertmanager_silences_query_duration_seconds_bucket{le="0.25"} 25
alertmanager_silences_query_duration_seconds_bucket{le="0.5"} 25
alertmanager_silences_query_duration_seconds_bucket{le="1"} 25
alertmanager_silences_query_duration_seconds_bucket{le="2.5"} 25
alertmanager_silences_query_duration_seconds_bucket{le="5"} 25
alertmanager_silences_query_duration_seconds_bucket{le="10"} 25
alertmanager_silences_query_duration_seconds_bucket{le="+Inf"} 25
alertmanager_silences_query_duration_seconds_sum 0.000327457
alertmanager_silences_query_duration_seconds_count 25
# HELP alertmanager_silences_query_errors_total How many silence received queries did not succeed.
# TYPE alertmanager_silences_query_errors_total counter
alertmanager_silences_query_errors_total 0
# HELP alertmanager_silences_snapshot_duration_seconds Duration of the last silence snapshot.
# TYPE alertmanager_silences_snapshot_duration_seconds summary
alertmanager_silences_snapshot_duration_seconds_sum 0
alertmanager_silences_snapshot_duration_seconds_count 0
# HELP alertmanager_silences_snapshot_size_bytes Size of the last silence snapshot in bytes.
# TYPE alertmanager_silences_snapshot_size_bytes gauge
alertmanager_silences_snapshot_size_bytes 0
# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.
# TYPE go_gc_duration_seconds summary
go_gc_duration_seconds{quantile="0"} 7.8916e-05
go_gc_duration_seconds{quantile="0.25"} 0.000127251
go_gc_duration_seconds{quantile="0.5"} 0.000141542
go_gc_duration_seconds{quantile="0.75"} 0.000170125
go_gc_duration_seconds{quantile="1"} 0.000294374
go_gc_duration_seconds_sum 0.000812208
go_gc_duration_seconds_count 5
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
go_goroutines 42
# HELP go_info Information about the Go environment.
# TYPE go_info gauge
go_info{version="go1.19.4"} 1
# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
# TYPE go_memstats_alloc_bytes gauge
go_memstats_alloc_bytes 7.02308e+06
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
# TYPE go_memstats_alloc_bytes_total counter
go_memstats_alloc_bytes_total 1.6316232e+07
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
# TYPE go_memstats_buck_hash_sys_bytes gauge
go_memstats_buck_hash_sys_bytes 1.459997e+06
# HELP go_memstats_frees_total Total number of frees.
# TYPE go_memstats_frees_total counter
go_memstats_frees_total 106507
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
# TYPE go_memstats_gc_sys_bytes gauge
go_memstats_gc_sys_bytes 5.384112e+06
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
# TYPE go_memstats_heap_alloc_bytes gauge
go_memstats_heap_alloc_bytes 7.02308e+06
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
# TYPE go_memstats_heap_idle_bytes gauge
go_memstats_heap_idle_bytes 860160
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
# TYPE go_memstats_heap_inuse_bytes gauge
go_memstats_heap_inuse_bytes 1.0805248e+07
# HELP go_memstats_heap_objects Number of allocated objects.
# TYPE go_memstats_heap_objects gauge
go_memstats_heap_objects 39127
# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS.
# TYPE go_memstats_heap_released_bytes gauge
go_memstats_heap_released_bytes 393216
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
# TYPE go_memstats_heap_sys_bytes gauge
go_memstats_heap_sys_bytes 1.1665408e+07
# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
# TYPE go_memstats_last_gc_time_seconds gauge
go_memstats_last_gc_time_seconds 1.671806735225899e+09
# HELP go_memstats_lookups_total Total number of pointer lookups.
# TYPE go_memstats_lookups_total counter
go_memstats_lookups_total 0
# HELP go_memstats_mallocs_total Total number of mallocs.
# TYPE go_memstats_mallocs_total counter
go_memstats_mallocs_total 145634
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
# TYPE go_memstats_mcache_inuse_bytes gauge
go_memstats_mcache_inuse_bytes 4800
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
# TYPE go_memstats_mcache_sys_bytes gauge
go_memstats_mcache_sys_bytes 15600
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
# TYPE go_memstats_mspan_inuse_bytes gauge
go_memstats_mspan_inuse_bytes 184176
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
# TYPE go_memstats_mspan_sys_bytes gauge
go_memstats_mspan_sys_bytes 195264
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
# TYPE go_memstats_next_gc_bytes gauge
go_memstats_next_gc_bytes 1.2898576e+07
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
# TYPE go_memstats_other_sys_bytes gauge
go_memstats_other_sys_bytes 904843
# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
# TYPE go_memstats_stack_inuse_bytes gauge
go_memstats_stack_inuse_bytes 917504
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
# TYPE go_memstats_stack_sys_bytes gauge
go_memstats_stack_sys_bytes 917504
# HELP go_memstats_sys_bytes Number of bytes obtained from system.
# TYPE go_memstats_sys_bytes gauge
go_memstats_sys_bytes 2.0542728e+07
# HELP go_threads Number of OS threads created.
# TYPE go_threads gauge
go_threads 9
# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
# TYPE process_cpu_seconds_total counter
process_cpu_seconds_total 0.26
# HELP process_max_fds Maximum number of open file descriptors.
# TYPE process_max_fds gauge
process_max_fds 1.048576e+06
# HELP process_open_fds Number of open file descriptors.
# TYPE process_open_fds gauge
process_open_fds 11
# HELP process_resident_memory_bytes Resident memory size in bytes.
# TYPE process_resident_memory_bytes gauge
process_resident_memory_bytes 3.3021952e+07
# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
# TYPE process_start_time_seconds gauge
process_start_time_seconds 1.67180673448e+09
# HELP process_virtual_memory_bytes Virtual memory size in bytes.
# TYPE process_virtual_memory_bytes gauge
process_virtual_memory_bytes 7.55011584e+08
# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.
# TYPE process_virtual_memory_max_bytes gauge
process_virtual_memory_max_bytes 1.8446744073709552e+19
# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.
# TYPE promhttp_metric_handler_requests_in_flight gauge
promhttp_metric_handler_requests_in_flight 1
# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.
# TYPE promhttp_metric_handler_requests_total counter
promhttp_metric_handler_requests_total{code="200"} 0
promhttp_metric_handler_requests_total{code="500"} 0
promhttp_metric_handler_requests_total{code="503"} 0


@@ -5,7 +5,7 @@ DOCKER_ARGS := --name $(DOCKER_NAME) --rm -d -p 9093:9093 \
 	-v $(CURDIR)/alertmanager.yml:/etc/alertmanager/alertmanager.yml
 
 # list of Alertmanager versions to generate mock files for
-VERSIONS := 0.22.0 0.22.1 0.23.0 0.24.0
+VERSIONS := 0.22.0 0.22.1 0.23.0 0.24.0 0.25.0
 
 %/.ok: livemock.py
 	$(eval VERSION := $(word 1, $(subst /, ,$@)))
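With the new VERSIONS entry, running this Makefile would presumably start Alertmanager 0.25.0 in Docker and let livemock.py capture its API responses into internal/mock/0.25.0/, touching the .ok marker when capture succeeds. A hedged sketch of how test code could then enumerate the captured versions (the directory layout comes from this commit; the helper itself is hypothetical):

// Hypothetical helper: list mock Alertmanager versions that finished capturing,
// i.e. directories under internal/mock/ that contain the .ok marker file.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func capturedVersions(mockDir string) ([]string, error) {
	entries, err := os.ReadDir(mockDir)
	if err != nil {
		return nil, err
	}
	var versions []string
	for _, e := range entries {
		if !e.IsDir() {
			continue
		}
		if _, err := os.Stat(filepath.Join(mockDir, e.Name(), ".ok")); err == nil {
			versions = append(versions, e.Name())
		}
	}
	return versions, nil
}

func main() {
	versions, err := capturedVersions("internal/mock")
	if err != nil {
		panic(err)
	}
	fmt.Println(versions) // should now include "0.25.0"
}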