fix(docs): fix typos

Lukasz Mierzwa authored 2026-03-11 13:03:13 +00:00; committed by Łukasz Mierzwa
parent 72f73504d3
commit ca4364cf9d
21 changed files with 1709 additions and 29 deletions


@@ -5,6 +5,7 @@
 ### Changed
 - Refactored light and dark theme to use a single CSS bundle.
+- Improved backend API performance.
 ## v0.126
@@ -183,7 +184,7 @@
 ### Fixed
 - Correctly set filter history.
-- Correctly escape label values when quering Prometheus for alert history.
+- Correctly escape label values when querying Prometheus for alert history.
 ## v0.101
@@ -457,12 +458,12 @@
 ### Fixed
-- Not all labels were stripped when using `lables:keep` or `labels:strip`
+- Not all labels were stripped when using `labels:keep` or `labels:strip`
   option #2585.
 ### Added
-- `healthcheck:visible` alertmanager option to control if healtcheck alerts
+- `healthcheck:visible` alertmanager option to control if healthcheck alerts
   should be visible in the UI #2614.
 ## v0.78
@@ -473,7 +474,7 @@
 ### Added
-- Added support for DeadMansSwitch alerts via `healtcheck:alerts` option
+- Added support for DeadMansSwitch alerts via `healthcheck:alerts` option
   on alertmanager upstream configuration #2512.
   Example:
@@ -484,7 +485,7 @@
     expr: vector(1)
   ```
-- Add healtcheck configuration to karma:
+- Add healthcheck configuration to karma:
   ```YAML
   alertmanager:


@@ -29,7 +29,7 @@ a breaking change run tests using:
 ## Vendoring dependencies
 [Go modules](https://github.com/golang/go/wiki/Modules) are used for managing
-dependecies. After adding new or removing exitsting depenencies please run
+dependencies. After adding new or removing existing dependencies please run
     go mod tidy


@@ -232,7 +232,7 @@ To finally compile `karma` the binary run:
     make
 Note that building locally from sources requires Go, nodejs and yarn.
-See Docker build options below for instructions on building from withing docker
+See Docker build options below for instructions on building from within docker
 container.
 ## Running
## Running


@@ -42,12 +42,12 @@ type queryResult struct {
 func generateV1Matrix(series []seriesValues, step time.Duration) queryResult {
 	r := queryResult{ResultType: "matrix", Result: model.Matrix{}}
-	for _, serie := range series {
+	for _, sv := range series {
 		ss := model.SampleStream{
-			Metric: serie.metric,
+			Metric: sv.metric,
 		}
 		ts := time.Now()
-		for _, val := range serie.values {
+		for _, val := range sv.values {
 			sp := model.SamplePair{
 				Timestamp: model.TimeFromUnix(ts.Unix()),
 				Value: model.SampleValue(val),
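
This hunk renames a loop variable inside a test helper that fabricates a Prometheus `matrix` query result. For reference, here is a self-contained sketch of the same construction on top of `prometheus/common/model`; the `seriesValues` shape and the backwards time-stepping are assumptions based on the visible fragment:

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

// seriesValues mirrors the assumed shape of the struct used in the hunk above.
type seriesValues struct {
	metric model.Metric
	values []float64
}

// buildMatrix turns raw float values into a model.Matrix, stepping the
// timestamp backwards so the first value is the most recent sample.
func buildMatrix(series []seriesValues, step time.Duration) model.Matrix {
	m := model.Matrix{}
	for _, sv := range series {
		ss := model.SampleStream{Metric: sv.metric}
		ts := time.Now()
		for _, val := range sv.values {
			ss.Values = append(ss.Values, model.SamplePair{
				Timestamp: model.TimeFromUnix(ts.Unix()),
				Value:     model.SampleValue(val),
			})
			ts = ts.Add(-step)
		}
		m = append(m, &ss)
	}
	return m
}

func main() {
	m := buildMatrix([]seriesValues{{
		metric: model.Metric{"alertname": "Host_Down"},
		values: []float64{1, 1, 0},
	}}, time.Minute)
	fmt.Println(m)
}
```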


@@ -44,7 +44,7 @@ var (
 	// data from Alertmanager
 	ticker *time.Ticker
-	// apiCache will be used to keep short lived copy of JSON reponses generated for the UI
+	// apiCache will be used to keep short lived copy of JSON responses generated for the UI
 	// If there are requests with the same filter we should respond from cache
 	// rather than do all the filtering every time
 	apiCache *lru.Cache[string, []byte]
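
`lru.Cache[string, []byte]` is the generic LRU cache from `hashicorp/golang-lru/v2`. A minimal sketch of the respond-from-cache idea the comment describes; the cache size, key scheme, and render function are illustrative assumptions, not karma's actual code:

```go
package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

func main() {
	// Keep up to 1024 rendered JSON responses, keyed by the filter string.
	apiCache, err := lru.New[string, []byte](1024)
	if err != nil {
		panic(err)
	}

	render := func(filter string) []byte {
		// Same filter seen before: reuse the JSON we already generated.
		if body, ok := apiCache.Get(filter); ok {
			return body
		}
		// Cache miss: stand-in for the expensive filtering + encoding step.
		body := []byte(`{"filters":"` + filter + `"}`)
		apiCache.Add(filter, body)
		return body
	}

	fmt.Println(string(render("@state=active")))
	fmt.Println(string(render("@state=active"))) // second call is served from cache
}
```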


@@ -125,16 +125,16 @@ func (c *karmaCollector) Collect(ch chan<- prometheus.Metric) {
 	}
 	// publish metrics using calculated values
-	for reciver, count := range groupsByReceiver {
+	for receiver, count := range groupsByReceiver {
 		ch <- prometheus.MustNewConstMetric(
 			c.collectedGroups,
 			prometheus.GaugeValue,
 			count,
 			am.Name,
-			reciver,
+			receiver,
 		)
 	}
-	for reciver, byState := range alertsByReceiverByState {
+	for receiver, byState := range alertsByReceiverByState {
 		for state, count := range byState {
 			ch <- prometheus.MustNewConstMetric(
 				c.collectedAlerts,
@@ -142,7 +142,7 @@ func (c *karmaCollector) Collect(ch chan<- prometheus.Metric) {
 				count,
 				am.Name,
 				state,
-				reciver,
+				receiver,
 			)
 		}
 	}
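
This hunk uses the standard custom-collector idiom from `prometheus/client_golang`: compute values first, then emit one `MustNewConstMetric` per (alertmanager, receiver) pair inside `Collect`. A self-contained sketch with an illustrative metric name and hardcoded data:

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

type groupsCollector struct {
	desc *prometheus.Desc
	// groupsByReceiver would normally be computed from Alertmanager data.
	groupsByReceiver map[string]float64
}

func (c *groupsCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.desc }

func (c *groupsCollector) Collect(ch chan<- prometheus.Metric) {
	for receiver, count := range c.groupsByReceiver {
		ch <- prometheus.MustNewConstMetric(
			c.desc, prometheus.GaugeValue, count, "default", receiver,
		)
	}
}

func main() {
	c := &groupsCollector{
		desc: prometheus.NewDesc(
			"karma_collected_groups_count", // illustrative name
			"Number of alert groups collected per alertmanager and receiver.",
			[]string{"alertmanager", "receiver"}, nil,
		),
		groupsByReceiver: map[string]float64{"by-name": 5, "by-cluster-service": 7},
	}
	reg := prometheus.NewRegistry()
	reg.MustRegister(c)
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	_ = http.ListenAndServe(":8080", nil)
}
```

Const metrics are rebuilt on every scrape, so a receiver that disappears from the computed map simply stops being exported; there is no stale gauge to delete.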


@@ -50,7 +50,7 @@ func NewAlertmanagerProxy(alertmanager *alertmanager.Alertmanager) *httputil.Rev
 			req.Header.Set(key, val)
 		}
-		// drop Accept-Encoding header so we always get uncompressed reponses from
+		// drop Accept-Encoding header so we always get uncompressed responses from
 		// upstream, there's a gzip middleware that's global so we don't want it
 		// to gzip twice
 		req.Header.Del("Accept-Encoding")
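
Deleting `Accept-Encoding` in the proxy director forces the upstream to reply uncompressed, so the global gzip middleware compresses the final response exactly once instead of double-compressing. A minimal sketch of the technique (the upstream URL is a placeholder):

```go
package main

import (
	"net/http"
	"net/http/httputil"
	"net/url"
)

func main() {
	upstream, _ := url.Parse("http://alertmanager.example.com") // placeholder
	proxy := httputil.NewSingleHostReverseProxy(upstream)

	director := proxy.Director
	proxy.Director = func(req *http.Request) {
		director(req)
		// Force an uncompressed upstream response; a gzip middleware wrapped
		// around this handler then compresses the final response exactly once.
		req.Header.Del("Accept-Encoding")
	}

	_ = http.ListenAndServe(":8080", proxy)
}
```

Wrapping the default `Director` keeps the stock URL rewriting; Go 1.20 added `Rewrite` as an alternative hook, but the pattern above still works.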


@@ -182,7 +182,7 @@ func httpServer(ts *testscript.TestScript, _ bool, args []string) {
 	for {
 		try++
 		if try > 30 {
-			ts.Fatalf("HTTP server didn't pass healt checks after %d check(s)", try)
+			ts.Fatalf("HTTP server didn't pass health checks after %d check(s)", try)
 		}
 		ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
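
A fixed retry budget plus a per-attempt `context.WithTimeout` is a common readiness-polling shape for test harnesses. A standalone sketch, with the URL and the sleep interval as assumptions:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"
)

// waitForServer polls url until it answers with HTTP 200, giving up after 30 tries.
func waitForServer(url string) error {
	try := 0
	for {
		try++
		if try > 30 {
			return fmt.Errorf("HTTP server didn't pass health checks after %d check(s)", try)
		}
		ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
		req, _ := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
		resp, err := http.DefaultClient.Do(req)
		cancel()
		if err == nil {
			resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				return nil
			}
		}
		time.Sleep(time.Second) // retry interval is an assumption
	}
}

func main() {
	fmt.Println(waitForServer("http://localhost:8080/health"))
}
```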


@@ -4,7 +4,7 @@
 cmp stderr stderr.txt
 -- stderr.txt --
-level=fatal msg="Execution failed" error="invalid cors.credentials value 'foo' for alertmanager 'am1', allowed options: omit, inclue, same-origin"
+level=fatal msg="Execution failed" error="invalid cors.credentials value 'foo' for alertmanager 'am1', allowed options: omit, include, same-origin"
 -- karma.yaml --
 alertmanager:
   servers:


@@ -935,7 +935,7 @@ func TestGzipMiddleware(t *testing.T) {
 		ce := h.Get("Content-Encoding")
 		if ce != "gzip" {
-			t.Errorf("Inavlid 'Content-Encoding' in response for '%s', expected 'gzip', got '%s'", path, ce)
+			t.Errorf("Invalid 'Content-Encoding' in response for '%s', expected 'gzip', got '%s'", path, ce)
 		}
 		bs := h.Get("Content-Length")
@@ -962,7 +962,7 @@ func TestGzipMiddlewareWithoutAcceptEncoding(t *testing.T) {
 		ce := h.Get("Content-Encoding")
 		if ce == "gzip" {
-			t.Errorf("Inavlid 'Content-Encoding' in response for '%s', expected '', got '%s'", path, ce)
+			t.Errorf("Invalid 'Content-Encoding' in response for '%s', expected '', got '%s'", path, ce)
 		}
 		bs := h.Get("Content-Length")
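
Both tests follow the standard `net/http/httptest` pattern for compression middleware: send one request advertising `Accept-Encoding: gzip` and one without, then assert on the `Content-Encoding` response header. A compact sketch of that pattern, using `NYTimes/gziphandler` purely as a stand-in for karma's own gzip middleware:

```go
package main

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/NYTimes/gziphandler"
)

func TestGzipNegotiation(t *testing.T) {
	// gziphandler stands in for the middleware under test here.
	h := gziphandler.GzipHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write(make([]byte, 2048)) // large enough to trigger compression
	}))

	req := httptest.NewRequest(http.MethodGet, "/", nil)
	req.Header.Set("Accept-Encoding", "gzip")
	rec := httptest.NewRecorder()
	h.ServeHTTP(rec, req)
	if ce := rec.Header().Get("Content-Encoding"); ce != "gzip" {
		t.Errorf("Invalid 'Content-Encoding', expected 'gzip', got '%s'", ce)
	}

	req = httptest.NewRequest(http.MethodGet, "/", nil) // no Accept-Encoding
	rec = httptest.NewRecorder()
	h.ServeHTTP(rec, req)
	if ce := rec.Header().Get("Content-Encoding"); ce == "gzip" {
		t.Errorf("Invalid 'Content-Encoding', expected '', got '%s'", ce)
	}
}
```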


@@ -283,9 +283,9 @@ alertmanager:
   [see docs](https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS/Errors/CORSNotSupportingCredentials).
 - `healthcheck:visible` - enable this option if you want `healthcheck:filters`
   alerts to be visible in karma UI. An alternative to enabling this option is to
-  route healcheck alerts to alertmanager receiver that isn't visible using default
+  route healthcheck alerts to alertmanager receiver that isn't visible using default
   karma filters.
-- `healthcheck:filters` - define healtchecks using alert filters. When set karma
+- `healthcheck:filters` - define healthchecks using alert filters. When set karma
   will search for alerts matching defined filters and show an error if it doesn't
   match anything. This can be used with a [Dead man's switch](https://en.wikipedia.org/wiki/Dead_man%27s_switch)
   style alert to notify karma users that there's a problem with alerting pipeline.
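
The matching semantics described here (search firing alerts for each configured filter set, raise an error when nothing matches) can be sketched compactly. The sketch below assumes simplified exact-match `label=value` filters and is not karma's actual filter parser:

```go
package main

import (
	"fmt"
	"strings"
)

// healthcheckOK checks every named healthcheck, each a list of "label=value"
// filters, and reports an error when no firing alert matches all of them.
func healthcheckOK(healthchecks map[string][]string, alerts []map[string]string) error {
	for name, filters := range healthchecks {
		matched := false
		for _, labels := range alerts {
			ok := true
			for _, f := range filters {
				k, v, found := strings.Cut(f, "=")
				if !found || labels[k] != v {
					ok = false
					break
				}
			}
			if ok {
				matched = true
				break
			}
		}
		if !matched {
			return fmt.Errorf("healthcheck %q didn't match any alert, the alerting pipeline might be broken", name)
		}
	}
	return nil
}

func main() {
	hc := map[string][]string{"prom": {"alertname=DeadMansSwitch"}}
	fmt.Println(healthcheckOK(hc, []map[string]string{{"alertname": "DeadMansSwitch"}})) // <nil>
}
```
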
@@ -301,7 +301,7 @@ alertmanager:
     expr: vector(1)
   ```
-- Add healtcheck configuration to karma:
+- Add healthcheck configuration to karma:
   ```YAML
   alertmanager:


@@ -55,7 +55,7 @@ func DedupAlerts() []models.AlertGroup {
 	for _, am := range upstreams {
 		if !am.healthchecksVisible {
 			if _, hc := am.IsHealthCheckAlert(&alert); hc != nil {
-				log.Debug().Str("fingerprint", alert.Fingerprint).Msg("Skipping healtcheck alert")
+				log.Debug().Str("fingerprint", alert.Fingerprint).Msg("Skipping healthcheck alert")
 				keep = false
 				break
 			}


@@ -355,7 +355,7 @@ func (config *configSchema) Read(flags *pflag.FlagSet) (string, error) {
 	}
 	if !slices.Contains([]string{"omit", "include", "same-origin"}, config.Alertmanager.CORS.Credentials) {
-		return "", fmt.Errorf("invalid alertmanager.cors.credentials value '%s', allowed options: omit, inclue, same-origin", config.Alertmanager.CORS.Credentials)
+		return "", fmt.Errorf("invalid alertmanager.cors.credentials value '%s', allowed options: omit, include, same-origin", config.Alertmanager.CORS.Credentials)
 	}
 	for i, s := range config.Alertmanager.Servers {
@@ -369,7 +369,7 @@ func (config *configSchema) Read(flags *pflag.FlagSet) (string, error) {
 			config.Alertmanager.Servers[i].CORS.Credentials = config.Alertmanager.CORS.Credentials
 		}
 		if !slices.Contains([]string{"omit", "include", "same-origin"}, config.Alertmanager.Servers[i].CORS.Credentials) {
-			return "", fmt.Errorf("invalid cors.credentials value '%s' for alertmanager '%s', allowed options: omit, inclue, same-origin", config.Alertmanager.Servers[i].CORS.Credentials, s.Name)
+			return "", fmt.Errorf("invalid cors.credentials value '%s' for alertmanager '%s', allowed options: omit, include, same-origin", config.Alertmanager.Servers[i].CORS.Credentials, s.Name)
 		}
 	}
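
`slices.Contains` (standard library since Go 1.21) gives a compact allow-list check for enum-style config values. A minimal sketch of the validation shape used in this hunk:

```go
package main

import (
	"fmt"
	"slices"
)

// validCredentials mirrors the allowed fetch() credentials modes checked above.
var validCredentials = []string{"omit", "include", "same-origin"}

func validateCredentials(name, value string) error {
	if !slices.Contains(validCredentials, value) {
		return fmt.Errorf(
			"invalid cors.credentials value '%s' for alertmanager '%s', allowed options: omit, include, same-origin",
			value, name)
	}
	return nil
}

func main() {
	fmt.Println(validateCredentials("am1", "foo"))     // error
	fmt.Println(validateCredentials("am1", "include")) // <nil>
}
```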

internal/mock/0.31.0/.ok (new empty file)


@@ -0,0 +1,874 @@
[
{
"alerts": [
{
"annotations": {
"alert": "Less than 10% disk space is free",
"dashboard": "http://localhost/dashboard.html"
},
"endsAt": "2026-03-11T12:54:18.427Z",
"fingerprint": "aae7a1432b5d2f1b",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2026-03-11T12:48:38.413Z",
"status": {
"inhibitedBy": [],
"mutedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2026-03-11T12:49:18.427Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Free_Disk_Space_Too_Low",
"cluster": "staging",
"disk": "sda",
"instance": "server5",
"job": "node_exporter"
}
}
],
"labels": {
"alertname": "Free_Disk_Space_Too_Low"
},
"receiver": {
"name": "by-name"
}
},
{
"alerts": [
{
"annotations": {
"help": "Example help annotation",
"summary": "Example summary",
"url": "http://localhost/example.html"
},
"endsAt": "2026-03-11T12:54:18.427Z",
"fingerprint": "54c2f185e49cfccb",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2026-03-11T12:48:38.413Z",
"status": {
"inhibitedBy": [],
"mutedBy": [],
"silencedBy": [
"810ccf7f-c957-474a-b383-7e76d66a4d3b"
],
"state": "suppressed"
},
"updatedAt": "2026-03-11T12:49:18.427Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "HTTP_Probe_Failed",
"cluster": "dev",
"instance": "web1",
"job": "node_exporter"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2026-03-11T12:54:18.427Z",
"fingerprint": "5cb0dd95e7f3d9c0",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2026-03-11T12:48:38.413Z",
"status": {
"inhibitedBy": [],
"mutedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2026-03-11T12:49:18.427Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "HTTP_Probe_Failed",
"cluster": "dev",
"instance": "web2",
"job": "node_exporter"
}
}
],
"labels": {
"alertname": "HTTP_Probe_Failed"
},
"receiver": {
"name": "by-name"
}
},
{
"alerts": [
{
"annotations": {
"summary": "Example summary",
"url": "http://localhost/example.html"
},
"endsAt": "2026-03-11T12:54:18.427Z",
"fingerprint": "7013294faf5f854d",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2026-03-11T12:48:38.413Z",
"status": {
"inhibitedBy": [],
"mutedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2026-03-11T12:49:18.427Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "prod",
"instance": "server1",
"ip": "127.0.0.1",
"job": "node_ping"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2026-03-11T12:54:18.427Z",
"fingerprint": "fa959d3911d1978b",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2026-03-11T12:48:38.413Z",
"status": {
"inhibitedBy": [],
"mutedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2026-03-11T12:49:18.427Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "prod",
"instance": "server2",
"ip": "127.0.0.2",
"job": "node_ping"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2026-03-11T12:54:18.427Z",
"fingerprint": "7f3a53482c303b65",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2026-03-11T12:48:38.413Z",
"status": {
"inhibitedBy": [],
"mutedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2026-03-11T12:49:18.427Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "staging",
"instance": "server3",
"ip": "127.0.0.3",
"job": "node_ping"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2026-03-11T12:54:18.427Z",
"fingerprint": "7f5c6e647877b3df",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2026-03-11T12:48:38.413Z",
"status": {
"inhibitedBy": [],
"mutedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2026-03-11T12:49:18.427Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "staging",
"instance": "server4",
"ip": "127.0.0.4",
"job": "node_ping"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2026-03-11T12:54:18.427Z",
"fingerprint": "b5dcc9c573def911",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2026-03-11T12:48:38.413Z",
"status": {
"inhibitedBy": [],
"mutedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2026-03-11T12:49:18.427Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "staging",
"instance": "server5",
"ip": "127.0.0.5",
"job": "node_ping"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2026-03-11T12:54:18.427Z",
"fingerprint": "588a14f7b4613621",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2026-03-11T12:48:38.413Z",
"status": {
"inhibitedBy": [],
"mutedBy": [],
"silencedBy": [
"dcb3b5d0-9f10-4baa-977a-70073a1899bd"
],
"state": "suppressed"
},
"updatedAt": "2026-03-11T12:49:18.427Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "dev",
"instance": "server6",
"ip": "127.0.0.6",
"job": "node_ping"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2026-03-11T12:54:18.427Z",
"fingerprint": "cc58f2ac8260fb97",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2026-03-11T12:48:38.413Z",
"status": {
"inhibitedBy": [],
"mutedBy": [],
"silencedBy": [
"9bd58938-25fd-41c5-aba3-9bc373074484",
"dcb3b5d0-9f10-4baa-977a-70073a1899bd"
],
"state": "suppressed"
},
"updatedAt": "2026-03-11T12:49:18.427Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "dev",
"instance": "server7",
"ip": "127.0.0.7",
"job": "node_ping"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2026-03-11T12:54:18.427Z",
"fingerprint": "ffbb5f178eb27f11",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2026-03-11T12:48:38.413Z",
"status": {
"inhibitedBy": [],
"mutedBy": [],
"silencedBy": [
"dcb3b5d0-9f10-4baa-977a-70073a1899bd"
],
"state": "suppressed"
},
"updatedAt": "2026-03-11T12:49:18.427Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "dev",
"instance": "server8",
"ip": "127.0.0.8",
"job": "node_ping"
}
}
],
"labels": {
"alertname": "Host_Down"
},
"receiver": {
"name": "by-name"
}
},
{
"alerts": [
{
"annotations": {
"alert": "Memory usage exceeding threshold",
"dashboard": "http://localhost/dashboard.html"
},
"endsAt": "2026-03-11T12:54:18.427Z",
"fingerprint": "7d0b114ebf24f857",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2026-03-11T12:48:38.413Z",
"status": {
"inhibitedBy": [],
"mutedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2026-03-11T12:49:18.427Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Memory_Usage_Too_High",
"cluster": "prod",
"instance": "server2",
"job": "node_exporter"
}
}
],
"labels": {
"alertname": "Memory_Usage_Too_High"
},
"receiver": {
"name": "by-name"
}
},
{
"alerts": [
{
"annotations": {
"alert": "Less than 10% disk space is free",
"dashboard": "http://localhost/dashboard.html"
},
"endsAt": "2026-03-11T12:54:18.427Z",
"fingerprint": "aae7a1432b5d2f1b",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2026-03-11T12:48:38.413Z",
"status": {
"inhibitedBy": [],
"mutedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2026-03-11T12:49:18.427Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Free_Disk_Space_Too_Low",
"cluster": "staging",
"disk": "sda",
"instance": "server5",
"job": "node_exporter"
}
}
],
"labels": {
"alertname": "Free_Disk_Space_Too_Low",
"cluster": "staging"
},
"receiver": {
"name": "by-cluster-service"
}
},
{
"alerts": [
{
"annotations": {
"help": "Example help annotation",
"summary": "Example summary",
"url": "http://localhost/example.html"
},
"endsAt": "2026-03-11T12:54:18.427Z",
"fingerprint": "54c2f185e49cfccb",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2026-03-11T12:48:38.413Z",
"status": {
"inhibitedBy": [],
"mutedBy": [],
"silencedBy": [
"810ccf7f-c957-474a-b383-7e76d66a4d3b"
],
"state": "suppressed"
},
"updatedAt": "2026-03-11T12:49:18.427Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "HTTP_Probe_Failed",
"cluster": "dev",
"instance": "web1",
"job": "node_exporter"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2026-03-11T12:54:18.427Z",
"fingerprint": "5cb0dd95e7f3d9c0",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2026-03-11T12:48:38.413Z",
"status": {
"inhibitedBy": [],
"mutedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2026-03-11T12:49:18.427Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "HTTP_Probe_Failed",
"cluster": "dev",
"instance": "web2",
"job": "node_exporter"
}
}
],
"labels": {
"alertname": "HTTP_Probe_Failed",
"cluster": "dev"
},
"receiver": {
"name": "by-cluster-service"
}
},
{
"alerts": [
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2026-03-11T12:54:18.427Z",
"fingerprint": "588a14f7b4613621",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2026-03-11T12:48:38.413Z",
"status": {
"inhibitedBy": [],
"mutedBy": [],
"silencedBy": [
"dcb3b5d0-9f10-4baa-977a-70073a1899bd"
],
"state": "suppressed"
},
"updatedAt": "2026-03-11T12:49:18.427Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "dev",
"instance": "server6",
"ip": "127.0.0.6",
"job": "node_ping"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2026-03-11T12:54:18.427Z",
"fingerprint": "cc58f2ac8260fb97",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2026-03-11T12:48:38.413Z",
"status": {
"inhibitedBy": [],
"mutedBy": [],
"silencedBy": [
"9bd58938-25fd-41c5-aba3-9bc373074484",
"dcb3b5d0-9f10-4baa-977a-70073a1899bd"
],
"state": "suppressed"
},
"updatedAt": "2026-03-11T12:49:18.427Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "dev",
"instance": "server7",
"ip": "127.0.0.7",
"job": "node_ping"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2026-03-11T12:54:18.427Z",
"fingerprint": "ffbb5f178eb27f11",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2026-03-11T12:48:38.413Z",
"status": {
"inhibitedBy": [],
"mutedBy": [],
"silencedBy": [
"dcb3b5d0-9f10-4baa-977a-70073a1899bd"
],
"state": "suppressed"
},
"updatedAt": "2026-03-11T12:49:18.427Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "dev",
"instance": "server8",
"ip": "127.0.0.8",
"job": "node_ping"
}
}
],
"labels": {
"alertname": "Host_Down",
"cluster": "dev"
},
"receiver": {
"name": "by-cluster-service"
}
},
{
"alerts": [
{
"annotations": {
"summary": "Example summary",
"url": "http://localhost/example.html"
},
"endsAt": "2026-03-11T12:54:18.427Z",
"fingerprint": "7013294faf5f854d",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2026-03-11T12:48:38.413Z",
"status": {
"inhibitedBy": [],
"mutedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2026-03-11T12:49:18.427Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "prod",
"instance": "server1",
"ip": "127.0.0.1",
"job": "node_ping"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2026-03-11T12:54:18.427Z",
"fingerprint": "fa959d3911d1978b",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2026-03-11T12:48:38.413Z",
"status": {
"inhibitedBy": [],
"mutedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2026-03-11T12:49:18.427Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "prod",
"instance": "server2",
"ip": "127.0.0.2",
"job": "node_ping"
}
}
],
"labels": {
"alertname": "Host_Down",
"cluster": "prod"
},
"receiver": {
"name": "by-cluster-service"
}
},
{
"alerts": [
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2026-03-11T12:54:18.427Z",
"fingerprint": "7f3a53482c303b65",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2026-03-11T12:48:38.413Z",
"status": {
"inhibitedBy": [],
"mutedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2026-03-11T12:49:18.427Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "staging",
"instance": "server3",
"ip": "127.0.0.3",
"job": "node_ping"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2026-03-11T12:54:18.427Z",
"fingerprint": "7f5c6e647877b3df",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2026-03-11T12:48:38.413Z",
"status": {
"inhibitedBy": [],
"mutedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2026-03-11T12:49:18.427Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "staging",
"instance": "server4",
"ip": "127.0.0.4",
"job": "node_ping"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "2026-03-11T12:54:18.427Z",
"fingerprint": "b5dcc9c573def911",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2026-03-11T12:48:38.413Z",
"status": {
"inhibitedBy": [],
"mutedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2026-03-11T12:49:18.427Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Host_Down",
"cluster": "staging",
"instance": "server5",
"ip": "127.0.0.5",
"job": "node_ping"
}
}
],
"labels": {
"alertname": "Host_Down",
"cluster": "staging"
},
"receiver": {
"name": "by-cluster-service"
}
},
{
"alerts": [
{
"annotations": {
"alert": "Memory usage exceeding threshold",
"dashboard": "http://localhost/dashboard.html"
},
"endsAt": "2026-03-11T12:54:18.427Z",
"fingerprint": "7d0b114ebf24f857",
"receivers": [
{
"name": "by-cluster-service"
},
{
"name": "by-name"
}
],
"startsAt": "2026-03-11T12:48:38.413Z",
"status": {
"inhibitedBy": [],
"mutedBy": [],
"silencedBy": [],
"state": "active"
},
"updatedAt": "2026-03-11T12:49:18.427Z",
"generatorURL": "http://localhost/prometheus",
"labels": {
"alertname": "Memory_Usage_Too_High",
"cluster": "prod",
"instance": "server2",
"job": "node_exporter"
}
}
],
"labels": {
"alertname": "Memory_Usage_Too_High",
"cluster": "prod"
},
"receiver": {
"name": "by-cluster-service"
}
}
]


@@ -0,0 +1,65 @@
[
{
"id": "810ccf7f-c957-474a-b383-7e76d66a4d3b",
"status": {
"state": "active"
},
"updatedAt": "2026-03-11T12:48:38.409Z",
"comment": "Silenced instance",
"createdBy": "john@example.com",
"endsAt": "2063-01-01T00:00:00.000Z",
"matchers": [
{
"isEqual": true,
"isRegex": false,
"name": "instance",
"value": "web1"
}
],
"startsAt": "2026-03-11T12:48:38.409Z"
},
{
"id": "dcb3b5d0-9f10-4baa-977a-70073a1899bd",
"status": {
"state": "active"
},
"updatedAt": "2026-03-11T12:48:38.411Z",
"comment": "Silenced Host_Down alerts in the dev cluster",
"createdBy": "john@example.com",
"endsAt": "2063-01-01T00:00:00.000Z",
"matchers": [
{
"isEqual": true,
"isRegex": false,
"name": "alertname",
"value": "Host_Down"
},
{
"isEqual": true,
"isRegex": false,
"name": "cluster",
"value": "dev"
}
],
"startsAt": "2026-03-11T12:48:38.411Z"
},
{
"id": "9bd58938-25fd-41c5-aba3-9bc373074484",
"status": {
"state": "active"
},
"updatedAt": "2026-03-11T12:48:38.412Z",
"comment": "Silenced server7",
"createdBy": "john@example.com",
"endsAt": "2063-01-01T00:00:00.000Z",
"matchers": [
{
"isEqual": true,
"isRegex": false,
"name": "instance",
"value": "server7"
}
],
"startsAt": "2026-03-11T12:48:38.412Z"
}
]


@@ -0,0 +1,740 @@
# HELP alertmanager_alerts How many alerts by state.
# TYPE alertmanager_alerts gauge
alertmanager_alerts{state="active"} 8
alertmanager_alerts{state="suppressed"} 4
alertmanager_alerts{state="unprocessed"} 0
# HELP alertmanager_alerts_invalid_total The total number of received alerts that were invalid.
# TYPE alertmanager_alerts_invalid_total counter
alertmanager_alerts_invalid_total{version="v2"} 0
# HELP alertmanager_alerts_per_alert_limit Current limit on number of alerts per alert name
# TYPE alertmanager_alerts_per_alert_limit gauge
alertmanager_alerts_per_alert_limit 0
# HELP alertmanager_alerts_received_total The total number of received alerts.
# TYPE alertmanager_alerts_received_total counter
alertmanager_alerts_received_total{status="firing",version="v2"} 60
alertmanager_alerts_received_total{status="resolved",version="v2"} 0
# HELP alertmanager_alerts_subscriber_channel_writes_total Number of times alerts were written to subscriber channels
# TYPE alertmanager_alerts_subscriber_channel_writes_total counter
alertmanager_alerts_subscriber_channel_writes_total{subscriber="dispatcher"} 60
alertmanager_alerts_subscriber_channel_writes_total{subscriber="inhibitor"} 60
# HELP alertmanager_build_info A metric with a constant '1' value labeled by version, revision, branch, goversion from which alertmanager was built, and the goos and goarch for the build.
# TYPE alertmanager_build_info gauge
alertmanager_build_info{branch="HEAD",goarch="amd64",goos="linux",goversion="go1.25.6",revision="0ae07a09fbb26a7738c867306f32b5f42583a7d2",tags="netgo",version="0.31.0"} 1
# HELP alertmanager_cluster_alive_messages_total Total number of received alive messages.
# TYPE alertmanager_cluster_alive_messages_total counter
alertmanager_cluster_alive_messages_total{peer="01KKEF1VS4AZH53FJ7EC916F9H"} 1
# HELP alertmanager_cluster_enabled Indicates whether the clustering is enabled or not.
# TYPE alertmanager_cluster_enabled gauge
alertmanager_cluster_enabled 1
# HELP alertmanager_cluster_failed_peers Number indicating the current number of failed peers in the cluster.
# TYPE alertmanager_cluster_failed_peers gauge
alertmanager_cluster_failed_peers 0
# HELP alertmanager_cluster_health_score Health score of the cluster. Lower values are better and zero means 'totally healthy'.
# TYPE alertmanager_cluster_health_score gauge
alertmanager_cluster_health_score 0
# HELP alertmanager_cluster_members Number indicating current number of members in cluster.
# TYPE alertmanager_cluster_members gauge
alertmanager_cluster_members 1
# HELP alertmanager_cluster_messages_pruned_total Total number of cluster messages pruned.
# TYPE alertmanager_cluster_messages_pruned_total counter
alertmanager_cluster_messages_pruned_total 0
# HELP alertmanager_cluster_messages_queued Number of cluster messages which are queued.
# TYPE alertmanager_cluster_messages_queued gauge
alertmanager_cluster_messages_queued 3
# HELP alertmanager_cluster_messages_received_size_total Total size of cluster messages received.
# TYPE alertmanager_cluster_messages_received_size_total counter
alertmanager_cluster_messages_received_size_total{msg_type="full_state"} 0
alertmanager_cluster_messages_received_size_total{msg_type="update"} 0
# HELP alertmanager_cluster_messages_received_total Total number of cluster messages received.
# TYPE alertmanager_cluster_messages_received_total counter
alertmanager_cluster_messages_received_total{msg_type="full_state"} 0
alertmanager_cluster_messages_received_total{msg_type="update"} 0
# HELP alertmanager_cluster_messages_sent_size_total Total size of cluster messages sent.
# TYPE alertmanager_cluster_messages_sent_size_total counter
alertmanager_cluster_messages_sent_size_total{msg_type="full_state"} 0
alertmanager_cluster_messages_sent_size_total{msg_type="update"} 0
# HELP alertmanager_cluster_messages_sent_total Total number of cluster messages sent.
# TYPE alertmanager_cluster_messages_sent_total counter
alertmanager_cluster_messages_sent_total{msg_type="full_state"} 0
alertmanager_cluster_messages_sent_total{msg_type="update"} 0
# HELP alertmanager_cluster_peer_info A metric with a constant '1' value labeled by peer name.
# TYPE alertmanager_cluster_peer_info gauge
alertmanager_cluster_peer_info{peer="01KKEF1VS4AZH53FJ7EC916F9H"} 1
# HELP alertmanager_cluster_peer_name_conflicts_total Total number of times memberlist has noticed conflicting peer names
# TYPE alertmanager_cluster_peer_name_conflicts_total counter
alertmanager_cluster_peer_name_conflicts_total 0
# HELP alertmanager_cluster_peers_joined_total A counter of the number of peers that have joined.
# TYPE alertmanager_cluster_peers_joined_total counter
alertmanager_cluster_peers_joined_total 1
# HELP alertmanager_cluster_peers_left_total A counter of the number of peers that have left.
# TYPE alertmanager_cluster_peers_left_total counter
alertmanager_cluster_peers_left_total 0
# HELP alertmanager_cluster_peers_update_total A counter of the number of peers that have updated metadata.
# TYPE alertmanager_cluster_peers_update_total counter
alertmanager_cluster_peers_update_total 0
# HELP alertmanager_cluster_reconnections_failed_total A counter of the number of failed cluster peer reconnection attempts.
# TYPE alertmanager_cluster_reconnections_failed_total counter
alertmanager_cluster_reconnections_failed_total 0
# HELP alertmanager_cluster_reconnections_total A counter of the number of cluster peer reconnections.
# TYPE alertmanager_cluster_reconnections_total counter
alertmanager_cluster_reconnections_total 0
# HELP alertmanager_cluster_refresh_join_failed_total A counter of the number of failed cluster peer joined attempts via refresh.
# TYPE alertmanager_cluster_refresh_join_failed_total counter
alertmanager_cluster_refresh_join_failed_total 0
# HELP alertmanager_cluster_refresh_join_total A counter of the number of cluster peer joined via refresh.
# TYPE alertmanager_cluster_refresh_join_total counter
alertmanager_cluster_refresh_join_total 0
# HELP alertmanager_config_hash Hash of the currently loaded alertmanager configuration. Note that this is not a cryptographically strong hash.
# TYPE alertmanager_config_hash gauge
alertmanager_config_hash 6.2645753076152e+13
# HELP alertmanager_config_last_reload_success_timestamp_seconds Timestamp of the last successful configuration reload.
# TYPE alertmanager_config_last_reload_success_timestamp_seconds gauge
alertmanager_config_last_reload_success_timestamp_seconds 1.7732333033532562e+09
# HELP alertmanager_config_last_reload_successful Whether the last configuration reload attempt was successful.
# TYPE alertmanager_config_last_reload_successful gauge
alertmanager_config_last_reload_successful 1
# HELP alertmanager_dispatcher_aggregation_group_limit_reached_total Number of times when dispatcher failed to create new aggregation group due to limit.
# TYPE alertmanager_dispatcher_aggregation_group_limit_reached_total counter
alertmanager_dispatcher_aggregation_group_limit_reached_total 0
# HELP alertmanager_dispatcher_aggregation_groups Number of active aggregation groups
# TYPE alertmanager_dispatcher_aggregation_groups gauge
alertmanager_dispatcher_aggregation_groups 10
# HELP alertmanager_dispatcher_alert_processing_duration_seconds Summary of latencies for the processing of alerts.
# TYPE alertmanager_dispatcher_alert_processing_duration_seconds summary
alertmanager_dispatcher_alert_processing_duration_seconds_sum 0.0018133570000000005
alertmanager_dispatcher_alert_processing_duration_seconds_count 60
# HELP alertmanager_http_concurrency_limit_exceeded_total Total number of times an HTTP request failed because the concurrency limit was reached.
# TYPE alertmanager_http_concurrency_limit_exceeded_total counter
alertmanager_http_concurrency_limit_exceeded_total{method="get"} 0
# HELP alertmanager_http_request_duration_seconds Histogram of latencies for HTTP requests.
# TYPE alertmanager_http_request_duration_seconds histogram
alertmanager_http_request_duration_seconds_bucket{code="200",handler="/api/v2/alerts",method="post",le="0.005"} 5
alertmanager_http_request_duration_seconds_bucket{code="200",handler="/api/v2/alerts",method="post",le="0.01"} 5
alertmanager_http_request_duration_seconds_bucket{code="200",handler="/api/v2/alerts",method="post",le="0.025"} 5
alertmanager_http_request_duration_seconds_bucket{code="200",handler="/api/v2/alerts",method="post",le="0.05"} 5
alertmanager_http_request_duration_seconds_bucket{code="200",handler="/api/v2/alerts",method="post",le="0.1"} 5
alertmanager_http_request_duration_seconds_bucket{code="200",handler="/api/v2/alerts",method="post",le="0.25"} 5
alertmanager_http_request_duration_seconds_bucket{code="200",handler="/api/v2/alerts",method="post",le="0.5"} 5
alertmanager_http_request_duration_seconds_bucket{code="200",handler="/api/v2/alerts",method="post",le="1"} 5
alertmanager_http_request_duration_seconds_bucket{code="200",handler="/api/v2/alerts",method="post",le="2.5"} 5
alertmanager_http_request_duration_seconds_bucket{code="200",handler="/api/v2/alerts",method="post",le="5"} 5
alertmanager_http_request_duration_seconds_bucket{code="200",handler="/api/v2/alerts",method="post",le="10"} 5
alertmanager_http_request_duration_seconds_bucket{code="200",handler="/api/v2/alerts",method="post",le="+Inf"} 5
alertmanager_http_request_duration_seconds_sum{code="200",handler="/api/v2/alerts",method="post"} 0.0028228439999999997
alertmanager_http_request_duration_seconds_count{code="200",handler="/api/v2/alerts",method="post"} 5
alertmanager_http_request_duration_seconds_bucket{code="200",handler="/api/v2/silences",method="post",le="0.005"} 3
alertmanager_http_request_duration_seconds_bucket{code="200",handler="/api/v2/silences",method="post",le="0.01"} 3
alertmanager_http_request_duration_seconds_bucket{code="200",handler="/api/v2/silences",method="post",le="0.025"} 3
alertmanager_http_request_duration_seconds_bucket{code="200",handler="/api/v2/silences",method="post",le="0.05"} 3
alertmanager_http_request_duration_seconds_bucket{code="200",handler="/api/v2/silences",method="post",le="0.1"} 3
alertmanager_http_request_duration_seconds_bucket{code="200",handler="/api/v2/silences",method="post",le="0.25"} 3
alertmanager_http_request_duration_seconds_bucket{code="200",handler="/api/v2/silences",method="post",le="0.5"} 3
alertmanager_http_request_duration_seconds_bucket{code="200",handler="/api/v2/silences",method="post",le="1"} 3
alertmanager_http_request_duration_seconds_bucket{code="200",handler="/api/v2/silences",method="post",le="2.5"} 3
alertmanager_http_request_duration_seconds_bucket{code="200",handler="/api/v2/silences",method="post",le="5"} 3
alertmanager_http_request_duration_seconds_bucket{code="200",handler="/api/v2/silences",method="post",le="10"} 3
alertmanager_http_request_duration_seconds_bucket{code="200",handler="/api/v2/silences",method="post",le="+Inf"} 3
alertmanager_http_request_duration_seconds_sum{code="200",handler="/api/v2/silences",method="post"} 0.001101868
alertmanager_http_request_duration_seconds_count{code="200",handler="/api/v2/silences",method="post"} 3
# HELP alertmanager_http_requests_in_flight Current number of HTTP requests being processed.
# TYPE alertmanager_http_requests_in_flight gauge
alertmanager_http_requests_in_flight{method="get"} 1
# HELP alertmanager_inhibition_rules Number of configured inhibition rules.
# TYPE alertmanager_inhibition_rules gauge
alertmanager_inhibition_rules 1
# HELP alertmanager_integrations Number of configured integrations.
# TYPE alertmanager_integrations gauge
alertmanager_integrations 0
# HELP alertmanager_marked_alerts How many alerts by state are currently marked in the Alertmanager regardless of their expiry.
# TYPE alertmanager_marked_alerts gauge
alertmanager_marked_alerts{state="active"} 8
alertmanager_marked_alerts{state="suppressed"} 4
alertmanager_marked_alerts{state="unprocessed"} 0
# HELP alertmanager_nflog_gc_duration_seconds Duration of the last notification log garbage collection cycle.
# TYPE alertmanager_nflog_gc_duration_seconds summary
alertmanager_nflog_gc_duration_seconds_sum 0
alertmanager_nflog_gc_duration_seconds_count 0
# HELP alertmanager_nflog_gossip_messages_propagated_total Number of received gossip messages that have been further gossiped.
# TYPE alertmanager_nflog_gossip_messages_propagated_total counter
alertmanager_nflog_gossip_messages_propagated_total 0
# HELP alertmanager_nflog_maintenance_errors_total How many maintenances were executed for the notification log that failed.
# TYPE alertmanager_nflog_maintenance_errors_total counter
alertmanager_nflog_maintenance_errors_total 0
# HELP alertmanager_nflog_maintenance_total How many maintenances were executed for the notification log.
# TYPE alertmanager_nflog_maintenance_total counter
alertmanager_nflog_maintenance_total 0
# HELP alertmanager_nflog_queries_total Number of notification log queries were received.
# TYPE alertmanager_nflog_queries_total counter
alertmanager_nflog_queries_total 0
# HELP alertmanager_nflog_query_duration_seconds Duration of notification log query evaluation.
# TYPE alertmanager_nflog_query_duration_seconds histogram
alertmanager_nflog_query_duration_seconds_bucket{le="0.005"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="0.01"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="0.025"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="0.05"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="0.1"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="0.25"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="0.5"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="1"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="2.5"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="5"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="10"} 0
alertmanager_nflog_query_duration_seconds_bucket{le="+Inf"} 0
alertmanager_nflog_query_duration_seconds_sum 0
alertmanager_nflog_query_duration_seconds_count 0
# HELP alertmanager_nflog_query_errors_total Number notification log received queries that failed.
# TYPE alertmanager_nflog_query_errors_total counter
alertmanager_nflog_query_errors_total 0
# HELP alertmanager_nflog_snapshot_duration_seconds Duration of the last notification log snapshot.
# TYPE alertmanager_nflog_snapshot_duration_seconds summary
alertmanager_nflog_snapshot_duration_seconds_sum 0
alertmanager_nflog_snapshot_duration_seconds_count 0
# HELP alertmanager_nflog_snapshot_size_bytes Size of the last notification log snapshot in bytes.
# TYPE alertmanager_nflog_snapshot_size_bytes gauge
alertmanager_nflog_snapshot_size_bytes 0
# HELP alertmanager_notification_latency_seconds The latency of notifications in seconds.
# TYPE alertmanager_notification_latency_seconds histogram
alertmanager_notification_latency_seconds_bucket{integration="discord",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="discord",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="discord",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="discord",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="discord",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="discord",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="discord"} 0
alertmanager_notification_latency_seconds_count{integration="discord"} 0
alertmanager_notification_latency_seconds_bucket{integration="email",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="email",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="email",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="email",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="email",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="email",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="email"} 0
alertmanager_notification_latency_seconds_count{integration="email"} 0
alertmanager_notification_latency_seconds_bucket{integration="incidentio",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="incidentio",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="incidentio",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="incidentio",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="incidentio",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="incidentio",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="incidentio"} 0
alertmanager_notification_latency_seconds_count{integration="incidentio"} 0
alertmanager_notification_latency_seconds_bucket{integration="jira",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="jira",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="jira",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="jira",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="jira",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="jira",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="jira"} 0
alertmanager_notification_latency_seconds_count{integration="jira"} 0
alertmanager_notification_latency_seconds_bucket{integration="mattermost",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="mattermost",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="mattermost",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="mattermost",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="mattermost",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="mattermost",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="mattermost"} 0
alertmanager_notification_latency_seconds_count{integration="mattermost"} 0
alertmanager_notification_latency_seconds_bucket{integration="msteams",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="msteams",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="msteams",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="msteams",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="msteams",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="msteams",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="msteams"} 0
alertmanager_notification_latency_seconds_count{integration="msteams"} 0
alertmanager_notification_latency_seconds_bucket{integration="msteamsv2",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="msteamsv2",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="msteamsv2",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="msteamsv2",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="msteamsv2",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="msteamsv2",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="msteamsv2"} 0
alertmanager_notification_latency_seconds_count{integration="msteamsv2"} 0
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="opsgenie",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="opsgenie"} 0
alertmanager_notification_latency_seconds_count{integration="opsgenie"} 0
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="pagerduty",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="pagerduty"} 0
alertmanager_notification_latency_seconds_count{integration="pagerduty"} 0
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="pushover",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="pushover"} 0
alertmanager_notification_latency_seconds_count{integration="pushover"} 0
alertmanager_notification_latency_seconds_bucket{integration="rocketchat",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="rocketchat",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="rocketchat",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="rocketchat",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="rocketchat",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="rocketchat",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="rocketchat"} 0
alertmanager_notification_latency_seconds_count{integration="rocketchat"} 0
alertmanager_notification_latency_seconds_bucket{integration="slack",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="slack",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="slack",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="slack",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="slack",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="slack",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="slack"} 0
alertmanager_notification_latency_seconds_count{integration="slack"} 0
alertmanager_notification_latency_seconds_bucket{integration="sns",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="sns",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="sns",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="sns",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="sns",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="sns",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="sns"} 0
alertmanager_notification_latency_seconds_count{integration="sns"} 0
alertmanager_notification_latency_seconds_bucket{integration="telegram",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="telegram",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="telegram",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="telegram",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="telegram",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="telegram",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="telegram"} 0
alertmanager_notification_latency_seconds_count{integration="telegram"} 0
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="victorops",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="victorops"} 0
alertmanager_notification_latency_seconds_count{integration="victorops"} 0
alertmanager_notification_latency_seconds_bucket{integration="webex",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="webex",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="webex",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="webex",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="webex",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="webex",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="webex"} 0
alertmanager_notification_latency_seconds_count{integration="webex"} 0
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="webhook",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="webhook"} 0
alertmanager_notification_latency_seconds_count{integration="webhook"} 0
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="1"} 0
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="5"} 0
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="10"} 0
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="15"} 0
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="20"} 0
alertmanager_notification_latency_seconds_bucket{integration="wechat",le="+Inf"} 0
alertmanager_notification_latency_seconds_sum{integration="wechat"} 0
alertmanager_notification_latency_seconds_count{integration="wechat"} 0
# HELP alertmanager_notification_requests_failed_total The total number of failed notification requests.
# TYPE alertmanager_notification_requests_failed_total counter
alertmanager_notification_requests_failed_total{integration="discord"} 0
alertmanager_notification_requests_failed_total{integration="email"} 0
alertmanager_notification_requests_failed_total{integration="incidentio"} 0
alertmanager_notification_requests_failed_total{integration="jira"} 0
alertmanager_notification_requests_failed_total{integration="mattermost"} 0
alertmanager_notification_requests_failed_total{integration="msteams"} 0
alertmanager_notification_requests_failed_total{integration="msteamsv2"} 0
alertmanager_notification_requests_failed_total{integration="opsgenie"} 0
alertmanager_notification_requests_failed_total{integration="pagerduty"} 0
alertmanager_notification_requests_failed_total{integration="pushover"} 0
alertmanager_notification_requests_failed_total{integration="rocketchat"} 0
alertmanager_notification_requests_failed_total{integration="slack"} 0
alertmanager_notification_requests_failed_total{integration="sns"} 0
alertmanager_notification_requests_failed_total{integration="telegram"} 0
alertmanager_notification_requests_failed_total{integration="victorops"} 0
alertmanager_notification_requests_failed_total{integration="webex"} 0
alertmanager_notification_requests_failed_total{integration="webhook"} 0
alertmanager_notification_requests_failed_total{integration="wechat"} 0
# HELP alertmanager_notification_requests_total The total number of attempted notification requests.
# TYPE alertmanager_notification_requests_total counter
alertmanager_notification_requests_total{integration="discord"} 0
alertmanager_notification_requests_total{integration="email"} 0
alertmanager_notification_requests_total{integration="incidentio"} 0
alertmanager_notification_requests_total{integration="jira"} 0
alertmanager_notification_requests_total{integration="mattermost"} 0
alertmanager_notification_requests_total{integration="msteams"} 0
alertmanager_notification_requests_total{integration="msteamsv2"} 0
alertmanager_notification_requests_total{integration="opsgenie"} 0
alertmanager_notification_requests_total{integration="pagerduty"} 0
alertmanager_notification_requests_total{integration="pushover"} 0
alertmanager_notification_requests_total{integration="rocketchat"} 0
alertmanager_notification_requests_total{integration="slack"} 0
alertmanager_notification_requests_total{integration="sns"} 0
alertmanager_notification_requests_total{integration="telegram"} 0
alertmanager_notification_requests_total{integration="victorops"} 0
alertmanager_notification_requests_total{integration="webex"} 0
alertmanager_notification_requests_total{integration="webhook"} 0
alertmanager_notification_requests_total{integration="wechat"} 0
# HELP alertmanager_notifications_failed_total The total number of failed notifications.
# TYPE alertmanager_notifications_failed_total counter
alertmanager_notifications_failed_total{integration="discord",reason="clientError"} 0
alertmanager_notifications_failed_total{integration="discord",reason="contextCanceled"} 0
alertmanager_notifications_failed_total{integration="discord",reason="contextDeadlineExceeded"} 0
alertmanager_notifications_failed_total{integration="discord",reason="other"} 0
alertmanager_notifications_failed_total{integration="discord",reason="serverError"} 0
alertmanager_notifications_failed_total{integration="email",reason="clientError"} 0
alertmanager_notifications_failed_total{integration="email",reason="contextCanceled"} 0
alertmanager_notifications_failed_total{integration="email",reason="contextDeadlineExceeded"} 0
alertmanager_notifications_failed_total{integration="email",reason="other"} 0
alertmanager_notifications_failed_total{integration="email",reason="serverError"} 0
alertmanager_notifications_failed_total{integration="incidentio",reason="clientError"} 0
alertmanager_notifications_failed_total{integration="incidentio",reason="contextCanceled"} 0
alertmanager_notifications_failed_total{integration="incidentio",reason="contextDeadlineExceeded"} 0
alertmanager_notifications_failed_total{integration="incidentio",reason="other"} 0
alertmanager_notifications_failed_total{integration="incidentio",reason="serverError"} 0
alertmanager_notifications_failed_total{integration="jira",reason="clientError"} 0
alertmanager_notifications_failed_total{integration="jira",reason="contextCanceled"} 0
alertmanager_notifications_failed_total{integration="jira",reason="contextDeadlineExceeded"} 0
alertmanager_notifications_failed_total{integration="jira",reason="other"} 0
alertmanager_notifications_failed_total{integration="jira",reason="serverError"} 0
alertmanager_notifications_failed_total{integration="mattermost",reason="clientError"} 0
alertmanager_notifications_failed_total{integration="mattermost",reason="contextCanceled"} 0
alertmanager_notifications_failed_total{integration="mattermost",reason="contextDeadlineExceeded"} 0
alertmanager_notifications_failed_total{integration="mattermost",reason="other"} 0
alertmanager_notifications_failed_total{integration="mattermost",reason="serverError"} 0
alertmanager_notifications_failed_total{integration="msteams",reason="clientError"} 0
alertmanager_notifications_failed_total{integration="msteams",reason="contextCanceled"} 0
alertmanager_notifications_failed_total{integration="msteams",reason="contextDeadlineExceeded"} 0
alertmanager_notifications_failed_total{integration="msteams",reason="other"} 0
alertmanager_notifications_failed_total{integration="msteams",reason="serverError"} 0
alertmanager_notifications_failed_total{integration="msteamsv2",reason="clientError"} 0
alertmanager_notifications_failed_total{integration="msteamsv2",reason="contextCanceled"} 0
alertmanager_notifications_failed_total{integration="msteamsv2",reason="contextDeadlineExceeded"} 0
alertmanager_notifications_failed_total{integration="msteamsv2",reason="other"} 0
alertmanager_notifications_failed_total{integration="msteamsv2",reason="serverError"} 0
alertmanager_notifications_failed_total{integration="opsgenie",reason="clientError"} 0
alertmanager_notifications_failed_total{integration="opsgenie",reason="contextCanceled"} 0
alertmanager_notifications_failed_total{integration="opsgenie",reason="contextDeadlineExceeded"} 0
alertmanager_notifications_failed_total{integration="opsgenie",reason="other"} 0
alertmanager_notifications_failed_total{integration="opsgenie",reason="serverError"} 0
alertmanager_notifications_failed_total{integration="pagerduty",reason="clientError"} 0
alertmanager_notifications_failed_total{integration="pagerduty",reason="contextCanceled"} 0
alertmanager_notifications_failed_total{integration="pagerduty",reason="contextDeadlineExceeded"} 0
alertmanager_notifications_failed_total{integration="pagerduty",reason="other"} 0
alertmanager_notifications_failed_total{integration="pagerduty",reason="serverError"} 0
alertmanager_notifications_failed_total{integration="pushover",reason="clientError"} 0
alertmanager_notifications_failed_total{integration="pushover",reason="contextCanceled"} 0
alertmanager_notifications_failed_total{integration="pushover",reason="contextDeadlineExceeded"} 0
alertmanager_notifications_failed_total{integration="pushover",reason="other"} 0
alertmanager_notifications_failed_total{integration="pushover",reason="serverError"} 0
alertmanager_notifications_failed_total{integration="rocketchat",reason="clientError"} 0
alertmanager_notifications_failed_total{integration="rocketchat",reason="contextCanceled"} 0
alertmanager_notifications_failed_total{integration="rocketchat",reason="contextDeadlineExceeded"} 0
alertmanager_notifications_failed_total{integration="rocketchat",reason="other"} 0
alertmanager_notifications_failed_total{integration="rocketchat",reason="serverError"} 0
alertmanager_notifications_failed_total{integration="slack",reason="clientError"} 0
alertmanager_notifications_failed_total{integration="slack",reason="contextCanceled"} 0
alertmanager_notifications_failed_total{integration="slack",reason="contextDeadlineExceeded"} 0
alertmanager_notifications_failed_total{integration="slack",reason="other"} 0
alertmanager_notifications_failed_total{integration="slack",reason="serverError"} 0
alertmanager_notifications_failed_total{integration="sns",reason="clientError"} 0
alertmanager_notifications_failed_total{integration="sns",reason="contextCanceled"} 0
alertmanager_notifications_failed_total{integration="sns",reason="contextDeadlineExceeded"} 0
alertmanager_notifications_failed_total{integration="sns",reason="other"} 0
alertmanager_notifications_failed_total{integration="sns",reason="serverError"} 0
alertmanager_notifications_failed_total{integration="telegram",reason="clientError"} 0
alertmanager_notifications_failed_total{integration="telegram",reason="contextCanceled"} 0
alertmanager_notifications_failed_total{integration="telegram",reason="contextDeadlineExceeded"} 0
alertmanager_notifications_failed_total{integration="telegram",reason="other"} 0
alertmanager_notifications_failed_total{integration="telegram",reason="serverError"} 0
alertmanager_notifications_failed_total{integration="victorops",reason="clientError"} 0
alertmanager_notifications_failed_total{integration="victorops",reason="contextCanceled"} 0
alertmanager_notifications_failed_total{integration="victorops",reason="contextDeadlineExceeded"} 0
alertmanager_notifications_failed_total{integration="victorops",reason="other"} 0
alertmanager_notifications_failed_total{integration="victorops",reason="serverError"} 0
alertmanager_notifications_failed_total{integration="webex",reason="clientError"} 0
alertmanager_notifications_failed_total{integration="webex",reason="contextCanceled"} 0
alertmanager_notifications_failed_total{integration="webex",reason="contextDeadlineExceeded"} 0
alertmanager_notifications_failed_total{integration="webex",reason="other"} 0
alertmanager_notifications_failed_total{integration="webex",reason="serverError"} 0
alertmanager_notifications_failed_total{integration="webhook",reason="clientError"} 0
alertmanager_notifications_failed_total{integration="webhook",reason="contextCanceled"} 0
alertmanager_notifications_failed_total{integration="webhook",reason="contextDeadlineExceeded"} 0
alertmanager_notifications_failed_total{integration="webhook",reason="other"} 0
alertmanager_notifications_failed_total{integration="webhook",reason="serverError"} 0
alertmanager_notifications_failed_total{integration="wechat",reason="clientError"} 0
alertmanager_notifications_failed_total{integration="wechat",reason="contextCanceled"} 0
alertmanager_notifications_failed_total{integration="wechat",reason="contextDeadlineExceeded"} 0
alertmanager_notifications_failed_total{integration="wechat",reason="other"} 0
alertmanager_notifications_failed_total{integration="wechat",reason="serverError"} 0
# HELP alertmanager_notifications_suppressed_total The total number of notifications suppressed for being silenced, inhibited, outside of active time intervals or within muted time intervals.
# TYPE alertmanager_notifications_suppressed_total counter
alertmanager_notifications_suppressed_total{reason="silence"} 16
# HELP alertmanager_notifications_total The total number of attempted notifications.
# TYPE alertmanager_notifications_total counter
alertmanager_notifications_total{integration="discord"} 0
alertmanager_notifications_total{integration="email"} 0
alertmanager_notifications_total{integration="incidentio"} 0
alertmanager_notifications_total{integration="jira"} 0
alertmanager_notifications_total{integration="mattermost"} 0
alertmanager_notifications_total{integration="msteams"} 0
alertmanager_notifications_total{integration="msteamsv2"} 0
alertmanager_notifications_total{integration="opsgenie"} 0
alertmanager_notifications_total{integration="pagerduty"} 0
alertmanager_notifications_total{integration="pushover"} 0
alertmanager_notifications_total{integration="rocketchat"} 0
alertmanager_notifications_total{integration="slack"} 0
alertmanager_notifications_total{integration="sns"} 0
alertmanager_notifications_total{integration="telegram"} 0
alertmanager_notifications_total{integration="victorops"} 0
alertmanager_notifications_total{integration="webex"} 0
alertmanager_notifications_total{integration="webhook"} 0
alertmanager_notifications_total{integration="wechat"} 0
# HELP alertmanager_oversize_gossip_message_duration_seconds Duration of oversized gossip message requests.
# TYPE alertmanager_oversize_gossip_message_duration_seconds histogram
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.005"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.01"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.025"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.05"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.1"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.25"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="0.5"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="1"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="2.5"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="5"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="10"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="nfl",le="+Inf"} 0
alertmanager_oversize_gossip_message_duration_seconds_sum{key="nfl"} 0
alertmanager_oversize_gossip_message_duration_seconds_count{key="nfl"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.005"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.01"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.025"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.05"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.1"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.25"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="0.5"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="1"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="2.5"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="5"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="10"} 0
alertmanager_oversize_gossip_message_duration_seconds_bucket{key="sil",le="+Inf"} 0
alertmanager_oversize_gossip_message_duration_seconds_sum{key="sil"} 0
alertmanager_oversize_gossip_message_duration_seconds_count{key="sil"} 0
# HELP alertmanager_oversized_gossip_message_dropped_total Number of oversized gossip messages that were dropped due to a full message queue.
# TYPE alertmanager_oversized_gossip_message_dropped_total counter
alertmanager_oversized_gossip_message_dropped_total{key="nfl"} 0
alertmanager_oversized_gossip_message_dropped_total{key="sil"} 0
# HELP alertmanager_oversized_gossip_message_failure_total Number of oversized gossip message sends that failed.
# TYPE alertmanager_oversized_gossip_message_failure_total counter
alertmanager_oversized_gossip_message_failure_total{key="nfl"} 0
alertmanager_oversized_gossip_message_failure_total{key="sil"} 0
# HELP alertmanager_oversized_gossip_message_sent_total Number of oversized gossip message sent.
# TYPE alertmanager_oversized_gossip_message_sent_total counter
alertmanager_oversized_gossip_message_sent_total{key="nfl"} 0
alertmanager_oversized_gossip_message_sent_total{key="sil"} 0
# HELP alertmanager_peer_position Position the Alertmanager instance believes it's in. The position determines a peer's behavior in the cluster.
# TYPE alertmanager_peer_position gauge
alertmanager_peer_position 0
# HELP alertmanager_receivers Number of configured receivers.
# TYPE alertmanager_receivers gauge
alertmanager_receivers 3
# HELP alertmanager_silences How many silences by state.
# TYPE alertmanager_silences gauge
alertmanager_silences{state="active"} 3
alertmanager_silences{state="expired"} 0
alertmanager_silences{state="pending"} 0
# HELP alertmanager_silences_gc_duration_seconds Duration of the last silence garbage collection cycle.
# TYPE alertmanager_silences_gc_duration_seconds summary
alertmanager_silences_gc_duration_seconds_sum 0
alertmanager_silences_gc_duration_seconds_count 0
# HELP alertmanager_silences_gc_errors_total How many silence GC errors were encountered.
# TYPE alertmanager_silences_gc_errors_total counter
alertmanager_silences_gc_errors_total 0
# HELP alertmanager_silences_gossip_messages_propagated_total Number of received gossip messages that have been further gossiped.
# TYPE alertmanager_silences_gossip_messages_propagated_total counter
alertmanager_silences_gossip_messages_propagated_total 0
# HELP alertmanager_silences_maintenance_errors_total How many maintenances were executed for silences that failed.
# TYPE alertmanager_silences_maintenance_errors_total counter
alertmanager_silences_maintenance_errors_total 0
# HELP alertmanager_silences_maintenance_total How many maintenances were executed for silences.
# TYPE alertmanager_silences_maintenance_total counter
alertmanager_silences_maintenance_total 0
# HELP alertmanager_silences_matcher_compile_errors_total How many silence matcher compilations failed.
# TYPE alertmanager_silences_matcher_compile_errors_total counter
alertmanager_silences_matcher_compile_errors_total{stage="index"} 0
alertmanager_silences_matcher_compile_errors_total{stage="load_snapshot"} 0
# HELP alertmanager_silences_matcher_index_size The number of entries in the matcher cache index.
# TYPE alertmanager_silences_matcher_index_size gauge
alertmanager_silences_matcher_index_size 3
# HELP alertmanager_silences_queries_total How many silence queries were received.
# TYPE alertmanager_silences_queries_total counter
alertmanager_silences_queries_total 24
# HELP alertmanager_silences_query_duration_seconds Duration of silence query evaluation.
# TYPE alertmanager_silences_query_duration_seconds histogram
alertmanager_silences_query_duration_seconds_bucket{le="0.005"} 26
alertmanager_silences_query_duration_seconds_bucket{le="0.01"} 26
alertmanager_silences_query_duration_seconds_bucket{le="0.025"} 26
alertmanager_silences_query_duration_seconds_bucket{le="0.05"} 26
alertmanager_silences_query_duration_seconds_bucket{le="0.1"} 26
alertmanager_silences_query_duration_seconds_bucket{le="0.25"} 26
alertmanager_silences_query_duration_seconds_bucket{le="0.5"} 26
alertmanager_silences_query_duration_seconds_bucket{le="1"} 26
alertmanager_silences_query_duration_seconds_bucket{le="2.5"} 26
alertmanager_silences_query_duration_seconds_bucket{le="5"} 26
alertmanager_silences_query_duration_seconds_bucket{le="10"} 26
alertmanager_silences_query_duration_seconds_bucket{le="+Inf"} 26
alertmanager_silences_query_duration_seconds_sum 0.000124002
alertmanager_silences_query_duration_seconds_count 26
# HELP alertmanager_silences_query_errors_total How many silence received queries did not succeed.
# TYPE alertmanager_silences_query_errors_total counter
alertmanager_silences_query_errors_total 0
# HELP alertmanager_silences_query_silences_scanned_total How many silences were scanned during query evaluation.
# TYPE alertmanager_silences_query_silences_scanned_total counter
alertmanager_silences_query_silences_scanned_total 51
# HELP alertmanager_silences_query_silences_skipped_total How many silences were skipped during query evaluation using the version index.
# TYPE alertmanager_silences_query_silences_skipped_total counter
alertmanager_silences_query_silences_skipped_total 0
# HELP alertmanager_silences_snapshot_duration_seconds Duration of the last silence snapshot.
# TYPE alertmanager_silences_snapshot_duration_seconds summary
alertmanager_silences_snapshot_duration_seconds_sum 0
alertmanager_silences_snapshot_duration_seconds_count 0
# HELP alertmanager_silences_snapshot_size_bytes Size of the last silence snapshot in bytes.
# TYPE alertmanager_silences_snapshot_size_bytes gauge
alertmanager_silences_snapshot_size_bytes 0
# HELP alertmanager_silences_state_size The number of silences in the state map.
# TYPE alertmanager_silences_state_size gauge
alertmanager_silences_state_size 3
# HELP alertmanager_silences_version_index_size The number of entries in the version index.
# TYPE alertmanager_silences_version_index_size gauge
alertmanager_silences_version_index_size 3
# HELP go_gc_duration_seconds A summary of the wall-time pause (stop-the-world) duration in garbage collection cycles.
# TYPE go_gc_duration_seconds summary
go_gc_duration_seconds{quantile="0"} 3.3568e-05
go_gc_duration_seconds{quantile="0.25"} 3.6978e-05
go_gc_duration_seconds{quantile="0.5"} 3.8411e-05
go_gc_duration_seconds{quantile="0.75"} 4.6025e-05
go_gc_duration_seconds{quantile="1"} 5.9678e-05
go_gc_duration_seconds_sum 0.00021466
go_gc_duration_seconds_count 5
# HELP go_gc_gogc_percent Heap size target percentage configured by the user, otherwise 100. This value is set by the GOGC environment variable, and the runtime/debug.SetGCPercent function. Sourced from /gc/gogc:percent.
# TYPE go_gc_gogc_percent gauge
go_gc_gogc_percent 100
# HELP go_gc_gomemlimit_bytes Go runtime memory limit configured by the user, otherwise math.MaxInt64. This value is set by the GOMEMLIMIT environment variable, and the runtime/debug.SetMemoryLimit function. Sourced from /gc/gomemlimit:bytes.
# TYPE go_gc_gomemlimit_bytes gauge
go_gc_gomemlimit_bytes 9.223372036854776e+18
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
go_goroutines 43
# HELP go_info Information about the Go environment.
# TYPE go_info gauge
go_info{version="go1.25.6"} 1
# HELP go_memstats_alloc_bytes Number of bytes allocated in heap and currently in use. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_alloc_bytes gauge
go_memstats_alloc_bytes 6.661032e+06
# HELP go_memstats_alloc_bytes_total Total number of bytes allocated in heap until now, even if released already. Equals to /gc/heap/allocs:bytes.
# TYPE go_memstats_alloc_bytes_total counter
go_memstats_alloc_bytes_total 1.483128e+07
# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table. Equals to /memory/classes/profiling/buckets:bytes.
# TYPE go_memstats_buck_hash_sys_bytes gauge
go_memstats_buck_hash_sys_bytes 1.461405e+06
# HELP go_memstats_frees_total Total number of heap objects frees. Equals to /gc/heap/frees:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_frees_total counter
go_memstats_frees_total 86567
# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata. Equals to /memory/classes/metadata/other:bytes.
# TYPE go_memstats_gc_sys_bytes gauge
go_memstats_gc_sys_bytes 3.506448e+06
# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and currently in use, same as go_memstats_alloc_bytes. Equals to /memory/classes/heap/objects:bytes.
# TYPE go_memstats_heap_alloc_bytes gauge
go_memstats_heap_alloc_bytes 6.661032e+06
# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used. Equals to /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_idle_bytes gauge
go_memstats_heap_idle_bytes 5.046272e+06
# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes
# TYPE go_memstats_heap_inuse_bytes gauge
go_memstats_heap_inuse_bytes 1.015808e+07
# HELP go_memstats_heap_objects Number of currently allocated objects. Equals to /gc/heap/objects:objects.
# TYPE go_memstats_heap_objects gauge
go_memstats_heap_objects 40312
# HELP go_memstats_heap_released_bytes Number of heap bytes released to OS. Equals to /memory/classes/heap/released:bytes.
# TYPE go_memstats_heap_released_bytes gauge
go_memstats_heap_released_bytes 4.841472e+06
# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system. Equals to /memory/classes/heap/objects:bytes + /memory/classes/heap/unused:bytes + /memory/classes/heap/released:bytes + /memory/classes/heap/free:bytes.
# TYPE go_memstats_heap_sys_bytes gauge
go_memstats_heap_sys_bytes 1.5204352e+07
# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
# TYPE go_memstats_last_gc_time_seconds gauge
go_memstats_last_gc_time_seconds 1.7732333033521466e+09
# HELP go_memstats_mallocs_total Total number of heap objects allocated, both live and gc-ed. Semantically a counter version for go_memstats_heap_objects gauge. Equals to /gc/heap/allocs:objects + /gc/heap/tiny/allocs:objects.
# TYPE go_memstats_mallocs_total counter
go_memstats_mallocs_total 126879
# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures. Equals to /memory/classes/metadata/mcache/inuse:bytes.
# TYPE go_memstats_mcache_inuse_bytes gauge
go_memstats_mcache_inuse_bytes 24160
# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system. Equals to /memory/classes/metadata/mcache/inuse:bytes + /memory/classes/metadata/mcache/free:bytes.
# TYPE go_memstats_mcache_sys_bytes gauge
go_memstats_mcache_sys_bytes 31408
# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures. Equals to /memory/classes/metadata/mspan/inuse:bytes.
# TYPE go_memstats_mspan_inuse_bytes gauge
go_memstats_mspan_inuse_bytes 309440
# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system. Equals to /memory/classes/metadata/mspan/inuse:bytes + /memory/classes/metadata/mspan/free:bytes.
# TYPE go_memstats_mspan_sys_bytes gauge
go_memstats_mspan_sys_bytes 310080
# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place. Equals to /gc/heap/goal:bytes.
# TYPE go_memstats_next_gc_bytes gauge
go_memstats_next_gc_bytes 1.0524914e+07
# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations. Equals to /memory/classes/other:bytes.
# TYPE go_memstats_other_sys_bytes gauge
go_memstats_other_sys_bytes 3.429739e+06
# HELP go_memstats_stack_inuse_bytes Number of bytes obtained from system for stack allocator in non-CGO environments. Equals to /memory/classes/heap/stacks:bytes.
# TYPE go_memstats_stack_inuse_bytes gauge
go_memstats_stack_inuse_bytes 1.572864e+06
# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator. Equals to /memory/classes/heap/stacks:bytes + /memory/classes/os-stacks:bytes.
# TYPE go_memstats_stack_sys_bytes gauge
go_memstats_stack_sys_bytes 1.572864e+06
# HELP go_memstats_sys_bytes Number of bytes obtained from system. Equals to /memory/classes/total:byte.
# TYPE go_memstats_sys_bytes gauge
go_memstats_sys_bytes 2.5516296e+07
# HELP go_sched_gomaxprocs_threads The current runtime.GOMAXPROCS setting, or the number of operating system threads that can execute user-level Go code simultaneously. Sourced from /sched/gomaxprocs:threads.
# TYPE go_sched_gomaxprocs_threads gauge
go_sched_gomaxprocs_threads 20
# HELP go_threads Number of OS threads created.
# TYPE go_threads gauge
go_threads 20
# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
# TYPE process_cpu_seconds_total counter
process_cpu_seconds_total 0.2
# HELP process_max_fds Maximum number of open file descriptors.
# TYPE process_max_fds gauge
process_max_fds 524287
# HELP process_network_receive_bytes_total Number of bytes received by the process over the network.
# TYPE process_network_receive_bytes_total counter
process_network_receive_bytes_total 21050
# HELP process_network_transmit_bytes_total Number of bytes sent by the process over the network.
# TYPE process_network_transmit_bytes_total counter
process_network_transmit_bytes_total 3578
# HELP process_open_fds Number of open file descriptors.
# TYPE process_open_fds gauge
process_open_fds 9
# HELP process_resident_memory_bytes Resident memory size in bytes.
# TYPE process_resident_memory_bytes gauge
process_resident_memory_bytes 3.7634048e+07
# HELP process_start_time_seconds Start time of the process since unix epoch in seconds.
# TYPE process_start_time_seconds gauge
process_start_time_seconds 1.77323330302e+09
# HELP process_virtual_memory_bytes Virtual memory size in bytes.
# TYPE process_virtual_memory_bytes gauge
process_virtual_memory_bytes 1.291026432e+09
# HELP process_virtual_memory_max_bytes Maximum amount of virtual memory available in bytes.
# TYPE process_virtual_memory_max_bytes gauge
process_virtual_memory_max_bytes 1.8446744073709552e+19
# HELP promhttp_metric_handler_requests_in_flight Current number of scrapes being served.
# TYPE promhttp_metric_handler_requests_in_flight gauge
promhttp_metric_handler_requests_in_flight 1
# HELP promhttp_metric_handler_requests_total Total number of scrapes by HTTP status code.
# TYPE promhttp_metric_handler_requests_total counter
promhttp_metric_handler_requests_total{code="200"} 0
promhttp_metric_handler_requests_total{code="500"} 0
promhttp_metric_handler_requests_total{code="503"} 0
@@ -5,7 +5,7 @@ DOCKER_ARGS := --name $(DOCKER_NAME) --rm -d -p 9093:9093 \
-v $(CURDIR)/alertmanager.yml:/etc/alertmanager/alertmanager.yml
# list of Alertmanager versions to generate mock files for
VERSIONS := 0.22.0 0.22.1 0.23.0 0.24.0 0.25.0 0.26.0 0.27.0
VERSIONS := 0.22.0 0.22.1 0.23.0 0.24.0 0.25.0 0.26.0 0.27.0 0.31.0
%/.ok: livemock.py
$(eval VERSION := $(word 1, $(subst /, ,$@)))
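For context, the pattern rule above derives the Alertmanager version from the sentinel target path: `$(subst /, ,$@)` turns a target like `0.31.0/.ok` into `0.31.0 .ok`, and `$(word 1, ...)` keeps the version component. A minimal sketch of the same idiom, assuming GNU Make; the `TARGET` value and the `show` target are illustrative only and not part of this diff:

```Makefile
# Sketch (assumed GNU Make): extract the version component from a
# sentinel path such as "0.31.0/.ok", mirroring the rule above.
TARGET  := 0.31.0/.ok
VERSION := $(word 1, $(subst /, ,$(TARGET)))  # subst: "/" -> " "; word 1: "0.31.0"

show:
	@echo $(VERSION)
```

Running `make show` with this sketch prints `0.31.0`, which is what the real rule assigns to `VERSION` via `$(eval ...)` for each mocked release.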

@@ -113,13 +113,13 @@ var stripLabelTests = []stripLabelTest{
},
}
func TestStripLables(t *testing.T) {
func TestStripLabels(t *testing.T) {
for _, testCase := range stripLabelTests {
keepRegex := getCompiledRegex(testCase.keepRegex, t)
stripRegex := getCompiledRegex(testCase.stripRegex, t)
labels := transform.StripLabels(testCase.keep, testCase.strip, keepRegex, stripRegex, testCase.before)
if !reflect.DeepEqual(labels, testCase.after) {
t.Errorf("StripLables failed, expected %v, got %v", testCase.after, labels)
t.Errorf("StripLabels failed, expected %v, got %v", testCase.after, labels)
}
}
}