Merge pull request #96 from cloudflare/alertmanager-0.6.2

Alertmanager 0.6.2 support
This commit is contained in:
Łukasz Mierzwa
2017-05-10 10:45:21 +01:00
committed by GitHub
10 changed files with 624 additions and 4 deletions

View File

@@ -2,7 +2,7 @@ NAME := unsee
VERSION := $(shell git describe --tags --always --dirty='-dev')
# Alertmanager instance used when running locally, points to mock data
MOCK_PATH := $(CURDIR)/mock/0.6.1
MOCK_PATH := $(CURDIR)/mock/0.6.2
ALERTMANAGER_URI := file://$(MOCK_PATH)
# Listen port when running locally
PORT := 8080

View File

@@ -12,7 +12,7 @@ to alert data, therefore safe to be accessed by wider audience.
## Supported Alertmanager versions
Alertmanager's API isn't stable yet and can change between releases.
unsee currently supports Alertmanager `0.4` and `0.5`.
unsee currently supports Alertmanager `0.4`, `0.5` and `0.6`.
## Security

View File

@@ -7,6 +7,7 @@ import (
"github.com/cloudflare/unsee/mapper/v04"
"github.com/cloudflare/unsee/mapper/v05"
"github.com/cloudflare/unsee/mapper/v061"
"github.com/cloudflare/unsee/mapper/v062"
"github.com/cloudflare/unsee/models"
log "github.com/Sirupsen/logrus"
@@ -17,6 +18,7 @@ func init() {
mapper.RegisterAlertMapper(v04.AlertMapper{})
mapper.RegisterAlertMapper(v05.AlertMapper{})
mapper.RegisterAlertMapper(v061.AlertMapper{})
mapper.RegisterAlertMapper(v062.AlertMapper{})
mapper.RegisterSilenceMapper(v04.SilenceMapper{})
mapper.RegisterSilenceMapper(v05.SilenceMapper{})
}

View File

@@ -47,7 +47,7 @@ type AlertMapper struct {
// IsSupported returns true if given version string is supported
func (m AlertMapper) IsSupported(version string) bool {
versionRange := semver.MustParseRange(">=0.6.1")
versionRange := semver.MustParseRange("=0.6.1")
return versionRange(semver.MustParse(version))
}

110
mapper/v062/alerts.go Normal file
View File

@@ -0,0 +1,110 @@
// Package v062 implements support for interacting with
// Alertmanager 0.6.2
// Collected data will be mapped to the unsee internal schema defined in the
// unsee/models package
// This file defines Alertmanager alerts mapping
package v062
import (
"errors"
"time"
"github.com/blang/semver"
"github.com/cloudflare/unsee/config"
"github.com/cloudflare/unsee/mapper"
"github.com/cloudflare/unsee/models"
"github.com/cloudflare/unsee/transport"
)
// alertStatus mirrors the per-alert "status" object in the
// Alertmanager 0.6.2 API response; SilencedBy/InhibitedBy may be
// null in the JSON payload, which decodes to a nil slice.
type alertStatus struct {
State string `json:"state"`
SilencedBy []string `json:"silencedBy"`
InhibitedBy []string `json:"inhibitedBy"`
}
// alert mirrors a single alert entry as returned by the
// Alertmanager 0.6.2 alerts/groups endpoint.
type alert struct {
Annotations map[string]string `json:"annotations"`
Labels map[string]string `json:"labels"`
StartsAt time.Time `json:"startsAt"`
EndsAt time.Time `json:"endsAt"`
GeneratorURL string `json:"generatorURL"`
Status alertStatus `json:"status"`
}
// alertsGroups mirrors one alert group: the group labels plus the
// blocks of alerts routed under those labels.
type alertsGroups struct {
Labels map[string]string `json:"labels"`
Blocks []struct {
Alerts []alert `json:"alerts"`
} `json:"blocks"`
}
// alertsGroupsAPISchema is the top-level envelope of the
// api/v1/alerts/groups response; Error is populated when
// Status is not "success".
type alertsGroupsAPISchema struct {
Status string `json:"status"`
Groups []alertsGroups `json:"data"`
Error string `json:"error"`
}
// AlertMapper implements Alertmanager API schema
type AlertMapper struct {
mapper.AlertMapper
}
// IsSupported returns true if given version string is supported.
// This mapper handles Alertmanager 0.6.2 and later.
func (m AlertMapper) IsSupported(version string) bool {
	supported := semver.MustParseRange(">=0.6.2")
	parsed := semver.MustParse(version)
	return supported(parsed)
}
// GetAlerts will make a request to Alertmanager API and parse the response
// It will only return alerts or error (if any)
func (m AlertMapper) GetAlerts() ([]models.AlertGroup, error) {
	result := []models.AlertGroup{}
	payload := alertsGroupsAPISchema{}

	endpoint, err := transport.JoinURL(config.Config.AlertmanagerURI, "api/v1/alerts/groups")
	if err != nil {
		return result, err
	}
	if err = transport.ReadJSON(endpoint, config.Config.AlertmanagerTimeout, &payload); err != nil {
		return result, err
	}
	if payload.Status != "success" {
		return result, errors.New(payload.Error)
	}

	for _, group := range payload.Groups {
		alerts := models.AlertList{}
		for _, block := range group.Blocks {
			for _, entry := range block.Alerts {
				// JSON null decodes to a nil slice; normalise both
				// silence/inhibit lists to empty slices for the UI.
				inhibited := entry.Status.InhibitedBy
				if inhibited == nil {
					inhibited = []string{}
				}
				silenced := entry.Status.SilencedBy
				if silenced == nil {
					silenced = []string{}
				}
				alerts = append(alerts, models.Alert{
					Annotations:  entry.Annotations,
					Labels:       entry.Labels,
					StartsAt:     entry.StartsAt,
					EndsAt:       entry.EndsAt,
					GeneratorURL: entry.GeneratorURL,
					Status:       entry.Status.State,
					InhibitedBy:  inhibited,
					SilencedBy:   silenced,
				})
			}
		}
		result = append(result, models.AlertGroup{
			Labels: group.Labels,
			Alerts: alerts,
		})
	}
	return result, nil
}

0
mock/0.6.2/.ok Normal file
View File

View File

@@ -0,0 +1,390 @@
{
"data": [
{
"blocks": [
{
"alerts": [
{
"annotations": {
"alert": "Less than 10% disk space is free",
"dashboard": "http://localhost/dashboard.html"
},
"endsAt": "0001-01-01T00:00:00Z",
"generatorURL": "",
"labels": {
"alertname": "Free_Disk_Space_Too_Low",
"cluster": "staging",
"instance": "server5",
"job": "node_exporter"
},
"startsAt": "2017-05-09T20:45:54.84503502Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
}
}
],
"routeOpts": {
"groupBy": [
"alertname",
"cluster",
"service"
],
"groupInterval": 35000000000,
"groupWait": 15000000000,
"receiver": "default",
"repeatInterval": 3596400000000000
}
}
],
"groupKey": "{}:{alertname=\"Free_Disk_Space_Too_Low\", cluster=\"staging\"}",
"labels": {
"alertname": "Free_Disk_Space_Too_Low",
"cluster": "staging"
}
},
{
"blocks": [
{
"alerts": [
{
"annotations": {
"help": "Example help annotation",
"summary": "Example summary",
"url": "http://localhost/example.html"
},
"endsAt": "0001-01-01T00:00:00Z",
"generatorURL": "",
"labels": {
"alertname": "HTTP_Probe_Failed",
"cluster": "dev",
"instance": "web1",
"job": "node_exporter"
},
"startsAt": "2017-05-09T20:45:54.84503502Z",
"status": {
"inhibitedBy": null,
"silencedBy": [
"6e1d1c04-8bd1-4aee-8c48-258d3b886f49"
],
"state": "suppressed"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "0001-01-01T00:00:00Z",
"generatorURL": "",
"labels": {
"alertname": "HTTP_Probe_Failed",
"cluster": "dev",
"instance": "web2",
"job": "node_exporter"
},
"startsAt": "2017-05-09T20:45:54.84503502Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
}
}
],
"routeOpts": {
"groupBy": [
"alertname",
"cluster",
"service"
],
"groupInterval": 35000000000,
"groupWait": 15000000000,
"receiver": "default",
"repeatInterval": 3596400000000000
}
}
],
"groupKey": "{}:{alertname=\"HTTP_Probe_Failed\", cluster=\"dev\"}",
"labels": {
"alertname": "HTTP_Probe_Failed",
"cluster": "dev"
}
},
{
"blocks": [
{
"alerts": [
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "0001-01-01T00:00:00Z",
"generatorURL": "",
"labels": {
"alertname": "Host_Down",
"cluster": "dev",
"instance": "server6",
"job": "node_ping"
},
"startsAt": "2017-05-09T20:45:54.84503502Z",
"status": {
"inhibitedBy": null,
"silencedBy": [
"b7b316a1-9ca8-4b66-8369-83ba78984c46"
],
"state": "suppressed"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "0001-01-01T00:00:00Z",
"generatorURL": "",
"labels": {
"alertname": "Host_Down",
"cluster": "dev",
"instance": "server7",
"job": "node_ping"
},
"startsAt": "2017-05-09T20:45:54.84503502Z",
"status": {
"inhibitedBy": null,
"silencedBy": [
"b7b316a1-9ca8-4b66-8369-83ba78984c46"
],
"state": "suppressed"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "0001-01-01T00:00:00Z",
"generatorURL": "",
"labels": {
"alertname": "Host_Down",
"cluster": "dev",
"instance": "server8",
"job": "node_ping"
},
"startsAt": "2017-05-09T20:45:54.84503502Z",
"status": {
"inhibitedBy": null,
"silencedBy": [
"b7b316a1-9ca8-4b66-8369-83ba78984c46"
],
"state": "suppressed"
}
}
],
"routeOpts": {
"groupBy": [
"service",
"alertname",
"cluster"
],
"groupInterval": 35000000000,
"groupWait": 15000000000,
"receiver": "default",
"repeatInterval": 3596400000000000
}
}
],
"groupKey": "{}:{alertname=\"Host_Down\", cluster=\"dev\"}",
"labels": {
"alertname": "Host_Down",
"cluster": "dev"
}
},
{
"blocks": [
{
"alerts": [
{
"annotations": {
"summary": "Example summary",
"url": "http://localhost/example.html"
},
"endsAt": "0001-01-01T00:00:00Z",
"generatorURL": "",
"labels": {
"alertname": "Host_Down",
"cluster": "prod",
"instance": "server1",
"job": "node_ping"
},
"startsAt": "2017-05-09T20:45:54.84503502Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "0001-01-01T00:00:00Z",
"generatorURL": "",
"labels": {
"alertname": "Host_Down",
"cluster": "prod",
"instance": "server2",
"job": "node_ping"
},
"startsAt": "2017-05-09T20:45:54.84503502Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
}
}
],
"routeOpts": {
"groupBy": [
"alertname",
"cluster",
"service"
],
"groupInterval": 35000000000,
"groupWait": 15000000000,
"receiver": "default",
"repeatInterval": 3596400000000000
}
}
],
"groupKey": "{}:{alertname=\"Host_Down\", cluster=\"prod\"}",
"labels": {
"alertname": "Host_Down",
"cluster": "prod"
}
},
{
"blocks": [
{
"alerts": [
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "0001-01-01T00:00:00Z",
"generatorURL": "",
"labels": {
"alertname": "Host_Down",
"cluster": "staging",
"instance": "server3",
"job": "node_ping"
},
"startsAt": "2017-05-09T20:45:54.84503502Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "0001-01-01T00:00:00Z",
"generatorURL": "",
"labels": {
"alertname": "Host_Down",
"cluster": "staging",
"instance": "server4",
"job": "node_ping"
},
"startsAt": "2017-05-09T20:45:54.84503502Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
}
},
{
"annotations": {
"summary": "Example summary"
},
"endsAt": "0001-01-01T00:00:00Z",
"generatorURL": "",
"labels": {
"alertname": "Host_Down",
"cluster": "staging",
"instance": "server5",
"job": "node_ping"
},
"startsAt": "2017-05-09T20:45:54.84503502Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
}
}
],
"routeOpts": {
"groupBy": [
"alertname",
"cluster",
"service"
],
"groupInterval": 35000000000,
"groupWait": 15000000000,
"receiver": "default",
"repeatInterval": 3596400000000000
}
}
],
"groupKey": "{}:{alertname=\"Host_Down\", cluster=\"staging\"}",
"labels": {
"alertname": "Host_Down",
"cluster": "staging"
}
},
{
"blocks": [
{
"alerts": [
{
"annotations": {
"alert": "Memory usage exceeding threshold",
"dashboard": "http://localhost/dashboard.html"
},
"endsAt": "0001-01-01T00:00:00Z",
"generatorURL": "",
"labels": {
"alertname": "Memory_Usage_Too_High",
"cluster": "prod",
"instance": "server2",
"job": "node_exporter"
},
"startsAt": "2017-05-09T20:45:54.84503502Z",
"status": {
"inhibitedBy": [],
"silencedBy": [],
"state": "active"
}
}
],
"routeOpts": {
"groupBy": [
"alertname",
"cluster",
"service"
],
"groupInterval": 35000000000,
"groupWait": 15000000000,
"receiver": "default",
"repeatInterval": 3596400000000000
}
}
],
"groupKey": "{}:{alertname=\"Memory_Usage_Too_High\", cluster=\"prod\"}",
"labels": {
"alertname": "Memory_Usage_Too_High",
"cluster": "prod"
}
}
],
"status": "success"
}

View File

@@ -0,0 +1,40 @@
{
"data": [
{
"comment": "Silenced instance",
"createdBy": "john@example.com",
"endsAt": "2063-01-01T00:00:00Z",
"id": "6e1d1c04-8bd1-4aee-8c48-258d3b886f49",
"matchers": [
{
"isRegex": false,
"name": "instance",
"value": "web1"
}
],
"startsAt": "2017-05-09T20:45:54.83551337Z",
"updatedAt": "2017-05-09T20:45:54.83551337Z"
},
{
"comment": "Silenced Host_Down alerts in the dev cluster",
"createdBy": "john@example.com",
"endsAt": "2063-01-01T00:00:00Z",
"id": "b7b316a1-9ca8-4b66-8369-83ba78984c46",
"matchers": [
{
"isRegex": false,
"name": "alertname",
"value": "Host_Down"
},
{
"isRegex": false,
"name": "cluster",
"value": "dev"
}
],
"startsAt": "2017-05-09T20:45:54.838410296Z",
"updatedAt": "2017-05-09T20:45:54.838410296Z"
}
],
"status": "success"
}

78
mock/0.6.2/api/v1/status Normal file
View File

@@ -0,0 +1,78 @@
{
"data": {
"config": "route:\n group_by: ['alertname', 'cluster', 'service']\n group_wait: 15s\n group_interval: 35s\n repeat_interval: 999h\n receiver: default\n\ninhibit_rules:\n- source_match:\n severity: 'critical'\n target_match:\n severity: 'warning'\n # Apply inhibition if the alertname is the same.\n equal: ['alertname', 'cluster', 'service']\n\nreceivers:\n- name: 'default'\n",
"configJSON": {
"global": {
"hipchat_auth_token": "",
"hipchat_url": "https://api.hipchat.com/",
"opsgenie_api_host": "https://api.opsgenie.com/",
"pagerduty_url": "https://events.pagerduty.com/generic/2010-04-15/create_event.json",
"resolve_timeout": 300000000000,
"slack_api_url": "",
"smtp_auth_identity": "",
"smtp_auth_password": "",
"smtp_auth_secret": "",
"smtp_auth_username": "",
"smtp_from": "",
"smtp_require_tls": true,
"smtp_smarthost": "",
"victorops_api_url": "https://alert.victorops.com/integrations/generic/20131114/alert/"
},
"inhibit_rules": [
{
"equal": [
"alertname",
"cluster",
"service"
],
"source_match": {
"severity": "critical"
},
"source_match_re": null,
"target_match": {
"severity": "warning"
},
"target_match_re": null
}
],
"receivers": [
{
"name": "default"
}
],
"route": {
"group_by": [
"alertname",
"cluster",
"service"
],
"group_interval": 35000000000,
"group_wait": 15000000000,
"receiver": "default",
"repeat_interval": 3596400000000000
},
"templates": null
},
"meshStatus": {
"name": "02:42:ac:11:00:02",
"nickName": "16ffa16ef025",
"peers": [
{
"name": "02:42:ac:11:00:02",
"nickName": "16ffa16ef025",
"uid": 9016669185209747900
}
]
},
"uptime": "2017-05-09T20:45:39.702335917Z",
"versionInfo": {
"branch": "master",
"buildDate": "20170509-08:56:14",
"buildUser": "root@e3ca4de32142",
"goVersion": "go1.8.1",
"revision": "b011c0a32ce887c1a10f7d34d52fd8cce485c1cf",
"version": "0.6.2"
}
},
"status": "success"
}

View File

@@ -3,7 +3,7 @@ DOCKER_IMAGE := prom/alertmanager
DOCKER_ARGS := --name $(DOCKER_NAME) --rm -d -p 9093:9093 -v $(CURDIR)/alertmanager.yml:/etc/alertmanager/config.yml
# list of Alertmanager versions to generate mock files for
VERSIONS := 0.4.0 0.4.1 0.4.2 0.5.0 0.5.1 0.6.0 0.6.1
VERSIONS := 0.4.0 0.4.1 0.4.2 0.5.0 0.5.1 0.6.0 0.6.1 0.6.2
%/.ok:
$(eval VERSION := $(word 1, $(subst /, ,$@)))