feat(api): add alertList.json endpoint

Łukasz Mierzwa
2021-06-10 15:43:40 +01:00
committed by Łukasz Mierzwa
parent 8b4a17683d
commit f13cd9a112
3 changed files with 384 additions and 0 deletions
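
For context, a minimal client-side sketch of the new endpoint (not part of this commit): it assumes a karma instance listening on localhost:8080 with the default URL prefix, and decodes the response into the same AlertList shape introduced below. The q parameter accepts the same filter expressions as alerts.json.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// AlertList mirrors the response schema added by this commit.
type AlertList struct {
	Alerts []map[string]string `json:"alerts"`
}

func main() {
	// Hypothetical address; adjust to wherever karma is listening.
	resp, err := http.Get("http://localhost:8080/alertList.json?q=cluster=prod")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var al AlertList
	if err := json.NewDecoder(resp.Body).Decode(&al); err != nil {
		panic(err)
	}
	for _, labels := range al.Alerts {
		fmt.Println(labels["alertname"], labels["instance"])
	}
}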


@@ -138,6 +138,7 @@ func setupRouter(router *chi.Mux, historyPoller *historyPoller) {
		h.ServeHTTP(w, r)
	}))
	router.Get(getViewURL("/alerts.json"), alerts)
	router.Get(getViewURL("/alertList.json"), alertList)
	router.Get(getViewURL("/autocomplete.json"), autocomplete)
	router.Get(getViewURL("/labelNames.json"), knownLabelNames)
	router.Get(getViewURL("/labelValues.json"), knownLabelValues)


@@ -12,6 +12,8 @@ import (
	"strings"
	"time"

	"github.com/cnf/structhash"
	"github.com/fvbommel/sortorder"
	"github.com/prymitive/karma/internal/alertmanager"
	"github.com/prymitive/karma/internal/config"
	"github.com/prymitive/karma/internal/filters"
@@ -661,3 +663,86 @@ func silences(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
	_, _ = w.Write(data.([]byte))
}

// AlertList is the response schema of /alertList.json: a flat list of
// per-alert label sets.
type AlertList struct {
	Alerts []map[string]string `json:"alerts"`
}

func alertList(w http.ResponseWriter, r *http.Request) {
	noCache(w)

	// use full URI (including query args) as cache key
	cacheKey := r.RequestURI

	// serve a cached, pre-compressed response if we already have one for this URI
	d, found := apiCache.Get(cacheKey)
	if found {
		r := bytes.NewReader(d.([]byte))
		rawData, _ := decompressCachedResponse(r)
		mimeJSON(w)
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write(rawData)
		return
	}

	// apply the same "q" filter expressions as alerts.json
	q, _ := lookupQueryStringSlice(r, "q")
	matchFilters := getFiltersFromQuery(q)

	dedupedAlerts := alertmanager.DedupAlerts()
	filtered := filterAlerts(dedupedAlerts, matchFilters)

	// merge group labels with per-alert labels and deduplicate identical
	// label sets by keying them on a hash of the merged map
	labelMap := map[string]map[string]string{}
	for _, ag := range filtered {
		for _, alert := range ag.Alerts {
			labels := map[string]string{}
			for k, v := range ag.Labels {
				labels[k] = v
			}
			for k, v := range alert.Labels {
				labels[k] = v
			}
			h := fmt.Sprintf("%x", structhash.Sha1(labels, 1))
			labelMap[h] = labels
		}
	}

	// collect every label name seen across all label sets and sort them;
	// these become the sort keys used to order the response
	sortMap := map[string]struct{}{}
	for _, k := range labelMap {
		for v := range k {
			sortMap[v] = struct{}{}
		}
	}
	var sortKeys []string
	for k := range sortMap {
		sortKeys = append(sortKeys, k)
	}
	sort.Strings(sortKeys)

	al := AlertList{}
	for _, labels := range labelMap {
		al.Alerts = append(al.Alerts, labels)
	}
	sortSliceOfLabels(al.Alerts, sortKeys, "alertname")

	mimeJSON(w)
	w.WriteHeader(http.StatusOK)
	data, _ := json.Marshal(al)
	compressedData, _ := compressResponse(data, nil)
	_ = apiCache.Add(cacheKey, compressedData)
	_, _ = w.Write(data)
}

// sortSliceOfLabels orders label sets by each sort key in turn: entries that
// have a given key come before entries that lack it, otherwise values are
// compared in natural order; the fallback label breaks any remaining ties.
func sortSliceOfLabels(labels []map[string]string, sortKeys []string, fallback string) {
	sort.SliceStable(labels, func(i, j int) bool {
		for _, k := range sortKeys {
			if labels[i][k] != "" && labels[j][k] == "" {
				return true
			}
			if labels[i][k] == "" && labels[j][k] != "" {
				return false
			}
			if labels[i][k] != labels[j][k] {
				return sortorder.NaturalLess(labels[i][k], labels[j][k])
			}
		}
		return sortorder.NaturalLess(labels[i][fallback], labels[j][fallback])
	})
}
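
To illustrate the ordering rules (a sketch only, with made-up label data, not part of the commit): rows that carry a sort key come before rows that lack it, ties fall through to the next key, values compare in natural order so server2 sorts before server10, and the fallback label settles anything left.

// Hypothetical example in the same package, using the sort helper defined above.
func exampleSortSliceOfLabels() {
	labels := []map[string]string{
		{"alertname": "Host_Down", "instance": "server10"},
		{"alertname": "Host_Down", "instance": "server2"},
		{"alertname": "Disk_Full"}, // no "instance" label
	}
	sortSliceOfLabels(labels, []string{"instance"}, "alertname")
	// Order after sorting: server2, server10 (natural order), then the row
	// without an "instance" label.
}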


@@ -2995,3 +2995,301 @@ func TestGridLimit(t *testing.T) {
		}
	}
}
func TestAlertList(t *testing.T) {
	type testCaseT struct {
		args   string
		alerts AlertList
	}

	testCases := []testCaseT{
		{
			args: "",
			alerts: AlertList{
				Alerts: []map[string]string{
					{
						"alertname": "Free_Disk_Space_Too_Low",
						"cluster": "staging",
						"disk": "sda",
						"instance": "server5",
						"job": "node_exporter",
					},
					{
						"alertname": "HTTP_Probe_Failed",
						"cluster": "dev",
						"instance": "web1",
						"job": "node_exporter",
					},
					{
						"alertname": "HTTP_Probe_Failed",
						"cluster": "dev",
						"instance": "web2",
						"job": "node_exporter",
					},
					{
						"alertname": "Host_Down",
						"cluster": "dev",
						"instance": "server6",
						"ip": "127.0.0.6",
						"job": "node_ping",
					},
					{
						"alertname": "Host_Down",
						"cluster": "dev",
						"instance": "server7",
						"ip": "127.0.0.7",
						"job": "node_ping",
					},
					{
						"alertname": "Host_Down",
						"cluster": "dev",
						"instance": "server8",
						"ip": "127.0.0.8",
						"job": "node_ping",
					},
					{
						"alertname": "Host_Down",
						"cluster": "prod",
						"instance": "server1",
						"ip": "127.0.0.1",
						"job": "node_ping",
					},
					{
						"alertname": "Host_Down",
						"cluster": "prod",
						"instance": "server2",
						"ip": "127.0.0.2",
						"job": "node_ping",
					},
					{
						"alertname": "Host_Down",
						"cluster": "staging",
						"instance": "server3",
						"ip": "127.0.0.3",
						"job": "node_ping",
					},
					{
						"alertname": "Host_Down",
						"cluster": "staging",
						"instance": "server4",
						"ip": "127.0.0.4",
						"job": "node_ping",
					},
					{
						"alertname": "Host_Down",
						"cluster": "staging",
						"instance": "server5",
						"ip": "127.0.0.5",
						"job": "node_ping",
					},
					{
						"alertname": "Memory_Usage_Too_High",
						"cluster": "prod",
						"instance": "server2",
						"job": "node_exporter",
					},
				},
			},
		},
		{
			args: "q=alertname=Free_Disk_Space_Too_Low",
			alerts: AlertList{
				Alerts: []map[string]string{
					{
						"alertname": "Free_Disk_Space_Too_Low",
						"cluster": "staging",
						"disk": "sda",
						"instance": "server5",
						"job": "node_exporter",
					},
				},
			},
		},
		{
			args: "q=alertname=HTTP_Probe_Failed",
			alerts: AlertList{
				Alerts: []map[string]string{
					{
						"alertname": "HTTP_Probe_Failed",
						"cluster": "dev",
						"instance": "web1",
						"job": "node_exporter",
					},
					{
						"alertname": "HTTP_Probe_Failed",
						"cluster": "dev",
						"instance": "web2",
						"job": "node_exporter",
					},
				},
			},
		},
		{
			args: "q=instance=server2",
			alerts: AlertList{
				Alerts: []map[string]string{
					{
						"alertname": "Host_Down",
						"cluster": "prod",
						"instance": "server2",
						"ip": "127.0.0.2",
						"job": "node_ping",
					},
					{
						"alertname": "Memory_Usage_Too_High",
						"cluster": "prod",
						"instance": "server2",
						"job": "node_exporter",
					},
				},
			},
		},
		{
			args: "q=alertname=Host_Down&q=cluster=prod",
			alerts: AlertList{
				Alerts: []map[string]string{
					{
						"alertname": "Host_Down",
						"cluster": "prod",
						"instance": "server1",
						"ip": "127.0.0.1",
						"job": "node_ping",
					},
					{
						"alertname": "Host_Down",
						"cluster": "prod",
						"instance": "server2",
						"ip": "127.0.0.2",
						"job": "node_ping",
					},
				},
			},
		},
	}

	mockConfig()
	for _, tc := range testCases {
		for _, version := range mock.ListAllMocks() {
			t.Run(fmt.Sprintf("%s:%s", version, tc.args), func(t *testing.T) {
				t.Logf("Testing alerts using mock files from Alertmanager %s", version)
				mockAlerts(version)
				r := testRouter()
				setupRouter(r, nil)
				// re-run a few times to test the cache
				for i := 1; i <= 3; i++ {
					req := httptest.NewRequest("GET", "/alertList.json?"+tc.args, nil)
					resp := httptest.NewRecorder()
					r.ServeHTTP(resp, req)
					if resp.Code != http.StatusOK {
						t.Errorf("GET /alertList.json returned status %d", resp.Code)
					}
					ur := AlertList{}
					err := json.Unmarshal(resp.Body.Bytes(), &ur)
					if err != nil {
						t.Errorf("Failed to unmarshal response: %s", err)
					}
					if diff := cmp.Diff(tc.alerts, ur); diff != "" {
						t.Errorf("Wrong alert list returned (-want +got):\n%s", diff)
					}
				}
			})
			break
		}
	}
}
func TestSortSliceOfLabels(t *testing.T) {
	type testCaseT struct {
		labels   []map[string]string
		sortKeys []string
		fallback string
		output   []map[string]string
	}

	testCases := []testCaseT{
		{
			labels: []map[string]string{
				{"alertname": "alert2"},
				{"alertname": "alert1"},
			},
			sortKeys: []string{},
			fallback: "",
			output: []map[string]string{
				{"alertname": "alert2"},
				{"alertname": "alert1"},
			},
		},
		{
			labels: []map[string]string{
				{"alertname": "alert2"},
				{"alertname": "alert1"},
			},
			sortKeys: []string{"alertname"},
			fallback: "alertname",
			output: []map[string]string{
				{"alertname": "alert1"},
				{"alertname": "alert2"},
			},
		},
		{
			labels: []map[string]string{
				{"alertname": "alert2"},
				{"alertname": "alert1"},
			},
			sortKeys: []string{},
			fallback: "alertname",
			output: []map[string]string{
				{"alertname": "alert1"},
				{"alertname": "alert2"},
			},
		},
		{
			labels: []map[string]string{
				{"alertname": "alert2"},
				{"alertname": "alert1"},
			},
			sortKeys: []string{"foo"},
			fallback: "alertname",
			output: []map[string]string{
				{"alertname": "alert1"},
				{"alertname": "alert2"},
			},
		},
		{
			labels: []map[string]string{
				{"alertname": "alert1"},
				{"alertname": "alert1"},
			},
			sortKeys: []string{"alertname"},
			fallback: "alertname",
			output: []map[string]string{
				{"alertname": "alert1"},
				{"alertname": "alert1"},
			},
		},
		{
			labels: []map[string]string{
				{"alertname": "alert2", "job": "a"},
				{"alertname": "alert1"},
				{"alertname": "alert3", "job": "b"},
			},
			sortKeys: []string{"job"},
			fallback: "alertname",
			output: []map[string]string{
				{"alertname": "alert2", "job": "a"},
				{"alertname": "alert3", "job": "b"},
				{"alertname": "alert1"},
			},
		},
	}

	for i, tc := range testCases {
		t.Run(fmt.Sprintf("%d:%v", i, tc.sortKeys), func(t *testing.T) {
			sortSliceOfLabels(tc.labels, tc.sortKeys, tc.fallback)
			if diff := cmp.Diff(tc.output, tc.labels); diff != "" {
				t.Errorf("Wrong labels order after sorting (-want +got):\n%s", diff)
			}
		})
	}
}