Add metrics collection (#752)

Signed-off-by: Paige Patton <prubenda@redhat.com>
Authored by Paige Patton on 2025-03-19 12:08:44 -04:00, committed by GitHub
parent 0eba329305 · commit cad6b68f43
6 changed files with 308 additions and 46 deletions


@@ -60,12 +60,10 @@ performance_monitoring:
     enable_alerts: False               # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
     enable_metrics: False
     alert_profile: config/alerts.yaml  # Path or URL to alert profile with the prometheus queries
-    metrics_profile: config/metrics.yaml
+    metrics_profile: config/metrics-report.yaml
     check_critical_alerts: False       # When enabled will check prometheus for critical alerts firing post chaos
 elastic:
     enable_elastic: False
-    collect_metrics: False
-    collect_alerts: False
     verify_certs: False
     elastic_url: ""                    # To track results in elasticsearch, give url to server here; will post telemetry details when url and index not blank
     elastic_port: 32766

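With collect_metrics and collect_alerts gone from the config, Elasticsearch uploads are gated only by enable_elastic: run_kraken.py constructs the client only when that flag is set, and the plugin uploads whenever it receives a client (see the run_kraken.py and prometheus_plugin.py hunks below). A minimal sketch of the new gating, with hypothetical stand-in values:

    # Hypothetical stand-in; in the commit the client is a KrknElastic instance.
    config = {"elastic": {"enable_elastic": True}}

    elastic = None
    if config["elastic"].get("enable_elastic", False):
        elastic = object()  # KrknElastic(...) in run_kraken.py

    # The plugin now checks the client itself, not a separate collect_* flag:
    if elastic:             # was: if elastic_collect_metrics and elastic:
        print("uploading metrics to Elasticsearch")
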
config/metrics-report.yaml (new file, 248 lines)

@@ -0,0 +1,248 @@
metrics:
# API server
- query: sum(apiserver_current_inflight_requests{}) by (request_kind) > 0
metricName: APIInflightRequests
instant: true
# Kubelet & CRI-O
# Average and max of the CPU usage from all worker's kubelet
- query: avg(avg_over_time(irate(process_cpu_seconds_total{service="kubelet",job="kubelet"}[2m])[.elapsed:]) and on (node) kube_node_role{role="worker"})
metricName: cpu-kubelet
instant: true
- query: max(max_over_time(irate(process_cpu_seconds_total{service="kubelet",job="kubelet"}[2m])[.elapsed:]) and on (node) kube_node_role{role="worker"})
metricName: max-cpu-kubelet
instant: true
# Average of the memory usage from all worker's kubelet
- query: avg(avg_over_time(process_resident_memory_bytes{service="kubelet",job="kubelet"}[.elapsed:]) and on (node) kube_node_role{role="worker"})
metricName: memory-kubelet
instant: true
# Max of the memory usage from all worker's kubelet
- query: max(max_over_time(process_resident_memory_bytes{service="kubelet",job="kubelet"}[.elapsed:]) and on (node) kube_node_role{role="worker"})
metricName: max-memory-kubelet
instant: true
- query: max_over_time(sum(process_resident_memory_bytes{service="kubelet",job="kubelet"} and on (node) kube_node_role{role="worker"})[.elapsed:])
metricName: max-memory-sum-kubelet
instant: true
# Average and max of the CPU usage from all worker's CRI-O
- query: avg(avg_over_time(irate(process_cpu_seconds_total{service="kubelet",job="crio"}[2m])[.elapsed:]) and on (node) kube_node_role{role="worker"})
metricName: cpu-crio
instant: true
- query: max(max_over_time(irate(process_cpu_seconds_total{service="kubelet",job="crio"}[2m])[.elapsed:]) and on (node) kube_node_role{role="worker"})
metricName: max-cpu-crio
instant: true
# Average of the memory usage from all worker's CRI-O
- query: avg(avg_over_time(process_resident_memory_bytes{service="kubelet",job="crio"}[.elapsed:]) and on (node) kube_node_role{role="worker"})
metricName: memory-crio
instant: true
# Max of the memory usage from all worker's CRI-O
- query: max(max_over_time(process_resident_memory_bytes{service="kubelet",job="crio"}[.elapsed:]) and on (node) kube_node_role{role="worker"})
metricName: max-memory-crio
instant: true
# Etcd
- query: avg(avg_over_time(histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket[2m]))[.elapsed:]))
metricName: 99thEtcdDiskBackendCommit
instant: true
- query: avg(avg_over_time(histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[2m]))[.elapsed:]))
metricName: 99thEtcdDiskWalFsync
instant: true
- query: avg(avg_over_time(histogram_quantile(0.99, irate(etcd_network_peer_round_trip_time_seconds_bucket[2m]))[.elapsed:]))
metricName: 99thEtcdRoundTripTime
instant: true
# Control-plane
- query: avg(avg_over_time(topk(1, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-kube-controller-manager"}[2m])) by (pod))[.elapsed:]))
metricName: cpu-kube-controller-manager
instant: true
- query: max(max_over_time(topk(1, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-kube-controller-manager"}[2m])) by (pod))[.elapsed:]))
metricName: max-cpu-kube-controller-manager
instant: true
- query: avg(avg_over_time(topk(1, sum(container_memory_rss{name!="", namespace="openshift-kube-controller-manager"}) by (pod))[.elapsed:]))
metricName: memory-kube-controller-manager
instant: true
- query: max(max_over_time(topk(1, sum(container_memory_rss{name!="", namespace="openshift-kube-controller-manager"}) by (pod))[.elapsed:]))
metricName: max-memory-kube-controller-manager
instant: true
- query: avg(avg_over_time(topk(3, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-kube-apiserver"}[2m])) by (pod))[.elapsed:]))
metricName: cpu-kube-apiserver
instant: true
- query: avg(avg_over_time(topk(3, sum(container_memory_rss{name!="", namespace="openshift-kube-apiserver"}) by (pod))[.elapsed:]))
metricName: memory-kube-apiserver
instant: true
- query: avg(avg_over_time(topk(3, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-apiserver"}[2m])) by (pod))[.elapsed:]))
metricName: cpu-openshift-apiserver
instant: true
- query: avg(avg_over_time(topk(3, sum(container_memory_rss{name!="", namespace="openshift-apiserver"}) by (pod))[.elapsed:]))
metricName: memory-openshift-apiserver
instant: true
- query: avg(avg_over_time(topk(3, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-etcd"}[2m])) by (pod))[.elapsed:]))
metricName: cpu-etcd
instant: true
- query: avg(avg_over_time(topk(3,sum(container_memory_rss{name!="", namespace="openshift-etcd"}) by (pod))[.elapsed:]))
metricName: memory-etcd
instant: true
- query: avg(avg_over_time(topk(1, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-controller-manager"}[2m])) by (pod))[.elapsed:]))
metricName: cpu-openshift-controller-manager
instant: true
- query: avg(avg_over_time(topk(1, sum(container_memory_rss{name!="", namespace="openshift-controller-manager"}) by (pod))[.elapsed:]))
metricName: memory-openshift-controller-manager
instant: true
# multus
- query: avg(avg_over_time(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-multus", pod=~"(multus).+", container!="POD"}[2m])[.elapsed:])) by (container)
metricName: cpu-multus
instant: true
- query: avg(avg_over_time(container_memory_rss{name!="", namespace="openshift-multus", pod=~"(multus).+", container!="POD"}[.elapsed:])) by (container)
metricName: memory-multus
instant: true
# OVNKubernetes - standard & IC
- query: avg(avg_over_time(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-ovn-kubernetes", pod=~"(ovnkube-master|ovnkube-control-plane).+", container!="POD"}[2m])[.elapsed:])) by (container)
metricName: cpu-ovn-control-plane
instant: true
- query: avg(avg_over_time(container_memory_rss{name!="", namespace="openshift-ovn-kubernetes", pod=~"(ovnkube-master|ovnkube-control-plane).+", container!="POD"}[.elapsed:])) by (container)
metricName: memory-ovn-control-plane
instant: true
- query: avg(avg_over_time(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-ovn-kubernetes", pod=~"ovnkube-node.+", container!="POD"}[2m])[.elapsed:])) by (container)
metricName: cpu-ovnkube-node
instant: true
- query: avg(avg_over_time(container_memory_rss{name!="", namespace="openshift-ovn-kubernetes", pod=~"ovnkube-node.+", container!="POD"}[.elapsed:])) by (container)
metricName: memory-ovnkube-node
instant: true
# Nodes
- query: avg(avg_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
metricName: cpu-masters
instant: true
- query: avg(avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
metricName: memory-masters
instant: true
- query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
metricName: max-memory-masters
instant: true
- query: avg(avg_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
metricName: cpu-workers
instant: true
- query: max(max_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
metricName: max-cpu-workers
instant: true
- query: avg(avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
metricName: memory-workers
instant: true
- query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
metricName: max-memory-workers
instant: true
- query: sum( (node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)") )
metricName: memory-sum-workers
instant: true
- query: avg(avg_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
metricName: cpu-infra
instant: true
- query: max(max_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
metricName: max-cpu-infra
instant: true
- query: avg(avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))
metricName: memory-infra
instant: true
- query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))
metricName: max-memory-infra
instant: true
- query: max_over_time(sum((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))[.elapsed:])
metricName: max-memory-sum-infra
instant: true
# Monitoring and ingress
- query: avg(avg_over_time(sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-monitoring", pod=~"prometheus-k8s.+"}[2m])) by (pod)[.elapsed:]))
metricName: cpu-prometheus
instant: true
- query: max(max_over_time(sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-monitoring", pod=~"prometheus-k8s.+"}[2m])) by (pod)[.elapsed:]))
metricName: max-cpu-prometheus
instant: true
- query: avg(avg_over_time(sum(container_memory_rss{name!="", namespace="openshift-monitoring", pod=~"prometheus-k8s.+"}) by (pod)[.elapsed:]))
metricName: memory-prometheus
instant: true
- query: max(max_over_time(sum(container_memory_rss{name!="", namespace="openshift-monitoring", pod=~"prometheus-k8s.+"}) by (pod)[.elapsed:]))
metricName: max-memory-prometheus
instant: true
- query: avg(avg_over_time(sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-ingress", pod=~"router-default.+"}[2m])) by (pod)[.elapsed:]))
metricName: cpu-router
instant: true
- query: avg(avg_over_time(sum(container_memory_rss{name!="", namespace="openshift-ingress", pod=~"router-default.+"}) by (pod)[.elapsed:]))
metricName: memory-router
instant: true
# Cluster
- query: avg_over_time(cluster:memory_usage:ratio[.elapsed:])
metricName: memory-cluster-usage-ratio
instant: true
- query: avg_over_time(cluster:node_cpu:ratio[.elapsed:])
metricName: cpu-cluster-usage-ratio
instant: true
# Retain the raw CPU seconds totals for comparison
- query: sum(node_cpu_seconds_total and on (instance) label_replace(kube_node_role{role="worker",role!="infra"}, "instance", "$1", "node", "(.+)")) by (mode)
metricName: nodeCPUSeconds-Workers
instant: true
- query: sum(node_cpu_seconds_total and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")) by (mode)
metricName: nodeCPUSeconds-Masters
instant: true
- query: sum(node_cpu_seconds_total and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (mode)
metricName: nodeCPUSeconds-Infra
instant: true
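
Unlike the fixed [2m] rate windows, the outer subquery window in these queries is written as the .elapsed placeholder. A minimal sketch (not part of the commit, with hypothetical timestamps) of how prometheus_plugin.metrics() resolves it: the chaos run's duration is rounded up to whole minutes and substituted into each query before execution.

    import math

    # Hypothetical run window (epoch seconds), roughly 12 minutes apart.
    start_time = 1700000000
    end_time = 1700000700

    # Mirrors the substitution added in prometheus_plugin.metrics() below.
    elapsed_time = str(math.ceil((end_time - start_time) / 60)) + "m"  # "12m"
    query = 'max(max_over_time(process_resident_memory_bytes{job="kubelet"}[.elapsed:]))'
    print(query.replace(".elapsed", elapsed_time))
    # max(max_over_time(process_resident_memory_bytes{job="kubelet"}[12m:]))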


@@ -1,13 +1,7 @@
 metrics:
   # API server
-  - query: histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{apiserver="kube-apiserver", verb!~"WATCH", subresource!="log"}[2m])) by (verb,resource,subresource,instance,le)) > 0
-    metricName: API99thLatency
-  - query: sum(irate(apiserver_request_total{apiserver="kube-apiserver",verb!="WATCH",subresource!="log"}[2m])) by (verb,instance,resource,code) > 0
-    metricName: APIRequestRate
-  - query: sum(apiserver_current_inflight_requests{}) by (request_kind) > 0
-    metricName: APIInflightRequests
   - query: irate(apiserver_request_total{verb="POST", resource="pods", subresource="binding",code="201"}[2m]) > 0
     metricName: schedulingThroughput
   # Containers & pod metrics
   - query: sum(irate(container_cpu_usage_seconds_total{name!="",namespace=~"openshift-(etcd|oauth-apiserver|.*apiserver|ovn-kubernetes|sdn|ingress|authentication|.*controller-manager|.*scheduler|monitoring|logging|image-registry)"}[2m]) * 100) by (pod, namespace, node)


@@ -2,10 +2,11 @@ from __future__ import annotations
 import datetime
 import os.path
+import math
 from typing import Optional, List, Dict, Any
 import urllib3
 import logging
 import urllib3
 import sys
 import yaml
@@ -25,8 +26,7 @@ def alerts(
     start_time,
     end_time,
     alert_profile,
-    elastic_collect_alerts,
-    elastic_alerts_index,
+    elastic_alerts_index
 ):
     if alert_profile is None or os.path.exists(alert_profile) is False:
@@ -46,6 +46,7 @@ def alerts(
     for alert in profile_yaml:
         if list(alert.keys()).sort() != ["expr", "description", "severity"].sort():
             logging.error(f"wrong alert {alert}, skipping")
+            continue
         processed_alert = prom_cli.process_alert(
             alert,
@@ -56,7 +57,6 @@ def alerts(
             processed_alert[0]
             and processed_alert[1]
             and elastic
-            and elastic_collect_alerts
         ):
             elastic_alert = ElasticAlert(
                 run_uuid=run_uuid,
@@ -156,15 +156,15 @@ def metrics(
     start_time,
     end_time,
     metrics_profile,
-    elastic_collect_metrics,
-    elastic_metrics_index,
+    elastic_metrics_index
 ) -> list[dict[str, list[(int, float)] | str]]:
-    metrics_list: list[dict[str, list[(int, float)] | str]] = []
     if metrics_profile is None or os.path.exists(metrics_profile) is False:
         logging.error(f"{metrics_profile} alert profile does not exist")
         sys.exit(1)
     with open(metrics_profile) as profile:
         profile_yaml = yaml.safe_load(profile)
         if not profile_yaml["metrics"] or not isinstance(profile_yaml["metrics"], list):
             logging.error(
                 f"{metrics_profile} wrong file format, alert profile must be "
@@ -172,30 +172,58 @@ def metrics(
                 f"expr, description, severity"
             )
             sys.exit(1)
+    elapsed_ceil = math.ceil((end_time - start_time)/ 60 )
+    elapsed_time = str(elapsed_ceil) + "m"
+    metrics_list: list[dict[str, int | float | str]] = []
     for metric_query in profile_yaml["metrics"]:
-        if (
+        query = metric_query['query']
+        # calculate elapsed time
+        if ".elapsed" in metric_query["query"]:
+            query = metric_query['query'].replace(".elapsed", elapsed_time)
+        if "instant" in list(metric_query.keys()) and metric_query['instant']:
+            metrics_result = prom_cli.process_query(
+                query
+            )
+        elif (
             list(metric_query.keys()).sort()
-            != ["query", "metricName", "instant"].sort()
+            == ["query", "metricName"].sort()
         ):
-            logging.error(f"wrong alert {metric_query}, skipping")
-        metrics_result = prom_cli.process_prom_query_in_range(
-            metric_query["query"],
-            start_time=datetime.datetime.fromtimestamp(start_time),
-            end_time=datetime.datetime.fromtimestamp(end_time),
-        )
-        metric = {"name": metric_query["metricName"], "values": []}
+            metrics_result = prom_cli.process_prom_query_in_range(
+                query,
+                start_time=datetime.datetime.fromtimestamp(start_time),
+                end_time=datetime.datetime.fromtimestamp(end_time), granularity=30
+            )
+        else:
+            logging.info('didnt match keys')
+            continue
         for returned_metric in metrics_result:
+            metric = {"query": query, "metricName": metric_query['metricName']}
+            for k,v in returned_metric['metric'].items():
+                metric[k] = v
             if "values" in returned_metric:
                 for value in returned_metric["values"]:
                     try:
-                        metric["values"].append((value[0], float(value[1])))
+                        metric['timestamp'] = str(datetime.datetime.fromtimestamp(value[0]))
+                        metric["value"] = float(value[1])
+                        # want double array of the known details and the metrics specific to each call
+                        metrics_list.append(metric.copy())
                     except ValueError:
                         pass
-        metrics_list.append(metric)
+            elif "value" in returned_metric:
+                try:
+                    value = returned_metric["value"]
+                    metric['timestamp'] = str(datetime.datetime.fromtimestamp(value[0]))
+                    metric["value"] = float(value[1])
+                    # want double array of the known details and the metrics specific to each call
+                    metrics_list.append(metric.copy())
+                except ValueError:
+                    pass
-    if elastic_collect_metrics and elastic:
+    if elastic:
         result = elastic.upload_metrics_to_elasticsearch(
             run_uuid=run_uuid, index=elastic_metrics_index, raw_data=metrics_list
         )
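
The reworked loop flattens every Prometheus response into per-sample records, instead of one dict with a "values" array per query. A small self-contained sketch (not part of the commit; the response payload below is hypothetical) of the record shape that ends up in metrics_list and in Elasticsearch:

    import datetime

    returned_metric = {                    # hypothetical instant-query result
        "metric": {"request_kind": "readOnly"},
        "value": [1700000700, "42"],       # [timestamp, value-as-string]
    }
    query = 'sum(apiserver_current_inflight_requests{}) by (request_kind) > 0'

    metric = {"query": query, "metricName": "APIInflightRequests"}
    for k, v in returned_metric["metric"].items():  # copy the series labels
        metric[k] = v
    value = returned_metric["value"]
    metric["timestamp"] = str(datetime.datetime.fromtimestamp(value[0]))
    metric["value"] = float(value[1])
    print(metric)
    # {'query': '...', 'metricName': 'APIInflightRequests',
    #  'request_kind': 'readOnly', 'timestamp': '2023-11-...', 'value': 42.0}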


@@ -35,6 +35,7 @@ werkzeug==3.0.6
wheel==0.42.0
zope.interface==5.4.0
git+https://github.com/krkn-chaos/arcaflow-plugin-kill-pod.git@v0.1.0
git+https://github.com/vmware/vsphere-automation-sdk-python.git@v8.0.0.0
cryptography>=42.0.4 # not directly required, pinned by Snyk to avoid a vulnerability


@@ -91,13 +91,6 @@ def main(cfg) -> int:
     )
     # elastic search
     enable_elastic = get_yaml_item_value(config["elastic"], "enable_elastic", False)
-    elastic_collect_metrics = get_yaml_item_value(
-        config["elastic"], "collect_metrics", False
-    )
-    elastic_colllect_alerts = get_yaml_item_value(
-        config["elastic"], "collect_alerts", False
-    )
     elastic_url = get_yaml_item_value(config["elastic"], "elastic_url", "")
@@ -210,7 +203,7 @@ def main(cfg) -> int:
         else:
             # If can't make a connection, set alerts to false
             enable_alerts = False
-            critical_alerts = False
+            check_critical_alerts = False
     except Exception:
         logging.error(
             "invalid distribution selected, running openshift scenarios against kubernetes cluster."
@@ -230,6 +223,7 @@ def main(cfg) -> int:
             safe_logger, ocpcli, telemetry_request_id, config["telemetry"]
         )
     if enable_elastic:
+        logging.info(f"Elastic collection enabled at: {elastic_url}:{elastic_port}")
         elastic_search = KrknElastic(
             safe_logger,
             elastic_url,
@@ -489,8 +483,7 @@ def main(cfg) -> int:
                 start_time,
                 end_time,
                 alert_profile,
-                elastic_colllect_alerts,
-                elastic_alerts_index,
+                elastic_alerts_index
             )
         else:
@@ -498,15 +491,15 @@ def main(cfg) -> int:
             return 1
             # sys.exit(1)
     if enable_metrics:
+        logging.info(f'Capturing metrics using file {metrics_profile}')
         prometheus_plugin.metrics(
             prometheus,
             elastic_search,
-            start_time,
             run_uuid,
+            start_time,
             end_time,
             metrics_profile,
-            elastic_collect_metrics,
-            elastic_metrics_index,
+            elastic_metrics_index
         )
     if post_critical_alerts > 0: