Mirror of https://github.com/krkn-chaos/krkn.git, synced 2026-02-14 18:10:00 +00:00
Implemented the new pod monitoring API in the kill pod and kill container scenarios (#896)
* implemented the new pod monitoring api
* minor refactoring
* krkn-lib 5.1.5 update

Signed-off-by: Tullio Sebastiani <tsebasti@redhat.com>
This commit is contained in:
committed by GitHub
parent 194e3b87ee
commit 9b930a02a5
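The substance of the change: both plugins drop the PodsMonitorPool in favor of krkn-lib's new pod_monitor functions, which start watching the target pods before the fault is injected and hand back a future that is later resolved into a pods status snapshot. A hedged sketch of the resulting flow, assuming the krkn-lib 5.1.5 signatures exactly as they appear in the diff below; the kubeconfig path, namespace pattern, label selector, and timeout are illustrative placeholders, not part of this commit:

import logging

from krkn_lib.k8s import KrknKubernetes
from krkn_lib.k8s.pod_monitor import select_and_monitor_by_namespace_pattern_and_label

# Placeholder client setup; the kubeconfig_path argument is an assumption.
kubecli = KrknKubernetes(kubeconfig_path="~/.kube/config")

# Start watching the target pods first; the call returns a future immediately.
future_snapshot = select_and_monitor_by_namespace_pattern_and_label(
    namespace_pattern="^default$",              # placeholder namespace pattern
    label_selector="app=nginx",                 # placeholder label selector
    max_timeout=120,                            # placeholder recovery window (seconds)
    field_selector="status.phase=Running",
    v1_client=kubecli.cli,
)

# ... inject the fault here (kill pods or containers) ...

# Resolve the future into a snapshot and check whether the pods recovered.
snapshot = future_snapshot.result()
result = snapshot.get_pods_status()
if result.error:
    logging.error(f"pods failed to recover: {result.error}")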
@@ -1,10 +1,10 @@
 import logging
 import random
 import time
 
+from asyncio import Future
 import yaml
 from krkn_lib.k8s import KrknKubernetes
-from krkn_lib.k8s.pods_monitor_pool import PodsMonitorPool
+from krkn_lib.k8s.pod_monitor import select_and_monitor_by_namespace_pattern_and_label
 from krkn_lib.models.telemetry import ScenarioTelemetry
 from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
 from krkn_lib.utils import get_yaml_item_value
@@ -22,27 +22,21 @@ class ContainerScenarioPlugin(AbstractScenarioPlugin):
         lib_telemetry: KrknTelemetryOpenshift,
         scenario_telemetry: ScenarioTelemetry,
     ) -> int:
-        pool = PodsMonitorPool(lib_telemetry.get_lib_kubernetes())
         try:
             with open(scenario, "r") as f:
                 cont_scenario_config = yaml.full_load(f)
 
                 for kill_scenario in cont_scenario_config["scenarios"]:
-                    self.start_monitoring(
-                        kill_scenario, pool
+                    future_snapshot = self.start_monitoring(
+                        kill_scenario,
+                        lib_telemetry
                     )
-                    killed_containers = self.container_killing_in_pod(
+                    self.container_killing_in_pod(
                         kill_scenario, lib_telemetry.get_lib_kubernetes()
                     )
-                    result = pool.join()
-                    if result.error:
-                        logging.error(
-                            f"ContainerScenarioPlugin pods failed to recovery: {result.error}"
-                        )
-                        return 1
-                    scenario_telemetry.affected_pods = result
+                    snapshot = future_snapshot.result()
+                    result = snapshot.get_pods_status()
+                    scenario_telemetry.affected_pods = result
 
         except (RuntimeError, Exception):
             logging.error("ContainerScenarioPlugin exiting due to Exception %s")
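The ordering in the rewritten run() above is the point of the new API: monitoring starts before container_killing_in_pod runs and is only joined afterwards, so the whole kill-and-recover window is observed. The same pattern illustrated with only the standard library; monitor_pods and kill_container are hypothetical stand-ins, not krkn code:

import time
from concurrent.futures import Future, ThreadPoolExecutor

def monitor_pods(timeout_s: int) -> str:
    # Stand-in for select_and_monitor_*: watches pods until recovery or timeout.
    time.sleep(timeout_s)
    return "pods-snapshot"

def kill_container() -> None:
    # Stand-in for container_killing_in_pod: injects the fault.
    time.sleep(1)

with ThreadPoolExecutor(max_workers=1) as executor:
    future: Future = executor.submit(monitor_pods, 5)  # start watching first
    kill_container()                                   # inject while the watch runs
    snapshot = future.result()                         # block until the watch completes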
@@ -53,17 +47,18 @@ class ContainerScenarioPlugin(AbstractScenarioPlugin):
     def get_scenario_types(self) -> list[str]:
         return ["container_scenarios"]
 
-    def start_monitoring(self, kill_scenario: dict, pool: PodsMonitorPool):
+    def start_monitoring(self, kill_scenario: dict, lib_telemetry: KrknTelemetryOpenshift) -> Future:
+
         namespace_pattern = f"^{kill_scenario['namespace']}$"
         label_selector = kill_scenario["label_selector"]
         recovery_time = kill_scenario["expected_recovery_time"]
-        pool.select_and_monitor_by_namespace_pattern_and_label(
+        future_snapshot = select_and_monitor_by_namespace_pattern_and_label(
             namespace_pattern=namespace_pattern,
             label_selector=label_selector,
             max_timeout=recovery_time,
-            field_selector="status.phase=Running"
+            field_selector="status.phase=Running",
+            v1_client=lib_telemetry.get_lib_kubernetes().cli
         )
+        return future_snapshot
 
     def container_killing_in_pod(self, cont_scenario, kubecli: KrknKubernetes):
         scenario_name = get_yaml_item_value(cont_scenario, "name", "")
@@ -1,14 +1,16 @@
 import logging
 import random
 import time
+from asyncio import Future
 
 import yaml
 from krkn_lib.k8s import KrknKubernetes
-from krkn_lib.k8s.pods_monitor_pool import PodsMonitorPool
+from krkn_lib.k8s.pod_monitor import select_and_monitor_by_namespace_pattern_and_label, \
+    select_and_monitor_by_name_pattern_and_namespace_pattern
 
 from krkn.scenario_plugins.pod_disruption.models.models import InputParams
 from krkn_lib.models.telemetry import ScenarioTelemetry
 from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
 from krkn_lib.utils import get_yaml_item_value
 from datetime import datetime
 from dataclasses import dataclass
@@ -29,31 +31,23 @@ class PodDisruptionScenarioPlugin(AbstractScenarioPlugin):
         lib_telemetry: KrknTelemetryOpenshift,
         scenario_telemetry: ScenarioTelemetry,
     ) -> int:
-        pool = PodsMonitorPool(lib_telemetry.get_lib_kubernetes())
         try:
             with open(scenario, "r") as f:
                 cont_scenario_config = yaml.full_load(f)
                 for kill_scenario in cont_scenario_config:
                     kill_scenario_config = InputParams(kill_scenario["config"])
-                    self.start_monitoring(
-                        kill_scenario_config, pool
+                    future_snapshot = self.start_monitoring(
+                        kill_scenario_config,
+                        lib_telemetry
                     )
-                    return_status = self.killing_pods(
+                    self.killing_pods(
                         kill_scenario_config, lib_telemetry.get_lib_kubernetes()
                     )
-                    if return_status != 0:
-                        result = pool.cancel()
-                    else:
-                        result = pool.join()
-                    if result.error:
-                        logging.error(
-                            f"PodDisruptionScenariosPlugin pods failed to recovery: {result.error}"
-                        )
-                        return 1
-
-                    scenario_telemetry.affected_pods = result
+                    snapshot = future_snapshot.result()
+                    result = snapshot.get_pods_status()
+                    scenario_telemetry.affected_pods = result
 
         except (RuntimeError, Exception) as e:
             logging.error("PodDisruptionScenariosPlugin exiting due to Exception %s" % e)
@@ -64,7 +58,7 @@ class PodDisruptionScenarioPlugin(AbstractScenarioPlugin):
     def get_scenario_types(self) -> list[str]:
         return ["pod_disruption_scenarios"]
 
-    def start_monitoring(self, kill_scenario: InputParams, pool: PodsMonitorPool):
+    def start_monitoring(self, kill_scenario: InputParams, lib_telemetry: KrknTelemetryOpenshift) -> Future:
+
         recovery_time = kill_scenario.krkn_pod_recovery_time
         if (
@@ -73,16 +67,17 @@ class PodDisruptionScenarioPlugin(AbstractScenarioPlugin):
         ):
             namespace_pattern = kill_scenario.namespace_pattern
             label_selector = kill_scenario.label_selector
-            pool.select_and_monitor_by_namespace_pattern_and_label(
+            future_snapshot = select_and_monitor_by_namespace_pattern_and_label(
                 namespace_pattern=namespace_pattern,
                 label_selector=label_selector,
                 max_timeout=recovery_time,
-                field_selector="status.phase=Running"
+                field_selector="status.phase=Running",
+                v1_client=lib_telemetry.get_lib_kubernetes().cli
             )
             logging.info(
                 f"waiting up to {recovery_time} seconds for pod recovery, "
                 f"pod label pattern: {label_selector} namespace pattern: {namespace_pattern}"
             )
+            return future_snapshot
 
         elif (
             kill_scenario.namespace_pattern
@@ -90,16 +85,17 @@ class PodDisruptionScenarioPlugin(AbstractScenarioPlugin):
         ):
             namespace_pattern = kill_scenario.namespace_pattern
             name_pattern = kill_scenario.name_pattern
-            pool.select_and_monitor_by_name_pattern_and_namespace_pattern(
+            future_snapshot = select_and_monitor_by_name_pattern_and_namespace_pattern(
                 pod_name_pattern=name_pattern,
                 namespace_pattern=namespace_pattern,
                 max_timeout=recovery_time,
-                field_selector="status.phase=Running"
+                field_selector="status.phase=Running",
+                v1_client=lib_telemetry.get_lib_kubernetes().cli
             )
             logging.info(
                 f"waiting up to {recovery_time} seconds for pod recovery, "
                 f"pod name pattern: {name_pattern} namespace pattern: {namespace_pattern}"
             )
+            return future_snapshot
         else:
             raise Exception(
                 f"impossible to determine monitor parameters, check {kill_scenario} configuration"
             )
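The two branches above map onto the two ways a pod_disruption scenario can select its targets: a scenario must set either namespace_pattern plus label_selector, or namespace_pattern plus name_pattern, otherwise start_monitoring raises. Illustrative scenario entries as they would look after yaml.full_load; the field names follow the InputParams usage in this diff, and the values are placeholders:

# Two hypothetical pod_disruption scenario entries, one per branch.
scenarios = [
    {"config": {
        "namespace_pattern": "^openshift-monitoring$",
        "label_selector": "app=prometheus",        # first branch: namespace + label
        "krkn_pod_recovery_time": 120,
    }},
    {"config": {
        "namespace_pattern": "^openshift-etcd$",
        "name_pattern": "etcd-.*",                 # second branch: namespace + name
        "krkn_pod_recovery_time": 120,
    }},
]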
@@ -16,7 +16,7 @@ google-cloud-compute==1.22.0
 ibm_cloud_sdk_core==3.18.0
 ibm_vpc==0.20.0
 jinja2==3.1.6
-krkn-lib==5.1.2
+krkn-lib==5.1.5
 lxml==5.1.0
 kubernetes==28.1.0
 numpy==1.26.4