mirror of
https://github.com/krkn-chaos/krkn.git
synced 2026-02-19 20:40:33 +00:00
Compare commits
20 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
4f250c9601 | ||
|
|
6480adc00a | ||
|
|
5002f210ae | ||
|
|
62c5afa9a2 | ||
|
|
c109fc0b17 | ||
|
|
fff675f3dd | ||
|
|
c125e5acf7 | ||
|
|
ca6995a1a1 | ||
|
|
50cf91ac9e | ||
|
|
11069c6982 | ||
|
|
106d9bf1ae | ||
|
|
17f832637c | ||
|
|
0e5c8c55a4 | ||
|
|
9d9a6f9b80 | ||
|
|
f8fe2ae5b7 | ||
|
|
77b1dd32c7 | ||
|
|
9df727ccf5 | ||
|
|
70c8fec705 | ||
|
|
0731144a6b | ||
|
|
9337052e7b |
49
.github/workflows/tests.yml
vendored
49
.github/workflows/tests.yml
vendored
@@ -14,34 +14,16 @@ jobs:
|
||||
uses: actions/checkout@v3
|
||||
- name: Create multi-node KinD cluster
|
||||
uses: redhat-chaos/actions/kind@main
|
||||
- name: Install Helm & add repos
|
||||
run: |
|
||||
curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
|
||||
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
|
||||
helm repo add stable https://charts.helm.sh/stable
|
||||
helm repo update
|
||||
- name: Deploy prometheus & Port Forwarding
|
||||
run: |
|
||||
kubectl create namespace prometheus-k8s
|
||||
helm install \
|
||||
--wait --timeout 360s \
|
||||
kind-prometheus \
|
||||
prometheus-community/kube-prometheus-stack \
|
||||
--namespace prometheus-k8s \
|
||||
--set prometheus.service.nodePort=30000 \
|
||||
--set prometheus.service.type=NodePort \
|
||||
--set grafana.service.nodePort=31000 \
|
||||
--set grafana.service.type=NodePort \
|
||||
--set alertmanager.service.nodePort=32000 \
|
||||
--set alertmanager.service.type=NodePort \
|
||||
--set prometheus-node-exporter.service.nodePort=32001 \
|
||||
--set prometheus-node-exporter.service.type=NodePort \
|
||||
--set prometheus.prometheusSpec.maximumStartupDurationSeconds=300
|
||||
|
||||
SELECTOR=`kubectl -n prometheus-k8s get service kind-prometheus-kube-prome-prometheus -o wide --no-headers=true | awk '{ print $7 }'`
|
||||
POD_NAME=`kubectl -n prometheus-k8s get pods --selector="$SELECTOR" --no-headers=true | awk '{ print $1 }'`
|
||||
kubectl -n prometheus-k8s port-forward $POD_NAME 9090:9090 &
|
||||
sleep 5
|
||||
uses: redhat-chaos/actions/prometheus@main
|
||||
|
||||
- name: Deploy Elasticsearch
|
||||
with:
|
||||
ELASTIC_URL: ${{ vars.ELASTIC_URL }}
|
||||
ELASTIC_PORT: ${{ vars.ELASTIC_PORT }}
|
||||
ELASTIC_USER: ${{ vars.ELASTIC_USER }}
|
||||
ELASTIC_PASSWORD: ${{ vars.ELASTIC_PASSWORD }}
|
||||
uses: redhat-chaos/actions/elastic@main
|
||||
- name: Install Python
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
@@ -55,6 +37,8 @@ jobs:
|
||||
|
||||
- name: Deploy test workloads
|
||||
run: |
|
||||
es_pod_name=$(kubectl get pods -l "app.kubernetes.io/instance=elasticsearch" -o name)
|
||||
kubectl --namespace default port-forward $es_pod_name 9200 &
|
||||
kubectl apply -f CI/templates/outage_pod.yaml
|
||||
kubectl wait --for=condition=ready pod -l scenario=outage --timeout=300s
|
||||
kubectl apply -f CI/templates/container_scenario_pod.yaml
|
||||
@@ -79,6 +63,9 @@ jobs:
|
||||
yq -i '.kraken.port="8081"' CI/config/common_test_config.yaml
|
||||
yq -i '.kraken.signal_address="0.0.0.0"' CI/config/common_test_config.yaml
|
||||
yq -i '.kraken.performance_monitoring="localhost:9090"' CI/config/common_test_config.yaml
|
||||
yq -i '.elastic.elastic_port=9200' CI/config/common_test_config.yaml
|
||||
yq -i '.elastic.elastic_url="https://localhost"' CI/config/common_test_config.yaml
|
||||
yq -i '.elastic.enable_elastic=True' CI/config/common_test_config.yaml
|
||||
echo "test_service_hijacking" > ./CI/tests/functional_tests
|
||||
echo "test_app_outages" >> ./CI/tests/functional_tests
|
||||
echo "test_container" >> ./CI/tests/functional_tests
|
||||
@@ -89,6 +76,7 @@ jobs:
|
||||
echo "test_cpu_hog" >> ./CI/tests/functional_tests
|
||||
echo "test_memory_hog" >> ./CI/tests/functional_tests
|
||||
echo "test_io_hog" >> ./CI/tests/functional_tests
|
||||
echo "test_pod_network_filter" >> ./CI/tests/functional_tests
|
||||
|
||||
|
||||
# Push on main only steps + all other functional to collect coverage
|
||||
@@ -106,6 +94,8 @@ jobs:
|
||||
yq -i '.kraken.port="8081"' CI/config/common_test_config.yaml
|
||||
yq -i '.kraken.signal_address="0.0.0.0"' CI/config/common_test_config.yaml
|
||||
yq -i '.kraken.performance_monitoring="localhost:9090"' CI/config/common_test_config.yaml
|
||||
yq -i '.elastic.elastic_port=9200' CI/config/common_test_config.yaml
|
||||
yq -i '.elastic.elastic_url="https://localhost"' CI/config/common_test_config.yaml
|
||||
yq -i '.telemetry.username="${{secrets.TELEMETRY_USERNAME}}"' CI/config/common_test_config.yaml
|
||||
yq -i '.telemetry.password="${{secrets.TELEMETRY_PASSWORD}}"' CI/config/common_test_config.yaml
|
||||
echo "test_telemetry" > ./CI/tests/functional_tests
|
||||
@@ -119,6 +109,7 @@ jobs:
|
||||
echo "test_cpu_hog" >> ./CI/tests/functional_tests
|
||||
echo "test_memory_hog" >> ./CI/tests/functional_tests
|
||||
echo "test_io_hog" >> ./CI/tests/functional_tests
|
||||
echo "test_pod_network_filter" >> ./CI/tests/functional_tests
|
||||
|
||||
# Final common steps
|
||||
- name: Run Functional tests
|
||||
@@ -129,20 +120,24 @@ jobs:
|
||||
cat ./CI/results.markdown >> $GITHUB_STEP_SUMMARY
|
||||
echo >> $GITHUB_STEP_SUMMARY
|
||||
- name: Upload CI logs
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ci-logs
|
||||
path: CI/out
|
||||
if-no-files-found: error
|
||||
- name: Collect coverage report
|
||||
if: ${{ success() || failure() }}
|
||||
run: |
|
||||
python -m coverage html
|
||||
python -m coverage json
|
||||
- name: Publish coverage report to job summary
|
||||
if: ${{ success() || failure() }}
|
||||
run: |
|
||||
pip install html2text
|
||||
html2text --ignore-images --ignore-links -b 0 htmlcov/index.html >> $GITHUB_STEP_SUMMARY
|
||||
- name: Upload coverage data
|
||||
if: ${{ success() || failure() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: coverage
|
||||
|
||||
@@ -10,8 +10,6 @@ cerberus:
|
||||
cerberus_url: # When cerberus_enabled is set to True, provide the url where cerberus publishes go/no-go signal.
|
||||
|
||||
performance_monitoring:
|
||||
deploy_dashboards: False # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift.
|
||||
repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
|
||||
capture_metrics: False
|
||||
metrics_profile_path: config/metrics-aggregated.yaml
|
||||
prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
|
||||
|
||||
@@ -8,9 +8,9 @@ spec:
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: fedtools
|
||||
image: docker.io/fedora/tools
|
||||
image: quay.io/krkn-chaos/krkn:tools
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- |
|
||||
sleep infinity
|
||||
sleep infinity
|
||||
|
||||
29
CI/templates/pod_network_filter.yaml
Normal file
29
CI/templates/pod_network_filter.yaml
Normal file
@@ -0,0 +1,29 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: pod-network-filter-test
|
||||
labels:
|
||||
app.kubernetes.io/name: pod-network-filter
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: quay.io/krkn-chaos/krkn-funtests:pod-network-filter
|
||||
ports:
|
||||
- containerPort: 5000
|
||||
name: pod-network-prt
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: pod-network-filter-service
|
||||
spec:
|
||||
selector:
|
||||
app.kubernetes.io/name: pod-network-filter
|
||||
type: NodePort
|
||||
ports:
|
||||
- name: pod-network-filter-svc
|
||||
protocol: TCP
|
||||
port: 80
|
||||
targetPort: pod-network-prt
|
||||
nodePort: 30037
|
||||
@@ -8,9 +8,9 @@ spec:
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: fedtools
|
||||
image: docker.io/fedora/tools
|
||||
image: quay.io/krkn-chaos/krkn:tools
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- |
|
||||
sleep infinity
|
||||
sleep infinity
|
||||
|
||||
@@ -13,6 +13,10 @@ function functional_test_app_outage {
|
||||
export scenario_type="application_outages_scenarios"
|
||||
export scenario_file="scenarios/openshift/app_outage.yaml"
|
||||
export post_config=""
|
||||
|
||||
kubectl get services -A
|
||||
|
||||
kubectl get pods
|
||||
envsubst < CI/config/common_test_config.yaml > CI/config/app_outage.yaml
|
||||
python3 -m coverage run -a run_kraken.py -c CI/config/app_outage.yaml
|
||||
echo "App outage scenario test: Success"
|
||||
|
||||
@@ -7,7 +7,7 @@ trap finish EXIT
|
||||
|
||||
|
||||
function functional_test_cpu_hog {
|
||||
yq -i '.node_selector="kubernetes.io/hostname=kind-worker2"' scenarios/kube/cpu-hog.yml
|
||||
yq -i '."node-selector"="kubernetes.io/hostname=kind-worker2"' scenarios/kube/cpu-hog.yml
|
||||
|
||||
export scenario_type="hog_scenarios"
|
||||
export scenario_file="scenarios/kube/cpu-hog.yml"
|
||||
|
||||
@@ -5,12 +5,13 @@ source CI/tests/common.sh
|
||||
trap error ERR
|
||||
trap finish EXIT
|
||||
|
||||
|
||||
function functional_test_io_hog {
|
||||
yq -i '.node_selector="kubernetes.io/hostname=kind-worker2"' scenarios/kube/io-hog.yml
|
||||
yq -i '."node-selector"="kubernetes.io/hostname=kind-worker2"' scenarios/kube/io-hog.yml
|
||||
export scenario_type="hog_scenarios"
|
||||
export scenario_file="scenarios/kube/io-hog.yml"
|
||||
export post_config=""
|
||||
|
||||
cat $scenario_file
|
||||
envsubst < CI/config/common_test_config.yaml > CI/config/io_hog.yaml
|
||||
python3 -m coverage run -a run_kraken.py -c CI/config/io_hog.yaml
|
||||
echo "IO Hog: Success"
|
||||
|
||||
@@ -7,7 +7,7 @@ trap finish EXIT
|
||||
|
||||
|
||||
function functional_test_memory_hog {
|
||||
yq -i '.node_selector="kubernetes.io/hostname=kind-worker2"' scenarios/kube/memory-hog.yml
|
||||
yq -i '."node-selector"="kubernetes.io/hostname=kind-worker2"' scenarios/kube/memory-hog.yml
|
||||
export scenario_type="hog_scenarios"
|
||||
export scenario_file="scenarios/kube/memory-hog.yml"
|
||||
export post_config=""
|
||||
|
||||
59
CI/tests/test_pod_network_filter.sh
Executable file
59
CI/tests/test_pod_network_filter.sh
Executable file
@@ -0,0 +1,59 @@
|
||||
function functional_pod_network_filter {
|
||||
export SERVICE_URL="http://localhost:8889"
|
||||
export scenario_type="network_chaos_ng_scenarios"
|
||||
export scenario_file="scenarios/kube/pod-network-filter.yml"
|
||||
export post_config=""
|
||||
envsubst < CI/config/common_test_config.yaml > CI/config/pod_network_filter.yaml
|
||||
yq -i '.[0].test_duration=10' scenarios/kube/pod-network-filter.yml
|
||||
yq -i '.[0].label_selector=""' scenarios/kube/pod-network-filter.yml
|
||||
yq -i '.[0].ingress=false' scenarios/kube/pod-network-filter.yml
|
||||
yq -i '.[0].egress=true' scenarios/kube/pod-network-filter.yml
|
||||
yq -i '.[0].target="pod-network-filter-test"' scenarios/kube/pod-network-filter.yml
|
||||
yq -i '.[0].protocols=["tcp"]' scenarios/kube/pod-network-filter.yml
|
||||
yq -i '.[0].ports=[443]' scenarios/kube/pod-network-filter.yml
|
||||
|
||||
|
||||
## Test webservice deployment
|
||||
kubectl apply -f ./CI/templates/pod_network_filter.yaml
|
||||
COUNTER=0
|
||||
while true
|
||||
do
|
||||
curl $SERVICE_URL
|
||||
EXITSTATUS=$?
|
||||
if [ "$EXITSTATUS" -eq "0" ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
COUNTER=$((COUNTER+1))
|
||||
[ $COUNTER -eq "100" ] && echo "maximum number of retry reached, test failed" && exit 1
|
||||
done
|
||||
|
||||
python3 -m coverage run -a run_kraken.py -c CI/config/pod_network_filter.yaml > /dev/null 2>&1 &
|
||||
PID=$!
|
||||
|
||||
# wait until the dns resolution starts failing and the service returns 400
|
||||
DNS_FAILURE_STATUS=0
|
||||
while true
|
||||
do
|
||||
OUT_STATUS_CODE=$(curl -X GET -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL)
|
||||
if [ "$OUT_STATUS_CODE" -eq "404" ]
|
||||
then
|
||||
DNS_FAILURE_STATUS=404
|
||||
fi
|
||||
|
||||
if [ "$DNS_FAILURE_STATUS" -eq "404" ] && [ "$OUT_STATUS_CODE" -eq "200" ]
|
||||
then
|
||||
echo "service restored"
|
||||
break
|
||||
fi
|
||||
COUNTER=$((COUNTER+1))
|
||||
[ $COUNTER -eq "100" ] && echo "maximum number of retry reached, test failed" && exit 1
|
||||
sleep 2
|
||||
done
|
||||
|
||||
wait $PID
|
||||
}
|
||||
|
||||
functional_pod_network_filter
|
||||
|
||||
10
ROADMAP.md
10
ROADMAP.md
@@ -2,11 +2,11 @@
|
||||
|
||||
Following are a list of enhancements that we are planning to work on adding support in Krkn. Of course any help/contributions are greatly appreciated.
|
||||
|
||||
- [ ] [Ability to run multiple chaos scenarios in parallel under load to mimic real world outages](https://github.com/krkn-chaos/krkn/issues/424)
|
||||
- [x] [Ability to run multiple chaos scenarios in parallel under load to mimic real world outages](https://github.com/krkn-chaos/krkn/issues/424)
|
||||
- [x] [Centralized storage for chaos experiments artifacts](https://github.com/krkn-chaos/krkn/issues/423)
|
||||
- [ ] [Support for causing DNS outages](https://github.com/krkn-chaos/krkn/issues/394)
|
||||
- [x] [Support for causing DNS outages](https://github.com/krkn-chaos/krkn/issues/394)
|
||||
- [x] [Chaos recommender](https://github.com/krkn-chaos/krkn/tree/main/utils/chaos-recommender) to suggest scenarios having probability of impacting the service under test using profiling results
|
||||
- [] Chaos AI integration to improve test coverage while reducing fault space to save costs and execution time
|
||||
- [x] Chaos AI integration to improve test coverage while reducing fault space to save costs and execution time [krkn-chaos-ai](https://github.com/krkn-chaos/krkn-chaos-ai)
|
||||
- [x] [Support for pod level network traffic shaping](https://github.com/krkn-chaos/krkn/issues/393)
|
||||
- [ ] [Ability to visualize the metrics that are being captured by Kraken and stored in Elasticsearch](https://github.com/krkn-chaos/krkn/issues/124)
|
||||
- [x] Support for running all the scenarios of Kraken on Kubernetes distribution - see https://github.com/krkn-chaos/krkn/issues/185, https://github.com/redhat-chaos/krkn/issues/186
|
||||
@@ -14,3 +14,7 @@ Following are a list of enhancements that we are planning to work on adding supp
|
||||
- [x] [Switch documentation references to Kubernetes](https://github.com/krkn-chaos/krkn/issues/495)
|
||||
- [x] [OCP and Kubernetes functionalities segregation](https://github.com/krkn-chaos/krkn/issues/497)
|
||||
- [x] [Krknctl - client for running Krkn scenarios with ease](https://github.com/krkn-chaos/krknctl)
|
||||
- [x] [AI Chat bot to help get started with Krkn and commands](https://github.com/krkn-chaos/krkn-lightspeed)
|
||||
- [ ] [Ability to roll back cluster to original state if chaos fails](https://github.com/krkn-chaos/krkn/issues/804)
|
||||
- [ ] Add recovery time metrics to each scenario for each better regression analysis
|
||||
- [ ] [Add resiliency scoring to chaos scenarios ran on cluster](https://github.com/krkn-chaos/krkn/issues/125)
|
||||
@@ -46,7 +46,10 @@ kraken:
|
||||
- syn_flood_scenarios:
|
||||
- scenarios/kube/syn_flood.yaml
|
||||
- network_chaos_ng_scenarios:
|
||||
- scenarios/kube/network-filter.yml
|
||||
- scenarios/kube/pod-network-filter.yml
|
||||
- scenarios/kube/node-network-filter.yml
|
||||
- kubevirt_vm_outage:
|
||||
- scenarios/kubevirt/kubevirt-vm-outage.yaml
|
||||
|
||||
cerberus:
|
||||
cerberus_enabled: False # Enable it when cerberus is previously installed
|
||||
@@ -54,9 +57,7 @@ cerberus:
|
||||
check_applicaton_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
|
||||
|
||||
performance_monitoring:
|
||||
deploy_dashboards: False # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift
|
||||
repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
|
||||
prometheus_url: '' # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
|
||||
prometheus_url: '' # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
|
||||
prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
|
||||
uuid: # uuid for the run is generated by default if not set
|
||||
enable_alerts: False # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
|
||||
|
||||
@@ -7,10 +7,8 @@ kraken:
|
||||
signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer docs/signal.md for more details
|
||||
signal_address: 0.0.0.0 # Signal listening address
|
||||
chaos_scenarios: # List of policies/chaos scenarios to load
|
||||
- plugin_scenarios:
|
||||
- scenarios/kind/scheduler.yml
|
||||
- node_scenarios:
|
||||
- scenarios/kind/node_scenarios_example.yml
|
||||
- pod_disruption_scenarios:
|
||||
- scenarios/kube/pod.yml
|
||||
|
||||
cerberus:
|
||||
cerberus_enabled: False # Enable it when cerberus is previously installed
|
||||
@@ -18,15 +16,24 @@ cerberus:
|
||||
check_applicaton_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
|
||||
|
||||
performance_monitoring:
|
||||
deploy_dashboards: False # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift
|
||||
repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
|
||||
prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
|
||||
prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
|
||||
uuid: # uuid for the run is generated by default if not set
|
||||
enable_alerts: False # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
|
||||
alert_profile: config/alerts.yaml # Path to alert profile with the prometheus queries
|
||||
|
||||
elastic:
|
||||
enable_elastic: False
|
||||
|
||||
tunings:
|
||||
wait_duration: 60 # Duration to wait between each chaos scenario
|
||||
iterations: 1 # Number of times to execute the scenarios
|
||||
daemon_mode: False # Iterations are set to infinity which means that the kraken will cause chaos forever
|
||||
|
||||
telemetry:
|
||||
enabled: False # enable/disables the telemetry collection feature
|
||||
archive_path: /tmp # local path where the archive files will be temporarly stored
|
||||
events_backup: False # enables/disables cluster events collection
|
||||
logs_backup: False
|
||||
|
||||
health_checks: # Utilizing health check endpoints to observe application behavior during chaos injection.
|
||||
|
||||
@@ -17,8 +17,6 @@ cerberus:
|
||||
check_applicaton_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
|
||||
|
||||
performance_monitoring:
|
||||
deploy_dashboards: False # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift
|
||||
repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
|
||||
prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
|
||||
prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
|
||||
uuid: # uuid for the run is generated by default if not set
|
||||
|
||||
@@ -10,7 +10,7 @@ RUN go mod edit -go 1.23.1 &&\
|
||||
go get github.com/docker/docker@v25.0.6&&\
|
||||
go get github.com/opencontainers/runc@v1.1.14&&\
|
||||
go get github.com/go-git/go-git/v5@v5.13.0&&\
|
||||
go get golang.org/x/net@v0.36.0&&\
|
||||
go get golang.org/x/net@v0.38.0&&\
|
||||
go get github.com/containerd/containerd@v1.7.27&&\
|
||||
go get golang.org/x/oauth2@v0.27.0&&\
|
||||
go get golang.org/x/crypto@v0.35.0&&\
|
||||
@@ -47,7 +47,7 @@ RUN if [ -n "$PR_NUMBER" ]; then git fetch origin pull/${PR_NUMBER}/head:pr-${PR
|
||||
RUN if [ -n "$TAG" ]; then git checkout "$TAG";fi
|
||||
|
||||
RUN python3.9 -m ensurepip --upgrade --default-pip
|
||||
RUN python3.9 -m pip install --upgrade pip setuptools==70.0.0
|
||||
RUN python3.9 -m pip install --upgrade pip setuptools==78.1.1
|
||||
RUN pip3.9 install -r requirements.txt
|
||||
RUN pip3.9 install jsonschema
|
||||
|
||||
|
||||
@@ -5,6 +5,8 @@ nodes:
|
||||
extraPortMappings:
|
||||
- containerPort: 30036
|
||||
hostPort: 8888
|
||||
- containerPort: 30037
|
||||
hostPort: 8889
|
||||
- role: control-plane
|
||||
- role: control-plane
|
||||
- role: worker
|
||||
|
||||
@@ -19,7 +19,6 @@ def invoke_no_exit(command, timeout=None):
|
||||
output = ""
|
||||
try:
|
||||
output = subprocess.check_output(command, shell=True, universal_newlines=True, timeout=timeout)
|
||||
logging.info("output " + str(output))
|
||||
except Exception as e:
|
||||
logging.error("Failed to run %s, error: %s" % (command, e))
|
||||
return str(e)
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
import subprocess
|
||||
import logging
|
||||
import git
|
||||
import sys
|
||||
|
||||
|
||||
# Installs a mutable grafana on the Kubernetes/OpenShift cluster and loads the performance dashboards
|
||||
def setup(repo, distribution):
|
||||
if distribution == "kubernetes":
|
||||
command = "cd performance-dashboards/dittybopper && ./k8s-deploy.sh"
|
||||
elif distribution == "openshift":
|
||||
command = "cd performance-dashboards/dittybopper && ./deploy.sh"
|
||||
else:
|
||||
logging.error("Provided distribution: %s is not supported" % (distribution))
|
||||
sys.exit(1)
|
||||
delete_repo = "rm -rf performance-dashboards || exit 0"
|
||||
logging.info(
|
||||
"Cloning, installing mutable grafana on the cluster and loading the dashboards"
|
||||
)
|
||||
try:
|
||||
# delete repo to clone the latest copy if exists
|
||||
subprocess.run(delete_repo, shell=True, universal_newlines=True, timeout=45)
|
||||
# clone the repo
|
||||
git.Repo.clone_from(repo, "performance-dashboards")
|
||||
# deploy performance dashboards
|
||||
subprocess.run(command, shell=True, universal_newlines=True)
|
||||
except Exception as e:
|
||||
logging.error("Failed to install performance-dashboards, error: %s" % (e))
|
||||
@@ -9,6 +9,7 @@ import logging
|
||||
import urllib3
|
||||
import sys
|
||||
import json
|
||||
import tempfile
|
||||
|
||||
import yaml
|
||||
from krkn_lib.elastic.krkn_elastic import KrknElastic
|
||||
@@ -251,11 +252,29 @@ def metrics(
|
||||
metric[k] = v
|
||||
metric['timestamp'] = str(datetime.datetime.now())
|
||||
metrics_list.append(metric.copy())
|
||||
if elastic:
|
||||
|
||||
save_metrics = False
|
||||
if elastic is not None and elastic_metrics_index is not None:
|
||||
result = elastic.upload_metrics_to_elasticsearch(
|
||||
run_uuid=run_uuid, index=elastic_metrics_index, raw_data=metrics_list
|
||||
)
|
||||
if result == -1:
|
||||
logging.error("failed to save metrics on ElasticSearch")
|
||||
save_metrics = True
|
||||
else:
|
||||
save_metrics = True
|
||||
if save_metrics:
|
||||
local_dir = os.path.join(tempfile.gettempdir(), "krkn_metrics")
|
||||
os.makedirs(local_dir, exist_ok=True)
|
||||
local_file = os.path.join(local_dir, f"{elastic_metrics_index}_{run_uuid}.json")
|
||||
|
||||
try:
|
||||
with open(local_file, "w") as f:
|
||||
json.dump({
|
||||
"run_uuid": run_uuid,
|
||||
"metrics": metrics_list
|
||||
}, f, indent=2)
|
||||
logging.info(f"Metrics saved to {local_file}")
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to save metrics to {local_file}: {e}")
|
||||
return metrics_list
|
||||
|
||||
@@ -25,6 +25,10 @@ class HogsScenarioPlugin(AbstractScenarioPlugin):
|
||||
with open(scenario, "r") as f:
|
||||
scenario = yaml.full_load(f)
|
||||
scenario_config = HogConfig.from_yaml_dict(scenario)
|
||||
|
||||
# Get node-name if provided
|
||||
node_name = scenario.get('node-name')
|
||||
|
||||
has_selector = True
|
||||
if not scenario_config.node_selector or not re.match("^.+=.*$", scenario_config.node_selector):
|
||||
if scenario_config.node_selector:
|
||||
@@ -33,13 +37,19 @@ class HogsScenarioPlugin(AbstractScenarioPlugin):
|
||||
else:
|
||||
node_selector = scenario_config.node_selector
|
||||
|
||||
available_nodes = lib_telemetry.get_lib_kubernetes().list_nodes(node_selector)
|
||||
if len(available_nodes) == 0:
|
||||
raise Exception("no available nodes to schedule workload")
|
||||
if node_name:
|
||||
logging.info(f"Using specific node: {node_name}")
|
||||
all_nodes = lib_telemetry.get_lib_kubernetes().list_nodes("")
|
||||
if node_name not in all_nodes:
|
||||
raise Exception(f"Specified node {node_name} not found or not available")
|
||||
available_nodes = [node_name]
|
||||
else:
|
||||
available_nodes = lib_telemetry.get_lib_kubernetes().list_nodes(node_selector)
|
||||
if len(available_nodes) == 0:
|
||||
raise Exception("no available nodes to schedule workload")
|
||||
|
||||
if not has_selector:
|
||||
# if selector not specified picks a random node between the available
|
||||
available_nodes = [available_nodes[random.randint(0, len(available_nodes))]]
|
||||
if not has_selector:
|
||||
available_nodes = [available_nodes[random.randint(0, len(available_nodes))]]
|
||||
|
||||
if scenario_config.number_of_nodes and len(available_nodes) > scenario_config.number_of_nodes:
|
||||
available_nodes = random.sample(available_nodes, scenario_config.number_of_nodes)
|
||||
|
||||
@@ -0,0 +1,399 @@
|
||||
import logging
|
||||
import time
|
||||
from typing import Dict, Any, Optional
|
||||
import random
|
||||
import re
|
||||
import yaml
|
||||
from kubernetes.client.rest import ApiException
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
|
||||
from krkn_lib.utils import log_exception
|
||||
from krkn_lib.models.k8s import AffectedPod, PodsStatus
|
||||
|
||||
from krkn.scenario_plugins.abstract_scenario_plugin import AbstractScenarioPlugin
|
||||
|
||||
|
||||
class KubevirtVmOutageScenarioPlugin(AbstractScenarioPlugin):
|
||||
"""
|
||||
A scenario plugin that injects chaos by deleting a KubeVirt Virtual Machine Instance (VMI).
|
||||
This plugin simulates a VM crash or outage scenario and supports automated or manual recovery.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.k8s_client = None
|
||||
self.original_vmi = None
|
||||
|
||||
# Scenario type is handled directly in execute_scenario
|
||||
def get_scenario_types(self) -> list[str]:
|
||||
return ["kubevirt_vm_outage"]
|
||||
|
||||
def run(
|
||||
self,
|
||||
run_uuid: str,
|
||||
scenario: str,
|
||||
krkn_config: dict[str, any],
|
||||
lib_telemetry: KrknTelemetryOpenshift,
|
||||
scenario_telemetry: ScenarioTelemetry,
|
||||
) -> int:
|
||||
"""
|
||||
Main entry point for the plugin.
|
||||
Parses the scenario configuration and executes the chaos scenario.
|
||||
"""
|
||||
try:
|
||||
with open(scenario, "r") as f:
|
||||
scenario_config = yaml.full_load(f)
|
||||
|
||||
self.init_clients(lib_telemetry.get_lib_kubernetes())
|
||||
pods_status = PodsStatus()
|
||||
for config in scenario_config["scenarios"]:
|
||||
if config.get("scenario") == "kubevirt_vm_outage":
|
||||
single_pods_status = self.execute_scenario(config, scenario_telemetry)
|
||||
pods_status.merge(single_pods_status)
|
||||
|
||||
scenario_telemetry.affected_pods = pods_status
|
||||
|
||||
return 0
|
||||
except Exception as e:
|
||||
logging.error(f"KubeVirt VM Outage scenario failed: {e}")
|
||||
log_exception(e)
|
||||
return 1
|
||||
|
||||
def init_clients(self, k8s_client: KrknKubernetes):
|
||||
"""
|
||||
Initialize Kubernetes client for KubeVirt operations.
|
||||
"""
|
||||
self.k8s_client = k8s_client
|
||||
self.custom_object_client = k8s_client.custom_object_client
|
||||
logging.info("Successfully initialized Kubernetes client for KubeVirt operations")
|
||||
|
||||
def get_vmi(self, name: str, namespace: str) -> Optional[Dict]:
|
||||
"""
|
||||
Get a Virtual Machine Instance by name and namespace.
|
||||
|
||||
:param name: Name of the VMI to retrieve
|
||||
:param namespace: Namespace of the VMI
|
||||
:return: The VMI object if found, None otherwise
|
||||
"""
|
||||
try:
|
||||
vmi = self.custom_object_client.get_namespaced_custom_object(
|
||||
group="kubevirt.io",
|
||||
version="v1",
|
||||
namespace=namespace,
|
||||
plural="virtualmachineinstances",
|
||||
name=name
|
||||
)
|
||||
return vmi
|
||||
except ApiException as e:
|
||||
if e.status == 404:
|
||||
logging.warning(f"VMI {name} not found in namespace {namespace}")
|
||||
return None
|
||||
else:
|
||||
logging.error(f"Error getting VMI {name}: {e}")
|
||||
raise
|
||||
except Exception as e:
|
||||
logging.error(f"Unexpected error getting VMI {name}: {e}")
|
||||
raise
|
||||
|
||||
def get_vmis(self, regex_name: str, namespace: str) -> Optional[Dict]:
|
||||
"""
|
||||
Get a Virtual Machine Instance by name and namespace.
|
||||
|
||||
:param name: Name of the VMI to retrieve
|
||||
:param namespace: Namespace of the VMI
|
||||
:return: The VMI object if found, None otherwise
|
||||
"""
|
||||
try:
|
||||
vmis = self.custom_object_client.list_namespaced_custom_object(
|
||||
group="kubevirt.io",
|
||||
version="v1",
|
||||
namespace=namespace,
|
||||
plural="virtualmachineinstances",
|
||||
)
|
||||
|
||||
vmi_list = []
|
||||
for vmi in vmis.get("items"):
|
||||
vmi_name = vmi.get("metadata",{}).get("name")
|
||||
match = re.match(regex_name, vmi_name)
|
||||
if match:
|
||||
vmi_list.append(vmi)
|
||||
return vmi_list
|
||||
except ApiException as e:
|
||||
if e.status == 404:
|
||||
logging.warning(f"VMI {regex_name} not found in namespace {namespace}")
|
||||
return None
|
||||
else:
|
||||
logging.error(f"Error getting VMI {regex_name}: {e}")
|
||||
raise
|
||||
except Exception as e:
|
||||
logging.error(f"Unexpected error getting VMI {regex_name}: {e}")
|
||||
raise
|
||||
|
||||
def execute_scenario(self, config: Dict[str, Any], scenario_telemetry: ScenarioTelemetry) -> int:
    """
    Execute a KubeVirt VM outage scenario based on the provided configuration.

    Picks a random VMI whose name matches the ``vm_name`` regex, deletes it,
    waits for it to come back to Running state, and records recovery timings.

    :param config: The scenario configuration (expects a "parameters" mapping)
    :param scenario_telemetry: The telemetry object for recording metrics
    :return: a PodsStatus object describing the affected VMI on (partial)
        success paths, or 1 on parameter/validation failure
    """
    try:
        params = config.get("parameters", {})
        vm_name = params.get("vm_name")
        namespace = params.get("namespace", "default")
        timeout = params.get("timeout", 60)
        # NOTE(review): kill_count is read but never used below — confirm intent.
        kill_count = params.get("kill_count", 1)
        disable_auto_restart = params.get("disable_auto_restart", False)
        self.pods_status = PodsStatus()
        if not vm_name:
            logging.error("vm_name parameter is required")
            return 1
        vmis_list = self.get_vmis(vm_name, namespace)
        # get_vmis may return None (404) or an empty list when nothing matches;
        # without this guard random.randint(0, -1) would raise ValueError.
        if not vmis_list:
            logging.error(f"No VMIs matching {vm_name} found in namespace {namespace}")
            return 1
        vmi = vmis_list[random.randint(0, len(vmis_list) - 1)]

        logging.info(f"Starting KubeVirt VM outage scenario for VM: {vm_name} in namespace: {namespace}")
        vmi_name = vmi.get("metadata").get("name")
        if not self.validate_environment(vmi_name, namespace):
            return 1

        vmi = self.get_vmi(vmi_name, namespace)
        self.affected_pod = AffectedPod(
            pod_name=vmi_name,
            namespace=namespace,
        )
        if not vmi:
            logging.error(f"VMI {vm_name} not found in namespace {namespace}")
            return 1

        # Keep the pre-deletion state so recover() can recreate the VMI manually.
        self.original_vmi = vmi
        logging.info(f"Captured initial state of VMI: {vm_name}")
        result = self.delete_vmi(vmi_name, namespace, disable_auto_restart)
        if result != 0:
            return self.pods_status

        result = self.wait_for_running(vmi_name, namespace, timeout)
        if result != 0:
            self.recover(vmi_name, namespace)
            self.pods_status.unrecovered = self.affected_pod
            return self.pods_status

        # Total recovery = time until rescheduled + time until Running.
        self.affected_pod.total_recovery_time = (
            self.affected_pod.pod_readiness_time
            + self.affected_pod.pod_rescheduling_time
        )

        self.pods_status.recovered.append(self.affected_pod)
        logging.info(f"Successfully completed KubeVirt VM outage scenario for VM: {vm_name}")

        return self.pods_status

    except Exception as e:
        logging.error(f"Error executing KubeVirt VM outage scenario: {e}")
        log_exception(e)
        return 1
|
||||
|
||||
def validate_environment(self, vm_name: str, namespace: str) -> bool:
    """
    Validate that KubeVirt is installed and the specified VMI exists.

    :param vm_name: Name of the VMI to validate
    :param namespace: Namespace of the VM
    :return: True if environment is valid, False otherwise
    """
    try:
        # Listing kubevirt.io virtualmachines raises ApiException if the CRD
        # is not installed, which the except below turns into False.
        # (The previous `crd_list.items()` check iterated the response dict's
        # key/value pairs, not the VM list, so it amounted to "response dict
        # is non-empty" — made explicit here.)
        crd_list = self.custom_object_client.list_namespaced_custom_object(
            "kubevirt.io", "v1", namespace, "virtualmachines"
        )
        if not crd_list:
            logging.error("KubeVirt CRDs not found. Ensure KubeVirt/CNV is installed in the cluster")
            return False

        # Check if the target VMI exists
        vmi = self.get_vmi(vm_name, namespace)
        if not vmi:
            logging.error(f"VMI {vm_name} not found in namespace {namespace}")
            return False

        logging.info(f"Validated environment: KubeVirt is installed and VMI {vm_name} exists")
        return True

    except Exception as e:
        logging.error(f"Error validating environment: {e}")
        return False
|
||||
|
||||
def patch_vm_spec(self, vm_name: str, namespace: str, running: bool) -> bool:
    """
    Patch a VirtualMachine's spec to enable/disable auto-restart.

    Reads the current VirtualMachine object, sets ``spec.running`` to the
    requested value, and submits the modified object as the patch body.

    :param vm_name: Name of the VM to patch
    :param namespace: Namespace of the VM
    :param running: Desired value for ``spec.running``
    :return: True if the patch was applied, False otherwise
    """
    group, version, plural = "kubevirt.io", "v1", "virtualmachines"
    try:
        # Fetch the live object so the patch carries its full current spec.
        vm_object = self.custom_object_client.get_namespaced_custom_object(
            group=group,
            version=version,
            namespace=namespace,
            plural=plural,
            name=vm_name,
        )

        # Ensure a spec mapping exists, then record the desired running state.
        vm_object.setdefault('spec', {})['running'] = running

        # Submit the whole modified object back as the patch.
        self.custom_object_client.patch_namespaced_custom_object(
            group=group,
            version=version,
            namespace=namespace,
            plural=plural,
            name=vm_name,
            body=vm_object,
        )
        return True

    except ApiException as e:
        logging.error(f"Failed to patch VM {vm_name}: {e}")
        return False
    except Exception as e:
        logging.error(f"Unexpected error patching VM {vm_name}: {e}")
        return False
|
||||
|
||||
def delete_vmi(self, vm_name: str, namespace: str, disable_auto_restart: bool = False, timeout: int = 120) -> int:
    """
    Delete a Virtual Machine Instance to simulate a VM outage, then wait for
    the owning VirtualMachine to recreate it.

    :param vm_name: Name of the VMI to delete
    :param namespace: Namespace of the VMI
    :param disable_auto_restart: If True, set spec.running=False on the owning
        VM before deleting so it is not automatically restarted
    :param timeout: Seconds to wait for the VMI to reappear with a new
        creationTimestamp
    :return: 0 if the VMI was recreated within the timeout, 1 otherwise
    """
    try:
        logging.info(f"Injecting chaos: Deleting VMI {vm_name} in namespace {namespace}")

        # If auto-restart should be disabled, patch the VM spec first
        if disable_auto_restart:
            logging.info(f"Disabling auto-restart for VM {vm_name} by setting spec.running=False")
            if not self.patch_vm_spec(vm_name, namespace, running=False):
                logging.error("Failed to disable auto-restart for VM"
                              " - proceeding with deletion but VM may auto-restart")
        # The creation timestamp distinguishes the old VMI from a recreated one.
        start_creation_time = self.original_vmi.get('metadata', {}).get('creationTimestamp')
        start_time = time.time()
        try:
            self.custom_object_client.delete_namespaced_custom_object(
                group="kubevirt.io",
                version="v1",
                namespace=namespace,
                plural="virtualmachineinstances",
                name=vm_name
            )
        except ApiException as e:
            if e.status == 404:
                logging.warning(f"VMI {vm_name} not found during deletion")
                return 1
            else:
                logging.error(f"API error during VMI deletion: {e}")
                return 1

        # Poll until the VMI reappears with a new creationTimestamp, i.e. the
        # owning VirtualMachine has recreated it.
        deletion_logged = False
        while time.time() - start_time < timeout:
            current_vmi = self.get_vmi(vm_name, namespace)
            if current_vmi:
                if start_creation_time != current_vmi.get('metadata', {}).get('creationTimestamp'):
                    logging.info(f"VMI {vm_name} successfully recreated")
                    self.affected_pod.pod_rescheduling_time = time.time() - start_time
                    return 0
            elif not deletion_logged:
                # Log the completed deletion once, not on every poll iteration.
                logging.info(f"VMI {vm_name} successfully deleted")
                deletion_logged = True
            time.sleep(1)

        # NOTE(review): with disable_auto_restart=True the VMI is never
        # recreated, so this path always returns 1 — confirm that is intended.
        logging.error(f"Timed out waiting for VMI {vm_name} to be recreated")
        self.pods_status.unrecovered = self.affected_pod
        return 1

    except Exception as e:
        logging.error(f"Error deleting VMI {vm_name}: {e}")
        log_exception(e)
        self.pods_status.unrecovered = self.affected_pod
        return 1
|
||||
|
||||
def wait_for_running(self, vm_name: str, namespace: str, timeout: int = 120) -> int:
    """
    Poll until the VMI reaches the Running phase or the timeout expires.

    Records the elapsed time in ``self.affected_pod.pod_readiness_time`` when
    the VMI becomes Running.

    :param vm_name: Name of the VMI to wait for
    :param namespace: Namespace of the VMI
    :param timeout: Maximum number of seconds to wait
    :return: 0 if the VMI reached Running within the timeout, 1 otherwise
    """
    start_time = time.time()
    # Poll once per second until the deadline.
    while time.time() - start_time < timeout:
        vmi = self.get_vmi(vm_name, namespace)
        if vmi:
            phase = vmi.get('status', {}).get('phase')
            if phase == "Running":
                # Record how long the VMI took to become ready.
                self.affected_pod.pod_readiness_time = time.time() - start_time
                logging.info(f"VMI {vm_name} is running")
                return 0
            logging.info(f"VMI {vm_name} exists but is not in Running state. Current state: {phase}")
        else:
            logging.info(f"VMI {vm_name} not yet recreated")
        time.sleep(1)
    logging.error(f"Timed out waiting for VMI {vm_name} to reach Running state")
    return 1
|
||||
|
||||
|
||||
def recover(self, vm_name: str, namespace: str, disable_auto_restart: bool = False) -> int:
    """
    Recover a deleted VMI by manually recreating it from the captured state.

    :param vm_name: Name of the VMI to recover
    :param namespace: Namespace of the VMI
    :param disable_auto_restart: Whether auto-restart was disabled during
        injection (currently unused — kept for interface compatibility;
        NOTE(review): consider re-enabling spec.running here when True)
    :return: 0 for success, 1 for failure
    """
    try:
        logging.info(f"Attempting to recover VMI {vm_name} in namespace {namespace}")

        if self.original_vmi:
            logging.info(f"Auto-recovery didn't occur for VMI {vm_name}. Attempting manual recreation")

            try:
                # Strip server-generated metadata so the object can be re-created.
                vmi_dict = self.original_vmi.copy()
                metadata = vmi_dict.get('metadata')
                if metadata is not None:
                    for field in ['resourceVersion', 'uid', 'creationTimestamp', 'generation']:
                        metadata.pop(field, None)

                # Create the VMI
                self.custom_object_client.create_namespaced_custom_object(
                    group="kubevirt.io",
                    version="v1",
                    namespace=namespace,
                    plural="virtualmachineinstances",
                    body=vmi_dict
                )
                logging.info(f"Successfully recreated VMI {vm_name}")

                # Warn only when the recreated VMI did not reach Running in
                # time (the previous code warned unconditionally, which was
                # misleading on the success path).
                if self.wait_for_running(vm_name, namespace) != 0:
                    logging.warning(f"VMI {vm_name} was recreated but didn't reach Running state in time")
                return 0  # Still consider it a success as the VMI was recreated

            except Exception as e:
                logging.error(f"Error recreating VMI {vm_name}: {e}")
                log_exception(e)
                return 1
        else:
            logging.error(f"Failed to recover VMI {vm_name}: No original state captured and auto-recovery did not occur")
            return 1

    except Exception as e:
        logging.error(f"Unexpected error recovering VMI {vm_name}: {e}")
        log_exception(e)
        return 1
|
||||
@@ -1,6 +1,5 @@
|
||||
from krkn.scenario_plugins.abstract_scenario_plugin import AbstractScenarioPlugin
|
||||
from krkn.scenario_plugins.native.plugins import PLUGINS
|
||||
from krkn_lib.k8s.pods_monitor_pool import PodsMonitorPool
|
||||
from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
|
||||
from typing import Any
|
||||
@@ -28,7 +27,6 @@ class NativeScenarioPlugin(AbstractScenarioPlugin):
|
||||
|
||||
except Exception as e:
|
||||
logging.error("NativeScenarioPlugin exiting due to Exception %s" % e)
|
||||
pool.cancel()
|
||||
return 1
|
||||
else:
|
||||
return 0
|
||||
|
||||
@@ -28,6 +28,14 @@ class NetworkScenarioConfig:
|
||||
},
|
||||
)
|
||||
|
||||
image: typing.Annotated[str, validation.min(1)]= field(
|
||||
default="quay.io/krkn-chaos/krkn:tools",
|
||||
metadata={
|
||||
"name": "Image",
|
||||
"description": "Image of krkn tools to run"
|
||||
}
|
||||
)
|
||||
|
||||
label_selector: typing.Annotated[
|
||||
typing.Optional[str], validation.required_if_not("node_interface_name")
|
||||
] = field(
|
||||
@@ -142,7 +150,7 @@ class NetworkScenarioErrorOutput:
|
||||
)
|
||||
|
||||
|
||||
def get_default_interface(node: str, pod_template, cli: CoreV1Api) -> str:
|
||||
def get_default_interface(node: str, pod_template, cli: CoreV1Api, image: str) -> str:
|
||||
"""
|
||||
Function that returns a random interface from a node
|
||||
|
||||
@@ -161,7 +169,7 @@ def get_default_interface(node: str, pod_template, cli: CoreV1Api) -> str:
|
||||
Default interface (string) belonging to the node
|
||||
"""
|
||||
|
||||
pod_body = yaml.safe_load(pod_template.render(nodename=node))
|
||||
pod_body = yaml.safe_load(pod_template.render(nodename=node, image=image))
|
||||
logging.info("Creating pod to query interface on node %s" % node)
|
||||
kube_helper.create_pod(cli, pod_body, "default", 300)
|
||||
|
||||
@@ -189,7 +197,7 @@ def get_default_interface(node: str, pod_template, cli: CoreV1Api) -> str:
|
||||
|
||||
|
||||
def verify_interface(
|
||||
input_interface_list: typing.List[str], node: str, pod_template, cli: CoreV1Api
|
||||
input_interface_list: typing.List[str], node: str, pod_template, cli: CoreV1Api, image: str
|
||||
) -> typing.List[str]:
|
||||
"""
|
||||
Function that verifies whether a list of interfaces is present in the node.
|
||||
@@ -212,7 +220,7 @@ def verify_interface(
|
||||
Returns:
|
||||
The interface list for the node
|
||||
"""
|
||||
pod_body = yaml.safe_load(pod_template.render(nodename=node))
|
||||
pod_body = yaml.safe_load(pod_template.render(nodename=node, image=image))
|
||||
logging.info("Creating pod to query interface on node %s" % node)
|
||||
kube_helper.create_pod(cli, pod_body, "default", 300)
|
||||
try:
|
||||
@@ -268,6 +276,7 @@ def get_node_interfaces(
|
||||
instance_count: int,
|
||||
pod_template,
|
||||
cli: CoreV1Api,
|
||||
image: str
|
||||
) -> typing.Dict[str, typing.List[str]]:
|
||||
"""
|
||||
Function that is used to process the input dictionary with the nodes and
|
||||
@@ -309,7 +318,7 @@ def get_node_interfaces(
|
||||
nodes = kube_helper.get_node(None, label_selector, instance_count, cli)
|
||||
node_interface_dict = {}
|
||||
for node in nodes:
|
||||
node_interface_dict[node] = get_default_interface(node, pod_template, cli)
|
||||
node_interface_dict[node] = get_default_interface(node, pod_template, cli, image)
|
||||
else:
|
||||
node_name_list = node_interface_dict.keys()
|
||||
filtered_node_list = []
|
||||
@@ -321,7 +330,7 @@ def get_node_interfaces(
|
||||
|
||||
for node in filtered_node_list:
|
||||
node_interface_dict[node] = verify_interface(
|
||||
node_interface_dict[node], node, pod_template, cli
|
||||
node_interface_dict[node], node, pod_template, cli, image
|
||||
)
|
||||
|
||||
return node_interface_dict
|
||||
@@ -337,6 +346,7 @@ def apply_ingress_filter(
|
||||
cli: CoreV1Api,
|
||||
create_interfaces: bool = True,
|
||||
param_selector: str = "all",
|
||||
image:str = "quay.io/krkn-chaos/krkn:tools",
|
||||
) -> str:
|
||||
"""
|
||||
Function that applies the filters to shape incoming traffic to
|
||||
@@ -382,14 +392,14 @@ def apply_ingress_filter(
|
||||
network_params = {param_selector: cfg.network_params[param_selector]}
|
||||
|
||||
if create_interfaces:
|
||||
create_virtual_interfaces(cli, interface_list, node, pod_template)
|
||||
create_virtual_interfaces(cli, interface_list, node, pod_template, image)
|
||||
|
||||
exec_cmd = get_ingress_cmd(
|
||||
interface_list, network_params, duration=cfg.test_duration
|
||||
)
|
||||
logging.info("Executing %s on node %s" % (exec_cmd, node))
|
||||
job_body = yaml.safe_load(
|
||||
job_template.render(jobname=str(hash(node))[:5], nodename=node, cmd=exec_cmd)
|
||||
job_template.render(jobname=str(hash(node))[:5], nodename=node, image=image, cmd=exec_cmd)
|
||||
)
|
||||
api_response = kube_helper.create_job(batch_cli, job_body)
|
||||
|
||||
@@ -400,7 +410,7 @@ def apply_ingress_filter(
|
||||
|
||||
|
||||
def create_virtual_interfaces(
|
||||
cli: CoreV1Api, interface_list: typing.List[str], node: str, pod_template
|
||||
cli: CoreV1Api, interface_list: typing.List[str], node: str, pod_template, image: str
|
||||
) -> None:
|
||||
"""
|
||||
Function that creates a privileged pod and uses it to create
|
||||
@@ -421,7 +431,7 @@ def create_virtual_interfaces(
|
||||
- The YAML template used to instantiate a pod to create
|
||||
virtual interfaces on the node
|
||||
"""
|
||||
pod_body = yaml.safe_load(pod_template.render(nodename=node))
|
||||
pod_body = yaml.safe_load(pod_template.render(nodename=node, image=image))
|
||||
kube_helper.create_pod(cli, pod_body, "default", 300)
|
||||
logging.info(
|
||||
"Creating {0} virtual interfaces on node {1} using a pod".format(
|
||||
@@ -434,7 +444,7 @@ def create_virtual_interfaces(
|
||||
|
||||
|
||||
def delete_virtual_interfaces(
|
||||
cli: CoreV1Api, node_list: typing.List[str], pod_template
|
||||
cli: CoreV1Api, node_list: typing.List[str], pod_template, image: str
|
||||
):
|
||||
"""
|
||||
Function that creates a privileged pod and uses it to delete all
|
||||
@@ -457,7 +467,7 @@ def delete_virtual_interfaces(
|
||||
"""
|
||||
|
||||
for node in node_list:
|
||||
pod_body = yaml.safe_load(pod_template.render(nodename=node))
|
||||
pod_body = yaml.safe_load(pod_template.render(nodename=node, image=image))
|
||||
kube_helper.create_pod(cli, pod_body, "default", 300)
|
||||
logging.info("Deleting all virtual interfaces on node {0}".format(node))
|
||||
delete_ifb(cli, "modtools")
|
||||
@@ -700,7 +710,7 @@ def network_chaos(
|
||||
pod_interface_template = env.get_template("pod_interface.j2")
|
||||
pod_module_template = env.get_template("pod_module.j2")
|
||||
cli, batch_cli = kube_helper.setup_kubernetes(cfg.kubeconfig_path)
|
||||
|
||||
test_image = cfg.image
|
||||
logging.info("Starting Ingress Network Chaos")
|
||||
try:
|
||||
node_interface_dict = get_node_interfaces(
|
||||
@@ -709,6 +719,7 @@ def network_chaos(
|
||||
cfg.instance_count,
|
||||
pod_interface_template,
|
||||
cli,
|
||||
test_image
|
||||
)
|
||||
except Exception:
|
||||
return "error", NetworkScenarioErrorOutput(format_exc())
|
||||
@@ -726,6 +737,7 @@ def network_chaos(
|
||||
job_template,
|
||||
batch_cli,
|
||||
cli,
|
||||
test_image
|
||||
)
|
||||
)
|
||||
logging.info("Waiting for parallel job to finish")
|
||||
@@ -746,6 +758,7 @@ def network_chaos(
|
||||
cli,
|
||||
create_interfaces=create_interfaces,
|
||||
param_selector=param,
|
||||
image=test_image
|
||||
)
|
||||
)
|
||||
logging.info("Waiting for serial job to finish")
|
||||
@@ -772,6 +785,6 @@ def network_chaos(
|
||||
logging.error("Ingress Network Chaos exiting due to Exception - %s" % e)
|
||||
return "error", NetworkScenarioErrorOutput(format_exc())
|
||||
finally:
|
||||
delete_virtual_interfaces(cli, node_interface_dict.keys(), pod_module_template)
|
||||
delete_virtual_interfaces(cli, node_interface_dict.keys(), pod_module_template, test_image)
|
||||
logging.info("Deleting jobs(if any)")
|
||||
delete_jobs(cli, batch_cli, job_list[:])
|
||||
|
||||
@@ -9,7 +9,7 @@ spec:
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: networkchaos
|
||||
image: docker.io/fedora/tools
|
||||
image: {{image}}
|
||||
command: ["/bin/sh", "-c", "{{cmd}}"]
|
||||
securityContext:
|
||||
privileged: true
|
||||
@@ -22,4 +22,4 @@ spec:
|
||||
hostPath:
|
||||
path: /lib/modules
|
||||
restartPolicy: Never
|
||||
backoffLimit: 0
|
||||
backoffLimit: 0
|
||||
|
||||
@@ -7,7 +7,7 @@ spec:
|
||||
nodeName: {{nodename}}
|
||||
containers:
|
||||
- name: fedtools
|
||||
image: docker.io/fedora/tools
|
||||
image: {{image}}
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
|
||||
@@ -6,7 +6,7 @@ spec:
|
||||
nodeName: {{nodename}}
|
||||
containers:
|
||||
- name: modtools
|
||||
image: docker.io/fedora/tools
|
||||
image: {{image}}
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- /bin/sh
|
||||
@@ -27,4 +27,4 @@ spec:
|
||||
hostNetwork: true
|
||||
hostIPC: true
|
||||
hostPID: true
|
||||
restartPolicy: Never
|
||||
restartPolicy: Never
|
||||
|
||||
@@ -9,7 +9,7 @@ spec:
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: networkchaos
|
||||
image: docker.io/fedora/tools
|
||||
image: {{image}}
|
||||
command: ["chroot", "/host", "/bin/sh", "-c", "{{cmd}}"]
|
||||
securityContext:
|
||||
privileged: true
|
||||
|
||||
@@ -6,7 +6,7 @@ spec:
|
||||
nodeName: {{nodename}}
|
||||
containers:
|
||||
- name: modtools
|
||||
image: docker.io/fedora/tools
|
||||
image: {{image}}
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- /bin/sh
|
||||
@@ -27,4 +27,4 @@ spec:
|
||||
hostNetwork: true
|
||||
hostIPC: true
|
||||
hostPID: true
|
||||
restartPolicy: Never
|
||||
restartPolicy: Never
|
||||
|
||||
@@ -192,6 +192,7 @@ def apply_outage_policy(
|
||||
duration: str,
|
||||
bridge_name: str,
|
||||
kubecli: KrknKubernetes,
|
||||
image: str
|
||||
) -> typing.List[str]:
|
||||
"""
|
||||
Function that applies filters(ingress or egress) to block traffic.
|
||||
@@ -223,6 +224,8 @@ def apply_outage_policy(
|
||||
batch_cli (BatchV1Api)
|
||||
- Object to interact with Kubernetes Python client's BatchV1Api API
|
||||
|
||||
image (string)
|
||||
- Image of network chaos tool
|
||||
Returns:
|
||||
The name of the job created that executes the commands on a node
|
||||
for ingress chaos scenario
|
||||
@@ -239,7 +242,7 @@ def apply_outage_policy(
|
||||
br = "br-int"
|
||||
table = 8
|
||||
for node, ips in node_dict.items():
|
||||
while len(check_cookie(node, pod_template, br, cookie, kubecli)) > 2 or cookie in cookie_list:
|
||||
while len(check_cookie(node, pod_template, br, cookie, kubecli, image)) > 2 or cookie in cookie_list:
|
||||
cookie = random.randint(100, 10000)
|
||||
exec_cmd = ""
|
||||
for ip in ips:
|
||||
@@ -257,6 +260,7 @@ def apply_outage_policy(
|
||||
job_template.render(
|
||||
jobname=str(hash(node))[:5] + str(random.randint(0, 10000)),
|
||||
nodename=node,
|
||||
image=image,
|
||||
cmd=exec_cmd,
|
||||
)
|
||||
)
|
||||
@@ -281,6 +285,7 @@ def apply_ingress_policy(
|
||||
bridge_name: str,
|
||||
kubecli: KrknKubernetes,
|
||||
test_execution: str,
|
||||
image: str,
|
||||
) -> typing.List[str]:
|
||||
"""
|
||||
Function that applies ingress traffic shaping to pod interface.
|
||||
@@ -327,22 +332,23 @@ def apply_ingress_policy(
|
||||
job_list = []
|
||||
yml_list = []
|
||||
|
||||
create_virtual_interfaces(kubecli, len(ips), node, pod_template)
|
||||
create_virtual_interfaces(kubecli, len(ips), node, pod_template, image)
|
||||
|
||||
for count, pod_ip in enumerate(set(ips)):
|
||||
pod_inf = get_pod_interface(node, pod_ip, pod_template, bridge_name, kubecli)
|
||||
pod_inf = get_pod_interface(node, pod_ip, pod_template, bridge_name, kubecli, image)
|
||||
exec_cmd = get_ingress_cmd(
|
||||
test_execution, pod_inf, mod, count, network_params, duration
|
||||
)
|
||||
logging.info("Executing %s on pod %s in node %s" % (exec_cmd, pod_ip, node))
|
||||
job_body = yaml.safe_load(
|
||||
job_template.render(jobname=mod + str(pod_ip), nodename=node, cmd=exec_cmd)
|
||||
job_template.render(jobname=mod + str(pod_ip), nodename=node, image=image, cmd=exec_cmd)
|
||||
)
|
||||
yml_list.append(job_body)
|
||||
if pod_ip == node:
|
||||
break
|
||||
|
||||
for job_body in yml_list:
|
||||
print('jbo body' + str(job_body))
|
||||
api_response = kubecli.create_job(job_body)
|
||||
if api_response is None:
|
||||
raise Exception("Error creating job")
|
||||
@@ -362,6 +368,7 @@ def apply_net_policy(
|
||||
bridge_name: str,
|
||||
kubecli: KrknKubernetes,
|
||||
test_execution: str,
|
||||
image: str,
|
||||
) -> typing.List[str]:
|
||||
"""
|
||||
Function that applies egress traffic shaping to pod interface.
|
||||
@@ -415,7 +422,7 @@ def apply_net_policy(
|
||||
)
|
||||
logging.info("Executing %s on pod %s in node %s" % (exec_cmd, pod_ip, node))
|
||||
job_body = yaml.safe_load(
|
||||
job_template.render(jobname=mod + str(pod_ip), nodename=node, cmd=exec_cmd)
|
||||
job_template.render(jobname=mod + str(pod_ip), nodename=node, image=image, cmd=exec_cmd)
|
||||
)
|
||||
yml_list.append(job_body)
|
||||
|
||||
@@ -530,7 +537,7 @@ def get_egress_cmd(
|
||||
|
||||
|
||||
def create_virtual_interfaces(
|
||||
kubecli: KrknKubernetes, nummber: int, node: str, pod_template
|
||||
kubecli: KrknKubernetes, nummber: int, node: str, pod_template, image: str,
|
||||
) -> None:
|
||||
"""
|
||||
Function that creates a privileged pod and uses it to create
|
||||
@@ -550,8 +557,11 @@ def create_virtual_interfaces(
|
||||
pod_template (jinja2.environment.Template))
|
||||
- The YAML template used to instantiate a pod to create
|
||||
virtual interfaces on the node
|
||||
|
||||
image (string)
|
||||
- Image of network chaos tool
|
||||
"""
|
||||
pod_body = yaml.safe_load(pod_template.render(nodename=node))
|
||||
pod_body = yaml.safe_load(pod_template.render(nodename=node, image=image))
|
||||
kubecli.create_pod(pod_body, "default", 300)
|
||||
logging.info(
|
||||
"Creating {0} virtual interfaces on node {1} using a pod".format(nummber, node)
|
||||
@@ -562,7 +572,7 @@ def create_virtual_interfaces(
|
||||
|
||||
|
||||
def delete_virtual_interfaces(
|
||||
kubecli: KrknKubernetes, node_list: typing.List[str], pod_template
|
||||
kubecli: KrknKubernetes, node_list: typing.List[str], pod_template, image: str,
|
||||
):
|
||||
"""
|
||||
Function that creates a privileged pod and uses it to delete all
|
||||
@@ -582,10 +592,13 @@ def delete_virtual_interfaces(
|
||||
pod_template (jinja2.environment.Template))
|
||||
- The YAML template used to instantiate a pod to delete
|
||||
virtual interfaces on the node
|
||||
|
||||
image (string)
|
||||
- Image of network chaos tool
|
||||
"""
|
||||
|
||||
for node in node_list:
|
||||
pod_body = yaml.safe_load(pod_template.render(nodename=node))
|
||||
pod_body = yaml.safe_load(pod_template.render(nodename=node, image=image))
|
||||
kubecli.create_pod(pod_body, "default", 300)
|
||||
logging.info("Deleting all virtual interfaces on node {0}".format(node))
|
||||
delete_ifb(kubecli, "modtools")
|
||||
@@ -619,7 +632,7 @@ def delete_ifb(kubecli: KrknKubernetes, pod_name: str):
|
||||
kubecli.exec_cmd_in_pod(exec_command, pod_name, "default", base_command="chroot")
|
||||
|
||||
|
||||
def list_bridges(node: str, pod_template, kubecli: KrknKubernetes) -> typing.List[str]:
|
||||
def list_bridges(node: str, pod_template, kubecli: KrknKubernetes, image: str) -> typing.List[str]:
|
||||
"""
|
||||
Function that returns a list of bridges on the node
|
||||
|
||||
@@ -634,11 +647,13 @@ def list_bridges(node: str, pod_template, kubecli: KrknKubernetes) -> typing.Lis
|
||||
kubecli (KrknKubernetes)
|
||||
- Object to interact with Kubernetes Python client
|
||||
|
||||
image (string)
|
||||
- Image of network chaos tool
|
||||
Returns:
|
||||
List of bridges on the node.
|
||||
"""
|
||||
|
||||
pod_body = yaml.safe_load(pod_template.render(nodename=node))
|
||||
pod_body = yaml.safe_load(pod_template.render(nodename=node, image=image))
|
||||
logging.info("Creating pod to query bridge on node %s" % node)
|
||||
kubecli.create_pod(pod_body, "default", 300)
|
||||
|
||||
@@ -662,7 +677,7 @@ def list_bridges(node: str, pod_template, kubecli: KrknKubernetes) -> typing.Lis
|
||||
|
||||
|
||||
def check_cookie(
|
||||
node: str, pod_template, br_name, cookie, kubecli: KrknKubernetes
|
||||
node: str, pod_template, br_name, cookie, kubecli: KrknKubernetes, image: str
|
||||
) -> str:
|
||||
"""
|
||||
Function to check for matching flow rules
|
||||
@@ -684,11 +699,13 @@ def check_cookie(
|
||||
cli (CoreV1Api)
|
||||
- Object to interact with Kubernetes Python client's CoreV1 API
|
||||
|
||||
image (string)
|
||||
- Image of network chaos tool
|
||||
Returns
|
||||
Returns the matching flow rules
|
||||
"""
|
||||
|
||||
pod_body = yaml.safe_load(pod_template.render(nodename=node))
|
||||
pod_body = yaml.safe_load(pod_template.render(nodename=node, image=image))
|
||||
logging.info("Creating pod to query duplicate rules on node %s" % node)
|
||||
kubecli.create_pod(pod_body, "default", 300)
|
||||
|
||||
@@ -721,7 +738,7 @@ def check_cookie(
|
||||
|
||||
|
||||
def get_pod_interface(
|
||||
node: str, ip: str, pod_template, br_name, kubecli: KrknKubernetes
|
||||
node: str, ip: str, pod_template, br_name, kubecli: KrknKubernetes, image: str = "quay.io/krkn-chaos/krkn:tools"
|
||||
) -> str:
|
||||
"""
|
||||
Function to query the pod interface on a node
|
||||
@@ -747,7 +764,7 @@ def get_pod_interface(
|
||||
Returns the pod interface name
|
||||
"""
|
||||
|
||||
pod_body = yaml.safe_load(pod_template.render(nodename=node))
|
||||
pod_body = yaml.safe_load(pod_template.render(nodename=node, image=image))
|
||||
logging.info("Creating pod to query pod interface on node %s" % node)
|
||||
kubecli.create_pod(pod_body, "default", 300)
|
||||
inf = ""
|
||||
@@ -788,7 +805,8 @@ def get_pod_interface(
|
||||
|
||||
|
||||
def check_bridge_interface(
|
||||
node_name: str, pod_template, bridge_name: str, kubecli: KrknKubernetes
|
||||
node_name: str, pod_template, bridge_name: str, kubecli: KrknKubernetes,
|
||||
image: str = "quay.io/krkn-chaos/krkn:tools"
|
||||
) -> bool:
|
||||
"""
|
||||
Function is used to check if the required OVS or OVN bridge is found in
|
||||
@@ -814,7 +832,7 @@ def check_bridge_interface(
|
||||
nodes = kubecli.get_node(node_name, None, 1)
|
||||
node_bridge = []
|
||||
for node in nodes:
|
||||
node_bridge = list_bridges(node, pod_template, kubecli)
|
||||
node_bridge = list_bridges(node, pod_template, kubecli, image=image)
|
||||
if bridge_name not in node_bridge:
|
||||
raise Exception(f"OVS bridge {bridge_name} not found on the node ")
|
||||
|
||||
@@ -835,6 +853,14 @@ class InputParams:
|
||||
}
|
||||
)
|
||||
|
||||
image: typing.Annotated[str, validation.min(1)]= field(
|
||||
default="quay.io/krkn-chaos/krkn:tools",
|
||||
metadata={
|
||||
"name": "Image",
|
||||
"description": "Image of krkn tools to run"
|
||||
}
|
||||
)
|
||||
|
||||
direction: typing.List[str] = field(
|
||||
default_factory=lambda: ["ingress", "egress"],
|
||||
metadata={
|
||||
@@ -1004,6 +1030,7 @@ def pod_outage(
|
||||
test_namespace = params.namespace
|
||||
test_label_selector = params.label_selector
|
||||
test_pod_name = params.pod_name
|
||||
test_image = params.image
|
||||
filter_dict = {}
|
||||
job_list = []
|
||||
publish = False
|
||||
@@ -1040,7 +1067,7 @@ def pod_outage(
|
||||
label_set.add("%s=%s" % (key, value))
|
||||
|
||||
check_bridge_interface(
|
||||
list(node_dict.keys())[0], pod_module_template, br_name, kubecli
|
||||
list(node_dict.keys())[0], pod_module_template, br_name, kubecli, test_image
|
||||
)
|
||||
|
||||
for direction, ports in filter_dict.items():
|
||||
@@ -1055,6 +1082,7 @@ def pod_outage(
|
||||
params.test_duration,
|
||||
br_name,
|
||||
kubecli,
|
||||
test_image
|
||||
)
|
||||
)
|
||||
|
||||
@@ -1095,7 +1123,16 @@ class EgressParams:
|
||||
}
|
||||
)
|
||||
|
||||
image: typing.Annotated[str, validation.min(1)]= field(
|
||||
default="quay.io/krkn-chaos/krkn:tools",
|
||||
metadata={
|
||||
"name": "Image",
|
||||
"description": "Image of krkn tools to run"
|
||||
}
|
||||
)
|
||||
|
||||
network_params: typing.Dict[str, str] = field(
|
||||
default=None,
|
||||
metadata={
|
||||
"name": "Network Parameters",
|
||||
"description": "The network filters that are applied on the interface. "
|
||||
@@ -1254,6 +1291,7 @@ def pod_egress_shaping(
|
||||
test_namespace = params.namespace
|
||||
test_label_selector = params.label_selector
|
||||
test_pod_name = params.pod_name
|
||||
test_image = params.image
|
||||
job_list = []
|
||||
publish = False
|
||||
|
||||
@@ -1287,7 +1325,7 @@ def pod_egress_shaping(
|
||||
label_set.add("%s=%s" % (key, value))
|
||||
|
||||
check_bridge_interface(
|
||||
list(node_dict.keys())[0], pod_module_template, br_name, kubecli
|
||||
list(node_dict.keys())[0], pod_module_template, br_name, kubecli, test_image
|
||||
)
|
||||
|
||||
for mod in mod_lst:
|
||||
@@ -1304,6 +1342,7 @@ def pod_egress_shaping(
|
||||
br_name,
|
||||
kubecli,
|
||||
params.execution_type,
|
||||
test_image
|
||||
)
|
||||
)
|
||||
if params.execution_type == "serial":
|
||||
@@ -1357,8 +1396,17 @@ class IngressParams:
|
||||
"for details.",
|
||||
}
|
||||
)
|
||||
|
||||
image: typing.Annotated[str, validation.min(1)] = field(
|
||||
default="quay.io/krkn-chaos/krkn:tools",
|
||||
metadata={
|
||||
"name": "Image",
|
||||
"description": "Image to use for injecting network chaos",
|
||||
}
|
||||
)
|
||||
|
||||
network_params: typing.Dict[str, str] = field(
|
||||
default=None,
|
||||
metadata={
|
||||
"name": "Network Parameters",
|
||||
"description": "The network filters that are applied on the interface. "
|
||||
@@ -1518,6 +1566,7 @@ def pod_ingress_shaping(
|
||||
test_namespace = params.namespace
|
||||
test_label_selector = params.label_selector
|
||||
test_pod_name = params.pod_name
|
||||
test_image = params.image
|
||||
job_list = []
|
||||
publish = False
|
||||
|
||||
@@ -1551,7 +1600,7 @@ def pod_ingress_shaping(
|
||||
label_set.add("%s=%s" % (key, value))
|
||||
|
||||
check_bridge_interface(
|
||||
list(node_dict.keys())[0], pod_module_template, br_name, kubecli
|
||||
list(node_dict.keys())[0], pod_module_template, br_name, kubecli, test_image
|
||||
)
|
||||
|
||||
for mod in mod_lst:
|
||||
@@ -1568,6 +1617,7 @@ def pod_ingress_shaping(
|
||||
br_name,
|
||||
kubecli,
|
||||
params.execution_type,
|
||||
image=test_image
|
||||
)
|
||||
)
|
||||
if params.execution_type == "serial":
|
||||
@@ -1604,6 +1654,6 @@ def pod_ingress_shaping(
|
||||
logging.error("Pod network Shaping scenario exiting due to Exception - %s" % e)
|
||||
return "error", PodIngressNetShapingErrorOutput(format_exc())
|
||||
finally:
|
||||
delete_virtual_interfaces(kubecli, node_dict.keys(), pod_module_template)
|
||||
delete_virtual_interfaces(kubecli, node_dict.keys(), pod_module_template, test_image)
|
||||
logging.info("Deleting jobs(if any)")
|
||||
delete_jobs(kubecli, job_list[:])
|
||||
|
||||
@@ -9,7 +9,7 @@ spec:
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: networkchaos
|
||||
image: docker.io/fedora/tools
|
||||
image: {{image}}
|
||||
command: ["/bin/sh", "-c", "{{cmd}}"]
|
||||
securityContext:
|
||||
privileged: true
|
||||
|
||||
@@ -42,7 +42,9 @@ class NetworkChaosScenarioPlugin(AbstractScenarioPlugin):
|
||||
test_egress = get_yaml_item_value(
|
||||
test_dict, "egress", {"bandwidth": "100mbit"}
|
||||
)
|
||||
|
||||
test_image = get_yaml_item_value(
|
||||
test_dict, "image", "quay.io/krkn-chaos/krkn:tools"
|
||||
)
|
||||
if test_node:
|
||||
node_name_list = test_node.split(",")
|
||||
nodelst = common_node_functions.get_node_by_name(node_name_list, lib_telemetry.get_lib_kubernetes())
|
||||
@@ -60,6 +62,7 @@ class NetworkChaosScenarioPlugin(AbstractScenarioPlugin):
|
||||
nodelst,
|
||||
pod_template,
|
||||
lib_telemetry.get_lib_kubernetes(),
|
||||
image=test_image
|
||||
)
|
||||
joblst = []
|
||||
egress_lst = [i for i in param_lst if i in test_egress]
|
||||
@@ -71,6 +74,7 @@ class NetworkChaosScenarioPlugin(AbstractScenarioPlugin):
|
||||
"execution": test_execution,
|
||||
"instance_count": test_instance_count,
|
||||
"egress": test_egress,
|
||||
"image": test_image
|
||||
}
|
||||
}
|
||||
logging.info(
|
||||
@@ -94,6 +98,7 @@ class NetworkChaosScenarioPlugin(AbstractScenarioPlugin):
|
||||
jobname=i + str(hash(node))[:5],
|
||||
nodename=node,
|
||||
cmd=exec_cmd,
|
||||
image=test_image
|
||||
)
|
||||
)
|
||||
joblst.append(job_body["metadata"]["name"])
|
||||
@@ -153,10 +158,10 @@ class NetworkChaosScenarioPlugin(AbstractScenarioPlugin):
|
||||
return 0
|
||||
|
||||
def verify_interface(
|
||||
self, test_interface, nodelst, template, kubecli: KrknKubernetes
|
||||
self, test_interface, nodelst, template, kubecli: KrknKubernetes, image: str
|
||||
):
|
||||
pod_index = random.randint(0, len(nodelst) - 1)
|
||||
pod_body = yaml.safe_load(template.render(nodename=nodelst[pod_index]))
|
||||
pod_body = yaml.safe_load(template.render(nodename=nodelst[pod_index], image=image))
|
||||
logging.info("Creating pod to query interface on node %s" % nodelst[pod_index])
|
||||
kubecli.create_pod(pod_body, "default", 300)
|
||||
try:
|
||||
@@ -177,7 +182,7 @@ class NetworkChaosScenarioPlugin(AbstractScenarioPlugin):
|
||||
raise RuntimeError()
|
||||
return test_interface
|
||||
finally:
|
||||
logging.info("Deleteing pod to query interface on node")
|
||||
logging.info("Deleting pod to query interface on node")
|
||||
kubecli.delete_pod("fedtools", "default")
|
||||
|
||||
# krkn_lib
|
||||
|
||||
@@ -7,7 +7,7 @@ spec:
|
||||
nodeName: {{nodename}}
|
||||
containers:
|
||||
- name: fedtools
|
||||
image: docker.io/fedora/tools
|
||||
image: {{image}}
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
|
||||
@@ -14,9 +14,11 @@ class BaseNetworkChaosConfig:
|
||||
wait_duration: int
|
||||
test_duration: int
|
||||
label_selector: str
|
||||
service_account: str
|
||||
instance_count: int
|
||||
execution: str
|
||||
namespace: str
|
||||
taints: list[str]
|
||||
|
||||
def validate(self) -> list[str]:
|
||||
errors = []
|
||||
|
||||
@@ -4,16 +4,26 @@ metadata:
|
||||
name: {{pod_name}}
|
||||
namespace: {{namespace}}
|
||||
spec:
|
||||
{% if service_account %}
|
||||
serviceAccountName: {{ service_account }}
|
||||
{%endif%}
|
||||
{% if host_network %}
|
||||
hostNetwork: true
|
||||
{%endif%}
|
||||
{% if taints %}
|
||||
tolerations:
|
||||
{% for toleration in taints %}
|
||||
- key: "{{ toleration.key }}"
|
||||
operator: "{{ toleration.operator }}"
|
||||
{% if toleration.value %}
|
||||
value: "{{ toleration.value }}"
|
||||
{% endif %}
|
||||
effect: "{{ toleration.effect }}"
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
hostPID: true
|
||||
nodeSelector:
|
||||
kubernetes.io/hostname: {{target}}
|
||||
tolerations:
|
||||
- key: "node-role.kubernetes.io/master"
|
||||
operator: "Exists"
|
||||
effect: "NoSchedule"
|
||||
containers:
|
||||
- name: {{container_name}}
|
||||
imagePullPolicy: Always
|
||||
|
||||
@@ -58,6 +58,27 @@ def deploy_network_filter_pod(
|
||||
file_loader = FileSystemLoader(os.path.abspath(os.path.dirname(__file__)))
|
||||
env = Environment(loader=file_loader, autoescape=True)
|
||||
pod_template = env.get_template("templates/network-chaos.j2")
|
||||
tolerations = []
|
||||
|
||||
for taint in config.taints:
|
||||
key_value_part, effect = taint.split(":", 1)
|
||||
if "=" in key_value_part:
|
||||
key, value = key_value_part.split("=", 1)
|
||||
operator = "Equal"
|
||||
else:
|
||||
key = key_value_part
|
||||
value = None
|
||||
operator = "Exists"
|
||||
toleration = {
|
||||
"key": key,
|
||||
"operator": operator,
|
||||
"effect": effect,
|
||||
}
|
||||
if value is not None:
|
||||
toleration["value"] = value
|
||||
tolerations.append(toleration)
|
||||
|
||||
|
||||
pod_body = yaml.safe_load(
|
||||
pod_template.render(
|
||||
pod_name=pod_name,
|
||||
@@ -66,6 +87,8 @@ def deploy_network_filter_pod(
|
||||
target=target_node,
|
||||
container_name=container_name,
|
||||
workload_image=config.image,
|
||||
taints=tolerations,
|
||||
service_account=config.service_account
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
@@ -84,7 +84,7 @@ class abstract_node_scenarios:
|
||||
)
|
||||
logging.error("stop_kubelet_scenario injection failed!")
|
||||
raise e
|
||||
self.add_affected_node(affected_node)
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
# Node scenario to stop and start the kubelet
|
||||
def stop_start_kubelet_scenario(self, instance_kill_count, node, timeout):
|
||||
@@ -106,7 +106,6 @@ class abstract_node_scenarios:
|
||||
+ node
|
||||
+ " -- chroot /host systemctl restart kubelet &"
|
||||
)
|
||||
nodeaction.wait_for_not_ready_status(node, timeout, self.kubecli, affected_node)
|
||||
nodeaction.wait_for_ready_status(node, timeout, self.kubecli,affected_node)
|
||||
logging.info("The kubelet of the node %s has been restarted" % (node))
|
||||
logging.info("restart_kubelet_scenario has been successfuly injected!")
|
||||
@@ -117,7 +116,7 @@ class abstract_node_scenarios:
|
||||
)
|
||||
logging.error("restart_kubelet_scenario injection failed!")
|
||||
raise e
|
||||
self.add_affected_node(affected_node)
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
# Node scenario to crash the node
|
||||
def node_crash_scenario(self, instance_kill_count, node, timeout):
|
||||
@@ -125,7 +124,7 @@ class abstract_node_scenarios:
|
||||
try:
|
||||
logging.info("Starting node_crash_scenario injection")
|
||||
logging.info("Crashing the node %s" % (node))
|
||||
runcommand.invoke(
|
||||
runcommand.run(
|
||||
"oc debug node/" + node + " -- chroot /host "
|
||||
"dd if=/dev/urandom of=/proc/sysrq-trigger"
|
||||
)
|
||||
@@ -136,7 +135,7 @@ class abstract_node_scenarios:
|
||||
"Test Failed" % (e)
|
||||
)
|
||||
logging.error("node_crash_scenario injection failed!")
|
||||
raise e
|
||||
return 1
|
||||
|
||||
# Node scenario to check service status on helper node
|
||||
def node_service_status(self, node, service, ssh_private_key, timeout):
|
||||
|
||||
@@ -274,7 +274,7 @@ done'''
|
||||
logging.info("Disk response: %s" % (disk_response))
|
||||
node_disks = [disk for disk in disk_response.split("\n") if disk]
|
||||
logging.info("Node disks: %s" % (node_disks))
|
||||
offline_disks = [disk for disk in node_disks if disk not in user_disks]
|
||||
offline_disks = [disk for disk in user_disks if disk in node_disks]
|
||||
return offline_disks if offline_disks else node_disks
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
|
||||
@@ -1,16 +1,10 @@
|
||||
import datetime
|
||||
import time
|
||||
import random
|
||||
import logging
|
||||
import paramiko
|
||||
from krkn_lib.models.k8s import AffectedNode
|
||||
import krkn.invoke.command as runcommand
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
from krkn_lib.models.k8s import AffectedNode, AffectedNodeStatus
|
||||
from krkn_lib.models.k8s import AffectedNode
|
||||
|
||||
node_general = False
|
||||
|
||||
|
||||
def get_node_by_name(node_name_list, kubecli: KrknKubernetes):
|
||||
killable_nodes = kubecli.list_killable_nodes()
|
||||
@@ -65,14 +59,6 @@ def wait_for_unknown_status(node, timeout, kubecli: KrknKubernetes, affected_nod
|
||||
return affected_node
|
||||
|
||||
|
||||
# Get the ip of the cluster node
|
||||
def get_node_ip(node):
|
||||
return runcommand.invoke(
|
||||
"kubectl get node %s -o "
|
||||
"jsonpath='{.status.addresses[?(@.type==\"InternalIP\")].address}'" % (node)
|
||||
)
|
||||
|
||||
|
||||
def check_service_status(node, service, ssh_private_key, timeout):
|
||||
ssh = paramiko.SSHClient()
|
||||
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
|
||||
|
||||
@@ -36,10 +36,25 @@ class IbmCloud:
|
||||
self.service = VpcV1(authenticator=authenticator)
|
||||
|
||||
self.service.set_service_url(service_url)
|
||||
|
||||
except Exception as e:
|
||||
logging.error("error authenticating" + str(e))
|
||||
|
||||
|
||||
def configure_ssl_verification(self, disable_ssl_verification):
|
||||
"""
|
||||
Configure SSL verification for IBM Cloud VPC service.
|
||||
|
||||
Args:
|
||||
disable_ssl_verification: If True, disables SSL verification.
|
||||
"""
|
||||
logging.info(f"Configuring SSL verification: disable_ssl_verification={disable_ssl_verification}")
|
||||
if disable_ssl_verification:
|
||||
self.service.set_disable_ssl_verification(True)
|
||||
logging.info("SSL verification disabled for IBM Cloud VPC service")
|
||||
else:
|
||||
self.service.set_disable_ssl_verification(False)
|
||||
logging.info("SSL verification enabled for IBM Cloud VPC service")
|
||||
|
||||
# Get the instance ID of the node
|
||||
def get_instance_id(self, node_name):
|
||||
node_list = self.list_instances()
|
||||
@@ -260,9 +275,13 @@ class IbmCloud:
|
||||
|
||||
@dataclass
|
||||
class ibm_node_scenarios(abstract_node_scenarios):
|
||||
def __init__(self, kubecli: KrknKubernetes, node_action_kube_check: bool, affected_nodes_status: AffectedNodeStatus):
|
||||
def __init__(self, kubecli: KrknKubernetes, node_action_kube_check: bool, affected_nodes_status: AffectedNodeStatus, disable_ssl_verification: bool):
|
||||
super().__init__(kubecli, node_action_kube_check, affected_nodes_status)
|
||||
self.ibmcloud = IbmCloud()
|
||||
|
||||
# Configure SSL verification
|
||||
self.ibmcloud.configure_ssl_verification(disable_ssl_verification)
|
||||
|
||||
self.node_action_kube_check = node_action_kube_check
|
||||
|
||||
def node_start_scenario(self, instance_kill_count, node, timeout):
|
||||
@@ -327,7 +346,7 @@ class ibm_node_scenarios(abstract_node_scenarios):
|
||||
logging.info("Starting node_reboot_scenario injection")
|
||||
logging.info("Rebooting the node %s " % (node))
|
||||
self.ibmcloud.reboot_instances(instance_id)
|
||||
self.ibmcloud.wait_until_rebooted(instance_id, timeout)
|
||||
self.ibmcloud.wait_until_rebooted(instance_id, timeout, affected_node)
|
||||
if self.node_action_kube_check:
|
||||
nodeaction.wait_for_unknown_status(
|
||||
node, timeout, affected_node
|
||||
|
||||
@@ -120,7 +120,8 @@ class NodeActionsScenarioPlugin(AbstractScenarioPlugin):
|
||||
node_scenario["cloud_type"].lower() == "ibm"
|
||||
or node_scenario["cloud_type"].lower() == "ibmcloud"
|
||||
):
|
||||
return ibm_node_scenarios(kubecli, node_action_kube_check, affected_nodes_status)
|
||||
disable_ssl_verification = get_yaml_item_value(node_scenario, "disable_ssl_verification", True)
|
||||
return ibm_node_scenarios(kubecli, node_action_kube_check, affected_nodes_status, disable_ssl_verification)
|
||||
else:
|
||||
logging.error(
|
||||
"Cloud type "
|
||||
|
||||
@@ -38,3 +38,4 @@ zope.interface==5.4.0
|
||||
|
||||
git+https://github.com/vmware/vsphere-automation-sdk-python.git@v8.0.0.0
|
||||
cryptography>=42.0.4 # not directly required, pinned by Snyk to avoid a vulnerability
|
||||
protobuf>=4.25.8 # not directly required, pinned by Snyk to avoid a vulnerability
|
||||
|
||||
@@ -16,7 +16,6 @@ from krkn_lib.elastic.krkn_elastic import KrknElastic
|
||||
from krkn_lib.models.elastic import ElasticChaosRunTelemetry
|
||||
from krkn_lib.models.krkn import ChaosRunOutput, ChaosRunAlertSummary
|
||||
from krkn_lib.prometheus.krkn_prometheus import KrknPrometheus
|
||||
import krkn.performance_dashboards.setup as performance_dashboards
|
||||
import krkn.prometheus as prometheus_plugin
|
||||
import server as server
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
@@ -69,14 +68,6 @@ def main(cfg) -> int:
|
||||
wait_duration = get_yaml_item_value(config["tunings"], "wait_duration", 60)
|
||||
iterations = get_yaml_item_value(config["tunings"], "iterations", 1)
|
||||
daemon_mode = get_yaml_item_value(config["tunings"], "daemon_mode", False)
|
||||
deploy_performance_dashboards = get_yaml_item_value(
|
||||
config["performance_monitoring"], "deploy_dashboards", False
|
||||
)
|
||||
dashboard_repo = get_yaml_item_value(
|
||||
config["performance_monitoring"],
|
||||
"repo",
|
||||
"https://github.com/cloud-bulldozer/performance-dashboards.git",
|
||||
)
|
||||
|
||||
prometheus_url = config["performance_monitoring"].get("prometheus_url")
|
||||
prometheus_bearer_token = config["performance_monitoring"].get(
|
||||
@@ -240,10 +231,6 @@ def main(cfg) -> int:
|
||||
|
||||
logging.info("Server URL: %s" % kubecli.get_host())
|
||||
|
||||
# Deploy performance dashboards
|
||||
if deploy_performance_dashboards:
|
||||
performance_dashboards.setup(dashboard_repo, distribution)
|
||||
|
||||
# Initialize the start iteration to 0
|
||||
iteration = 0
|
||||
|
||||
|
||||
@@ -5,6 +5,7 @@ image: quay.io/krkn-chaos/krkn-hog
|
||||
namespace: default
|
||||
cpu-load-percentage: 90
|
||||
cpu-method: all
|
||||
# node-name: "worker-0" # Uncomment to target a specific node by name
|
||||
node-selector: "node-role.kubernetes.io/worker="
|
||||
number-of-nodes: 2
|
||||
taints: [] #example ["node-role.kubernetes.io/master:NoSchedule"]
|
||||
|
||||
@@ -6,10 +6,11 @@ namespace: default
|
||||
io-block-size: 1m
|
||||
io-write-bytes: 1g
|
||||
io-target-pod-folder: /hog-data
|
||||
# node-name: "worker-0" # Uncomment to target a specific node by name
|
||||
io-target-pod-volume:
|
||||
name: node-volume
|
||||
hostPath:
|
||||
path: /root # a path writable by kubelet in the root filesystem of the node
|
||||
node-selector: "node-role.kubernetes.io/worker="
|
||||
number-of-nodes: ''
|
||||
taints: [] #example ["node-role.kubernetes.io/master:NoSchedule"]
|
||||
taints: [] #example ["node-role.kubernetes.io/master:NoSchedule"]
|
||||
@@ -4,6 +4,7 @@ hog-type: memory
|
||||
image: quay.io/krkn-chaos/krkn-hog
|
||||
namespace: default
|
||||
memory-vm-bytes: 90%
|
||||
# node-name: "worker-0" # Uncomment to target a specific node by name
|
||||
node-selector: "node-role.kubernetes.io/worker="
|
||||
number-of-nodes: ''
|
||||
taints: [] #example ["node-role.kubernetes.io/master:NoSchedule"]
|
||||
|
||||
@@ -1,14 +1,18 @@
|
||||
- id: node_network_filter
|
||||
image: "quay.io/krkn-chaos/krkn-network-chaos:latest"
|
||||
wait_duration: 300
|
||||
test_duration: 100
|
||||
label_selector: "kubernetes.io/hostname=minikube"
|
||||
wait_duration: 1
|
||||
test_duration: 10
|
||||
label_selector: "<node_selector>"
|
||||
service_account: ""
|
||||
taints: [] # example ["node-role.kubernetes.io/master:NoSchedule"]
|
||||
namespace: 'default'
|
||||
instance_count: 1
|
||||
execution: parallel
|
||||
ingress: false
|
||||
egress: true
|
||||
target: ''
|
||||
target: '<node_name>'
|
||||
interfaces: []
|
||||
ports:
|
||||
- 53
|
||||
- 2309
|
||||
protocols:
|
||||
- tcp
|
||||
@@ -2,13 +2,15 @@
|
||||
image: "quay.io/krkn-chaos/krkn-network-chaos:latest"
|
||||
wait_duration: 1
|
||||
test_duration: 60
|
||||
label_selector: "app=network-attacked"
|
||||
label_selector: "<pod_selector>"
|
||||
service_account: ""
|
||||
taints: [] # example ["node-role.kubernetes.io/master:NoSchedule"]
|
||||
namespace: 'default'
|
||||
instance_count: 1
|
||||
execution: parallel
|
||||
ingress: false
|
||||
egress: true
|
||||
target: ""
|
||||
target: "<pod_name>"
|
||||
interfaces: []
|
||||
protocols:
|
||||
- tcp
|
||||
|
||||
7
scenarios/kubevirt/kubevirt-vm-outage.yaml
Normal file
7
scenarios/kubevirt/kubevirt-vm-outage.yaml
Normal file
@@ -0,0 +1,7 @@
|
||||
scenarios:
|
||||
- name: "kubevirt outage test"
|
||||
scenario: kubevirt_vm_outage
|
||||
parameters:
|
||||
vm_name: <vm-name>
|
||||
namespace: <namespace>
|
||||
timeout: 60
|
||||
@@ -7,10 +7,12 @@ node_scenarios:
|
||||
timeout: 360
|
||||
duration: 120
|
||||
cloud_type: ibm
|
||||
disable_ssl_verification: true # Set to true for CI environments with certificate issues
|
||||
- actions:
|
||||
- node_reboot_scenario
|
||||
node_name:
|
||||
label_selector: node-role.kubernetes.io/worker
|
||||
instance_count: 1
|
||||
timeout: 120
|
||||
cloud_type: ibm
|
||||
cloud_type: ibm
|
||||
disable_ssl_verification: true # Set to true for CI environments with certificate issues
|
||||
File diff suppressed because it is too large
Load Diff
215
tests/kubevirt_vm_outage/test_kubevirt_vm_outage.py
Normal file
215
tests/kubevirt_vm_outage/test_kubevirt_vm_outage.py
Normal file
@@ -0,0 +1,215 @@
|
||||
import unittest
|
||||
import time
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
import yaml
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
|
||||
|
||||
from krkn.scenario_plugins.kubevirt_vm_outage.kubevirt_vm_outage_scenario_plugin import KubevirtVmOutageScenarioPlugin
|
||||
|
||||
|
||||
class TestKubevirtVmOutageScenarioPlugin(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
"""
|
||||
Set up test fixtures for KubevirtVmOutageScenarioPlugin
|
||||
"""
|
||||
self.plugin = KubevirtVmOutageScenarioPlugin()
|
||||
|
||||
# Create mock k8s client
|
||||
self.k8s_client = MagicMock()
|
||||
self.custom_object_client = MagicMock()
|
||||
self.k8s_client.custom_object_client = self.custom_object_client
|
||||
self.plugin.k8s_client = self.k8s_client
|
||||
|
||||
# Mock methods needed for KubeVirt operations
|
||||
self.k8s_client.list_custom_resource_definition = MagicMock()
|
||||
|
||||
# Mock custom resource definition list with KubeVirt CRDs
|
||||
crd_list = MagicMock()
|
||||
crd_item = MagicMock()
|
||||
crd_item.spec = MagicMock()
|
||||
crd_item.spec.group = "kubevirt.io"
|
||||
crd_list.items = [crd_item]
|
||||
self.k8s_client.list_custom_resource_definition.return_value = crd_list
|
||||
|
||||
# Mock VMI data
|
||||
self.mock_vmi = {
|
||||
"metadata": {
|
||||
"name": "test-vm",
|
||||
"namespace": "default"
|
||||
},
|
||||
"status": {
|
||||
"phase": "Running"
|
||||
}
|
||||
}
|
||||
|
||||
# Create test config
|
||||
self.config = {
|
||||
"scenarios": [
|
||||
{
|
||||
"name": "kubevirt outage test",
|
||||
"scenario": "kubevirt_vm_outage",
|
||||
"parameters": {
|
||||
"vm_name": "test-vm",
|
||||
"namespace": "default",
|
||||
"duration": 0
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
# Create a temporary config file
|
||||
import tempfile, os
|
||||
temp_dir = tempfile.gettempdir()
|
||||
self.scenario_file = os.path.join(temp_dir, "test_kubevirt_scenario.yaml")
|
||||
with open(self.scenario_file, "w") as f:
|
||||
yaml.dump(self.config, f)
|
||||
|
||||
# Mock dependencies
|
||||
self.telemetry = MagicMock(spec=KrknTelemetryOpenshift)
|
||||
self.scenario_telemetry = MagicMock(spec=ScenarioTelemetry)
|
||||
self.telemetry.get_lib_kubernetes.return_value = self.k8s_client
|
||||
|
||||
def test_successful_injection_and_recovery(self):
|
||||
"""
|
||||
Test successful deletion and recovery of a VMI
|
||||
"""
|
||||
# Mock get_vmi to return our mock VMI
|
||||
with patch.object(self.plugin, 'get_vmi', return_value=self.mock_vmi):
|
||||
# Mock inject and recover to simulate success
|
||||
with patch.object(self.plugin, 'inject', return_value=0) as mock_inject:
|
||||
with patch.object(self.plugin, 'recover', return_value=0) as mock_recover:
|
||||
with patch("builtins.open", unittest.mock.mock_open(read_data=yaml.dump(self.config))):
|
||||
result = self.plugin.run("test-uuid", self.scenario_file, {}, self.telemetry, self.scenario_telemetry)
|
||||
|
||||
self.assertEqual(result, 0)
|
||||
mock_inject.assert_called_once_with("test-vm", "default", False)
|
||||
mock_recover.assert_called_once_with("test-vm", "default", False)
|
||||
|
||||
def test_injection_failure(self):
|
||||
"""
|
||||
Test failure during VMI deletion
|
||||
"""
|
||||
# Mock get_vmi to return our mock VMI
|
||||
with patch.object(self.plugin, 'get_vmi', return_value=self.mock_vmi):
|
||||
# Mock inject to simulate failure
|
||||
with patch.object(self.plugin, 'inject', return_value=1) as mock_inject:
|
||||
with patch.object(self.plugin, 'recover', return_value=0) as mock_recover:
|
||||
with patch("builtins.open", unittest.mock.mock_open(read_data=yaml.dump(self.config))):
|
||||
result = self.plugin.run("test-uuid", self.scenario_file, {}, self.telemetry, self.scenario_telemetry)
|
||||
|
||||
self.assertEqual(result, 1)
|
||||
mock_inject.assert_called_once_with("test-vm", "default", False)
|
||||
mock_recover.assert_not_called()
|
||||
|
||||
def test_disable_auto_restart(self):
|
||||
"""
|
||||
Test VM auto-restart can be disabled
|
||||
"""
|
||||
# Configure test with disable_auto_restart=True
|
||||
self.config["scenarios"][0]["parameters"]["disable_auto_restart"] = True
|
||||
|
||||
# Mock VM object for patching
|
||||
mock_vm = {
|
||||
"metadata": {"name": "test-vm", "namespace": "default"},
|
||||
"spec": {}
|
||||
}
|
||||
|
||||
# Mock get_vmi to return our mock VMI
|
||||
with patch.object(self.plugin, 'get_vmi', return_value=self.mock_vmi):
|
||||
# Mock VM patch operation
|
||||
with patch.object(self.plugin, 'patch_vm_spec') as mock_patch_vm:
|
||||
mock_patch_vm.return_value = True
|
||||
# Mock inject and recover
|
||||
with patch.object(self.plugin, 'inject', return_value=0) as mock_inject:
|
||||
with patch.object(self.plugin, 'recover', return_value=0) as mock_recover:
|
||||
with patch("builtins.open", unittest.mock.mock_open(read_data=yaml.dump(self.config))):
|
||||
result = self.plugin.run("test-uuid", self.scenario_file, {}, self.telemetry, self.scenario_telemetry)
|
||||
|
||||
self.assertEqual(result, 0)
|
||||
# Should call patch_vm_spec to disable auto-restart
|
||||
mock_patch_vm.assert_any_call("test-vm", "default", False)
|
||||
# Should call patch_vm_spec to re-enable auto-restart during recovery
|
||||
mock_patch_vm.assert_any_call("test-vm", "default", True)
|
||||
mock_inject.assert_called_once_with("test-vm", "default", True)
|
||||
mock_recover.assert_called_once_with("test-vm", "default", True)
|
||||
|
||||
def test_recovery_when_vmi_does_not_exist(self):
|
||||
"""
|
||||
Test recovery logic when VMI does not exist after deletion
|
||||
"""
|
||||
# Store the original VMI in the plugin for recovery
|
||||
self.plugin.original_vmi = self.mock_vmi.copy()
|
||||
|
||||
# Create a cleaned vmi_dict as the plugin would
|
||||
vmi_dict = self.mock_vmi.copy()
|
||||
|
||||
# Set up running VMI data for after recovery
|
||||
running_vmi = {
|
||||
"metadata": {"name": "test-vm", "namespace": "default"},
|
||||
"status": {"phase": "Running"}
|
||||
}
|
||||
|
||||
# Set up time.time to immediately exceed the timeout for auto-recovery
|
||||
with patch('time.time', side_effect=[0, 301, 301, 301, 301, 310, 320]):
|
||||
# Mock get_vmi to always return None (not auto-recovered)
|
||||
with patch.object(self.plugin, 'get_vmi', side_effect=[None, None, running_vmi]):
|
||||
# Mock the custom object API to return success
|
||||
self.custom_object_client.create_namespaced_custom_object = MagicMock(return_value=running_vmi)
|
||||
|
||||
# Run recovery with mocked time.sleep
|
||||
with patch('time.sleep'):
|
||||
result = self.plugin.recover("test-vm", "default", False)
|
||||
|
||||
self.assertEqual(result, 0)
|
||||
# Verify create was called with the right arguments for our API version and kind
|
||||
self.custom_object_client.create_namespaced_custom_object.assert_called_once_with(
|
||||
group="kubevirt.io",
|
||||
version="v1",
|
||||
namespace="default",
|
||||
plural="virtualmachineinstances",
|
||||
body=vmi_dict
|
||||
)
|
||||
|
||||
def test_validation_failure(self):
|
||||
"""
|
||||
Test validation failure when KubeVirt is not installed
|
||||
"""
|
||||
# Mock empty CRD list (no KubeVirt CRDs)
|
||||
empty_crd_list = MagicMock()
|
||||
empty_crd_list.items = []
|
||||
self.k8s_client.list_custom_resource_definition.return_value = empty_crd_list
|
||||
|
||||
with patch("builtins.open", unittest.mock.mock_open(read_data=yaml.dump(self.config))):
|
||||
result = self.plugin.run("test-uuid", self.scenario_file, {}, self.telemetry, self.scenario_telemetry)
|
||||
|
||||
self.assertEqual(result, 1)
|
||||
|
||||
def test_delete_vmi_timeout(self):
|
||||
"""
|
||||
Test timeout during VMI deletion
|
||||
"""
|
||||
# Mock successful delete operation
|
||||
self.custom_object_client.delete_namespaced_custom_object = MagicMock(return_value={})
|
||||
|
||||
# Mock that get_vmi always returns VMI (never gets deleted)
|
||||
with patch.object(self.plugin, 'get_vmi', return_value=self.mock_vmi):
|
||||
# Simulate timeout by making time.time return values that exceed the timeout
|
||||
with patch('time.sleep'), patch('time.time', side_effect=[0, 10, 20, 130, 130, 130, 130, 140]):
|
||||
result = self.plugin.inject("test-vm", "default", False)
|
||||
|
||||
self.assertEqual(result, 1)
|
||||
self.custom_object_client.delete_namespaced_custom_object.assert_called_once_with(
|
||||
group="kubevirt.io",
|
||||
version="v1",
|
||||
namespace="default",
|
||||
plural="virtualmachineinstances",
|
||||
name="test-vm"
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
Reference in New Issue
Block a user