Mirror of https://github.com/krkn-chaos/krkn.git, synced 2026-02-17 03:19:54 +00:00.

Compare commits: 89 commits
Commit SHA1s included in the comparison:

0372013b67 4fea1a354d 667798d588 0c30d89a1b 2ba20fa483 97035a765c 10ba53574e 0ecba41082
491f59d152 2549c9a146 949f1f09e0 959766254d 0e68dedb12 34a676a795 e5c5b35db3 93d2e60386
462c9ac67e 04e44738d9 f810cadad2 4b869bad83 a36b0c76b2 a17e16390c f8534d616c 9670ce82f5
95e4b68389 0aac6119b0 7e5bdfd5cf 3c207ab2ea d91172d9b2 a13fb43d94 37ee7177bc 32142cc159
34bfc0d3d9 736c90e937 5e7938ba4a b525f83261 26460a0dce 7968c2a776 6186555c15 9cd086f59c
1057917731 5484828b67 d18b6332e5 89a0e166f1 624f50acd1 e02c6d1287 04425a8d8a f3933f0e62
56ff0a8c72 9378cd74cd 4d3491da0f d6ce66160b ef1a55438b d8f54b83a2 4870c86515 6ae17cf678
ce9f8aa050 05148317c1 5f836f294b cfa1bb09a0 5ddfff5a85 7d18487228 08de42c91a dc7d5bb01b
ea3444d375 7b660a0878 5fe0655f22 5df343c183 f364e9f283 86a7427606 31266fbc3e 57de3769e7
42fc8eea40 22d56e2cdc a259b68221 052f83e7d9 fb3bbe4e26 96ba9be4b8 58d5d1d8dc 3fe22a0d8f
21b89a32a7 dbe3ea9718 a142f6e7a4 2610a7af67 f827f65132 aa6cbbc11a e17354e54d 2dfa5cb0cd
0799008cd5
.github/workflows/docker-image.yml (vendored, 34 changed lines)

@@ -1,8 +1,7 @@
 name: Docker Image CI
 on:
   push:
     branches:
       - main
     tags: ['v[0-9].[0-9]+.[0-9]+']
   pull_request:

 jobs:
@@ -12,30 +11,45 @@ jobs:
     - name: Check out code
       uses: actions/checkout@v3
     - name: Build the Docker images
       if: startsWith(github.ref, 'refs/tags')
       run: |
-        docker build --no-cache -t quay.io/krkn-chaos/krkn containers/
+        ./containers/compile_dockerfile.sh
+        docker build --no-cache -t quay.io/krkn-chaos/krkn containers/ --build-arg TAG=${GITHUB_REF#refs/tags/}
         docker tag quay.io/krkn-chaos/krkn quay.io/redhat-chaos/krkn
+        docker tag quay.io/krkn-chaos/krkn quay.io/krkn-chaos/krkn:${GITHUB_REF#refs/tags/}
+        docker tag quay.io/krkn-chaos/krkn quay.io/redhat-chaos/krkn:${GITHUB_REF#refs/tags/}

+    - name: Test Build the Docker images
+      if: ${{ github.event_name == 'pull_request' }}
+      run: |
+        ./containers/compile_dockerfile.sh
+        docker build --no-cache -t quay.io/krkn-chaos/krkn containers/ --build-arg PR_NUMBER=${{ github.event.pull_request.number }}
     - name: Login in quay
-      if: github.ref == 'refs/heads/main' && github.event_name == 'push'
+      if: startsWith(github.ref, 'refs/tags')
       run: docker login quay.io -u ${QUAY_USER} -p ${QUAY_TOKEN}
       env:
         QUAY_USER: ${{ secrets.QUAY_USERNAME }}
         QUAY_TOKEN: ${{ secrets.QUAY_PASSWORD }}
     - name: Push the KrknChaos Docker images
-      if: github.ref == 'refs/heads/main' && github.event_name == 'push'
-      run: docker push quay.io/krkn-chaos/krkn
+      if: startsWith(github.ref, 'refs/tags')
+      run: |
+        docker push quay.io/krkn-chaos/krkn
+        docker push quay.io/krkn-chaos/krkn:${GITHUB_REF#refs/tags/}
     - name: Login in to redhat-chaos quay
-      if: github.ref == 'refs/heads/main' && github.event_name == 'push'
+      if: startsWith(github.ref, 'refs/tags/v')
       run: docker login quay.io -u ${QUAY_USER} -p ${QUAY_TOKEN}
       env:
         QUAY_USER: ${{ secrets.QUAY_USER_1 }}
         QUAY_TOKEN: ${{ secrets.QUAY_TOKEN_1 }}
     - name: Push the RedHat Chaos Docker images
-      if: github.ref == 'refs/heads/main' && github.event_name == 'push'
-      run: docker push quay.io/redhat-chaos/krkn
+      if: startsWith(github.ref, 'refs/tags')
+      run: |
+        docker push quay.io/redhat-chaos/krkn
+        docker push quay.io/redhat-chaos/krkn:${GITHUB_REF#refs/tags/}
     - name: Rebuild krkn-hub
-      if: github.ref == 'refs/heads/main' && github.event_name == 'push'
+      if: startsWith(github.ref, 'refs/tags')
      uses: redhat-chaos/actions/krkn-hub@main
      with:
        QUAY_USER: ${{ secrets.QUAY_USERNAME }}
        QUAY_TOKEN: ${{ secrets.QUAY_PASSWORD }}
        AUTOPUSH: ${{ secrets.AUTOPUSH }}
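The reworked workflow keys its build, tag, and push steps off git tags instead of pushes to main, and several steps derive the image tag from the ref with the `${GITHUB_REF#refs/tags/}` parameter expansion. A small sketch of what that expansion yields (the ref value below is illustrative; on a tag push GitHub Actions sets `GITHUB_REF` to `refs/tags/<tag>`):

```bash
# "#refs/tags/" strips the shortest matching prefix, leaving the bare tag name
GITHUB_REF=refs/tags/v1.7.0                                 # example value, not a real release
echo "${GITHUB_REF#refs/tags/}"                             # -> v1.7.0
echo "quay.io/krkn-chaos/krkn:${GITHUB_REF#refs/tags/}"     # -> quay.io/krkn-chaos/krkn:v1.7.0
```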
.github/workflows/tests.yml (vendored, 74 changed lines)

@@ -61,6 +61,8 @@ jobs:
           kubectl create namespace namespace-scenario
           kubectl apply -f CI/templates/time_pod.yaml
           kubectl wait --for=condition=ready pod -l scenario=time-skew --timeout=300s
+          kubectl apply -f CI/templates/service_hijacking.yaml
+          kubectl wait --for=condition=ready pod -l "app.kubernetes.io/name=proxy" --timeout=300s
       - name: Get Kind nodes
         run: |
           kubectl get nodes --show-labels=true
@@ -70,12 +72,14 @@
         run: python -m coverage run -a -m unittest discover -s tests -v

       - name: Setup Pull Request Functional Tests
-        if: github.event_name == 'pull_request'
+        if: |
+          github.event_name == 'pull_request'
         run: |
           yq -i '.kraken.port="8081"' CI/config/common_test_config.yaml
           yq -i '.kraken.signal_address="0.0.0.0"' CI/config/common_test_config.yaml
           yq -i '.kraken.performance_monitoring="localhost:9090"' CI/config/common_test_config.yaml
-          echo "test_app_outages" > ./CI/tests/functional_tests
+          echo "test_service_hijacking" > ./CI/tests/functional_tests
+          echo "test_app_outages" >> ./CI/tests/functional_tests
           echo "test_container" >> ./CI/tests/functional_tests
           echo "test_namespace" >> ./CI/tests/functional_tests
           echo "test_net_chaos" >> ./CI/tests/functional_tests
@@ -84,7 +88,9 @@
           echo "test_arca_memory_hog" >> ./CI/tests/functional_tests
           echo "test_arca_io_hog" >> ./CI/tests/functional_tests

-      # Push on main only steps
+      # Push on main only steps + all other functional to collect coverage
+      # for the badge
       - name: Configure AWS Credentials
         if: github.ref == 'refs/heads/main' && github.event_name == 'push'
         uses: aws-actions/configure-aws-credentials@v4
@@ -101,6 +107,15 @@
           yq -i '.telemetry.username="${{secrets.TELEMETRY_USERNAME}}"' CI/config/common_test_config.yaml
           yq -i '.telemetry.password="${{secrets.TELEMETRY_PASSWORD}}"' CI/config/common_test_config.yaml
           echo "test_telemetry" > ./CI/tests/functional_tests
+          echo "test_service_hijacking" >> ./CI/tests/functional_tests
+          echo "test_app_outages" >> ./CI/tests/functional_tests
+          echo "test_container" >> ./CI/tests/functional_tests
+          echo "test_namespace" >> ./CI/tests/functional_tests
+          echo "test_net_chaos" >> ./CI/tests/functional_tests
+          echo "test_time" >> ./CI/tests/functional_tests
+          echo "test_arca_cpu_hog" >> ./CI/tests/functional_tests
+          echo "test_arca_memory_hog" >> ./CI/tests/functional_tests
+          echo "test_arca_io_hog" >> ./CI/tests/functional_tests

       # Final common steps
       - name: Run Functional tests
@@ -111,7 +126,7 @@
           cat ./CI/results.markdown >> $GITHUB_STEP_SUMMARY
           echo >> $GITHUB_STEP_SUMMARY
       - name: Upload CI logs
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: ci-logs
           path: CI/out
@@ -119,16 +134,65 @@
       - name: Collect coverage report
         run: |
           python -m coverage html
+          python -m coverage json
       - name: Publish coverage report to job summary
         run: |
           pip install html2text
           html2text --ignore-images --ignore-links -b 0 htmlcov/index.html >> $GITHUB_STEP_SUMMARY
       - name: Upload coverage data
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: coverage
           path: htmlcov
           if-no-files-found: error
+      - name: Upload json coverage
+        uses: actions/upload-artifact@v4
+        with:
+          name: coverage.json
+          path: coverage.json
+          if-no-files-found: error
       - name: Check CI results
         run: grep Fail CI/results.markdown && false || true
+  badge:
+    permissions:
+      contents: write
+    name: Generate Coverage Badge
+    runs-on: ubuntu-latest
+    needs:
+      - tests
+    if: github.ref == 'refs/heads/main' && github.event_name == 'push'
+    steps:
+      - name: Check out doc repo
+        uses: actions/checkout@master
+        with:
+          repository: krkn-chaos/krkn-lib-docs
+          path: krkn-lib-docs
+          ssh-key: ${{ secrets.KRKN_LIB_DOCS_PRIV_KEY }}
+      - name: Download json coverage
+        uses: actions/download-artifact@v4
+        with:
+          name: coverage.json
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: 3.9
+      - name: Copy badge on GitHub Page Repo
+        env:
+          COLOR: yellow
+        run: |
+          # generate coverage badge on previously calculated total coverage
+          # and copy in the docs page
+          export TOTAL=$(python -c "import json;print(json.load(open('coverage.json'))['totals']['percent_covered_display'])")
+          [[ $TOTAL > 40 ]] && COLOR=green
+          echo "TOTAL: $TOTAL"
+          echo "COLOR: $COLOR"
+          curl "https://img.shields.io/badge/coverage-$TOTAL%25-$COLOR" > ./krkn-lib-docs/coverage_badge_krkn.svg
+      - name: Push updated Coverage Badge
+        run: |
+          cd krkn-lib-docs
+          git add .
+          git config user.name "krkn-chaos"
+          git config user.email "<>"
+          git commit -m "[KRKN] Coverage Badge ${GITHUB_REF##*/}" || echo "no changes to commit"
+          git push
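The new badge job reads the overall percentage from the JSON coverage report and asks shields.io for a matching SVG. A hedged sketch of running the same computation locally against an existing coverage.json (the 40% colour threshold is copied from the workflow; the arithmetic test is a local substitution for the workflow's string comparison):

```bash
# extract the total exactly as the workflow does
TOTAL=$(python -c "import json;print(json.load(open('coverage.json'))['totals']['percent_covered_display'])")
COLOR=yellow
# numeric comparison; errors (e.g. non-integer totals) are suppressed and leave the colour yellow
[ "$TOTAL" -gt 40 ] 2>/dev/null && COLOR=green
curl -s "https://img.shields.io/badge/coverage-$TOTAL%25-$COLOR" > coverage_badge_krkn.svg
```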
@@ -50,3 +50,15 @@ telemetry:
     oc_cli_path: /usr/bin/oc # optional, if not specified will be search in $PATH
     events_backup: True # enables/disables cluster events collection
+    telemetry_group: "funtests"
+elastic:
+    enable_elastic: False
+    collect_metrics: False
+    collect_alerts: False
+    verify_certs: False
+    elastic_url: "https://192.168.39.196" # To track results in elasticsearch, give url to server here; will post telemetry details when url and index not blank
+    elastic_port: 32766
+    username: "elastic"
+    password: "test"
+    metrics_index: "krkn-metrics"
+    alerts_index: "krkn-alerts"
+    telemetry_index: "krkn-telemetry"
CI/templates/service_hijacking.yaml (new file, 29 lines)

apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    app.kubernetes.io/name: proxy
spec:
  containers:
  - name: nginx
    image: nginx:stable
    ports:
      - containerPort: 80
        name: http-web-svc

---
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  selector:
    app.kubernetes.io/name: proxy
  type: NodePort
  ports:
  - name: name-of-service-port
    protocol: TCP
    port: 80
    targetPort: http-web-svc
    nodePort: 30036
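The manifest above backs the new service-hijacking functional test: the nginx pod is selected by the `app.kubernetes.io/name: proxy` label and exposed through a NodePort service on port 30036 (the CI cluster additionally maps it to localhost:8888 via the kind config). A quick local sanity check, sketched here with a port-forward so it does not depend on the kind port mapping:

```bash
# apply the template and wait for the proxy pod, as the CI workflow does
kubectl apply -f CI/templates/service_hijacking.yaml
kubectl wait --for=condition=ready pod -l "app.kubernetes.io/name=proxy" --timeout=300s

# reach the service without relying on kind extraPortMappings (local check only)
kubectl port-forward service/nginx-service 8888:80 &
sleep 2
curl -s -o /dev/null -w "%{http_code}\n" http://localhost:8888   # expect 200 from nginx
```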
@@ -10,7 +10,7 @@ function functional_test_app_outage {
   yq -i '.application_outage.duration=10' scenarios/openshift/app_outage.yaml
   yq -i '.application_outage.pod_selector={"scenario":"outage"}' scenarios/openshift/app_outage.yaml
   yq -i '.application_outage.namespace="default"' scenarios/openshift/app_outage.yaml
-  export scenario_type="application_outages"
+  export scenario_type="application_outages_scenarios"
   export scenario_file="scenarios/openshift/app_outage.yaml"
   export post_config=""
   envsubst < CI/config/common_test_config.yaml > CI/config/app_outage.yaml
@@ -7,9 +7,9 @@ trap finish EXIT


 function functional_test_arca_cpu_hog {
-  yq -i '.input_list[0].node_selector={"kubernetes.io/hostname":"kind-worker2"}' scenarios/arcaflow/cpu-hog/input.yaml
-  export scenario_type="arcaflow_scenarios"
-  export scenario_file="scenarios/arcaflow/cpu-hog/input.yaml"
+  yq -i '.input_list[0].node_selector={"kubernetes.io/hostname":"kind-worker2"}' scenarios/kube/cpu-hog/input.yaml
+  export scenario_type="hog_scenarios"
+  export scenario_file="scenarios/kube/cpu-hog/input.yaml"
   export post_config=""
   envsubst < CI/config/common_test_config.yaml > CI/config/arca_cpu_hog.yaml
   python3 -m coverage run -a run_kraken.py -c CI/config/arca_cpu_hog.yaml
@@ -7,9 +7,9 @@ trap finish EXIT


 function functional_test_arca_io_hog {
-  yq -i '.input_list[0].node_selector={"kubernetes.io/hostname":"kind-worker2"}' scenarios/arcaflow/io-hog/input.yaml
-  export scenario_type="arcaflow_scenarios"
-  export scenario_file="scenarios/arcaflow/io-hog/input.yaml"
+  yq -i '.input_list[0].node_selector={"kubernetes.io/hostname":"kind-worker2"}' scenarios/kube/io-hog/input.yaml
+  export scenario_type="hog_scenarios"
+  export scenario_file="scenarios/kube/io-hog/input.yaml"
   export post_config=""
   envsubst < CI/config/common_test_config.yaml > CI/config/arca_io_hog.yaml
   python3 -m coverage run -a run_kraken.py -c CI/config/arca_io_hog.yaml
@@ -7,9 +7,9 @@ trap finish EXIT


 function functional_test_arca_memory_hog {
-  yq -i '.input_list[0].node_selector={"kubernetes.io/hostname":"kind-worker2"}' scenarios/arcaflow/memory-hog/input.yaml
-  export scenario_type="arcaflow_scenarios"
-  export scenario_file="scenarios/arcaflow/memory-hog/input.yaml"
+  yq -i '.input_list[0].node_selector={"kubernetes.io/hostname":"kind-worker2"}' scenarios/kube/memory-hog/input.yaml
+  export scenario_type="hog_scenarios"
+  export scenario_file="scenarios/kube/memory-hog/input.yaml"
   export post_config=""
   envsubst < CI/config/common_test_config.yaml > CI/config/arca_memory_hog.yaml
   python3 -m coverage run -a run_kraken.py -c CI/config/arca_memory_hog.yaml
@@ -12,7 +12,7 @@ function functional_test_container_crash {
   yq -i '.scenarios[0].label_selector="scenario=container"' scenarios/openshift/container_etcd.yml
   yq -i '.scenarios[0].container_name="fedtools"' scenarios/openshift/container_etcd.yml
   export scenario_type="container_scenarios"
-  export scenario_file="- scenarios/openshift/container_etcd.yml"
+  export scenario_file="scenarios/openshift/container_etcd.yml"
   export post_config=""
   envsubst < CI/config/common_test_config.yaml > CI/config/container_config.yaml
@@ -6,8 +6,8 @@ trap error ERR
 trap finish EXIT

 function funtional_test_namespace_deletion {
-  export scenario_type="namespace_scenarios"
-  export scenario_file="- scenarios/openshift/ingress_namespace.yaml"
+  export scenario_type="service_disruption_scenarios"
+  export scenario_file="scenarios/openshift/ingress_namespace.yaml"
   export post_config=""
   yq '.scenarios[0].namespace="^namespace-scenario$"' -i scenarios/openshift/ingress_namespace.yaml
   yq '.scenarios[0].wait_time=30' -i scenarios/openshift/ingress_namespace.yaml
@@ -15,7 +15,7 @@ function functional_test_network_chaos {
   yq -i 'del(.network_chaos.egress.latency)' scenarios/openshift/network_chaos.yaml
   yq -i 'del(.network_chaos.egress.loss)' scenarios/openshift/network_chaos.yaml

-  export scenario_type="network_chaos"
+  export scenario_type="network_chaos_scenarios"
   export scenario_file="scenarios/openshift/network_chaos.yaml"
   export post_config=""
   envsubst < CI/config/common_test_config.yaml > CI/config/network_chaos.yaml
CI/tests/test_service_hijacking.sh (new file, 114 lines)

set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT
# port mapping has been configured in kind-config.yml
SERVICE_URL=http://localhost:8888
PAYLOAD_GET_1="{ \
\"status\":\"internal server error\" \
}"
STATUS_CODE_GET_1=500

PAYLOAD_PATCH_1="resource patched"
STATUS_CODE_PATCH_1=201

PAYLOAD_POST_1="{ \
\"status\": \"unauthorized\" \
}"
STATUS_CODE_POST_1=401

PAYLOAD_GET_2="{ \
\"status\":\"resource created\" \
}"
STATUS_CODE_GET_2=201

PAYLOAD_PATCH_2="bad request"
STATUS_CODE_PATCH_2=400

PAYLOAD_POST_2="not found"
STATUS_CODE_POST_2=404

JSON_MIME="application/json"
TEXT_MIME="text/plain; charset=utf-8"

function functional_test_service_hijacking {

  export scenario_type="service_hijacking_scenarios"
  export scenario_file="scenarios/kube/service_hijacking.yaml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/service_hijacking.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/service_hijacking.yaml > /dev/null 2>&1 &
  PID=$!
  #Waiting the hijacking to have effect
  COUNTER=0
  while [ `curl -X GET -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/list/index.php` == 404 ]
  do
    echo "waiting scenario to kick in."
    sleep 1
    COUNTER=$((COUNTER+1))
    [ $COUNTER -eq "100" ] && echo "maximum number of retry reached, test failed" && exit 1
  done

  #Checking Step 1 GET on /list/index.php
  OUT_GET="`curl -X GET -s $SERVICE_URL/list/index.php`"
  OUT_CONTENT=`curl -X GET -s -o /dev/null -I -w "%{content_type}" $SERVICE_URL/list/index.php`
  OUT_STATUS_CODE=`curl -X GET -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/list/index.php`
  [ "${PAYLOAD_GET_1//[$'\t\r\n ']}" == "${OUT_GET//[$'\t\r\n ']}" ] && echo "Step 1 GET Payload OK" || (echo "Payload did not match. Test failed." && exit 1)
  [ "$OUT_STATUS_CODE" == "$STATUS_CODE_GET_1" ] && echo "Step 1 GET Status Code OK" || (echo " Step 1 GET status code did not match. Test failed." && exit 1)
  [ "$OUT_CONTENT" == "$JSON_MIME" ] && echo "Step 1 GET MIME OK" || (echo " Step 1 GET MIME did not match. Test failed." && exit 1)

  #Checking Step 1 POST on /list/index.php
  OUT_POST="`curl -s -X POST $SERVICE_URL/list/index.php`"
  OUT_STATUS_CODE=`curl -X POST -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/list/index.php`
  OUT_CONTENT=`curl -X POST -s -o /dev/null -I -w "%{content_type}" $SERVICE_URL/list/index.php`
  [ "${PAYLOAD_POST_1//[$'\t\r\n ']}" == "${OUT_POST//[$'\t\r\n ']}" ] && echo "Step 1 POST Payload OK" || (echo "Payload did not match. Test failed." && exit 1)
  [ "$OUT_STATUS_CODE" == "$STATUS_CODE_POST_1" ] && echo "Step 1 POST Status Code OK" || (echo "Step 1 POST status code did not match. Test failed." && exit 1)
  [ "$OUT_CONTENT" == "$JSON_MIME" ] && echo "Step 1 POST MIME OK" || (echo " Step 1 POST MIME did not match. Test failed." && exit 1)

  #Checking Step 1 PATCH on /patch
  OUT_PATCH="`curl -s -X PATCH $SERVICE_URL/patch`"
  OUT_STATUS_CODE=`curl -X PATCH -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/patch`
  OUT_CONTENT=`curl -X PATCH -s -o /dev/null -I -w "%{content_type}" $SERVICE_URL/patch`
  [ "${PAYLOAD_PATCH_1//[$'\t\r\n ']}" == "${OUT_PATCH//[$'\t\r\n ']}" ] && echo "Step 1 PATCH Payload OK" || (echo "Payload did not match. Test failed." && exit 1)
  [ "$OUT_STATUS_CODE" == "$STATUS_CODE_PATCH_1" ] && echo "Step 1 PATCH Status Code OK" || (echo "Step 1 PATCH status code did not match. Test failed." && exit 1)
  [ "$OUT_CONTENT" == "$TEXT_MIME" ] && echo "Step 1 PATCH MIME OK" || (echo " Step 1 PATCH MIME did not match. Test failed." && exit 1)
  # wait for the next step
  sleep 16

  #Checking Step 2 GET on /list/index.php
  OUT_GET="`curl -X GET -s $SERVICE_URL/list/index.php`"
  OUT_CONTENT=`curl -X GET -s -o /dev/null -I -w "%{content_type}" $SERVICE_URL/list/index.php`
  OUT_STATUS_CODE=`curl -X GET -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/list/index.php`
  [ "${PAYLOAD_GET_2//[$'\t\r\n ']}" == "${OUT_GET//[$'\t\r\n ']}" ] && echo "Step 2 GET Payload OK" || (echo "Step 2 GET Payload did not match. Test failed." && exit 1)
  [ "$OUT_STATUS_CODE" == "$STATUS_CODE_GET_2" ] && echo "Step 2 GET Status Code OK" || (echo "Step 2 GET status code did not match. Test failed." && exit 1)
  [ "$OUT_CONTENT" == "$JSON_MIME" ] && echo "Step 2 GET MIME OK" || (echo " Step 2 GET MIME did not match. Test failed." && exit 1)

  #Checking Step 2 POST on /list/index.php
  OUT_POST="`curl -s -X POST $SERVICE_URL/list/index.php`"
  OUT_CONTENT=`curl -X POST -s -o /dev/null -I -w "%{content_type}" $SERVICE_URL/list/index.php`
  OUT_STATUS_CODE=`curl -X POST -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/list/index.php`
  [ "${PAYLOAD_POST_2//[$'\t\r\n ']}" == "${OUT_POST//[$'\t\r\n ']}" ] && echo "Step 2 POST Payload OK" || (echo "Step 2 POST Payload did not match. Test failed." && exit 1)
  [ "$OUT_STATUS_CODE" == "$STATUS_CODE_POST_2" ] && echo "Step 2 POST Status Code OK" || (echo "Step 2 POST status code did not match. Test failed." && exit 1)
  [ "$OUT_CONTENT" == "$TEXT_MIME" ] && echo "Step 2 POST MIME OK" || (echo " Step 2 POST MIME did not match. Test failed." && exit 1)

  #Checking Step 2 PATCH on /patch
  OUT_PATCH="`curl -s -X PATCH $SERVICE_URL/patch`"
  OUT_CONTENT=`curl -X PATCH -s -o /dev/null -I -w "%{content_type}" $SERVICE_URL/patch`
  OUT_STATUS_CODE=`curl -X PATCH -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/patch`
  [ "${PAYLOAD_PATCH_2//[$'\t\r\n ']}" == "${OUT_PATCH//[$'\t\r\n ']}" ] && echo "Step 2 PATCH Payload OK" || (echo "Step 2 PATCH Payload did not match. Test failed." && exit 1)
  [ "$OUT_STATUS_CODE" == "$STATUS_CODE_PATCH_2" ] && echo "Step 2 PATCH Status Code OK" || (echo "Step 2 PATCH status code did not match. Test failed." && exit 1)
  [ "$OUT_CONTENT" == "$TEXT_MIME" ] && echo "Step 2 PATCH MIME OK" || (echo " Step 2 PATCH MIME did not match. Test failed." && exit 1)
  wait $PID

  # now checking if service has been restore correctly and nginx responds correctly
  curl -s $SERVICE_URL | grep nginx! && echo "BODY: Service restored!" || (echo "BODY: failed to restore service" && exit 1)
  OUT_STATUS_CODE=`curl -X GET -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL`
  [ "$OUT_STATUS_CODE" == "200" ] && echo "STATUS_CODE: Service restored!" || (echo "STATUS_CODE: failed to restore service" && exit 1)

  echo "Service Hijacking Chaos test: Success"
}


functional_test_service_hijacking
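Each step of the test above repeats the same three assertions (body, status code, MIME type) for every verb and phase. A hedged sketch of how those checks could be folded into a single helper; the function name and argument order are illustrative and not part of the repository:

```bash
# hypothetical helper: check_endpoint METHOD URL EXPECTED_BODY EXPECTED_CODE EXPECTED_MIME
function check_endpoint {
  local method=$1 url=$2 expected_body=$3 expected_code=$4 expected_mime=$5
  local body code mime
  body=$(curl -s -X "$method" "$url")
  code=$(curl -s -X "$method" -o /dev/null -I -w "%{http_code}" "$url")
  mime=$(curl -s -X "$method" -o /dev/null -I -w "%{content_type}" "$url")
  # strip whitespace before comparing, as the original script does
  [ "${expected_body//[$'\t\r\n ']}" == "${body//[$'\t\r\n ']}" ] || { echo "$method $url: payload mismatch"; exit 1; }
  [ "$code" == "$expected_code" ] || { echo "$method $url: status code mismatch"; exit 1; }
  [ "$mime" == "$expected_mime" ] || { echo "$method $url: MIME mismatch"; exit 1; }
}

# usage, mirroring Step 1 of the test:
# check_endpoint GET "$SERVICE_URL/list/index.php" "$PAYLOAD_GET_1" "$STATUS_CODE_GET_1" "$JSON_MIME"
```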
@@ -18,15 +18,14 @@ function functional_test_telemetry {
   yq -i '.performance_monitoring.prometheus_url="http://localhost:9090"' CI/config/common_test_config.yaml
   yq -i '.telemetry.run_tag=env(RUN_TAG)' CI/config/common_test_config.yaml

-  export scenario_type="arcaflow_scenarios"
-  export scenario_file="scenarios/arcaflow/cpu-hog/input.yaml"
+  export scenario_type="hog_scenarios"
+  export scenario_file="scenarios/kube/cpu-hog/input.yaml"
   export post_config=""
   envsubst < CI/config/common_test_config.yaml > CI/config/telemetry.yaml
   retval=$(python3 -m coverage run -a run_kraken.py -c CI/config/telemetry.yaml)
   RUN_FOLDER=`cat CI/out/test_telemetry.out | grep amazonaws.com | sed -rn "s#.*https:\/\/.*\/files/(.*)#\1#p"`
   $AWS_CLI s3 ls "s3://$AWS_BUCKET/$RUN_FOLDER/" | awk '{ print $4 }' > s3_remote_files
   echo "checking if telemetry files are uploaded on s3"
   cat s3_remote_files | grep events-00.json || ( echo "FAILED: events-00.json not uploaded" && exit 1 )
   cat s3_remote_files | grep critical-alerts-00.log || ( echo "FAILED: critical-alerts-00.log not uploaded" && exit 1 )
   cat s3_remote_files | grep prometheus-00.tar || ( echo "FAILED: prometheus backup not uploaded" && exit 1 )
   cat s3_remote_files | grep telemetry.json || ( echo "FAILED: telemetry.json not uploaded" && exit 1 )
README.md (22 changed lines)

@@ -1,5 +1,7 @@
 # Krkn aka Kraken
 
 
 

 
@@ -39,18 +41,6 @@ After installation, refer back to the below sections for supported scenarios and
 #### Running Kraken with minimal configuration tweaks
 For cases where you want to run Kraken with minimal configuration changes, refer to [krkn-hub](https://github.com/krkn-chaos/krkn-hub). One use case is CI integration where you do not want to carry around different configuration files for the scenarios.

-### Setting up infrastructure dependencies
-Kraken indexes the metrics specified in the profile into Elasticsearch in addition to leveraging Cerberus for understanding the health of the Kubernetes cluster under test. More information on the features is documented below. The infrastructure pieces can be easily installed and uninstalled by running:
-
-```
-$ cd kraken
-$ podman-compose up or $ docker-compose up # Spins up the containers specified in the docker-compose.yml file present in the run directory.
-$ podman-compose down or $ docker-compose down # Delete the containers installed.
-```
-This will manage the Cerberus and Elasticsearch containers on the host on which you are running Kraken.
-
-**NOTE**: Make sure you have enough resources (memory and disk) on the machine on top of which the containers are running as Elasticsearch is resource intensive. Cerberus monitors the system components by default, the [config](config/cerberus.yaml) can be tweaked to add applications namespaces, routes and other components to monitor as well. The command will keep running until killed since detached mode is not supported as of now.


 ### Config
 Instructions on how to setup the config and the options supported can be found at [Config](docs/config.md).
@@ -73,6 +63,8 @@ Scenario type | Kubernetes
 [PVC scenario](docs/pvc_scenario.md) | :heavy_check_mark: |
 [Network_Chaos](docs/network_chaos.md) | :heavy_check_mark: |
 [ManagedCluster Scenarios](docs/managedcluster_scenarios.md) | :heavy_check_mark: |
+[Service Hijacking Scenarios](docs/service_hijacking_scenarios.md) | :heavy_check_mark: |
+[SYN Flood Scenarios](docs/syn_flood_scenarios.md) | :heavy_check_mark: |


 ### Kraken scenario pass/fail criteria and report
@@ -127,6 +119,12 @@ If adding a new scenario or tweaking the main config, be sure to add in updates
 Please read [this file]((CI/README.md#adding-a-test-case)) for more information on updates.


+### Scenario Plugin Development
+
+If you're gearing up to develop new scenarios, take a moment to review our
+[Scenario Plugin API Documentation](docs/scenario_plugin_api.md).
+It’s the perfect starting point to tap into your chaotic creativity!

 ### Community
 Key Members(slack_usernames/full name): paigerube14/Paige Rubendall, mffiedler/Mike Fiedler, tsebasti/Tullio Sebastiani, yogi/Yogananth Subramanian, sahil/Sahil Shah, pradeep/Pradeep Surisetty and ravielluri/Naga Ravi Chaitanya Elluri.
 * [**#krkn on Kubernetes Slack**](https://kubernetes.slack.com/messages/C05SFMHRWK1)
ROADMAP.md (11 changed lines)

@@ -6,10 +6,11 @@ Following are a list of enhancements that we are planning to work on adding supp
 - [x] [Centralized storage for chaos experiments artifacts](https://github.com/krkn-chaos/krkn/issues/423)
 - [ ] [Support for causing DNS outages](https://github.com/krkn-chaos/krkn/issues/394)
 - [x] [Chaos recommender](https://github.com/krkn-chaos/krkn/tree/main/utils/chaos-recommender) to suggest scenarios having probability of impacting the service under test using profiling results
-- [ ] Chaos AI integration to improve and automate test coverage
+- [] Chaos AI integration to improve test coverage while reducing fault space to save costs and execution time
 - [x] [Support for pod level network traffic shaping](https://github.com/krkn-chaos/krkn/issues/393)
 - [ ] [Ability to visualize the metrics that are being captured by Kraken and stored in Elasticsearch](https://github.com/krkn-chaos/krkn/issues/124)
-- [ ] Support for running all the scenarios of Kraken on Kubernetes distribution - see https://github.com/krkn-chaos/krkn/issues/185, https://github.com/redhat-chaos/krkn/issues/186
-- [ ] Continue to improve [Chaos Testing Guide](https://krkn-chaos.github.io/krkn) in terms of adding best practices, test environment recommendations and scenarios to make sure the OpenShift platform, as well the applications running on top it, are resilient and performant under chaotic conditions.
-- [ ] [Switch documentation references to Kubernetes](https://github.com/krkn-chaos/krkn/issues/495)
-- [ ] [OCP and Kubernetes functionalities segregation](https://github.com/krkn-chaos/krkn/issues/497)
+- [x] Support for running all the scenarios of Kraken on Kubernetes distribution - see https://github.com/krkn-chaos/krkn/issues/185, https://github.com/redhat-chaos/krkn/issues/186
+- [x] Continue to improve [Chaos Testing Guide](https://krkn-chaos.github.io/krkn) in terms of adding best practices, test environment recommendations and scenarios to make sure the OpenShift platform, as well the applications running on top it, are resilient and performant under chaotic conditions.
+- [x] [Switch documentation references to Kubernetes](https://github.com/krkn-chaos/krkn/issues/495)
+- [x] [OCP and Kubernetes functionalities segregation](https://github.com/krkn-chaos/krkn/issues/497)
+- [x] [Krknctl - client for running Krkn scenarios with ease](https://github.com/krkn-chaos/krknctl)
@@ -88,3 +88,42 @@
 - expr: ALERTS{severity="critical", alertstate="firing"} > 0
   description: Critical prometheus alert. {{$labels.alertname}}
   severity: warning
+
+# etcd CPU and usage increase
+- expr: sum(rate(container_cpu_usage_seconds_total{image!='', namespace='openshift-etcd', container='etcd'}[1m])) * 100 / sum(machine_cpu_cores) > 5
+  description: Etcd CPU usage increased significantly by {{$value}}%
+  severity: critical
+
+# etcd memory usage increase
+- expr: sum(deriv(container_memory_usage_bytes{image!='', namespace='openshift-etcd', container='etcd'}[5m])) * 100 / sum(node_memory_MemTotal_bytes) > 5
+  description: Etcd memory usage increased significantly by {{$value}}%
+  severity: critical
+
+# Openshift API server CPU and memory usage increase
+- expr: sum(rate(container_cpu_usage_seconds_total{image!='', namespace='openshift-apiserver', container='openshift-apiserver'}[1m])) * 100 / sum(machine_cpu_cores) > 5
+  description: openshift apiserver cpu usage increased significantly by {{$value}}%
+  severity: critical
+
+- expr: (sum(deriv(container_memory_usage_bytes{namespace='openshift-apiserver', container='openshift-apiserver'}[5m]))) * 100 / sum(node_memory_MemTotal_bytes) > 5
+  description: openshift apiserver memory usage increased significantly by {{$value}}%
+  severity: critical
+
+# Openshift kube API server CPU and memory usage increase
+- expr: sum(rate(container_cpu_usage_seconds_total{image!='', namespace='openshift-kube-apiserver', container='kube-apiserver'}[1m])) * 100 / sum(machine_cpu_cores) > 5
+  description: openshift apiserver cpu usage increased significantly by {{$value}}%
+  severity: critical
+
+- expr: (sum(deriv(container_memory_usage_bytes{namespace='openshift-kube-apiserver', container='kube-apiserver'}[5m]))) * 100 / sum(node_memory_MemTotal_bytes) > 5
+  description: openshift apiserver memory usage increased significantly by {{$value}}%
+  severity: critical
+
+# Master node CPU usage increase
+- expr: (sum((sum(deriv(pod:container_cpu_usage:sum{container="",pod!=""}[5m])) BY (namespace, pod) * on(pod, namespace) group_left(node) (node_namespace_pod:kube_pod_info:) ) * on(node) group_left(role) (max by (node) (kube_node_role{role="master"})))) * 100 / sum(machine_cpu_cores) > 5
+  description: master nodes cpu usage increased significantly by {{$value}}%
+  severity: critical
+
+# Master nodes memory usage increase
+- expr: (sum((sum(deriv(container_memory_usage_bytes{container="",pod!=""}[5m])) BY (namespace, pod) * on(pod, namespace) group_left(node) (node_namespace_pod:kube_pod_info:) ) * on(node) group_left(role) (max by (node) (kube_node_role{role="master"})))) * 100 / sum(node_memory_MemTotal_bytes) > 5
+  description: master nodes memory usage increased significantly by {{$value}}%
+  severity: critical
@@ -99,3 +99,41 @@
 - expr: ALERTS{severity="critical", alertstate="firing"} > 0
   description: Critical prometheus alert. {{$labels.alertname}}
   severity: warning
+
+# etcd CPU and usage increase
+- expr: sum(rate(container_cpu_usage_seconds_total{image!='', namespace='openshift-etcd', container='etcd'}[1m])) * 100 / sum(machine_cpu_cores) > 5
+  description: Etcd CPU usage increased significantly by {{$value}}%
+  severity: critical
+
+# etcd memory usage increase
+- expr: sum(deriv(container_memory_usage_bytes{image!='', namespace='openshift-etcd', container='etcd'}[5m])) * 100 / sum(node_memory_MemTotal_bytes) > 5
+  description: Etcd memory usage increased significantly by {{$value}}%
+  severity: critical
+
+# Openshift API server CPU and memory usage increase
+- expr: sum(rate(container_cpu_usage_seconds_total{image!='', namespace='openshift-apiserver', container='openshift-apiserver'}[1m])) * 100 / sum(machine_cpu_cores) > 5
+  description: openshift apiserver cpu usage increased significantly by {{$value}}%
+  severity: critical
+
+- expr: (sum(deriv(container_memory_usage_bytes{namespace='openshift-apiserver', container='openshift-apiserver'}[5m]))) * 100 / sum(node_memory_MemTotal_bytes) > 5
+  description: openshift apiserver memory usage increased significantly by {{$value}}%
+  severity: critical
+
+# Openshift kube API server CPU and memory usage increase
+- expr: sum(rate(container_cpu_usage_seconds_total{image!='', namespace='openshift-kube-apiserver', container='kube-apiserver'}[1m])) * 100 / sum(machine_cpu_cores) > 5
+  description: openshift apiserver cpu usage increased significantly by {{$value}}%
+  severity: critical
+
+- expr: (sum(deriv(container_memory_usage_bytes{namespace='openshift-kube-apiserver', container='kube-apiserver'}[5m]))) * 100 / sum(node_memory_MemTotal_bytes) > 5
+  description: openshift apiserver memory usage increased significantly by {{$value}}%
+  severity: critical
+
+# Master node CPU usage increase
+- expr: (sum((sum(deriv(pod:container_cpu_usage:sum{container="",pod!=""}[5m])) BY (namespace, pod) * on(pod, namespace) group_left(node) (node_namespace_pod:kube_pod_info:) ) * on(node) group_left(role) (max by (node) (kube_node_role{role="master"})))) * 100 / sum(machine_cpu_cores) > 5
+  description: master nodes cpu usage increased significantly by {{$value}}%
+  severity: critical
+
+# Master nodes memory usage increase
+- expr: (sum((sum(deriv(container_memory_usage_bytes{container="",pod!=""}[5m])) BY (namespace, pod) * on(pod, namespace) group_left(node) (node_namespace_pod:kube_pod_info:) ) * on(node) group_left(role) (max by (node) (kube_node_role{role="master"})))) * 100 / sum(node_memory_MemTotal_bytes) > 5
+  description: master nodes memory usage increased significantly by {{$value}}%
+  severity: critical
@@ -1,6 +1,6 @@
 kraken:
     distribution: kubernetes # Distribution can be kubernetes or openshift
-    kubeconfig_path: ~/.kube/config # Path to kubeconfig
+    kubeconfig_path: ~/.kube/config # Path to kubeconfig
     exit_on_failure: False # Exit when a post action scenario fails
     publish_kraken_status: True # Can be accessed at http://0.0.0.0:8081
     signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer docs/signal.md for more details
@@ -8,40 +8,47 @@ kraken:
     port: 8081 # Signal port
     chaos_scenarios: # List of policies/chaos scenarios to load
-        - arcaflow_scenarios:
-            - scenarios/arcaflow/cpu-hog/input.yaml
-            - scenarios/arcaflow/memory-hog/input.yaml
-            - scenarios/arcaflow/io-hog/input.yaml
-        - application_outages:
+        - hog_scenarios:
+            - scenarios/kube/cpu-hog/input.yaml
+            - scenarios/kube/memory-hog/input.yaml
+            - scenarios/kube/io-hog/input.yaml
+        - application_outages_scenarios:
             - scenarios/openshift/app_outage.yaml
         - container_scenarios: # List of chaos pod scenarios to load
-            - - scenarios/openshift/container_etcd.yml
         - plugin_scenarios:
             - scenarios/openshift/container_etcd.yml
         - pod_network_scenarios:
             - scenarios/openshift/network_chaos_ingress.yml
             - scenarios/openshift/pod_network_outage.yml
         - pod_disruption_scenarios:
             - scenarios/openshift/etcd.yml
             - scenarios/openshift/regex_openshift_pod_kill.yml
             - scenarios/openshift/vmware_node_scenarios.yml
             - scenarios/openshift/network_chaos_ingress.yml
             - scenarios/openshift/prom_kill.yml
         - node_scenarios: # List of chaos node scenarios to load
             - scenarios/openshift/node_scenarios_example.yml
         - plugin_scenarios:
             - scenarios/openshift/openshift-apiserver.yml
             - scenarios/openshift/openshift-kube-apiserver.yml
         - vmware_node_scenarios:
             - scenarios/openshift/vmware_node_scenarios.yml
         - ibmcloud_node_scenarios:
             - scenarios/openshift/ibmcloud_node_scenarios.yml
         - node_scenarios: # List of chaos node scenarios to load
             - scenarios/openshift/aws_node_scenarios.yml
         - time_scenarios: # List of chaos time scenarios to load
             - scenarios/openshift/time_scenarios_example.yml
         - cluster_shut_down_scenarios:
-            - - scenarios/openshift/cluster_shut_down_scenario.yml
-              - scenarios/openshift/post_action_shut_down.py
+            - scenarios/openshift/cluster_shut_down_scenario.yml
         - service_disruption_scenarios:
-            - - scenarios/openshift/regex_namespace.yaml
-            - - scenarios/openshift/ingress_namespace.yaml
-              - scenarios/openshift/post_action_namespace.py
-        - zone_outages:
+            - scenarios/openshift/regex_namespace.yaml
+            - scenarios/openshift/ingress_namespace.yaml
+        - zone_outages_scenarios:
             - scenarios/openshift/zone_outage.yaml
         - pvc_scenarios:
             - scenarios/openshift/pvc_scenario.yaml
-        - network_chaos:
+        - network_chaos_scenarios:
             - scenarios/openshift/network_chaos.yaml
+        - service_hijacking_scenarios:
+            - scenarios/kube/service_hijacking.yaml
+        - syn_flood_scenarios:
+            - scenarios/kube/syn_flood.yaml

 cerberus:
     cerberus_enabled: False # Enable it when cerberus is previously installed
@@ -51,12 +58,27 @@ cerberus:
 performance_monitoring:
     deploy_dashboards: False # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift
     repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
-    prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
+    prometheus_url: '' # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
     prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
     uuid: # uuid for the run is generated by default if not set
     enable_alerts: False # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
     enable_metrics: False
     alert_profile: config/alerts.yaml # Path or URL to alert profile with the prometheus queries
     metrics_profile: config/metrics.yaml
     check_critical_alerts: False # When enabled will check prometheus for critical alerts firing post chaos
+elastic:
+    enable_elastic: False
+    collect_metrics: False
+    collect_alerts: False
+    verify_certs: False
+    elastic_url: "" # To track results in elasticsearch, give url to server here; will post telemetry details when url and index not blank
+    elastic_port: 32766
+    username: "elastic"
+    password: "test"
+    metrics_index: "krkn-metrics"
+    alerts_index: "krkn-alerts"
+    telemetry_index: "krkn-telemetry"

 tunings:
     wait_duration: 60 # Duration to wait between each chaos scenario
     iterations: 1 # Number of times to execute the scenarios
@@ -90,9 +112,7 @@ telemetry:
         - "(\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d+Z).+" # 2023-09-15T11:20:36.123425532Z log
     oc_cli_path: /usr/bin/oc # optional, if not specified will be search in $PATH
     events_backup: True # enables/disables cluster events collection
-    elastic:
-        elastic_url: "" # To track results in elasticsearch, give url to server here; will post telemetry details when url and index not blank
-        elastic_index: "" # Elastic search index pattern to post results to
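The new top-level `elastic` block replaces the per-telemetry Elasticsearch settings. Before flipping `enable_elastic` to True, it can help to confirm the endpoint and credentials actually respond; a hedged check using the CI test values quoted earlier in this comparison (substitute your own host, port, and password):

```bash
# basic reachability and auth check against the configured Elasticsearch endpoint
ES_URL=https://192.168.39.196   # value from the CI test config; replace with your server
ES_PORT=32766
# -k mirrors verify_certs: False; the root endpoint reports the cluster version on success
curl -k -s -u elastic:test "$ES_URL:$ES_PORT" | jq .version.number
```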
@@ -6,7 +6,7 @@ kraken:
     publish_kraken_status: True # Can be accessed at http://0.0.0.0:8081
     signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer docs/signal.md for more details
     signal_address: 0.0.0.0 # Signal listening address
-    chaos_scenarios: # List of policies/chaos scenarios to load
+    chaos_scenarios: # List of policies/chaos scenarios to load
         - plugin_scenarios:
             - scenarios/kind/scheduler.yml
         - node_scenarios:
@@ -7,7 +7,7 @@ kraken:
     signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer docs/signal.md for more details
     chaos_scenarios: # List of policies/chaos scenarios to load
         - container_scenarios: # List of chaos pod scenarios to load
-            - - scenarios/kube/container_dns.yml
+            - scenarios/kube/container_dns.yml
         - plugin_scenarios:
             - scenarios/kube/scheduler.yml
@@ -12,15 +12,14 @@ kraken:
             - scenarios/openshift/regex_openshift_pod_kill.yml
             - scenarios/openshift/prom_kill.yml
         - node_scenarios: # List of chaos node scenarios to load
-            - scenarios/openshift/node_scenarios_example.yml
+            - scenarios/openshift/node_scenarios_example.yml
         - plugin_scenarios:
             - scenarios/openshift/openshift-apiserver.yml
             - scenarios/openshift/openshift-kube-apiserver.yml
         - time_scenarios: # List of chaos time scenarios to load
             - scenarios/openshift/time_scenarios_example.yml
         - cluster_shut_down_scenarios:
-            - - scenarios/openshift/cluster_shut_down_scenario.yml
-              - scenarios/openshift/post_action_shut_down.py
+            - scenarios/openshift/cluster_shut_down_scenario.yml
         - service_disruption_scenarios:
             - scenarios/openshift/regex_namespace.yaml
             - scenarios/openshift/ingress_namespace.yaml
@@ -1,28 +0,0 @@
-# Dockerfile for kraken
-
-FROM mcr.microsoft.com/azure-cli:latest as azure-cli
-
-FROM registry.access.redhat.com/ubi8/ubi:latest
-
-ENV KUBECONFIG /root/.kube/config
-
-# Copy azure client binary from azure-cli image
-COPY --from=azure-cli /usr/local/bin/az /usr/bin/az
-
-# Install dependencies
-RUN yum install -y git python39 python3-pip jq gettext wget && \
-    python3.9 -m pip install -U pip && \
-    git clone https://github.com/krkn-chaos/krkn.git --branch v1.5.13 /root/kraken && \
-    mkdir -p /root/.kube && cd /root/kraken && \
-    pip3.9 install -r requirements.txt && \
-    pip3.9 install virtualenv && \
-    wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O /usr/bin/yq && chmod +x /usr/bin/yq
-
-# Get Kubernetes and OpenShift clients from stable releases
-WORKDIR /tmp
-RUN wget https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable/openshift-client-linux.tar.gz && tar -xvf openshift-client-linux.tar.gz && cp oc /usr/local/bin/oc && cp oc /usr/bin/oc && cp kubectl /usr/local/bin/kubectl && cp kubectl /usr/bin/kubectl
-
-WORKDIR /root/kraken
-
-ENTRYPOINT ["python3.9", "run_kraken.py"]
-CMD ["--config=config/config.yaml"]
@@ -1,29 +0,0 @@
-# Dockerfile for kraken
-
-FROM ppc64le/centos:8
-
-FROM mcr.microsoft.com/azure-cli:latest as azure-cli
-
-LABEL org.opencontainers.image.authors="Red Hat OpenShift Chaos Engineering"
-
-ENV KUBECONFIG /root/.kube/config
-
-# Copy azure client binary from azure-cli image
-COPY --from=azure-cli /usr/local/bin/az /usr/bin/az
-
-# Install dependencies
-RUN yum install -y git python39 python3-pip jq gettext wget && \
-    python3.9 -m pip install -U pip && \
-    git clone https://github.com/redhat-chaos/krkn.git --branch v1.5.13 /root/kraken && \
-    mkdir -p /root/.kube && cd /root/kraken && \
-    pip3.9 install -r requirements.txt && \
-    pip3.9 install virtualenv && \
-    wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O /usr/bin/yq && chmod +x /usr/bin/yq
-
-# Get Kubernetes and OpenShift clients from stable releases
-WORKDIR /tmp
-RUN wget https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable/openshift-client-linux.tar.gz && tar -xvf openshift-client-linux.tar.gz && cp oc /usr/local/bin/oc && cp oc /usr/bin/oc && cp kubectl /usr/local/bin/kubectl && cp kubectl /usr/bin/kubectl
-
-WORKDIR /root/kraken
-
-ENTRYPOINT python3.9 run_kraken.py --config=config/config.yaml
containers/Dockerfile.template (new file, 60 lines)

# oc build
FROM golang:1.22.5 AS oc-build
RUN apt-get update && apt-get install -y --no-install-recommends libkrb5-dev
WORKDIR /tmp
RUN git clone --branch release-4.18 https://github.com/openshift/oc.git
WORKDIR /tmp/oc
RUN go mod edit -go 1.22.5 &&\
    go get github.com/moby/buildkit@v0.12.5 &&\
    go get github.com/containerd/containerd@v1.7.11&&\
    go get github.com/docker/docker@v25.0.6&&\
    go get github.com/opencontainers/runc@v1.1.14&&\
    go mod tidy && go mod vendor
RUN make GO_REQUIRED_MIN_VERSION:= oc

FROM fedora:40
ARG PR_NUMBER
ARG TAG
RUN groupadd -g 1001 krkn && useradd -m -u 1001 -g krkn krkn
RUN dnf update -y

ENV KUBECONFIG /home/krkn/.kube/config

# install kubectl
RUN curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" &&\
    cp kubectl /usr/local/bin/kubectl && chmod +x /usr/local/bin/kubectl &&\
    cp kubectl /usr/bin/kubectl && chmod +x /usr/bin/kubectl

# This overwrites any existing configuration in /etc/yum.repos.d/kubernetes.repo
RUN dnf update && dnf install -y --setopt=install_weak_deps=False \
    git python39 jq yq gettext wget which &&\
    dnf clean all

# copy oc client binary from oc-build image
COPY --from=oc-build /tmp/oc/oc /usr/bin/oc

# krkn build
RUN git clone https://github.com/krkn-chaos/krkn.git /home/krkn/kraken && \
    mkdir -p /home/krkn/.kube

WORKDIR /home/krkn/kraken

# default behaviour will be to build main
# if it is a PR trigger the PR itself will be checked out
RUN if [ -n "$PR_NUMBER" ]; then git fetch origin pull/${PR_NUMBER}/head:pr-${PR_NUMBER} && git checkout pr-${PR_NUMBER};fi
# if it is a TAG trigger checkout the tag
RUN if [ -n "$TAG" ]; then git checkout "$TAG";fi

RUN python3.9 -m ensurepip
RUN pip3.9 install -r requirements.txt
RUN pip3.9 install jsonschema

LABEL krknctl.title="Krkn Base Image"
LABEL krknctl.description="This is the krkn base image."
LABEL krknctl.input_fields='$KRKNCTL_INPUT'


RUN chown -R krkn:krkn /home/krkn && chmod 755 /home/krkn
USER krkn
ENTRYPOINT ["python3.9", "run_kraken.py"]
CMD ["--config=config/config.yaml"]
@@ -12,35 +12,3 @@ Refer [instructions](https://github.com/redhat-chaos/krkn/blob/main/docs/install
 ### Run Custom Kraken Image

 Refer to [instructions](https://github.com/redhat-chaos/krkn/blob/main/containers/build_own_image-README.md) for information on how to run a custom containerized version of kraken using podman.
-
-
-### Kraken as a KubeApp ( Unsupported and not recommended )
-
-#### GENERAL NOTES:
-
-- It is not generally recommended to run Kraken internal to the cluster as the pod which is running Kraken might get disrupted, the suggested use case to run kraken from inside k8s/OpenShift is to target **another** cluster (eg. to bypass network restrictions or to leverage cluster's computational resources)
-
-- your kubeconfig might contain several cluster contexts and credentials so be sure, before creating the ConfigMap, to keep **only** the credentials related to the destination cluster. Please refer to the [Kubernetes documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) for more details
-- to add privileges to the service account you must be logged in the cluster with an highly privileged account (ideally kubeadmin)
-
-
-To run containerized Kraken as a Kubernetes/OpenShift Deployment, follow these steps:
-
-1. Configure the [config.yaml](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml) file according to your requirements.
-
-    **NOTE**: both the scenarios ConfigMaps are needed regardless you're running kraken in Kubernetes or OpenShift
-
-2. Create a namespace under which you want to run the kraken pod using `kubectl create ns <namespace>`.
-3. Switch to `<namespace>` namespace:
-    - In Kubernetes, use `kubectl config set-context --current --namespace=<namespace>`
-    - In OpenShift, use `oc project <namespace>`
-
-4. Create a ConfigMap named kube-config using `kubectl create configmap kube-config --from-file=<path_to_kubeconfig>` *(eg. ~/.kube/config)*
-5. Create a ConfigMap named kraken-config using `kubectl create configmap kraken-config --from-file=<path_to_kraken>/config`
-6. Create a ConfigMap named scenarios-config using `kubectl create configmap scenarios-config --from-file=<path_to_kraken>/scenarios`
-7. Create a ConfigMap named scenarios-openshift-config using `kubectl create configmap scenarios-openshift-config --from-file=<path_to_kraken>/scenarios/openshift`
-8. Create a ConfigMap named scenarios-kube-config using `kubectl create configmap scenarios-kube-config --from-file=<path_to_kraken>/scenarios/kube`
-9. Create a service account to run the kraken pod `kubectl create serviceaccount useroot`.
-10. In Openshift, add privileges to service account and execute `oc adm policy add-scc-to-user privileged -z useroot`.
-11. Create a Job using `kubectl apply -f <path_to_kraken>/containers/kraken.yml` and monitor the status using `oc get jobs` and `oc get pods`.
containers/compile_dockerfile.sh (new executable file, 5 lines)

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"
export KRKNCTL_INPUT=$(cat krknctl-input.json|tr -d "\n")

envsubst '${KRKNCTL_INPUT}' < Dockerfile.template > Dockerfile
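compile_dockerfile.sh inlines the krknctl input manifest into the Dockerfile template, which is why both workflow build steps call it before `docker build`. A sketch of the same flow run by hand; the PR number and tag values are only examples:

```bash
# render containers/Dockerfile from Dockerfile.template, embedding krknctl-input.json
./containers/compile_dockerfile.sh

# build main (default), a pull request head, or a tag, mirroring the workflow steps
docker build --no-cache -t quay.io/krkn-chaos/krkn containers/
docker build --no-cache -t quay.io/krkn-chaos/krkn containers/ --build-arg PR_NUMBER=123   # hypothetical PR number
docker build --no-cache -t quay.io/krkn-chaos/krkn containers/ --build-arg TAG=v1.5.13     # example tag

# the rendered krknctl metadata ends up in an image label
docker inspect --format '{{ index .Config.Labels "krknctl.input_fields" }}' quay.io/krkn-chaos/krkn | jq length
```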
@@ -1,49 +0,0 @@
----
-apiVersion: batch/v1
-kind: Job
-metadata:
-  name: kraken
-spec:
-  parallelism: 1
-  completions: 1
-  template:
-    metadata:
-      labels:
-        tool: Kraken
-    spec:
-      serviceAccountName: useroot
-      containers:
-        - name: kraken
-          securityContext:
-            privileged: true
-          image: quay.io/redhat-chaos/krkn
-          command: ["/bin/sh", "-c"]
-          args: ["python3.9 run_kraken.py -c config/config.yaml"]
-          volumeMounts:
-            - mountPath: "/root/.kube"
-              name: config
-            - mountPath: "/root/kraken/config"
-              name: kraken-config
-            - mountPath: "/root/kraken/scenarios"
-              name: scenarios-config
-            - mountPath: "/root/kraken/scenarios/openshift"
-              name: scenarios-openshift-config
-            - mountPath: "/root/kraken/scenarios/kube"
-              name: scenarios-kube-config
-      restartPolicy: Never
-      volumes:
-        - name: config
-          configMap:
-            name: kube-config
-        - name: kraken-config
-          configMap:
-            name: kraken-config
-        - name: scenarios-config
-          configMap:
-            name: scenarios-config
-        - name: scenarios-openshift-config
-          configMap:
-            name: scenarios-openshift-config
-        - name: scenarios-kube-config
-          configMap:
-            name: scenarios-kube-config
containers/krknctl-input.json (new file, 396 lines; the mirror cuts off partway through)

[
  {
    "name": "cerberus-enabled",
    "short_description": "Enable Cerberus",
    "description": "Enables Cerberus Support",
    "variable": "CERBERUS_ENABLED",
    "type": "enum",
    "default": "False",
    "allowed_values": "True,False",
    "separator": ",",
    "required": "false"
  },
  {
    "name": "cerberus-url",
    "short_description": "Cerberus URL",
    "description": "Cerberus http url",
    "variable": "CERBERUS_URL",
    "type": "string",
    "default": "http://0.0.0.0:8080",
    "validator": "^(http|https):\/\/.*",
    "required": "false"
  },
  {
    "name": "distribution",
    "short_description": "Orchestrator distribution",
    "description": "Selects the orchestrator distribution",
    "variable": "DISTRIBUTION",
    "type": "enum",
    "default": "openshift",
    "allowed_values": "openshift,kubernetes",
    "separator": ",",
    "required": "false"
  },
  {
    "name": "krkn-kubeconfig",
    "short_description": "Krkn kubeconfig path",
    "description": "Sets the path where krkn will search for kubeconfig (in container)",
    "variable": "KRKN_KUBE_CONFIG",
    "type": "string",
    "default": "/home/krkn/.kube/config",
    "required": "false"
  },
  {
    "name": "wait-duration",
    "short_description": "Post chaos wait duration",
    "description": "waits for a certain amount of time after the scenario",
    "variable": "WAIT_DURATION",
    "type": "number",
    "default": "1"
  },
  {
    "name": "iterations",
    "short_description": "Chaos scenario iterations",
    "description": "number of times the same chaos scenario will be executed",
    "variable": "ITERATIONS",
    "type": "number",
    "default": "1"
  },
  {
    "name": "daemon-mode",
    "short_description": "Sets krkn daemon mode",
    "description": "if set the scenario will execute forever",
    "variable": "DAEMON_MODE",
    "type": "enum",
    "allowed_values": "True,False",
    "separator": ",",
    "default": "False",
    "required": "false"
  },
  {
    "name": "uuid",
    "short_description": "Sets krkn run uuid",
    "description": "sets krkn run uuid instead of generating it",
    "variable": "UUID",
    "type": "string",
    "default": "",
    "required": "false"
  },
  {
    "name": "capture-metrics",
    "short_description": "Enables metrics capture",
    "description": "Enables metrics capture",
    "variable": "CAPTURE_METRICS",
    "type": "enum",
    "allowed_values": "True,False",
    "separator": ",",
    "default": "False",
    "required": "false"
  },
  {
    "name": "enable-alerts",
    "short_description": "Enables cluster alerts check",
    "description": "Enables cluster alerts check",
    "variable": "ENABLE_ALERTS",
    "type": "enum",
    "allowed_values": "True,False",
    "separator": ",",
    "default": "False",
    "required": "false"
  },
  {
    "name": "alerts-path",
    "short_description": "Cluster alerts path file (in container)",
    "description": "Enables cluster alerts check",
    "variable": "ALERTS_PATH",
    "type": "string",
    "default": "config/alerts.yaml",
    "required": "false"
  },
  {
    "name": "enable-es",
    "short_description": "Enables elastic search data collection",
    "description": "Enables elastic search data collection",
    "variable": "ENABLE_ES",
    "type": "enum",
    "allowed_values": "True,False",
    "separator": ",",
    "default": "False",
    "required": "false"
  },
  {
    "name": "es-server",
    "short_description": "Elasticsearch instance URL",
    "description": "Elasticsearch instance URL",
    "variable": "ES_SERVER",
    "type": "string",
    "default": "http://0.0.0.0",
    "validator": "^(http|https):\/\/.*",
    "required": "false"
  },
  {
    "name": "es-port",
    "short_description": "Elasticsearch instance port",
    "description": "Elasticsearch instance port",
    "variable": "ES_PORT",
    "type": "number",
    "default": "443",
    "required": "false"
  },
  {
    "name": "es-username",
    "short_description": "Elasticsearch instance username",
    "description": "Elasticsearch instance username",
    "variable": "ES_USERNAME",
    "type": "string",
    "default": "elastic",
    "required": "false"
  },
  {
    "name": "es-password",
    "short_description": "Elasticsearch instance password",
    "description": "Elasticsearch instance password",
    "variable": "ES_PASSWORD",
    "type": "string",
    "default": "",
    "required": "false"
  },
  {
    "name": "es-verify-certs",
    "short_description": "Enables elasticsearch TLS certificate verification",
    "description": "Enables elasticsearch TLS certificate verification",
    "variable": "ES_VERIFY_CERTS",
    "type": "enum",
    "allowed_values": "True,False",
    "separator": ",",
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "es-collect-metrics",
|
||||
"short_description": "Enables metrics collection on elastic search",
|
||||
"description": "Enables metrics collection on elastic search",
|
||||
"variable": "ES_COLLECT_METRICS",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "es-collect-alerts",
|
||||
"short_description": "Enables alerts collection on elastic search",
|
||||
"description": "Enables alerts collection on elastic search",
|
||||
"variable": "ES_COLLECT_ALERTS",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "es-metrics-index",
|
||||
"short_description": "Elasticsearch metrics index",
|
||||
"description": "Index name for metrics in Elasticsearch",
|
||||
"variable": "ES_METRICS_INDEX",
|
||||
"type": "string",
|
||||
"default": "krkn-metrics",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "es-alerts-index",
|
||||
"short_description": "Elasticsearch alerts index",
|
||||
"description": "Index name for alerts in Elasticsearch",
|
||||
"variable": "ES_ALERTS_INDEX",
|
||||
"type": "string",
|
||||
"default": "krkn-alerts",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "es-telemetry-index",
|
||||
"short_description": "Elasticsearch telemetry index",
|
||||
"description": "Index name for telemetry in Elasticsearch",
|
||||
"variable": "ES_TELEMETRY_INDEX",
|
||||
"type": "string",
|
||||
"default": "krkn-telemetry",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "check-critical-alerts",
|
||||
"short_description": "Check critical alerts",
|
||||
"description": "Enables checking for critical alerts",
|
||||
"variable": "CHECK_CRITICAL_ALERTS",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-enabled",
|
||||
"short_description": "Enable telemetry",
|
||||
"description": "Enables telemetry support",
|
||||
"variable": "TELEMETRY_ENABLED",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-api-url",
|
||||
"short_description": "Telemetry API URL",
|
||||
"description": "API endpoint for telemetry data",
|
||||
"variable": "TELEMETRY_API_URL",
|
||||
"type": "string",
|
||||
"default": "https://ulnmf9xv7j.execute-api.us-west-2.amazonaws.com/production",
|
||||
"validator": "^(http|https):\/\/.*",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-username",
|
||||
"short_description": "Telemetry username",
|
||||
"description": "Username for telemetry authentication",
|
||||
"variable": "TELEMETRY_USERNAME",
|
||||
"type": "string",
|
||||
"default": "redhat-chaos",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-password",
|
||||
"short_description": "Telemetry password",
|
||||
"description": "Password for telemetry authentication",
|
||||
"variable": "TELEMETRY_PASSWORD",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-prometheus-backup",
|
||||
"short_description": "Prometheus backup for telemetry",
|
||||
"description": "Enables Prometheus backup for telemetry",
|
||||
"variable": "TELEMETRY_PROMETHEUS_BACKUP",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "True",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-full-prometheus-backup",
|
||||
"short_description": "Full Prometheus backup",
|
||||
"description": "Enables full Prometheus backup for telemetry",
|
||||
"variable": "TELEMETRY_FULL_PROMETHEUS_BACKUP",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-backup-threads",
|
||||
"short_description": "Telemetry backup threads",
|
||||
"description": "Number of threads for telemetry backup",
|
||||
"variable": "TELEMETRY_BACKUP_THREADS",
|
||||
"type": "number",
|
||||
"default": "5",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-archive-path",
|
||||
"short_description": "Telemetry archive path",
|
||||
"description": "Path to save telemetry archive",
|
||||
"variable": "TELEMETRY_ARCHIVE_PATH",
|
||||
"type": "string",
|
||||
"default": "/tmp",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-max-retries",
|
||||
"short_description": "Telemetry max retries",
|
||||
"description": "Maximum retries for telemetry operations",
|
||||
"variable": "TELEMETRY_MAX_RETRIES",
|
||||
"type": "number",
|
||||
"default": "0",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-run-tag",
|
||||
"short_description": "Telemetry run tag",
|
||||
"description": "Tag for telemetry run",
|
||||
"variable": "TELEMETRY_RUN_TAG",
|
||||
"type": "string",
|
||||
"default": "chaos",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-group",
|
||||
"short_description": "Telemetry group",
|
||||
"description": "Group name for telemetry data",
|
||||
"variable": "TELEMETRY_GROUP",
|
||||
"type": "string",
|
||||
"default": "default",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-archive-size",
|
||||
"short_description": "Telemetry archive size",
|
||||
"description": "Maximum size for telemetry archives",
|
||||
"variable": "TELEMETRY_ARCHIVE_SIZE",
|
||||
"type": "number",
|
||||
"default": "1000",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-logs-backup",
|
||||
"short_description": "Telemetry logs backup",
|
||||
"description": "Enables logs backup for telemetry",
|
||||
"variable": "TELEMETRY_LOGS_BACKUP",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-filter-pattern",
|
||||
"short_description": "Telemetry filter pattern",
|
||||
"description": "Filter pattern for telemetry logs",
|
||||
"variable": "TELEMETRY_FILTER_PATTERN",
|
||||
"type": "string",
|
||||
"default": "[\"(\\\\w{3}\\\\s\\\\d{1,2}\\\\s\\\\d{2}:\\\\d{2}:\\\\d{2}\\\\.\\\\d+).+\",\"kinit (\\\\d+/\\\\d+/\\\\d+\\\\s\\\\d{2}:\\\\d{2}:\\\\d{2})\\\\s+\",\"(\\\\d{4}-\\\\d{2}-\\\\d{2}T\\\\d{2}:\\\\d{2}:\\\\d{2}\\\\.\\\\d+Z).+\"]",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-cli-path",
|
||||
"short_description": "Telemetry CLI path (oc)",
|
||||
"description": "Path to telemetry CLI tool (oc)",
|
||||
"variable": "TELEMETRY_CLI_PATH",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-events-backup",
|
||||
"short_description": "Telemetry events backup",
|
||||
"description": "Enables events backup for telemetry",
|
||||
"variable": "TELEMETRY_EVENTS_BACKUP",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "True",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "krkn-debug",
|
||||
"short_description": "Krkn debug mode",
|
||||
"description": "Enables debug mode for Krkn",
|
||||
"variable": "KRKN_DEBUG",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
}
|
||||
]
|
||||
|
||||
@@ -1,31 +0,0 @@
|
||||
version: "3"
|
||||
services:
|
||||
elastic:
|
||||
image: docker.elastic.co/elasticsearch/elasticsearch:7.13.2
|
||||
deploy:
|
||||
replicas: 1
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
network_mode: host
|
||||
environment:
|
||||
discovery.type: single-node
|
||||
kibana:
|
||||
image: docker.elastic.co/kibana/kibana:7.13.2
|
||||
deploy:
|
||||
replicas: 1
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
network_mode: host
|
||||
environment:
|
||||
ELASTICSEARCH_HOSTS: "http://0.0.0.0:9200"
|
||||
cerberus:
|
||||
image: quay.io/openshift-scale/cerberus:latest
|
||||
privileged: true
|
||||
deploy:
|
||||
replicas: 1
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
network_mode: host
|
||||
volumes:
|
||||
- ./config/cerberus.yaml:/root/cerberus/config/config.yaml:Z # Modify the config in case of the need to monitor additional components
|
||||
- ${HOME}/.kube/config:/root/.kube/config:Z
|
||||
@@ -38,11 +38,11 @@ A couple of [alert profiles](https://github.com/redhat-chaos/krkn/tree/main/conf
|
||||
severity: critical
|
||||
```
|
||||
|
||||
Kube-burner supports setting the severity for the alerts with each one having different effects:
|
||||
Krkn supports setting the severity for the alerts with each one having different effects:
|
||||
|
||||
```
|
||||
info: Prints an info message with the alarm description to stdout. By default all expressions have this severity.
|
||||
warning: Prints a warning message with the alarm description to stdout.
|
||||
error: Prints a error message with the alarm description to stdout and makes kube-burner rc = 1
|
||||
error: Prints an error message with the alarm description to stdout and sets Krkn rc = 1
|
||||
critical: Prints a fatal message with the alarm description to stdout and exits execution immediately with rc != 0
|
||||
```
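For illustration only, the behaviour described above roughly corresponds to the sketch below; the helper function and its name are illustrative and are not Krkn internals.

```python
# A rough sketch of the severity semantics listed above -- illustrative only.
import logging
import sys


def handle_alert(description: str, severity: str) -> int:
    if severity == "info":
        logging.info(description)
    elif severity == "warning":
        logging.warning(description)
    elif severity == "error":
        logging.error(description)
        return 1  # the run continues, but Krkn finishes with rc = 1
    elif severity == "critical":
        logging.critical(description)
        sys.exit(1)  # execution stops immediately with rc != 0
    return 0
```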
|
||||
|
||||
@@ -13,13 +13,26 @@ Supported Cloud Providers:
|
||||
**NOTE**: For clusters with AWS make sure [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) is installed and properly [configured](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html) using an AWS account
|
||||
|
||||
## GCP
|
||||
**NOTE**: For clusters with GCP make sure [GCP CLI](https://cloud.google.com/sdk/docs/install#linux) is installed.
|
||||
|
||||
A google service account is required to give proper authentication to GCP for node actions. See [here](https://cloud.google.com/docs/authentication/getting-started) for how to create a service account.
|
||||
In order to set up Application Default Credentials (ADC) for use by Cloud Client Libraries, you can provide either service account credentials or the credentials associated with your user account:
|
||||
|
||||
**NOTE**: A user with 'resourcemanager.projects.setIamPolicy' permission is required to grant project-level permissions to the service account.
|
||||
- Using service account credentials:
|
||||
|
||||
After creating the service account you will need to enable the account using the following: ```export GOOGLE_APPLICATION_CREDENTIALS="<serviceaccount.json>"```
|
||||
A google service account is required to give proper authentication to GCP for node actions. See [here](https://cloud.google.com/docs/authentication/getting-started) for how to create a service account.
|
||||
|
||||
**NOTE**: A user with 'resourcemanager.projects.setIamPolicy' permission is required to grant project-level permissions to the service account.
|
||||
|
||||
After creating the service account you will need to enable the account using the following: ```export GOOGLE_APPLICATION_CREDENTIALS="<serviceaccount.json>"```
|
||||
|
||||
- Using the credentials associated with your user account:
|
||||
|
||||
1. Make sure that the [GCP CLI](https://cloud.google.com/sdk/docs/install#linux) is installed and [initialized](https://cloud.google.com/sdk/docs/initializing) by running:
|
||||
|
||||
```gcloud init```
|
||||
|
||||
2. Create local authentication credentials for your user account:
|
||||
|
||||
```gcloud auth application-default login```
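Optionally, you can verify that ADC resolves before running node scenarios. The snippet below is a minimal sketch and assumes the `google-auth` Python package is installed:

```python
# Minimal ADC sanity check -- assumes the google-auth package is installed.
from google.auth import default

# Picks up GOOGLE_APPLICATION_CREDENTIALS (service account) or the user credentials
# created by `gcloud auth application-default login`.
credentials, project_id = default()
print(f"Application Default Credentials resolved for project: {project_id}")
```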
|
||||
|
||||
## Openstack
|
||||
|
||||
@@ -27,14 +40,13 @@ After creating the service account you will need to enable the account using the
|
||||
|
||||
## Azure
|
||||
|
||||
**NOTE**: For Azure node killing scenarios, make sure [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) is installed.
|
||||
|
||||
You will also need to create a service principal and give it the correct access, see [here](https://docs.openshift.com/container-platform/4.5/installing/installing_azure/installing-azure-account.html) for creating the service principal and setting the proper permissions.
|
||||
**NOTE**: You will need to create a service principal and give it the correct access, see [here](https://docs.openshift.com/container-platform/4.5/installing/installing_azure/installing-azure-account.html) for creating the service principal and setting the proper permissions.
|
||||
|
||||
To run properly, the service principal requires the “Azure Active Directory Graph/Application.ReadWrite.OwnedBy” API permission and the “User Access Administrator” role.
|
||||
|
||||
Before running you will need to set the following:
|
||||
1. Login using ```az login```
|
||||
|
||||
1. ```export AZURE_SUBSCRIPTION_ID=<subscription_id>```
|
||||
|
||||
2. ```export AZURE_TENANT_ID=<tenant_id>```
|
||||
|
||||
@@ -68,9 +80,10 @@ Set the following environment variables
|
||||
|
||||
These are the credentials that you would normally use to access the vSphere client.
|
||||
|
||||
|
||||
## IBMCloud
|
||||
If no api key is set up with proper VPC resource permissions, use the following to create:
|
||||
|
||||
If no API key is set up with proper VPC resource permissions, use the following to create it:
|
||||
|
||||
* Access group
|
||||
* Service id with the following access
|
||||
* With policy **VPC Infrastructure Services**
|
||||
|
||||
@@ -8,6 +8,7 @@ Current accepted cloud types:
|
||||
* [GCP](cloud_setup.md#gcp)
|
||||
* [AWS](cloud_setup.md#aws)
|
||||
* [Openstack](cloud_setup.md#openstack)
|
||||
* [IBMCloud](cloud_setup.md#ibmcloud)
|
||||
|
||||
|
||||
```
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
* [Scenarios](#scenarios)
|
||||
* [Test Environment Recommendations - how and where to run chaos tests](#test-environment-recommendations---how-and-where-to-run-chaos-tests)
|
||||
* [Chaos testing in Practice](#chaos-testing-in-practice)
|
||||
* [OpenShift oraganization](#openshift-organization)
|
||||
* [OpenShift organization](#openshift-organization)
|
||||
* [startx-lab](#startx-lab)
|
||||
|
||||
|
||||
|
||||
@@ -43,12 +43,3 @@ $ python3.9 run_kraken.py --config <config_file_location>
|
||||
[Krkn-hub](https://github.com/krkn-chaos/krkn-hub) is a wrapper that allows running Krkn chaos scenarios via podman or docker runtime with scenario parameters/configuration defined as environment variables.
|
||||
|
||||
Refer [instructions](https://github.com/krkn-chaos/krkn-hub#supported-chaos-scenarios) to get started.
|
||||
|
||||
|
||||
### Run Kraken as a Kubernetes deployment ( unsupported option - standalone or containerized deployers are recommended )
|
||||
Refer [Instructions](https://github.com/krkn-chaos/krkn/blob/main/containers/README.md) on how to deploy and run Kraken as a Kubernetes/OpenShift deployment.
|
||||
|
||||
|
||||
Refer to the [chaos-kraken chart manpage](https://artifacthub.io/packages/helm/startx/chaos-kraken)
|
||||
and especially the [kraken configuration values](https://artifacthub.io/packages/helm/startx/chaos-kraken#chaos-kraken-values-dictionary)
|
||||
for details on how to configure this chart.
|
||||
|
||||
@@ -18,7 +18,7 @@ network_chaos: # Scenario to create an outage
|
||||
```
|
||||
|
||||
##### Sample scenario config for ingress traffic shaping (using a plugin)
|
||||
'''
|
||||
```
|
||||
- id: network_chaos
|
||||
config:
|
||||
node_interface_name: # Dictionary with key as node name(s) and value as a list of its interfaces to test
|
||||
@@ -35,7 +35,7 @@ network_chaos: # Scenario to create an outage
|
||||
bandwidth: 10mbit
|
||||
wait_duration: 120
|
||||
test_duration: 60
|
||||
'''
|
||||
```
|
||||
|
||||
Note: For ingress traffic shaping, ensure that your node doesn't have any [IFB](https://wiki.linuxfoundation.org/networking/ifb) interfaces already present. The scenario relies on creating IFBs to do the shaping, and they are deleted at the end of the scenario.
|
||||
|
||||
|
||||
@@ -4,29 +4,36 @@ The following node chaos scenarios are supported:
|
||||
|
||||
1. **node_start_scenario**: Scenario to start the node instance.
|
||||
2. **node_stop_scenario**: Scenario to stop the node instance.
|
||||
3. **node_stop_start_scenario**: Scenario to stop and then start the node instance. Not supported on VMware.
|
||||
3. **node_stop_start_scenario**: Scenario to stop the node instance for specified duration and then start the node instance. Not supported on VMware.
|
||||
4. **node_termination_scenario**: Scenario to terminate the node instance.
|
||||
5. **node_reboot_scenario**: Scenario to reboot the node instance.
|
||||
6. **stop_kubelet_scenario**: Scenario to stop the kubelet of the node instance.
|
||||
7. **stop_start_kubelet_scenario**: Scenario to stop and start the kubelet of the node instance.
|
||||
8. **node_crash_scenario**: Scenario to crash the node instance.
|
||||
9. **stop_start_helper_node_scenario**: Scenario to stop and start the helper node and check service status.
|
||||
8. **restart_kubelet_scenario**: Scenario to restart the kubelet of the node instance.
|
||||
9. **node_crash_scenario**: Scenario to crash the node instance.
|
||||
10. **stop_start_helper_node_scenario**: Scenario to stop and start the helper node and check service status.
|
||||
11. **node_disk_detach_attach_scenario**: Scenario to detach node disk for specified duration.
|
||||
|
||||
|
||||
**NOTE**: If the node does not recover from the node_crash_scenario injection, reboot the node to get it back to Ready state.
|
||||
|
||||
**NOTE**: node_start_scenario, node_stop_scenario, node_stop_start_scenario, node_termination_scenario
|
||||
, node_reboot_scenario and stop_start_kubelet_scenario are supported only on AWS, Azure, OpenStack, BareMetal, GCP
|
||||
, VMware and Alibaba as of now.
|
||||
, node_reboot_scenario and stop_start_kubelet_scenario are supported on AWS, Azure, OpenStack, BareMetal, GCP
|
||||
, VMware and Alibaba.
|
||||
|
||||
**NOTE**: Node scenarios are supported only when running the standalone version of Kraken until https://github.com/redhat-chaos/krkn/issues/106 gets fixed.
|
||||
**NOTE**: node_disk_detach_attach_scenario is supported only on AWS and cannot detach the root disk.
|
||||
|
||||
|
||||
#### AWS
|
||||
|
||||
How to set up AWS cli to run node scenarios is defined [here](cloud_setup.md#aws).
|
||||
Cloud setup instructions can be found [here](cloud_setup.md#aws). Sample scenario config can be found [here](https://github.com/krkn-chaos/krkn/blob/main/scenarios/openshift/aws_node_scenarios.yml).
|
||||
|
||||
|
||||
|
||||
#### Baremetal
|
||||
|
||||
Sample scenario config can be found [here](https://github.com/krkn-chaos/krkn/blob/main/scenarios/openshift/baremetal_node_scenarios.yml).
|
||||
|
||||
**NOTE**: Baremetal requires setting the IPMI user and password to power on, off, and reboot nodes, using the config options `bm_user` and `bm_password`. It can either be set in the root of the entry in the scenarios config, or it can be set per machine.
|
||||
|
||||
If no per-machine addresses are specified, kraken attempts to use the BMC value in the BareMetalHost object. To list them, you can do 'oc get bmh -o wide --all-namespaces'. If the BMC values are blank, you must specify them per-machine using the config option 'bmc_addr' as specified below.
|
||||
@@ -38,6 +45,8 @@ See the example node scenario or the example below.
|
||||
|
||||
**NOTE**: Baremetal machines are fragile. Some node actions can occasionally corrupt the filesystem if it does not shut down properly, and sometimes the kubelet does not start properly.
|
||||
|
||||
|
||||
|
||||
#### Docker
|
||||
|
||||
The Docker provider can be used to run node scenarios against kind clusters.
|
||||
@@ -46,8 +55,13 @@ The Docker provider can be used to run node scenarios against kind clusters.
|
||||
|
||||
kind was primarily designed for testing Kubernetes itself, but may be used for local development or CI.
|
||||
|
||||
|
||||
|
||||
#### GCP
|
||||
How to set up GCP cli to run node scenarios is defined [here](cloud_setup.md#gcp).
|
||||
Cloud setup instructions can be found [here](cloud_setup.md#gcp). Sample scenario config can be found [here](https://github.com/krkn-chaos/krkn/blob/main/scenarios/openshift/gcp_node_scenarios.yml).
|
||||
|
||||
NOTE: The parallel option is not available for GCP; the API does not perform operations concurrently.
|
||||
|
||||
|
||||
#### Openstack
|
||||
|
||||
@@ -60,9 +74,11 @@ The supported node level chaos scenarios on an OPENSTACK cloud are `node_stop_st
|
||||
To execute the scenario, ensure the value for `ssh_private_key` in the node scenarios config file is set with the correct private key file path for ssh connection to the helper node. Ensure passwordless ssh is configured on the host running Kraken and the helper node to avoid connection errors.
|
||||
|
||||
|
||||
|
||||
#### Azure
|
||||
|
||||
How to set up Azure cli to run node scenarios is defined [here](cloud_setup.md#azure).
|
||||
Cloud setup instructions can be found [here](cloud_setup.md#azure). Sample scenario config can be found [here](https://github.com/krkn-chaos/krkn/blob/main/scenarios/openshift/azure_node_scenarios.yml).
|
||||
|
||||
|
||||
|
||||
#### Alibaba
|
||||
@@ -73,43 +89,28 @@ How to set up Alibaba cli to run node scenarios is defined [here](cloud_setup.md
|
||||
. Releasing a node is 2 steps, stopping the node and then releasing it.
|
||||
|
||||
|
||||
|
||||
#### VMware
|
||||
How to set up VMware vSphere to run node scenarios is defined [here](cloud_setup.md#vmware)
|
||||
|
||||
This cloud type uses a different configuration style, see actions below and [example config file](../scenarios/openshift/vmware_node_scenarios.yml)
|
||||
|
||||
*vmware-node-terminate, vmware-node-reboot, vmware-node-stop, vmware-node-start*
|
||||
- vmware-node-terminate
|
||||
- vmware-node-reboot
|
||||
- vmware-node-stop
|
||||
- vmware-node-start
|
||||
|
||||
|
||||
|
||||
#### IBMCloud
|
||||
How to set up IBMCloud to run node scenarios is defined [here](cloud_setup.md#ibmcloud)
|
||||
|
||||
This cloud type uses a different configuration style, see actions below and [example config file](../scenarios/openshift/ibmcloud_node_scenarios.yml)
|
||||
|
||||
*ibmcloud-node-terminate, ibmcloud-node-reboot, ibmcloud-node-stop, ibmcloud-node-start*
|
||||
|
||||
|
||||
|
||||
#### IBMCloud and Vmware example
|
||||
|
||||
|
||||
```
|
||||
- id: ibmcloud-node-stop
|
||||
config:
|
||||
name: "<node_name>"
|
||||
label_selector: "node-role.kubernetes.io/worker" # When node_name is not specified, a node with matching label_selector is selected for node chaos scenario injection
|
||||
runs: 1 # Number of times to inject each scenario under actions (will perform on same node each time)
|
||||
instance_count: 1 # Number of nodes to perform action/select that match the label selector
|
||||
timeout: 30 # Duration to wait for completion of node scenario injection
|
||||
skip_openshift_checks: False # Set to True if you don't want to wait for the status of the nodes to change on OpenShift before passing the scenario
|
||||
- id: ibmcloud-node-start
|
||||
config:
|
||||
name: "<node_name>" #Same name as before
|
||||
label_selector: "node-role.kubernetes.io/worker" # When node_name is not specified, a node with matching label_selector is selected for node chaos scenario injection
|
||||
runs: 1 # Number of times to inject each scenario under actions (will perform on same node each time)
|
||||
instance_count: 1 # Number of nodes to perform action/select that match the label selector
|
||||
timeout: 30 # Duration to wait for completion of node scenario injection
|
||||
skip_openshift_checks: False # Set to True if you don't want to wait for the status of the nodes to change on OpenShift before passing the scenario
|
||||
```
|
||||
- ibmcloud-node-terminate
|
||||
- ibmcloud-node-reboot
|
||||
- ibmcloud-node-stop
|
||||
- ibmcloud-node-start
|
||||
|
||||
|
||||
|
||||
@@ -118,60 +119,3 @@ This cloud type uses a different configuration style, see actions below and [exa
|
||||
**NOTE**: The `node_crash_scenario` and `stop_kubelet_scenario` scenarios are supported independent of the cloud platform.
|
||||
|
||||
Use 'generic' or do not add the 'cloud_type' key to your scenario if your cluster is not set up using one of the current supported cloud types.
|
||||
|
||||
Node scenarios can be injected by placing the node scenarios config files under node_scenarios option in the kraken config. Refer to [node_scenarios_example](https://github.com/redhat-chaos/krkn/blob/main/scenarios/node_scenarios_example.yml) config file.
|
||||
|
||||
|
||||
```
|
||||
node_scenarios:
|
||||
- actions: # Node chaos scenarios to be injected.
|
||||
- node_stop_start_scenario
|
||||
- stop_start_kubelet_scenario
|
||||
- node_crash_scenario
|
||||
node_name: # Node on which scenario has to be injected.
|
||||
label_selector: node-role.kubernetes.io/worker # When node_name is not specified, a node with matching label_selector is selected for node chaos scenario injection.
|
||||
instance_count: 1 # Number of nodes to perform action/select that match the label selector.
|
||||
runs: 1 # Number of times to inject each scenario under actions (will perform on same node each time).
|
||||
timeout: 120 # Duration to wait for completion of node scenario injection.
|
||||
cloud_type: aws # Cloud type on which Kubernetes/OpenShift runs.
|
||||
- actions:
|
||||
- node_reboot_scenario
|
||||
node_name:
|
||||
label_selector: node-role.kubernetes.io/infra
|
||||
instance_count: 1
|
||||
timeout: 120
|
||||
cloud_type: azure
|
||||
- actions:
|
||||
- node_crash_scenario
|
||||
node_name:
|
||||
label_selector: node-role.kubernetes.io/infra
|
||||
instance_count: 1
|
||||
timeout: 120
|
||||
- actions:
|
||||
- stop_start_helper_node_scenario # Node chaos scenario for helper node.
|
||||
instance_count: 1
|
||||
timeout: 120
|
||||
helper_node_ip: # ip address of the helper node.
|
||||
service: # Check status of the services on the helper node.
|
||||
- haproxy
|
||||
- dhcpd
|
||||
- named
|
||||
ssh_private_key: /root/.ssh/id_rsa # ssh key to access the helper node.
|
||||
cloud_type: openstack
|
||||
- actions:
|
||||
- node_stop_start_scenario
|
||||
node_name:
|
||||
label_selector: node-role.kubernetes.io/worker
|
||||
instance_count: 1
|
||||
timeout: 120
|
||||
cloud_type: bm
|
||||
bmc_user: defaultuser # For baremetal (bm) cloud type. The default IPMI username. Optional if specified for all machines.
|
||||
bmc_password: defaultpass # For baremetal (bm) cloud type. The default IPMI password. Optional if specified for all machines.
|
||||
bmc_info: # This section is here to specify baremetal per-machine info, so it is optional if there is no per-machine info.
|
||||
node-1: # The node name for the baremetal machine
|
||||
bmc_addr: mgmt-machine1.example.com # Optional. For baremetal nodes with the IPMI BMC address missing from 'oc get bmh'.
|
||||
node-2:
|
||||
bmc_addr: mgmt-machine2.example.com
|
||||
bmc_user: user # The baremetal IPMI user. Overrides the default IPMI user specified above. Optional if the default is set.
|
||||
bmc_password: pass # The baremetal IPMI password. Overrides the default IPMI user specified above. Optional if the default is set.
|
||||
```
|
||||
|
||||
136
docs/scenario_plugin_api.md
Normal file
136
docs/scenario_plugin_api.md
Normal file
@@ -0,0 +1,136 @@
|
||||
# Scenario Plugin API:
|
||||
|
||||
This API enables seamless integration of Scenario Plugins for Krkn. Plugins are automatically
|
||||
detected and loaded by the plugin loader, provided they extend the `AbstractPluginScenario`
|
||||
abstract class, implement the required methods, and adhere to the specified [naming conventions](#naming-conventions).
|
||||
|
||||
## Plugin folder:
|
||||
|
||||
The plugin loader automatically loads plugins found in the `krkn/scenario_plugins` directory,
|
||||
relative to the Krkn root folder. Each plugin must reside in its own directory and can consist
|
||||
of one or more Python files. The entry point for each plugin is a Python class that extends the
|
||||
[AbstractPluginScenario](../krkn/scenario_plugins/abstract_scenario_plugin.py) abstract class and implements its required methods.
|
||||
|
||||
## `AbstractPluginScenario` abstract class:
|
||||
|
||||
This [abstract class](../krkn/scenario_plugins/abstract_scenario_plugin.py) defines the contract between the plugin and krkn.
|
||||
It consists of two methods:
|
||||
- `run(...)`
|
||||
- `get_scenario_types()`
|
||||
|
||||
Most IDEs can automatically suggest and implement the abstract methods defined in `AbstractPluginScenario`:
|
||||

|
||||
_(IntelliJ PyCharm)_
|
||||
|
||||
### `run(...)`
|
||||
|
||||
```python
|
||||
def run(
|
||||
self,
|
||||
run_uuid: str,
|
||||
scenario: str,
|
||||
krkn_config: dict[str, any],
|
||||
lib_telemetry: KrknTelemetryOpenshift,
|
||||
scenario_telemetry: ScenarioTelemetry,
|
||||
) -> int:
|
||||
|
||||
```
|
||||
|
||||
This method represents the entry point of the plugin and the first method
|
||||
that will be executed.
|
||||
#### Parameters:
|
||||
|
||||
- `run_uuid`:
|
||||
- the uuid of the chaos run generated by krkn for every single run.
|
||||
- `scenario`:
|
||||
- the config file of the scenario that is currently executed
|
||||
- `krkn_config`:
|
||||
- the full dictionary representation of the `config.yaml`
|
||||
- `lib_telemetry`
|
||||
- it is a composite object of all the [krkn-lib](https://krkn-chaos.github.io/krkn-lib-docs/modules.html) objects and methods needed by a krkn plugin to run.
|
||||
- `scenario_telemetry`
|
||||
- the `ScenarioTelemetry` object of the scenario that is currently executed
|
||||
|
||||
### Return value:
|
||||
Returns 0 if the scenario succeeds and 1 if it fails.
|
||||
> [!WARNING]
|
||||
> All exceptions must be handled __inside__ the run method and not propagated.
|
||||
|
||||
### `get_scenario_types()`:
|
||||
|
||||
```python
def get_scenario_types(self) -> list[str]:
```
|
||||
|
||||
Indicates the scenario types specified in the `config.yaml`. For the plugin to be properly
|
||||
loaded, recognized and executed, it must be implemented and must return one or more
|
||||
strings matching `scenario_type` strings set in the config.
|
||||
> [!WARNING]
|
||||
> Multiple strings can map to a *single* `ScenarioPlugin` but the same string cannot map
|
||||
> to different plugins; an exception will be thrown for scenario_type redefinition.
|
||||
|
||||
> [!Note]
|
||||
> The `scenario_type` strings must be unique across all plugins; otherwise, an exception will be thrown.
|
||||
|
||||
## Naming conventions:
|
||||
A key requirement for developing a plugin that will be properly loaded
|
||||
by the plugin loader is following the established naming conventions.
|
||||
These conventions are enforced to maintain a uniform and readable codebase,
|
||||
making it easier to onboard new developers from the community.
|
||||
|
||||
### plugin folder:
|
||||
- the plugin folder must be placed in the `krkn/scenario_plugins` folder starting from the krkn root folder
|
||||
- the plugin folder __cannot__ contain the words
|
||||
- `plugin`
|
||||
- `scenario`
|
||||
### plugin file name and class name:
|
||||
- the plugin file containing the main plugin class must be named in _snake case_ and must have the suffix `_scenario_plugin`:
|
||||
- `example_scenario_plugin.py`
|
||||
- the main plugin class must be named in _capital camel case_ and must have the suffix `ScenarioPlugin`:
|
||||
- `ExampleScenarioPlugin`
|
||||
- the file name must match the class name in the respective syntax:
|
||||
- `example_scenario_plugin.py` -> `ExampleScenarioPlugin`
|
||||
|
||||
### scenario type:
|
||||
- the scenario type __must__ be unique between all the scenarios.
|
||||
|
||||
### logging:
|
||||
If your new scenario does not adhere to the naming conventions, an error log will be generated in the Krkn standard output,
|
||||
providing details about the issue:
|
||||
|
||||
```commandline
|
||||
2024-10-03 18:06:31,136 [INFO] 📣 `ScenarioPluginFactory`: types from config.yaml mapped to respective classes for execution:
|
||||
2024-10-03 18:06:31,136 [INFO] ✅ type: application_outages_scenarios ➡️ `ApplicationOutageScenarioPlugin`
|
||||
2024-10-03 18:06:31,136 [INFO] ✅ types: [hog_scenarios, arcaflow_scenario] ➡️ `ArcaflowScenarioPlugin`
|
||||
2024-10-03 18:06:31,136 [INFO] ✅ type: container_scenarios ➡️ `ContainerScenarioPlugin`
|
||||
2024-10-03 18:06:31,136 [INFO] ✅ type: managedcluster_scenarios ➡️ `ManagedClusterScenarioPlugin`
|
||||
2024-10-03 18:06:31,137 [INFO] ✅ types: [pod_disruption_scenarios, pod_network_scenario, vmware_node_scenarios, ibmcloud_node_scenarios] ➡️ `NativeScenarioPlugin`
|
||||
2024-10-03 18:06:31,137 [INFO] ✅ type: network_chaos_scenarios ➡️ `NetworkChaosScenarioPlugin`
|
||||
2024-10-03 18:06:31,137 [INFO] ✅ type: node_scenarios ➡️ `NodeActionsScenarioPlugin`
|
||||
2024-10-03 18:06:31,137 [INFO] ✅ type: pvc_scenarios ➡️ `PvcScenarioPlugin`
|
||||
2024-10-03 18:06:31,137 [INFO] ✅ type: service_disruption_scenarios ➡️ `ServiceDisruptionScenarioPlugin`
|
||||
2024-10-03 18:06:31,137 [INFO] ✅ type: service_hijacking_scenarios ➡️ `ServiceHijackingScenarioPlugin`
|
||||
2024-10-03 18:06:31,137 [INFO] ✅ type: cluster_shut_down_scenarios ➡️ `ShutDownScenarioPlugin`
|
||||
2024-10-03 18:06:31,137 [INFO] ✅ type: syn_flood_scenarios ➡️ `SynFloodScenarioPlugin`
|
||||
2024-10-03 18:06:31,137 [INFO] ✅ type: time_scenarios ➡️ `TimeActionsScenarioPlugin`
|
||||
2024-10-03 18:06:31,137 [INFO] ✅ type: zone_outages_scenarios ➡️ `ZoneOutageScenarioPlugin`
|
||||
|
||||
2024-09-18 14:48:41,735 [INFO] Failed to load Scenario Plugins:
|
||||
|
||||
2024-09-18 14:48:41,735 [ERROR] ⛔ Class: ExamplePluginScenario Module: krkn.scenario_plugins.example.example_scenario_plugin
|
||||
2024-09-18 14:48:41,735 [ERROR] ⚠️ scenario plugin class name must start with a capital letter, end with `ScenarioPlugin`, and cannot be just `ScenarioPlugin`.
|
||||
```
|
||||
|
||||
>[!NOTE]
|
||||
>If you're trying to understand how the scenario types in the config.yaml are mapped to
|
||||
> their corresponding plugins, this log will guide you!
|
||||
> Each scenario plugin class mentioned can be found in the `krkn/scenario_plugins` folder:
|
||||
> simply convert the camel case notation and remove the `ScenarioPlugin` suffix from the class name,
|
||||
> e.g. the `ShutDownScenarioPlugin` class can be found in the `krkn/scenario_plugins/shut_down` folder.
|
||||
|
||||
## ExampleScenarioPlugin
|
||||
The [ExampleScenarioPlugin](../krkn/tests/test_classes/example_scenario_plugin.py) class included in the tests folder can be used as scaffolding for new plugins and is considered
|
||||
part of the documentation.
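For orientation, a condensed sketch of such a plugin is shown below. The base-class and telemetry import paths follow the references above and should be treated as indicative; the scenario body is illustrative only.

```python
# example_scenario_plugin.py -- a minimal sketch, not a drop-in implementation.
import logging

from krkn.scenario_plugins.abstract_scenario_plugin import AbstractPluginScenario
from krkn_lib.models.telemetry import ScenarioTelemetry
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift


class ExampleScenarioPlugin(AbstractPluginScenario):
    def run(
        self,
        run_uuid: str,
        scenario: str,
        krkn_config: dict[str, any],
        lib_telemetry: KrknTelemetryOpenshift,
        scenario_telemetry: ScenarioTelemetry,
    ) -> int:
        try:
            # scenario-specific logic goes here; exceptions must never propagate
            logging.info("running example scenario %s for run %s", scenario, run_uuid)
            return 0
        except Exception as e:
            logging.error("example scenario failed: %s", e)
            return 1

    def get_scenario_types(self) -> list[str]:
        # must match the scenario_type string(s) used in config.yaml
        return ["example_scenarios"]
```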
|
||||
|
||||
For any questions or further guidance, feel free to reach out to us on the
|
||||
[Kubernetes workspace](https://kubernetes.slack.com/) in the `#krkn` channel.
|
||||
We’re happy to assist. Now, __release the Krkn!__
|
||||
|
||||
BIN
docs/scenario_plugin_pycharm.gif
Normal file
BIN
docs/scenario_plugin_pycharm.gif
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 340 KiB |
80
docs/service_hijacking_scenarios.md
Normal file
80
docs/service_hijacking_scenarios.md
Normal file
@@ -0,0 +1,80 @@
|
||||
### Service Hijacking Scenarios
|
||||
|
||||
Service Hijacking Scenarios aim to simulate fake HTTP responses from a workload targeted by a
|
||||
`Service` already deployed in the cluster.
|
||||
This scenario is executed by deploying a custom-made web service and modifying the target `Service`
|
||||
selector to direct traffic to this web service for a specified duration.
|
||||
|
||||
The web service's source code is available [here](https://github.com/krkn-chaos/krkn-service-hijacking).
|
||||
It employs a time-based test plan from the scenario configuration file, which specifies the behavior of resources during the chaos scenario as follows:
|
||||
|
||||
```yaml
|
||||
service_target_port: http-web-svc # The port of the service to be hijacked (can be named or numeric, based on the workload and service configuration).
|
||||
service_name: nginx-service # The name of the service that will be hijacked.
|
||||
service_namespace: default # The namespace where the target service is located.
|
||||
image: quay.io/krkn-chaos/krkn-service-hijacking:v0.1.3 # Image of the krkn web service to be deployed to receive traffic.
|
||||
chaos_duration: 30 # Total duration of the chaos scenario in seconds.
|
||||
plan:
|
||||
- resource: "/list/index.php" # Specifies the resource or path to respond to in the scenario. For paths, both the path and query parameters are captured but ignored. For resources, only query parameters are captured.
|
||||
|
||||
steps: # A time-based plan consisting of steps can be defined for each resource.
|
||||
GET: # One or more HTTP methods can be specified for each step. Note: Non-standard methods are supported for fully custom web services (e.g., using NONEXISTENT instead of POST).
|
||||
|
||||
- duration: 15 # Duration in seconds for this step before moving to the next one, if defined. Otherwise, this step will continue until the chaos scenario ends.
|
||||
|
||||
status: 500 # HTTP status code to be returned in this step.
|
||||
mime_type: "application/json" # MIME type of the response for this step.
|
||||
payload: | # The response payload for this step.
|
||||
{
|
||||
"status":"internal server error"
|
||||
}
|
||||
- duration: 15
|
||||
status: 201
|
||||
mime_type: "application/json"
|
||||
payload: |
|
||||
{
|
||||
"status":"resource created"
|
||||
}
|
||||
POST:
|
||||
- duration: 15
|
||||
status: 401
|
||||
mime_type: "application/json"
|
||||
payload: |
|
||||
{
|
||||
"status": "unauthorized"
|
||||
}
|
||||
- duration: 15
|
||||
status: 404
|
||||
mime_type: "text/plain"
|
||||
payload: "not found"
|
||||
|
||||
|
||||
```
|
||||
The scenario will focus on the `service_name` within the `service_namespace`,
|
||||
substituting the selector with a randomly generated one, which is added as a label in the mock service manifest.
|
||||
This allows multiple scenarios to be executed in the same namespace, each targeting different services without
|
||||
causing conflicts.
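A quick way to observe the selector substitution while the scenario is running is to read the target `Service` back; the sketch below assumes the `kubernetes` Python client, a valid kubeconfig, and the service name/namespace from the example above.

```python
# Sketch: print the target Service selector during the chaos run.
from kubernetes import client, config

config.load_kube_config()
svc = client.CoreV1Api().read_namespaced_service("nginx-service", "default")
# While the scenario is active this prints the randomly generated selector;
# once chaos_duration elapses the original selector is restored.
print(svc.spec.selector)
```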
|
||||
|
||||
The newly deployed mock web service will expose a `service_target_port`,
|
||||
which can be either a named or numeric port based on the service configuration.
|
||||
This ensures that the Service correctly routes HTTP traffic to the mock web service during the chaos run.
|
||||
|
||||
Each step will last for `duration` seconds from the deployment of the mock web service in the cluster.
|
||||
For each HTTP resource, defined as a top-level YAML property of the plan
|
||||
(it could be a specific resource, e.g., /list/index.php, or a path-based resource typical in MVC frameworks),
|
||||
one or more HTTP request methods can be specified. Both standard and custom request methods are supported.
|
||||
|
||||
During this time frame, the web service will respond with:
|
||||
|
||||
- `status`: The [HTTP status code](https://datatracker.ietf.org/doc/html/rfc7231#section-6) (can be standard or custom).
|
||||
- `mime_type`: The [MIME type](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types) (can be standard or custom).
|
||||
- `payload`: The response body to be returned to the client.
|
||||
|
||||
At the end of the step `duration`, the web service will proceed to the next step (if available) until
|
||||
the global `chaos_duration` concludes. At this point, the original service will be restored,
|
||||
and the custom web service and its resources will be undeployed.
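The step progression can also be observed from a client. The sketch below assumes the hijacked service from the example above has been made reachable locally (for instance via `kubectl port-forward svc/nginx-service 8080:80`) and that the `requests` package is installed.

```python
# Poll the hijacked resource and watch the planned responses change over time.
import time

import requests

TARGET = "http://localhost:8080/list/index.php"  # hypothetical local forward of the service

for _ in range(6):
    resp = requests.get(TARGET, timeout=5)
    # Per the plan above: HTTP 500 + JSON for the first 15 seconds, then HTTP 201 + JSON.
    print(resp.status_code, resp.headers.get("Content-Type"), resp.text.strip())
    time.sleep(5)
```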
|
||||
|
||||
__NOTE__: Some clients (e.g., cURL, jQuery) may optimize queries using lightweight methods (like HEAD or OPTIONS)
|
||||
to probe API behavior. If these methods are not defined in the test plan, the web service may respond with
|
||||
a `405` or `404` status code. If you encounter unexpected behavior, consider this use case.
|
||||
|
||||
33
docs/syn_flood_scenarios.md
Normal file
33
docs/syn_flood_scenarios.md
Normal file
@@ -0,0 +1,33 @@
|
||||
### SYN Flood Scenarios
|
||||
|
||||
This scenario generates a substantial amount of TCP traffic directed at one or more Kubernetes services within
|
||||
the cluster to test the server's resiliency under extreme traffic conditions.
|
||||
It can also target hosts outside the cluster by specifying a reachable IP address or hostname.
|
||||
This scenario leverages the distributed nature of Kubernetes clusters to instantiate multiple instances
|
||||
of the same pod against a single host, significantly increasing the effectiveness of the attack.
|
||||
The configuration also allows for the specification of multiple node selectors, enabling Kubernetes to schedule
|
||||
the attacker pods on a user-defined subset of nodes to make the test more realistic.
|
||||
|
||||
```yaml
|
||||
packet-size: 120 # hping3 packet size
|
||||
window-size: 64 # hping3 TCP window size
|
||||
duration: 10 # chaos scenario duration
|
||||
namespace: default # namespace where the target service(s) are deployed
|
||||
target-service: target-svc # target service name (if set target-service-label must be empty)
|
||||
target-port: 80 # target service TCP port
|
||||
target-service-label : "" # target service label, can be used to target multiple targets at the same time
|
||||
# if they have the same label set (if set target-service must be empty)
|
||||
number-of-pods: 2 # number of attacker pods instantiated per target
|
||||
image: quay.io/krkn-chaos/krkn-syn-flood # syn flood attacker container image
|
||||
attacker-nodes: # this will set the node affinity to schedule the attacker pods. For each node label selector,
|
||||
# multiple values can be specified; this way the kube scheduler will schedule the attacker pods
|
||||
# in the best way possible based on the provided labels. Multiple labels can be specified
|
||||
kubernetes.io/hostname:
|
||||
- host_1
|
||||
- host_2
|
||||
kubernetes.io/os:
|
||||
- linux
|
||||
|
||||
```
|
||||
|
||||
The attacker container source code is available [here](https://github.com/krkn-chaos/krkn-syn-flood).
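As a pre-flight check, the mutual-exclusivity rule between `target-service` and `target-service-label` noted in the comments above can be validated with a short sketch (the scenario file path is hypothetical):

```python
# Validate the target-service / target-service-label exclusivity described above.
import yaml

with open("scenarios/kube/syn_flood.yaml") as f:  # hypothetical path
    cfg = yaml.safe_load(f)

if bool(cfg.get("target-service")) == bool(cfg.get("target-service-label")):
    raise ValueError("set exactly one of target-service or target-service-label")
print(f"{cfg['number-of-pods']} attacker pod(s) per target on port {cfg['target-port']}")
```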
|
||||
@@ -13,10 +13,12 @@ zone_outage: # Scenario to create an out
|
||||
duration: 600 # Duration in seconds after which the zone will be back online.
|
||||
vpc_id: # Cluster virtual private network to target.
|
||||
subnet_id: [subnet1, subnet2] # List of subnet-id's to deny both ingress and egress traffic.
|
||||
default_acl_id: acl-xxxxxxxx # (Optional) ID of an existing network ACL to use instead of creating a new one. If provided, this ACL will not be deleted after the scenario.
|
||||
```
|
||||
|
||||
**NOTE**: vpc_id and subnet_id can be obtained from the cloud web console by selecting one of the instances in the targeted zone ( us-west-2a for example ).
|
||||
**NOTE**: Multiple zones will experience downtime if multiple subnets are targeted, which might impact cluster health, especially if the zones have control plane components deployed.
|
||||
**NOTE**: default_acl_id can be obtained from the AWS VPC Console by selecting "Network ACLs" from the left sidebar ( the ID will be in the format 'acl-xxxxxxxx' ). Make sure the selected ACL has the desired ingress/egress rules for your outage scenario ( i.e., deny all ).
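If you prefer to look the ACL up programmatically rather than through the console, the sketch below finds the network ACL currently associated with a subnet; it assumes `boto3` is installed and AWS credentials are configured, and the region and subnet ID are placeholders.

```python
# Look up the network ACL associated with a subnet -- region and ID are placeholders.
import boto3

ec2 = boto3.client("ec2", region_name="us-west-2")
subnet_id = "subnet-0123456789abcdef0"

resp = ec2.describe_network_acls(
    Filters=[{"Name": "association.subnet-id", "Values": [subnet_id]}]
)
for acl in resp["NetworkAcls"]:
    print(acl["NetworkAclId"], "default ACL:", acl["IsDefault"])
```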
|
||||
|
||||
##### Debugging steps in case of failures
|
||||
If failures occur during the steps that revert the network ACL to allow traffic and bring back the cluster nodes in the zone, the nodes in that zone will be in the `NotReady` condition. Here is how to fix it:
|
||||
|
||||
@@ -2,6 +2,9 @@ kind: Cluster
|
||||
apiVersion: kind.x-k8s.io/v1alpha4
|
||||
nodes:
|
||||
- role: control-plane
|
||||
extraPortMappings:
|
||||
- containerPort: 30036
|
||||
hostPort: 8888
|
||||
- role: control-plane
|
||||
- role: control-plane
|
||||
- role: worker
|
||||
|
||||
@@ -1,84 +0,0 @@
|
||||
import yaml
|
||||
import logging
|
||||
import time
|
||||
import kraken.cerberus.setup as cerberus
|
||||
from jinja2 import Template
|
||||
import kraken.invoke.command as runcommand
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
|
||||
from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from krkn_lib.utils.functions import get_yaml_item_value, log_exception
|
||||
|
||||
|
||||
# Reads the scenario config, applies and deletes a network policy to
|
||||
# block the traffic for the specified duration
|
||||
def run(scenarios_list, config, wait_duration,kubecli: KrknKubernetes, telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]):
|
||||
failed_post_scenarios = ""
|
||||
scenario_telemetries: list[ScenarioTelemetry] = []
|
||||
failed_scenarios = []
|
||||
for app_outage_config in scenarios_list:
|
||||
scenario_telemetry = ScenarioTelemetry()
|
||||
scenario_telemetry.scenario = app_outage_config
|
||||
scenario_telemetry.startTimeStamp = time.time()
|
||||
telemetry.set_parameters_base64(scenario_telemetry, app_outage_config)
|
||||
if len(app_outage_config) > 1:
|
||||
try:
|
||||
with open(app_outage_config, "r") as f:
|
||||
app_outage_config_yaml = yaml.full_load(f)
|
||||
scenario_config = app_outage_config_yaml["application_outage"]
|
||||
pod_selector = get_yaml_item_value(
|
||||
scenario_config, "pod_selector", "{}"
|
||||
)
|
||||
traffic_type = get_yaml_item_value(
|
||||
scenario_config, "block", "[Ingress, Egress]"
|
||||
)
|
||||
namespace = get_yaml_item_value(
|
||||
scenario_config, "namespace", ""
|
||||
)
|
||||
duration = get_yaml_item_value(
|
||||
scenario_config, "duration", 60
|
||||
)
|
||||
|
||||
start_time = int(time.time())
|
||||
|
||||
network_policy_template = """---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
name: kraken-deny
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels: {{ pod_selector }}
|
||||
policyTypes: {{ traffic_type }}
|
||||
"""
|
||||
t = Template(network_policy_template)
|
||||
rendered_spec = t.render(pod_selector=pod_selector, traffic_type=traffic_type)
|
||||
yaml_spec = yaml.safe_load(rendered_spec)
|
||||
# Block the traffic by creating network policy
|
||||
logging.info("Creating the network policy")
|
||||
|
||||
kubecli.create_net_policy(yaml_spec, namespace)
|
||||
|
||||
# wait for the specified duration
|
||||
logging.info("Waiting for the specified duration in the config: %s" % (duration))
|
||||
time.sleep(duration)
|
||||
|
||||
# unblock the traffic by deleting the network policy
|
||||
logging.info("Deleting the network policy")
|
||||
kubecli.delete_net_policy("kraken-deny", namespace)
|
||||
|
||||
logging.info("End of scenario. Waiting for the specified duration: %s" % (wait_duration))
|
||||
time.sleep(wait_duration)
|
||||
|
||||
end_time = int(time.time())
|
||||
cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
|
||||
except Exception as e :
|
||||
scenario_telemetry.exitStatus = 1
|
||||
failed_scenarios.append(app_outage_config)
|
||||
log_exception(app_outage_config)
|
||||
else:
|
||||
scenario_telemetry.exitStatus = 0
|
||||
scenario_telemetry.endTimeStamp = time.time()
|
||||
scenario_telemetries.append(scenario_telemetry)
|
||||
return failed_scenarios, scenario_telemetries
|
||||
|
||||
@@ -1,2 +0,0 @@
|
||||
from .arcaflow_plugin import *
|
||||
from .context_auth import ContextAuth
|
||||
@@ -1,178 +0,0 @@
|
||||
import time
|
||||
import arcaflow
|
||||
import os
|
||||
import yaml
|
||||
import logging
|
||||
from pathlib import Path
|
||||
from typing import List
|
||||
from .context_auth import ContextAuth
|
||||
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
|
||||
from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
|
||||
|
||||
def run(scenarios_list: List[str], kubeconfig_path: str, telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]):
|
||||
scenario_telemetries: list[ScenarioTelemetry] = []
|
||||
failed_post_scenarios = []
|
||||
for scenario in scenarios_list:
|
||||
scenario_telemetry = ScenarioTelemetry()
|
||||
scenario_telemetry.scenario = scenario
|
||||
scenario_telemetry.startTimeStamp = time.time()
|
||||
telemetry.set_parameters_base64(scenario_telemetry,scenario)
|
||||
engine_args = build_args(scenario)
|
||||
status_code = run_workflow(engine_args, kubeconfig_path)
|
||||
scenario_telemetry.endTimeStamp = time.time()
|
||||
scenario_telemetry.exitStatus = status_code
|
||||
scenario_telemetries.append(scenario_telemetry)
|
||||
if status_code != 0:
|
||||
failed_post_scenarios.append(scenario)
|
||||
return failed_post_scenarios, scenario_telemetries
|
||||
|
||||
|
||||
def run_workflow(engine_args: arcaflow.EngineArgs, kubeconfig_path: str) -> int:
|
||||
set_arca_kubeconfig(engine_args, kubeconfig_path)
|
||||
exit_status = arcaflow.run(engine_args)
|
||||
return exit_status
|
||||
|
||||
|
||||
def build_args(input_file: str) -> arcaflow.EngineArgs:
|
||||
"""sets the kubeconfig parsed by setArcaKubeConfig as an input to the arcaflow workflow"""
|
||||
context = Path(input_file).parent
|
||||
workflow = "{}/workflow.yaml".format(context)
|
||||
config = "{}/config.yaml".format(context)
|
||||
if not os.path.exists(context):
|
||||
raise Exception(
|
||||
"context folder for arcaflow workflow not found: {}".format(
|
||||
context)
|
||||
)
|
||||
if not os.path.exists(input_file):
|
||||
raise Exception(
|
||||
"input file for arcaflow workflow not found: {}".format(input_file))
|
||||
if not os.path.exists(workflow):
|
||||
raise Exception(
|
||||
"workflow file for arcaflow workflow not found: {}".format(
|
||||
workflow)
|
||||
)
|
||||
if not os.path.exists(config):
|
||||
raise Exception(
|
||||
"configuration file for arcaflow workflow not found: {}".format(
|
||||
config)
|
||||
)
|
||||
|
||||
engine_args = arcaflow.EngineArgs()
|
||||
engine_args.context = context
|
||||
engine_args.config = config
|
||||
engine_args.input = input_file
|
||||
return engine_args
|
||||
|
||||
|
||||
def set_arca_kubeconfig(engine_args: arcaflow.EngineArgs, kubeconfig_path: str):
|
||||
|
||||
context_auth = ContextAuth()
|
||||
if not os.path.exists(kubeconfig_path):
|
||||
raise Exception("kubeconfig not found in {}".format(kubeconfig_path))
|
||||
|
||||
with open(kubeconfig_path, "r") as stream:
|
||||
try:
|
||||
kubeconfig = yaml.safe_load(stream)
|
||||
context_auth.fetch_auth_data(kubeconfig)
|
||||
except Exception as e:
|
||||
logging.error("impossible to read kubeconfig file in: {}".format(
|
||||
kubeconfig_path))
|
||||
raise e
|
||||
|
||||
kubeconfig_str = set_kubeconfig_auth(kubeconfig, context_auth)
|
||||
|
||||
with open(engine_args.input, "r") as stream:
|
||||
input_file = yaml.safe_load(stream)
|
||||
if "input_list" in input_file and isinstance(input_file["input_list"],list):
|
||||
for index, _ in enumerate(input_file["input_list"]):
|
||||
if isinstance(input_file["input_list"][index], dict):
|
||||
input_file["input_list"][index]["kubeconfig"] = kubeconfig_str
|
||||
else:
|
||||
input_file["kubeconfig"] = kubeconfig_str
|
||||
stream.close()
|
||||
with open(engine_args.input, "w") as stream:
|
||||
yaml.safe_dump(input_file, stream)
|
||||
|
||||
with open(engine_args.config, "r") as stream:
|
||||
config_file = yaml.safe_load(stream)
|
||||
if config_file["deployers"]["image"]["deployer_name"] == "kubernetes":
|
||||
kube_connection = set_kubernetes_deployer_auth(config_file["deployers"]["image"]["connection"], context_auth)
|
||||
config_file["deployers"]["image"]["connection"]=kube_connection
|
||||
with open(engine_args.config, "w") as stream:
|
||||
yaml.safe_dump(config_file, stream,explicit_start=True, width=4096)
|
||||
|
||||
|
||||
def set_kubernetes_deployer_auth(deployer: any, context_auth: ContextAuth) -> any:
|
||||
if context_auth.clusterHost is not None :
|
||||
deployer["host"] = context_auth.clusterHost
|
||||
if context_auth.clientCertificateData is not None :
|
||||
deployer["cert"] = context_auth.clientCertificateData
|
||||
if context_auth.clientKeyData is not None:
|
||||
deployer["key"] = context_auth.clientKeyData
|
||||
if context_auth.clusterCertificateData is not None:
|
||||
deployer["cacert"] = context_auth.clusterCertificateData
|
||||
if context_auth.username is not None:
|
||||
deployer["username"] = context_auth.username
|
||||
if context_auth.password is not None:
|
||||
deployer["password"] = context_auth.password
|
||||
if context_auth.bearerToken is not None:
|
||||
deployer["bearerToken"] = context_auth.bearerToken
|
||||
return deployer
|
||||
|
||||
|
||||
def set_kubeconfig_auth(kubeconfig: any, context_auth: ContextAuth) -> str:
|
||||
"""
|
||||
Builds an arcaflow-compatible kubeconfig representation and returns it as a string.
|
||||
In order to run arcaflow plugins in kubernetes/openshift the kubeconfig must contain client certificate/key
|
||||
and server certificate base64 encoded within the kubeconfig file itself in *-data fields. That is not always the
|
||||
case; in fact, the kubeconfig may contain filesystem paths to those files. This function builds an arcaflow-compatible
|
||||
kubeconfig file and returns it as a string that can be safely included in input.yaml
|
||||
"""
|
||||
|
||||
if "current-context" not in kubeconfig.keys():
|
||||
raise Exception(
|
||||
"invalid kubeconfig file, impossible to determine current-context"
|
||||
)
|
||||
user_id = None
|
||||
cluster_id = None
|
||||
user_name = None
|
||||
cluster_name = None
|
||||
current_context = kubeconfig["current-context"]
|
||||
for context in kubeconfig["contexts"]:
|
||||
if context["name"] == current_context:
|
||||
user_name = context["context"]["user"]
|
||||
cluster_name = context["context"]["cluster"]
|
||||
if user_name is None:
|
||||
raise Exception(
|
||||
"user not set for context {} in kubeconfig file".format(current_context)
|
||||
)
|
||||
if cluster_name is None:
|
||||
raise Exception(
|
||||
"cluster not set for context {} in kubeconfig file".format(current_context)
|
||||
)
|
||||
|
||||
for index, user in enumerate(kubeconfig["users"]):
|
||||
if user["name"] == user_name:
|
||||
user_id = index
|
||||
for index, cluster in enumerate(kubeconfig["clusters"]):
|
||||
if cluster["name"] == cluster_name:
|
||||
cluster_id = index
|
||||
|
||||
    if user_id is None:
        raise Exception(
            "no user {} found in kubeconfig users".format(user_name)
        )
    if cluster_id is None:
|
||||
raise Exception(
|
||||
"no cluster {} found in kubeconfig users".format(cluster_name)
|
||||
)
|
||||
if "client-certificate" in kubeconfig["users"][user_id]["user"]:
|
||||
kubeconfig["users"][user_id]["user"]["client-certificate-data"] = context_auth.clientCertificateDataBase64
|
||||
del kubeconfig["users"][user_id]["user"]["client-certificate"]
|
||||
|
||||
if "client-key" in kubeconfig["users"][user_id]["user"]:
|
||||
kubeconfig["users"][user_id]["user"]["client-key-data"] = context_auth.clientKeyDataBase64
|
||||
del kubeconfig["users"][user_id]["user"]["client-key"]
|
||||
|
||||
if "certificate-authority" in kubeconfig["clusters"][cluster_id]["cluster"]:
|
||||
kubeconfig["clusters"][cluster_id]["cluster"]["certificate-authority-data"] = context_auth.clusterCertificateDataBase64
|
||||
del kubeconfig["clusters"][cluster_id]["cluster"]["certificate-authority"]
|
||||
kubeconfig_str = yaml.dump(kubeconfig)
|
||||
return kubeconfig_str
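# Illustrative sketch (not part of the original module): given a ContextAuth whose
# *Base64 fields were populated by fetch_auth_data(), the rewrite above replaces
# path-based credentials with inline data, e.g.
#
#   users:                                     users:
#   - name: admin                              - name: admin
#     user:                          --->        user:
#       client-certificate: /tmp/crt.pem           client-certificate-data: LS0tLS1CRUdJTi...
#       client-key: /tmp/key.pem                   client-key-data: LS0tLS1CRUdJTi...
#
# so the returned string can be embedded verbatim in the workflow's input.yaml.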
|
||||
@@ -1,112 +0,0 @@
|
||||
import logging
|
||||
|
||||
from prometheus_api_client import PrometheusConnect
|
||||
import pandas as pd
|
||||
import urllib3
|
||||
|
||||
|
||||
saved_metrics_path = "./utilisation.txt"
|
||||
|
||||
|
||||
def convert_data_to_dataframe(data, label):
|
||||
df = pd.DataFrame()
|
||||
df['service'] = [item['metric']['pod'] for item in data]
|
||||
df[label] = [item['value'][1] for item in data]
|
||||
|
||||
return df
|
||||
|
||||
|
||||
def convert_data(data, service):
|
||||
|
||||
result = {}
|
||||
for entry in data:
|
||||
pod_name = entry['metric']['pod']
|
||||
value = entry['value'][1]
|
||||
result[pod_name] = value
|
||||
return result.get(service, '100000000000')  # pods with no limits defined can consume unbounded resources, so assign a very high value
|
||||
|
||||
|
||||
def save_utilization_to_file(utilization, filename):
|
||||
merged_df = pd.DataFrame(columns=['namespace', 'service', 'CPU', 'CPU_LIMITS', 'MEM', 'MEM_LIMITS', 'NETWORK'])
|
||||
for namespace in utilization:
|
||||
# Loading utilization_data[] for namespace
|
||||
# indexes -- 0 CPU, 1 CPU limits, 2 mem, 3 mem limits, 4 network
|
||||
utilization_data = utilization[namespace]
|
||||
df_cpu = convert_data_to_dataframe(utilization_data[0], "CPU")
|
||||
services = df_cpu.service.unique()
|
||||
logging.info(f"Services for namespace {namespace}: {services}")
|
||||
|
||||
for s in services:
|
||||
|
||||
new_row_df = pd.DataFrame({
|
||||
"namespace": namespace, "service": s,
|
||||
"CPU": convert_data(utilization_data[0], s),
|
||||
"CPU_LIMITS": convert_data(utilization_data[1], s),
|
||||
"MEM": convert_data(utilization_data[2], s),
|
||||
"MEM_LIMITS": convert_data(utilization_data[3], s),
|
||||
"NETWORK": convert_data(utilization_data[4], s)}, index=[0])
|
||||
merged_df = pd.concat([merged_df, new_row_df], ignore_index=True)
|
||||
|
||||
# Convert columns to string
|
||||
merged_df['CPU'] = merged_df['CPU'].astype(str)
|
||||
merged_df['MEM'] = merged_df['MEM'].astype(str)
|
||||
merged_df['CPU_LIMITS'] = merged_df['CPU_LIMITS'].astype(str)
|
||||
merged_df['MEM_LIMITS'] = merged_df['MEM_LIMITS'].astype(str)
|
||||
merged_df['NETWORK'] = merged_df['NETWORK'].astype(str)
|
||||
|
||||
# Extract integer part before the decimal point
|
||||
merged_df['CPU'] = merged_df['CPU'].str.split('.').str[0]
|
||||
merged_df['MEM'] = merged_df['MEM'].str.split('.').str[0]
|
||||
merged_df['CPU_LIMITS'] = merged_df['CPU_LIMITS'].str.split('.').str[0]
|
||||
merged_df['MEM_LIMITS'] = merged_df['MEM_LIMITS'].str.split('.').str[0]
|
||||
merged_df['NETWORK'] = merged_df['NETWORK'].str.split('.').str[0]
|
||||
|
||||
merged_df.to_csv(filename, sep='\t', index=False)
|
||||
|
||||
|
||||
def fetch_utilization_from_prometheus(prometheus_endpoint, auth_token,
|
||||
namespaces, scrape_duration):
|
||||
urllib3.disable_warnings()
|
||||
prometheus = PrometheusConnect(url=prometheus_endpoint, headers={
|
||||
'Authorization':'Bearer {}'.format(auth_token)}, disable_ssl=True)
|
||||
|
||||
# Dicts for saving utilisation and queries -- key is namespace
|
||||
utilization = {}
|
||||
queries = {}
|
||||
|
||||
logging.info("Fetching utilization...")
|
||||
for namespace in namespaces:
|
||||
|
||||
# Fetch CPU utilization
|
||||
cpu_query = 'sum (rate (container_cpu_usage_seconds_total{image!="", namespace="%s"}[%s])) by (pod) *1000' % (namespace,scrape_duration)
|
||||
cpu_result = prometheus.custom_query(cpu_query)
|
||||
|
||||
cpu_limits_query = '(sum by (pod) (kube_pod_container_resource_limits{resource="cpu", namespace="%s"}))*1000' %(namespace)
|
||||
cpu_limits_result = prometheus.custom_query(cpu_limits_query)
|
||||
|
||||
mem_query = 'sum by (pod) (avg_over_time(container_memory_usage_bytes{image!="", namespace="%s"}[%s]))' % (namespace, scrape_duration)
|
||||
mem_result = prometheus.custom_query(mem_query)
|
||||
|
||||
mem_limits_query = 'sum by (pod) (kube_pod_container_resource_limits{resource="memory", namespace="%s"}) ' %(namespace)
|
||||
mem_limits_result = prometheus.custom_query(mem_limits_query)
|
||||
|
||||
network_query = 'sum by (pod) ((avg_over_time(container_network_transmit_bytes_total{namespace="%s"}[%s])) + \
|
||||
(avg_over_time(container_network_receive_bytes_total{namespace="%s"}[%s])))' % (namespace, scrape_duration, namespace, scrape_duration)
|
||||
network_result = prometheus.custom_query(network_query)
|
||||
|
||||
utilization[namespace] = [cpu_result, cpu_limits_result, mem_result, mem_limits_result, network_result]
|
||||
queries[namespace] = json_queries(cpu_query, cpu_limits_query, mem_query, mem_limits_query, network_query)
|
||||
|
||||
save_utilization_to_file(utilization, saved_metrics_path)
|
||||
return saved_metrics_path, queries
|
||||
|
||||
|
||||
def json_queries(cpu_query, cpu_limits_query, mem_query, mem_limits_query, network_query):
|
||||
queries = {
|
||||
"cpu_query": cpu_query,
|
||||
"cpu_limit_query": cpu_limits_query,
|
||||
"memory_query": mem_query,
|
||||
"memory_limit_query": mem_limits_query,
|
||||
"network_query": network_query
|
||||
}
|
||||
return queries
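# Illustrative usage sketch (endpoint, token and namespaces are hypothetical, not
# part of the original module):
#
#   metrics_file, queries = fetch_utilization_from_prometheus(
#       "https://prometheus-k8s.example.com",  # Prometheus API endpoint
#       "sha256~example-bearer-token",         # service account bearer token
#       ["robot-shop", "payments"],            # namespaces to profile
#       "1h",                                  # range used by rate()/avg_over_time()
#   )
#   # metrics_file points at ./utilisation.txt (tab-separated per-pod usage);
#   # queries maps each namespace to the exact PromQL strings that produced it.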
|
||||
@@ -1,892 +0,0 @@
|
||||
import logging
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
|
||||
from kubernetes import client, config, utils, watch
|
||||
from kubernetes.client.rest import ApiException
|
||||
from kubernetes.dynamic.client import DynamicClient
|
||||
from kubernetes.stream import stream
|
||||
|
||||
from ..kubernetes.resources import (PVC, ChaosEngine, ChaosResult, Container,
|
||||
LitmusChaosObject, Pod, Volume,
|
||||
VolumeMount)
|
||||
|
||||
kraken_node_name = ""
|
||||
|
||||
|
||||
# Load kubeconfig and initialize kubernetes python client
|
||||
def initialize_clients(kubeconfig_path):
|
||||
global cli
|
||||
global batch_cli
|
||||
global watch_resource
|
||||
global api_client
|
||||
global dyn_client
|
||||
global custom_object_client
|
||||
try:
|
||||
if kubeconfig_path:
|
||||
config.load_kube_config(kubeconfig_path)
|
||||
else:
|
||||
config.load_incluster_config()
|
||||
api_client = client.ApiClient()
|
||||
cli = client.CoreV1Api(api_client)
|
||||
batch_cli = client.BatchV1Api(api_client)
|
||||
custom_object_client = client.CustomObjectsApi(api_client)
|
||||
dyn_client = DynamicClient(api_client)
|
||||
watch_resource = watch.Watch()
|
||||
except ApiException as e:
|
||||
logging.error("Failed to initialize kubernetes client: %s\n" % e)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def get_host() -> str:
|
||||
"""Returns the Kubernetes server URL"""
|
||||
return client.configuration.Configuration.get_default_copy().host
|
||||
|
||||
|
||||
def get_clusterversion_string() -> str:
|
||||
"""
|
||||
Returns clusterversion status text on OpenShift, empty string
|
||||
on other distributions
|
||||
"""
|
||||
try:
|
||||
cvs = custom_object_client.list_cluster_custom_object(
|
||||
"config.openshift.io",
|
||||
"v1",
|
||||
"clusterversions",
|
||||
)
|
||||
for cv in cvs["items"]:
|
||||
for condition in cv["status"]["conditions"]:
|
||||
if condition["type"] == "Progressing":
|
||||
return condition["message"]
|
||||
return ""
|
||||
except client.exceptions.ApiException as e:
|
||||
if e.status == 404:
|
||||
return ""
|
||||
else:
|
||||
raise
|
||||
|
||||
|
||||
# List all namespaces
|
||||
def list_namespaces(label_selector=None):
|
||||
namespaces = []
|
||||
try:
|
||||
if label_selector:
|
||||
ret = cli.list_namespace(
|
||||
pretty=True,
|
||||
label_selector=label_selector
|
||||
)
|
||||
else:
|
||||
ret = cli.list_namespace(pretty=True)
|
||||
except ApiException as e:
|
||||
logging.error(
|
||||
"Exception when calling CoreV1Api->list_namespaced_pod: %s\n" % e
|
||||
)
|
||||
raise e
|
||||
for namespace in ret.items:
|
||||
namespaces.append(namespace.metadata.name)
|
||||
return namespaces
|
||||
|
||||
|
||||
def get_namespace_status(namespace_name):
|
||||
"""Get status of a given namespace"""
|
||||
ret = ""
|
||||
try:
|
||||
ret = cli.read_namespace_status(namespace_name)
|
||||
except ApiException as e:
|
||||
logging.error(
|
||||
"Exception when calling CoreV1Api->read_namespace_status: %s\n" % e
|
||||
)
|
||||
return ret.status.phase
|
||||
|
||||
|
||||
def delete_namespace(namespace):
|
||||
"""Deletes a given namespace using kubernetes python client"""
|
||||
try:
|
||||
api_response = cli.delete_namespace(namespace)
|
||||
logging.debug(
|
||||
"Namespace deleted. status='%s'" % str(api_response.status)
|
||||
)
|
||||
return api_response
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Exception when calling \
|
||||
CoreV1Api->delete_namespace: %s\n"
|
||||
% e
|
||||
)
|
||||
|
||||
|
||||
def check_namespaces(namespaces, label_selectors=None):
|
||||
"""Check if all the watch_namespaces are valid"""
|
||||
try:
|
||||
valid_namespaces = list_namespaces(label_selectors)
|
||||
regex_namespaces = set(namespaces) - set(valid_namespaces)
|
||||
final_namespaces = set(namespaces) - set(regex_namespaces)
|
||||
valid_regex = set()
|
||||
if regex_namespaces:
|
||||
for namespace in valid_namespaces:
|
||||
for regex_namespace in regex_namespaces:
|
||||
if re.search(regex_namespace, namespace):
|
||||
final_namespaces.add(namespace)
|
||||
valid_regex.add(regex_namespace)
|
||||
break
|
||||
invalid_namespaces = regex_namespaces - valid_regex
|
||||
if invalid_namespaces:
|
||||
raise Exception(
|
||||
"There exists no namespaces matching: %s" %
|
||||
(invalid_namespaces)
|
||||
)
|
||||
return list(final_namespaces)
|
||||
except Exception as e:
|
||||
logging.info("%s" % (e))
|
||||
sys.exit(1)
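# Illustrative note (namespace names are hypothetical): check_namespaces() treats
# entries that do not match an existing namespace literally as regular expressions.
# With namespaces=["default", "openshift-.*"] on a cluster containing "default",
# "openshift-etcd" and "openshift-apiserver", the returned list would contain all
# three, while a pattern that matches nothing raises and aborts the run.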
|
||||
|
||||
|
||||
# List nodes in the cluster
|
||||
def list_nodes(label_selector=None):
|
||||
nodes = []
|
||||
try:
|
||||
if label_selector:
|
||||
ret = cli.list_node(pretty=True, label_selector=label_selector)
|
||||
else:
|
||||
ret = cli.list_node(pretty=True)
|
||||
except ApiException as e:
|
||||
logging.error("Exception when calling CoreV1Api->list_node: %s\n" % e)
|
||||
raise e
|
||||
for node in ret.items:
|
||||
nodes.append(node.metadata.name)
|
||||
return nodes
|
||||
|
||||
|
||||
# List nodes in the cluster that can be killed
|
||||
def list_killable_nodes(label_selector=None):
|
||||
nodes = []
|
||||
try:
|
||||
if label_selector:
|
||||
ret = cli.list_node(pretty=True, label_selector=label_selector)
|
||||
else:
|
||||
ret = cli.list_node(pretty=True)
|
||||
except ApiException as e:
|
||||
logging.error("Exception when calling CoreV1Api->list_node: %s\n" % e)
|
||||
raise e
|
||||
for node in ret.items:
|
||||
if kraken_node_name != node.metadata.name:
|
||||
for cond in node.status.conditions:
|
||||
if str(cond.type) == "Ready" and str(cond.status) == "True":
|
||||
nodes.append(node.metadata.name)
|
||||
return nodes
|
||||
|
||||
|
||||
# List managedclusters attached to the hub that can be killed
|
||||
def list_killable_managedclusters(label_selector=None):
|
||||
managedclusters = []
|
||||
try:
|
||||
ret = custom_object_client.list_cluster_custom_object(
|
||||
group="cluster.open-cluster-management.io",
|
||||
version="v1",
|
||||
plural="managedclusters",
|
||||
label_selector=label_selector
|
||||
)
|
||||
except ApiException as e:
|
||||
logging.error("Exception when calling CustomObjectsApi->list_cluster_custom_object: %s\n" % e)
|
||||
raise e
|
||||
for managedcluster in ret['items']:
|
||||
conditions = managedcluster['status']['conditions']
|
||||
available = list(filter(lambda condition: condition['reason'] == 'ManagedClusterAvailable', conditions))
|
||||
if available and available[0]['status'] == 'True':
|
||||
managedclusters.append(managedcluster['metadata']['name'])
|
||||
return managedclusters
|
||||
|
||||
# List pods in the given namespace
|
||||
def list_pods(namespace, label_selector=None):
|
||||
pods = []
|
||||
try:
|
||||
if label_selector:
|
||||
ret = cli.list_namespaced_pod(
|
||||
namespace,
|
||||
pretty=True,
|
||||
label_selector=label_selector
|
||||
)
|
||||
else:
|
||||
ret = cli.list_namespaced_pod(namespace, pretty=True)
|
||||
except ApiException as e:
|
||||
logging.error(
|
||||
"Exception when calling \
|
||||
CoreV1Api->list_namespaced_pod: %s\n"
|
||||
% e
|
||||
)
|
||||
raise e
|
||||
for pod in ret.items:
|
||||
pods.append(pod.metadata.name)
|
||||
return pods
|
||||
|
||||
|
||||
def get_all_pods(label_selector=None):
|
||||
pods = []
|
||||
if label_selector:
|
||||
ret = cli.list_pod_for_all_namespaces(
|
||||
pretty=True,
|
||||
label_selector=label_selector
|
||||
)
|
||||
else:
|
||||
ret = cli.list_pod_for_all_namespaces(pretty=True)
|
||||
for pod in ret.items:
|
||||
pods.append([pod.metadata.name, pod.metadata.namespace])
|
||||
return pods
|
||||
|
||||
|
||||
# Execute command in pod
|
||||
def exec_cmd_in_pod(
|
||||
command,
|
||||
pod_name,
|
||||
namespace,
|
||||
container=None,
|
||||
base_command="bash"
|
||||
):
|
||||
|
||||
exec_command = [base_command, "-c", command]
|
||||
try:
|
||||
if container:
|
||||
ret = stream(
|
||||
cli.connect_get_namespaced_pod_exec,
|
||||
pod_name,
|
||||
namespace,
|
||||
container=container,
|
||||
command=exec_command,
|
||||
stderr=True,
|
||||
stdin=False,
|
||||
stdout=True,
|
||||
tty=False,
|
||||
)
|
||||
else:
|
||||
ret = stream(
|
||||
cli.connect_get_namespaced_pod_exec,
|
||||
pod_name,
|
||||
namespace,
|
||||
command=exec_command,
|
||||
stderr=True,
|
||||
stdin=False,
|
||||
stdout=True,
|
||||
tty=False,
|
||||
)
|
||||
except Exception:
|
||||
return False
|
||||
return ret
|
||||
|
||||
|
||||
def delete_pod(name, namespace):
|
||||
try:
|
||||
cli.delete_namespaced_pod(name=name, namespace=namespace)
|
||||
while cli.read_namespaced_pod(name=name, namespace=namespace):
|
||||
time.sleep(1)
|
||||
except ApiException as e:
|
||||
if e.status == 404:
|
||||
logging.info("Pod already deleted")
|
||||
else:
|
||||
logging.error("Failed to delete pod %s" % e)
|
||||
raise e
|
||||
|
||||
|
||||
def create_pod(body, namespace, timeout=120):
|
||||
try:
|
||||
pod_stat = None
|
||||
pod_stat = cli.create_namespaced_pod(body=body, namespace=namespace)
|
||||
end_time = time.time() + timeout
|
||||
while True:
|
||||
pod_stat = cli.read_namespaced_pod(
|
||||
name=body["metadata"]["name"],
|
||||
namespace=namespace
|
||||
)
|
||||
if pod_stat.status.phase == "Running":
|
||||
break
|
||||
if time.time() > end_time:
|
||||
raise Exception("Starting pod failed")
|
||||
time.sleep(1)
|
||||
except Exception as e:
|
||||
logging.error("Pod creation failed %s" % e)
|
||||
if pod_stat:
|
||||
logging.error(pod_stat.status.container_statuses)
|
||||
delete_pod(body["metadata"]["name"], namespace)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def read_pod(name, namespace="default"):
|
||||
return cli.read_namespaced_pod(name=name, namespace=namespace)
|
||||
|
||||
|
||||
def get_pod_log(name, namespace="default"):
|
||||
return cli.read_namespaced_pod_log(
|
||||
name=name,
|
||||
namespace=namespace,
|
||||
_return_http_data_only=True,
|
||||
_preload_content=False
|
||||
)
|
||||
|
||||
|
||||
def get_containers_in_pod(pod_name, namespace):
|
||||
pod_info = cli.read_namespaced_pod(pod_name, namespace)
|
||||
container_names = []
|
||||
|
||||
for cont in pod_info.spec.containers:
|
||||
container_names.append(cont.name)
|
||||
return container_names
|
||||
|
||||
|
||||
def delete_job(name, namespace="default"):
|
||||
try:
|
||||
api_response = batch_cli.delete_namespaced_job(
|
||||
name=name,
|
||||
namespace=namespace,
|
||||
body=client.V1DeleteOptions(
|
||||
propagation_policy="Foreground",
|
||||
grace_period_seconds=0
|
||||
),
|
||||
)
|
||||
logging.debug("Job deleted. status='%s'" % str(api_response.status))
|
||||
return api_response
|
||||
except ApiException as api:
|
||||
logging.warning(
|
||||
"Exception when calling \
|
||||
BatchV1Api->create_namespaced_job: %s"
|
||||
% api
|
||||
)
|
||||
logging.warn("Job already deleted\n")
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Exception when calling \
|
||||
BatchV1Api->delete_namespaced_job: %s\n"
|
||||
% e
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def create_job(body, namespace="default"):
|
||||
try:
|
||||
api_response = batch_cli.create_namespaced_job(
|
||||
body=body,
|
||||
namespace=namespace
|
||||
)
|
||||
return api_response
|
||||
except ApiException as api:
|
||||
logging.warning(
|
||||
"Exception when calling \
|
||||
BatchV1Api->create_job: %s"
|
||||
% api
|
||||
)
|
||||
if api.status == 409:
|
||||
logging.warn("Job already present")
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Exception when calling \
|
||||
BatchV1Api->create_namespaced_job: %s"
|
||||
% e
|
||||
)
|
||||
raise
|
||||
|
||||
|
||||
def create_manifestwork(body, namespace):
|
||||
try:
|
||||
api_response = custom_object_client.create_namespaced_custom_object(
|
||||
group="work.open-cluster-management.io",
|
||||
version="v1",
|
||||
plural="manifestworks",
|
||||
body=body,
|
||||
namespace=namespace
|
||||
)
|
||||
return api_response
|
||||
except ApiException as e:
|
||||
print("Exception when calling CustomObjectsApi->create_namespaced_custom_object: %s\n" % e)
|
||||
|
||||
|
||||
def delete_manifestwork(namespace):
|
||||
try:
|
||||
api_response = custom_object_client.delete_namespaced_custom_object(
|
||||
group="work.open-cluster-management.io",
|
||||
version="v1",
|
||||
plural="manifestworks",
|
||||
name="managedcluster-scenarios-template",
|
||||
namespace=namespace
|
||||
)
|
||||
return api_response
|
||||
except ApiException as e:
|
||||
print("Exception when calling CustomObjectsApi->delete_namespaced_custom_object: %s\n" % e)
|
||||
|
||||
def get_job_status(name, namespace="default"):
|
||||
try:
|
||||
return batch_cli.read_namespaced_job_status(
|
||||
name=name,
|
||||
namespace=namespace
|
||||
)
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Exception when calling \
|
||||
BatchV1Api->read_namespaced_job_status: %s"
|
||||
% e
|
||||
)
|
||||
raise
|
||||
|
||||
|
||||
# Monitor the status of the cluster nodes and set the status to true or false
|
||||
def monitor_nodes():
|
||||
nodes = list_nodes()
|
||||
notready_nodes = []
|
||||
node_kerneldeadlock_status = "False"
|
||||
for node in nodes:
|
||||
try:
|
||||
node_info = cli.read_node_status(node, pretty=True)
|
||||
except ApiException as e:
|
||||
logging.error(
|
||||
"Exception when calling \
|
||||
CoreV1Api->read_node_status: %s\n"
|
||||
% e
|
||||
)
|
||||
raise e
|
||||
for condition in node_info.status.conditions:
|
||||
if condition.type == "KernelDeadlock":
|
||||
node_kerneldeadlock_status = condition.status
|
||||
elif condition.type == "Ready":
|
||||
node_ready_status = condition.status
|
||||
else:
|
||||
continue
|
||||
if node_kerneldeadlock_status != "False" or node_ready_status != "True": # noqa # noqa
|
||||
notready_nodes.append(node)
|
||||
if len(notready_nodes) != 0:
|
||||
status = False
|
||||
else:
|
||||
status = True
|
||||
return status, notready_nodes
|
||||
|
||||
|
||||
# Monitor the status of the pods in the specified namespace
|
||||
# and set the status to true or false
|
||||
def monitor_namespace(namespace):
|
||||
pods = list_pods(namespace)
|
||||
notready_pods = []
|
||||
for pod in pods:
|
||||
try:
|
||||
pod_info = cli.read_namespaced_pod_status(
|
||||
pod,
|
||||
namespace,
|
||||
pretty=True
|
||||
)
|
||||
except ApiException as e:
|
||||
logging.error(
|
||||
"Exception when calling \
|
||||
CoreV1Api->read_namespaced_pod_status: %s\n"
|
||||
% e
|
||||
)
|
||||
raise e
|
||||
pod_status = pod_info.status.phase
|
||||
if (
|
||||
pod_status != "Running" and
|
||||
pod_status != "Completed" and
|
||||
pod_status != "Succeeded"
|
||||
):
|
||||
notready_pods.append(pod)
|
||||
if len(notready_pods) != 0:
|
||||
status = False
|
||||
else:
|
||||
status = True
|
||||
return status, notready_pods
|
||||
|
||||
|
||||
# Monitor component namespace
|
||||
def monitor_component(iteration, component_namespace):
|
||||
watch_component_status, failed_component_pods = \
|
||||
monitor_namespace(component_namespace)
|
||||
logging.info(
|
||||
"Iteration %s: %s: %s" % (
|
||||
iteration,
|
||||
component_namespace,
|
||||
watch_component_status
|
||||
)
|
||||
)
|
||||
return watch_component_status, failed_component_pods
|
||||
|
||||
|
||||
def apply_yaml(path, namespace='default'):
|
||||
"""
|
||||
Apply yaml config to create Kubernetes resources
|
||||
|
||||
Args:
|
||||
path (string)
|
||||
- Path to the YAML file
|
||||
namespace (string)
|
||||
- Namespace to create the resource
|
||||
|
||||
Returns:
|
||||
The object created
|
||||
"""
|
||||
|
||||
return utils.create_from_yaml(
|
||||
api_client,
|
||||
yaml_file=path,
|
||||
namespace=namespace
|
||||
)
|
||||
|
||||
|
||||
def get_pod_info(name: str, namespace: str = 'default') -> Pod:
|
||||
"""
|
||||
Function to retrieve information about a specific pod
|
||||
in a given namespace. The kubectl command is given by:
|
||||
kubectl get pods <name> -n <namespace>
|
||||
|
||||
Args:
|
||||
name (string)
|
||||
- Name of the pod
|
||||
|
||||
namespace (string)
|
||||
- Namespace to look for the pod
|
||||
|
||||
Returns:
|
||||
- Data class object of type Pod with the output of the above
|
||||
kubectl command in the given format if the pod exists
|
||||
- Returns None if the pod doesn't exist
|
||||
"""
|
||||
pod_exists = check_if_pod_exists(name=name, namespace=namespace)
|
||||
if pod_exists:
|
||||
response = cli.read_namespaced_pod(
|
||||
name=name,
|
||||
namespace=namespace,
|
||||
pretty='true'
|
||||
)
|
||||
container_list = []
|
||||
|
||||
# Create a list of containers present in the pod
|
||||
for container in response.spec.containers:
|
||||
volume_mount_list = []
|
||||
for volume_mount in container.volume_mounts:
|
||||
volume_mount_list.append(
|
||||
VolumeMount(
|
||||
name=volume_mount.name,
|
||||
mountPath=volume_mount.mount_path
|
||||
)
|
||||
)
|
||||
container_list.append(
|
||||
Container(
|
||||
name=container.name,
|
||||
image=container.image,
|
||||
volumeMounts=volume_mount_list
|
||||
)
|
||||
)
|
||||
|
||||
for i, container in enumerate(response.status.container_statuses):
|
||||
container_list[i].ready = container.ready
|
||||
|
||||
# Create a list of volumes associated with the pod
|
||||
volume_list = []
|
||||
for volume in response.spec.volumes:
|
||||
volume_name = volume.name
|
||||
pvc_name = (
|
||||
volume.persistent_volume_claim.claim_name
|
||||
if volume.persistent_volume_claim is not None
|
||||
else None
|
||||
)
|
||||
volume_list.append(Volume(name=volume_name, pvcName=pvc_name))
|
||||
|
||||
# Create the Pod data class object
|
||||
pod_info = Pod(
|
||||
name=response.metadata.name,
|
||||
podIP=response.status.pod_ip,
|
||||
namespace=response.metadata.namespace,
|
||||
containers=container_list,
|
||||
nodeName=response.spec.node_name,
|
||||
volumes=volume_list
|
||||
)
|
||||
return pod_info
|
||||
else:
|
||||
logging.error(
|
||||
"Pod '%s' doesn't exist in namespace '%s'" % (
|
||||
str(name),
|
||||
str(namespace)
|
||||
)
|
||||
)
|
||||
return None
|
||||
|
||||
|
||||
def get_litmus_chaos_object(
|
||||
kind: str,
|
||||
name: str,
|
||||
namespace: str
|
||||
) -> LitmusChaosObject:
|
||||
"""
|
||||
Function that returns an object of a custom resource type of
|
||||
the litmus project. Currently, only ChaosEngine and ChaosResult
|
||||
objects are supported.
|
||||
|
||||
Args:
|
||||
kind (string)
|
||||
- The custom resource type
|
||||
|
||||
namespace (string)
|
||||
- Namespace where the custom object is present
|
||||
|
||||
Returns:
|
||||
Data class object of a subclass of LitmusChaosObject
|
||||
"""
|
||||
|
||||
group = 'litmuschaos.io'
|
||||
version = 'v1alpha1'
|
||||
|
||||
if kind.lower() == 'chaosengine':
|
||||
plural = 'chaosengines'
|
||||
response = custom_object_client.get_namespaced_custom_object(
|
||||
group=group,
|
||||
plural=plural,
|
||||
version=version,
|
||||
namespace=namespace,
|
||||
name=name
|
||||
)
|
||||
try:
|
||||
engine_status = response['status']['engineStatus']
|
||||
exp_status = response['status']['experiments'][0]['status']
|
||||
except Exception:
|
||||
engine_status = 'Not Initialized'
|
||||
exp_status = 'Not Initialized'
|
||||
custom_object = ChaosEngine(
|
||||
kind='ChaosEngine',
|
||||
group=group,
|
||||
namespace=namespace,
|
||||
name=name,
|
||||
plural=plural,
|
||||
version=version,
|
||||
engineStatus=engine_status,
|
||||
expStatus=exp_status
|
||||
)
|
||||
elif kind.lower() == 'chaosresult':
|
||||
plural = 'chaosresults'
|
||||
response = custom_object_client.get_namespaced_custom_object(
|
||||
group=group,
|
||||
plural=plural,
|
||||
version=version,
|
||||
namespace=namespace,
|
||||
name=name
|
||||
)
|
||||
try:
|
||||
verdict = response['status']['experimentStatus']['verdict']
|
||||
fail_step = response['status']['experimentStatus']['failStep']
|
||||
except Exception:
|
||||
verdict = 'N/A'
|
||||
fail_step = 'N/A'
|
||||
custom_object = ChaosResult(
|
||||
kind='ChaosResult',
|
||||
group=group,
|
||||
namespace=namespace,
|
||||
name=name,
|
||||
plural=plural,
|
||||
version=version,
|
||||
verdict=verdict,
|
||||
failStep=fail_step
|
||||
)
|
||||
else:
|
||||
logging.error("Invalid litmus chaos custom resource name")
|
||||
custom_object = None
|
||||
return custom_object
|
||||
|
||||
|
||||
def check_if_namespace_exists(name: str) -> bool:
|
||||
"""
|
||||
Function that checks if a namespace exists by parsing through
|
||||
the list of projects.
|
||||
Args:
|
||||
name (string)
|
||||
- Namespace name
|
||||
|
||||
Returns:
|
||||
Boolean value indicating whether the namespace exists or not
|
||||
"""
|
||||
|
||||
v1_projects = dyn_client.resources.get(
|
||||
api_version='project.openshift.io/v1',
|
||||
kind='Project'
|
||||
)
|
||||
project_list = v1_projects.get()
|
||||
return True if name in str(project_list) else False
|
||||
|
||||
|
||||
def check_if_pod_exists(name: str, namespace: str) -> bool:
|
||||
"""
|
||||
Function that checks if a pod exists in the given namespace
|
||||
Args:
|
||||
name (string)
|
||||
- Pod name
|
||||
|
||||
namespace (string)
|
||||
- Namespace name
|
||||
|
||||
Returns:
|
||||
Boolean value indicating whether the pod exists or not
|
||||
"""
|
||||
|
||||
namespace_exists = check_if_namespace_exists(namespace)
|
||||
if namespace_exists:
|
||||
pod_list = list_pods(namespace=namespace)
|
||||
if name in pod_list:
|
||||
return True
|
||||
else:
|
||||
logging.error("Namespace '%s' doesn't exist" % str(namespace))
|
||||
return False
|
||||
|
||||
|
||||
def check_if_pvc_exists(name: str, namespace: str) -> bool:
|
||||
"""
|
||||
Function that checks if a Persistent Volume Claim exists in the
|
||||
given namespace.
|
||||
Args:
|
||||
name (string)
|
||||
- PVC name
|
||||
|
||||
namespace (string)
|
||||
- Namespace name
|
||||
|
||||
Returns:
|
||||
Boolean value indicating whether the Persistent Volume Claim
|
||||
exists or not.
|
||||
"""
|
||||
namespace_exists = check_if_namespace_exists(namespace)
|
||||
if namespace_exists:
|
||||
response = cli.list_namespaced_persistent_volume_claim(
|
||||
namespace=namespace
|
||||
)
|
||||
pvc_list = [pvc.metadata.name for pvc in response.items]
|
||||
if name in pvc_list:
|
||||
return True
|
||||
else:
|
||||
logging.error("Namespace '%s' doesn't exist" % str(namespace))
|
||||
return False
|
||||
|
||||
|
||||
def get_pvc_info(name: str, namespace: str) -> PVC:
|
||||
"""
|
||||
Function to retrieve information about a Persistent Volume Claim in a
|
||||
given namespace
|
||||
|
||||
Args:
|
||||
name (string)
|
||||
- Name of the persistent volume claim
|
||||
|
||||
namespace (string)
|
||||
- Namespace where the persistent volume claim is present
|
||||
|
||||
Returns:
|
||||
- A PVC data class containing the name, capacity, volume name,
|
||||
namespace and associated pod names of the PVC if the PVC exists
|
||||
- Returns None if the PVC doesn't exist
|
||||
"""
|
||||
|
||||
pvc_exists = check_if_pvc_exists(name=name, namespace=namespace)
|
||||
if pvc_exists:
|
||||
pvc_info_response = cli.read_namespaced_persistent_volume_claim(
|
||||
name=name,
|
||||
namespace=namespace,
|
||||
pretty=True
|
||||
)
|
||||
pod_list_response = cli.list_namespaced_pod(namespace=namespace)
|
||||
|
||||
capacity = pvc_info_response.status.capacity['storage']
|
||||
volume_name = pvc_info_response.spec.volume_name
|
||||
|
||||
# Loop through all pods in the namespace to find associated PVCs
|
||||
pvc_pod_list = []
|
||||
for pod in pod_list_response.items:
|
||||
for volume in pod.spec.volumes:
|
||||
if (
|
||||
volume.persistent_volume_claim is not None
|
||||
and volume.persistent_volume_claim.claim_name == name
|
||||
):
|
||||
pvc_pod_list.append(pod.metadata.name)
|
||||
|
||||
pvc_info = PVC(
|
||||
name=name,
|
||||
capacity=capacity,
|
||||
volumeName=volume_name,
|
||||
podNames=pvc_pod_list,
|
||||
namespace=namespace
|
||||
)
|
||||
return pvc_info
|
||||
else:
|
||||
logging.error(
|
||||
"PVC '%s' doesn't exist in namespace '%s'" % (
|
||||
str(name),
|
||||
str(namespace)
|
||||
)
|
||||
)
|
||||
return None
|
||||
|
||||
|
||||
# Find the node kraken is deployed on
|
||||
# Set global kraken node to not delete
|
||||
def find_kraken_node():
|
||||
pods = get_all_pods()
|
||||
kraken_pod_name = None
|
||||
for pod in pods:
|
||||
if "kraken-deployment" in pod[0]:
|
||||
kraken_pod_name = pod[0]
|
||||
kraken_project = pod[1]
|
||||
break
|
||||
# have to switch to proper project
|
||||
|
||||
if kraken_pod_name:
|
||||
# get kraken-deployment pod, find node name
|
||||
try:
|
||||
node_name = get_pod_info(kraken_pod_name, kraken_project).nodeName
|
||||
global kraken_node_name
|
||||
kraken_node_name = node_name
|
||||
except Exception as e:
|
||||
logging.info("%s" % (e))
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
# Watch for a specific node status
|
||||
def watch_node_status(node, status, timeout, resource_version):
|
||||
count = timeout
|
||||
for event in watch_resource.stream(
|
||||
cli.list_node,
|
||||
field_selector=f"metadata.name={node}",
|
||||
timeout_seconds=timeout,
|
||||
resource_version=f"{resource_version}"
|
||||
):
|
||||
conditions = [
|
||||
status
|
||||
for status in event["object"].status.conditions
|
||||
if status.type == "Ready"
|
||||
]
|
||||
if conditions[0].status == status:
|
||||
watch_resource.stop()
|
||||
break
|
||||
else:
|
||||
count -= 1
|
||||
logging.info(
|
||||
"Status of node " + node + ": " + str(conditions[0].status)
|
||||
)
|
||||
if not count:
|
||||
watch_resource.stop()
|
||||
|
||||
|
||||
# Watch for a specific managedcluster status
|
||||
# TODO: Implement this with a watcher instead of polling
|
||||
def watch_managedcluster_status(managedcluster, status, timeout):
|
||||
elapsed_time = 0
|
||||
while True:
|
||||
conditions = custom_object_client.get_cluster_custom_object_status(
|
||||
"cluster.open-cluster-management.io", "v1", "managedclusters", managedcluster
|
||||
)['status']['conditions']
|
||||
available = list(filter(lambda condition: condition['reason'] == 'ManagedClusterAvailable', conditions))
|
||||
if status == "True":
|
||||
if available and available[0]['status'] == "True":
|
||||
logging.info("Status of managedcluster " + managedcluster + ": Available")
|
||||
return True
|
||||
else:
|
||||
if not available:
|
||||
logging.info("Status of managedcluster " + managedcluster + ": Unavailable")
|
||||
return True
|
||||
time.sleep(2)
|
||||
elapsed_time += 2
|
||||
if elapsed_time >= timeout:
|
||||
logging.info("Timeout waiting for managedcluster " + managedcluster + " to become: " + status)
|
||||
return False
|
||||
|
||||
|
||||
# Get the resource version for the specified node
|
||||
def get_node_resource_version(node):
|
||||
return cli.read_node(name=node).metadata.resource_version
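# Illustrative usage sketch (node name is hypothetical): the watch helpers above are
# typically combined as
#
#   version = get_node_resource_version("worker-0")
#   watch_node_status("worker-0", "False", 300, version)
#
# i.e. capture the node's current resourceVersion first so the watch only reports
# Ready-condition changes that happen after the chaos injection starts.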
|
||||
@@ -1,74 +0,0 @@
|
||||
from dataclasses import dataclass
|
||||
from typing import List
|
||||
|
||||
|
||||
@dataclass(frozen=True, order=False)
|
||||
class Volume:
|
||||
"""Data class to hold information regarding volumes in a pod"""
|
||||
name: str
|
||||
pvcName: str
|
||||
|
||||
|
||||
@dataclass(order=False)
|
||||
class VolumeMount:
|
||||
"""Data class to hold information regarding volume mounts"""
|
||||
name: str
|
||||
mountPath: str
|
||||
|
||||
|
||||
@dataclass(frozen=True, order=False)
|
||||
class PVC:
|
||||
"""Data class to hold information regarding persistent volume claims"""
|
||||
name: str
|
||||
capacity: str
|
||||
volumeName: str
|
||||
podNames: List[str]
|
||||
namespace: str
|
||||
|
||||
|
||||
@dataclass(order=False)
|
||||
class Container:
|
||||
"""Data class to hold information regarding containers in a pod"""
|
||||
image: str
|
||||
name: str
|
||||
volumeMounts: List[VolumeMount]
|
||||
ready: bool = False
|
||||
|
||||
|
||||
@dataclass(frozen=True, order=False)
|
||||
class Pod:
|
||||
"""Data class to hold information regarding a pod"""
|
||||
name: str
|
||||
podIP: str
|
||||
namespace: str
|
||||
containers: List[Container]
|
||||
nodeName: str
|
||||
volumes: List[Volume]
|
||||
|
||||
|
||||
@dataclass(frozen=True, order=False)
|
||||
class LitmusChaosObject:
|
||||
"""Data class to hold information regarding a custom object of litmus project"""
|
||||
kind: str
|
||||
group: str
|
||||
namespace: str
|
||||
name: str
|
||||
plural: str
|
||||
version: str
|
||||
|
||||
|
||||
@dataclass(frozen=True, order=False)
|
||||
class ChaosEngine(LitmusChaosObject):
|
||||
"""Data class to hold information regarding a ChaosEngine object"""
|
||||
engineStatus: str
|
||||
expStatus: str
|
||||
|
||||
|
||||
@dataclass(frozen=True, order=False)
|
||||
class ChaosResult(LitmusChaosObject):
|
||||
"""Data class to hold information regarding a ChaosResult object"""
|
||||
verdict: str
|
||||
failStep: str
|
||||
|
||||
|
||||
|
||||
@@ -1,68 +0,0 @@
|
||||
apiVersion: work.open-cluster-management.io/v1
|
||||
kind: ManifestWork
|
||||
metadata:
|
||||
namespace: {{managedcluster_name}}
|
||||
name: managedcluster-scenarios-template
|
||||
spec:
|
||||
workload:
|
||||
manifests:
|
||||
- apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: scale-deploy
|
||||
namespace: open-cluster-management
|
||||
rules:
|
||||
- apiGroups: ["apps"]
|
||||
resources: ["deployments/scale"]
|
||||
verbs: ["patch"]
|
||||
- apiGroups: ["apps"]
|
||||
resources: ["deployments"]
|
||||
verbs: ["get"]
|
||||
- apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: scale-deploy-to-sa
|
||||
namespace: open-cluster-management
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: internal-kubectl
|
||||
namespace: open-cluster-management
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: scale-deploy
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
- apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: scale-deploy-to-sa
|
||||
namespace: open-cluster-management-agent
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: internal-kubectl
|
||||
namespace: open-cluster-management
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: scale-deploy
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
- apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: internal-kubectl
|
||||
namespace: open-cluster-management
|
||||
- apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: managedcluster-scenarios-template
|
||||
namespace: open-cluster-management
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
serviceAccountName: internal-kubectl
|
||||
containers:
|
||||
- name: kubectl
|
||||
image: quay.io/sighup/kubectl-kustomize:1.21.6_3.9.1
|
||||
command: ["/bin/sh", "-c"]
|
||||
args:
|
||||
- {{args}}
|
||||
restartPolicy: Never
|
||||
backoffLimit: 0
|
||||
@@ -1,78 +0,0 @@
|
||||
import yaml
|
||||
import logging
|
||||
import time
|
||||
from kraken.managedcluster_scenarios.managedcluster_scenarios import managedcluster_scenarios
|
||||
import kraken.managedcluster_scenarios.common_managedcluster_functions as common_managedcluster_functions
|
||||
import kraken.cerberus.setup as cerberus
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
from krkn_lib.utils.functions import get_yaml_item_value
|
||||
|
||||
# Get the managedcluster scenarios object of specified cloud type
|
||||
# krkn_lib
|
||||
def get_managedcluster_scenario_object(managedcluster_scenario, kubecli: KrknKubernetes):
|
||||
return managedcluster_scenarios(kubecli)
|
||||
|
||||
# Run defined scenarios
|
||||
# krkn_lib
|
||||
def run(scenarios_list, config, wait_duration, kubecli: KrknKubernetes):
|
||||
for managedcluster_scenario_config in scenarios_list:
|
||||
with open(managedcluster_scenario_config, "r") as f:
|
||||
managedcluster_scenario_config = yaml.full_load(f)
|
||||
for managedcluster_scenario in managedcluster_scenario_config["managedcluster_scenarios"]:
|
||||
managedcluster_scenario_object = get_managedcluster_scenario_object(managedcluster_scenario, kubecli)
|
||||
if managedcluster_scenario["actions"]:
|
||||
for action in managedcluster_scenario["actions"]:
|
||||
start_time = int(time.time())
|
||||
inject_managedcluster_scenario(action, managedcluster_scenario, managedcluster_scenario_object, kubecli)
|
||||
logging.info("Waiting for the specified duration: %s" % (wait_duration))
|
||||
time.sleep(wait_duration)
|
||||
end_time = int(time.time())
|
||||
cerberus.get_status(config, start_time, end_time)
|
||||
logging.info("")
|
||||
|
||||
|
||||
# Inject the specified managedcluster scenario
|
||||
# krkn_lib
|
||||
def inject_managedcluster_scenario(action, managedcluster_scenario, managedcluster_scenario_object, kubecli: KrknKubernetes):
|
||||
# Get the managedcluster scenario configurations
|
||||
run_kill_count = get_yaml_item_value(
|
||||
managedcluster_scenario, "runs", 1
|
||||
)
|
||||
instance_kill_count = get_yaml_item_value(
|
||||
managedcluster_scenario, "instance_count", 1
|
||||
)
|
||||
managedcluster_name = get_yaml_item_value(
|
||||
managedcluster_scenario, "managedcluster_name", ""
|
||||
)
|
||||
label_selector = get_yaml_item_value(
|
||||
managedcluster_scenario, "label_selector", ""
|
||||
)
|
||||
timeout = get_yaml_item_value(managedcluster_scenario, "timeout", 120)
|
||||
# Get the managedcluster to apply the scenario
|
||||
if managedcluster_name:
|
||||
managedcluster_name_list = managedcluster_name.split(",")
|
||||
else:
|
||||
managedcluster_name_list = [managedcluster_name]
|
||||
for single_managedcluster_name in managedcluster_name_list:
|
||||
managedclusters = common_managedcluster_functions.get_managedcluster(single_managedcluster_name, label_selector, instance_kill_count, kubecli)
|
||||
for single_managedcluster in managedclusters:
|
||||
if action == "managedcluster_start_scenario":
|
||||
managedcluster_scenario_object.managedcluster_start_scenario(run_kill_count, single_managedcluster, timeout)
|
||||
elif action == "managedcluster_stop_scenario":
|
||||
managedcluster_scenario_object.managedcluster_stop_scenario(run_kill_count, single_managedcluster, timeout)
|
||||
elif action == "managedcluster_stop_start_scenario":
|
||||
managedcluster_scenario_object.managedcluster_stop_start_scenario(run_kill_count, single_managedcluster, timeout)
|
||||
elif action == "managedcluster_termination_scenario":
|
||||
managedcluster_scenario_object.managedcluster_termination_scenario(run_kill_count, single_managedcluster, timeout)
|
||||
elif action == "managedcluster_reboot_scenario":
|
||||
managedcluster_scenario_object.managedcluster_reboot_scenario(run_kill_count, single_managedcluster, timeout)
|
||||
elif action == "stop_start_klusterlet_scenario":
|
||||
managedcluster_scenario_object.stop_start_klusterlet_scenario(run_kill_count, single_managedcluster, timeout)
|
||||
elif action == "start_klusterlet_scenario":
|
||||
managedcluster_scenario_object.start_klusterlet_scenario(run_kill_count, single_managedcluster, timeout)
|
||||
elif action == "stop_klusterlet_scenario":
|
||||
managedcluster_scenario_object.stop_klusterlet_scenario(run_kill_count, single_managedcluster, timeout)
|
||||
elif action == "managedcluster_crash_scenario":
|
||||
managedcluster_scenario_object.managedcluster_crash_scenario(run_kill_count, single_managedcluster, timeout)
|
||||
else:
|
||||
logging.info("There is no managedcluster action that matches %s, skipping scenario" % action)
|
||||
@@ -1,210 +0,0 @@
|
||||
import yaml
|
||||
import logging
|
||||
import time
|
||||
import os
|
||||
import random
|
||||
import kraken.cerberus.setup as cerberus
|
||||
import kraken.node_actions.common_node_functions as common_node_functions
|
||||
from jinja2 import Environment, FileSystemLoader
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
|
||||
from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from krkn_lib.utils.functions import get_yaml_item_value, log_exception
|
||||
|
||||
|
||||
# krkn_lib
|
||||
# Reads the scenario config and introduces traffic variations in Node's host network interface.
|
||||
def run(scenarios_list, config, wait_duration, kubecli: KrknKubernetes, telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]):
|
||||
failed_post_scenarios = ""
|
||||
logging.info("Runing the Network Chaos tests")
|
||||
failed_post_scenarios = ""
|
||||
scenario_telemetries: list[ScenarioTelemetry] = []
|
||||
failed_scenarios = []
|
||||
for net_config in scenarios_list:
|
||||
scenario_telemetry = ScenarioTelemetry()
|
||||
scenario_telemetry.scenario = net_config
|
||||
scenario_telemetry.startTimeStamp = time.time()
|
||||
telemetry.set_parameters_base64(scenario_telemetry, net_config)
|
||||
try:
|
||||
with open(net_config, "r") as file:
|
||||
param_lst = ["latency", "loss", "bandwidth"]
|
||||
test_config = yaml.safe_load(file)
|
||||
test_dict = test_config["network_chaos"]
|
||||
test_duration = int(
|
||||
get_yaml_item_value(test_dict, "duration", 300)
|
||||
)
|
||||
test_interface = get_yaml_item_value(
|
||||
test_dict, "interfaces", []
|
||||
)
|
||||
test_node = get_yaml_item_value(test_dict, "node_name", "")
|
||||
test_node_label = get_yaml_item_value(
|
||||
test_dict, "label_selector",
|
||||
"node-role.kubernetes.io/master"
|
||||
)
|
||||
test_execution = get_yaml_item_value(
|
||||
test_dict, "execution", "serial"
|
||||
)
|
||||
test_instance_count = get_yaml_item_value(
|
||||
test_dict, "instance_count", 1
|
||||
)
|
||||
test_egress = get_yaml_item_value(
|
||||
test_dict, "egress", {"bandwidth": "100mbit"}
|
||||
)
|
||||
if test_node:
|
||||
node_name_list = test_node.split(",")
|
||||
else:
|
||||
node_name_list = [test_node]
|
||||
nodelst = []
|
||||
for single_node_name in node_name_list:
|
||||
nodelst.extend(common_node_functions.get_node(single_node_name, test_node_label, test_instance_count, kubecli))
|
||||
file_loader = FileSystemLoader(os.path.abspath(os.path.dirname(__file__)))
|
||||
env = Environment(loader=file_loader, autoescape=True)
|
||||
pod_template = env.get_template("pod.j2")
|
||||
test_interface = verify_interface(test_interface, nodelst, pod_template, kubecli)
|
||||
joblst = []
|
||||
egress_lst = [i for i in param_lst if i in test_egress]
|
||||
chaos_config = {
|
||||
"network_chaos": {
|
||||
"duration": test_duration,
|
||||
"interfaces": test_interface,
|
||||
"node_name": ",".join(nodelst),
|
||||
"execution": test_execution,
|
||||
"instance_count": test_instance_count,
|
||||
"egress": test_egress,
|
||||
}
|
||||
}
|
||||
logging.info("Executing network chaos with config \n %s" % yaml.dump(chaos_config))
|
||||
job_template = env.get_template("job.j2")
|
||||
try:
|
||||
for i in egress_lst:
|
||||
for node in nodelst:
|
||||
exec_cmd = get_egress_cmd(
|
||||
test_execution, test_interface, i, test_dict["egress"], duration=test_duration
|
||||
)
|
||||
logging.info("Executing %s on node %s" % (exec_cmd, node))
|
||||
job_body = yaml.safe_load(
|
||||
job_template.render(jobname=i + str(hash(node))[:5], nodename=node, cmd=exec_cmd)
|
||||
)
|
||||
joblst.append(job_body["metadata"]["name"])
|
||||
api_response = kubecli.create_job(job_body)
|
||||
if api_response is None:
|
||||
raise Exception("Error creating job")
|
||||
if test_execution == "serial":
|
||||
logging.info("Waiting for serial job to finish")
|
||||
start_time = int(time.time())
|
||||
wait_for_job(joblst[:], kubecli, test_duration + 300)
|
||||
logging.info("Waiting for wait_duration %s" % wait_duration)
|
||||
time.sleep(wait_duration)
|
||||
end_time = int(time.time())
|
||||
cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
|
||||
if test_execution == "parallel":
|
||||
break
|
||||
if test_execution == "parallel":
|
||||
logging.info("Waiting for parallel job to finish")
|
||||
start_time = int(time.time())
|
||||
wait_for_job(joblst[:], kubecli, test_duration + 300)
|
||||
logging.info("Waiting for wait_duration %s" % wait_duration)
|
||||
time.sleep(wait_duration)
|
||||
end_time = int(time.time())
|
||||
cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
|
||||
except Exception as e:
|
||||
logging.error("Network Chaos exiting due to Exception %s" % e)
|
||||
raise RuntimeError()
|
||||
finally:
|
||||
logging.info("Deleting jobs")
|
||||
delete_job(joblst[:], kubecli)
|
||||
except (RuntimeError, Exception):
|
||||
scenario_telemetry.exitStatus = 1
|
||||
failed_scenarios.append(net_config)
|
||||
log_exception(net_config)
|
||||
else:
|
||||
scenario_telemetry.exitStatus = 0
|
||||
scenario_telemetries.append(scenario_telemetry)
|
||||
return failed_scenarios, scenario_telemetries
|
||||
|
||||
|
||||
# krkn_lib
|
||||
def verify_interface(test_interface, nodelst, template, kubecli: KrknKubernetes):
|
||||
pod_index = random.randint(0, len(nodelst) - 1)
|
||||
pod_body = yaml.safe_load(template.render(nodename=nodelst[pod_index]))
|
||||
logging.info("Creating pod to query interface on node %s" % nodelst[pod_index])
|
||||
kubecli.create_pod(pod_body, "default", 300)
|
||||
try:
|
||||
if test_interface == []:
|
||||
cmd = "ip r | grep default | awk '/default/ {print $5}'"
|
||||
output = kubecli.exec_cmd_in_pod(cmd, "fedtools", "default")
|
||||
test_interface = [output.replace("\n", "")]
|
||||
else:
|
||||
cmd = "ip -br addr show|awk -v ORS=',' '{print $1}'"
|
||||
output = kubecli.exec_cmd_in_pod(cmd, "fedtools", "default")
|
||||
interface_lst = output[:-1].split(",")
|
||||
for interface in test_interface:
|
||||
if interface not in interface_lst:
|
||||
logging.error("Interface %s not found in node %s interface list %s" % (interface, nodelst[pod_index], interface_lst))
|
||||
#sys.exit(1)
|
||||
raise RuntimeError()
|
||||
return test_interface
|
||||
finally:
|
||||
logging.info("Deleteing pod to query interface on node")
|
||||
kubecli.delete_pod("fedtools", "default")
|
||||
|
||||
|
||||
# krkn_lib
|
||||
def get_job_pods(api_response, kubecli: KrknKubernetes):
|
||||
controllerUid = api_response.metadata.labels["controller-uid"]
|
||||
pod_label_selector = "controller-uid=" + controllerUid
|
||||
pods_list = kubecli.list_pods(label_selector=pod_label_selector, namespace="default")
|
||||
return pods_list[0]
|
||||
|
||||
|
||||
# krkn_lib
|
||||
def wait_for_job(joblst, kubecli: KrknKubernetes, timeout=300):
|
||||
waittime = time.time() + timeout
|
||||
count = 0
|
||||
joblen = len(joblst)
|
||||
while count != joblen:
|
||||
for jobname in joblst:
|
||||
try:
|
||||
api_response = kubecli.get_job_status(jobname, namespace="default")
|
||||
if api_response.status.succeeded is not None or api_response.status.failed is not None:
|
||||
count += 1
|
||||
joblst.remove(jobname)
|
||||
except Exception:
|
||||
logging.warning("Exception in getting job status")
|
||||
if time.time() > waittime:
|
||||
raise Exception("Starting pod failed")
|
||||
time.sleep(5)
|
||||
|
||||
|
||||
# krkn_lib
|
||||
def delete_job(joblst, kubecli: KrknKubernetes):
|
||||
for jobname in joblst:
|
||||
try:
|
||||
api_response = kubecli.get_job_status(jobname, namespace="default")
|
||||
if api_response.status.failed is not None:
|
||||
pod_name = get_job_pods(api_response, kubecli)
|
||||
pod_stat = kubecli.read_pod(name=pod_name, namespace="default")
|
||||
logging.error(pod_stat.status.container_statuses)
|
||||
pod_log_response = kubecli.get_pod_log(name=pod_name, namespace="default")
|
||||
pod_log = pod_log_response.data.decode("utf-8")
|
||||
logging.error(pod_log)
|
||||
except Exception:
|
||||
logging.warning("Exception in getting job status")
|
||||
kubecli.delete_job(name=jobname, namespace="default")
|
||||
|
||||
|
||||
def get_egress_cmd(execution, test_interface, mod, vallst, duration=30):
|
||||
tc_set = tc_unset = tc_ls = ""
|
||||
param_map = {"latency": "delay", "loss": "loss", "bandwidth": "rate"}
|
||||
for i in test_interface:
|
||||
tc_set = "{0} tc qdisc add dev {1} root netem".format(tc_set, i)
|
||||
tc_unset = "{0} tc qdisc del dev {1} root ;".format(tc_unset, i)
|
||||
tc_ls = "{0} tc qdisc ls dev {1} ;".format(tc_ls, i)
|
||||
if execution == "parallel":
|
||||
for val in vallst.keys():
|
||||
tc_set += " {0} {1} ".format(param_map[val], vallst[val])
|
||||
tc_set += ";"
|
||||
else:
|
||||
tc_set += " {0} {1} ;".format(param_map[mod], vallst[mod])
|
||||
exec_cmd = "{0} {1} sleep {2};{3} sleep 20;{4}".format(tc_set, tc_ls, duration, tc_unset, tc_ls)
|
||||
return exec_cmd
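# Illustrative sketch (interface and values are hypothetical): for a serial run with
# test_interface=["ens3"], mod="latency", vallst={"latency": "50ms"} and duration=300,
# get_egress_cmd() yields roughly:
#
#   tc qdisc add dev ens3 root netem delay 50ms ; tc qdisc ls dev ens3 ; sleep 300;
#   tc qdisc del dev ens3 root ; sleep 20; tc qdisc ls dev ens3 ;
#
# i.e. apply the netem qdisc, show it, hold for the test duration, then remove it and
# list the qdisc again to confirm cleanup.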
|
||||
@@ -1,235 +0,0 @@
|
||||
import sys
|
||||
import time
|
||||
import logging
|
||||
import kraken.node_actions.common_node_functions as nodeaction
|
||||
from kraken.node_actions.abstract_node_scenarios import abstract_node_scenarios
|
||||
from googleapiclient import discovery
|
||||
from oauth2client.client import GoogleCredentials
|
||||
import kraken.invoke.command as runcommand
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
|
||||
class GCP:
|
||||
def __init__(self):
|
||||
|
||||
self.project = runcommand.invoke("gcloud config get-value project").split("\n")[0].strip()
|
||||
logging.info("project " + str(self.project) + "!")
|
||||
credentials = GoogleCredentials.get_application_default()
|
||||
self.client = discovery.build("compute", "v1", credentials=credentials, cache_discovery=False)
|
||||
|
||||
# Get the instance ID of the node
|
||||
def get_instance_id(self, node):
|
||||
zone_request = self.client.zones().list(project=self.project)
|
||||
while zone_request is not None:
|
||||
zone_response = zone_request.execute()
|
||||
for zone in zone_response["items"]:
|
||||
instances_request = self.client.instances().list(project=self.project, zone=zone["name"])
|
||||
while instances_request is not None:
|
||||
instance_response = instances_request.execute()
|
||||
if "items" in instance_response.keys():
|
||||
for instance in instance_response["items"]:
|
||||
if instance["name"] in node:
|
||||
return instance["name"], zone["name"]
|
||||
instances_request = self.client.instances().list_next(
|
||||
previous_request=instances_request, previous_response=instance_response
|
||||
)
|
||||
zone_request = self.client.zones().list_next(previous_request=zone_request, previous_response=zone_response)
|
||||
logging.info("no instances ")
|
||||
|
||||
# Start the node instance
|
||||
def start_instances(self, zone, instance_id):
|
||||
try:
|
||||
self.client.instances().start(project=self.project, zone=zone, instance=instance_id).execute()
|
||||
logging.info("vm name " + str(instance_id) + " started")
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to start node instance %s. Encountered following " "exception: %s." % (instance_id, e)
|
||||
)
|
||||
# removed_exit
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
|
||||
# Stop the node instance
|
||||
def stop_instances(self, zone, instance_id):
|
||||
try:
|
||||
self.client.instances().stop(project=self.project, zone=zone, instance=instance_id).execute()
|
||||
logging.info("vm name " + str(instance_id) + " stopped")
|
||||
except Exception as e:
|
||||
logging.error("Failed to stop node instance %s. Encountered following " "exception: %s." % (instance_id, e))
|
||||
# removed_exit
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
|
||||
# Suspend the node instance
|
||||
def suspend_instances(self, zone, instance_id):
|
||||
try:
|
||||
self.client.instances().suspend(project=self.project, zone=zone, instance=instance_id).execute()
|
||||
logging.info("vm name " + str(instance_id) + " suspended")
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to suspend node instance %s. Encountered following " "exception: %s." % (instance_id, e)
|
||||
)
|
||||
# removed_exit
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
|
||||
# Terminate the node instance
|
||||
def terminate_instances(self, zone, instance_id):
|
||||
try:
|
||||
self.client.instances().delete(project=self.project, zone=zone, instance=instance_id).execute()
|
||||
logging.info("vm name " + str(instance_id) + " terminated")
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to start node instance %s. Encountered following " "exception: %s." % (instance_id, e)
|
||||
)
|
||||
# removed_exit
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
|
||||
# Reboot the node instance
|
||||
def reboot_instances(self, zone, instance_id):
|
||||
try:
|
||||
self.client.instances().reset(project=self.project, zone=zone, instance=instance_id).execute()
|
||||
logging.info("vm name " + str(instance_id) + " rebooted")
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to start node instance %s. Encountered following " "exception: %s." % (instance_id, e)
|
||||
)
|
||||
# removed_exit
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
|
||||
# Get instance status
|
||||
def get_instance_status(self, zone, instance_id, expected_status, timeout):
|
||||
# statuses: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING,
|
||||
# and TERMINATED.
|
||||
i = 0
|
||||
sleeper = 5
|
||||
while i <= timeout:
|
||||
instStatus = self.client.instances().get(project=self.project, zone=zone, instance=instance_id).execute()
|
||||
logging.info("Status of vm " + str(instStatus["status"]))
|
||||
if instStatus["status"] == expected_status:
|
||||
return True
|
||||
time.sleep(sleeper)
|
||||
i += sleeper
|
||||
logging.error("Status of %s was not %s in %s seconds" % (instance_id, expected_status, timeout))
|
||||
return False
|
||||
|
||||
# Wait until the node instance is suspended
|
||||
def wait_until_suspended(self, zone, instance_id, timeout):
|
||||
return self.get_instance_status(zone, instance_id, "SUSPENDED", timeout)
|
||||
|
||||
# Wait until the node instance is running
|
||||
def wait_until_running(self, zone, instance_id, timeout):
|
||||
return self.get_instance_status(zone, instance_id, "RUNNING", timeout)
|
||||
|
||||
# Wait until the node instance is stopped (a stopped GCE instance reports status "TERMINATED")
|
||||
def wait_until_stopped(self, zone, instance_id, timeout):
|
||||
return self.get_instance_status(zone, instance_id, "TERMINATED", timeout)
|
||||
|
||||
# Wait until the node instance is terminated
|
||||
def wait_until_terminated(self, zone, instance_id, timeout):
|
||||
try:
|
||||
i = 0
|
||||
sleeper = 5
|
||||
while i <= timeout:
|
||||
instStatus = (
|
||||
self.client.instances().get(project=self.project, zone=zone, instance=instance_id).execute()
|
||||
)
|
||||
logging.info("Status of vm " + str(instStatus["status"]))
|
||||
time.sleep(sleeper)
|
||||
except Exception as e:
|
||||
logging.info("here " + str(e))
|
||||
return True
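# Illustrative sketch, not part of the original module: how the GCP helpers above
# compose into a stop-and-verify flow. The node name and timeout are hypothetical;
# GCE reports a stopped VM with status "TERMINATED", which is why wait_until_stopped
# polls for that value.
def _example_stop_and_wait(node_name="worker-node-0", timeout=300):
    gcp = GCP()
    instance_id, zone = gcp.get_instance_id(node_name)
    gcp.stop_instances(zone, instance_id)
    # Returns False if the instance never reaches TERMINATED within the timeout
    return gcp.wait_until_stopped(zone, instance_id, timeout)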
|
||||
|
||||
|
||||
# krkn_lib
|
||||
class gcp_node_scenarios(abstract_node_scenarios):
|
||||
def __init__(self, kubecli: KrknKubernetes):
|
||||
super().__init__(kubecli)
|
||||
self.gcp = GCP()
|
||||
|
||||
# Node scenario to start the node
|
||||
def node_start_scenario(self, instance_kill_count, node, timeout):
|
||||
for _ in range(instance_kill_count):
|
||||
try:
|
||||
logging.info("Starting node_start_scenario injection")
|
||||
instance_id, zone = self.gcp.get_instance_id(node)
|
||||
logging.info("Starting the node %s with instance ID: %s " % (node, instance_id))
|
||||
self.gcp.start_instances(zone, instance_id)
|
||||
self.gcp.wait_until_running(zone, instance_id, timeout)
|
||||
nodeaction.wait_for_ready_status(node, timeout, self.kubecli)
|
||||
logging.info("Node with instance ID: %s is in running state" % instance_id)
|
||||
logging.info("node_start_scenario has been successfully injected!")
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to start node instance. Encountered following " "exception: %s. Test Failed" % (e)
|
||||
)
|
||||
logging.error("node_start_scenario injection failed!")
|
||||
# removed_exit
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
|
||||
# Node scenario to stop the node
|
||||
def node_stop_scenario(self, instance_kill_count, node, timeout):
|
||||
logging.info("stop scenario")
|
||||
for _ in range(instance_kill_count):
|
||||
try:
|
||||
logging.info("Starting node_stop_scenario injection")
|
||||
instance_id, zone = self.gcp.get_instance_id(node)
|
||||
logging.info("Stopping the node %s with instance ID: %s " % (node, instance_id))
|
||||
self.gcp.stop_instances(zone, instance_id)
|
||||
self.gcp.wait_until_stopped(zone, instance_id, timeout)
|
||||
logging.info("Node with instance ID: %s is in stopped state" % instance_id)
|
||||
nodeaction.wait_for_unknown_status(node, timeout, self.kubecli)
|
||||
except Exception as e:
|
||||
logging.error("Failed to stop node instance. Encountered following exception: %s. " "Test Failed" % (e))
|
||||
logging.error("node_stop_scenario injection failed!")
|
||||
# removed_exit
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
|
||||
# Node scenario to terminate the node
|
||||
def node_termination_scenario(self, instance_kill_count, node, timeout):
|
||||
for _ in range(instance_kill_count):
|
||||
try:
|
||||
logging.info("Starting node_termination_scenario injection")
|
||||
instance_id, zone = self.gcp.get_instance_id(node)
|
||||
logging.info("Terminating the node %s with instance ID: %s " % (node, instance_id))
|
||||
self.gcp.terminate_instances(zone, instance_id)
|
||||
self.gcp.wait_until_terminated(zone, instance_id, timeout)
|
||||
for _ in range(timeout):
|
||||
if node not in self.kubecli.list_nodes():
|
||||
break
|
||||
time.sleep(1)
|
||||
if node in self.kubecli.list_nodes():
|
||||
raise Exception("Node could not be terminated")
|
||||
logging.info("Node with instance ID: %s has been terminated" % instance_id)
|
||||
logging.info("node_termination_scenario has been successfuly injected!")
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to terminate node instance. Encountered following exception:" " %s. Test Failed" % e
|
||||
)
|
||||
logging.error("node_termination_scenario injection failed!")
|
||||
# removed_exit
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
|
||||
# Node scenario to reboot the node
|
||||
def node_reboot_scenario(self, instance_kill_count, node, timeout):
|
||||
for _ in range(instance_kill_count):
|
||||
try:
|
||||
logging.info("Starting node_reboot_scenario injection")
|
||||
instance_id, zone = self.gcp.get_instance_id(node)
|
||||
logging.info("Rebooting the node %s with instance ID: %s " % (node, instance_id))
|
||||
self.gcp.reboot_instances(zone, instance_id)
|
||||
nodeaction.wait_for_ready_status(node, timeout, self.kubecli)
|
||||
logging.info("Node with instance ID: %s has been rebooted" % instance_id)
|
||||
logging.info("node_reboot_scenario has been successfuly injected!")
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to reboot node instance. Encountered following exception:" " %s. Test Failed" % (e)
|
||||
)
|
||||
logging.error("node_reboot_scenario injection failed!")
|
||||
# removed_exit
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
@@ -1,152 +0,0 @@
|
||||
import yaml
|
||||
import logging
|
||||
import sys
|
||||
import time
|
||||
from kraken.node_actions.aws_node_scenarios import aws_node_scenarios
|
||||
from kraken.node_actions.general_cloud_node_scenarios import general_node_scenarios
|
||||
from kraken.node_actions.az_node_scenarios import azure_node_scenarios
|
||||
from kraken.node_actions.gcp_node_scenarios import gcp_node_scenarios
|
||||
from kraken.node_actions.openstack_node_scenarios import openstack_node_scenarios
|
||||
from kraken.node_actions.alibaba_node_scenarios import alibaba_node_scenarios
|
||||
from kraken.node_actions.bm_node_scenarios import bm_node_scenarios
|
||||
from kraken.node_actions.docker_node_scenarios import docker_node_scenarios
|
||||
import kraken.node_actions.common_node_functions as common_node_functions
|
||||
import kraken.cerberus.setup as cerberus
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
|
||||
from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from krkn_lib.utils.functions import get_yaml_item_value
|
||||
|
||||
node_general = False
|
||||
|
||||
|
||||
# Get the node scenarios object of specified cloud type
|
||||
# krkn_lib
|
||||
def get_node_scenario_object(node_scenario, kubecli: KrknKubernetes):
|
||||
if "cloud_type" not in node_scenario.keys() or node_scenario["cloud_type"] == "generic":
|
||||
global node_general
|
||||
node_general = True
|
||||
return general_node_scenarios(kubecli)
|
||||
if node_scenario["cloud_type"] == "aws":
|
||||
return aws_node_scenarios(kubecli)
|
||||
elif node_scenario["cloud_type"] == "gcp":
|
||||
return gcp_node_scenarios(kubecli)
|
||||
elif node_scenario["cloud_type"] == "openstack":
|
||||
return openstack_node_scenarios(kubecli)
|
||||
elif node_scenario["cloud_type"] == "azure" or node_scenario["cloud_type"] == "az":
|
||||
return azure_node_scenarios(kubecli)
|
||||
elif node_scenario["cloud_type"] == "alibaba" or node_scenario["cloud_type"] == "alicloud":
|
||||
return alibaba_node_scenarios(kubecli)
|
||||
elif node_scenario["cloud_type"] == "bm":
|
||||
return bm_node_scenarios(
|
||||
node_scenario.get("bmc_info"), node_scenario.get("bmc_user", None), node_scenario.get("bmc_password", None),
|
||||
kubecli
|
||||
)
|
||||
elif node_scenario["cloud_type"] == "docker":
|
||||
return docker_node_scenarios(kubecli)
|
||||
else:
|
||||
logging.error(
|
||||
"Cloud type " + node_scenario["cloud_type"] + " is not currently supported; "
|
||||
"try using 'generic' if wanting to stop/start kubelet or fork bomb on any "
|
||||
"cluster"
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
# Run defined scenarios
|
||||
# krkn_lib
|
||||
def run(scenarios_list, config, wait_duration, kubecli: KrknKubernetes, telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]):
|
||||
scenario_telemetries: list[ScenarioTelemetry] = []
|
||||
failed_scenarios = []
|
||||
for node_scenario_config in scenarios_list:
|
||||
scenario_telemetry = ScenarioTelemetry()
|
||||
scenario_telemetry.scenario = node_scenario_config
|
||||
scenario_telemetry.startTimeStamp = time.time()
|
||||
telemetry.set_parameters_base64(scenario_telemetry, node_scenario_config)
|
||||
with open(node_scenario_config, "r") as f:
|
||||
node_scenario_config = yaml.full_load(f)
|
||||
for node_scenario in node_scenario_config["node_scenarios"]:
|
||||
node_scenario_object = get_node_scenario_object(node_scenario, kubecli)
|
||||
if node_scenario["actions"]:
|
||||
for action in node_scenario["actions"]:
|
||||
start_time = int(time.time())
|
||||
try:
|
||||
inject_node_scenario(action, node_scenario, node_scenario_object, kubecli)
|
||||
logging.info("Waiting for the specified duration: %s" % (wait_duration))
|
||||
time.sleep(wait_duration)
|
||||
end_time = int(time.time())
|
||||
cerberus.get_status(config, start_time, end_time)
|
||||
logging.info("")
|
||||
except (RuntimeError, Exception) as e:
|
||||
scenario_telemetry.exitStatus = 1
|
||||
failed_scenarios.append(node_scenario_config)
|
||||
log_exception(node_scenario_config)
|
||||
else:
|
||||
scenario_telemetry.exitStatus = 0
|
||||
|
||||
scenario_telemetry.endTimeStamp = time.time()
|
||||
scenario_telemetries.append(scenario_telemetry)
|
||||
|
||||
return failed_scenarios, scenario_telemetries
|
||||
|
||||
|
||||
# Inject the specified node scenario
|
||||
def inject_node_scenario(action, node_scenario, node_scenario_object, kubecli: KrknKubernetes):
|
||||
generic_cloud_scenarios = ("stop_kubelet_scenario", "node_crash_scenario")
|
||||
# Get the node scenario configurations
|
||||
run_kill_count = get_yaml_item_value(node_scenario, "runs", 1)
|
||||
instance_kill_count = get_yaml_item_value(
|
||||
node_scenario, "instance_count", 1
|
||||
)
|
||||
node_name = get_yaml_item_value(node_scenario, "node_name", "")
|
||||
label_selector = get_yaml_item_value(node_scenario, "label_selector", "")
|
||||
timeout = get_yaml_item_value(node_scenario, "timeout", 120)
|
||||
service = get_yaml_item_value(node_scenario, "service", "")
|
||||
ssh_private_key = get_yaml_item_value(
|
||||
node_scenario, "ssh_private_key", "~/.ssh/id_rsa"
|
||||
)
|
||||
# Get the node to apply the scenario
|
||||
if node_name:
|
||||
node_name_list = node_name.split(",")
|
||||
else:
|
||||
node_name_list = [node_name]
|
||||
for single_node_name in node_name_list:
|
||||
nodes = common_node_functions.get_node(single_node_name, label_selector, instance_kill_count, kubecli)
|
||||
for single_node in nodes:
|
||||
if node_general and action not in generic_cloud_scenarios:
|
||||
logging.info("Scenario: " + action + " is not set up for generic cloud type, skipping action")
|
||||
else:
|
||||
if action == "node_start_scenario":
|
||||
node_scenario_object.node_start_scenario(run_kill_count, single_node, timeout)
|
||||
elif action == "node_stop_scenario":
|
||||
node_scenario_object.node_stop_scenario(run_kill_count, single_node, timeout)
|
||||
elif action == "node_stop_start_scenario":
|
||||
node_scenario_object.node_stop_start_scenario(run_kill_count, single_node, timeout)
|
||||
elif action == "node_termination_scenario":
|
||||
node_scenario_object.node_termination_scenario(run_kill_count, single_node, timeout)
|
||||
elif action == "node_reboot_scenario":
|
||||
node_scenario_object.node_reboot_scenario(run_kill_count, single_node, timeout)
|
||||
elif action == "stop_start_kubelet_scenario":
|
||||
node_scenario_object.stop_start_kubelet_scenario(run_kill_count, single_node, timeout)
|
||||
elif action == "stop_kubelet_scenario":
|
||||
node_scenario_object.stop_kubelet_scenario(run_kill_count, single_node, timeout)
|
||||
elif action == "node_crash_scenario":
|
||||
node_scenario_object.node_crash_scenario(run_kill_count, single_node, timeout)
|
||||
elif action == "stop_start_helper_node_scenario":
|
||||
if node_scenario["cloud_type"] != "openstack":
|
||||
logging.error(
|
||||
"Scenario: " + action + " is not supported for "
|
||||
"cloud type " + node_scenario["cloud_type"] + ", skipping action"
|
||||
)
|
||||
else:
|
||||
if not node_scenario["helper_node_ip"]:
|
||||
logging.error("Helper node IP address is not provided")
|
||||
sys.exit(1)
|
||||
node_scenario_object.helper_node_stop_start_scenario(
|
||||
run_kill_count, node_scenario["helper_node_ip"], timeout
|
||||
)
|
||||
node_scenario_object.helper_node_service_status(
|
||||
node_scenario["helper_node_ip"], service, ssh_private_key, timeout
|
||||
)
|
||||
else:
|
||||
logging.info("There is no node action that matches %s, skipping scenario" % action)
|
||||
@@ -1,318 +0,0 @@
|
||||
import dataclasses
|
||||
import json
|
||||
import logging
|
||||
from os.path import abspath
|
||||
from typing import List, Dict, Any
|
||||
import time
|
||||
|
||||
from arcaflow_plugin_sdk import schema, serialization, jsonschema
|
||||
from arcaflow_plugin_kill_pod import kill_pods, wait_for_pods
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
from krkn_lib.k8s.pods_monitor_pool import PodsMonitorPool
|
||||
|
||||
import kraken.plugins.node_scenarios.vmware_plugin as vmware_plugin
|
||||
import kraken.plugins.node_scenarios.ibmcloud_plugin as ibmcloud_plugin
|
||||
from kraken.plugins.run_python_plugin import run_python_file
|
||||
from kraken.plugins.network.ingress_shaping import network_chaos
|
||||
from kraken.plugins.pod_network_outage.pod_network_outage_plugin import pod_outage
|
||||
from kraken.plugins.pod_network_outage.pod_network_outage_plugin import pod_egress_shaping
|
||||
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
|
||||
from kraken.plugins.pod_network_outage.pod_network_outage_plugin import pod_ingress_shaping
|
||||
from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from krkn_lib.utils.functions import log_exception
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
class PluginStep:
|
||||
schema: schema.StepSchema
|
||||
error_output_ids: List[str]
|
||||
|
||||
def render_output(self, output_id: str, output_data) -> str:
|
||||
return json.dumps({
|
||||
"output_id": output_id,
|
||||
"output_data": self.schema.outputs[output_id].serialize(output_data),
|
||||
}, indent='\t')
|
||||
|
||||
|
||||
class Plugins:
|
||||
"""
|
||||
Plugins is a class that can run plugins sequentially. The output is rendered to the standard output and the process
|
||||
is aborted if a step fails.
|
||||
"""
|
||||
steps_by_id: Dict[str, PluginStep]
|
||||
|
||||
def __init__(self, steps: List[PluginStep]):
|
||||
self.steps_by_id = dict()
|
||||
for step in steps:
|
||||
if step.schema.id in self.steps_by_id:
|
||||
raise Exception(
|
||||
"Duplicate step ID: {}".format(step.schema.id)
|
||||
)
|
||||
self.steps_by_id[step.schema.id] = step
|
||||
|
||||
def unserialize_scenario(self, file: str) -> Any:
|
||||
return serialization.load_from_file(abspath(file))
|
||||
|
||||
def run(self, file: str, kubeconfig_path: str, kraken_config: str):
|
||||
"""
|
||||
Run executes a series of steps
|
||||
"""
|
||||
data = self.unserialize_scenario(abspath(file))
|
||||
if not isinstance(data, list):
|
||||
raise Exception(
|
||||
"Invalid scenario configuration file: {} expected list, found {}".format(file, type(data).__name__)
|
||||
)
|
||||
i = 0
|
||||
for entry in data:
|
||||
if not isinstance(entry, dict):
|
||||
raise Exception(
|
||||
"Invalid scenario configuration file: {} expected a list of dict's, found {} on step {}".format(
|
||||
file,
|
||||
type(entry).__name__,
|
||||
i
|
||||
)
|
||||
)
|
||||
if "id" not in entry:
|
||||
raise Exception(
|
||||
"Invalid scenario configuration file: {} missing 'id' field on step {}".format(
|
||||
file,
|
||||
i,
|
||||
)
|
||||
)
|
||||
if "config" not in entry:
|
||||
raise Exception(
|
||||
"Invalid scenario configuration file: {} missing 'config' field on step {}".format(
|
||||
file,
|
||||
i,
|
||||
)
|
||||
)
|
||||
|
||||
if entry["id"] not in self.steps_by_id:
|
||||
raise Exception(
|
||||
"Invalid step {} in {} ID: {} expected one of: {}".format(
|
||||
i,
|
||||
file,
|
||||
entry["id"],
|
||||
', '.join(self.steps_by_id.keys())
|
||||
)
|
||||
)
|
||||
step = self.steps_by_id[entry["id"]]
|
||||
unserialized_input = step.schema.input.unserialize(entry["config"])
|
||||
if "kubeconfig_path" in step.schema.input.properties:
|
||||
unserialized_input.kubeconfig_path = kubeconfig_path
|
||||
if "kraken_config" in step.schema.input.properties:
|
||||
unserialized_input.kraken_config = kraken_config
|
||||
output_id, output_data = step.schema(unserialized_input)
|
||||
logging.info(step.render_output(output_id, output_data) + "\n")
|
||||
if output_id in step.error_output_ids:
|
||||
raise Exception(
|
||||
"Step {} in {} ({}) failed".format(i, file, step.schema.id)
|
||||
)
|
||||
i = i + 1
|
||||
|
||||
def json_schema(self):
|
||||
"""
|
||||
This function generates a JSON schema document and renders it from the steps passed.
|
||||
"""
|
||||
result = {
|
||||
"$id": "https://github.com/redhat-chaos/krkn/",
|
||||
"$schema": "https://json-schema.org/draft/2020-12/schema",
|
||||
"title": "Kraken Arcaflow scenarios",
|
||||
"description": "Serial execution of Arcaflow Python plugins. See https://github.com/arcaflow for details.",
|
||||
"type": "array",
|
||||
"minContains": 1,
|
||||
"items": {
|
||||
"oneOf": [
|
||||
|
||||
]
|
||||
}
|
||||
}
|
||||
for step_id in self.steps_by_id.keys():
|
||||
step = self.steps_by_id[step_id]
|
||||
step_input = jsonschema.step_input(step.schema)
|
||||
del step_input["$id"]
|
||||
del step_input["$schema"]
|
||||
del step_input["title"]
|
||||
del step_input["description"]
|
||||
result["items"]["oneOf"].append({
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"id": {
|
||||
"type": "string",
|
||||
"const": step_id,
|
||||
},
|
||||
"config": step_input,
|
||||
},
|
||||
"required": [
|
||||
"id",
|
||||
"config",
|
||||
]
|
||||
})
|
||||
return json.dumps(result, indent="\t")
|
||||
|
||||
|
||||
PLUGINS = Plugins(
|
||||
[
|
||||
PluginStep(
|
||||
kill_pods,
|
||||
[
|
||||
"error",
|
||||
]
|
||||
),
|
||||
PluginStep(
|
||||
wait_for_pods,
|
||||
[
|
||||
"error"
|
||||
]
|
||||
),
|
||||
PluginStep(
|
||||
run_python_file,
|
||||
[
|
||||
"error"
|
||||
]
|
||||
),
|
||||
PluginStep(
|
||||
vmware_plugin.node_start,
|
||||
[
|
||||
"error"
|
||||
]
|
||||
),
|
||||
PluginStep(
|
||||
vmware_plugin.node_stop,
|
||||
[
|
||||
"error"
|
||||
]
|
||||
),
|
||||
PluginStep(
|
||||
vmware_plugin.node_reboot,
|
||||
[
|
||||
"error"
|
||||
]
|
||||
),
|
||||
PluginStep(
|
||||
vmware_plugin.node_terminate,
|
||||
[
|
||||
"error"
|
||||
]
|
||||
),
|
||||
PluginStep(
|
||||
ibmcloud_plugin.node_start,
|
||||
[
|
||||
"error"
|
||||
]
|
||||
),
|
||||
PluginStep(
|
||||
ibmcloud_plugin.node_stop,
|
||||
[
|
||||
"error"
|
||||
]
|
||||
),
|
||||
PluginStep(
|
||||
ibmcloud_plugin.node_reboot,
|
||||
[
|
||||
"error"
|
||||
]
|
||||
),
|
||||
PluginStep(
|
||||
ibmcloud_plugin.node_terminate,
|
||||
[
|
||||
"error"
|
||||
]
|
||||
),
|
||||
PluginStep(
|
||||
network_chaos,
|
||||
[
|
||||
"error"
|
||||
]
|
||||
),
|
||||
PluginStep(
|
||||
pod_outage,
|
||||
[
|
||||
"error"
|
||||
]
|
||||
),
|
||||
PluginStep(
|
||||
pod_egress_shaping,
|
||||
[
|
||||
"error"
|
||||
]
|
||||
),
|
||||
PluginStep(
|
||||
pod_ingress_shaping,
|
||||
[
|
||||
"error"
|
||||
]
|
||||
)
|
||||
]
|
||||
)
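# Illustrative sketch, not part of the original module: the structure Plugins.run()
# expects from a scenario file after unserialize_scenario() - a list of steps, each
# carrying an "id" registered above and a "config" for that step's input schema.
# The values shown are hypothetical.
EXAMPLE_ARCAFLOW_SCENARIO = [
    {
        "id": "kill-pods",
        "config": {
            "namespace_pattern": "^openshift-etcd$",
            "label_selector": "k8s-app=etcd",
            "krkn_pod_recovery_time": 120,
        },
    },
]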
|
||||
|
||||
|
||||
def run(scenarios: List[str],
|
||||
kubeconfig_path: str,
|
||||
kraken_config: str,
|
||||
failed_post_scenarios: List[str],
|
||||
wait_duration: int,
|
||||
telemetry: KrknTelemetryKubernetes,
|
||||
kubecli: KrknKubernetes
|
||||
) -> (List[str], list[ScenarioTelemetry]):
|
||||
|
||||
scenario_telemetries: list[ScenarioTelemetry] = []
|
||||
for scenario in scenarios:
|
||||
scenario_telemetry = ScenarioTelemetry()
|
||||
scenario_telemetry.scenario = scenario
|
||||
scenario_telemetry.startTimeStamp = time.time()
|
||||
telemetry.set_parameters_base64(scenario_telemetry, scenario)
|
||||
logging.info('Running scenario: ' + str(scenario))
|
||||
pool = PodsMonitorPool(kubecli)
|
||||
kill_scenarios = [kill_scenario for kill_scenario in PLUGINS.unserialize_scenario(scenario) if kill_scenario["id"] == "kill-pods"]
|
||||
|
||||
try:
|
||||
start_monitoring(pool, kill_scenarios)
|
||||
PLUGINS.run(scenario, kubeconfig_path, kraken_config)
|
||||
result = pool.join()
|
||||
scenario_telemetry.affected_pods = result
|
||||
if result.error:
|
||||
raise Exception(f"unrecovered pods: {result.error}")
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"scenario exception: {str(e)}")
|
||||
scenario_telemetry.exitStatus = 1
|
||||
pool.cancel()
|
||||
failed_post_scenarios.append(scenario)
|
||||
log_exception(scenario)
|
||||
else:
|
||||
scenario_telemetry.exitStatus = 0
|
||||
logging.info("Waiting for the specified duration: %s" % (wait_duration))
|
||||
time.sleep(wait_duration)
|
||||
scenario_telemetries.append(scenario_telemetry)
|
||||
scenario_telemetry.endTimeStamp = time.time()
|
||||
|
||||
return failed_post_scenarios, scenario_telemetries
|
||||
|
||||
|
||||
def start_monitoring(pool: PodsMonitorPool, scenarios: list[Any]):
|
||||
for kill_scenario in scenarios:
|
||||
recovery_time = kill_scenario["config"]["krkn_pod_recovery_time"]
|
||||
if ("namespace_pattern" in kill_scenario["config"] and
|
||||
"label_selector" in kill_scenario["config"]):
|
||||
namespace_pattern = kill_scenario["config"]["namespace_pattern"]
|
||||
label_selector = kill_scenario["config"]["label_selector"]
|
||||
pool.select_and_monitor_by_namespace_pattern_and_label(
|
||||
namespace_pattern=namespace_pattern,
|
||||
label_selector=label_selector,
|
||||
max_timeout=recovery_time)
|
||||
logging.info(
|
||||
f"waiting {recovery_time} seconds for pod recovery, "
|
||||
f"pod label selector: {label_selector} namespace pattern: {namespace_pattern}")
|
||||
|
||||
elif ("namespace_pattern" in kill_scenario["config"] and
|
||||
"name_pattern" in kill_scenario["config"]):
|
||||
namespace_pattern = kill_scenario["config"]["namespace_pattern"]
|
||||
name_pattern = kill_scenario["config"]["name_pattern"]
|
||||
pool.select_and_monitor_by_name_pattern_and_namespace_pattern(pod_name_pattern=name_pattern,
|
||||
namespace_pattern=namespace_pattern,
|
||||
max_timeout=recovery_time)
|
||||
logging.info(f"waiting {recovery_time} seconds for pod recovery, "
|
||||
f"pod name pattern: {name_pattern} namespace pattern: {namespace_pattern}")
|
||||
else:
|
||||
raise Exception(f"impossible to determine monitor parameters, check {kill_scenario} configuration")
|
||||
@@ -1,4 +0,0 @@
|
||||
from kraken.plugins import PLUGINS
|
||||
|
||||
if __name__ == "__main__":
|
||||
print(PLUGINS.json_schema())
|
||||
@@ -1,256 +0,0 @@
|
||||
import logging
|
||||
import time
|
||||
from typing import Any
|
||||
|
||||
import yaml
|
||||
import sys
|
||||
import random
|
||||
import arcaflow_plugin_kill_pod
|
||||
from krkn_lib.k8s.pods_monitor_pool import PodsMonitorPool
|
||||
|
||||
import kraken.cerberus.setup as cerberus
|
||||
import kraken.post_actions.actions as post_actions
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
|
||||
from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from arcaflow_plugin_sdk import serialization
|
||||
from krkn_lib.utils.functions import get_yaml_item_value, log_exception
|
||||
|
||||
|
||||
# Run pod based scenarios
|
||||
def run(kubeconfig_path, scenarios_list, config, failed_post_scenarios, wait_duration):
|
||||
# Loop to run the scenarios starts here
|
||||
for pod_scenario in scenarios_list:
|
||||
if len(pod_scenario) > 1:
|
||||
pre_action_output = post_actions.run(kubeconfig_path, pod_scenario[1])
|
||||
else:
|
||||
pre_action_output = ""
|
||||
try:
|
||||
# capture start time
|
||||
start_time = int(time.time())
|
||||
|
||||
input = serialization.load_from_file(pod_scenario)
|
||||
|
||||
s = arcaflow_plugin_kill_pod.get_schema()
|
||||
input_data: arcaflow_plugin_kill_pod.KillPodConfig = s.unserialize_input("pod", input)
|
||||
|
||||
if kubeconfig_path is not None:
|
||||
input_data.kubeconfig_path = kubeconfig_path
|
||||
|
||||
output_id, output_data = s.call_step("pod", input_data)
|
||||
|
||||
if output_id == "error":
|
||||
data: arcaflow_plugin_kill_pod.PodErrorOutput = output_data
|
||||
logging.error("Failed to run pod scenario: {}".format(data.error))
|
||||
else:
|
||||
data: arcaflow_plugin_kill_pod.PodSuccessOutput = output_data
|
||||
for pod in data.pods:
|
||||
print("Deleted pod {} in namespace {}\n".format(pod.pod_name, pod.pod_namespace))
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to run scenario: %s. Encountered the following " "exception: %s" % (pod_scenario[0], e)
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
logging.info("Scenario: %s has been successfully injected!" % (pod_scenario[0]))
|
||||
logging.info("Waiting for the specified duration: %s" % (wait_duration))
|
||||
time.sleep(wait_duration)
|
||||
|
||||
try:
|
||||
failed_post_scenarios = post_actions.check_recovery(
|
||||
kubeconfig_path, pod_scenario, failed_post_scenarios, pre_action_output
|
||||
)
|
||||
except Exception as e:
|
||||
logging.error("Failed to run post action checks: %s" % e)
|
||||
sys.exit(1)
|
||||
|
||||
# capture end time
|
||||
end_time = int(time.time())
|
||||
|
||||
# publish cerberus status
|
||||
cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
|
||||
return failed_post_scenarios
|
||||
|
||||
|
||||
# krkn_lib
|
||||
def container_run(kubeconfig_path,
|
||||
scenarios_list,
|
||||
config,
|
||||
failed_post_scenarios,
|
||||
wait_duration,
|
||||
kubecli: KrknKubernetes,
|
||||
telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]):
|
||||
|
||||
failed_scenarios = []
|
||||
scenario_telemetries: list[ScenarioTelemetry] = []
|
||||
pool = PodsMonitorPool(kubecli)
|
||||
|
||||
for container_scenario_config in scenarios_list:
|
||||
scenario_telemetry = ScenarioTelemetry()
|
||||
scenario_telemetry.scenario = container_scenario_config[0]
|
||||
scenario_telemetry.startTimeStamp = time.time()
|
||||
telemetry.set_parameters_base64(scenario_telemetry, container_scenario_config[0])
|
||||
if len(container_scenario_config) > 1:
|
||||
pre_action_output = post_actions.run(kubeconfig_path, container_scenario_config[1])
|
||||
else:
|
||||
pre_action_output = ""
|
||||
with open(container_scenario_config[0], "r") as f:
|
||||
cont_scenario_config = yaml.full_load(f)
|
||||
start_monitoring(kill_scenarios=cont_scenario_config["scenarios"], pool=pool)
|
||||
for cont_scenario in cont_scenario_config["scenarios"]:
|
||||
# capture start time
|
||||
start_time = int(time.time())
|
||||
try:
|
||||
killed_containers = container_killing_in_pod(cont_scenario, kubecli)
|
||||
logging.info(f"killed containers: {str(killed_containers)}")
|
||||
result = pool.join()
|
||||
if result.error:
|
||||
raise Exception(f"pods failed to recovery: {result.error}")
|
||||
scenario_telemetry.affected_pods = result
|
||||
logging.info("Waiting for the specified duration: %s" % (wait_duration))
|
||||
time.sleep(wait_duration)
|
||||
|
||||
# capture end time
|
||||
end_time = int(time.time())
|
||||
|
||||
# publish cerberus status
|
||||
cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
|
||||
except (RuntimeError, Exception):
|
||||
pool.cancel()
|
||||
failed_scenarios.append(container_scenario_config[0])
|
||||
log_exception(container_scenario_config[0])
|
||||
scenario_telemetry.exitStatus = 1
|
||||
# removed_exit
|
||||
# sys.exit(1)
|
||||
else:
|
||||
scenario_telemetry.exitStatus = 0
|
||||
scenario_telemetry.endTimeStamp = time.time()
|
||||
scenario_telemetries.append(scenario_telemetry)
|
||||
|
||||
return failed_scenarios, scenario_telemetries
|
||||
|
||||
def start_monitoring(kill_scenarios: list[Any], pool: PodsMonitorPool):
|
||||
for kill_scenario in kill_scenarios:
|
||||
namespace_pattern = f"^{kill_scenario['namespace']}$"
|
||||
label_selector = kill_scenario["label_selector"]
|
||||
recovery_time = kill_scenario["expected_recovery_time"]
|
||||
pool.select_and_monitor_by_namespace_pattern_and_label(
|
||||
namespace_pattern=namespace_pattern,
|
||||
label_selector=label_selector,
|
||||
max_timeout=recovery_time)
|
||||
|
||||
|
||||
def container_killing_in_pod(cont_scenario, kubecli: KrknKubernetes):
|
||||
scenario_name = get_yaml_item_value(cont_scenario, "name", "")
|
||||
namespace = get_yaml_item_value(cont_scenario, "namespace", "*")
|
||||
label_selector = get_yaml_item_value(cont_scenario, "label_selector", None)
|
||||
pod_names = get_yaml_item_value(cont_scenario, "pod_names", [])
|
||||
container_name = get_yaml_item_value(cont_scenario, "container_name", "")
|
||||
kill_action = get_yaml_item_value(cont_scenario, "action", 1)
|
||||
kill_count = get_yaml_item_value(cont_scenario, "count", 1)
|
||||
if not isinstance(kill_action, int):
|
||||
logging.error("Please make sure the action parameter defined in the "
|
||||
"config is an integer")
|
||||
raise RuntimeError()
|
||||
if (kill_action < 1) or (kill_action > 15):
|
||||
logging.error("Only 1-15 kill signals are supported.")
|
||||
raise RuntimeError()
|
||||
kill_action = "kill " + str(kill_action)
|
||||
if type(pod_names) != list:
|
||||
logging.error("Please make sure your pod_names are in a list format")
|
||||
# removed_exit
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
if len(pod_names) == 0:
|
||||
if namespace == "*":
|
||||
# returns double array of pod name and namespace
|
||||
pods = kubecli.get_all_pods(label_selector)
|
||||
else:
|
||||
# Only returns pod names
|
||||
pods = kubecli.list_pods(namespace, label_selector)
|
||||
else:
|
||||
if namespace == "*":
|
||||
logging.error("You must specify the namespace to kill a container in a specific pod")
|
||||
logging.error("Scenario " + scenario_name + " failed")
|
||||
# removed_exit
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
pods = pod_names
|
||||
# get container and pod name
|
||||
container_pod_list = []
|
||||
for pod in pods:
|
||||
if type(pod) == list:
|
||||
pod_output = kubecli.get_pod_info(pod[0], pod[1])
|
||||
container_names = [container.name for container in pod_output.containers]
|
||||
|
||||
container_pod_list.append([pod[0], pod[1], container_names])
|
||||
else:
|
||||
pod_output = kubecli.get_pod_info(pod, namespace)
|
||||
container_names = [container.name for container in pod_output.containers]
|
||||
container_pod_list.append([pod, namespace, container_names])
|
||||
|
||||
killed_count = 0
|
||||
killed_container_list = []
|
||||
while killed_count < kill_count:
|
||||
if len(container_pod_list) == 0:
|
||||
logging.error("Trying to kill more containers than were found, try lowering kill count")
|
||||
logging.error("Scenario " + scenario_name + " failed")
|
||||
# removed_exit
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
selected_container_pod = container_pod_list[random.randint(0, len(container_pod_list) - 1)]
|
||||
for c_name in selected_container_pod[2]:
|
||||
if container_name != "":
|
||||
if c_name == container_name:
|
||||
killed_container_list.append([selected_container_pod[0], selected_container_pod[1], c_name])
|
||||
retry_container_killing(kill_action, selected_container_pod[0], selected_container_pod[1], c_name, kubecli)
|
||||
break
|
||||
else:
|
||||
killed_container_list.append([selected_container_pod[0], selected_container_pod[1], c_name])
|
||||
retry_container_killing(kill_action, selected_container_pod[0], selected_container_pod[1], c_name, kubecli)
|
||||
break
|
||||
container_pod_list.remove(selected_container_pod)
|
||||
killed_count += 1
|
||||
logging.info("Scenario " + scenario_name + " successfully injected")
|
||||
return killed_container_list
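# Illustrative sketch, not part of the original module: the per-scenario keys read by
# container_killing_in_pod() above and start_monitoring() earlier in this file. The
# values shown are hypothetical.
EXAMPLE_CONTAINER_SCENARIO_YAML = """
scenarios:
  - name: kill etcd container
    namespace: openshift-etcd
    label_selector: k8s-app=etcd
    container_name: etcd
    pod_names: []                 # leave empty to pick pods via label_selector
    action: 9                     # signal 1-15, executed as "kill <signal>" inside the container
    count: 1
    expected_recovery_time: 60
"""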
|
||||
|
||||
|
||||
def retry_container_killing(kill_action, podname, namespace, container_name, kubecli: KrknKubernetes):
|
||||
i = 0
|
||||
while i < 5:
|
||||
logging.info("Killing container %s in pod %s (ns %s)" % (str(container_name), str(podname), str(namespace)))
|
||||
response = kubecli.exec_cmd_in_pod(kill_action, podname, namespace, container_name)
|
||||
i += 1
|
||||
# Blank response means it is done
|
||||
if not response:
|
||||
break
|
||||
elif "unauthorized" in response.lower() or "authorization" in response.lower():
|
||||
time.sleep(2)
|
||||
continue
|
||||
else:
|
||||
logging.warning(response)
|
||||
continue
|
||||
|
||||
|
||||
def check_failed_containers(killed_container_list, wait_time, kubecli: KrknKubernetes):
|
||||
|
||||
container_ready = []
|
||||
timer = 0
|
||||
while timer <= wait_time:
|
||||
for killed_container in killed_container_list:
|
||||
# pod namespace contain name
|
||||
pod_output = kubecli.get_pod_info(killed_container[0], killed_container[1])
|
||||
|
||||
for container in pod_output.containers:
|
||||
if container.name == killed_container[2]:
|
||||
if container.ready:
|
||||
container_ready.append(killed_container)
|
||||
if len(container_ready) != 0:
|
||||
for item in container_ready:
|
||||
killed_container_list.remove(item)
|
||||
if killed_container_list is None or len(killed_container_list) == 0:
|
||||
return []
|
||||
timer += 5
|
||||
logging.info("Waiting 5 seconds for containers to become ready")
|
||||
time.sleep(5)
|
||||
return killed_container_list
|
||||
@@ -1,48 +0,0 @@
|
||||
import logging
|
||||
import kraken.invoke.command as runcommand
|
||||
|
||||
|
||||
def run(kubeconfig_path, scenario, pre_action_output=""):
|
||||
|
||||
if scenario.endswith(".yaml") or scenario.endswith(".yml"):
|
||||
logging.error("Powerfulseal support has recently been removed. Please switch to using plugins instead.")
|
||||
elif scenario.endswith(".py"):
|
||||
action_output = runcommand.invoke("python3 " + scenario).strip()
|
||||
if pre_action_output:
|
||||
if pre_action_output == action_output:
|
||||
logging.info(scenario + " post action checks passed")
|
||||
else:
|
||||
logging.info(scenario + " post action response did not match pre check output")
|
||||
logging.info("Pre action output: " + str(pre_action_output) + "\n")
|
||||
logging.info("Post action output: " + str(action_output))
|
||||
return False
|
||||
elif scenario != "":
|
||||
# invoke custom bash script
|
||||
action_output = runcommand.invoke(scenario).strip()
|
||||
if pre_action_output:
|
||||
if pre_action_output == action_output:
|
||||
logging.info(scenario + " post action checks passed")
|
||||
else:
|
||||
logging.info(scenario + " post action response did not match pre check output")
|
||||
return False
|
||||
|
||||
return action_output
|
||||
|
||||
|
||||
# Perform the post scenario actions to see if components recovered
|
||||
def check_recovery(kubeconfig_path, scenario, failed_post_scenarios, pre_action_output):
|
||||
if failed_post_scenarios:
|
||||
for failed_scenario in failed_post_scenarios:
|
||||
post_action_output = run(kubeconfig_path, failed_scenario[0], failed_scenario[1])
|
||||
if post_action_output is not False:
|
||||
failed_post_scenarios.remove(failed_scenario)
|
||||
else:
|
||||
logging.info("Post action scenario " + str(failed_scenario) + "is still failing")
|
||||
|
||||
# check post actions
|
||||
if len(scenario) > 1:
|
||||
post_action_output = run(kubeconfig_path, scenario[1], pre_action_output)
|
||||
if post_action_output is False:
|
||||
failed_post_scenarios.append([scenario[1], pre_action_output])
|
||||
|
||||
return failed_post_scenarios
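# Illustrative sketch, not part of the original module: how a scenario entry with an
# optional post action script pairs up with run() and check_recovery(). The file names
# are hypothetical; the second list element is the .py script or shell command whose
# output is compared before and after the chaos injection.
def _example_post_action_flow(kubeconfig_path="~/.kube/config"):
    scenario = ["scenarios/pod_scenario.yaml", "scenarios/post_action_check.py"]
    pre_action_output = run(kubeconfig_path, scenario[1])
    # ... chaos would be injected here ...
    return check_recovery(kubeconfig_path, scenario, [], pre_action_output)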
|
||||
@@ -1,88 +0,0 @@
|
||||
import datetime
|
||||
import os.path
|
||||
from typing import Optional
|
||||
|
||||
import urllib3
|
||||
import logging
|
||||
import sys
|
||||
|
||||
import yaml
|
||||
from krkn_lib.models.krkn import ChaosRunAlertSummary, ChaosRunAlert
|
||||
from krkn_lib.prometheus.krkn_prometheus import KrknPrometheus
|
||||
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
|
||||
def alerts(prom_cli: KrknPrometheus, start_time, end_time, alert_profile):
|
||||
|
||||
if alert_profile is None or os.path.exists(alert_profile) is False:
|
||||
logging.error(f"{alert_profile} alert profile does not exist")
|
||||
sys.exit(1)
|
||||
|
||||
with open(alert_profile) as profile:
|
||||
profile_yaml = yaml.safe_load(profile)
|
||||
if not isinstance(profile_yaml, list):
|
||||
logging.error(f"{alert_profile} wrong file format, alert profile must be "
|
||||
f"a valid yaml file containing a list of items with 3 properties: "
|
||||
f"expr, description, severity" )
|
||||
sys.exit(1)
|
||||
|
||||
for alert in profile_yaml:
|
||||
if list(alert.keys()).sort() != ["expr", "description", "severity"].sort():
|
||||
logging.error(f"wrong alert {alert}, skipping")
|
||||
|
||||
prom_cli.process_alert(alert,
|
||||
datetime.datetime.fromtimestamp(start_time),
|
||||
datetime.datetime.fromtimestamp(end_time))
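# Illustrative sketch, not part of the original module: an alert profile is a YAML list
# whose items carry exactly the three keys validated above. The expression and wording
# are hypothetical.
EXAMPLE_ALERT_PROFILE_YAML = """
- expr: up{job="apiserver"} == 0
  description: API server is down
  severity: critical
"""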
|
||||
|
||||
|
||||
def critical_alerts(prom_cli: KrknPrometheus,
|
||||
summary: ChaosRunAlertSummary,
|
||||
run_id,
|
||||
scenario,
|
||||
start_time,
|
||||
end_time):
|
||||
summary.scenario = scenario
|
||||
summary.run_id = run_id
|
||||
query = r"""ALERTS{severity="critical"}"""
|
||||
logging.info("Checking for critical alerts firing post chaos")
|
||||
|
||||
during_critical_alerts = prom_cli.process_prom_query_in_range(
|
||||
query,
|
||||
start_time=datetime.datetime.fromtimestamp(start_time),
|
||||
end_time=end_time
|
||||
|
||||
)
|
||||
|
||||
for alert in during_critical_alerts:
|
||||
if "metric" in alert:
|
||||
alertname = alert["metric"]["alertname"] if "alertname" in alert["metric"] else "none"
|
||||
alertstate = alert["metric"]["alertstate"] if "alertstate" in alert["metric"] else "none"
|
||||
namespace = alert["metric"]["namespace"] if "namespace" in alert["metric"] else "none"
|
||||
severity = alert["metric"]["severity"] if "severity" in alert["metric"] else "none"
|
||||
alert = ChaosRunAlert(alertname, alertstate, namespace, severity)
|
||||
summary.chaos_alerts.append(alert)
|
||||
|
||||
|
||||
post_critical_alerts = prom_cli.process_query(
|
||||
query
|
||||
)
|
||||
|
||||
for alert in post_critical_alerts:
|
||||
if "metric" in alert:
|
||||
alertname = alert["metric"]["alertname"] if "alertname" in alert["metric"] else "none"
|
||||
alertstate = alert["metric"]["alertstate"] if "alertstate" in alert["metric"] else "none"
|
||||
namespace = alert["metric"]["namespace"] if "namespace" in alert["metric"] else "none"
|
||||
severity = alert["metric"]["severity"] if "severity" in alert["metric"] else "none"
|
||||
alert = ChaosRunAlert(alertname, alertstate, namespace, severity)
|
||||
summary.post_chaos_alerts.append(alert)
|
||||
|
||||
during_critical_alerts_count = len(during_critical_alerts)
|
||||
post_critical_alerts_count = len(post_critical_alerts)
|
||||
firing_alerts = False
|
||||
|
||||
if during_critical_alerts_count > 0:
|
||||
firing_alerts = True
|
||||
|
||||
if post_critical_alerts_count > 0:
|
||||
firing_alerts = True
|
||||
|
||||
if not firing_alerts:
|
||||
logging.info("No critical alerts are firing!!")
|
||||
@@ -1,372 +0,0 @@
|
||||
import logging
|
||||
import random
|
||||
import re
|
||||
import time
|
||||
import yaml
|
||||
from ..cerberus import setup as cerberus
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
|
||||
from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from krkn_lib.utils.functions import get_yaml_item_value, log_exception
|
||||
|
||||
|
||||
# krkn_lib
|
||||
def run(scenarios_list, config, kubecli: KrknKubernetes, telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]):
|
||||
"""
|
||||
Reads the scenario config and creates a temp file to fill up the PVC
|
||||
"""
|
||||
failed_post_scenarios = ""
|
||||
scenario_telemetries: list[ScenarioTelemetry] = []
|
||||
failed_scenarios = []
|
||||
for app_config in scenarios_list:
|
||||
scenario_telemetry = ScenarioTelemetry()
|
||||
scenario_telemetry.scenario = app_config
|
||||
scenario_telemetry.startTimeStamp = time.time()
|
||||
telemetry.set_parameters_base64(scenario_telemetry, app_config)
|
||||
try:
|
||||
if len(app_config) > 1:
|
||||
with open(app_config, "r") as f:
|
||||
config_yaml = yaml.full_load(f)
|
||||
scenario_config = config_yaml["pvc_scenario"]
|
||||
pvc_name = get_yaml_item_value(
|
||||
scenario_config, "pvc_name", ""
|
||||
)
|
||||
pod_name = get_yaml_item_value(
|
||||
scenario_config, "pod_name", ""
|
||||
)
|
||||
namespace = get_yaml_item_value(
|
||||
scenario_config, "namespace", ""
|
||||
)
|
||||
target_fill_percentage = get_yaml_item_value(
|
||||
scenario_config, "fill_percentage", "50"
|
||||
)
|
||||
duration = get_yaml_item_value(
|
||||
scenario_config, "duration", 60
|
||||
)
|
||||
|
||||
logging.info(
|
||||
"Input params:\n"
|
||||
"pvc_name: '%s'\n"
|
||||
"pod_name: '%s'\n"
|
||||
"namespace: '%s'\n"
|
||||
"target_fill_percentage: '%s%%'\nduration: '%ss'"
|
||||
% (
|
||||
str(pvc_name),
|
||||
str(pod_name),
|
||||
str(namespace),
|
||||
str(target_fill_percentage),
|
||||
str(duration)
|
||||
)
|
||||
)
|
||||
|
||||
# Check input params
|
||||
if namespace is None:
|
||||
logging.error(
|
||||
"You must specify the namespace where the PVC is"
|
||||
)
|
||||
#sys.exit(1)
|
||||
raise RuntimeError()
|
||||
if pvc_name is None and pod_name is None:
|
||||
logging.error(
|
||||
"You must specify the pvc_name or the pod_name"
|
||||
)
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
if pvc_name and pod_name:
|
||||
logging.info(
|
||||
"pod_name will be ignored, pod_name used will be "
|
||||
"a retrieved from the pod used in the pvc_name"
|
||||
)
|
||||
|
||||
# Get pod name
|
||||
if pvc_name:
|
||||
if pod_name:
|
||||
logging.info(
|
||||
"pod_name '%s' will be overridden with one of "
|
||||
"the pods mounted in the PVC" % (str(pod_name))
|
||||
)
|
||||
pvc = kubecli.get_pvc_info(pvc_name, namespace)
|
||||
try:
|
||||
# random generator not used for
|
||||
# security/cryptographic purposes.
|
||||
pod_name = random.choice(pvc.podNames) # nosec
|
||||
logging.info("Pod name: %s" % pod_name)
|
||||
except Exception:
|
||||
logging.error(
|
||||
"Pod associated with %s PVC, on namespace %s, "
|
||||
"not found" % (str(pvc_name), str(namespace))
|
||||
)
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
|
||||
# Get volume name
|
||||
pod = kubecli.get_pod_info(name=pod_name, namespace=namespace)
|
||||
|
||||
if pod is None:
|
||||
logging.error(
|
||||
"Exiting as pod '%s' doesn't exist "
|
||||
"in namespace '%s'" % (
|
||||
str(pod_name),
|
||||
str(namespace)
|
||||
)
|
||||
)
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
|
||||
for volume in pod.volumes:
|
||||
if volume.pvcName is not None:
|
||||
volume_name = volume.name
|
||||
pvc_name = volume.pvcName
|
||||
pvc = kubecli.get_pvc_info(pvc_name, namespace)
|
||||
break
|
||||
if 'pvc' not in locals():
|
||||
logging.error(
|
||||
"Pod '%s' in namespace '%s' does not use a pvc" % (
|
||||
str(pod_name),
|
||||
str(namespace)
|
||||
)
|
||||
)
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
logging.info("Volume name: %s" % volume_name)
|
||||
logging.info("PVC name: %s" % pvc_name)
|
||||
|
||||
# Get container name and mount path
|
||||
for container in pod.containers:
|
||||
for vol in container.volumeMounts:
|
||||
if vol.name == volume_name:
|
||||
mount_path = vol.mountPath
|
||||
container_name = container.name
|
||||
break
|
||||
logging.info("Container path: %s" % container_name)
|
||||
logging.info("Mount path: %s" % mount_path)
|
||||
|
||||
# Get PVC capacity and used bytes
|
||||
command = "df %s -B 1024 | sed 1d" % (str(mount_path))
|
||||
command_output = (
|
||||
kubecli.exec_cmd_in_pod(
|
||||
command,
|
||||
pod_name,
|
||||
namespace,
|
||||
container_name
|
||||
)
|
||||
).split()
|
||||
pvc_used_kb = int(command_output[2])
|
||||
pvc_capacity_kb = pvc_used_kb + int(command_output[3])
|
||||
logging.info("PVC used: %s KB" % pvc_used_kb)
|
||||
logging.info("PVC capacity: %s KB" % pvc_capacity_kb)
|
||||
|
||||
# Check valid fill percentage
|
||||
current_fill_percentage = pvc_used_kb / pvc_capacity_kb
|
||||
if not (
|
||||
current_fill_percentage * 100
|
||||
< float(target_fill_percentage)
|
||||
<= 99
|
||||
):
|
||||
logging.error(
|
||||
"Target fill percentage (%.2f%%) is lower than "
|
||||
"current fill percentage (%.2f%%) "
|
||||
"or higher than 99%%" % (
|
||||
target_fill_percentage,
|
||||
current_fill_percentage * 100
|
||||
)
|
||||
)
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
|
||||
# Calculate file size
|
||||
file_size_kb = int(
|
||||
(
|
||||
float(
|
||||
target_fill_percentage / 100
|
||||
) * float(pvc_capacity_kb)
|
||||
) - float(pvc_used_kb)
|
||||
)
|
||||
logging.debug("File size: %s KB" % file_size_kb)
|
||||
|
||||
file_name = "kraken.tmp"
|
||||
logging.info(
|
||||
"Creating %s file, %s KB size, in pod %s at %s (ns %s)"
|
||||
% (
|
||||
str(file_name),
|
||||
str(file_size_kb),
|
||||
str(pod_name),
|
||||
str(mount_path),
|
||||
str(namespace)
|
||||
)
|
||||
)
|
||||
|
||||
start_time = int(time.time())
|
||||
# Create temp file in the PVC
|
||||
full_path = "%s/%s" % (str(mount_path), str(file_name))
|
||||
command = "fallocate -l $((%s*1024)) %s" % (
|
||||
str(file_size_kb),
|
||||
str(full_path)
|
||||
)
|
||||
logging.debug(
|
||||
"Create temp file in the PVC command:\n %s" % command
|
||||
)
|
||||
kubecli.exec_cmd_in_pod(
|
||||
command,
|
||||
pod_name,
|
||||
namespace,
|
||||
container_name,
|
||||
)
|
||||
|
||||
# Check if file is created
|
||||
command = "ls -lh %s" % (str(mount_path))
|
||||
logging.debug("Check file is created command:\n %s" % command)
|
||||
response = kubecli.exec_cmd_in_pod(
|
||||
command, pod_name, namespace, container_name
|
||||
)
|
||||
logging.info("\n" + str(response))
|
||||
if str(file_name).lower() in str(response).lower():
|
||||
logging.info(
|
||||
"%s file successfully created" % (str(full_path))
|
||||
)
|
||||
else:
|
||||
logging.error(
|
||||
"Failed to create tmp file with %s size" % (
|
||||
str(file_size_kb)
|
||||
)
|
||||
)
|
||||
remove_temp_file(
|
||||
file_name,
|
||||
full_path,
|
||||
pod_name,
|
||||
namespace,
|
||||
container_name,
|
||||
mount_path,
|
||||
file_size_kb,
|
||||
kubecli
|
||||
)
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
|
||||
if str(file_name).lower() in str(response).lower():
|
||||
logging.info(
|
||||
"Waiting for the specified duration in the config: %ss" % (
|
||||
duration
|
||||
)
|
||||
)
|
||||
time.sleep(duration)
|
||||
logging.info("Finish waiting")
|
||||
|
||||
remove_temp_file(
|
||||
file_name,
|
||||
full_path,
|
||||
pod_name,
|
||||
namespace,
|
||||
container_name,
|
||||
mount_path,
|
||||
file_size_kb,
|
||||
kubecli
|
||||
)
|
||||
|
||||
end_time = int(time.time())
|
||||
cerberus.publish_kraken_status(
|
||||
config,
|
||||
failed_post_scenarios,
|
||||
start_time,
|
||||
end_time
|
||||
)
|
||||
except (RuntimeError, Exception):
|
||||
scenario_telemetry.exitStatus = 1
|
||||
failed_scenarios.append(app_config)
|
||||
log_exception(app_config)
|
||||
else:
|
||||
scenario_telemetry.exitStatus = 0
|
||||
scenario_telemetries.append(scenario_telemetry)
|
||||
|
||||
return failed_scenarios, scenario_telemetries
|
||||
|
||||
|
||||
|
||||
|
||||
# krkn_lib
|
||||
def remove_temp_file(
|
||||
file_name,
|
||||
full_path,
|
||||
pod_name,
|
||||
namespace,
|
||||
container_name,
|
||||
mount_path,
|
||||
file_size_kb,
|
||||
kubecli: KrknKubernetes
|
||||
):
|
||||
command = "rm -f %s" % (str(full_path))
|
||||
logging.debug("Remove temp file from the PVC command:\n %s" % command)
|
||||
kubecli.exec_cmd_in_pod(command, pod_name, namespace, container_name)
|
||||
command = "ls -lh %s" % (str(mount_path))
|
||||
logging.debug("Check temp file is removed command:\n %s" % command)
|
||||
response = kubecli.exec_cmd_in_pod(
|
||||
command,
|
||||
pod_name,
|
||||
namespace,
|
||||
container_name
|
||||
)
|
||||
logging.info("\n" + str(response))
|
||||
if not (str(file_name).lower() in str(response).lower()):
|
||||
logging.info("Temp file successfully removed")
|
||||
else:
|
||||
logging.error(
|
||||
"Failed to delete tmp file with %s size" % (str(file_size_kb))
|
||||
)
|
||||
raise RuntimeError()
|
||||
|
||||
|
||||
def toKbytes(value):
    if not re.match("^[0-9]+[K|M|G|T]i$", value):
        logging.error(
            "PVC capacity %s does not match expression "
            "regexp '^[0-9]+[K|M|G|T]i$'" % value
        )
        raise RuntimeError()
    unit = {"K": 0, "M": 1, "G": 2, "T": 3}
    base = 1024 if ("i" in value) else 1000
    exp = unit[value[-2:-1]]
    res = int(value[:-2]) * (base**exp)
    return res
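# Illustrative usage of toKbytes(), not part of the original module: Kubernetes-style
# capacity strings are converted to kibibytes.
assert toKbytes("10Gi") == 10 * 1024 ** 2   # 10485760 KiB
assert toKbytes("500Mi") == 500 * 1024      # 512000 KiB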
|
||||
@@ -1,325 +0,0 @@
|
||||
import time
|
||||
import random
|
||||
import logging
|
||||
import kraken.cerberus.setup as cerberus
|
||||
import kraken.post_actions.actions as post_actions
|
||||
import yaml
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
|
||||
from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from krkn_lib.utils.functions import get_yaml_item_value, log_exception
|
||||
|
||||
|
||||
def delete_objects(kubecli, namespace):
|
||||
|
||||
services = delete_all_services_namespace(kubecli, namespace)
|
||||
daemonsets = delete_all_daemonset_namespace(kubecli, namespace)
|
||||
statefulsets = delete_all_statefulsets_namespace(kubecli, namespace)
|
||||
replicasets = delete_all_replicaset_namespace(kubecli, namespace)
|
||||
deployments = delete_all_deployment_namespace(kubecli, namespace)
|
||||
|
||||
objects = { "daemonsets": daemonsets,
|
||||
"deployments": deployments,
|
||||
"replicasets": replicasets,
|
||||
"statefulsets": statefulsets,
|
||||
"services": services
|
||||
}
|
||||
|
||||
return objects
|
||||
|
||||
|
||||
def get_list_running_pods(kubecli: KrknKubernetes, namespace: str):
|
||||
running_pods = []
|
||||
pods = kubecli.list_pods(namespace)
|
||||
for pod in pods:
|
||||
pod_status = kubecli.get_pod_info(pod, namespace)
|
||||
if pod_status and pod_status.status == "Running":
|
||||
running_pods.append(pod)
|
||||
logging.info('all running pods ' + str(running_pods))
|
||||
return running_pods
|
||||
|
||||
|
||||
def delete_all_deployment_namespace(kubecli: KrknKubernetes, namespace: str):
|
||||
"""
|
||||
Delete all the deployments in the specified namespace
|
||||
|
||||
:param kubecli: krkn kubernetes python package
|
||||
:param namespace: namespace
|
||||
"""
|
||||
try:
|
||||
deployments = kubecli.get_deployment_ns(namespace)
|
||||
for deployment in deployments:
|
||||
logging.info("Deleting deployment" + deployment)
|
||||
kubecli.delete_deployment(deployment, namespace)
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Exception when calling delete_all_deployment_namespace: %s\n",
|
||||
str(e),
|
||||
)
|
||||
raise e
|
||||
|
||||
return deployments
|
||||
|
||||
|
||||
def delete_all_daemonset_namespace(kubecli: KrknKubernetes, namespace: str):
|
||||
"""
|
||||
Delete all the daemonset in the specified namespace
|
||||
|
||||
:param kubecli: krkn kubernetes python package
|
||||
:param namespace: namespace
|
||||
"""
|
||||
try:
|
||||
daemonsets = kubecli.get_daemonset(namespace)
|
||||
for daemonset in daemonsets:
|
||||
logging.info("Deleting daemonset" + daemonset)
|
||||
kubecli.delete_daemonset(daemonset, namespace)
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Exception when calling delete_all_daemonset_namespace: %s\n",
|
||||
str(e),
|
||||
)
|
||||
raise e
|
||||
|
||||
return daemonsets
|
||||
|
||||
|
||||
def delete_all_statefulsets_namespace(kubecli: KrknKubernetes, namespace: str):
|
||||
"""
|
||||
Delete all the statefulsets in the specified namespace
|
||||
|
||||
|
||||
:param kubecli: krkn kubernetes python package
|
||||
:param namespace: namespace
|
||||
"""
|
||||
try:
|
||||
statefulsets = kubecli.get_all_statefulset(namespace)
|
||||
for statefulset in statefulsets:
|
||||
logging.info("Deleting statefulsets" + statefulsets)
|
||||
kubecli.delete_statefulset(statefulset, namespace)
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Exception when calling delete_all_statefulsets_namespace: %s\n",
|
||||
str(e),
|
||||
)
|
||||
raise e
|
||||
|
||||
return statefulsets
|
||||
|
||||
|
||||
def delete_all_replicaset_namespace(kubecli: KrknKubernetes, namespace: str):
|
||||
"""
|
||||
Delete all the replicasets in the specified namespace
|
||||
|
||||
:param kubecli: krkn kubernetes python package
|
||||
:param namespace: namespace
|
||||
"""
|
||||
try:
|
||||
replicasets = kubecli.get_all_replicasets(namespace)
|
||||
for replicaset in replicasets:
|
||||
logging.info("Deleting replicaset" + replicaset)
|
||||
kubecli.delete_replicaset(replicaset, namespace)
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Exception when calling delete_all_replicaset_namespace: %s\n",
|
||||
str(e),
|
||||
)
|
||||
raise e
|
||||
|
||||
return replicasets
|
||||
|
||||
def delete_all_services_namespace(kubecli: KrknKubernetes, namespace: str):
|
||||
"""
|
||||
Delete all the services in the specified namespace
|
||||
|
||||
|
||||
:param kubecli: krkn kubernetes python package
|
||||
:param namespace: namespace
|
||||
"""
|
||||
try:
|
||||
services = kubecli.get_all_services(namespace)
|
||||
for service in services:
|
||||
logging.info("Deleting services" + service)
|
||||
kubecli.delete_services(service, namespace)
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Exception when calling delete_all_services_namespace: %s\n",
|
||||
str(e),
|
||||
)
|
||||
raise e
|
||||
|
||||
return services
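# Illustrative sketch, not part of the original module: the per-scenario keys that the
# run() function below reads from the "scenarios" list. The values shown are
# hypothetical; set either namespace or label_selector and leave the other blank.
EXAMPLE_NAMESPACE_SCENARIO_YAML = """
scenarios:
  - namespace: openshift-monitoring
    label_selector: ""
    delete_count: 1
    runs: 1
    sleep: 10
    wait_time: 30
"""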
|
||||
|
||||
|
||||
# krkn_lib
|
||||
def run(
|
||||
scenarios_list,
|
||||
config,
|
||||
wait_duration,
|
||||
failed_post_scenarios,
|
||||
kubeconfig_path,
|
||||
kubecli: KrknKubernetes,
|
||||
telemetry: KrknTelemetryKubernetes
|
||||
) -> (list[str], list[ScenarioTelemetry]):
|
||||
scenario_telemetries: list[ScenarioTelemetry] = []
|
||||
failed_scenarios = []
|
||||
for scenario_config in scenarios_list:
|
||||
scenario_telemetry = ScenarioTelemetry()
|
||||
scenario_telemetry.scenario = scenario_config[0]
|
||||
scenario_telemetry.startTimeStamp = time.time()
|
||||
telemetry.set_parameters_base64(scenario_telemetry, scenario_config[0])
|
||||
try:
|
||||
if len(scenario_config) > 1:
|
||||
pre_action_output = post_actions.run(kubeconfig_path, scenario_config[1])
|
||||
else:
|
||||
pre_action_output = ""
|
||||
with open(scenario_config[0], "r") as f:
|
||||
scenario_config_yaml = yaml.full_load(f)
|
||||
for scenario in scenario_config_yaml["scenarios"]:
|
||||
scenario_namespace = get_yaml_item_value(
|
||||
scenario, "namespace", ""
|
||||
)
|
||||
scenario_label = get_yaml_item_value(
|
||||
scenario, "label_selector", ""
|
||||
)
|
||||
if scenario_namespace is not None and scenario_namespace.strip() != "":
|
||||
if scenario_label is not None and scenario_label.strip() != "":
|
||||
logging.error("You can only have namespace or label set in your namespace scenario")
|
||||
logging.error(
|
||||
"Current scenario config has namespace '%s' and label selector '%s'"
|
||||
% (scenario_namespace, scenario_label)
|
||||
)
|
||||
logging.error(
|
||||
"Please set either namespace to blank ('') or label_selector to blank ('') to continue"
|
||||
)
|
||||
# removed_exit
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
delete_count = get_yaml_item_value(
|
||||
scenario, "delete_count", 1
|
||||
)
|
||||
run_count = get_yaml_item_value(scenario, "runs", 1)
|
||||
run_sleep = get_yaml_item_value(scenario, "sleep", 10)
|
||||
wait_time = get_yaml_item_value(scenario, "wait_time", 30)
|
||||
|
||||
logging.info(str(scenario_namespace) + str(scenario_label) + str(delete_count) + str(run_count) + str(run_sleep) + str(wait_time))
|
||||
logging.info("done")
|
||||
start_time = int(time.time())
|
||||
for i in range(run_count):
|
||||
killed_namespaces = {}
|
||||
namespaces = kubecli.check_namespaces([scenario_namespace], scenario_label)
|
||||
for j in range(delete_count):
|
||||
if len(namespaces) == 0:
|
||||
logging.error(
|
||||
"Couldn't delete %s namespaces, not enough namespaces matching %s with label %s"
|
||||
% (str(run_count), scenario_namespace, str(scenario_label))
|
||||
)
|
||||
# removed_exit
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
selected_namespace = namespaces[random.randint(0, len(namespaces) - 1)]
|
||||
logging.info('Delete objects in selected namespace: ' + selected_namespace )
|
||||
try:
|
||||
# delete all pods in namespace
|
||||
objects = delete_objects(kubecli,selected_namespace)
|
||||
killed_namespaces[selected_namespace] = objects
|
||||
logging.info("Deleted all objects in namespace %s was successful" % str(selected_namespace))
|
||||
except Exception as e:
|
||||
logging.info("Delete all objects in namespace %s was unsuccessful" % str(selected_namespace))
|
||||
logging.info("Namespace action error: " + str(e))
|
||||
raise RuntimeError()
|
||||
namespaces.remove(selected_namespace)
|
||||
logging.info("Waiting %s seconds between namespace deletions" % str(run_sleep))
|
||||
time.sleep(run_sleep)
|
||||
|
||||
logging.info("Waiting for the specified duration: %s" % wait_duration)
|
||||
time.sleep(wait_duration)
|
||||
if len(scenario_config) > 1:
|
||||
try:
|
||||
failed_post_scenarios = post_actions.check_recovery(
|
||||
kubeconfig_path, scenario_config, failed_post_scenarios, pre_action_output
|
||||
)
|
||||
except Exception as e:
|
||||
logging.error("Failed to run post action checks: %s" % e)
|
||||
# removed_exit
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
else:
|
||||
failed_post_scenarios = check_all_running_deployment(killed_namespaces, wait_time, kubecli)
|
||||
|
||||
end_time = int(time.time())
|
||||
cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
|
||||
except (Exception, RuntimeError):
|
||||
scenario_telemetry.exitStatus = 1
|
||||
failed_scenarios.append(scenario_config[0])
|
||||
log_exception(scenario_config[0])
|
||||
else:
|
||||
scenario_telemetry.exitStatus = 0
|
||||
scenario_telemetry.endTimeStamp = time.time()
|
||||
scenario_telemetries.append(scenario_telemetry)
|
||||
return failed_scenarios, scenario_telemetries
|
||||
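# A minimal sketch of the scenario file parsed by the run() loop above. The
# values are illustrative assumptions; only the keys (namespace,
# label_selector, delete_count, runs, sleep, wait_time) come from the
# get_yaml_item_value() calls, and either namespace or label_selector must be
# left blank, as enforced above.
EXAMPLE_NAMESPACE_SCENARIO = """
scenarios:
  - namespace: "^example-namespace$"   # regex passed to kubecli.check_namespaces()
    label_selector: ""                 # leave blank when namespace is set
    delete_count: 1                    # namespaces to target per run
    runs: 1
    sleep: 10                          # seconds between namespace deletions
    wait_time: 30                      # seconds to wait for objects to recover
"""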
|
||||
|
||||
def check_all_running_pods(kubecli: KrknKubernetes, namespace_name, wait_time):
|
||||
|
||||
timer = 0
|
||||
while timer < wait_time:
|
||||
pod_list = kubecli.list_pods(namespace_name)
|
||||
pods_running = 0
|
||||
for pod in pod_list:
|
||||
pod_info = kubecli.get_pod_info(pod, namespace_name)
|
||||
if pod_info.status != "Running" and pod_info.status != "Succeeded":
|
||||
logging.info("Pods %s still not running or completed" % pod_info.name)
|
||||
break
|
||||
pods_running += 1
|
||||
if len(pod_list) == pods_running:
|
||||
break
|
||||
timer += 5
|
||||
time.sleep(5)
|
||||
logging.info("Waiting 5 seconds for pods to become active")
|
||||
|
||||
# krkn_lib
|
||||
def check_all_running_deployment(killed_namespaces, wait_time, kubecli: KrknKubernetes):
|
||||
|
||||
timer = 0
|
||||
while timer < wait_time and killed_namespaces:
|
||||
still_missing_ns = killed_namespaces.copy()
|
||||
for namespace_name, objects in killed_namespaces.items():
|
||||
still_missing_obj = objects.copy()
|
||||
for obj_name, obj_list in objects.items():
|
||||
if "deployments" == obj_name:
|
||||
deployments = kubecli.get_deployment_ns(namespace_name)
|
||||
if len(obj_list) == len(deployments):
|
||||
still_missing_obj.pop(obj_name)
|
||||
elif "replicasets" == obj_name:
|
||||
replicasets = kubecli.get_all_replicasets(namespace_name)
|
||||
if len(obj_list) == len(replicasets):
|
||||
still_missing_obj.pop(obj_name)
|
||||
elif "statefulsets" == obj_name:
|
||||
statefulsets = kubecli.get_all_statefulset(namespace_name)
|
||||
if len(obj_list) == len(statefulsets):
|
||||
still_missing_obj.pop(obj_name)
|
||||
elif "services" == obj_name:
|
||||
services = kubecli.get_all_services(namespace_name)
|
||||
if len(obj_list) == len(services):
|
||||
still_missing_obj.pop(obj_name)
|
||||
elif "daemonsets" == obj_name:
|
||||
daemonsets = kubecli.get_daemonset(namespace_name)
|
||||
if len(obj_list) == len(daemonsets):
|
||||
still_missing_obj.pop(obj_name)
|
||||
logging.info("Still missing objects " + str(still_missing_obj))
|
||||
killed_namespaces[namespace_name] = still_missing_obj.copy()
|
||||
if len(killed_namespaces[namespace_name].keys()) == 0:
|
||||
logging.info("Wait for pods to become running for namespace: " + namespace_name)
|
||||
check_all_running_pods(kubecli, namespace_name, wait_time)
|
||||
still_missing_ns.pop(namespace_name)
|
||||
killed_namespaces = still_missing_ns
|
||||
if len(killed_namespaces.keys()) == 0:
|
||||
return []
|
||||
|
||||
timer += 10
|
||||
time.sleep(10)
|
||||
logging.info("Waiting 10 seconds for objects in namespaces to become active")
|
||||
|
||||
logging.error("Objects are still not ready after waiting " + str(wait_time) + "seconds")
|
||||
logging.error("Non active namespaces " + str(killed_namespaces))
|
||||
return killed_namespaces
|
||||
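# An illustrative sketch of the killed_namespaces structure that
# check_all_running_deployment() consumes; the namespace and object names are
# assumptions, while the keys mirror the checks above ("deployments",
# "replicasets", "statefulsets", "services", "daemonsets").
example_killed_namespaces = {
    "example-namespace": {
        "deployments": ["web"],
        "replicasets": ["web-7d4b9"],
        "statefulsets": [],
        "services": ["web"],
        "daemonsets": [],
    }
}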
@@ -1,186 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
import yaml
|
||||
import logging
|
||||
import time
|
||||
from multiprocessing.pool import ThreadPool
|
||||
from ..cerberus import setup as cerberus
|
||||
from ..post_actions import actions as post_actions
|
||||
from ..node_actions.aws_node_scenarios import AWS
|
||||
from ..node_actions.openstack_node_scenarios import OPENSTACKCLOUD
|
||||
from ..node_actions.az_node_scenarios import Azure
|
||||
from ..node_actions.gcp_node_scenarios import GCP
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
|
||||
from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from krkn_lib.utils.functions import log_exception
|
||||
|
||||
def multiprocess_nodes(cloud_object_function, nodes):
|
||||
try:
|
||||
# pool object with number of element
|
||||
|
||||
pool = ThreadPool(processes=len(nodes))
|
||||
logging.info("nodes type " + str(type(nodes[0])))
|
||||
if type(nodes[0]) is tuple:
|
||||
node_id = []
|
||||
node_info = []
|
||||
for node in nodes:
|
||||
node_id.append(node[0])
|
||||
node_info.append(node[1])
|
||||
logging.info("node id " + str(node_id))
|
||||
logging.info("node info" + str(node_info))
|
||||
pool.starmap(cloud_object_function, zip(node_info, node_id))
|
||||
|
||||
else:
|
||||
logging.info("pool type" + str(type(nodes)))
|
||||
pool.map(cloud_object_function, nodes)
|
||||
pool.close()
|
||||
except Exception as e:
|
||||
logging.info("Error on pool multiprocessing: " + str(e))
|
||||
|
||||
|
||||
# Inject the cluster shut down scenario
|
||||
# krkn_lib
|
||||
def cluster_shut_down(shut_down_config, kubecli: KrknKubernetes):
|
||||
runs = shut_down_config["runs"]
|
||||
shut_down_duration = shut_down_config["shut_down_duration"]
|
||||
cloud_type = shut_down_config["cloud_type"]
|
||||
timeout = shut_down_config["timeout"]
|
||||
if cloud_type.lower() == "aws":
|
||||
cloud_object = AWS()
|
||||
elif cloud_type.lower() == "gcp":
|
||||
cloud_object = GCP()
|
||||
elif cloud_type.lower() == "openstack":
|
||||
cloud_object = OPENSTACKCLOUD()
|
||||
elif cloud_type.lower() in ["azure", "az"]:
|
||||
cloud_object = Azure()
|
||||
else:
|
||||
logging.error(
|
||||
"Cloud type %s is not currently supported for cluster shut down" %
|
||||
cloud_type
|
||||
)
|
||||
# removed_exit
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
|
||||
nodes = kubecli.list_nodes()
|
||||
node_id = []
|
||||
for node in nodes:
|
||||
instance_id = cloud_object.get_instance_id(node)
|
||||
node_id.append(instance_id)
|
||||
logging.info("node id list " + str(node_id))
|
||||
for _ in range(runs):
|
||||
logging.info("Starting cluster_shut_down scenario injection")
|
||||
stopping_nodes = set(node_id)
|
||||
multiprocess_nodes(cloud_object.stop_instances, node_id)
|
||||
stopped_nodes = stopping_nodes.copy()
|
||||
while len(stopping_nodes) > 0:
|
||||
for node in stopping_nodes:
|
||||
if type(node) is tuple:
|
||||
node_status = cloud_object.wait_until_stopped(
|
||||
node[1],
|
||||
node[0],
|
||||
timeout
|
||||
)
|
||||
else:
|
||||
node_status = cloud_object.wait_until_stopped(
|
||||
node,
|
||||
timeout
|
||||
)
|
||||
|
||||
# Only want to remove node from stopping list
|
||||
# when fully stopped/no error
|
||||
if node_status:
|
||||
stopped_nodes.remove(node)
|
||||
|
||||
stopping_nodes = stopped_nodes.copy()
|
||||
|
||||
logging.info(
|
||||
"Shutting down the cluster for the specified duration: %s" %
|
||||
(shut_down_duration)
|
||||
)
|
||||
time.sleep(shut_down_duration)
|
||||
logging.info("Restarting the nodes")
|
||||
restarted_nodes = set(node_id)
|
||||
multiprocess_nodes(cloud_object.start_instances, node_id)
|
||||
logging.info("Wait for each node to be running again")
|
||||
not_running_nodes = restarted_nodes.copy()
|
||||
while len(not_running_nodes) > 0:
|
||||
for node in not_running_nodes:
|
||||
if type(node) is tuple:
|
||||
node_status = cloud_object.wait_until_running(
|
||||
node[1],
|
||||
node[0],
|
||||
timeout
|
||||
)
|
||||
else:
|
||||
node_status = cloud_object.wait_until_running(
|
||||
node,
|
||||
timeout
|
||||
)
|
||||
if node_status:
|
||||
restarted_nodes.remove(node)
|
||||
not_running_nodes = restarted_nodes.copy()
|
||||
logging.info(
|
||||
"Waiting for 150s to allow cluster component initialization"
|
||||
)
|
||||
time.sleep(150)
|
||||
|
||||
logging.info("Successfully injected cluster_shut_down scenario!")
|
||||
|
||||
# krkn_lib
|
||||
|
||||
def run(scenarios_list, config, wait_duration, kubecli: KrknKubernetes, telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]):
|
||||
failed_post_scenarios = []
|
||||
failed_scenarios = []
|
||||
scenario_telemetries: list[ScenarioTelemetry] = []
|
||||
|
||||
for shut_down_config in scenarios_list:
|
||||
config_path = shut_down_config
|
||||
pre_action_output = ""
|
||||
if isinstance(shut_down_config, list) :
|
||||
if len(shut_down_config) == 0:
|
||||
raise Exception("bad config file format for shutdown scenario")
|
||||
|
||||
config_path = shut_down_config[0]
|
||||
if len(shut_down_config) > 1:
|
||||
pre_action_output = post_actions.run("", shut_down_config[1])
|
||||
|
||||
scenario_telemetry = ScenarioTelemetry()
|
||||
scenario_telemetry.scenario = config_path
|
||||
scenario_telemetry.startTimeStamp = time.time()
|
||||
telemetry.set_parameters_base64(scenario_telemetry, config_path)
|
||||
|
||||
with open(config_path, "r") as f:
|
||||
shut_down_config_yaml = yaml.full_load(f)
|
||||
shut_down_config_scenario = \
|
||||
shut_down_config_yaml["cluster_shut_down_scenario"]
|
||||
start_time = int(time.time())
|
||||
try:
|
||||
cluster_shut_down(shut_down_config_scenario, kubecli)
|
||||
logging.info(
|
||||
"Waiting for the specified duration: %s" % (wait_duration)
|
||||
)
|
||||
time.sleep(wait_duration)
|
||||
failed_post_scenarios = post_actions.check_recovery(
|
||||
"", shut_down_config, failed_post_scenarios, pre_action_output
|
||||
)
|
||||
end_time = int(time.time())
|
||||
cerberus.publish_kraken_status(
|
||||
config,
|
||||
failed_post_scenarios,
|
||||
start_time,
|
||||
end_time
|
||||
)
|
||||
|
||||
except (RuntimeError, Exception):
|
||||
log_exception(config_path)
|
||||
failed_scenarios.append(config_path)
|
||||
scenario_telemetry.exitStatus = 1
|
||||
else:
|
||||
scenario_telemetry.exitStatus = 0
|
||||
|
||||
scenario_telemetry.endTimeStamp = time.time()
|
||||
scenario_telemetries.append(scenario_telemetry)
|
||||
|
||||
return failed_scenarios, scenario_telemetries
|
||||
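# A minimal sketch of the config consumed by cluster_shut_down(); the values
# are illustrative, while the top-level cluster_shut_down_scenario key and the
# runs, shut_down_duration, cloud_type and timeout keys match what run() reads
# above. Supported cloud types are aws, gcp, openstack and azure/az.
EXAMPLE_SHUT_DOWN_SCENARIO = """
cluster_shut_down_scenario:
  cloud_type: aws
  runs: 1                    # number of stop/start cycles
  shut_down_duration: 1200   # seconds to keep the nodes stopped
  timeout: 600               # seconds to wait for each node to stop or start
"""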
|
||||
@@ -1,388 +0,0 @@
|
||||
import datetime
|
||||
import time
|
||||
import logging
|
||||
import re
|
||||
|
||||
import yaml
|
||||
import random
|
||||
|
||||
from krkn_lib import utils
|
||||
from kubernetes.client import ApiException
|
||||
|
||||
from ..cerberus import setup as cerberus
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
|
||||
from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from krkn_lib.utils.functions import get_yaml_item_value, log_exception, get_random_string
|
||||
|
||||
|
||||
# krkn_lib
|
||||
def pod_exec(pod_name, command, namespace, container_name, kubecli:KrknKubernetes):
|
||||
for i in range(5):
|
||||
response = kubecli.exec_cmd_in_pod(
|
||||
command,
|
||||
pod_name,
|
||||
namespace,
|
||||
container_name
|
||||
)
|
||||
if not response:
|
||||
time.sleep(2)
|
||||
continue
|
||||
elif (
|
||||
"unauthorized" in response.lower() or
|
||||
"authorization" in response.lower()
|
||||
):
|
||||
time.sleep(2)
|
||||
continue
|
||||
else:
|
||||
break
|
||||
return response
|
||||
|
||||
|
||||
# krkn_lib
|
||||
def get_container_name(pod_name, namespace, kubecli:KrknKubernetes, container_name=""):
|
||||
|
||||
container_names = kubecli.get_containers_in_pod(pod_name, namespace)
|
||||
if container_name != "":
|
||||
if container_name in container_names:
|
||||
return container_name
|
||||
else:
|
||||
logging.error(
|
||||
"Container name %s not an existing container in pod %s" % (
|
||||
container_name,
|
||||
pod_name
|
||||
)
|
||||
)
|
||||
else:
|
||||
container_name = container_names[
|
||||
# random module here is not used for security/cryptographic
|
||||
# purposes
|
||||
random.randint(0, len(container_names) - 1) # nosec
|
||||
]
|
||||
return container_name
|
||||
|
||||
|
||||
|
||||
def skew_node(node_name: str, action: str, kubecli: KrknKubernetes):
|
||||
pod_namespace = "default"
|
||||
status_pod_name = f"time-skew-pod-{get_random_string(5)}"
|
||||
skew_pod_name = f"time-skew-pod-{get_random_string(5)}"
|
||||
ntp_enabled = True
|
||||
logging.info(f'Creating pod to skew {"time" if action == "skew_time" else "date"} on node {node_name}')
|
||||
status_command = ["timedatectl"]
|
||||
param = "2001-01-01"
|
||||
skew_command = ["timedatectl", "set-time"]
|
||||
if action == "skew_time":
|
||||
skew_command.append("01:01:01")
|
||||
else:
|
||||
skew_command.append("2001-01-01")
|
||||
|
||||
try:
|
||||
status_response = kubecli.exec_command_on_node(node_name, status_command, status_pod_name, pod_namespace)
|
||||
if "Network time on: no" in status_response:
|
||||
ntp_enabled = False
|
||||
|
||||
logging.warning(f'ntp inactive on node {node_name}, skewing {"time" if action == "skew_time" else "date"} to {param}')
|
||||
pod_exec(skew_pod_name, skew_command, pod_namespace, None, kubecli)
|
||||
else:
|
||||
logging.info(f'ntp active on node {node_name}, {"time" if action == "skew_time" else "date"} skewing will have no effect, skipping')
|
||||
except ApiException:
|
||||
pass
|
||||
except Exception as e:
|
||||
logging.error(f"failed to execute skew command in pod: {e}")
|
||||
finally:
|
||||
kubecli.delete_pod(status_pod_name, pod_namespace)
|
||||
if not ntp_enabled :
|
||||
kubecli.delete_pod(skew_pod_name, pod_namespace)
|
||||
|
||||
|
||||
|
||||
# krkn_lib
|
||||
def skew_time(scenario, kubecli:KrknKubernetes):
|
||||
if scenario["action"] not in ["skew_date","skew_time"]:
|
||||
raise RuntimeError(f'{scenario["action"]} is not a valid time skew action')
|
||||
|
||||
if "node" in scenario["object_type"]:
|
||||
node_names = []
|
||||
if "object_name" in scenario.keys() and scenario["object_name"]:
|
||||
node_names = scenario["object_name"]
|
||||
elif (
|
||||
"label_selector" in scenario.keys() and
|
||||
scenario["label_selector"]
|
||||
):
|
||||
node_names = kubecli.list_nodes(scenario["label_selector"])
|
||||
for node in node_names:
|
||||
skew_node(node, scenario["action"], kubecli)
|
||||
logging.info("Reset date/time on node " + str(node))
|
||||
return "node", node_names
|
||||
|
||||
elif "pod" in scenario["object_type"]:
|
||||
skew_command = "date --date "
|
||||
if scenario["action"] == "skew_date":
|
||||
skewed_date = "00-01-01"
|
||||
skew_command += skewed_date
|
||||
elif scenario["action"] == "skew_time":
|
||||
skewed_time = "01:01:01"
|
||||
skew_command += skewed_time
|
||||
container_name = get_yaml_item_value(scenario, "container_name", "")
|
||||
pod_names = []
|
||||
if "object_name" in scenario.keys() and scenario["object_name"]:
|
||||
for name in scenario["object_name"]:
|
||||
if "namespace" not in scenario.keys():
|
||||
logging.error("Need to set namespace when using pod name")
|
||||
# removed_exit
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
pod_names.append([name, scenario["namespace"]])
|
||||
elif "namespace" in scenario.keys() and scenario["namespace"]:
|
||||
if "label_selector" not in scenario.keys():
|
||||
logging.info(
|
||||
"label_selector key not found, querying for all the pods "
|
||||
"in namespace: %s" % (scenario["namespace"])
|
||||
)
|
||||
pod_names = kubecli.list_pods(scenario["namespace"])
|
||||
else:
|
||||
logging.info(
|
||||
"Querying for the pods matching the %s label_selector "
|
||||
"in namespace %s"
|
||||
% (scenario["label_selector"], scenario["namespace"])
|
||||
)
|
||||
pod_names = kubecli.list_pods(
|
||||
scenario["namespace"],
|
||||
scenario["label_selector"]
|
||||
)
|
||||
counter = 0
|
||||
for pod_name in pod_names:
|
||||
pod_names[counter] = [pod_name, scenario["namespace"]]
|
||||
counter += 1
|
||||
elif (
|
||||
"label_selector" in scenario.keys() and
|
||||
scenario["label_selector"]
|
||||
):
|
||||
pod_names = kubecli.get_all_pods(scenario["label_selector"])
|
||||
|
||||
if len(pod_names) == 0:
|
||||
logging.info(
|
||||
"Cannot find pods matching the namespace/label_selector, "
|
||||
"please check"
|
||||
)
|
||||
# removed_exit
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
pod_counter = 0
|
||||
for pod in pod_names:
|
||||
if len(pod) > 1:
|
||||
selected_container_name = get_container_name(
|
||||
pod[0],
|
||||
pod[1],
|
||||
kubecli,
|
||||
container_name,
|
||||
|
||||
)
|
||||
pod_exec_response = pod_exec(
|
||||
pod[0],
|
||||
skew_command,
|
||||
pod[1],
|
||||
selected_container_name,
|
||||
kubecli,
|
||||
|
||||
)
|
||||
if pod_exec_response is False:
|
||||
logging.error(
|
||||
"Couldn't reset time on container %s "
|
||||
"in pod %s in namespace %s"
|
||||
% (selected_container_name, pod[0], pod[1])
|
||||
)
|
||||
# removed_exit
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
pod_names[pod_counter].append(selected_container_name)
|
||||
else:
|
||||
selected_container_name = get_container_name(
|
||||
pod,
|
||||
scenario["namespace"],
|
||||
kubecli,
|
||||
container_name
|
||||
)
|
||||
pod_exec_response = pod_exec(
|
||||
pod,
|
||||
skew_command,
|
||||
scenario["namespace"],
|
||||
selected_container_name,
|
||||
kubecli
|
||||
)
|
||||
if pod_exec_response is False:
|
||||
logging.error(
|
||||
"Couldn't reset time on container "
|
||||
"%s in pod %s in namespace %s"
|
||||
% (
|
||||
selected_container_name,
|
||||
pod,
|
||||
scenario["namespace"]
|
||||
)
|
||||
)
|
||||
# removed_exit
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
pod_names[pod_counter].append(selected_container_name)
|
||||
logging.info("Reset date/time on pod " + str(pod[0]))
|
||||
pod_counter += 1
|
||||
return "pod", pod_names
|
||||
|
||||
|
||||
# From kubectl/oc command get time output
|
||||
def parse_string_date(obj_datetime):
|
||||
try:
|
||||
logging.info("Obj_date time " + str(obj_datetime))
|
||||
obj_datetime = re.sub(r"\s\s+", " ", obj_datetime).strip()
|
||||
logging.info("Obj_date sub time " + str(obj_datetime))
|
||||
date_line = re.match(
|
||||
r"[\s\S\n]*\w{3} \w{3} \d{1,} \d{2}:\d{2}:\d{2} \w{3} \d{4}[\s\S\n]*", # noqa
|
||||
obj_datetime
|
||||
)
|
||||
if date_line is not None:
|
||||
search_response = date_line.group().strip()
|
||||
logging.info("Search response: " + str(search_response))
|
||||
return search_response
|
||||
else:
|
||||
return ""
|
||||
except Exception as e:
|
||||
logging.info(
|
||||
"Exception %s when trying to parse string to date" % str(e)
|
||||
)
|
||||
return ""
|
||||
|
||||
|
||||
# Get date and time from string returned from OC
|
||||
def string_to_date(obj_datetime):
|
||||
obj_datetime = parse_string_date(obj_datetime)
|
||||
try:
|
||||
date_time_obj = datetime.datetime.strptime(
|
||||
obj_datetime,
|
||||
"%a %b %d %H:%M:%S %Z %Y"
|
||||
)
|
||||
return date_time_obj
|
||||
except Exception:
|
||||
logging.info("Couldn't parse string to datetime object")
|
||||
return datetime.datetime(datetime.MINYEAR, 1, 1)
|
||||
|
||||
|
||||
# krkn_lib
|
||||
def check_date_time(object_type, names, kubecli:KrknKubernetes):
|
||||
skew_command = "date"
|
||||
not_reset = []
|
||||
max_retries = 30
|
||||
if object_type == "node":
|
||||
for node_name in names:
|
||||
first_date_time = datetime.datetime.utcnow()
|
||||
check_pod_name = f"time-skew-pod-{get_random_string(5)}"
|
||||
node_datetime_string = kubecli.exec_command_on_node(node_name, [skew_command], check_pod_name)
|
||||
node_datetime = string_to_date(node_datetime_string)
|
||||
counter = 0
|
||||
while not (
|
||||
first_date_time < node_datetime < datetime.datetime.utcnow()
|
||||
):
|
||||
time.sleep(10)
|
||||
logging.info(
|
||||
"Date/time on node %s still not reset, "
|
||||
"waiting 10 seconds and retrying" % node_name
|
||||
)
|
||||
|
||||
node_datetime_string = kubecli.exec_cmd_in_pod([skew_command], check_pod_name, "default")
|
||||
node_datetime = string_to_date(node_datetime_string)
|
||||
counter += 1
|
||||
if counter > max_retries:
|
||||
logging.error(
|
||||
"Date and time in node %s didn't reset properly" %
|
||||
node_name
|
||||
)
|
||||
not_reset.append(node_name)
|
||||
break
|
||||
if counter < max_retries:
|
||||
logging.info(
|
||||
"Date in node " + str(node_name) + " reset properly"
|
||||
)
|
||||
kubecli.delete_pod(check_pod_name)
|
||||
|
||||
elif object_type == "pod":
|
||||
for pod_name in names:
|
||||
first_date_time = datetime.datetime.utcnow()
|
||||
counter = 0
|
||||
pod_datetime_string = pod_exec(
|
||||
pod_name[0],
|
||||
skew_command,
|
||||
pod_name[1],
|
||||
pod_name[2],
|
||||
kubecli
|
||||
)
|
||||
pod_datetime = string_to_date(pod_datetime_string)
|
||||
while not (
|
||||
first_date_time < pod_datetime < datetime.datetime.utcnow()
|
||||
):
|
||||
time.sleep(10)
|
||||
logging.info(
|
||||
"Date/time on pod %s still not reset, "
|
||||
"waiting 10 seconds and retrying" % pod_name[0]
|
||||
)
|
||||
pod_datetime = pod_exec(
|
||||
pod_name[0],
|
||||
skew_command,
|
||||
pod_name[1],
|
||||
pod_name[2],
|
||||
kubecli
|
||||
)
|
||||
pod_datetime = string_to_date(pod_datetime)
|
||||
counter += 1
|
||||
if counter > max_retries:
|
||||
logging.error(
|
||||
"Date and time in pod %s didn't reset properly" %
|
||||
pod_name[0]
|
||||
)
|
||||
not_reset.append(pod_name[0])
|
||||
break
|
||||
if counter < max_retries:
|
||||
logging.info(
|
||||
"Date in pod " + str(pod_name[0]) + " reset properly"
|
||||
)
|
||||
return not_reset
|
||||
|
||||
|
||||
# krkn_lib
|
||||
def run(scenarios_list, config, wait_duration, kubecli:KrknKubernetes, telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]):
|
||||
failed_scenarios = []
|
||||
scenario_telemetries: list[ScenarioTelemetry] = []
|
||||
for time_scenario_config in scenarios_list:
|
||||
scenario_telemetry = ScenarioTelemetry()
|
||||
scenario_telemetry.scenario = time_scenario_config
|
||||
scenario_telemetry.startTimeStamp = time.time()
|
||||
telemetry.set_parameters_base64(scenario_telemetry, time_scenario_config)
|
||||
try:
|
||||
with open(time_scenario_config, "r") as f:
|
||||
scenario_config = yaml.full_load(f)
|
||||
for time_scenario in scenario_config["time_scenarios"]:
|
||||
start_time = int(time.time())
|
||||
object_type, object_names = skew_time(time_scenario, kubecli)
|
||||
not_reset = check_date_time(object_type, object_names, kubecli)
|
||||
if len(not_reset) > 0:
|
||||
logging.info("Object times were not reset")
|
||||
logging.info(
|
||||
"Waiting for the specified duration: %s" % (wait_duration)
|
||||
)
|
||||
time.sleep(wait_duration)
|
||||
end_time = int(time.time())
|
||||
cerberus.publish_kraken_status(
|
||||
config,
|
||||
not_reset,
|
||||
start_time,
|
||||
end_time
|
||||
)
|
||||
except (RuntimeError, Exception):
|
||||
scenario_telemetry.exitStatus = 1
|
||||
log_exception(time_scenario_config)
|
||||
failed_scenarios.append(time_scenario_config)
|
||||
else:
|
||||
scenario_telemetry.exitStatus = 0
|
||||
scenario_telemetry.endTimeStamp = time.time()
|
||||
scenario_telemetries.append(scenario_telemetry)
|
||||
|
||||
return failed_scenarios, scenario_telemetries
|
||||
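# A minimal sketch of the time_scenarios entries handled by skew_time(); the
# values are illustrative, while the keys, the accepted actions (skew_date,
# skew_time) and the object types (node, pod) come from the code above.
EXAMPLE_TIME_SCENARIOS = """
time_scenarios:
  - action: skew_date
    object_type: pod
    namespace: example-namespace
    label_selector: app=example    # used when object_name is not provided
    container_name: ""             # optional; a random container is used if blank
  - action: skew_time
    object_type: node
    label_selector: node-role.kubernetes.io/worker
"""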
@@ -1,121 +0,0 @@
|
||||
import yaml
|
||||
import logging
|
||||
import time
|
||||
from ..node_actions.aws_node_scenarios import AWS
|
||||
from ..cerberus import setup as cerberus
|
||||
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
|
||||
from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from krkn_lib.utils.functions import log_exception
|
||||
|
||||
def run(scenarios_list, config, wait_duration, telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]) :
|
||||
"""
|
||||
filters the subnet of interest and applies the network acl
|
||||
to create zone outage
|
||||
"""
|
||||
failed_post_scenarios = ""
|
||||
scenario_telemetries: list[ScenarioTelemetry] = []
|
||||
failed_scenarios = []
|
||||
|
||||
for zone_outage_config in scenarios_list:
|
||||
scenario_telemetry = ScenarioTelemetry()
|
||||
scenario_telemetry.scenario = zone_outage_config
|
||||
scenario_telemetry.startTimeStamp = time.time()
|
||||
telemetry.set_parameters_base64(scenario_telemetry, zone_outage_config)
|
||||
try:
|
||||
if len(zone_outage_config) > 1:
|
||||
with open(zone_outage_config, "r") as f:
|
||||
zone_outage_config_yaml = yaml.full_load(f)
|
||||
scenario_config = zone_outage_config_yaml["zone_outage"]
|
||||
vpc_id = scenario_config["vpc_id"]
|
||||
subnet_ids = scenario_config["subnet_id"]
|
||||
duration = scenario_config["duration"]
|
||||
cloud_type = scenario_config["cloud_type"]
|
||||
ids = {}
|
||||
acl_ids_created = []
|
||||
|
||||
if cloud_type.lower() == "aws":
|
||||
cloud_object = AWS()
|
||||
else:
|
||||
logging.error(
|
||||
"Cloud type %s is not currently supported for "
|
||||
"zone outage scenarios"
|
||||
% cloud_type
|
||||
)
|
||||
# removed_exit
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
|
||||
start_time = int(time.time())
|
||||
|
||||
for subnet_id in subnet_ids:
|
||||
logging.info("Targeting subnet_id")
|
||||
network_association_ids = []
|
||||
associations, original_acl_id = \
|
||||
cloud_object.describe_network_acls(vpc_id, subnet_id)
|
||||
for entry in associations:
|
||||
if entry["SubnetId"] == subnet_id:
|
||||
network_association_ids.append(
|
||||
entry["NetworkAclAssociationId"]
|
||||
)
|
||||
logging.info(
|
||||
"Network association ids associated with "
|
||||
"the subnet %s: %s"
|
||||
% (subnet_id, network_association_ids)
|
||||
)
|
||||
acl_id = cloud_object.create_default_network_acl(vpc_id)
|
||||
new_association_id = \
|
||||
cloud_object.replace_network_acl_association(
|
||||
network_association_ids[0], acl_id
|
||||
)
|
||||
|
||||
# capture the original_acl_id, created_acl_id and
|
||||
# new association_id to use during the recovery
|
||||
ids[new_association_id] = original_acl_id
|
||||
acl_ids_created.append(acl_id)
|
||||
|
||||
# wait for the specified duration
|
||||
logging.info(
|
||||
"Waiting for the specified duration "
|
||||
"in the config: %s" % (duration)
|
||||
)
|
||||
time.sleep(duration)
|
||||
|
||||
# replace the applied acl with the previous acl in use
|
||||
for new_association_id, original_acl_id in ids.items():
|
||||
cloud_object.replace_network_acl_association(
|
||||
new_association_id,
|
||||
original_acl_id
|
||||
)
|
||||
logging.info(
|
||||
"Wating for 60 seconds to make sure "
|
||||
"the changes are in place"
|
||||
)
|
||||
time.sleep(60)
|
||||
|
||||
# delete the network acl created for the run
|
||||
for acl_id in acl_ids_created:
|
||||
cloud_object.delete_network_acl(acl_id)
|
||||
|
||||
logging.info(
|
||||
"End of scenario. "
|
||||
"Waiting for the specified duration: %s" % (wait_duration)
|
||||
)
|
||||
time.sleep(wait_duration)
|
||||
|
||||
end_time = int(time.time())
|
||||
cerberus.publish_kraken_status(
|
||||
config,
|
||||
failed_post_scenarios,
|
||||
start_time,
|
||||
end_time
|
||||
)
|
||||
except (RuntimeError, Exception):
|
||||
scenario_telemetry.exitStatus = 1
|
||||
failed_scenarios.append(zone_outage_config)
|
||||
log_exception(zone_outage_config)
|
||||
else:
|
||||
scenario_telemetry.exitStatus = 0
|
||||
scenario_telemetry.endTimeStamp = time.time()
|
||||
scenario_telemetries.append(scenario_telemetry)
|
||||
return failed_scenarios, scenario_telemetries
|
||||
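# A minimal sketch of the zone_outage config read above; the ids and duration
# are illustrative, while the keys (cloud_type, vpc_id, subnet_id, duration)
# match the scenario_config lookups and only cloud_type aws is supported.
EXAMPLE_ZONE_OUTAGE = """
zone_outage:
  cloud_type: aws
  vpc_id: vpc-0123456789abcdef0            # illustrative id
  subnet_id: [subnet-0123456789abcdef0]    # one or more subnets to isolate
  duration: 600                            # seconds the deny-all ACL stays applied
"""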
|
||||
1
krkn/cerberus/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
from .setup import *
|
||||
@@ -1,7 +1,6 @@
|
||||
import logging
|
||||
|
||||
import pandas as pd
|
||||
import kraken.chaos_recommender.kraken_tests as kraken_tests
|
||||
import time
|
||||
|
||||
KRAKEN_TESTS_PATH = "./kraken_chaos_tests.txt"
|
||||
@@ -23,7 +22,9 @@ def calculate_zscores(data):
|
||||
zscores["Service"] = data["service"]
|
||||
zscores["CPU"] = (data["CPU"] - data["CPU"].mean()) / data["CPU"].std()
|
||||
zscores["Memory"] = (data["MEM"] - data["MEM"].mean()) / data["MEM"].std()
|
||||
zscores["Network"] = (data["NETWORK"] - data["NETWORK"].mean()) / data["NETWORK"].std()
|
||||
zscores["Network"] = (data["NETWORK"] - data["NETWORK"].mean()) / data[
|
||||
"NETWORK"
|
||||
].std()
|
||||
return zscores
|
||||
|
||||
|
||||
@@ -37,18 +38,28 @@ def identify_outliers(data, threshold):
|
||||
|
||||
def get_services_above_heatmap_threshold(dataframe, cpu_threshold, mem_threshold):
|
||||
# Filter the DataFrame based on CPU_HEATMAP and MEM_HEATMAP thresholds
|
||||
filtered_df = dataframe[((dataframe['CPU']/dataframe['CPU_LIMITS']) > cpu_threshold)]
|
||||
filtered_df = dataframe[
|
||||
((dataframe["CPU"] / dataframe["CPU_LIMITS"]) > cpu_threshold)
|
||||
]
|
||||
# Get the lists of services
|
||||
cpu_services = filtered_df['service'].tolist()
|
||||
cpu_services = filtered_df["service"].tolist()
|
||||
|
||||
filtered_df = dataframe[((dataframe['MEM']/dataframe['MEM_LIMITS']) > mem_threshold)]
|
||||
mem_services = filtered_df['service'].tolist()
|
||||
filtered_df = dataframe[
|
||||
((dataframe["MEM"] / dataframe["MEM_LIMITS"]) > mem_threshold)
|
||||
]
|
||||
mem_services = filtered_df["service"].tolist()
|
||||
|
||||
return cpu_services, mem_services
|
||||
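# A small worked example of the heatmap filter above with illustrative data;
# pandas is already imported at the top of this module and the function is
# defined in this file.
sample = pd.DataFrame(
    {
        "service": ["api", "worker"],
        "CPU": [900.0, 100.0],
        "CPU_LIMITS": [1000.0, 1000.0],
        "MEM": [2.0e9, 1.0e8],
        "MEM_LIMITS": [2.1e9, 4.0e9],
    }
)
# With both thresholds at 0.8: api uses 90% of its CPU limit and ~95% of its
# memory limit, so it appears in both lists; worker stays below both.
cpu_hot, mem_hot = get_services_above_heatmap_threshold(sample, 0.8, 0.8)
# cpu_hot == ["api"], mem_hot == ["api"]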
|
||||
|
||||
def analysis(file_path, namespaces, chaos_tests_config, threshold,
|
||||
heatmap_cpu_threshold, heatmap_mem_threshold):
|
||||
def analysis(
|
||||
file_path,
|
||||
namespaces,
|
||||
chaos_tests_config,
|
||||
threshold,
|
||||
heatmap_cpu_threshold,
|
||||
heatmap_mem_threshold,
|
||||
):
|
||||
# Load the telemetry data from file
|
||||
logging.info("Fetching the Telemetry data...")
|
||||
data = load_telemetry_data(file_path)
|
||||
@@ -66,29 +77,43 @@ def analysis(file_path, namespaces, chaos_tests_config, threshold,
|
||||
namespace_zscores = zscores.loc[zscores["Namespace"] == namespace]
|
||||
namespace_data = data.loc[data["namespace"] == namespace]
|
||||
outliers_cpu, outliers_memory, outliers_network = identify_outliers(
|
||||
namespace_zscores, threshold)
|
||||
namespace_zscores, threshold
|
||||
)
|
||||
cpu_services, mem_services = get_services_above_heatmap_threshold(
|
||||
namespace_data, heatmap_cpu_threshold, heatmap_mem_threshold)
|
||||
namespace_data, heatmap_cpu_threshold, heatmap_mem_threshold
|
||||
)
|
||||
|
||||
analysis_data[namespace] = analysis_json(outliers_cpu, outliers_memory,
|
||||
outliers_network,
|
||||
cpu_services, mem_services,
|
||||
chaos_tests_config)
|
||||
analysis_data[namespace] = analysis_json(
|
||||
outliers_cpu,
|
||||
outliers_memory,
|
||||
outliers_network,
|
||||
cpu_services,
|
||||
mem_services,
|
||||
chaos_tests_config,
|
||||
)
|
||||
|
||||
if cpu_services:
|
||||
logging.info(f"These services use significant CPU compared to "
|
||||
f"their assigned limits: {cpu_services}")
|
||||
logging.info(
|
||||
f"These services use significant CPU compared to "
|
||||
f"their assigned limits: {cpu_services}"
|
||||
)
|
||||
else:
|
||||
logging.info("There are no services that are using significant "
|
||||
"CPU compared to their assigned limits "
|
||||
"(infinite in case no limits are set).")
|
||||
logging.info(
|
||||
"There are no services that are using significant "
|
||||
"CPU compared to their assigned limits "
|
||||
"(infinite in case no limits are set)."
|
||||
)
|
||||
if mem_services:
|
||||
logging.info(f"These services use significant MEMORY compared to "
|
||||
f"their assigned limits: {mem_services}")
|
||||
logging.info(
|
||||
f"These services use significant MEMORY compared to "
|
||||
f"their assigned limits: {mem_services}"
|
||||
)
|
||||
else:
|
||||
logging.info("There are no services that are using significant "
|
||||
"MEMORY compared to their assigned limits "
|
||||
"(infinite in case no limits are set).")
|
||||
logging.info(
|
||||
"There are no services that are using significant "
|
||||
"MEMORY compared to their assigned limits "
|
||||
"(infinite in case no limits are set)."
|
||||
)
|
||||
time.sleep(2)
|
||||
|
||||
logging.info("Please check data in utilisation.txt for further analysis")
|
||||
@@ -96,36 +121,41 @@ def analysis(file_path, namespaces, chaos_tests_config, threshold,
|
||||
return analysis_data
|
||||
|
||||
|
||||
def analysis_json(outliers_cpu, outliers_memory, outliers_network,
|
||||
cpu_services, mem_services, chaos_tests_config):
|
||||
def analysis_json(
|
||||
outliers_cpu,
|
||||
outliers_memory,
|
||||
outliers_network,
|
||||
cpu_services,
|
||||
mem_services,
|
||||
chaos_tests_config,
|
||||
):
|
||||
|
||||
profiling = {
|
||||
"cpu_outliers": outliers_cpu,
|
||||
"memory_outliers": outliers_memory,
|
||||
"network_outliers": outliers_network
|
||||
"network_outliers": outliers_network,
|
||||
}
|
||||
|
||||
heatmap = {
|
||||
"services_with_cpu_heatmap_above_threshold": cpu_services,
|
||||
"services_with_mem_heatmap_above_threshold": mem_services
|
||||
"services_with_mem_heatmap_above_threshold": mem_services,
|
||||
}
|
||||
|
||||
recommendations = {}
|
||||
|
||||
if cpu_services:
|
||||
cpu_recommend = {"services": cpu_services,
|
||||
"tests": chaos_tests_config['CPU']}
|
||||
cpu_recommend = {"services": cpu_services, "tests": chaos_tests_config["CPU"]}
|
||||
recommendations["cpu_services_recommendations"] = cpu_recommend
|
||||
|
||||
if mem_services:
|
||||
mem_recommend = {"services": mem_services,
|
||||
"tests": chaos_tests_config['MEM']}
|
||||
mem_recommend = {"services": mem_services, "tests": chaos_tests_config["MEM"]}
|
||||
recommendations["mem_services_recommendations"] = mem_recommend
|
||||
|
||||
if outliers_network:
|
||||
outliers_network_recommend = {"outliers_networks": outliers_network,
|
||||
"tests": chaos_tests_config['NETWORK']}
|
||||
recommendations["outliers_network_recommendations"] = (
|
||||
outliers_network_recommend)
|
||||
outliers_network_recommend = {
|
||||
"outliers_networks": outliers_network,
|
||||
"tests": chaos_tests_config["NETWORK"],
|
||||
}
|
||||
recommendations["outliers_network_recommendations"] = outliers_network_recommend
|
||||
|
||||
return [profiling, heatmap, recommendations]
|
||||
@@ -1,13 +1,13 @@
|
||||
def get_entries_by_category(filename, category):
|
||||
# Read the file
|
||||
with open(filename, 'r') as file:
|
||||
with open(filename, "r") as file:
|
||||
content = file.read()
|
||||
|
||||
# Split the content into sections based on the square brackets
|
||||
sections = content.split('\n\n')
|
||||
sections = content.split("\n\n")
|
||||
|
||||
# Define the categories
|
||||
valid_categories = ['CPU', 'NETWORK', 'MEM', 'GENERIC']
|
||||
valid_categories = ["CPU", "NETWORK", "MEM", "GENERIC"]
|
||||
|
||||
# Validate the provided category
|
||||
if category not in valid_categories:
|
||||
@@ -25,6 +25,10 @@ def get_entries_by_category(filename, category):
|
||||
return []
|
||||
|
||||
# Extract the entries from the category section
|
||||
entries = [entry.strip() for entry in target_section.split('\n') if entry and not entry.startswith('[')]
|
||||
entries = [
|
||||
entry.strip()
|
||||
for entry in target_section.split("\n")
|
||||
if entry and not entry.startswith("[")
|
||||
]
|
||||
|
||||
return entries
|
||||
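# An illustrative sketch of the chaos tests file parsed by
# get_entries_by_category(); the test names are assumptions, the section
# headers match valid_categories, sections are separated by blank lines and
# header lines starting with '[' are skipped when entries are collected.
EXAMPLE_KRAKEN_CHAOS_TESTS = """[CPU]
example-cpu-hog-test

[MEM]
example-memory-hog-test

[NETWORK]
example-network-latency-test

[GENERIC]
example-pod-delete-test
"""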
203
krkn/chaos_recommender/prometheus.py
Normal file
@@ -0,0 +1,203 @@
|
||||
import logging
|
||||
|
||||
from prometheus_api_client import PrometheusConnect
|
||||
import pandas as pd
|
||||
import urllib3
|
||||
|
||||
|
||||
saved_metrics_path = "./utilisation.txt"
|
||||
|
||||
|
||||
def convert_data_to_dataframe(data, label):
|
||||
df = pd.DataFrame()
|
||||
df["service"] = [item["metric"]["pod"] for item in data]
|
||||
df[label] = [item["value"][1] for item in data]
|
||||
|
||||
return df
|
||||
|
||||
|
||||
def convert_data(data, service):
|
||||
result = {}
|
||||
for entry in data:
|
||||
pod_name = entry["metric"]["pod"]
|
||||
value = entry["value"][1]
|
||||
result[pod_name] = value
|
||||
return result.get(
|
||||
service
|
||||
)  # return the value recorded for the requested pod; None if the pod is missing
|
||||
|
||||
|
||||
def convert_data_limits(data, node_data, service, prometheus):
|
||||
result = {}
|
||||
for entry in data:
|
||||
pod_name = entry["metric"]["pod"]
|
||||
value = entry["value"][1]
|
||||
result[pod_name] = value
|
||||
return result.get(
|
||||
service, get_node_capacity(node_data, service, prometheus)
|
||||
)  # pods without limits can take as many resources as available, so the node capacity (a very high value) is used as the fallback
|
||||
|
||||
|
||||
def get_node_capacity(node_data, pod_name, prometheus):
|
||||
|
||||
# Get the node name on which the pod is running
|
||||
query = f'kube_pod_info{{pod="{pod_name}"}}'
|
||||
result = prometheus.custom_query(query)
|
||||
if not result:
|
||||
return None
|
||||
|
||||
node_name = result[0]["metric"]["node"]
|
||||
|
||||
for item in node_data:
|
||||
if item["metric"]["node"] == node_name:
|
||||
return item["value"][1]
|
||||
|
||||
return "1000000000"
|
||||
|
||||
|
||||
def save_utilization_to_file(utilization, filename, prometheus):
|
||||
|
||||
merged_df = pd.DataFrame(
|
||||
columns=[
|
||||
"namespace",
|
||||
"service",
|
||||
"CPU",
|
||||
"CPU_LIMITS",
|
||||
"MEM",
|
||||
"MEM_LIMITS",
|
||||
"NETWORK",
|
||||
]
|
||||
)
|
||||
for namespace in utilization:
|
||||
# Loading utilization_data[] for namespace
|
||||
# indexes -- 0 CPU, 1 CPU limits, 2 mem, 3 mem limits, 4 network
|
||||
utilization_data = utilization[namespace]
|
||||
df_cpu = convert_data_to_dataframe(utilization_data[0], "CPU")
|
||||
services = df_cpu.service.unique()
|
||||
logging.info(f"Services for namespace {namespace}: {services}")
|
||||
|
||||
for s in services:
|
||||
|
||||
new_row_df = pd.DataFrame(
|
||||
{
|
||||
"namespace": namespace,
|
||||
"service": s,
|
||||
"CPU": convert_data(utilization_data[0], s),
|
||||
"CPU_LIMITS": convert_data_limits(
|
||||
utilization_data[1], utilization_data[5], s, prometheus
|
||||
),
|
||||
"MEM": convert_data(utilization_data[2], s),
|
||||
"MEM_LIMITS": convert_data_limits(
|
||||
utilization_data[3], utilization_data[6], s, prometheus
|
||||
),
|
||||
"NETWORK": convert_data(utilization_data[4], s),
|
||||
},
|
||||
index=[0],
|
||||
)
|
||||
merged_df = pd.concat([merged_df, new_row_df], ignore_index=True)
|
||||
|
||||
# Convert columns to string
|
||||
merged_df["CPU"] = merged_df["CPU"].astype(str)
|
||||
merged_df["MEM"] = merged_df["MEM"].astype(str)
|
||||
merged_df["CPU_LIMITS"] = merged_df["CPU_LIMITS"].astype(str)
|
||||
merged_df["MEM_LIMITS"] = merged_df["MEM_LIMITS"].astype(str)
|
||||
merged_df["NETWORK"] = merged_df["NETWORK"].astype(str)
|
||||
|
||||
# Extract integer part before the decimal point
|
||||
# merged_df['CPU'] = merged_df['CPU'].str.split('.').str[0]
|
||||
# merged_df['MEM'] = merged_df['MEM'].str.split('.').str[0]
|
||||
# merged_df['CPU_LIMITS'] = merged_df['CPU_LIMITS'].str.split('.').str[0]
|
||||
# merged_df['MEM_LIMITS'] = merged_df['MEM_LIMITS'].str.split('.').str[0]
|
||||
# merged_df['NETWORK'] = merged_df['NETWORK'].str.split('.').str[0]
|
||||
|
||||
merged_df.to_csv(filename, sep="\t", index=False)
|
||||
|
||||
|
||||
def fetch_utilization_from_prometheus(
|
||||
prometheus_endpoint, auth_token, namespaces, scrape_duration
|
||||
):
|
||||
urllib3.disable_warnings()
|
||||
prometheus = PrometheusConnect(
|
||||
url=prometheus_endpoint,
|
||||
headers={"Authorization": "Bearer {}".format(auth_token)},
|
||||
disable_ssl=True,
|
||||
)
|
||||
|
||||
# Dicts for saving utilisation and queries -- key is namespace
|
||||
utilization = {}
|
||||
queries = {}
|
||||
|
||||
logging.info("Fetching utilization...")
|
||||
for namespace in namespaces:
|
||||
|
||||
# Fetch CPU utilization
|
||||
cpu_query = (
|
||||
'sum (rate (container_cpu_usage_seconds_total{image!="", namespace="%s"}[%s])) by (pod) *1000'
|
||||
% (namespace, scrape_duration)
|
||||
)
|
||||
cpu_result = prometheus.custom_query(cpu_query)
|
||||
|
||||
cpu_limits_query = (
|
||||
'(sum by (pod) (kube_pod_container_resource_limits{resource="cpu", namespace="%s"}))*1000'
|
||||
% (namespace)
|
||||
)
|
||||
cpu_limits_result = prometheus.custom_query(cpu_limits_query)
|
||||
|
||||
node_cpu_limits_query = (
|
||||
'kube_node_status_capacity{resource="cpu", unit="core"}*1000'
|
||||
)
|
||||
node_cpu_limits_result = prometheus.custom_query(node_cpu_limits_query)
|
||||
|
||||
mem_query = (
|
||||
'sum by (pod) (avg_over_time(container_memory_usage_bytes{image!="", namespace="%s"}[%s]))'
|
||||
% (namespace, scrape_duration)
|
||||
)
|
||||
mem_result = prometheus.custom_query(mem_query)
|
||||
|
||||
mem_limits_query = (
|
||||
'sum by (pod) (kube_pod_container_resource_limits{resource="memory", namespace="%s"}) '
|
||||
% (namespace)
|
||||
)
|
||||
mem_limits_result = prometheus.custom_query(mem_limits_query)
|
||||
|
||||
node_mem_limits_query = (
|
||||
'kube_node_status_capacity{resource="memory", unit="byte"}'
|
||||
)
|
||||
node_mem_limits_result = prometheus.custom_query(node_mem_limits_query)
|
||||
|
||||
network_query = (
|
||||
'sum by (pod) ((avg_over_time(container_network_transmit_bytes_total{namespace="%s"}[%s])) + \
|
||||
(avg_over_time(container_network_receive_bytes_total{namespace="%s"}[%s])))'
|
||||
% (namespace, scrape_duration, namespace, scrape_duration)
|
||||
)
|
||||
network_result = prometheus.custom_query(network_query)
|
||||
|
||||
utilization[namespace] = [
|
||||
cpu_result,
|
||||
cpu_limits_result,
|
||||
mem_result,
|
||||
mem_limits_result,
|
||||
network_result,
|
||||
node_cpu_limits_result,
|
||||
node_mem_limits_result,
|
||||
]
|
||||
queries[namespace] = json_queries(
|
||||
cpu_query, cpu_limits_query, mem_query, mem_limits_query, network_query
|
||||
)
|
||||
|
||||
save_utilization_to_file(utilization, saved_metrics_path, prometheus)
|
||||
|
||||
return saved_metrics_path, queries
|
||||
|
||||
|
||||
def json_queries(
|
||||
cpu_query, cpu_limits_query, mem_query, mem_limits_query, network_query
|
||||
):
|
||||
queries = {
|
||||
"cpu_query": cpu_query,
|
||||
"cpu_limit_query": cpu_limits_query,
|
||||
"memory_query": mem_query,
|
||||
"memory_limit_query": mem_limits_query,
|
||||
"network_query": network_query,
|
||||
}
|
||||
return queries
|
||||
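# A hedged usage sketch of fetch_utilization_from_prometheus(); the endpoint,
# token, namespace and scrape duration are illustrative assumptions. The call
# writes ./utilisation.txt and returns its path along with the PromQL queries
# that were issued.
metrics_file, used_queries = fetch_utilization_from_prometheus(
    prometheus_endpoint="https://prometheus.example.com",
    auth_token="<bearer-token>",
    namespaces=["example-namespace"],
    scrape_duration="1h",
)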
@@ -14,7 +14,9 @@ def setup(repo, distribution):
|
||||
logging.error("Provided distribution: %s is not supported" % (distribution))
|
||||
sys.exit(1)
|
||||
delete_repo = "rm -rf performance-dashboards || exit 0"
|
||||
logging.info("Cloning, installing mutable grafana on the cluster and loading the dashboards")
|
||||
logging.info(
|
||||
"Cloning, installing mutable grafana on the cluster and loading the dashboards"
|
||||
)
|
||||
try:
|
||||
# delete repo to clone the latest copy if exists
|
||||
subprocess.run(delete_repo, shell=True, universal_newlines=True, timeout=45)
|
||||
205
krkn/prometheus/client.py
Normal file
@@ -0,0 +1,205 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import datetime
|
||||
import os.path
|
||||
from typing import Optional, List, Dict, Any
|
||||
|
||||
import urllib3
|
||||
import logging
|
||||
import sys
|
||||
|
||||
import yaml
|
||||
from krkn_lib.elastic.krkn_elastic import KrknElastic
|
||||
from krkn_lib.models.elastic.models import ElasticAlert
|
||||
from krkn_lib.models.krkn import ChaosRunAlertSummary, ChaosRunAlert
|
||||
from krkn_lib.prometheus.krkn_prometheus import KrknPrometheus
|
||||
|
||||
|
||||
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
|
||||
|
||||
|
||||
def alerts(
|
||||
prom_cli: KrknPrometheus,
|
||||
elastic: KrknElastic,
|
||||
run_uuid,
|
||||
start_time,
|
||||
end_time,
|
||||
alert_profile,
|
||||
elastic_collect_alerts,
|
||||
elastic_alerts_index,
|
||||
):
|
||||
|
||||
if alert_profile is None or os.path.exists(alert_profile) is False:
|
||||
logging.error(f"{alert_profile} alert profile does not exist")
|
||||
sys.exit(1)
|
||||
|
||||
with open(alert_profile) as profile:
|
||||
profile_yaml = yaml.safe_load(profile)
|
||||
if not isinstance(profile_yaml, list):
|
||||
logging.error(
|
||||
f"{alert_profile} wrong file format, alert profile must be "
|
||||
f"a valid yaml file containing a list of items with at least 3 properties: "
|
||||
f"expr, description, severity"
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
for alert in profile_yaml:
|
||||
if list(alert.keys()).sort() != ["expr", "description", "severity"].sort():
|
||||
logging.error(f"wrong alert {alert}, skipping")
|
||||
|
||||
processed_alert = prom_cli.process_alert(
|
||||
alert,
|
||||
datetime.datetime.fromtimestamp(start_time),
|
||||
datetime.datetime.fromtimestamp(end_time),
|
||||
)
|
||||
if (
|
||||
processed_alert[0]
|
||||
and processed_alert[1]
|
||||
and elastic
|
||||
and elastic_collect_alerts
|
||||
):
|
||||
elastic_alert = ElasticAlert(
|
||||
run_uuid=run_uuid,
|
||||
severity=alert["severity"],
|
||||
alert=processed_alert[1],
|
||||
created_at=datetime.datetime.fromtimestamp(processed_alert[0]),
|
||||
)
|
||||
result = elastic.push_alert(elastic_alert, elastic_alerts_index)
|
||||
if result == -1:
|
||||
logging.error("failed to save alert on ElasticSearch")
|
||||
pass
|
||||
|
||||
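# A minimal sketch of the alert profile consumed by alerts() above; the
# expression and description are illustrative, while the required properties
# (expr, description, severity) are the ones validated in the loop.
EXAMPLE_ALERT_PROFILE = """
- expr: up{job="kube-apiserver"} == 0
  description: the API server is not reachable
  severity: critical
"""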
|
||||
def critical_alerts(
|
||||
prom_cli: KrknPrometheus,
|
||||
summary: ChaosRunAlertSummary,
|
||||
run_id,
|
||||
scenario,
|
||||
start_time,
|
||||
end_time,
|
||||
):
|
||||
summary.scenario = scenario
|
||||
summary.run_id = run_id
|
||||
query = r"""ALERTS{severity="critical"}"""
|
||||
logging.info("Checking for critical alerts firing post chaos")
|
||||
|
||||
during_critical_alerts = prom_cli.process_prom_query_in_range(
|
||||
query, start_time=datetime.datetime.fromtimestamp(start_time), end_time=end_time
|
||||
)
|
||||
|
||||
for alert in during_critical_alerts:
|
||||
if "metric" in alert:
|
||||
alertname = (
|
||||
alert["metric"]["alertname"]
|
||||
if "alertname" in alert["metric"]
|
||||
else "none"
|
||||
)
|
||||
alertstate = (
|
||||
alert["metric"]["alertstate"]
|
||||
if "alertstate" in alert["metric"]
|
||||
else "none"
|
||||
)
|
||||
namespace = (
|
||||
alert["metric"]["namespace"]
|
||||
if "namespace" in alert["metric"]
|
||||
else "none"
|
||||
)
|
||||
severity = (
|
||||
alert["metric"]["severity"] if "severity" in alert["metric"] else "none"
|
||||
)
|
||||
alert = ChaosRunAlert(alertname, alertstate, namespace, severity)
|
||||
summary.chaos_alerts.append(alert)
|
||||
|
||||
post_critical_alerts = prom_cli.process_query(query)
|
||||
|
||||
for alert in post_critical_alerts:
|
||||
if "metric" in alert:
|
||||
alertname = (
|
||||
alert["metric"]["alertname"]
|
||||
if "alertname" in alert["metric"]
|
||||
else "none"
|
||||
)
|
||||
alertstate = (
|
||||
alert["metric"]["alertstate"]
|
||||
if "alertstate" in alert["metric"]
|
||||
else "none"
|
||||
)
|
||||
namespace = (
|
||||
alert["metric"]["namespace"]
|
||||
if "namespace" in alert["metric"]
|
||||
else "none"
|
||||
)
|
||||
severity = (
|
||||
alert["metric"]["severity"] if "severity" in alert["metric"] else "none"
|
||||
)
|
||||
alert = ChaosRunAlert(alertname, alertstate, namespace, severity)
|
||||
summary.post_chaos_alerts.append(alert)
|
||||
|
||||
during_critical_alerts_count = len(during_critical_alerts)
|
||||
post_critical_alerts_count = len(post_critical_alerts)
|
||||
firing_alerts = False
|
||||
|
||||
if during_critical_alerts_count > 0:
|
||||
firing_alerts = True
|
||||
|
||||
if post_critical_alerts_count > 0:
|
||||
firing_alerts = True
|
||||
|
||||
if not firing_alerts:
|
||||
logging.info("No critical alerts are firing!!")
|
||||
|
||||
|
||||
def metrics(
|
||||
prom_cli: KrknPrometheus,
|
||||
elastic: KrknElastic,
|
||||
run_uuid,
|
||||
start_time,
|
||||
end_time,
|
||||
metrics_profile,
|
||||
elastic_collect_metrics,
|
||||
elastic_metrics_index,
|
||||
) -> list[dict[str, list[(int, float)] | str]]:
|
||||
metrics_list: list[dict[str, list[(int, float)] | str]] = []
|
||||
if metrics_profile is None or os.path.exists(metrics_profile) is False:
|
||||
logging.error(f"{metrics_profile} alert profile does not exist")
|
||||
sys.exit(1)
|
||||
with open(metrics_profile) as profile:
|
||||
profile_yaml = yaml.safe_load(profile)
|
||||
if not profile_yaml["metrics"] or not isinstance(profile_yaml["metrics"], list):
|
||||
logging.error(
|
||||
f"{metrics_profile} wrong file format, alert profile must be "
|
||||
f"a valid yaml file containing a list of items with 3 properties: "
|
||||
f"expr, description, severity"
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
for metric_query in profile_yaml["metrics"]:
|
||||
if (
|
||||
sorted(metric_query.keys())
!= sorted(["query", "metricName", "instant"])
|
||||
):
|
||||
logging.error(f"wrong alert {metric_query}, skipping")
|
||||
metrics_result = prom_cli.process_prom_query_in_range(
|
||||
metric_query["query"],
|
||||
start_time=datetime.datetime.fromtimestamp(start_time),
|
||||
end_time=datetime.datetime.fromtimestamp(end_time),
|
||||
)
|
||||
|
||||
metric = {"name": metric_query["metricName"], "values": []}
|
||||
for returned_metric in metrics_result:
|
||||
if "values" in returned_metric:
|
||||
for value in returned_metric["values"]:
|
||||
try:
|
||||
metric["values"].append((value[0], float(value[1])))
|
||||
except ValueError:
|
||||
pass
|
||||
metrics_list.append(metric)
|
||||
|
||||
if elastic_collect_metrics and elastic:
|
||||
result = elastic.upload_metrics_to_elasticsearch(
|
||||
run_uuid=run_uuid, index=elastic_metrics_index, raw_data=metrics_list
|
||||
)
|
||||
if result == -1:
|
||||
logging.error("failed to save metrics on ElasticSearch")
|
||||
|
||||
return metrics_list
|
||||
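# A minimal sketch of the metrics profile consumed by metrics() above; the
# query and metric name are illustrative, while the top-level metrics list and
# the query, metricName and instant keys are the ones checked in the loop.
EXAMPLE_METRICS_PROFILE = """
metrics:
  - query: sum(rate(container_cpu_usage_seconds_total{image!=""}[2m]))
    metricName: clusterCpuUsage
    instant: false
"""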
115
krkn/scenario_plugins/abstract_scenario_plugin.py
Normal file
@@ -0,0 +1,115 @@
|
||||
import logging
|
||||
import time
|
||||
from abc import ABC, abstractmethod
|
||||
from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
|
||||
|
||||
from krkn import utils
|
||||
|
||||
|
||||
class AbstractScenarioPlugin(ABC):
|
||||
@abstractmethod
|
||||
def run(
|
||||
self,
|
||||
run_uuid: str,
|
||||
scenario: str,
|
||||
krkn_config: dict[str, any],
|
||||
lib_telemetry: KrknTelemetryOpenshift,
|
||||
scenario_telemetry: ScenarioTelemetry,
|
||||
) -> int:
|
||||
"""
|
||||
This method serves as the entry point for a ScenarioPlugin. To make the plugin loadable,
|
||||
the AbstractScenarioPlugin class must be extended, and this method must be implemented.
|
||||
Exceptions must not be propagated outside of this method.
|
||||
|
||||
:param run_uuid: the uuid of the chaos run generated by krkn for every single run
|
||||
:param scenario: the config file of the scenario that is currently executed
|
||||
:param krkn_config: the full dictionary representation of the `config.yaml`
|
||||
:param lib_telemetry: it is a composite object of all the
|
||||
krkn-lib objects and methods needed by a krkn plugin to run.
|
||||
:param scenario_telemetry: the `ScenarioTelemetry` object of the scenario that is currently executed
|
||||
:return: 0 if the scenario succeeded, 1 if it failed
|
||||
"""
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def get_scenario_types(self) -> list[str]:
|
||||
"""
|
||||
Indicates the scenario types specified in the `config.yaml`. For the plugin to be properly
|
||||
loaded, recognized and executed, it must be implemented and must return the matching `scenario_type` strings.
|
||||
One plugin can be mapped to one or many different strings, which must be unique across the other plugins, otherwise an exception
|
||||
will be thrown.
|
||||
|
||||
|
||||
:return: the corresponding scenario_type as a list of strings
|
||||
"""
|
||||
pass
|
||||
|
||||
def run_scenarios(
|
||||
self,
|
||||
run_uuid: str,
|
||||
scenarios_list: list[str],
|
||||
krkn_config: dict[str, any],
|
||||
telemetry: KrknTelemetryOpenshift,
|
||||
) -> tuple[list[str], list[ScenarioTelemetry]]:
|
||||
|
||||
scenario_telemetries: list[ScenarioTelemetry] = []
|
||||
failed_scenarios = []
|
||||
wait_duration = krkn_config["tunings"]["wait_duration"]
|
||||
for scenario_config in scenarios_list:
|
||||
if isinstance(scenario_config, list):
|
||||
logging.error(
|
||||
"post scenarios have been deprecated, please "
|
||||
"remove sub-lists from `scenarios` in config.yaml"
|
||||
)
|
||||
failed_scenarios.append(scenario_config)
|
||||
break
|
||||
|
||||
scenario_telemetry = ScenarioTelemetry()
|
||||
scenario_telemetry.scenario = scenario_config
|
||||
scenario_telemetry.start_timestamp = time.time()
|
||||
parsed_scenario_config = telemetry.set_parameters_base64(
|
||||
scenario_telemetry, scenario_config
|
||||
)
|
||||
|
||||
try:
|
||||
logging.info(
|
||||
f"Running {self.__class__.__name__}: {self.get_scenario_types()} -> {scenario_config}"
|
||||
)
|
||||
return_value = self.run(
|
||||
run_uuid,
|
||||
scenario_config,
|
||||
krkn_config,
|
||||
telemetry,
|
||||
scenario_telemetry,
|
||||
)
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
f"uncaught exception on scenario `run()` method: {e} "
|
||||
f"please report an issue on https://github.com/krkn-chaos/krkn"
|
||||
)
|
||||
return_value = 1
|
||||
|
||||
scenario_telemetry.exit_status = return_value
|
||||
scenario_telemetry.end_timestamp = time.time()
|
||||
utils.collect_and_put_ocp_logs(
|
||||
telemetry,
|
||||
parsed_scenario_config,
|
||||
telemetry.get_telemetry_request_id(),
|
||||
int(scenario_telemetry.start_timestamp),
|
||||
int(scenario_telemetry.end_timestamp),
|
||||
)
|
||||
utils.populate_cluster_events(
|
||||
scenario_telemetry,
|
||||
parsed_scenario_config,
|
||||
telemetry.get_lib_kubernetes(),
|
||||
int(scenario_telemetry.start_timestamp),
|
||||
int(scenario_telemetry.end_timestamp),
|
||||
)
|
||||
|
||||
if scenario_telemetry.exit_status != 0:
|
||||
failed_scenarios.append(scenario_config)
|
||||
scenario_telemetries.append(scenario_telemetry)
|
||||
logging.info(f"wating {wait_duration} before running the next scenario")
|
||||
time.sleep(wait_duration)
|
||||
return failed_scenarios, scenario_telemetries
|
||||
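Taken together, the two abstract methods above define the whole plugin contract: `run()` must swallow its own exceptions and return 0 or 1, and `get_scenario_types()` must return the scenario type strings used in `config.yaml`. A minimal sketch of a custom plugin that satisfies this contract (the class name and scenario type here are hypothetical, not part of the repository):

# Hypothetical example plugin, for illustration only.
import logging

from krkn_lib.models.telemetry import ScenarioTelemetry
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift

from krkn.scenario_plugins.abstract_scenario_plugin import AbstractScenarioPlugin


class ExampleScenarioPlugin(AbstractScenarioPlugin):
    def run(
        self,
        run_uuid: str,
        scenario: str,
        krkn_config: dict[str, any],
        lib_telemetry: KrknTelemetryOpenshift,
        scenario_telemetry: ScenarioTelemetry,
    ) -> int:
        try:
            logging.info("running example scenario %s", scenario)
            # chaos injection would go here
            return 0
        except Exception as e:
            # never let exceptions escape: log and report failure via the return value
            logging.error("ExampleScenarioPlugin failed: %s", e)
            return 1

    def get_scenario_types(self) -> list[str]:
        # must match the scenario type key used in config.yaml, unique across all plugins
        return ["example_scenarios"]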
@@ -0,0 +1,88 @@
|
||||
import logging
|
||||
import time
|
||||
import yaml
|
||||
from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
|
||||
from krkn_lib.utils import get_yaml_item_value
|
||||
from jinja2 import Template
|
||||
from krkn import cerberus
|
||||
from krkn.scenario_plugins.abstract_scenario_plugin import AbstractScenarioPlugin
|
||||
|
||||
|
||||
class ApplicationOutageScenarioPlugin(AbstractScenarioPlugin):
|
||||
def run(
|
||||
self,
|
||||
run_uuid: str,
|
||||
scenario: str,
|
||||
krkn_config: dict[str, any],
|
||||
lib_telemetry: KrknTelemetryOpenshift,
|
||||
scenario_telemetry: ScenarioTelemetry,
|
||||
) -> int:
|
||||
wait_duration = krkn_config["tunings"]["wait_duration"]
|
||||
try:
|
||||
with open(scenario, "r") as f:
|
||||
app_outage_config_yaml = yaml.full_load(f)
|
||||
scenario_config = app_outage_config_yaml["application_outage"]
|
||||
pod_selector = get_yaml_item_value(
|
||||
scenario_config, "pod_selector", "{}"
|
||||
)
|
||||
traffic_type = get_yaml_item_value(
|
||||
scenario_config, "block", "[Ingress, Egress]"
|
||||
)
|
||||
namespace = get_yaml_item_value(scenario_config, "namespace", "")
|
||||
duration = get_yaml_item_value(scenario_config, "duration", 60)
|
||||
|
||||
start_time = int(time.time())
|
||||
|
||||
network_policy_template = """---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
name: kraken-deny
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels: {{ pod_selector }}
|
||||
policyTypes: {{ traffic_type }}
|
||||
"""
|
||||
t = Template(network_policy_template)
|
||||
rendered_spec = t.render(
|
||||
pod_selector=pod_selector, traffic_type=traffic_type
|
||||
)
|
||||
yaml_spec = yaml.safe_load(rendered_spec)
|
||||
# Block the traffic by creating network policy
|
||||
logging.info("Creating the network policy")
|
||||
|
||||
lib_telemetry.get_lib_kubernetes().create_net_policy(
|
||||
yaml_spec, namespace
|
||||
)
|
||||
|
||||
# wait for the specified duration
|
||||
logging.info(
|
||||
"Waiting for the specified duration in the config: %s" % duration
|
||||
)
|
||||
time.sleep(duration)
|
||||
|
||||
# unblock the traffic by deleting the network policy
|
||||
logging.info("Deleting the network policy")
|
||||
lib_telemetry.get_lib_kubernetes().delete_net_policy(
|
||||
"kraken-deny", namespace
|
||||
)
|
||||
|
||||
logging.info(
|
||||
"End of scenario. Waiting for the specified duration: %s"
|
||||
% wait_duration
|
||||
)
|
||||
time.sleep(wait_duration)
|
||||
|
||||
end_time = int(time.time())
|
||||
cerberus.publish_kraken_status(krkn_config, [], start_time, end_time)
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"ApplicationOutageScenarioPlugin exiting due to Exception %s" % e
|
||||
)
|
||||
return 1
|
||||
else:
|
||||
return 0
|
||||
|
||||
def get_scenario_types(self) -> list[str]:
|
||||
return ["application_outages_scenarios"]
|
||||
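As an illustration of how the NetworkPolicy template above is rendered (the selector and policy types below are example values, not defaults shipped with krkn; the real ones come from the scenario file), the plugin ends up creating a policy such as:

# Hypothetical rendering of the NetworkPolicy template used above (example values only).
import yaml
from jinja2 import Template

template = Template(
    "---\n"
    "apiVersion: networking.k8s.io/v1\n"
    "kind: NetworkPolicy\n"
    "metadata:\n"
    "  name: kraken-deny\n"
    "spec:\n"
    "  podSelector:\n"
    "    matchLabels: {{ pod_selector }}\n"
    "  policyTypes: {{ traffic_type }}\n"
)

# the scenario file would provide these values; "{app: frontend}" and "[Ingress]" are examples
rendered = template.render(pod_selector="{app: frontend}", traffic_type="[Ingress]")
policy = yaml.safe_load(rendered)
# policy["spec"]["podSelector"]["matchLabels"] == {"app": "frontend"}
# policy["spec"]["policyTypes"] == ["Ingress"]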
197
krkn/scenario_plugins/arcaflow/arcaflow_scenario_plugin.py
Normal file
@@ -0,0 +1,197 @@
|
||||
import logging
|
||||
import os
|
||||
from pathlib import Path
|
||||
import arcaflow
|
||||
import yaml
|
||||
from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
|
||||
from krkn.scenario_plugins.abstract_scenario_plugin import AbstractScenarioPlugin
|
||||
from krkn.scenario_plugins.arcaflow.context_auth import ContextAuth
|
||||
|
||||
|
||||
class ArcaflowScenarioPlugin(AbstractScenarioPlugin):
|
||||
|
||||
def run(
|
||||
self,
|
||||
run_uuid: str,
|
||||
scenario: str,
|
||||
krkn_config: dict[str, any],
|
||||
lib_telemetry: KrknTelemetryOpenshift,
|
||||
scenario_telemetry: ScenarioTelemetry,
|
||||
) -> int:
|
||||
try:
|
||||
engine_args = self.build_args(scenario)
|
||||
status_code = self.run_workflow(
|
||||
engine_args, lib_telemetry.get_lib_kubernetes().get_kubeconfig_path()
|
||||
)
|
||||
return status_code
|
||||
except Exception as e:
|
||||
logging.error("ArcaflowScenarioPlugin exiting due to Exception %s" % e)
|
||||
return 1
|
||||
|
||||
def get_scenario_types(self) -> list[str]:
|
||||
return ["hog_scenarios", "arcaflow_scenario"]
|
||||
|
||||
def run_workflow(
|
||||
self, engine_args: arcaflow.EngineArgs, kubeconfig_path: str
|
||||
) -> int:
|
||||
self.set_arca_kubeconfig(engine_args, kubeconfig_path)
|
||||
exit_status = arcaflow.run(engine_args)
|
||||
return exit_status
|
||||
|
||||
def build_args(self, input_file: str) -> arcaflow.EngineArgs:
|
||||
"""sets the kubeconfig parsed by setArcaKubeConfig as an input to the arcaflow workflow"""
|
||||
current_path = Path().resolve()
|
||||
context = f"{current_path}/{Path(input_file).parent}"
|
||||
workflow = f"{context}/workflow.yaml"
|
||||
config = f"{context}/config.yaml"
|
||||
if not os.path.exists(context):
|
||||
raise Exception(
|
||||
"context folder for arcaflow workflow not found: {}".format(context)
|
||||
)
|
||||
if not os.path.exists(input_file):
|
||||
raise Exception(
|
||||
"input file for arcaflow workflow not found: {}".format(input_file)
|
||||
)
|
||||
if not os.path.exists(workflow):
|
||||
raise Exception(
|
||||
"workflow file for arcaflow workflow not found: {}".format(workflow)
|
||||
)
|
||||
if not os.path.exists(config):
|
||||
raise Exception(
|
||||
"configuration file for arcaflow workflow not found: {}".format(config)
|
||||
)
|
||||
|
||||
engine_args = arcaflow.EngineArgs()
|
||||
engine_args.context = context
|
||||
engine_args.config = config
|
||||
engine_args.workflow = workflow
|
||||
engine_args.input = f"{current_path}/{input_file}"
|
||||
return engine_args
|
||||
|
||||
def set_arca_kubeconfig(
|
||||
self, engine_args: arcaflow.EngineArgs, kubeconfig_path: str
|
||||
):
|
||||
|
||||
context_auth = ContextAuth()
|
||||
if not os.path.exists(kubeconfig_path):
|
||||
raise Exception("kubeconfig not found in {}".format(kubeconfig_path))
|
||||
|
||||
with open(kubeconfig_path, "r") as stream:
|
||||
try:
|
||||
kubeconfig = yaml.safe_load(stream)
|
||||
context_auth.fetch_auth_data(kubeconfig)
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"impossible to read kubeconfig file in: {}".format(kubeconfig_path)
|
||||
)
|
||||
raise e
|
||||
|
||||
kubeconfig_str = self.set_kubeconfig_auth(kubeconfig, context_auth)
|
||||
|
||||
with open(engine_args.input, "r") as stream:
|
||||
input_file = yaml.safe_load(stream)
|
||||
if "input_list" in input_file and isinstance(
|
||||
input_file["input_list"], list
|
||||
):
|
||||
for index, _ in enumerate(input_file["input_list"]):
|
||||
if isinstance(input_file["input_list"][index], dict):
|
||||
input_file["input_list"][index]["kubeconfig"] = kubeconfig_str
|
||||
else:
|
||||
input_file["kubeconfig"] = kubeconfig_str
|
||||
stream.close()
|
||||
with open(engine_args.input, "w") as stream:
|
||||
yaml.safe_dump(input_file, stream)
|
||||
|
||||
with open(engine_args.config, "r") as stream:
|
||||
config_file = yaml.safe_load(stream)
|
||||
if config_file["deployers"]["image"]["deployer_name"] == "kubernetes":
|
||||
kube_connection = self.set_kubernetes_deployer_auth(
|
||||
config_file["deployers"]["image"]["connection"], context_auth
|
||||
)
|
||||
config_file["deployers"]["image"]["connection"] = kube_connection
|
||||
with open(engine_args.config, "w") as stream:
|
||||
yaml.safe_dump(config_file, stream, explicit_start=True, width=4096)
|
||||
|
||||
def set_kubernetes_deployer_auth(
|
||||
self, deployer: any, context_auth: ContextAuth
|
||||
) -> any:
|
||||
if context_auth.clusterHost is not None:
|
||||
deployer["host"] = context_auth.clusterHost
|
||||
if context_auth.clientCertificateData is not None:
|
||||
deployer["cert"] = context_auth.clientCertificateData
|
||||
if context_auth.clientKeyData is not None:
|
||||
deployer["key"] = context_auth.clientKeyData
|
||||
if context_auth.clusterCertificateData is not None:
|
||||
deployer["cacert"] = context_auth.clusterCertificateData
|
||||
if context_auth.username is not None:
|
||||
deployer["username"] = context_auth.username
|
||||
if context_auth.password is not None:
|
||||
deployer["password"] = context_auth.password
|
||||
if context_auth.bearerToken is not None:
|
||||
deployer["bearerToken"] = context_auth.bearerToken
|
||||
return deployer
|
||||
|
||||
def set_kubeconfig_auth(self, kubeconfig: any, context_auth: ContextAuth) -> str:
|
||||
"""
|
||||
Builds an arcaflow-compatible kubeconfig representation and returns it as a string.
|
||||
In order to run arcaflow plugins in kubernetes/openshift the kubeconfig must contain client certificate/key
|
||||
and server certificate base64 encoded within the kubeconfig file itself in *-data fields. That is not always the
|
||||
case, infact kubeconfig may contain filesystem paths to those files, this function builds an arcaflow-compatible
|
||||
kubeconfig file and returns it as a string that can be safely included in input.yaml
|
||||
"""
|
||||
|
||||
if "current-context" not in kubeconfig.keys():
|
||||
raise Exception(
|
||||
"invalid kubeconfig file, impossible to determine current-context"
|
||||
)
|
||||
user_id = None
|
||||
cluster_id = None
|
||||
user_name = None
|
||||
cluster_name = None
|
||||
current_context = kubeconfig["current-context"]
|
||||
for context in kubeconfig["contexts"]:
|
||||
if context["name"] == current_context:
|
||||
user_name = context["context"]["user"]
|
||||
cluster_name = context["context"]["cluster"]
|
||||
if user_name is None:
|
||||
raise Exception(
|
||||
"user not set for context {} in kubeconfig file".format(current_context)
|
||||
)
|
||||
if cluster_name is None:
|
||||
raise Exception(
|
||||
"cluster not set for context {} in kubeconfig file".format(
|
||||
current_context
|
||||
)
|
||||
)
|
||||
|
||||
for index, user in enumerate(kubeconfig["users"]):
|
||||
if user["name"] == user_name:
|
||||
user_id = index
|
||||
for index, cluster in enumerate(kubeconfig["clusters"]):
|
||||
if cluster["name"] == cluster_name:
|
||||
cluster_id = index
|
||||
|
||||
if cluster_id is None:
|
||||
raise Exception(
|
||||
"no cluster {} found in kubeconfig users".format(cluster_name)
|
||||
)
|
||||
if "client-certificate" in kubeconfig["users"][user_id]["user"]:
|
||||
kubeconfig["users"][user_id]["user"][
|
||||
"client-certificate-data"
|
||||
] = context_auth.clientCertificateDataBase64
|
||||
del kubeconfig["users"][user_id]["user"]["client-certificate"]
|
||||
|
||||
if "client-key" in kubeconfig["users"][user_id]["user"]:
|
||||
kubeconfig["users"][user_id]["user"][
|
||||
"client-key-data"
|
||||
] = context_auth.clientKeyDataBase64
|
||||
del kubeconfig["users"][user_id]["user"]["client-key"]
|
||||
|
||||
if "certificate-authority" in kubeconfig["clusters"][cluster_id]["cluster"]:
|
||||
kubeconfig["clusters"][cluster_id]["cluster"][
|
||||
"certificate-authority-data"
|
||||
] = context_auth.clusterCertificateDataBase64
|
||||
del kubeconfig["clusters"][cluster_id]["cluster"]["certificate-authority"]
|
||||
kubeconfig_str = yaml.dump(kubeconfig)
|
||||
return kubeconfig_str
|
||||
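A rough sketch of the path-to-data conversion described in the docstring above (the file path is made up for the example): the file referenced by the kubeconfig entry is read, base64 encoded and moved into the corresponding `*-data` field that arcaflow expects inline.

# Hypothetical sketch of embedding a certificate path as base64 data.
import base64

cluster = {"certificate-authority": "/tmp/ca.crt"}  # example path from a kubeconfig cluster entry

with open(cluster["certificate-authority"], "rb") as f:
    ca_data = base64.b64encode(f.read()).decode("ascii")

# arcaflow needs the data inline, not a filesystem path
cluster["certificate-authority-data"] = ca_data
del cluster["certificate-authority"]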
@@ -1,4 +1,3 @@
|
||||
import yaml
|
||||
import os
|
||||
import base64
|
||||
|
||||
@@ -20,23 +19,25 @@ class ContextAuth:
|
||||
@property
|
||||
def clusterCertificateDataBase64(self):
|
||||
if self.clusterCertificateData is not None:
|
||||
return base64.b64encode(bytes(self.clusterCertificateData,'utf8')).decode("ascii")
|
||||
return base64.b64encode(bytes(self.clusterCertificateData, "utf8")).decode(
|
||||
"ascii"
|
||||
)
|
||||
return
|
||||
|
||||
@property
|
||||
def clientCertificateDataBase64(self):
|
||||
if self.clientCertificateData is not None:
|
||||
return base64.b64encode(bytes(self.clientCertificateData,'utf8')).decode("ascii")
|
||||
return base64.b64encode(bytes(self.clientCertificateData, "utf8")).decode(
|
||||
"ascii"
|
||||
)
|
||||
return
|
||||
|
||||
@property
|
||||
def clientKeyDataBase64(self):
|
||||
if self.clientKeyData is not None:
|
||||
return base64.b64encode(bytes(self.clientKeyData,"utf-8")).decode("ascii")
|
||||
return base64.b64encode(bytes(self.clientKeyData, "utf-8")).decode("ascii")
|
||||
return
|
||||
|
||||
|
||||
|
||||
def fetch_auth_data(self, kubeconfig: any):
|
||||
context_username = None
|
||||
current_context = kubeconfig["current-context"]
|
||||
@@ -56,8 +57,10 @@ class ContextAuth:
|
||||
for index, user in enumerate(kubeconfig["users"]):
|
||||
if user["name"] == context_username:
|
||||
user_id = index
|
||||
if user_id is None :
|
||||
raise Exception("user {0} not found in kubeconfig users".format(context_username))
|
||||
if user_id is None:
|
||||
raise Exception(
|
||||
"user {0} not found in kubeconfig users".format(context_username)
|
||||
)
|
||||
|
||||
for index, cluster in enumerate(kubeconfig["clusters"]):
|
||||
if cluster["name"] == self.clusterName:
|
||||
@@ -83,7 +86,9 @@ class ContextAuth:
|
||||
|
||||
if "client-key-data" in user:
|
||||
try:
|
||||
self.clientKeyData = base64.b64decode(user["client-key-data"]).decode('utf-8')
|
||||
self.clientKeyData = base64.b64decode(user["client-key-data"]).decode(
|
||||
"utf-8"
|
||||
)
|
||||
except Exception as e:
|
||||
raise Exception("impossible to decode client-key-data")
|
||||
|
||||
@@ -96,7 +101,9 @@ class ContextAuth:
|
||||
|
||||
if "client-certificate-data" in user:
|
||||
try:
|
||||
self.clientCertificateData = base64.b64decode(user["client-certificate-data"]).decode('utf-8')
|
||||
self.clientCertificateData = base64.b64decode(
|
||||
user["client-certificate-data"]
|
||||
).decode("utf-8")
|
||||
except Exception as e:
|
||||
raise Exception("impossible to decode client-certificate-data")
|
||||
|
||||
@@ -105,13 +112,17 @@ class ContextAuth:
|
||||
if "certificate-authority" in cluster:
|
||||
try:
|
||||
self.clusterCertificate = cluster["certificate-authority"]
|
||||
self.clusterCertificateData = self.read_file(cluster["certificate-authority"])
|
||||
self.clusterCertificateData = self.read_file(
|
||||
cluster["certificate-authority"]
|
||||
)
|
||||
except Exception as e:
|
||||
raise e
|
||||
|
||||
if "certificate-authority-data" in cluster:
|
||||
try:
|
||||
self.clusterCertificateData = base64.b64decode(cluster["certificate-authority-data"]).decode('utf-8')
|
||||
self.clusterCertificateData = base64.b64decode(
|
||||
cluster["certificate-authority-data"]
|
||||
).decode("utf-8")
|
||||
except Exception as e:
|
||||
raise Exception("impossible to decode certificate-authority-data")
|
||||
|
||||
@@ -124,19 +135,8 @@ class ContextAuth:
|
||||
if "token" in user:
|
||||
self.bearerToken = user["token"]
|
||||
|
||||
def read_file(self, filename:str) -> str:
|
||||
def read_file(self, filename: str) -> str:
|
||||
if not os.path.exists(filename):
|
||||
raise Exception("file not found {0} ".format(filename))
|
||||
with open(filename, "rb") as file_stream:
|
||||
return file_stream.read().decode('utf-8')
return file_stream.read().decode("utf-8")
|
||||
@@ -1,7 +1,9 @@
|
||||
import os
|
||||
import unittest
|
||||
|
||||
from context_auth import ContextAuth
|
||||
import yaml
|
||||
|
||||
from .context_auth import ContextAuth
|
||||
|
||||
|
||||
class TestCurrentContext(unittest.TestCase):
|
||||
@@ -9,7 +11,7 @@ class TestCurrentContext(unittest.TestCase):
|
||||
def get_kubeconfig_with_data(self) -> str:
|
||||
"""
|
||||
This function returns a test kubeconfig file as a string.
|
||||
|
||||
|
||||
:return: a test kubeconfig file in string format (for unit testing purposes)
|
||||
""" # NOQA
|
||||
return """apiVersion: v1
|
||||
@@ -71,7 +73,8 @@ users:
|
||||
def test_current_context(self):
|
||||
cwd = os.getcwd()
|
||||
current_context_data = ContextAuth()
|
||||
current_context_data.fetch_auth_data(self.get_kubeconfig_with_data())
|
||||
data = yaml.safe_load(self.get_kubeconfig_with_data())
|
||||
current_context_data.fetch_auth_data(data)
|
||||
self.assertIsNotNone(current_context_data.clusterCertificateData)
|
||||
self.assertIsNotNone(current_context_data.clientCertificateData)
|
||||
self.assertIsNotNone(current_context_data.clientKeyData)
|
||||
@@ -81,7 +84,8 @@ users:
|
||||
self.assertIsNotNone(current_context_data.clusterHost)
|
||||
|
||||
current_context_no_data = ContextAuth()
|
||||
current_context_no_data.fetch_auth_data(self.get_kubeconfig_with_paths())
|
||||
data = yaml.safe_load(self.get_kubeconfig_with_paths())
|
||||
current_context_no_data.fetch_auth_data(data)
|
||||
self.assertIsNotNone(current_context_no_data.clusterCertificate)
|
||||
self.assertIsNotNone(current_context_no_data.clusterCertificateData)
|
||||
self.assertIsNotNone(current_context_no_data.clientCertificate)
|
||||
@@ -92,9 +96,3 @@ users:
|
||||
self.assertIsNotNone(current_context_no_data.password)
|
||||
self.assertIsNotNone(current_context_no_data.bearerToken)
|
||||
self.assertIsNotNone(current_context_data.clusterHost)
|
232
krkn/scenario_plugins/container/container_scenario_plugin.py
Normal file
@@ -0,0 +1,232 @@
|
||||
import logging
|
||||
import random
|
||||
import time
|
||||
|
||||
import yaml
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
from krkn_lib.k8s.pods_monitor_pool import PodsMonitorPool
|
||||
from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
|
||||
from krkn_lib.utils import get_yaml_item_value
|
||||
|
||||
from krkn import cerberus
|
||||
from krkn.scenario_plugins.abstract_scenario_plugin import AbstractScenarioPlugin
|
||||
|
||||
|
||||
class ContainerScenarioPlugin(AbstractScenarioPlugin):
|
||||
def run(
|
||||
self,
|
||||
run_uuid: str,
|
||||
scenario: str,
|
||||
krkn_config: dict[str, any],
|
||||
lib_telemetry: KrknTelemetryOpenshift,
|
||||
scenario_telemetry: ScenarioTelemetry,
|
||||
) -> int:
|
||||
start_time = int(time.time())
|
||||
pool = PodsMonitorPool(lib_telemetry.get_lib_kubernetes())
|
||||
wait_duration = krkn_config["tunings"]["wait_duration"]
|
||||
try:
|
||||
with open(scenario, "r") as f:
|
||||
cont_scenario_config = yaml.full_load(f)
|
||||
|
||||
for kill_scenario in cont_scenario_config["scenarios"]:
|
||||
self.start_monitoring(
|
||||
kill_scenario, pool
|
||||
)
|
||||
killed_containers = self.container_killing_in_pod(
|
||||
kill_scenario, lib_telemetry.get_lib_kubernetes()
|
||||
)
|
||||
result = pool.join()
|
||||
if result.error:
|
||||
logging.error(
    f"ContainerScenarioPlugin pods failed to recover: {result.error}"
)
|
||||
return 1
|
||||
scenario_telemetry.affected_pods = result
|
||||
logging.info("Waiting for the specified duration: %s" % (wait_duration))
|
||||
time.sleep(wait_duration)
|
||||
|
||||
# capture end time
|
||||
end_time = int(time.time())
|
||||
|
||||
# publish cerberus status
|
||||
cerberus.publish_kraken_status(krkn_config, [], start_time, end_time)
|
||||
except (RuntimeError, Exception) as e:
    logging.error("ContainerScenarioPlugin exiting due to Exception %s" % e)
|
||||
return 1
|
||||
else:
|
||||
return 0
|
||||
|
||||
def get_scenario_types(self) -> list[str]:
|
||||
return ["container_scenarios"]
|
||||
|
||||
def start_monitoring(self, kill_scenario: dict, pool: PodsMonitorPool):
|
||||
|
||||
namespace_pattern = f"^{kill_scenario['namespace']}$"
|
||||
label_selector = kill_scenario["label_selector"]
|
||||
recovery_time = kill_scenario["expected_recovery_time"]
|
||||
pool.select_and_monitor_by_namespace_pattern_and_label(
|
||||
namespace_pattern=namespace_pattern,
|
||||
label_selector=label_selector,
|
||||
max_timeout=recovery_time,
|
||||
)
|
||||
|
||||
def container_killing_in_pod(self, cont_scenario, kubecli: KrknKubernetes):
|
||||
scenario_name = get_yaml_item_value(cont_scenario, "name", "")
|
||||
namespace = get_yaml_item_value(cont_scenario, "namespace", "*")
|
||||
label_selector = get_yaml_item_value(cont_scenario, "label_selector", None)
|
||||
pod_names = get_yaml_item_value(cont_scenario, "pod_names", [])
|
||||
container_name = get_yaml_item_value(cont_scenario, "container_name", "")
|
||||
kill_action = get_yaml_item_value(cont_scenario, "action", 1)
|
||||
kill_count = get_yaml_item_value(cont_scenario, "count", 1)
|
||||
if not isinstance(kill_action, int):
|
||||
logging.error(
|
||||
"Please make sure the action parameter defined in the "
|
||||
"config is an integer"
|
||||
)
|
||||
raise RuntimeError()
|
||||
if (kill_action < 1) or (kill_action > 15):
|
||||
logging.error("Only 1-15 kill signals are supported.")
|
||||
raise RuntimeError()
|
||||
kill_action = "kill " + str(kill_action)
|
||||
if type(pod_names) != list:
|
||||
logging.error("Please make sure your pod_names are in a list format")
|
||||
# removed_exit
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
if len(pod_names) == 0:
|
||||
if namespace == "*":
|
||||
# returns a list of [pod name, namespace] pairs
|
||||
pods = kubecli.get_all_pods(label_selector)
|
||||
else:
|
||||
# Only returns pod names
|
||||
pods = kubecli.list_pods(namespace, label_selector)
|
||||
else:
|
||||
if namespace == "*":
|
||||
logging.error(
|
||||
"You must specify the namespace to kill a container in a specific pod"
|
||||
)
|
||||
logging.error("Scenario " + scenario_name + " failed")
|
||||
# removed_exit
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
pods = pod_names
|
||||
# get container and pod name
|
||||
container_pod_list = []
|
||||
for pod in pods:
|
||||
if type(pod) == list:
|
||||
pod_output = kubecli.get_pod_info(pod[0], pod[1])
|
||||
container_names = [
|
||||
container.name for container in pod_output.containers
|
||||
]
|
||||
|
||||
container_pod_list.append([pod[0], pod[1], container_names])
|
||||
else:
|
||||
pod_output = kubecli.get_pod_info(pod, namespace)
|
||||
container_names = [
|
||||
container.name for container in pod_output.containers
|
||||
]
|
||||
container_pod_list.append([pod, namespace, container_names])
|
||||
killed_count = 0
|
||||
killed_container_list = []
|
||||
while killed_count < kill_count:
|
||||
if len(container_pod_list) == 0:
|
||||
logging.error(
|
||||
"Trying to kill more containers than were found, try lowering kill count"
|
||||
)
|
||||
logging.error("Scenario " + scenario_name + " failed")
|
||||
# removed_exit
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
selected_container_pod = container_pod_list[
|
||||
random.randint(0, len(container_pod_list) - 1)
|
||||
]
|
||||
for c_name in selected_container_pod[2]:
|
||||
if container_name != "":
|
||||
if c_name == container_name:
|
||||
killed_container_list.append(
|
||||
[
|
||||
selected_container_pod[0],
|
||||
selected_container_pod[1],
|
||||
c_name,
|
||||
]
|
||||
)
|
||||
self.retry_container_killing(
|
||||
kill_action,
|
||||
selected_container_pod[0],
|
||||
selected_container_pod[1],
|
||||
c_name,
|
||||
kubecli,
|
||||
)
|
||||
break
|
||||
else:
|
||||
killed_container_list.append(
|
||||
[selected_container_pod[0], selected_container_pod[1], c_name]
|
||||
)
|
||||
self.retry_container_killing(
|
||||
kill_action,
|
||||
selected_container_pod[0],
|
||||
selected_container_pod[1],
|
||||
c_name,
|
||||
kubecli,
|
||||
)
|
||||
break
|
||||
container_pod_list.remove(selected_container_pod)
|
||||
killed_count += 1
|
||||
logging.info("Scenario " + scenario_name + " successfully injected")
|
||||
return killed_container_list
|
||||
|
||||
def retry_container_killing(
|
||||
self, kill_action, podname, namespace, container_name, kubecli: KrknKubernetes
|
||||
):
|
||||
i = 0
|
||||
while i < 5:
|
||||
logging.info(
|
||||
"Killing container %s in pod %s (ns %s)"
|
||||
% (str(container_name), str(podname), str(namespace))
|
||||
)
|
||||
response = kubecli.exec_cmd_in_pod(
|
||||
kill_action, podname, namespace, container_name
|
||||
)
|
||||
i += 1
|
||||
# Blank response means it is done
|
||||
if not response:
|
||||
break
|
||||
elif (
|
||||
"unauthorized" in response.lower()
|
||||
or "authorization" in response.lower()
|
||||
):
|
||||
time.sleep(2)
|
||||
continue
|
||||
else:
|
||||
logging.warning(response)
|
||||
continue
|
||||
|
||||
def check_failed_containers(
|
||||
self, killed_container_list, wait_time, kubecli: KrknKubernetes
|
||||
):
|
||||
|
||||
container_ready = []
|
||||
timer = 0
|
||||
while timer <= wait_time:
|
||||
for killed_container in killed_container_list:
|
||||
# killed_container is [pod name, namespace, container name]
|
||||
pod_output = kubecli.get_pod_info(
|
||||
killed_container[0], killed_container[1]
|
||||
)
|
||||
|
||||
for container in pod_output.containers:
|
||||
if container.name == killed_container[2]:
|
||||
if container.ready:
|
||||
container_ready.append(killed_container)
|
||||
if len(container_ready) != 0:
|
||||
for item in container_ready:
|
||||
killed_container_list.remove(item)
|
||||
if killed_container_list is None or len(killed_container_list) == 0:
|
||||
return []
|
||||
timer += 5
|
||||
logging.info("Waiting 5 seconds for containers to become ready")
|
||||
time.sleep(5)
|
||||
return killed_container_list
|
||||
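For reference, a sketch of a container scenario entry matching the fields read by start_monitoring() and container_killing_in_pod() above (the values are illustrative, not defaults shipped with krkn):

# Hypothetical parsed content of a container scenario file (illustrative values only).
cont_scenario_config = {
    "scenarios": [
        {
            "name": "kill etcd container",   # used only for logging
            "namespace": "openshift-etcd",   # "*" means any namespace
            "label_selector": "app=etcd",
            "container_name": "etcd",        # empty string kills any container in the pod
            "action": 9,                     # kill signal 1-15, turned into "kill 9"
            "count": 1,                      # how many containers to kill
            "expected_recovery_time": 120,   # max seconds the pods monitor waits for recovery
        }
    ]
}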
@@ -2,28 +2,37 @@ import random
|
||||
import logging
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
|
||||
|
||||
# krkn_lib
|
||||
# Pick a random managedcluster with specified label selector
|
||||
def get_managedcluster(
|
||||
managedcluster_name,
|
||||
label_selector,
|
||||
instance_kill_count,
|
||||
kubecli: KrknKubernetes):
|
||||
managedcluster_name, label_selector, instance_kill_count, kubecli: KrknKubernetes
|
||||
):
|
||||
|
||||
if managedcluster_name in kubecli.list_killable_managedclusters():
|
||||
return [managedcluster_name]
|
||||
elif managedcluster_name:
|
||||
logging.info("managedcluster with provided managedcluster_name does not exist or the managedcluster might " "be in unavailable state.")
|
||||
logging.info(
|
||||
"managedcluster with provided managedcluster_name does not exist or the managedcluster might "
|
||||
"be in unavailable state."
|
||||
)
|
||||
managedclusters = kubecli.list_killable_managedclusters(label_selector)
|
||||
if not managedclusters:
|
||||
raise Exception("Available managedclusters with the provided label selector do not exist")
|
||||
logging.info("Available managedclusters with the label selector %s: %s" % (label_selector, managedclusters))
|
||||
raise Exception(
|
||||
"Available managedclusters with the provided label selector do not exist"
|
||||
)
|
||||
logging.info(
|
||||
"Available managedclusters with the label selector %s: %s"
|
||||
% (label_selector, managedclusters)
|
||||
)
|
||||
number_of_managedclusters = len(managedclusters)
|
||||
if instance_kill_count == number_of_managedclusters:
|
||||
return managedclusters
|
||||
managedclusters_to_return = []
|
||||
for i in range(instance_kill_count):
|
||||
managedcluster_to_add = managedclusters[random.randint(0, len(managedclusters) - 1)]
|
||||
managedcluster_to_add = managedclusters[
|
||||
random.randint(0, len(managedclusters) - 1)
|
||||
]
|
||||
managedclusters_to_return.append(managedcluster_to_add)
|
||||
managedclusters.remove(managedcluster_to_add)
|
||||
return managedclusters_to_return
|
||||
@@ -0,0 +1,127 @@
|
||||
import logging
|
||||
import time
|
||||
|
||||
import yaml
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
|
||||
from krkn_lib.utils import get_yaml_item_value
|
||||
|
||||
from krkn import cerberus, utils
|
||||
from krkn.scenario_plugins.abstract_scenario_plugin import AbstractScenarioPlugin
|
||||
from krkn.scenario_plugins.managed_cluster.common_functions import get_managedcluster
|
||||
from krkn.scenario_plugins.managed_cluster.scenarios import Scenarios
|
||||
|
||||
|
||||
class ManagedClusterScenarioPlugin(AbstractScenarioPlugin):
|
||||
def run(
|
||||
self,
|
||||
run_uuid: str,
|
||||
scenario: str,
|
||||
krkn_config: dict[str, any],
|
||||
lib_telemetry: KrknTelemetryOpenshift,
|
||||
scenario_telemetry: ScenarioTelemetry,
|
||||
) -> int:
|
||||
with open(scenario, "r") as f:
|
||||
scenario = yaml.full_load(f)
|
||||
for managedcluster_scenario in scenario["managedcluster_scenarios"]:
|
||||
managedcluster_scenario_object = Scenarios(
|
||||
lib_telemetry.get_lib_kubernetes()
|
||||
)
|
||||
if managedcluster_scenario["actions"]:
|
||||
for action in managedcluster_scenario["actions"]:
|
||||
start_time = int(time.time())
|
||||
try:
|
||||
self.inject_managedcluster_scenario(
|
||||
action,
|
||||
managedcluster_scenario,
|
||||
managedcluster_scenario_object,
|
||||
lib_telemetry.get_lib_kubernetes(),
|
||||
)
|
||||
end_time = int(time.time())
|
||||
cerberus.get_status(krkn_config, start_time, end_time)
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"ManagedClusterScenarioPlugin exiting due to Exception %s"
|
||||
% e
|
||||
)
|
||||
return 1
|
||||
else:
|
||||
return 0
|
||||
|
||||
def inject_managedcluster_scenario(
|
||||
self,
|
||||
action,
|
||||
managedcluster_scenario,
|
||||
managedcluster_scenario_object,
|
||||
kubecli: KrknKubernetes,
|
||||
):
|
||||
# Get the managedcluster scenario configurations
|
||||
run_kill_count = get_yaml_item_value(managedcluster_scenario, "runs", 1)
|
||||
instance_kill_count = get_yaml_item_value(
|
||||
managedcluster_scenario, "instance_count", 1
|
||||
)
|
||||
managedcluster_name = get_yaml_item_value(
|
||||
managedcluster_scenario, "managedcluster_name", ""
|
||||
)
|
||||
label_selector = get_yaml_item_value(
|
||||
managedcluster_scenario, "label_selector", ""
|
||||
)
|
||||
timeout = get_yaml_item_value(managedcluster_scenario, "timeout", 120)
|
||||
# Get the managedcluster to apply the scenario
|
||||
if managedcluster_name:
|
||||
managedcluster_name_list = managedcluster_name.split(",")
|
||||
else:
|
||||
managedcluster_name_list = [managedcluster_name]
|
||||
for single_managedcluster_name in managedcluster_name_list:
|
||||
managedclusters = get_managedcluster(
|
||||
single_managedcluster_name, label_selector, instance_kill_count, kubecli
|
||||
)
|
||||
for single_managedcluster in managedclusters:
|
||||
if action == "managedcluster_start_scenario":
|
||||
managedcluster_scenario_object.managedcluster_start_scenario(
|
||||
run_kill_count, single_managedcluster, timeout
|
||||
)
|
||||
elif action == "managedcluster_stop_scenario":
|
||||
managedcluster_scenario_object.managedcluster_stop_scenario(
|
||||
run_kill_count, single_managedcluster, timeout
|
||||
)
|
||||
elif action == "managedcluster_stop_start_scenario":
|
||||
managedcluster_scenario_object.managedcluster_stop_start_scenario(
|
||||
run_kill_count, single_managedcluster, timeout
|
||||
)
|
||||
elif action == "managedcluster_termination_scenario":
|
||||
managedcluster_scenario_object.managedcluster_termination_scenario(
|
||||
run_kill_count, single_managedcluster, timeout
|
||||
)
|
||||
elif action == "managedcluster_reboot_scenario":
|
||||
managedcluster_scenario_object.managedcluster_reboot_scenario(
|
||||
run_kill_count, single_managedcluster, timeout
|
||||
)
|
||||
elif action == "stop_start_klusterlet_scenario":
|
||||
managedcluster_scenario_object.stop_start_klusterlet_scenario(
|
||||
run_kill_count, single_managedcluster, timeout
|
||||
)
|
||||
elif action == "start_klusterlet_scenario":
|
||||
managedcluster_scenario_object.start_klusterlet_scenario(
|
||||
run_kill_count, single_managedcluster, timeout
|
||||
)
|
||||
elif action == "stop_klusterlet_scenario":
|
||||
managedcluster_scenario_object.stop_klusterlet_scenario(
|
||||
run_kill_count, single_managedcluster, timeout
|
||||
)
|
||||
elif action == "managedcluster_crash_scenario":
|
||||
managedcluster_scenario_object.managedcluster_crash_scenario(
|
||||
run_kill_count, single_managedcluster, timeout
|
||||
)
|
||||
else:
|
||||
logging.info(
|
||||
"There is no managedcluster action that matches %s, skipping scenario"
|
||||
% action
|
||||
)
|
||||
|
||||
def get_managedcluster_scenario_object(self, kubecli: KrknKubernetes):
|
||||
return Scenarios(kubecli)
|
||||
|
||||
def get_scenario_types(self) -> list[str]:
|
||||
return ["managedcluster_scenarios"]
|
||||
@@ -2,104 +2,148 @@ from jinja2 import Environment, FileSystemLoader
|
||||
import os
|
||||
import time
|
||||
import logging
|
||||
import sys
|
||||
import yaml
|
||||
import kraken.managedcluster_scenarios.common_managedcluster_functions as common_managedcluster_functions
|
||||
import krkn.scenario_plugins.managed_cluster.common_functions as common_managedcluster_functions
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
|
||||
|
||||
class GENERAL:
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
|
||||
# krkn_lib
|
||||
class managedcluster_scenarios():
|
||||
class Scenarios:
|
||||
kubecli: KrknKubernetes
|
||||
|
||||
def __init__(self, kubecli: KrknKubernetes):
|
||||
self.kubecli = kubecli
|
||||
self.general = GENERAL()
|
||||
|
||||
# managedcluster scenario to start the managedcluster
|
||||
def managedcluster_start_scenario(self, instance_kill_count, managedcluster, timeout):
|
||||
def managedcluster_start_scenario(
|
||||
self, instance_kill_count, managedcluster, timeout
|
||||
):
|
||||
for _ in range(instance_kill_count):
|
||||
try:
|
||||
logging.info("Starting managedcluster_start_scenario injection")
|
||||
file_loader = FileSystemLoader(os.path.abspath(os.path.dirname(__file__)))
|
||||
file_loader = FileSystemLoader(
|
||||
os.path.abspath(os.path.dirname(__file__))
|
||||
)
|
||||
env = Environment(loader=file_loader, autoescape=False)
|
||||
template = env.get_template("manifestwork.j2")
|
||||
body = yaml.safe_load(
|
||||
template.render(managedcluster_name=managedcluster,
|
||||
template.render(
|
||||
managedcluster_name=managedcluster,
|
||||
args="""kubectl scale deployment.apps/klusterlet --replicas 3 &
|
||||
kubectl scale deployment.apps/klusterlet-registration-agent --replicas 1 -n open-cluster-management-agent""")
|
||||
kubectl scale deployment.apps/klusterlet-registration-agent --replicas 1 -n open-cluster-management-agent""",
|
||||
)
|
||||
)
|
||||
self.kubecli.create_manifestwork(body, managedcluster)
|
||||
logging.info("managedcluster_start_scenario has been successfully injected!")
|
||||
logging.info(
|
||||
"managedcluster_start_scenario has been successfully injected!"
|
||||
)
|
||||
logging.info("Waiting for the specified timeout: %s" % timeout)
|
||||
common_managedcluster_functions.wait_for_available_status(managedcluster, timeout, self.kubecli)
|
||||
common_managedcluster_functions.wait_for_available_status(
|
||||
managedcluster, timeout, self.kubecli
|
||||
)
|
||||
except Exception as e:
|
||||
logging.error("managedcluster scenario exiting due to Exception %s" % e)
|
||||
sys.exit(1)
|
||||
raise e
|
||||
finally:
|
||||
logging.info("Deleting manifestworks")
|
||||
self.kubecli.delete_manifestwork(managedcluster)
|
||||
|
||||
# managedcluster scenario to stop the managedcluster
|
||||
def managedcluster_stop_scenario(self, instance_kill_count, managedcluster, timeout):
|
||||
def managedcluster_stop_scenario(
|
||||
self, instance_kill_count, managedcluster, timeout
|
||||
):
|
||||
for _ in range(instance_kill_count):
|
||||
try:
|
||||
logging.info("Starting managedcluster_stop_scenario injection")
|
||||
file_loader = FileSystemLoader(os.path.abspath(os.path.dirname(__file__)),encoding='utf-8')
|
||||
file_loader = FileSystemLoader(
|
||||
os.path.abspath(os.path.dirname(__file__)), encoding="utf-8"
|
||||
)
|
||||
env = Environment(loader=file_loader, autoescape=False)
|
||||
template = env.get_template("manifestwork.j2")
|
||||
body = yaml.safe_load(
|
||||
template.render(managedcluster_name=managedcluster,
|
||||
template.render(
|
||||
managedcluster_name=managedcluster,
|
||||
args="""kubectl scale deployment.apps/klusterlet --replicas 0 &&
|
||||
kubectl scale deployment.apps/klusterlet-registration-agent --replicas 0 -n open-cluster-management-agent""")
|
||||
kubectl scale deployment.apps/klusterlet-registration-agent --replicas 0 -n open-cluster-management-agent""",
|
||||
)
|
||||
)
|
||||
self.kubecli.create_manifestwork(body, managedcluster)
|
||||
logging.info("managedcluster_stop_scenario has been successfully injected!")
|
||||
logging.info(
|
||||
"managedcluster_stop_scenario has been successfully injected!"
|
||||
)
|
||||
logging.info("Waiting for the specified timeout: %s" % timeout)
|
||||
common_managedcluster_functions.wait_for_unavailable_status(managedcluster, timeout, self.kubecli)
|
||||
common_managedcluster_functions.wait_for_unavailable_status(
|
||||
managedcluster, timeout, self.kubecli
|
||||
)
|
||||
except Exception as e:
|
||||
logging.error("managedcluster scenario exiting due to Exception %s" % e)
|
||||
sys.exit(1)
|
||||
raise e
|
||||
finally:
|
||||
logging.info("Deleting manifestworks")
|
||||
self.kubecli.delete_manifestwork(managedcluster)
|
||||
|
||||
# managedcluster scenario to stop and then start the managedcluster
|
||||
def managedcluster_stop_start_scenario(self, instance_kill_count, managedcluster, timeout):
|
||||
def managedcluster_stop_start_scenario(
|
||||
self, instance_kill_count, managedcluster, timeout
|
||||
):
|
||||
logging.info("Starting managedcluster_stop_start_scenario injection")
|
||||
self.managedcluster_stop_scenario(instance_kill_count, managedcluster, timeout)
|
||||
time.sleep(10)
|
||||
self.managedcluster_start_scenario(instance_kill_count, managedcluster, timeout)
|
||||
logging.info("managedcluster_stop_start_scenario has been successfully injected!")
|
||||
logging.info(
|
||||
"managedcluster_stop_start_scenario has been successfully injected!"
|
||||
)
|
||||
|
||||
# managedcluster scenario to terminate the managedcluster
|
||||
def managedcluster_termination_scenario(self, instance_kill_count, managedcluster, timeout):
|
||||
logging.info("managedcluster termination is not implemented, " "no action is going to be taken")
|
||||
def managedcluster_termination_scenario(
|
||||
self, instance_kill_count, managedcluster, timeout
|
||||
):
|
||||
logging.info(
|
||||
"managedcluster termination is not implemented, "
|
||||
"no action is going to be taken"
|
||||
)
|
||||
|
||||
# managedcluster scenario to reboot the managedcluster
|
||||
def managedcluster_reboot_scenario(self, instance_kill_count, managedcluster, timeout):
|
||||
logging.info("managedcluster reboot is not implemented," " no action is going to be taken")
|
||||
def managedcluster_reboot_scenario(
|
||||
self, instance_kill_count, managedcluster, timeout
|
||||
):
|
||||
logging.info(
|
||||
"managedcluster reboot is not implemented,"
|
||||
" no action is going to be taken"
|
||||
)
|
||||
|
||||
# managedcluster scenario to start the klusterlet
|
||||
def start_klusterlet_scenario(self, instance_kill_count, managedcluster, timeout):
|
||||
for _ in range(instance_kill_count):
|
||||
try:
|
||||
logging.info("Starting start_klusterlet_scenario injection")
|
||||
file_loader = FileSystemLoader(os.path.abspath(os.path.dirname(__file__)))
|
||||
file_loader = FileSystemLoader(
|
||||
os.path.abspath(os.path.dirname(__file__))
|
||||
)
|
||||
env = Environment(loader=file_loader, autoescape=False)
|
||||
template = env.get_template("manifestwork.j2")
|
||||
body = yaml.safe_load(
|
||||
template.render(managedcluster_name=managedcluster,
|
||||
args="""kubectl scale deployment.apps/klusterlet --replicas 3""")
|
||||
template.render(
|
||||
managedcluster_name=managedcluster,
|
||||
args="""kubectl scale deployment.apps/klusterlet --replicas 3""",
|
||||
)
|
||||
)
|
||||
self.kubecli.create_manifestwork(body, managedcluster)
|
||||
logging.info("start_klusterlet_scenario has been successfully injected!")
|
||||
time.sleep(30) # until https://github.com/open-cluster-management-io/OCM/issues/118 gets solved
|
||||
logging.info(
|
||||
"start_klusterlet_scenario has been successfully injected!"
|
||||
)
|
||||
time.sleep(
|
||||
30
|
||||
) # until https://github.com/open-cluster-management-io/OCM/issues/118 gets solved
|
||||
except Exception as e:
|
||||
logging.error("managedcluster scenario exiting due to Exception %s" % e)
|
||||
sys.exit(1)
|
||||
raise e
|
||||
finally:
|
||||
logging.info("Deleting manifestworks")
|
||||
self.kubecli.delete_manifestwork(managedcluster)
|
||||
@@ -109,25 +153,33 @@ class managedcluster_scenarios():
|
||||
for _ in range(instance_kill_count):
|
||||
try:
|
||||
logging.info("Starting stop_klusterlet_scenario injection")
|
||||
file_loader = FileSystemLoader(os.path.abspath(os.path.dirname(__file__)))
|
||||
file_loader = FileSystemLoader(
|
||||
os.path.abspath(os.path.dirname(__file__))
|
||||
)
|
||||
env = Environment(loader=file_loader, autoescape=False)
|
||||
template = env.get_template("manifestwork.j2")
|
||||
body = yaml.safe_load(
|
||||
template.render(managedcluster_name=managedcluster,
|
||||
args="""kubectl scale deployment.apps/klusterlet --replicas 0""")
|
||||
template.render(
|
||||
managedcluster_name=managedcluster,
|
||||
args="""kubectl scale deployment.apps/klusterlet --replicas 0""",
|
||||
)
|
||||
)
|
||||
self.kubecli.create_manifestwork(body, managedcluster)
|
||||
logging.info("stop_klusterlet_scenario has been successfully injected!")
|
||||
time.sleep(30) # until https://github.com/open-cluster-management-io/OCM/issues/118 gets solved
|
||||
time.sleep(
|
||||
30
|
||||
) # until https://github.com/open-cluster-management-io/OCM/issues/118 gets solved
|
||||
except Exception as e:
|
||||
logging.error("managedcluster scenario exiting due to Exception %s" % e)
|
||||
sys.exit(1)
|
||||
raise e
|
||||
finally:
|
||||
logging.info("Deleting manifestworks")
|
||||
self.kubecli.delete_manifestwork(managedcluster)
|
||||
|
||||
# managedcluster scenario to stop and start the klusterlet
|
||||
def stop_start_klusterlet_scenario(self, instance_kill_count, managedcluster, timeout):
|
||||
def stop_start_klusterlet_scenario(
|
||||
self, instance_kill_count, managedcluster, timeout
|
||||
):
|
||||
logging.info("Starting stop_start_klusterlet_scenario injection")
|
||||
self.stop_klusterlet_scenario(instance_kill_count, managedcluster, timeout)
|
||||
time.sleep(10)
|
||||
@@ -135,6 +187,10 @@ class managedcluster_scenarios():
|
||||
logging.info("stop_start_klusterlet_scenario has been successfully injected!")
|
||||
|
||||
# managedcluster scenario to crash the managedcluster
|
||||
def managedcluster_crash_scenario(self, instance_kill_count, managedcluster, timeout):
|
||||
logging.info("managedcluster crash scenario is not implemented, " "no action is going to be taken")
|
||||
|
||||
def managedcluster_crash_scenario(
|
||||
self, instance_kill_count, managedcluster, timeout
|
||||
):
|
||||
logging.info(
|
||||
"managedcluster crash scenario is not implemented, "
|
||||
"no action is going to be taken"
|
||||
)
|
||||
93
krkn/scenario_plugins/native/native_scenario_plugin.py
Normal file
93
krkn/scenario_plugins/native/native_scenario_plugin.py
Normal file
@@ -0,0 +1,93 @@
|
||||
from krkn.scenario_plugins.abstract_scenario_plugin import AbstractScenarioPlugin
|
||||
from krkn.scenario_plugins.native.plugins import PLUGINS
|
||||
from krkn_lib.k8s.pods_monitor_pool import PodsMonitorPool
|
||||
from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
|
||||
from typing import Any
|
||||
import logging
|
||||
|
||||
|
||||
class NativeScenarioPlugin(AbstractScenarioPlugin):
|
||||
|
||||
def run(
|
||||
self,
|
||||
run_uuid: str,
|
||||
scenario: str,
|
||||
krkn_config: dict[str, any],
|
||||
lib_telemetry: KrknTelemetryOpenshift,
|
||||
scenario_telemetry: ScenarioTelemetry,
|
||||
) -> int:
|
||||
pool = PodsMonitorPool(lib_telemetry.get_lib_kubernetes())
|
||||
kill_scenarios = [
|
||||
kill_scenario
|
||||
for kill_scenario in PLUGINS.unserialize_scenario(scenario)
|
||||
if kill_scenario["id"] == "kill-pods"
|
||||
]
|
||||
|
||||
try:
|
||||
self.start_monitoring(pool, kill_scenarios)
|
||||
PLUGINS.run(
|
||||
scenario,
|
||||
lib_telemetry.get_lib_kubernetes().get_kubeconfig_path(),
|
||||
krkn_config,
|
||||
run_uuid,
|
||||
)
|
||||
result = pool.join()
|
||||
scenario_telemetry.affected_pods = result
|
||||
if result.error:
|
||||
logging.error(f"NativeScenarioPlugin unrecovered pods: {result.error}")
|
||||
return 1
|
||||
|
||||
except Exception as e:
|
||||
logging.error("NativeScenarioPlugin exiting due to Exception %s" % e)
|
||||
pool.cancel()
|
||||
return 1
|
||||
else:
|
||||
return 0
|
||||
|
||||
def get_scenario_types(self) -> list[str]:
|
||||
return [
|
||||
"pod_disruption_scenarios",
|
||||
"pod_network_scenarios",
|
||||
"vmware_node_scenarios",
|
||||
"ibmcloud_node_scenarios",
|
||||
]
|
||||
|
||||
def start_monitoring(self, pool: PodsMonitorPool, scenarios: list[Any]):
|
||||
for kill_scenario in scenarios:
|
||||
recovery_time = kill_scenario["config"]["krkn_pod_recovery_time"]
|
||||
if (
|
||||
"namespace_pattern" in kill_scenario["config"]
|
||||
and "label_selector" in kill_scenario["config"]
|
||||
):
|
||||
namespace_pattern = kill_scenario["config"]["namespace_pattern"]
|
||||
label_selector = kill_scenario["config"]["label_selector"]
|
||||
pool.select_and_monitor_by_namespace_pattern_and_label(
|
||||
namespace_pattern=namespace_pattern,
|
||||
label_selector=label_selector,
|
||||
max_timeout=recovery_time,
|
||||
)
|
||||
logging.info(
|
||||
f"waiting {recovery_time} seconds for pod recovery, "
|
||||
f"pod label selector: {label_selector} namespace pattern: {namespace_pattern}"
|
||||
)
|
||||
|
||||
elif (
|
||||
"namespace_pattern" in kill_scenario["config"]
|
||||
and "name_pattern" in kill_scenario["config"]
|
||||
):
|
||||
namespace_pattern = kill_scenario["config"]["namespace_pattern"]
|
||||
name_pattern = kill_scenario["config"]["name_pattern"]
|
||||
pool.select_and_monitor_by_name_pattern_and_namespace_pattern(
|
||||
pod_name_pattern=name_pattern,
|
||||
namespace_pattern=namespace_pattern,
|
||||
max_timeout=recovery_time,
|
||||
)
|
||||
logging.info(
|
||||
f"waiting {recovery_time} seconds for pod recovery, "
|
||||
f"pod name pattern: {name_pattern} namespace pattern: {namespace_pattern}"
|
||||
)
|
||||
else:
|
||||
raise Exception(
|
||||
f"impossible to determine monitor parameters, check {kill_scenario} configuration"
|
||||
)
|
||||
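As an illustration of the two monitor configurations accepted by start_monitoring() above (the values are examples, not shipped defaults), a kill-pods entry must provide `krkn_pod_recovery_time` plus either a namespace pattern and label selector, or a namespace pattern and pod name pattern:

# Hypothetical kill-pods scenario entries accepted by start_monitoring().
by_label = {
    "id": "kill-pods",
    "config": {
        "namespace_pattern": "^openshift-etcd$",
        "label_selector": "app=etcd",
        "krkn_pod_recovery_time": 120,
    },
}

by_name = {
    "id": "kill-pods",
    "config": {
        "namespace_pattern": "^openshift-.*$",
        "name_pattern": "etcd-.*",
        "krkn_pod_recovery_time": 120,
    },
}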
@@ -18,17 +18,14 @@ from kubernetes.client.api.batch_v1_api import BatchV1Api as BatchV1Api
|
||||
@dataclass
|
||||
class NetworkScenarioConfig:
|
||||
|
||||
node_interface_name: typing.Dict[
|
||||
str, typing.List[str]
|
||||
] = field(
|
||||
node_interface_name: typing.Dict[str, typing.List[str]] = field(
|
||||
default=None,
|
||||
metadata={
|
||||
"name": "Node Interface Name",
|
||||
"description":
|
||||
"Dictionary with node names as key and values as a list of "
|
||||
"their test interfaces. "
|
||||
"Required if label_selector is not set.",
|
||||
}
|
||||
"description": "Dictionary with node names as key and values as a list of "
|
||||
"their test interfaces. "
|
||||
"Required if label_selector is not set.",
|
||||
},
|
||||
)
|
||||
|
||||
label_selector: typing.Annotated[
|
||||
@@ -37,93 +34,76 @@ class NetworkScenarioConfig:
|
||||
default=None,
|
||||
metadata={
|
||||
"name": "Label selector",
|
||||
"description":
|
||||
"Kubernetes label selector for the target nodes. "
|
||||
"Required if node_interface_name is not set.\n"
|
||||
"See https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ " # noqa
|
||||
"for details.",
|
||||
}
|
||||
)
|
||||
|
||||
test_duration: typing.Annotated[
|
||||
typing.Optional[int],
|
||||
validation.min(1)
|
||||
] = field(
|
||||
default=120,
|
||||
metadata={
|
||||
"name": "Test duration",
|
||||
"description":
|
||||
"Duration for which each step of the ingress chaos testing "
|
||||
"is to be performed.",
|
||||
"description": "Kubernetes label selector for the target nodes. "
|
||||
"Required if node_interface_name is not set.\n"
|
||||
"See https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ " # noqa
|
||||
"for details.",
|
||||
},
|
||||
)
|
||||
|
||||
wait_duration: typing.Annotated[
|
||||
typing.Optional[int],
|
||||
validation.min(1)
|
||||
] = field(
|
||||
test_duration: typing.Annotated[typing.Optional[int], validation.min(1)] = field(
|
||||
default=120,
|
||||
metadata={
|
||||
"name": "Test duration",
|
||||
"description": "Duration for which each step of the ingress chaos testing "
|
||||
"is to be performed.",
|
||||
},
|
||||
)
|
||||
|
||||
wait_duration: typing.Annotated[typing.Optional[int], validation.min(1)] = field(
|
||||
default=30,
|
||||
metadata={
|
||||
"name": "Wait Duration",
|
||||
"description":
|
||||
"Wait duration for finishing a test and its cleanup."
|
||||
"Ensure that it is significantly greater than wait_duration"
|
||||
}
|
||||
"description": "Wait duration for finishing a test and its cleanup."
|
||||
"Ensure that it is significantly greater than wait_duration",
|
||||
},
|
||||
)
|
||||
|
||||
instance_count: typing.Annotated[
|
||||
typing.Optional[int],
|
||||
validation.min(1)
|
||||
] = field(
|
||||
instance_count: typing.Annotated[typing.Optional[int], validation.min(1)] = field(
|
||||
default=1,
|
||||
metadata={
|
||||
"name": "Instance Count",
|
||||
"description":
|
||||
"Number of nodes to perform action/select that match "
|
||||
"the label selector.",
|
||||
}
|
||||
"description": "Number of nodes to perform action/select that match "
|
||||
"the label selector.",
|
||||
},
|
||||
)
|
||||
|
||||
kubeconfig_path: typing.Optional[str] = field(
|
||||
default=None,
|
||||
metadata={
|
||||
"name": "Kubeconfig path",
|
||||
"description":
|
||||
"Path to your Kubeconfig file. Defaults to ~/.kube/config.\n"
|
||||
"See https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/ " # noqa
|
||||
"for details.",
|
||||
}
|
||||
"description": "Path to your Kubeconfig file. Defaults to ~/.kube/config.\n"
|
||||
"See https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/ " # noqa
|
||||
"for details.",
|
||||
},
|
||||
)
|
||||
|
||||
execution_type: typing.Optional[str] = field(
|
||||
default='parallel',
|
||||
default="parallel",
|
||||
metadata={
|
||||
"name": "Execution Type",
|
||||
"description":
|
||||
"The order in which the ingress filters are applied. "
|
||||
"Execution type can be 'serial' or 'parallel'"
|
||||
}
|
||||
"description": "The order in which the ingress filters are applied. "
|
||||
"Execution type can be 'serial' or 'parallel'",
|
||||
},
|
||||
)
|
||||
|
||||
network_params: typing.Dict[str, str] = field(
|
||||
default=None,
|
||||
metadata={
|
||||
"name": "Network Parameters",
|
||||
"description":
|
||||
"The network filters that are applied on the interface. "
|
||||
"The currently supported filters are latency, "
|
||||
"loss and bandwidth"
|
||||
}
|
||||
"description": "The network filters that are applied on the interface. "
|
||||
"The currently supported filters are latency, "
|
||||
"loss and bandwidth",
|
||||
},
|
||||
)
|
||||
|
||||
kraken_config: typing.Optional[str] = field(
|
||||
default='',
|
||||
default="",
|
||||
metadata={
|
||||
"name": "Kraken Config",
|
||||
"description":
|
||||
"Path to the config file of Kraken. "
|
||||
"Set this field if you wish to publish status onto Cerberus"
|
||||
}
|
||||
"description": "Path to the config file of Kraken. "
|
||||
"Set this field if you wish to publish status onto Cerberus",
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@@ -132,33 +112,30 @@ class NetworkScenarioSuccessOutput:
|
||||
filter_direction: str = field(
|
||||
metadata={
|
||||
"name": "Filter Direction",
|
||||
"description":
|
||||
"Direction in which the traffic control filters are applied "
|
||||
"on the test interfaces"
|
||||
"description": "Direction in which the traffic control filters are applied "
|
||||
"on the test interfaces",
|
||||
}
|
||||
)
|
||||
|
||||
test_interfaces: typing.Dict[str, typing.List[str]] = field(
|
||||
metadata={
|
||||
"name": "Test Interfaces",
|
||||
"description":
|
||||
"Dictionary of nodes and their interfaces on which "
|
||||
"the chaos experiment was performed"
|
||||
"description": "Dictionary of nodes and their interfaces on which "
|
||||
"the chaos experiment was performed",
|
||||
}
|
||||
)
|
||||
|
||||
network_parameters: typing.Dict[str, str] = field(
|
||||
metadata={
|
||||
"name": "Network Parameters",
|
||||
"description":
|
||||
"The network filters that are applied on the interfaces"
|
||||
"description": "The network filters that are applied on the interfaces",
|
||||
}
|
||||
)
|
||||
|
||||
execution_type: str = field(
|
||||
metadata={
|
||||
"name": "Execution Type",
|
||||
"description": "The order in which the filters are applied"
|
||||
"description": "The order in which the filters are applied",
|
||||
}
|
||||
)
|
||||
|
||||
@@ -168,18 +145,13 @@ class NetworkScenarioErrorOutput:
|
||||
error: str = field(
|
||||
metadata={
|
||||
"name": "Error",
|
||||
"description":
|
||||
"Error message when there is a run-time error during "
|
||||
"the execution of the scenario"
|
||||
"description": "Error message when there is a run-time error during "
|
||||
"the execution of the scenario",
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
def get_default_interface(
|
||||
node: str,
|
||||
pod_template,
|
||||
cli: CoreV1Api
|
||||
) -> str:
|
||||
def get_default_interface(node: str, pod_template, cli: CoreV1Api) -> str:
|
||||
"""
|
||||
Function that returns a random interface from a node
|
||||
|
||||
@@ -210,9 +182,9 @@ def get_default_interface(
|
||||
logging.error("Exception occurred while executing command in pod")
|
||||
sys.exit(1)
|
||||
|
||||
routes = output.split('\n')
|
||||
routes = output.split("\n")
|
||||
for route in routes:
|
||||
if 'default' in route:
|
||||
if "default" in route:
|
||||
default_route = route
|
||||
break
|
||||
|
||||
@@ -226,10 +198,7 @@ def get_default_interface(


def verify_interface(
    input_interface_list: typing.List[str],
    node: str,
    pod_template,
    cli: CoreV1Api
    input_interface_list: typing.List[str], node: str, pod_template, cli: CoreV1Api
) -> typing.List[str]:
    """
    Function that verifies whether a list of interfaces is present in the node.

@@ -258,22 +227,15 @@ def verify_interface(
try:
if input_interface_list == []:
cmd = ["ip", "r"]
output = kube_helper.exec_cmd_in_pod(
cli,
cmd,
"fedtools",
"default"
)
output = kube_helper.exec_cmd_in_pod(cli, cmd, "fedtools", "default")

if not output:
logging.error(
"Exception occurred while executing command in pod"
)
logging.error("Exception occurred while executing command in pod")
sys.exit(1)

routes = output.split('\n')
routes = output.split("\n")
for route in routes:
if 'default' in route:
if "default" in route:
default_route = route
break

@@ -281,20 +243,13 @@ def verify_interface(

else:
cmd = ["ip", "-br", "addr", "show"]
output = kube_helper.exec_cmd_in_pod(
cli,
cmd,
"fedtools",
"default"
)
output = kube_helper.exec_cmd_in_pod(cli, cmd, "fedtools", "default")

if not output:
logging.error(
"Exception occurred while executing command in pod"
)
logging.error("Exception occurred while executing command in pod")
sys.exit(1)

interface_ip = output.split('\n')
interface_ip = output.split("\n")
node_interface_list = [
interface.split()[0] for interface in interface_ip[:-1]
]

@@ -302,12 +257,12 @@ def verify_interface(
for interface in input_interface_list:
if interface not in node_interface_list:
logging.error(
"Interface %s not found in node %s interface list %s" %
(interface, node, node_interface_list)
"Interface %s not found in node %s interface list %s"
% (interface, node, node_interface_list)
)
raise Exception(
"Interface %s not found in node %s interface list %s" %
(interface, node, node_interface_list)
"Interface %s not found in node %s interface list %s"
% (interface, node, node_interface_list)
)
finally:
logging.info("Deleteing pod to query interface on node")
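For orientation, the branch above checks user-supplied interfaces against the first column of ip -br addr show output and fails if any are missing. A rough stand-alone equivalent of that check; the function name and sample output are illustrative.

import typing

# Sketch: return the requested interfaces that are not present on the node,
# mirroring the verification logic above.
def missing_interfaces(
    requested: typing.List[str], ip_br_addr_output: str
) -> typing.List[str]:
    node_interfaces = [
        line.split()[0] for line in ip_br_addr_output.splitlines() if line.strip()
    ]
    return [name for name in requested if name not in node_interfaces]

# missing_interfaces(["ens3", "ens4"], "lo UNKNOWN 127.0.0.1/8\nens3 UP 10.0.0.5/24")
# returns ["ens4"], which the scenario treats as a fatal error.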
@@ -321,9 +276,8 @@ def get_node_interfaces(
    label_selector: str,
    instance_count: int,
    pod_template,
    cli: CoreV1Api
    cli: CoreV1Api,
) -> typing.Dict[str, typing.List[str]]:

    """
    Function that is used to process the input dictionary with the nodes and
    its test interfaces.

@@ -364,11 +318,7 @@ def get_node_interfaces(
nodes = kube_helper.get_node(None, label_selector, instance_count, cli)
node_interface_dict = {}
for node in nodes:
node_interface_dict[node] = get_default_interface(
node,
pod_template,
cli
)
node_interface_dict[node] = get_default_interface(node, pod_template, cli)
else:
node_name_list = node_interface_dict.keys()
filtered_node_list = []
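In short, when no interfaces are configured the scenario resolves one default interface per matched node; otherwise it keeps only the nodes the user listed. A compact sketch of the first branch, where both arguments are stand-ins for the real node list and helper:

# Illustrative only: build the node -> interface mapping used by the scenario,
# falling back to each node's default interface.
def build_node_interface_dict(nodes, default_interface_for):
    return {node: default_interface_for(node) for node in nodes}

# build_node_interface_dict(["worker-0", "worker-1"], lambda n: "ens3")
# returns {"worker-0": "ens3", "worker-1": "ens3"}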
@@ -395,9 +345,8 @@ def apply_ingress_filter(
    batch_cli: BatchV1Api,
    cli: CoreV1Api,
    create_interfaces: bool = True,
    param_selector: str = 'all'
    param_selector: str = "all",
) -> str:

    """
    Function that applies the filters to shape incoming traffic to
    the provided node's interfaces.

@@ -438,22 +387,18 @@ def apply_ingress_filter(
    """

network_params = cfg.network_params
if param_selector != 'all':
if param_selector != "all":
network_params = {param_selector: cfg.network_params[param_selector]}

if create_interfaces:
create_virtual_interfaces(cli, interface_list, node, pod_template)

exec_cmd = get_ingress_cmd(
interface_list, network_params, duration=cfg.test_duration
)
interface_list, network_params, duration=cfg.test_duration
)
logging.info("Executing %s on node %s" % (exec_cmd, node))
job_body = yaml.safe_load(
job_template.render(
jobname=str(hash(node))[:5],
nodename=node,
cmd=exec_cmd
)
job_template.render(jobname=str(hash(node))[:5], nodename=node, cmd=exec_cmd)
)
api_response = kube_helper.create_job(batch_cli, job_body)
@@ -464,10 +409,7 @@ def apply_ingress_filter(


def create_virtual_interfaces(
    cli: CoreV1Api,
    interface_list: typing.List[str],
    node: str,
    pod_template
    cli: CoreV1Api, interface_list: typing.List[str], node: str, pod_template
) -> None:
    """
    Function that creates a privileged pod and uses it to create

@@ -488,25 +430,20 @@ def create_virtual_interfaces(
        - The YAML template used to instantiate a pod to create
          virtual interfaces on the node
    """
pod_body = yaml.safe_load(
pod_template.render(nodename=node)
)
pod_body = yaml.safe_load(pod_template.render(nodename=node))
kube_helper.create_pod(cli, pod_body, "default", 300)
logging.info(
"Creating {0} virtual interfaces on node {1} using a pod".format(
len(interface_list),
node
len(interface_list), node
)
)
create_ifb(cli, len(interface_list), 'modtools')
create_ifb(cli, len(interface_list), "modtools")
logging.info("Deleting pod used to create virtual interfaces")
kube_helper.delete_pod(cli, "modtools", "default")
def delete_virtual_interfaces(
    cli: CoreV1Api,
    node_list: typing.List[str],
    pod_template
    cli: CoreV1Api, node_list: typing.List[str], pod_template
):
    """
    Function that creates a privileged pod and uses it to delete all

@@ -529,14 +466,10 @@ def delete_virtual_interfaces(
    """

for node in node_list:
pod_body = yaml.safe_load(
pod_template.render(nodename=node)
)
pod_body = yaml.safe_load(pod_template.render(nodename=node))
kube_helper.create_pod(cli, pod_body, "default", 300)
logging.info(
"Deleting all virtual interfaces on node {0}".format(node)
)
delete_ifb(cli, 'modtools')
logging.info("Deleting all virtual interfaces on node {0}".format(node))
delete_ifb(cli, "modtools")
kube_helper.delete_pod(cli, "modtools", "default")
@@ -546,21 +479,13 @@ def create_ifb(cli: CoreV1Api, number: int, pod_name: str):
    Makes use of modprobe commands
    """

exec_command = [
'chroot', '/host',
'modprobe', 'ifb', 'numifbs=' + str(number)
]
kube_helper.exec_cmd_in_pod(cli, exec_command, pod_name, 'default')
exec_command = ["chroot", "/host", "modprobe", "ifb", "numifbs=" + str(number)]
kube_helper.exec_cmd_in_pod(cli, exec_command, pod_name, "default")

for i in range(0, number):
exec_command = ['chroot', '/host', 'ip', 'link', 'set', 'dev']
exec_command += ['ifb' + str(i), 'up']
kube_helper.exec_cmd_in_pod(
cli,
exec_command,
pod_name,
'default'
)
exec_command = ["chroot", "/host", "ip", "link", "set", "dev"]
exec_command += ["ifb" + str(i), "up"]
kube_helper.exec_cmd_in_pod(cli, exec_command, pod_name, "default")


def delete_ifb(cli: CoreV1Api, pod_name: str):
@@ -569,8 +494,8 @@ def delete_ifb(cli: CoreV1Api, pod_name: str):
    Makes use of modprobe command
    """

exec_command = ['chroot', '/host', 'modprobe', '-r', 'ifb']
kube_helper.exec_cmd_in_pod(cli, exec_command, pod_name, 'default')
exec_command = ["chroot", "/host", "modprobe", "-r", "ifb"]
kube_helper.exec_cmd_in_pod(cli, exec_command, pod_name, "default")
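Taken together, create_ifb and delete_ifb drive everything through host commands from the privileged pod: load the ifb kernel module with numifbs=<count>, bring each ifbN device up, and later remove the module again. The same sequence expressed as the argument lists handed to exec_cmd_in_pod; the count is an example value.

# Sketch of the host commands for two virtual interfaces, run via chroot /host
# from the privileged pod.
number = 2
setup_cmds = [["chroot", "/host", "modprobe", "ifb", "numifbs=" + str(number)]]
for i in range(number):
    setup_cmds.append(
        ["chroot", "/host", "ip", "link", "set", "dev", "ifb" + str(i), "up"]
    )
teardown_cmd = ["chroot", "/host", "modprobe", "-r", "ifb"]
# Equivalent shell: modprobe ifb numifbs=2; ip link set dev ifb0 up;
# ip link set dev ifb1 up; and on cleanup: modprobe -r ifb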
def get_job_pods(cli: CoreV1Api, api_response):
@@ -591,18 +516,14 @@ def get_job_pods(cli: CoreV1Api, api_response):
controllerUid = api_response.metadata.labels["controller-uid"]
pod_label_selector = "controller-uid=" + controllerUid
pods_list = kube_helper.list_pods(
cli,
label_selector=pod_label_selector,
namespace="default"
cli, label_selector=pod_label_selector, namespace="default"
)

return pods_list[0]
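The lookup above works because the Job controller labels the pods it creates with controller-uid=<uid of the Job>, so a label selector on that value returns exactly the job's pods. A minimal sketch of deriving that selector; list_pods stands in for kube_helper.list_pods.

# Illustrative: build the label selector used to locate a Job's pod.
def job_pod_selector(job) -> str:
    controller_uid = job.metadata.labels["controller-uid"]
    return "controller-uid=" + controller_uid

# pods = list_pods(cli, label_selector=job_pod_selector(api_response), namespace="default")
# pods[0] is the pod that runs the traffic-shaping command.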
def wait_for_job(
    batch_cli: BatchV1Api,
    job_list: typing.List[str],
    timeout: int = 300
    batch_cli: BatchV1Api, job_list: typing.List[str], timeout: int = 300
) -> None:
    """
    Function that waits for a list of jobs to finish within a time period

@@ -625,13 +546,11 @@ def wait_for_job(
for job_name in job_list:
try:
api_response = kube_helper.get_job_status(
batch_cli,
job_name,
namespace="default"
batch_cli, job_name, namespace="default"
)
if (
api_response.status.succeeded is not None or
api_response.status.failed is not None
api_response.status.succeeded is not None
or api_response.status.failed is not None
):
count += 1
job_list.remove(job_name)
@@ -645,11 +564,7 @@ def wait_for_job(
time.sleep(5)
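wait_for_job above polls every job in the list every 5 seconds and drops a job once its status reports succeeded or failed, giving up when the timeout elapses. A self-contained version of that polling pattern; get_status is a stand-in for kube_helper.get_job_status.

import time

# Minimal sketch of the polling loop; returns whatever never finished.
def wait_for_all(job_names, get_status, timeout=300):
    deadline = time.time() + timeout
    pending = list(job_names)
    while pending and time.time() < deadline:
        for name in pending[:]:
            status = get_status(name)
            if status.succeeded is not None or status.failed is not None:
                pending.remove(name)
        time.sleep(5)
    return pending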
def delete_jobs(
    cli: CoreV1Api,
    batch_cli: BatchV1Api,
    job_list: typing.List[str]
):
def delete_jobs(cli: CoreV1Api, batch_cli: BatchV1Api, job_list: typing.List[str]):
    """
    Function that deletes jobs

@@ -667,38 +582,28 @@ def delete_jobs(
for job_name in job_list:
try:
api_response = kube_helper.get_job_status(
batch_cli,
job_name,
namespace="default"
batch_cli, job_name, namespace="default"
)
if api_response.status.failed is not None:
pod_name = get_job_pods(cli, api_response)
pod_stat = kube_helper.read_pod(
cli,
name=pod_name,
namespace="default"
)
pod_stat = kube_helper.read_pod(cli, name=pod_name, namespace="default")
logging.error(pod_stat.status.container_statuses)
pod_log_response = kube_helper.get_pod_log(
cli,
name=pod_name,
namespace="default"
cli, name=pod_name, namespace="default"
)
pod_log = pod_log_response.data.decode("utf-8")
logging.error(pod_log)
except Exception as e:
logging.warn("Exception in getting job status: %s" % str(e))
api_response = kube_helper.delete_job(
batch_cli,
name=job_name,
namespace="default"
batch_cli, name=job_name, namespace="default"
)
def get_ingress_cmd(
    interface_list: typing.List[str],
    network_parameters: typing.Dict[str, str],
    duration: int = 300
    duration: int = 300,
):
    """
    Function that returns the commands to the ingress traffic shaping on

@@ -736,9 +641,7 @@ def get_ingress_cmd(

for i, interface in enumerate(interface_list):
if not interface_pattern.match(interface):
logging.error(
"Interface name can only consist of alphanumeric characters"
)
logging.error("Interface name can only consist of alphanumeric characters")
raise Exception(
"Interface '{0}' does not match the required regex pattern :"
r" ^[a-z0-9\-\@\_]+$".format(interface)
@@ -752,33 +655,23 @@ def get_ingress_cmd(
"follow the regex pattern ^ifb[0-9]+$".format(ifb_name)
)

tc_set += "tc qdisc add dev {0} handle ffff: ingress;".format(
interface
)
tc_set += "tc qdisc add dev {0} handle ffff: ingress;".format(interface)
tc_set += "tc filter add dev {0} parent ffff: protocol ip u32 match u32 0 0 action mirred egress redirect dev {1};".format(  # noqa
interface,
ifb_name
interface, ifb_name
)
tc_set = "{0} tc qdisc add dev {1} root netem".format(tc_set, ifb_name)
tc_unset = "{0} tc qdisc del dev {1} root ;".format(tc_unset, ifb_name)
tc_unset += "tc qdisc del dev {0} handle ffff: ingress;".format(
interface
)
tc_unset += "tc qdisc del dev {0} handle ffff: ingress;".format(interface)
tc_ls = "{0} tc qdisc ls dev {1} ;".format(tc_ls, ifb_name)

for parameter in network_parameters.keys():
tc_set += " {0} {1} ".format(
param_map[parameter],
network_parameters[parameter]
param_map[parameter], network_parameters[parameter]
)
tc_set += ";"

exec_cmd = "{0} {1} sleep {2};{3} sleep 20;{4}".format(
tc_set,
tc_ls,
duration,
tc_unset,
tc_ls
tc_set, tc_ls, duration, tc_unset, tc_ls
)

return exec_cmd
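Pulling the hunks above together: for each interface the command string adds an ingress qdisc on the real interface, mirrors its traffic to the matching ifbN device, attaches a netem qdisc with the requested parameters on the ifb, lists the qdiscs, sleeps for the test duration, and then tears everything down. A rough illustration for one interface; the param_map entries and the parameter values here are assumptions, not the repository's exact mapping.

# Illustrative assembly of the tc command for "ens3" mirrored to "ifb0".
param_map = {"latency": "delay", "loss": "loss"}          # assumed mapping
network_parameters = {"latency": "50ms", "loss": "0.02"}  # example values
interface, ifb_name, duration = "ens3", "ifb0", 120

tc_set = "tc qdisc add dev {0} handle ffff: ingress;".format(interface)
tc_set += (
    "tc filter add dev {0} parent ffff: protocol ip u32 match u32 0 0 "
    "action mirred egress redirect dev {1};"
).format(interface, ifb_name)
tc_set = "{0} tc qdisc add dev {1} root netem".format(tc_set, ifb_name)
tc_unset = "tc qdisc del dev {0} root ;".format(ifb_name)
tc_unset += "tc qdisc del dev {0} handle ffff: ingress;".format(interface)
tc_ls = "tc qdisc ls dev {0} ;".format(ifb_name)
for parameter in network_parameters:
    tc_set += " {0} {1} ".format(param_map[parameter], network_parameters[parameter])
tc_set += ";"
exec_cmd = "{0} {1} sleep {2};{3} sleep 20;{4}".format(
    tc_set, tc_ls, duration, tc_unset, tc_ls
)
print(exec_cmd)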
@@ -790,17 +683,14 @@ def get_ingress_cmd(
    description="Applies filters to ihe ingress side of node(s) interfaces",
    outputs={
        "success": NetworkScenarioSuccessOutput,
        "error": NetworkScenarioErrorOutput
        "error": NetworkScenarioErrorOutput,
    },
)
def network_chaos(cfg: NetworkScenarioConfig) -> typing.Tuple[
    str,
    typing.Union[
        NetworkScenarioSuccessOutput,
        NetworkScenarioErrorOutput
    ]
def network_chaos(
    cfg: NetworkScenarioConfig,
) -> typing.Tuple[
    str, typing.Union[NetworkScenarioSuccessOutput, NetworkScenarioErrorOutput]
]:

    """
    Function that performs the ingress network chaos scenario based
    on the provided configuration
@@ -826,12 +716,10 @@ def network_chaos(cfg: NetworkScenarioConfig) -> typing.Tuple[
cfg.label_selector,
cfg.instance_count,
pod_interface_template,
cli
cli,
)
except Exception:
return "error", NetworkScenarioErrorOutput(
format_exc()
)
return "error", NetworkScenarioErrorOutput(format_exc())
job_list = []
publish = False
if cfg.kraken_config:
@@ -840,16 +728,12 @@ def network_chaos(cfg: NetworkScenarioConfig) -> typing.Tuple[
with open(cfg.kraken_config, "r") as f:
config = yaml.full_load(f)
except Exception:
logging.error(
"Error reading Kraken config from %s" % cfg.kraken_config
)
return "error", NetworkScenarioErrorOutput(
format_exc()
)
logging.error("Error reading Kraken config from %s" % cfg.kraken_config)
return "error", NetworkScenarioErrorOutput(format_exc())
publish = True

try:
if cfg.execution_type == 'parallel':
if cfg.execution_type == "parallel":
for node in node_interface_dict:
job_list.append(
apply_ingress_filter(
@@ -859,22 +743,19 @@ def network_chaos(cfg: NetworkScenarioConfig) -> typing.Tuple[
pod_module_template,
job_template,
batch_cli,
cli
cli,
)
)
logging.info("Waiting for parallel job to finish")
start_time = int(time.time())
wait_for_job(batch_cli, job_list[:], cfg.test_duration+100)
wait_for_job(batch_cli, job_list[:], cfg.test_duration + 100)
end_time = int(time.time())
if publish:
cerberus.publish_kraken_status(
config,
failed_post_scenarios,
start_time,
end_time
config, failed_post_scenarios, start_time, end_time
)

elif cfg.execution_type == 'serial':
elif cfg.execution_type == "serial":
create_interfaces = True
for param in cfg.network_params:
for node in node_interface_dict:
@@ -888,50 +769,39 @@ def network_chaos(cfg: NetworkScenarioConfig) -> typing.Tuple[
batch_cli,
cli,
create_interfaces=create_interfaces,
param_selector=param
param_selector=param,
)
)
logging.info("Waiting for serial job to finish")
start_time = int(time.time())
wait_for_job(batch_cli, job_list[:], cfg.test_duration+100)
wait_for_job(batch_cli, job_list[:], cfg.test_duration + 100)
logging.info("Deleting jobs")
delete_jobs(cli, batch_cli, job_list[:])
job_list = []
logging.info(
"Waiting for wait_duration : %ss" % cfg.wait_duration
)
logging.info("Waiting for wait_duration : %ss" % cfg.wait_duration)
time.sleep(cfg.wait_duration)
end_time = int(time.time())
if publish:
cerberus.publish_kraken_status(
config,
failed_post_scenarios,
start_time,
end_time
config, failed_post_scenarios, start_time, end_time
)
create_interfaces = False
else:

return "error", NetworkScenarioErrorOutput(
"Invalid execution type - serial and parallel are "
"the only accepted types"
)
"Invalid execution type - serial and parallel are "
"the only accepted types"
)
return "success", NetworkScenarioSuccessOutput(
filter_direction="ingress",
test_interfaces=node_interface_dict,
network_parameters=cfg.network_params,
execution_type=cfg.execution_type
execution_type=cfg.execution_type,
)
except Exception as e:
logging.error("Network Chaos exiting due to Exception - %s" % e)
return "error", NetworkScenarioErrorOutput(
format_exc()
)
return "error", NetworkScenarioErrorOutput(format_exc())
finally:
delete_virtual_interfaces(
cli,
node_interface_dict.keys(),
pod_module_template
)
delete_virtual_interfaces(cli, node_interface_dict.keys(), pod_module_template)
logging.info("Deleting jobs(if any)")
delete_jobs(cli, batch_cli, job_list[:])
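Finally, everything the entry point needs comes from NetworkScenarioConfig. Based only on the cfg.* fields referenced in the function above, a scenario definition is shaped roughly like the dictionary below; the exact schema lives in the plugin's config dataclass and may differ.

# Illustrative parameter set for the ingress network chaos scenario.
example_cfg = {
    "label_selector": "node-role.kubernetes.io/worker",    # which nodes to target
    "instance_count": 1,                                    # how many matching nodes
    "execution_type": "parallel",                           # or "serial"
    "network_params": {"latency": "50ms", "loss": "0.02"},  # shaping parameters
    "test_duration": 120,                                   # seconds of shaping
    "wait_duration": 30,                                    # pause between serial runs
    "kraken_config": None,                                  # enables cerberus publishing when set
}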