Mirror of https://github.com/krkn-chaos/krkn.git (synced 2026-02-19 20:40:33 +00:00)

Compare commits: 55 commits
Commit SHA1s in this comparison (author and date columns did not survive extraction):

39c0152b7b, 491dc17267, b2b5002f45, fccd701dee, 570631ebfc, 3ab9ca4319, 4084ffd9c6, 19cc2c047f, 6197fc6722, 2a8ac41ebf, b4d235d31c, e4e4620d10, a2c24ab7ed, fe892fd9bf, 74613fdb4b, 28c37c9353, de0567b067, 83486557f1, ce409ea6fb, 0eb8d38596, 68dc17bc44, 572eeefaf4, 81376bad56, 72b46f8393, a7938e58d2, 9858f96c78, c91e8db928, 54ea98be9c, 9748622e4f, 47f93b39c2, aa715bf566, b9c08a45db, d9f4607aa6, 8806781a4f, 83b811bee4, 16ea18c718, 1ab94754e3, 278b2bafd7, bc863fa01f, 900ca74d80, 82b8df4e85, 691be66b0a, 019b036f9f, 13fa711c9b, 17f61625e4, 3627b5ba88, fee4f7d2bf, 0534e03c48, bb9a19ab71, c5b9554de5, e5f97434d3, 8b18fa8a35, 93686ca736, 64f4c234e9, 915cc5db94
.github/workflows/build.yml (vendored, 21 lines changed)
@@ -1,8 +1,5 @@
name: Build Krkn
on:
  push:
    branches:
      - main
  pull_request:

jobs:
@@ -51,20 +48,4 @@ jobs:
          if-no-files-found: error
      - name: Check CI results
        run: grep Fail CI/results.markdown && false || true
      - name: Build the Docker images
        run: docker build --no-cache -t quay.io/chaos-kubox/krkn containers/
      - name: Login in quay
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        run: docker login quay.io -u ${QUAY_USER} -p ${QUAY_TOKEN}
        env:
          QUAY_USER: ${{ secrets.QUAY_USER_1 }}
          QUAY_TOKEN: ${{ secrets.QUAY_TOKEN_1 }}
      - name: Push the Docker images
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        run: docker push quay.io/chaos-kubox/krkn
      - name: Rebuild krkn-hub
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        uses: redhat-chaos/actions/krkn-hub@main
        with:
          QUAY_USER: ${{ secrets.QUAY_USER_1 }}
          QUAY_TOKEN: ${{ secrets.QUAY_TOKEN_1 }}
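A note on the `Check CI results` step above: as written, the trailing `|| true` also swallows the `false`, so the step exits 0 whether or not "Fail" appears in the results table. A minimal sketch of an exit-status inversion that actually fails the step, assuming the same CI/results.markdown file produced by CI/run.sh:

```bash
# Fail the step when the results table contains a "Fail" row.
if grep -q Fail CI/results.markdown; then
  echo "at least one CI test failed"
  exit 1
fi
# equivalently, in one line: ! grep -q Fail CI/results.markdown
```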
.github/workflows/docker-image.yml (vendored, new file, 30 lines)
@@ -0,0 +1,30 @@
name: Docker Image CI
on:
  push:
    branches:
      - main
  pull_request:

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Check out code
        uses: actions/checkout@v3
      - name: Build the Docker images
        run: docker build --no-cache -t quay.io/redhat-chaos/krkn containers/
      - name: Login in quay
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        run: docker login quay.io -u ${QUAY_USER} -p ${QUAY_TOKEN}
        env:
          QUAY_USER: ${{ secrets.QUAY_USER_1 }}
          QUAY_TOKEN: ${{ secrets.QUAY_TOKEN_1 }}
      - name: Push the Docker images
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        run: docker push quay.io/redhat-chaos/krkn
      - name: Rebuild krkn-hub
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        uses: redhat-chaos/actions/krkn-hub@main
        with:
          QUAY_USER: ${{ secrets.QUAY_USER_1 }}
          QUAY_TOKEN: ${{ secrets.QUAY_TOKEN_1 }}
.github/workflows/functional_tests.yaml (vendored, new file, 111 lines)
@@ -0,0 +1,111 @@
on: issue_comment

jobs:
  check_user:
    # This job only runs for pull request comments
    name: Check User Authorization
    env:
      USERS: ${{vars.USERS}}
    if: contains(github.event.comment.body, '/funtest') && contains(github.event.comment.html_url, '/pull/')
    runs-on: ubuntu-latest
    steps:
      - name: Check User
        run: |
          for name in `echo $USERS`
          do
            name="${name//$'\r'/}"
            name="${name//$'\n'/}"
            if [ $name == "${{github.event.sender.login}}" ]
            then
              echo "user ${{github.event.sender.login}} authorized, action started..."
              exit 0
            fi
          done
          echo "user ${{github.event.sender.login}} is not allowed to run functional tests Action"
          exit 1
  pr_commented:
    # This job only runs for pull request comments containing /funtest
    name: Functional Tests
    if: contains(github.event.comment.body, '/funtest') && contains(github.event.comment.html_url, '/pull/')
    runs-on: ubuntu-latest
    needs:
      - check_user
    steps:
      - name: Check out Kraken
        uses: actions/checkout@v3
      - name: Checkout Pull Request
        run: hub pr checkout ${{ github.event.issue.number }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Install OC CLI
        uses: redhat-actions/oc-installer@v1
        with:
          oc_version: latest
      - name: Install python 3.9
        uses: actions/setup-python@v4
        with:
          python-version: '3.9'
      - name: Setup kraken dependencies
        run: pip install -r requirements.txt
      - name: Create Workdir & export the path
        run: |
          mkdir workdir
          echo "WORKDIR_PATH=`pwd`/workdir" >> $GITHUB_ENV
      - name: Teardown CRC (Post Action)
        uses: webiny/action-post-run@3.0.0
        id: post-run-command
        with:
          # currently using image coming from tsebastiani quay.io repo
          # waiting that a fix is merged in the upstream one
          # post action run cannot (apparently) be properly indented
          run: docker run -v "${{ env.WORKDIR_PATH }}:/workdir" -e WORKING_MODE=T -e AWS_ACCESS_KEY_ID=${{ secrets.AWS_ACCESS_KEY_ID }} -e AWS_SECRET_ACCESS_KEY=${{ secrets.AWS_SECRET_ACCESS_KEY }} -e AWS_DEFAULT_REGION=us-west-2 -e TEARDOWN_RUN_ID=crc quay.io/tsebastiani/crc-cloud
      - name: Run CRC
        # currently using image coming from tsebastiani quay.io repo
        # waiting that a fix is merged in the upstream one
        run: |
          docker run -v "${{ env.WORKDIR_PATH }}:/workdir" \
            -e WORKING_MODE=C \
            -e PULL_SECRET="${{ secrets.PULL_SECRET }}" \
            -e AWS_ACCESS_KEY_ID="${{ secrets.AWS_ACCESS_KEY_ID }}" \
            -e AWS_SECRET_ACCESS_KEY="${{ secrets.AWS_SECRET_ACCESS_KEY }}" \
            -e AWS_DEFAULT_REGION=us-west-2 \
            -e CREATE_RUN_ID=crc \
            -e PASS_KUBEADMIN="${{ secrets.KUBEADMIN_PWD }}" \
            -e PASS_REDHAT="${{ secrets.REDHAT_PWD }}" \
            -e PASS_DEVELOPER="${{ secrets.DEVELOPER_PWD }}" \
            quay.io/tsebastiani/crc-cloud
      - name: OpenShift login and example deployment, GitHub Action env init
        env:
          NAMESPACE: test-namespace
          DEPLOYMENT_NAME: test-nginx
          KUBEADMIN_PWD: '${{ secrets.KUBEADMIN_PWD }}'
        run: ./CI/CRC/init_github_action.sh
      - name: Setup test suite
        run: |
          yq -i '.kraken.port="8081"' CI/config/common_test_config.yaml
          yq -i '.kraken.signal_address="0.0.0.0"' CI/config/common_test_config.yaml

          echo "test_app_outages_gh" > ./CI/tests/my_tests
          echo "test_container" >> ./CI/tests/my_tests
          echo "test_namespace" >> ./CI/tests/my_tests
          echo "test_net_chaos" >> ./CI/tests/my_tests
          echo "test_time" >> ./CI/tests/my_tests

      - name: Print affected config files
        run: |
          echo -e "## CI/config/common_test_config.yaml\n\n"
          cat CI/config/common_test_config.yaml

      - name: Running test suite
        run: |
          ./CI/run.sh
      - name: Print test output
        run: cat CI/out/*
      - name: Create coverage report
        run: |
          echo "# Test results" > $GITHUB_STEP_SUMMARY
          cat CI/results.markdown >> $GITHUB_STEP_SUMMARY
          echo "# Test coverage" >> $GITHUB_STEP_SUMMARY
          python -m coverage report --format=markdown >> $GITHUB_STEP_SUMMARY
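The `Check User` step above word-splits `$USERS` and compares it unquoted, which misbehaves if an entry is empty or contains spaces. A hedged sketch of the same authorization check with quoted comparisons; `ACTOR` is a placeholder standing in for `${{github.event.sender.login}}`:

```bash
# Defensive variant of the USERS allowlist check.
ACTOR="some-github-login"   # placeholder for the comment author
for name in $USERS; do
  name="${name//$'\r'/}"    # strip carriage returns
  name="${name//$'\n'/}"    # strip newlines
  if [ "$name" == "$ACTOR" ]; then
    echo "user $ACTOR authorized, action started..."
    exit 0
  fi
done
echo "user $ACTOR is not allowed to run functional tests Action"
exit 1
```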
.gitleaks.toml (new file, 6 lines)
@@ -0,0 +1,6 @@
[allowlist]
description = "Global Allowlist"

paths = [
    '''kraken/arcaflow_plugin/fixtures/*'''
]
CI/CRC/deployment.yaml (new file, 44 lines)
@@ -0,0 +1,44 @@
apiVersion: v1
kind: Namespace
metadata:
  name: $NAMESPACE

---

apiVersion: v1
kind: Service
metadata:
  name: $DEPLOYMENT_NAME-service
  namespace: $NAMESPACE
spec:
  selector:
    app: $DEPLOYMENT_NAME
  ports:
    - name: http
      port: 80
      targetPort: 8080
  type: ClusterIP

---

apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: $NAMESPACE
  name: $DEPLOYMENT_NAME-deployment
spec:
  replicas: 3
  selector:
    matchLabels:
      app: $DEPLOYMENT_NAME
  template:
    metadata:
      labels:
        app: $DEPLOYMENT_NAME
    spec:
      containers:
        - name: $DEPLOYMENT_NAME
          image: nginxinc/nginx-unprivileged:stable-alpine
          ports:
            - name: http
              containerPort: 8080
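This manifest is a template: `$NAMESPACE` and `$DEPLOYMENT_NAME` are substituted by envsubst before being applied, as init_github_action.sh does below. A minimal rendering sketch; the variable allowlist argument is optional but keeps envsubst from touching any other `$`-prefixed token in the file:

```bash
# Render and apply the templated manifest against the current cluster.
export NAMESPACE=test-namespace DEPLOYMENT_NAME=test-nginx
envsubst '$NAMESPACE $DEPLOYMENT_NAME' < CI/CRC/deployment.yaml | oc apply -f -
```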
CI/CRC/init_github_action.sh (new executable file, 72 lines)
@@ -0,0 +1,72 @@
#!/bin/bash
SCRIPT_PATH=./CI/CRC
DEPLOYMENT_PATH=$SCRIPT_PATH/deployment.yaml
CLUSTER_INFO=cluster_infos.json

[[ -z $WORKDIR_PATH ]] && echo "[ERROR] please set \$WORKDIR_PATH environment variable" && exit 1
CLUSTER_INFO_PATH=$WORKDIR_PATH/crc/$CLUSTER_INFO

[[ ! -f $DEPLOYMENT_PATH ]] && echo "[ERROR] please run $0 from GitHub action root directory" && exit 1
[[ -z $KUBEADMIN_PWD ]] && echo "[ERROR] kubeadmin password not set, please check the repository secrets" && exit 1
[[ -z $DEPLOYMENT_NAME ]] && echo "[ERROR] please set \$DEPLOYMENT_NAME environment variable" && exit 1
[[ -z $NAMESPACE ]] && echo "[ERROR] please set \$NAMESPACE environment variable" && exit 1
[[ ! -f $CLUSTER_INFO_PATH ]] && echo "[ERROR] $CLUSTER_INFO not found in $CLUSTER_INFO_PATH" && exit 1

OPENSSL=`which openssl 2>/dev/null`
[[ $? != 0 ]] && echo "[ERROR]: openssl missing, please install it and try again" && exit 1
OC=`which oc 2>/dev/null`
[[ $? != 0 ]] && echo "[ERROR]: oc missing, please install it and try again" && exit 1
SED=`which sed 2>/dev/null`
[[ $? != 0 ]] && echo "[ERROR]: sed missing, please install it and try again" && exit 1
JQ=`which jq 2>/dev/null`
[[ $? != 0 ]] && echo "[ERROR]: jq missing, please install it and try again" && exit 1
ENVSUBST=`which envsubst 2>/dev/null`
[[ $? != 0 ]] && echo "[ERROR]: envsubst missing, please install it and try again" && exit 1

API_ADDRESS="$($JQ -r '.api.address' $CLUSTER_INFO_PATH)"
API_PORT="$($JQ -r '.api.port' $CLUSTER_INFO_PATH)"
BASE_HOST=`$JQ -r '.api.address' $CLUSTER_INFO_PATH | sed -r 's#https:\/\/api\.(.+\.nip\.io)#\1#'`
FQN=$DEPLOYMENT_NAME.apps.$BASE_HOST

echo "[INF] logging on $API_ADDRESS:$API_PORT"
COUNTER=1
until `$OC login --insecure-skip-tls-verify -u kubeadmin -p $KUBEADMIN_PWD $API_ADDRESS:$API_PORT > /dev/null 2>&1`
do
    echo "[INF] login attempt $COUNTER"
    [[ $COUNTER == 20 ]] && echo "[ERR] maximum login attempts exceeded, failing" && exit 1
    ((COUNTER++))
    sleep 10
done

echo "[INF] deploying example deployment: $DEPLOYMENT_NAME in namespace: $NAMESPACE"
$ENVSUBST < $DEPLOYMENT_PATH | $OC apply -f - > /dev/null 2>&1

echo "[INF] creating SSL self-signed certificates for route https://$FQN"
$OPENSSL genrsa -out servercakey.pem > /dev/null 2>&1
$OPENSSL req -new -x509 -key servercakey.pem -out serverca.crt -subj "/CN=$FQN/O=Red Hat Inc./C=US" > /dev/null 2>&1
$OPENSSL genrsa -out server.key > /dev/null 2>&1
$OPENSSL req -new -key server.key -out server_reqout.txt -subj "/CN=$FQN/O=Red Hat Inc./C=US" > /dev/null 2>&1
$OPENSSL x509 -req -in server_reqout.txt -days 3650 -sha256 -CAcreateserial -CA serverca.crt -CAkey servercakey.pem -out server.crt > /dev/null 2>&1
echo "[INF] creating deployment: $DEPLOYMENT_NAME public route: https://$FQN"
$OC create route --namespace $NAMESPACE edge --service=$DEPLOYMENT_NAME-service --cert=server.crt --key=server.key --ca-cert=serverca.crt --hostname="$FQN" > /dev/null 2>&1


echo "[INF] setting github action environment variables"

NODE_NAME="`$OC get nodes -o json | $JQ -r '.items[0].metadata.name'`"
COVERAGE_FILE="`pwd`/coverage.md"
echo "DEPLOYMENT_NAME=$DEPLOYMENT_NAME" >> $GITHUB_ENV
echo "DEPLOYMENT_FQN=$FQN" >> $GITHUB_ENV
echo "API_ADDRESS=$API_ADDRESS" >> $GITHUB_ENV
echo "API_PORT=$API_PORT" >> $GITHUB_ENV
echo "NODE_NAME=$NODE_NAME" >> $GITHUB_ENV
echo "NAMESPACE=$NAMESPACE" >> $GITHUB_ENV
echo "COVERAGE_FILE=$COVERAGE_FILE" >> $GITHUB_ENV

echo "[INF] deployment name will be available in \${{ env.DEPLOYMENT_NAME }} with value $DEPLOYMENT_NAME"
echo "[INF] deployment fully qualified name will be available in \${{ env.DEPLOYMENT_FQN }} with value $FQN"
echo "[INF] OCP API address will be available in \${{ env.API_ADDRESS }} with value $API_ADDRESS"
echo "[INF] OCP API port will be available in \${{ env.API_PORT }} with value $API_PORT"
echo "[INF] OCP node name will be available in \${{ env.NODE_NAME }} with value $NODE_NAME"
echo "[INF] coverage file will be available in \${{ env.COVERAGE_FILE }} with value $COVERAGE_FILE"
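The script reads its inputs from environment variables and expects the crc-cloud output under `$WORKDIR_PATH`. A local invocation sketch; all values are placeholders, and `GITHUB_ENV` is pointed at a scratch file since the script appends to it:

```bash
# Run from the repository root; credentials and paths are placeholders.
export WORKDIR_PATH="$PWD/workdir"      # must contain crc/cluster_infos.json
export KUBEADMIN_PWD='changeme'
export NAMESPACE=test-namespace
export DEPLOYMENT_NAME=test-nginx
export GITHUB_ENV=/tmp/github.env      # the script appends KEY=VALUE lines here
./CI/CRC/init_github_action.sh
```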
@@ -29,3 +29,15 @@ tunings:
    wait_duration: 6 # Duration to wait between each chaos scenario.
    iterations: 1 # Number of times to execute the scenarios.
    daemon_mode: False # Iterations are set to infinity which means that the kraken will cause chaos forever.
telemetry:
    enabled: False # enable/disables the telemetry collection feature
    api_url: https://ulnmf9xv7j.execute-api.us-west-2.amazonaws.com/production # telemetry service endpoint
    username: username # telemetry service username
    password: password # telemetry service password
    prometheus_backup: True # enables/disables prometheus data collection
    full_prometheus_backup: False # if set to False only the /prometheus/wal folder will be downloaded.
    backup_threads: 5 # number of telemetry download/upload threads
    archive_path: /tmp # local path where the archive files will be temporarily stored
    max_retries: 0 # maximum number of upload retries (if 0 will retry forever)
    run_tag: '' # if set, this will be appended to the run folder in the bucket (useful to group the runs)
    archive_size: 10000 # the size of the prometheus data archive in KB. The lower the archive size, the higher the number of archive files produced and uploaded (and processed by backup_threads simultaneously).
CI/run.sh (19 lines changed)
@@ -1,5 +1,22 @@
#!/bin/bash
set -x
MAX_RETRIES=60

OC=`which oc 2>/dev/null`
[[ $? != 0 ]] && echo "[ERROR]: oc missing, please install it and try again" && exit 1

wait_cluster_become_ready() {
    COUNT=1
    until `$OC get namespace > /dev/null 2>&1`
    do
        echo "[INF] waiting OpenShift to become ready, after $COUNT check"
        sleep 3
        [[ $COUNT == $MAX_RETRIES ]] && echo "[ERR] max retries exceeded, failing" && exit 1
        ((COUNT++))
    done
}


ci_tests_loc="CI/tests/my_tests"

@@ -22,5 +39,7 @@ echo '-----------------------|--------|---------' >> $results
# Run each test
for test_name in `cat CI/tests/my_tests`
do
    wait_cluster_become_ready
    ./CI/run_test.sh $test_name $results
    wait_cluster_become_ready
done
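The `until \`...\`` form above (also used in init_github_action.sh) runs the command inside a command substitution and then evaluates its output; it only works here because the output is redirected away. A sketch of the same wait loop letting `until` test the exit status directly:

```bash
# Same retry loop without the command-substitution backticks.
wait_cluster_become_ready() {
    local count=1
    until $OC get namespace > /dev/null 2>&1; do
        echo "[INF] waiting OpenShift to become ready, after $count check"
        sleep 3
        [[ $count == $MAX_RETRIES ]] && echo "[ERR] max retries exceeded, failing" && exit 1
        ((count++))
    done
}
```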
@@ -1,31 +0,0 @@
---
kind: Pod
apiVersion: v1
metadata:
  name: hello-pod
  creationTimestamp:
  labels:
    name: hello-openshift
spec:
  containers:
    - name: hello-openshift
      image: openshift/hello-openshift
      ports:
        - containerPort: 5050
          protocol: TCP
      resources: {}
      volumeMounts:
        - name: tmp
          mountPath: "/tmp"
      terminationMessagePath: "/dev/termination-log"
      imagePullPolicy: IfNotPresent
      securityContext:
        capabilities: {}
        privileged: false
  volumes:
    - name: tmp
      emptyDir: {}
  restartPolicy: Always
  dnsPolicy: ClusterFirst
  serviceAccount: ''
status: {}
@@ -1,6 +0,0 @@
# yaml-language-server: $schema=../../scenarios/plugin.schema.json
- id: kill-pods
  config:
    label_selector: name=hello-openshift
    namespace_pattern: ^default$
    kill: 1
@@ -1,7 +0,0 @@
scenarios:
  - action: delete
    namespace: "^.*ingress.*$"
    label_selector:
    runs: 1
    sleep: 15
    wait_time: 30
CI/scenarios/network_diagnostics_namespace.yaml (new file, 7 lines)
@@ -0,0 +1,7 @@
scenarios:
  - action: delete
    namespace: "^openshift-network-diagnostics$"
    label_selector:
    runs: 1
    sleep: 15
    wait_time: 30
@@ -1,7 +1,20 @@
apiVersion: v1
kind: Namespace
metadata:
  labels:
    kubernetes.io/metadata.name: kraken
    pod-security.kubernetes.io/audit: privileged
    pod-security.kubernetes.io/enforce: privileged
    pod-security.kubernetes.io/enforce-version: v1.24
    pod-security.kubernetes.io/warn: privileged
    security.openshift.io/scc.podSecurityLabelSync: "false"
  name: kraken
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: kraken-test-pv
  namespace: kraken
  labels:
    type: local
spec:
@@ -17,6 +30,7 @@ apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: kraken-test-pvc
  namespace: kraken
spec:
  storageClassName: manual
  accessModes:
@@ -29,6 +43,7 @@ apiVersion: v1
kind: Pod
metadata:
  name: kraken-test-pod
  namespace: kraken
spec:
  volumes:
    - name: kraken-test-pv
@@ -36,7 +51,7 @@ spec:
        claimName: kraken-test-pvc
  containers:
    - name: kraken-test-container
      image: 'image-registry.openshift-image-registry.svc:5000/openshift/httpd:latest'
      image: 'quay.io/centos7/httpd-24-centos7:latest'
      volumeMounts:
        - mountPath: "/home/krake-dir/"
          name: kraken-test-pv
CI/tests/test_app_outages_gh.sh (new executable file, 21 lines)
@@ -0,0 +1,21 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT


function functional_test_app_outage {
    [ -z $DEPLOYMENT_NAME ] && echo "[ERR] DEPLOYMENT_NAME variable not set, failing." && exit 1
    yq -i '.application_outage.pod_selector={"app":"'$DEPLOYMENT_NAME'"}' CI/scenarios/app_outage.yaml
    yq -i '.application_outage.namespace="'$NAMESPACE'"' CI/scenarios/app_outage.yaml
    export scenario_type="application_outages"
    export scenario_file="CI/scenarios/app_outage.yaml"
    export post_config=""
    envsubst < CI/config/common_test_config.yaml > CI/config/app_outage.yaml
    python3 -m coverage run -a run_kraken.py -c CI/config/app_outage.yaml
    echo "App outage scenario test: Success"
}

functional_test_app_outage
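The test splices shell variables into the yq expressions by closing and reopening the single quotes. yq v4 (mikefarah) can also read the variables through its `env()`/`strenv()` operators, which avoids quoting pitfalls when values contain special characters; a sketch under that assumption:

```bash
# Equivalent edits using yq v4's strenv() operator; variables must be exported.
export DEPLOYMENT_NAME NAMESPACE
yq -i '.application_outage.pod_selector = {"app": strenv(DEPLOYMENT_NAME)}' CI/scenarios/app_outage.yaml
yq -i '.application_outage.namespace = strenv(NAMESPACE)' CI/scenarios/app_outage.yaml
```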
CI/tests/test_cpu_hog_gh.sh (new executable file, 20 lines)
@@ -0,0 +1,20 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT


function functional_test_litmus_cpu {
    [ -z $NODE_NAME ] && echo "[ERR] NODE_NAME variable not set, failing." && exit 1
    yq -i ' .spec.experiments = [{"name": "node-cpu-hog", "spec":{"components":{"env":[{"name":"TOTAL_CHAOS_DURATION","value":"10"},{"name":"NODE_CPU_CORE","value":"1"},{"name":"NODES_AFFECTED_PERC","value":"30"},{"name":"TARGET_NODES","value":"'$NODE_NAME'"}]}}}]' CI/scenarios/node_cpu_hog_engine_node.yaml

    cp CI/config/common_test_config.yaml CI/config/litmus_config.yaml
    yq '.kraken.chaos_scenarios = [{"litmus_scenarios":[["scenarios/openshift/templates/litmus-rbac.yaml","CI/scenarios/node_cpu_hog_engine_node.yaml"]]}]' -i CI/config/litmus_config.yaml

    python3 -m coverage run -a run_kraken.py -c CI/config/litmus_config.yaml
    echo "Litmus scenario test: Success"
}

functional_test_litmus_cpu
CI/tests/test_io_hog_gh.sh (new executable file, 19 lines)
@@ -0,0 +1,19 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT


function functional_test_litmus_io {
    [ -z $NODE_NAME ] && echo "[ERR] NODE_NAME variable not set, failing." && exit 1
    yq -i ' .spec.experiments = [{"name": "node-io-stress", "spec":{"components":{"env":[{"name":"TOTAL_CHAOS_DURATION","value":"10"},{"name":"FILESYSTEM_UTILIZATION_PERCENTAGE","value":"100"},{"name":"CPU","value":"1"},{"name":"NUMBER_OF_WORKERS","value":"3"},{"name":"TARGET_NODES","value":"'$NODE_NAME'"}]}}}]' CI/scenarios/node_io_engine_node.yaml
    cp CI/config/common_test_config.yaml CI/config/litmus_config.yaml
    yq '.kraken.chaos_scenarios = [{"litmus_scenarios":[["scenarios/openshift/templates/litmus-rbac.yaml","CI/scenarios/node_io_engine_node.yaml"]]}]' -i CI/config/litmus_config.yaml

    python3 -m coverage run -a run_kraken.py -c CI/config/litmus_config.yaml
    echo "Litmus scenario test: Success"
}

functional_test_litmus_io
CI/tests/test_mem_hog_gh.sh (new executable file, 19 lines)
@@ -0,0 +1,19 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT


function functional_test_litmus_mem {
    [ -z $NODE_NAME ] && echo "[ERR] NODE_NAME variable not set, failing." && exit 1
    yq -i ' .spec.experiments = [{"name": "node-io-stress", "spec":{"components":{"env":[{"name":"TOTAL_CHAOS_DURATION","value":"10"},{"name":"CPU","value":"1"},{"name":"TARGET_NODES","value":"'$NODE_NAME'"}]}}}]' CI/scenarios/node_mem_engine_node.yaml
    cp CI/config/common_test_config.yaml CI/config/litmus_config.yaml
    yq '.kraken.chaos_scenarios = [{"litmus_scenarios":[["scenarios/openshift/templates/litmus-rbac.yaml","CI/scenarios/node_mem_engine_node.yaml"]]}]' -i CI/config/litmus_config.yaml

    python3 -m coverage run -a run_kraken.py -c CI/config/litmus_config.yaml
    echo "Litmus scenario test: Success"
}

functional_test_litmus_mem
@@ -7,10 +7,10 @@ trap finish EXIT

function funtional_test_namespace_deletion {
    export scenario_type="namespace_scenarios"
    export scenario_file="- CI/scenarios/ingress_namespace.yaml"
    export scenario_file="- CI/scenarios/network_diagnostics_namespace.yaml"
    export post_config=""
    yq '.scenarios.[0].namespace="^openshift-network-diagnostics$"' -i CI/scenarios/network_diagnostics_namespace.yaml
    envsubst < CI/config/common_test_config.yaml > CI/config/namespace_config.yaml

    python3 -m coverage run -a run_kraken.py -c CI/config/namespace_config.yaml
    echo $?
    echo "Namespace scenario test: Success"
@@ -1,19 +0,0 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT

function funtional_test_pod_deletion {
    export scenario_type="pod_scenarios"
    export scenario_file="- CI/scenarios/hello_pod_killing.yml"
    export post_config=""
    envsubst < CI/config/common_test_config.yaml > CI/config/pod_config.yaml

    python3 -m coverage run -a run_kraken.py -c CI/config/pod_config.yaml
    echo $?
    echo "Pod scenario test: Success"
}

funtional_test_pod_deletion
CODE_OF_CONDUCT.md (new file, 127 lines)
@@ -0,0 +1,127 @@
# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our
community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
  and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
  overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or
  advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
  address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.

Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement.
All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the
reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series
of actions.

**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within
the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.

Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
MAINTAINERS.md (new file, 12 lines)
@@ -0,0 +1,12 @@
## Overview

This document contains a list of maintainers in this repo.

## Current Maintainers

| Maintainer          | GitHub ID                                       | Email                |
|---------------------|-------------------------------------------------|----------------------|
| Ravi Elluri         | [chaitanyaenr](https://github.com/chaitanyaenr) | nelluri@redhat.com   |
| Pradeep Surisetty   | [psuriset](https://github.com/psuriset)         | psuriset@redhat.com  |
| Paige Rubendall     | [paigerube14](https://github.com/paigerube14)   | prubenda@redhat.com  |
| Tullio Sebastiani   | [tsebastiani](https://github.com/tsebastiani)   | tsebasti@redhat.com  |
README.md (26 lines changed)
@@ -1,5 +1,5 @@
# Krkn aka Kraken
[](https://quay.io/repository/chaos-kubox/krkn?tab=tags&tag=latest)
[](https://quay.io/repository/redhat-chaos/krkn?tab=tags&tag=latest)



@@ -56,20 +56,21 @@ Instructions on how to setup the config and the options supported can be found a

### Kubernetes/OpenShift chaos scenarios supported

Scenario type | Kubernetes | OpenShift
--------------------------- | ------------- | -------------------- |
Scenario type | Kubernetes | OpenShift
--------------------------- | ------------- |--------------------|
[Pod Scenarios](docs/pod_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: |
[Pod Network Scenarios](docs/pod_network_scenarios.md) | :x: | :heavy_check_mark: |
[Container Scenarios](docs/container_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: |
[Node Scenarios](docs/node_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: |
[Time Scenarios](docs/time_scenarios.md) | :x: | :heavy_check_mark: |
[Litmus Scenarios](docs/litmus_scenarios.md) | :x: | :heavy_check_mark: |
[Hog Scenarios: CPU, Memory](docs/arcaflow_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: |
[Cluster Shut Down Scenarios](docs/cluster_shut_down_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: |
[Namespace Scenarios](docs/namespace_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: |
[Zone Outage Scenarios](docs/zone_outage.md) | :heavy_check_mark: | :heavy_check_mark: |
[Application_outages](docs/application_outages.md) | :heavy_check_mark: | :heavy_check_mark: |
[PVC scenario](docs/pvc_scenario.md) | :heavy_check_mark: | :heavy_check_mark: |
[Network_Chaos](docs/network_chaos.md) | :heavy_check_mark: | :heavy_check_mark: |
[ManagedCluster Scenarios](docs/managedcluster_scenarios.md) | :heavy_check_mark: | :question: |
[ManagedCluster Scenarios](docs/managedcluster_scenarios.md) | :heavy_check_mark: | :question: |


### Kraken scenario pass/fail criteria and report
@@ -94,8 +95,12 @@ Monitoring the Kubernetes/OpenShift cluster to observe the impact of Kraken chao
Kraken supports capturing metrics for the duration of the scenarios defined in the config and indexes them into Elasticsearch to be able to store and evaluate the state of the runs long term. The indexed metrics can be visualized with the help of Grafana. It uses [Kube-burner](https://github.com/cloud-bulldozer/kube-burner) under the hood. The metrics to capture need to be defined in a metrics profile which Kraken consumes to query prometheus (installed by default in OpenShift) with the start and end timestamp of the run. Information on enabling and leveraging this feature can be found [here](docs/metrics.md).


### Alerts
In addition to checking the recovery and health of the cluster and components under test, Kraken takes in a profile with the Prometheus expressions to validate and alert on, and exits with a non-zero return code depending on the severity set. This feature can be used to determine pass/fail or alert on abnormalities observed in the cluster based on the metrics. Information on enabling and leveraging this feature can be found [here](docs/alerts.md).
### SLOs validation during and post chaos
- In addition to checking the recovery and health of the cluster and components under test, Kraken takes in a profile with the Prometheus expressions to validate and alert on, and exits with a non-zero return code depending on the severity set. This feature can be used to determine pass/fail or alert on abnormalities observed in the cluster based on the metrics.
- Kraken also provides the ability to check if any critical alerts are firing in the cluster post chaos and passes/fails accordingly.

Information on enabling and leveraging this feature can be found [here](docs/SLOs_validation.md)


### OCM / ACM integration

@@ -105,14 +110,11 @@ Kraken supports injecting faults into [Open Cluster Management (OCM)](https://op
- Blog post on introduction to Kraken: https://www.openshift.com/blog/introduction-to-kraken-a-chaos-tool-for-openshift/kubernetes
- Discussion and demo on how Kraken can be leveraged to ensure OpenShift is reliable, performant and scalable: https://www.youtube.com/watch?v=s1PvupI5sD0&ab_channel=OpenShift
- Blog post emphasizing the importance of making Chaos part of Performance and Scale runs to mimic the production environments: https://www.openshift.com/blog/making-chaos-part-of-kubernetes/openshift-performance-and-scalability-tests
- Blog post on findings from Chaos test runs: https://cloud.redhat.com/blog/openshift/kubernetes-chaos-stories


### Roadmap
Following is a list of enhancements that we are planning to work on adding support in Kraken. Of course any help/contributions are greatly appreciated.
- [Ability to visualize the metrics that are being captured by Kraken and stored in Elasticsearch](https://github.com/redhat-chaos/krkn/issues/124)
- Continue to improve [Chaos Testing Guide](https://cloud-bulldozer.github.io/kraken/) in terms of adding best practices, test environment recommendations and scenarios to make sure the OpenShift platform, as well as the applications running on top of it, are resilient and performant under chaotic conditions.
- Support for running Kraken on Kubernetes distribution - see https://github.com/redhat-chaos/krkn/issues/185, https://github.com/redhat-chaos/krkn/issues/186
- Sweet logo for Kraken - see https://github.com/redhat-chaos/krkn/issues/195
Enhancements being planned can be found in the [roadmap](ROADMAP.md).


### Contributions
ROADMAP.md (new file, 11 lines)
@@ -0,0 +1,11 @@
## Krkn Roadmap

Following is a list of enhancements that we are planning to work on adding support for in Krkn. Of course, any help/contributions are greatly appreciated.

- [Ability to run multiple chaos scenarios in parallel under load to mimic real world outages](https://github.com/redhat-chaos/krkn/issues/424)
- [Centralized storage for chaos experiments artifacts](https://github.com/redhat-chaos/krkn/issues/423)
- [Support for causing DNS outages](https://github.com/redhat-chaos/krkn/issues/394)
- [Support for pod level network traffic shaping](https://github.com/redhat-chaos/krkn/issues/393)
- [Ability to visualize the metrics that are being captured by Kraken and stored in Elasticsearch](https://github.com/redhat-chaos/krkn/issues/124)
- Support for running all the scenarios of Kraken on Kubernetes distribution - see https://github.com/redhat-chaos/krkn/issues/185, https://github.com/redhat-chaos/krkn/issues/186
- Continue to improve [Chaos Testing Guide](https://redhat-chaos.github.io/krkn) in terms of adding best practices, test environment recommendations and scenarios to make sure the OpenShift platform, as well as the applications running on top of it, are resilient and performant under chaotic conditions.
@@ -1,11 +1,65 @@
- expr: avg_over_time(histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[2m]))[5m:]) > 0.01
  description: 5 minutes avg. etcd fsync latency on {{$labels.pod}} higher than 10ms {{$value}}
# etcd

- expr: avg_over_time(histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[2m]))[10m:]) > 0.01
  description: 10 minutes avg. 99th etcd fsync latency on {{$labels.pod}} higher than 10ms. {{$value}}s
  severity: warning

- expr: avg_over_time(histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[2m]))[10m:]) > 1
  description: 10 minutes avg. 99th etcd fsync latency on {{$labels.pod}} higher than 1s. {{$value}}s
  severity: error

- expr: avg_over_time(histogram_quantile(0.99, rate(etcd_network_peer_round_trip_time_seconds_bucket[5m]))[5m:]) > 0.1
  description: 5 minutes avg. etcd network peer round trip on {{$labels.pod}} higher than 100ms {{$value}}
  severity: info
- expr: avg_over_time(histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket[2m]))[10m:]) > 0.03
  description: 10 minutes avg. 99th etcd commit latency on {{$labels.pod}} higher than 30ms. {{$value}}s
  severity: warning

- expr: increase(etcd_server_leader_changes_seen_total[2m]) > 0
- expr: rate(etcd_server_leader_changes_seen_total[2m]) > 0
  description: etcd leader changes observed
  severity: critical
  severity: warning

# API server
- expr: avg_over_time(histogram_quantile(0.99, sum(irate(apiserver_request_duration_seconds_bucket{apiserver="kube-apiserver", verb=~"POST|PUT|DELETE|PATCH", subresource!~"log|exec|portforward|attach|proxy"}[2m])) by (le, resource, verb))[10m:]) > 1
  description: 10 minutes avg. 99th mutating API call latency for {{$labels.verb}}/{{$labels.resource}} higher than 1 second. {{$value}}s
  severity: error

- expr: avg_over_time(histogram_quantile(0.99, sum(irate(apiserver_request_duration_seconds_bucket{apiserver="kube-apiserver", verb=~"LIST|GET", subresource!~"log|exec|portforward|attach|proxy", scope="resource"}[2m])) by (le, resource, verb, scope))[5m:]) > 1
  description: 5 minutes avg. 99th read-only API call latency for {{$labels.verb}}/{{$labels.resource}} in scope {{$labels.scope}} higher than 1 second. {{$value}}s
  severity: error

- expr: avg_over_time(histogram_quantile(0.99, sum(irate(apiserver_request_duration_seconds_bucket{apiserver="kube-apiserver", verb=~"LIST|GET", subresource!~"log|exec|portforward|attach|proxy", scope="namespace"}[2m])) by (le, resource, verb, scope))[5m:]) > 5
  description: 5 minutes avg. 99th read-only API call latency for {{$labels.verb}}/{{$labels.resource}} in scope {{$labels.scope}} higher than 5 seconds. {{$value}}s
  severity: error

- expr: avg_over_time(histogram_quantile(0.99, sum(irate(apiserver_request_duration_seconds_bucket{apiserver="kube-apiserver", verb=~"LIST|GET", subresource!~"log|exec|portforward|attach|proxy", scope="cluster"}[2m])) by (le, resource, verb, scope))[5m:]) > 30
  description: 5 minutes avg. 99th read-only API call latency for {{$labels.verb}}/{{$labels.resource}} in scope {{$labels.scope}} higher than 30 seconds. {{$value}}s
  severity: error

# Control plane pods
- expr: up{apiserver=~"kube-apiserver|openshift-apiserver"} == 0
  description: "{{$labels.apiserver}} {{$labels.instance}} down"
  severity: warning

- expr: up{namespace=~"openshift-etcd"} == 0
  description: "{{$labels.namespace}}/{{$labels.pod}} down"
  severity: warning

- expr: up{namespace=~"openshift-.*(kube-controller-manager|scheduler|controller-manager|sdn|ovn-kubernetes|dns)"} == 0
  description: "{{$labels.namespace}}/{{$labels.pod}} down"
  severity: warning

- expr: up{job=~"crio|kubelet"} == 0
  description: "{{$labels.node}}/{{$labels.job}} down"
  severity: warning

- expr: up{job="ovnkube-node"} == 0
  description: "{{$labels.instance}}/{{$labels.pod}} {{$labels.job}} down"
  severity: warning

# Service sync latency
- expr: histogram_quantile(0.99, sum(rate(kubeproxy_network_programming_duration_seconds_bucket[2m])) by (le)) > 10
  description: 99th Kubeproxy network programming latency higher than 10 seconds. {{$value}}s
  severity: warning

# Prometheus alerts
- expr: ALERTS{severity="critical", alertstate="firing"} > 0
  description: Critical prometheus alert. {{$labels.alertname}}
  severity: warning
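Each profile entry is a PromQL expression that Kraken evaluates against the cluster's Prometheus. To try one by hand through the Prometheus HTTP API, a sketch; the endpoint URL is a placeholder and the token assumes an active OpenShift login:

```bash
# Evaluate one alert expression against Prometheus' HTTP API.
PROM_URL=https://prometheus.example.com   # placeholder endpoint
TOKEN="$(oc whoami -t)"                   # bearer token from the current oc session
curl -skG "$PROM_URL/api/v1/query" \
  -H "Authorization: Bearer $TOKEN" \
  --data-urlencode 'query=rate(etcd_server_leader_changes_seen_total[2m]) > 0'
```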
@@ -6,47 +6,9 @@ kraken:
    signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer docs/signal.md for more details
    signal_address: 0.0.0.0 # Signal listening address
    port: 8081 # Signal port
    litmus_install: True # Installs specified version, set to False if it's already setup
    litmus_version: v1.13.6 # Litmus version to install
    litmus_uninstall: False # If you want to uninstall litmus if failure
    litmus_uninstall_before_run: True # If you want to uninstall litmus before a new run starts
    chaos_scenarios: # List of policies/chaos scenarios to load
        - container_scenarios: # List of chaos pod scenarios to load
            - - scenarios/openshift/container_etcd.yml
        - plugin_scenarios:
            - scenarios/openshift/etcd.yml
            - scenarios/openshift/regex_openshift_pod_kill.yml
            - scenarios/openshift/vmware_node_scenarios.yml
            - scenarios/openshift/network_chaos_ingress.yml
        - node_scenarios: # List of chaos node scenarios to load
            - scenarios/openshift/node_scenarios_example.yml
        - plugin_scenarios:
            - scenarios/openshift/openshift-apiserver.yml
            - scenarios/openshift/openshift-kube-apiserver.yml
        - time_scenarios: # List of chaos time scenarios to load
            - scenarios/openshift/time_scenarios_example.yml
        - litmus_scenarios: # List of litmus scenarios to load
            - - scenarios/openshift/templates/litmus-rbac.yaml
              - scenarios/openshift/node_cpu_hog_engine.yaml
            - - scenarios/openshift/templates/litmus-rbac.yaml
              - scenarios/openshift/node_mem_engine.yaml
            - - scenarios/openshift/templates/litmus-rbac.yaml
              - scenarios/openshift/node_io_engine.yaml
        - cluster_shut_down_scenarios:
            - - scenarios/openshift/cluster_shut_down_scenario.yml
              - scenarios/openshift/post_action_shut_down.py
        - namespace_scenarios:
            - - scenarios/openshift/regex_namespace.yaml
            - - scenarios/openshift/ingress_namespace.yaml
              - scenarios/openshift/post_action_namespace.py
        - zone_outages:
            - scenarios/openshift/zone_outage.yaml
        - application_outages:
            - scenarios/openshift/app_outage.yaml
        - pvc_scenarios:
            - scenarios/openshift/pvc_scenario.yaml
        - network_chaos:
            - scenarios/openshift/network_chaos.yaml
        - application_outages:
            - scenarios/openshift/app_outage.yaml

cerberus:
    cerberus_enabled: False # Enable it when cerberus is previously installed
@@ -56,7 +18,7 @@ cerberus:
performance_monitoring:
    deploy_dashboards: False # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift
    repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
    kube_burner_binary_url: "https://github.com/cloud-bulldozer/kube-burner/releases/download/v0.9.1/kube-burner-0.9.1-Linux-x86_64.tar.gz"
    kube_burner_binary_url: "https://github.com/cloud-bulldozer/kube-burner/releases/download/v1.7.0/kube-burner-1.7.0-Linux-x86_64.tar.gz"
    capture_metrics: False
    config_path: config/kube_burner.yaml # Define the Elasticsearch url and index name in this config
    metrics_profile_path: config/metrics-aggregated.yaml
@@ -64,9 +26,26 @@ performance_monitoring:
    prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
    uuid: # uuid for the run is generated by default if not set
    enable_alerts: False # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
    alert_profile: config/alerts # Path to alert profile with the prometheus queries

    alert_profile: config/alerts # Path or URL to alert profile with the prometheus queries
    check_critical_alerts: False # When enabled will check prometheus for critical alerts firing post chaos
tunings:
    wait_duration: 60 # Duration to wait between each chaos scenario
    iterations: 1 # Number of times to execute the scenarios
    daemon_mode: False # Iterations are set to infinity which means that the kraken will cause chaos forever
telemetry:
    enabled: False # enable/disables the telemetry collection feature
    api_url: https://ulnmf9xv7j.execute-api.us-west-2.amazonaws.com/production # telemetry service endpoint
    username: username # telemetry service username
    password: password # telemetry service password
    prometheus_backup: True # enables/disables prometheus data collection
    full_prometheus_backup: False # if set to False only the /prometheus/wal folder will be downloaded.
    backup_threads: 5 # number of telemetry download/upload threads
    archive_path: /tmp # local path where the archive files will be temporarily stored
    max_retries: 0 # maximum number of upload retries (if 0 will retry forever)
    run_tag: '' # if set, this will be appended to the run folder in the bucket (useful to group the runs)
    archive_size: 10000 # the size of the prometheus data archive in KB. The lower the archive size,
                        # the higher the number of archive files produced and uploaded (and processed by backup_threads
                        # simultaneously).
                        # For an unstable/slow connection it is better to keep this value low,
                        # increasing the number of backup_threads; this way, on upload failure, the retry will happen only on the
                        # failed chunk without affecting the whole upload.
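As a rough worked example of the archive_size/backup_threads tradeoff (sizes assumed, not from the source):

```bash
# Back-of-the-envelope chunk count for the telemetry upload.
backup_kb=512000        # assume ~500 MB of Prometheus data to back up
archive_size=10000      # KB per archive, as in the config above
backup_threads=5
chunks=$(( (backup_kb + archive_size - 1) / archive_size ))
echo "$chunks archives, roughly $(( chunks / backup_threads )) uploads per thread"
# smaller archives -> more chunks, but a failed upload only retries one small chunk
```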
@@ -32,7 +32,7 @@ performance_monitoring:
    uuid: # uuid for the run is generated by default if not set
    enable_alerts: False # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
    alert_profile: config/alerts # Path to alert profile with the prometheus queries

    check_critical_alerts: False # When enabled will check prometheus for critical alerts firing post chaos after soak time for the cluster to settle down
tunings:
    wait_duration: 60 # Duration to wait between each chaos scenario
    iterations: 1 # Number of times to execute the scenarios
@@ -1,29 +1,28 @@
# Dockerfile for kraken

FROM quay.io/openshift/origin-tests:latest as origintests

FROM mcr.microsoft.com/azure-cli:latest as azure-cli

FROM quay.io/centos/centos:stream9
FROM registry.access.redhat.com/ubi8/ubi:latest

LABEL org.opencontainers.image.authors="Red Hat OpenShift Chaos Engineering"

ENV KUBECONFIG /root/.kube/config

# Copy OpenShift CLI, Kubernetes CLI from origin-tests image
COPY --from=origintests /usr/bin/oc /usr/bin/oc
COPY --from=origintests /usr/bin/kubectl /usr/bin/kubectl

# Copy azure client binary from azure-cli image
COPY --from=azure-cli /usr/local/bin/az /usr/bin/az

# Install dependencies
RUN yum install epel-release -y && \
    yum install -y git python39 python3-pip jq gettext && \
RUN yum install -y git python39 python3-pip jq gettext wget && \
    python3.9 -m pip install -U pip && \
    git clone https://github.com/redhat-chaos/krkn.git --branch v1.1.1 /root/kraken && \
    git clone https://github.com/redhat-chaos/krkn.git --branch v1.3.6 /root/kraken && \
    mkdir -p /root/.kube && cd /root/kraken && \
    pip3.9 install -r requirements.txt
    pip3.9 install -r requirements.txt && \
    pip3.9 install virtualenv && \
    wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O /usr/bin/yq && chmod +x /usr/bin/yq

# Get Kubernetes and OpenShift clients from stable releases
WORKDIR /tmp
RUN wget https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable/openshift-client-linux.tar.gz && tar -xvf openshift-client-linux.tar.gz && cp oc /usr/local/bin/oc && cp kubectl /usr/local/bin/kubectl

WORKDIR /root/kraken
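A usage sketch for the image this Dockerfile produces; the kubeconfig mount target matches the `ENV KUBECONFIG` above, while the config mount path and the explicit run command are assumptions (the excerpt shows no ENTRYPOINT for this variant):

```bash
# Build from the repo root and run Kraken against the currently logged-in cluster.
docker build --no-cache -t quay.io/redhat-chaos/krkn containers/
docker run -it \
  -v "$HOME/.kube/config:/root/.kube/config:Z" \
  -v "$PWD/config:/root/kraken/config:Z" \
  quay.io/redhat-chaos/krkn \
  python3.9 run_kraken.py --config=config/config.yaml
```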
@@ -2,24 +2,28 @@

FROM ppc64le/centos:8

MAINTAINER Red Hat OpenShift Performance and Scale
FROM mcr.microsoft.com/azure-cli:latest as azure-cli

LABEL org.opencontainers.image.authors="Red Hat OpenShift Chaos Engineering"

ENV KUBECONFIG /root/.kube/config

RUN curl -L -o kubernetes-client-linux-ppc64le.tar.gz https://dl.k8s.io/v1.19.0/kubernetes-client-linux-ppc64le.tar.gz \
    && tar xf kubernetes-client-linux-ppc64le.tar.gz && mv kubernetes/client/bin/kubectl /usr/bin/ && rm -rf kubernetes-client-linux-ppc64le.tar.gz

RUN curl -L -o openshift-client-linux.tar.gz https://mirror.openshift.com/pub/openshift-v4/ppc64le/clients/ocp/stable/openshift-client-linux.tar.gz \
    && tar xf openshift-client-linux.tar.gz -C /usr/bin && rm -rf openshift-client-linux.tar.gz
# Copy azure client binary from azure-cli image
COPY --from=azure-cli /usr/local/bin/az /usr/bin/az

# Install dependencies
RUN yum install epel-release -y && \
    yum install -y git python36 python3-pip gcc libffi-devel python36-devel openssl-devel gcc-c++ make jq gettext && \
    git clone https://github.com/redhat-chaos/krkn.git --branch main /root/kraken && \
    mkdir -p /root/.kube && cd /root/kraken && \
    pip3 install cryptography==3.3.2 && \
    pip3 install -r requirements.txt setuptools==40.3.0 urllib3==1.25.4
RUN yum install -y git python39 python3-pip jq gettext wget && \
    python3.9 -m pip install -U pip && \
    git clone https://github.com/redhat-chaos/krkn.git --branch v1.3.6 /root/kraken && \
    mkdir -p /root/.kube && cd /root/kraken && \
    pip3.9 install -r requirements.txt && \
    pip3.9 install virtualenv && \
    wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O /usr/bin/yq && chmod +x /usr/bin/yq

# Get Kubernetes and OpenShift clients from stable releases
WORKDIR /tmp
RUN wget https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable/openshift-client-linux.tar.gz && tar -xvf openshift-client-linux.tar.gz && cp oc /usr/local/bin/oc && cp kubectl /usr/local/bin/kubectl

WORKDIR /root/kraken

ENTRYPOINT python3 run_kraken.py --config=config/config.yaml
ENTRYPOINT python3.9 run_kraken.py --config=config/config.yaml
@@ -3,7 +3,7 @@

Container image gets automatically built by quay.io at [Kraken image](https://quay.io/chaos-kubox/krkn).
Container image gets automatically built by quay.io at [Kraken image](https://quay.io/redhat-chaos/krkn).
@@ -16,7 +16,7 @@ spec:
    - name: kraken
      securityContext:
        privileged: true
      image: quay.io/chaos-kubox/krkn
      image: quay.io/redhat-chaos/krkn
      command: ["/bin/sh", "-c"]
      args: ["python3.9 run_kraken.py -c config/config.yaml"]
      volumeMounts:
@@ -1,6 +1,17 @@
## Alerts
## SLOs validation

Pass/fail based on metrics captured from the cluster is important in addition to checking the health status and recovery. Kraken supports alerting based on the queries defined by the user and modifies the return code of the run to determine pass/fail. It's especially useful in case of automated runs in CI where the user won't be able to monitor the system. It uses [Kube-burner](https://kube-burner.readthedocs.io/en/latest/) under the hood. This feature can be enabled in the [config](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml) by setting the following:
Pass/fail based on metrics captured from the cluster is important in addition to checking the health status and recovery. Kraken supports:

### Checking for critical alerts post chaos
If enabled, the check runs at the end of each scenario ( post chaos ) and Kraken exits in case critical alerts are firing, to allow the user to debug. You can enable it in the config:

```
performance_monitoring:
    check_critical_alerts: False # When enabled will check prometheus for critical alerts firing post chaos
```

### Validation and alerting based on the queries defined by the user during chaos
Takes PromQL queries as input and modifies the return code of the run to determine pass/fail. It's especially useful in case of automated runs in CI where the user won't be able to monitor the system. It uses [Kube-burner](https://kube-burner.readthedocs.io/en/latest/) under the hood. This feature can be enabled in the [config](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml) by setting the following:

```
performance_monitoring:
@@ -11,8 +22,8 @@ performance_monitoring:
    alert_profile: config/alerts # Path to alert profile with the prometheus queries.
```

### Alert profile
A couple of [alert profiles](https://github.com/redhat-chaos/krkn/tree/main/config) [alerts](https://github.com/redhat-chaos/krkn/blob/main/config/alerts) are shipped by default and can be tweaked to add more queries to alert on. The following are a few alerts examples:
#### Alert profile
A couple of [alert profiles](https://github.com/redhat-chaos/krkn/tree/main/config) [alerts](https://github.com/redhat-chaos/krkn/blob/main/config/alerts) are shipped by default and can be tweaked to add more queries to alert on. The user can provide a URL or a path to the file in the [config](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml). The following are a few alerts examples:

```
- expr: avg_over_time(histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[2m]))[5m:]) > 0.01
```
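A fuller profile entry typically pairs the query with a human-readable description and a severity. The sketch below is illustrative only (the `description`/`severity` fields follow the Kube-burner alert-profile format; the query, threshold and message are made-up examples, not shipped defaults):

```yaml
# Illustrative alert-profile entry; query, threshold and message are examples
- expr: avg_over_time(histogram_quantile(0.99, rate(etcd_network_peer_round_trip_time_seconds_bucket[2m]))[5m:]) > 0.1
  description: 99th etcd peer round trip time higher than 100ms
  severity: warning # severity decides whether a firing alert only logs or fails the run
```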
67  docs/arcaflow_scenarios.md  Normal file
@@ -0,0 +1,67 @@
## Arcaflow Scenarios
Arcaflow is a workflow engine in development which provides the ability to execute workflow steps in sequence, in parallel, repeatedly, etc. The main difference to competitors such as Netflix Conductor is the ability to run ad-hoc workflows without requiring an infrastructure setup.

The engine uses containers to execute plugins and runs them either locally in Docker/Podman or remotely on a Kubernetes cluster. The workflow system is strongly typed and allows for generating JSON schema and OpenAPI documents for all data formats involved.

### Available Scenarios
#### Hog scenarios:
- [CPU Hog](arcaflow_scenarios/cpu_hog.md)
- [Memory Hog](arcaflow_scenarios/memory_hog.md)


### Prerequisites
Arcaflow supports three deployment technologies:
- Docker
- Podman
- Kubernetes

#### Docker
In order to run Arcaflow Scenarios with the Docker deployer, be sure that:
- Docker is correctly installed in your Operating System (to find instructions on how to install Docker please refer to the [Docker Documentation](https://www.docker.com/))
- the Docker daemon is running

#### Podman
The Podman deployer is built around the podman CLI and does not necessarily need to run alongside the podman daemon.
To run Arcaflow Scenarios in your Operating System, be sure that:
- podman is correctly installed in your Operating System (to find instructions on how to install podman refer to the [Podman Documentation](https://podman.io/))
- the podman CLI is in your shell PATH

#### Kubernetes
The Kubernetes deployer integrates the Kubernetes API client directly and needs only a valid kubeconfig file and a reachable Kubernetes/OpenShift cluster.

### Usage

To enable arcaflow scenarios edit the kraken config file, go to the section `kraken -> chaos_scenarios` of the yaml structure
and add a new element to the list named `arcaflow_scenarios`, then add the desired scenario
pointing to the `input.yaml` file.
```
kraken:
    ...
    chaos_scenarios:
        - arcaflow_scenarios:
            - scenarios/arcaflow/cpu-hog/input.yaml
```
#### input.yaml
The implemented scenarios can be found in the *scenarios/arcaflow/<scenario_name>* folder.
The entrypoint of each scenario is the *input.yaml* file.
This file contains all the options to set up the scenario according to the desired target.
### config.yaml
The arcaflow config file. Here you can set the arcaflow deployer and the arcaflow log level; a minimal example follows the lists below.
The supported deployers are:
- Docker
- Podman (podman daemon not needed, suggested option)
- Kubernetes

The supported log levels are:
- debug
- info
- warning
- error
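A minimal `config.yaml` could look like the sketch below. The `deployer.type` key matches what kraken's arcaflow plugin inspects (see `arcaflow_plugin.py` later in this diff); the exact layout of the log section is an assumption, so consult the [Arcaflow Documentation](https://arcalot.io/arcaflow/) for the authoritative schema:

```yaml
# Minimal sketch of an arcaflow config.yaml (layout assumed, not taken from the repo)
deployer:
  type: podman # one of docker, podman, kubernetes
log:
  level: info # one of debug, info, warning, error
```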
### workflow.yaml
This file contains the steps that will be executed to perform the scenario against the target.
Each step is represented by a container that will be executed by the deployer, together with its options.
Note that we provide the scenarios as a template, but they can be manipulated to define more complex workflows.
For more details regarding the arcaflow workflow architecture and syntax, refer to the [Arcaflow Documentation](https://arcalot.io/arcaflow/).

19  docs/arcaflow_scenarios/cpu_hog.md  Normal file
@@ -0,0 +1,19 @@
# CPU Hog
This scenario is based on the arcaflow [arcaflow-plugin-stressng](https://github.com/arcalot/arcaflow-plugin-stressng) plugin.
The purpose of this scenario is to create CPU pressure on a particular node of the Kubernetes/OpenShift cluster for a given time span.
To enable this plugin add the pointer to the scenario input file `scenarios/arcaflow/cpu-hog/input.yaml` as described in the
Usage section.
This scenario takes a list of objects named `input_list` with the following properties:

- **kubeconfig :** *string* the kubeconfig needed by the deployer to deploy the stressng plugin in the target cluster
- **namespace :** *string* the namespace where the scenario container will be deployed
  **Note:** this parameter will be automatically filled by kraken if the `kubeconfig_path` property is correctly set
- **node_selector :** *key-value map* the node label that will be used as `nodeSelector` by the pod to target a specific cluster node
- **duration :** *string* stop the stress test after N seconds. One can also specify the units of time in seconds, minutes, hours, days or years with the suffix s, m, h, d or y.
- **cpu_count :** *int* the number of CPU cores to be used (0 means all)
- **cpu_method :** *string* a fine-grained control of which cpu stressors to use (ackermann, cfloat etc. see the [manpage](https://manpages.org/sysbench) for all the cpu_method options)
- **cpu_load_percentage :** *int* the CPU load by percentage

To perform several load tests in the same run simultaneously (e.g. stress two or more nodes in the same run) add another item
to the `input_list` with the same properties (and possibly different values, e.g. different node_selectors
to schedule the pod on different nodes). To reduce (or increase) the parallelism change the value `parallelism` in the `workflow.yaml` file.
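For orientation, an `input.yaml` for this scenario could look like the sketch below. The property names are the ones documented above; all values (node label, duration, counts) are illustrative:

```yaml
# Illustrative cpu-hog input.yaml; values are examples only
input_list:
  - kubeconfig: "" # filled in automatically by kraken when kubeconfig_path is set
    namespace: default
    node_selector:
      kubernetes.io/hostname: worker-0 # hypothetical node label
    duration: 60s
    cpu_count: 0 # 0 means use all cores
    cpu_method: all # assumed stressor selection, see the manpage linked above
    cpu_load_percentage: 80
```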
18  docs/arcaflow_scenarios/memory_hog.md  Normal file
@@ -0,0 +1,18 @@
# Memory Hog
This scenario is based on the arcaflow [arcaflow-plugin-stressng](https://github.com/arcalot/arcaflow-plugin-stressng) plugin.
The purpose of this scenario is to create Virtual Memory pressure on a particular node of the Kubernetes/OpenShift cluster for a given time span.
To enable this plugin add the pointer to the scenario input file `scenarios/arcaflow/memory-hog/input.yaml` as described in the
Usage section.
This scenario takes a list of objects named `input_list` with the following properties:

- **kubeconfig :** *string* the kubeconfig needed by the deployer to deploy the stressng plugin in the target cluster
- **namespace :** *string* the namespace where the scenario container will be deployed
  **Note:** this parameter will be automatically filled by kraken if the `kubeconfig_path` property is correctly set
- **node_selector :** *key-value map* the node label that will be used as `nodeSelector` by the pod to target a specific cluster node
- **duration :** *string* stop the stress test after N seconds. One can also specify the units of time in seconds, minutes, hours, days or years with the suffix s, m, h, d or y.
- **vm_bytes :** *string* N bytes per vm process or percentage of memory used (using the % symbol). The size can be expressed in units of Bytes, KBytes, MBytes and GBytes using the suffix b, k, m or g.
- **vm_workers :** *int* Number of VM stressors to be run (0 means 1 stressor per CPU)

To perform several load tests in the same run simultaneously (e.g. stress two or more nodes in the same run) add another item
to the `input_list` with the same properties (and possibly different values, e.g. different node_selectors
to schedule the pod on different nodes). To reduce (or increase) the parallelism change the value `parallelism` in the `workflow.yaml` file.
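Analogously to the CPU hog, an illustrative `input.yaml` sketch for this scenario (property names from the list above, values made up) could be:

```yaml
# Illustrative memory-hog input.yaml; values are examples only
input_list:
  - kubeconfig: "" # filled in automatically by kraken when kubeconfig_path is set
    namespace: default
    node_selector:
      kubernetes.io/hostname: worker-0 # hypothetical node label
    duration: 60s
    vm_bytes: 80% # stress 80% of the available memory
    vm_workers: 2
```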
@@ -1,11 +1,12 @@
Supported Cloud Providers:

* [AWS](#aws)
* [GCP](#gcp)
* [Openstack](#openstack)
* [Azure](#azure)
* [Alibaba](#alibaba)
* [VMware](#vmware)
- [AWS](#aws)
- [GCP](#gcp)
- [Openstack](#openstack)
- [Azure](#azure)
- [Alibaba](#alibaba)
- [VMware](#vmware)
- [IBMCloud](#ibmcloud)

## AWS

@@ -65,4 +66,24 @@ Set the following environment variables

3. ```export VSPHERE_PASSWORD=<vSphere_client_password>```

These are the credentials that you would normally use to access the vSphere client.
These are the credentials that you would normally use to access the vSphere client.


## IBMCloud
If no api key is set up with proper VPC resource permissions, use the following to create one:
* Access group
* Service id with the following access
    * With policy **VPC Infrastructure Services**
    * Resources = All
    * Roles:
        * Editor
        * Administrator
        * Operator
        * Viewer
* API Key

Set the following environment variables

1. ```export IBMC_URL=https://<region>.iaas.cloud.ibm.com/v1```

2. ```export IBMC_APIKEY=<ibmcloud_api_key>```
@@ -27,14 +27,6 @@ The prometheus url/route and bearer token are automatically obtained in case of
**signal_address**: Address to listen/post the signal state to
**port**: port to listen/post the signal state to

## Litmus Variables
Litmus installation specifics if you are running one of the hog scenarios. See the [litmus doc](litmus_scenarios.md) for more information on these types of scenarios
**litmus_install**: Installs the specified version of litmus; set to False if it's already set up
**litmus_version**: Litmus version to install
**litmus_uninstall**: If you want to uninstall litmus on failure
**litmus_uninstall_before_run**: If you want to uninstall litmus before a new run starts, True or False values


## Chaos Scenarios

**chaos_scenarios**: List of different types of chaos scenarios you want to run with paths to their specific yaml file configurations
@@ -48,7 +40,6 @@ Chaos scenario types:
- plugin_scenarios
- node_scenarios
- time_scenarios
- litmus_scenarios
- cluster_shut_down_scenarios
- namespace_scenarios
- zone_outages
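Each entry under `chaos_scenarios` maps one of these types to a list of scenario files, in the same shape as the arcaflow example shown earlier in this document; the scenario file path below is a hypothetical placeholder:

```yaml
# Hypothetical config excerpt; the scenario file path is a placeholder
kraken:
    chaos_scenarios:
        - node_scenarios:
            - scenarios/<your_node_scenario>.yml
```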
@@ -14,7 +14,9 @@ scenarios:
      container_name: "<specific container name>" # This is optional, can take out and will kill all containers in all pods found under namespace and label
      pod_names: # This is optional, can take out and will select all pods with given namespace and label
        - <pod_name>
      retry_wait: <number of seconds to wait for container to be running again> (defaults to 120seconds)
      count: <number of containers to disrupt, default=1>
      action: <Action to run. For example kill 1 ( hang up ) or kill 9. Default is set to kill 1>
      expected_recovery_time: <number of seconds to wait for container to be running again> (defaults to 120seconds)
```

#### Post Action
@@ -34,5 +36,5 @@ See [scenarios/post_action_etcd_container.py](https://github.com/redhat-chaos/kr
containers that were killed as well as the namespaces and pods to verify all containers that were affected recover properly.

```
retry_wait: <seconds to wait for container to recover>
expected_recovery_time: <seconds to wait for container to recover>
```
@@ -10,10 +10,9 @@
* [Cluster recovery checks, metrics evaluation and pass/fail criteria](#cluster-recovery-checks-metrics-evaluation-and-passfail-criteria)
* [Scenarios](#scenarios)
* [Test Environment Recommendations - how and where to run chaos tests](#test-environment-recommendations---how-and-where-to-run-chaos-tests)
* [Chaos testing in Practice within the OpenShift Organization](#chaos-testing-in-practice-within-the-OpenShift-Organization)
  * [Using kraken as part of a tekton pipeline](#using-kraken-as-part-of-a-tekton-pipeline)
    * [Start as a single taskrun](#start-as-a-single-taskrun)
    * [Start as a pipelinerun](#start-as-a-pipelinerun)
* [Chaos testing in Practice](#chaos-testing-in-practice)
  * [OpenShift organization](#openshift-organization)
  * [startx-lab](#startx-lab)


### Introduction
@@ -156,7 +155,6 @@ Let us take a look at how to run the chaos scenarios on your OpenShift clusters
- Helps understand if the application/system components have reserved resources to not get disrupted because of rogue applications, or get performance throttled.
  - CPU Hog ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/node-cpu-hog.md), [Demo](https://asciinema.org/a/452762))
  - Memory Hog ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/node-memory-hog.md), [Demo](https://asciinema.org/a/452742?speed=3&theme=solarized-dark))
  - IO Hog ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/node-io-hog.md))

- Time Skewing ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/time-scenarios.md))
  - Manipulate the system time and/or date of specific pods/nodes.
@@ -210,8 +208,9 @@ Let us take a look at few recommendations on how and where to run the chaos tes
- You might have existing test cases, be it related to Performance, Scalability or QE. Run the chaos in the background during the test runs to observe the impact. The signaling feature in Kraken can help with coordinating the chaos runs, i.e. start, stop, or pause the scenarios based on the state of the other test jobs.


#### Chaos testing in Practice within the OpenShift Organization
#### Chaos testing in Practice

##### OpenShift organization
Within the OpenShift organization we use kraken to perform chaos testing throughout a release before the code is available to customers.

1. We execute kraken during our regression test suite.
@@ -230,7 +229,14 @@ Within the OpenShift organization we use kraken to perform chaos testing through

3. We are starting to add in test cases that perform chaos testing during an upgrade (not many iterations of this have been completed).

### Using kraken as part of a tekton pipeline

##### startx-lab

**NOTE**: Requests for enhancements and any issues need to be filed at the mentioned links given that they are not natively supported in Kraken.

The following content covers the implementation details around how Startx is leveraging Kraken:

* Using kraken as part of a tekton pipeline

You can find on [artifacthub.io](https://artifacthub.io/packages/search?kind=7&ts_query_web=kraken) the
[kraken-scenario](https://artifacthub.io/packages/tekton-task/startx-tekton-catalog/kraken-scenario) `tekton-task`
@@ -258,14 +264,47 @@ to reflect your cluster configuration. Refer to the [kraken configuration](https
and [configuration examples](https://github.com/startxfr/tekton-catalog/blob/stable/task/kraken-scenario/0.1/samples/)
for details on how to configure these resources.

#### Start as a single taskrun
* Start as a single taskrun

```bash
oc apply -f https://github.com/startxfr/tekton-catalog/raw/stable/task/kraken-scenario/0.1/samples/taskrun.yaml
```

#### Start as a pipelinerun
* Start as a pipelinerun

```bash
oc apply -f https://github.com/startxfr/tekton-catalog/raw/stable/task/kraken-scenario/0.1/samples/pipelinerun.yaml
```

* Deploying kraken using a helm-chart

You can find on [artifacthub.io](https://artifacthub.io/packages/search?kind=0&ts_query_web=kraken) the
[chaos-kraken](https://artifacthub.io/packages/helm/startx/chaos-kraken) `helm-chart`
which can be used to deploy kraken chaos scenarios.

Default configuration creates the following resources:

- 1 project named **chaos-kraken**
- 1 scc with privileged context for kraken deployment
- 1 configmap with kraken 21 generic scenarios, various scripts and configuration
- 1 configmap with kubeconfig of the targeted cluster
- 1 job named kraken-test-xxx
- 1 service to the kraken pods
- 1 route to the kraken service

```bash
# Install the startx helm repository
helm repo add startx https://startxfr.github.io/helm-repository/packages/
# Install the kraken project
helm install --set project.enabled=true chaos-kraken-project startx/chaos-kraken
# Deploy the kraken instance
helm install \
  --set kraken.enabled=true \
  --set kraken.aws.credentials.region="eu-west-3" \
  --set kraken.aws.credentials.key_id="AKIAXXXXXXXXXXXXXXXX" \
  --set kraken.aws.credentials.secret="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" \
  --set kraken.kubeconfig.token.server="https://api.mycluster:6443" \
  --set kraken.kubeconfig.token.token="sha256~XXXXXXXXXX_PUT_YOUR_TOKEN_HERE_XXXXXXXXXXXX" \
  -n chaos-kraken \
  chaos-kraken-instance startx/chaos-kraken
```
@@ -5,7 +5,6 @@ The following ways are supported to run Kraken:
- Standalone python program through Git.
- Containerized version using either Podman or Docker as the runtime.
- Kubernetes or OpenShift deployment.
- Using chaos-kraken helm chart.

**NOTE**: It is recommended to run Kraken external to the cluster ( Standalone or Containerized ) hitting the Kubernetes/OpenShift API, as running it internal to the cluster might be disruptive to itself and also might not report back the results if the chaos leads to the cluster API server's instability.
@@ -38,17 +37,17 @@ $ python3.9 run_kraken.py --config <config_file_location>
### Run containerized version
Assuming that the latest docker ( 17.05 or greater with multi-build support ) is installed on the host, run:
```
$ docker pull quay.io/chaos-kubox/krkn:latest
$ docker run --name=kraken --net=host -v <path_to_kubeconfig>:/root/.kube/config:Z -v <path_to_kraken_config>:/root/kraken/config/config.yaml:Z -d quay.io/chaos-kubox/krkn:latest
$ docker run --name=kraken --net=host -v <path_to_kubeconfig>:/root/.kube/config:Z -v <path_to_kraken_config>:/root/kraken/config/config.yaml:Z -v <path_to_scenarios_directory>:/root/kraken/scenarios:Z -d quay.io/chaos-kubox/krkn:latest #custom or tweaked scenario configs
$ docker pull quay.io/redhat-chaos/krkn:latest
$ docker run --name=kraken --net=host -v <path_to_kubeconfig>:/root/.kube/config:Z -v <path_to_kraken_config>:/root/kraken/config/config.yaml:Z -d quay.io/redhat-chaos/krkn:latest
$ docker run --name=kraken --net=host -v <path_to_kubeconfig>:/root/.kube/config:Z -v <path_to_kraken_config>:/root/kraken/config/config.yaml:Z -v <path_to_scenarios_directory>:/root/kraken/scenarios:Z -d quay.io/redhat-chaos/krkn:latest #custom or tweaked scenario configs
$ docker logs -f kraken
```

Similarly, podman can be used to achieve the same:
```
$ podman pull quay.io/chaos-kubox/krkn
$ podman run --name=kraken --net=host -v <path_to_kubeconfig>:/root/.kube/config:Z -v <path_to_kraken_config>:/root/kraken/config/config.yaml:Z -d quay.io/chaos-kubox/krkn:latest
$ podman run --name=kraken --net=host -v <path_to_kubeconfig>:/root/.kube/config:Z -v <path_to_kraken_config>:/root/kraken/config/config.yaml:Z -v <path_to_scenarios_directory>:/root/kraken/scenarios:Z -d quay.io/chaos-kubox/krkn:latest #custom or tweaked scenario configs
$ podman pull quay.io/redhat-chaos/krkn
$ podman run --name=kraken --net=host -v <path_to_kubeconfig>:/root/.kube/config:Z -v <path_to_kraken_config>:/root/kraken/config/config.yaml:Z -d quay.io/redhat-chaos/krkn:latest
$ podman run --name=kraken --net=host -v <path_to_kubeconfig>:/root/.kube/config:Z -v <path_to_kraken_config>:/root/kraken/config/config.yaml:Z -v <path_to_scenarios_directory>:/root/kraken/scenarios:Z -d quay.io/redhat-chaos/krkn:latest #custom or tweaked scenario configs
$ podman logs -f kraken
```
@@ -59,39 +58,6 @@ If you want to build your own kraken image see [here](https://github.com/redhat-
Refer to the [Instructions](https://github.com/redhat-chaos/krkn/blob/main/containers/README.md) on how to deploy and run Kraken as a Kubernetes/OpenShift deployment.


### Deploying kraken using a helm-chart

You can find on [artifacthub.io](https://artifacthub.io/packages/search?kind=0&ts_query_web=kraken) the
[chaos-kraken](https://artifacthub.io/packages/helm/startx/chaos-kraken) `helm-chart`
which can be used to deploy kraken chaos scenarios.

Default configuration creates the following resources:

- 1 project named **chaos-kraken**
- 1 scc with privileged context for kraken deployment
- 1 configmap with kraken 21 generic scenarios, various scripts and configuration
- 1 configmap with kubeconfig of the targeted cluster
- 1 job named kraken-test-xxx
- 1 service to the kraken pods
- 1 route to the kraken service

```bash
# Install the startx helm repository
helm repo add startx https://startxfr.github.io/helm-repository/packages/
# Install the kraken project
helm install --set project.enabled=true chaos-kraken-project startx/chaos-kraken
# Deploy the kraken instance
helm install \
  --set kraken.enabled=true \
  --set kraken.aws.credentials.region="eu-west-3" \
  --set kraken.aws.credentials.key_id="AKIAXXXXXXXXXXXXXXXX" \
  --set kraken.aws.credentials.secret="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" \
  --set kraken.kubeconfig.token.server="https://api.mycluster:6443" \
  --set kraken.kubeconfig.token.token="sha256~XXXXXXXXXX_PUT_YOUR_TOKEN_HERE_XXXXXXXXXXXX" \
  -n chaos-kraken \
  chaos-kraken-instance startx/chaos-kraken
```

Refer to the [chaos-kraken chart manpage](https://artifacthub.io/packages/helm/startx/chaos-kraken)
and especially the [kraken configuration values](https://artifacthub.io/packages/helm/startx/chaos-kraken#chaos-kraken-values-dictionary)
for details on how to configure this chart.
@@ -1,41 +0,0 @@
### Litmus Scenarios
Kraken consumes [Litmus](https://github.com/litmuschaos/litmus) under the hood for some scenarios.

Official Litmus documentation and specifics of Litmus resources can be found [here](https://docs.litmuschaos.io/docs/next/getstarted/).


#### Litmus Chaos Custom Resources
There are 3 custom resources that are created during each Litmus scenario. Below is a description of the resources:
* ChaosEngine: A resource to link a Kubernetes application or Kubernetes node to a ChaosExperiment. ChaosEngine is watched by Litmus' Chaos-Operator, which then invokes Chaos-Experiments.
* ChaosExperiment: A resource to group the configuration parameters of a chaos experiment. ChaosExperiment CRs are created by the operator when experiments are invoked by ChaosEngine.
* ChaosResult: A resource to hold the results of a chaos-experiment. The Chaos-exporter reads the results and exports the metrics into a configured Prometheus server.

### Understanding Litmus Scenarios

To run Litmus scenarios we need to apply 3 different resources/yaml files to our cluster.
1. **Chaos experiments** contain the actual chaos details of a scenario.

   i. This is installed automatically by Kraken (does not need to be specified in the kraken scenario configuration).

2. **Service Account**: should be created to allow chaosengine to run experiments in your application namespace. Usually it sets just enough permissions to a specific namespace to be able to run the experiment properly.

   i. This can be defined using either a link to a yaml file or a downloaded file in the scenarios' folder.

3. **Chaos Engine** connects the application instance to a Chaos Experiment. This is where you define the specifics of your scenario, i.e. the node or pod name you want to cause chaos within.

   i. This is a downloaded yaml file in the scenarios' folder. A full list of scenarios can be found [here](https://hub.litmuschaos.io/).

**NOTE**: By default, all chaos experiments will be installed based on the version you give in the config file.

Adding a new Litmus based scenario is as simple as adding references to 2 new yaml files (the Service Account and Chaos Engine files for your scenario) in the Kraken config.


### Supported scenarios

The following are the start of scenarios for which a chaos scenario config exists today.

Scenario | Description | Working
------------------------ | ----------------------------------------------------------------------------------------- | -------------------------
[Node CPU Hog](https://github.com/redhat-chaos/krkn/blob/main/scenarios/node_cpu_hog_engine.yaml) | Chaos scenario that hogs up the CPU on a defined node for a specific amount of time. | :heavy_check_mark:
[Node Memory Hog](https://github.com/redhat-chaos/krkn/blob/main/scenarios/node_mem_engine.yaml) | Chaos scenario that hogs up the memory on a defined node for a specific amount of time. | :heavy_check_mark:
[Node IO Hog](https://github.com/redhat-chaos/krkn/blob/main/scenarios/node_io_engine.yaml) | Chaos scenario that hogs up the IO on a defined node for a specific amount of time. | :heavy_check_mark:
@@ -76,6 +76,42 @@ How to set up Alibaba cli to run node scenarios is defined [here](cloud_setup.md
#### VMware
How to set up VMware vSphere to run node scenarios is defined [here](cloud_setup.md#vmware)

This cloud type uses a different configuration style, see actions below and the [example config file](../scenarios/openshift/vmware_node_scenarios.yml)

*vmware-node-terminate, vmware-node-reboot, vmware-node-stop, vmware-node-start*

#### IBMCloud
How to set up IBMCloud to run node scenarios is defined [here](cloud_setup.md#ibmcloud)

This cloud type uses a different configuration style, see actions below and the [example config file](../scenarios/openshift/ibmcloud_node_scenarios.yml)

*ibmcloud-node-terminate, ibmcloud-node-reboot, ibmcloud-node-stop, ibmcloud-node-start*


#### IBMCloud and VMware example


```
- id: ibmcloud-node-stop
  config:
    name: "<node_name>"
    label_selector: "node-role.kubernetes.io/worker" # When node_name is not specified, a node with matching label_selector is selected for node chaos scenario injection
    runs: 1 # Number of times to inject each scenario under actions (will perform on same node each time)
    instance_count: 1 # Number of nodes to perform action/select that match the label selector
    timeout: 30 # Duration to wait for completion of node scenario injection
    skip_openshift_checks: False # Set to True if you don't want to wait for the status of the nodes to change on OpenShift before passing the scenario
- id: ibmcloud-node-start
  config:
    name: "<node_name>" # Same name as before
    label_selector: "node-role.kubernetes.io/worker" # When node_name is not specified, a node with matching label_selector is selected for node chaos scenario injection
    runs: 1 # Number of times to inject each scenario under actions (will perform on same node each time)
    instance_count: 1 # Number of nodes to perform action/select that match the label selector
    timeout: 30 # Duration to wait for completion of node scenario injection
    skip_openshift_checks: False # Set to True if you don't want to wait for the status of the nodes to change on OpenShift before passing the scenario
```



#### General
37  docs/pod_network_scenarios.md  Normal file
@@ -0,0 +1,37 @@
## Pod network Scenarios

### Pod outage
Scenario to block the traffic ( Ingress/Egress ) of a pod matching the labels for the specified duration of time, to understand the behavior of the service/other services which depend on it during downtime. This helps with planning the requirements accordingly, be it improving the timeouts or tweaking the alerts etc.
With the current network policies, it is not possible to explicitly block ports which are enabled by an allowed network policy rule. This chaos scenario addresses this issue by using OVS flow rules to block ports related to the pod. It supports OpenShiftSDN and OVNKubernetes based networks.

##### Sample scenario config (using a plugin)
```
- id: pod_network_outage
  config:
    namespace: openshift-console # Required - Namespace of the pod to which the filter needs to be applied
    direction: # Optional - List of directions to apply filters
      - ingress # Blocks ingress traffic, Default both egress and ingress
    ingress_ports: # Optional - List of ports to block traffic on
      - 8443 # Blocks 8443, Default [], i.e. all ports.
    label_selector: 'component=ui' # Blocks access to openshift console
```
### Pod Network shaping
Scenario to introduce network latency, packet loss, and bandwidth restriction in the Pod's network interface. The purpose of this scenario is to observe faults caused by random variations in the network.

##### Sample scenario config for egress traffic shaping (using plugin)
```
- id: pod_egress_shaping
  config:
    namespace: openshift-console # Required - Namespace of the pod to which the filter needs to be applied.
    label_selector: 'component=ui' # Applies traffic shaping to access openshift console.
    network_params:
      latency: 500ms # Add 500ms latency to egress traffic from the pod.
```

##### Steps
- Pick the pods to introduce the network anomaly either from label_selector or pod_name.
- Identify the pod interface name on the node.
- Set traffic shaping config on pod's interface using tc and netem.
- Wait for the duration time.
- Remove traffic shaping config on pod's interface.
- Remove the job that spawned the pod.
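By analogy with the egress example above, ingress traffic can presumably be shaped with a matching plugin id; the config below is a sketch under the assumption that a `pod_ingress_shaping` plugin follows the same parameter layout, so verify the plugin id before use:

```yaml
# Assumed ingress counterpart to pod_egress_shaping; the plugin id is an assumption
- id: pod_ingress_shaping
  config:
    namespace: openshift-console # Namespace of the pod to which the filter needs to be applied.
    label_selector: 'component=ui'
    network_params:
      latency: 500ms # Add 500ms latency to ingress traffic to the pod.
```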
@@ -4,25 +4,32 @@ import time
import kraken.cerberus.setup as cerberus
from jinja2 import Template
import kraken.invoke.command as runcommand

from krkn_lib_kubernetes import ScenarioTelemetry, KrknTelemetry

# Reads the scenario config, applies and deletes a network policy to
# block the traffic for the specified duration
def run(scenarios_list, config, wait_duration):
def run(scenarios_list, config, wait_duration, telemetry: KrknTelemetry) -> (list[str], list[ScenarioTelemetry]):
    failed_post_scenarios = ""
    scenario_telemetries: list[ScenarioTelemetry] = []
    failed_scenarios = []
    for app_outage_config in scenarios_list:
        scenario_telemetry = ScenarioTelemetry()
        scenario_telemetry.scenario = app_outage_config
        scenario_telemetry.startTimeStamp = time.time()
        telemetry.set_parameters_base64(scenario_telemetry, app_outage_config)
        if len(app_outage_config) > 1:
            with open(app_outage_config, "r") as f:
                app_outage_config_yaml = yaml.full_load(f)
                scenario_config = app_outage_config_yaml["application_outage"]
                pod_selector = scenario_config.get("pod_selector", "{}")
                traffic_type = scenario_config.get("block", "[Ingress, Egress]")
                namespace = scenario_config.get("namespace", "")
                duration = scenario_config.get("duration", 60)
            try:
                with open(app_outage_config, "r") as f:
                    app_outage_config_yaml = yaml.full_load(f)
                    scenario_config = app_outage_config_yaml["application_outage"]
                    pod_selector = scenario_config.get("pod_selector", "{}")
                    traffic_type = scenario_config.get("block", "[Ingress, Egress]")
                    namespace = scenario_config.get("namespace", "")
                    duration = scenario_config.get("duration", 60)

            start_time = int(time.time())
                start_time = int(time.time())

            network_policy_template = """---
                network_policy_template = """---
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
@@ -31,28 +38,38 @@ spec:
  podSelector:
    matchLabels: {{ pod_selector }}
  policyTypes: {{ traffic_type }}
"""
            t = Template(network_policy_template)
            rendered_spec = t.render(pod_selector=pod_selector, traffic_type=traffic_type)
            # Write the rendered template to a file
            with open("kraken_network_policy.yaml", "w") as f:
                f.write(rendered_spec)
            # Block the traffic by creating network policy
            logging.info("Creating the network policy")
            runcommand.invoke(
                "kubectl create -f %s -n %s --validate=false" % ("kraken_network_policy.yaml", namespace)
            )
"""
                t = Template(network_policy_template)
                rendered_spec = t.render(pod_selector=pod_selector, traffic_type=traffic_type)
                # Write the rendered template to a file
                with open("kraken_network_policy.yaml", "w") as f:
                    f.write(rendered_spec)
                # Block the traffic by creating network policy
                logging.info("Creating the network policy")
                runcommand.invoke(
                    "kubectl create -f %s -n %s --validate=false" % ("kraken_network_policy.yaml", namespace)
                )

            # wait for the specified duration
            logging.info("Waiting for the specified duration in the config: %s" % (duration))
            time.sleep(duration)
                # wait for the specified duration
                logging.info("Waiting for the specified duration in the config: %s" % (duration))
                time.sleep(duration)

            # unblock the traffic by deleting the network policy
            logging.info("Deleting the network policy")
            runcommand.invoke("kubectl delete -f %s -n %s" % ("kraken_network_policy.yaml", namespace))
                # unblock the traffic by deleting the network policy
                logging.info("Deleting the network policy")
                runcommand.invoke("kubectl delete -f %s -n %s" % ("kraken_network_policy.yaml", namespace))

            logging.info("End of scenario. Waiting for the specified duration: %s" % (wait_duration))
            time.sleep(wait_duration)
                logging.info("End of scenario. Waiting for the specified duration: %s" % (wait_duration))
                time.sleep(wait_duration)

            end_time = int(time.time())
            cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
            except Exception as e:
                scenario_telemetry.exitStatus = 1
                failed_scenarios.append(app_outage_config)
                telemetry.log_exception(app_outage_config)
            else:
                scenario_telemetry.exitStatus = 0
        scenario_telemetry.endTimeStamp = time.time()
        scenario_telemetries.append(scenario_telemetry)
    return failed_scenarios, scenario_telemetries

                end_time = int(time.time())
                cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
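For reference, the scenario file consumed by this `run()` function reads the keys visible in the `scenario_config.get(...)` calls above; a sketch with illustrative values:

```yaml
# Illustrative application_outage scenario file; keys taken from the code above, values made up
application_outage:
    duration: 60 # seconds the blocking network policy stays in place
    namespace: <namespace-to-target>
    pod_selector: {app: foo} # labels matched by the generated NetworkPolicy
    block: [Ingress, Egress] # traffic directions to block
```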
2  kraken/arcaflow_plugin/__init__.py  Normal file
@@ -0,0 +1,2 @@
from .arcaflow_plugin import *
from .context_auth import ContextAuth
179  kraken/arcaflow_plugin/arcaflow_plugin.py  Normal file
@@ -0,0 +1,179 @@
import time

import arcaflow
import os
import yaml
import logging
import sys
from pathlib import Path
from typing import List
from .context_auth import ContextAuth
from krkn_lib_kubernetes import ScenarioTelemetry, KrknTelemetry


def run(scenarios_list: List[str], kubeconfig_path: str, telemetry: KrknTelemetry) -> (list[str], list[ScenarioTelemetry]):
    scenario_telemetries: list[ScenarioTelemetry] = []
    failed_post_scenarios = []
    for scenario in scenarios_list:
        scenario_telemetry = ScenarioTelemetry()
        scenario_telemetry.scenario = scenario
        scenario_telemetry.startTimeStamp = time.time()
        telemetry.set_parameters_base64(scenario_telemetry, scenario)
        engine_args = build_args(scenario)
        status_code = run_workflow(engine_args, kubeconfig_path)
        scenario_telemetry.endTimeStamp = time.time()
        scenario_telemetry.exitStatus = status_code
        scenario_telemetries.append(scenario_telemetry)
        if status_code != 0:
            failed_post_scenarios.append(scenario)
    return failed_post_scenarios, scenario_telemetries


def run_workflow(engine_args: arcaflow.EngineArgs, kubeconfig_path: str) -> int:
    set_arca_kubeconfig(engine_args, kubeconfig_path)
    exit_status = arcaflow.run(engine_args)
    return exit_status


def build_args(input_file: str) -> arcaflow.EngineArgs:
    """sets the kubeconfig parsed by setArcaKubeConfig as an input to the arcaflow workflow"""
    context = Path(input_file).parent
    workflow = "{}/workflow.yaml".format(context)
    config = "{}/config.yaml".format(context)
    if not os.path.exists(context):
        raise Exception(
            "context folder for arcaflow workflow not found: {}".format(
                context)
        )
    if not os.path.exists(input_file):
        raise Exception(
            "input file for arcaflow workflow not found: {}".format(input_file))
    if not os.path.exists(workflow):
        raise Exception(
            "workflow file for arcaflow workflow not found: {}".format(
                workflow)
        )
    if not os.path.exists(config):
        raise Exception(
            "configuration file for arcaflow workflow not found: {}".format(
                config)
        )

    engine_args = arcaflow.EngineArgs()
    engine_args.context = context
    engine_args.config = config
    engine_args.input = input_file
    return engine_args


def set_arca_kubeconfig(engine_args: arcaflow.EngineArgs, kubeconfig_path: str):

    context_auth = ContextAuth()
    if not os.path.exists(kubeconfig_path):
        raise Exception("kubeconfig not found in {}".format(kubeconfig_path))

    with open(kubeconfig_path, "r") as stream:
        try:
            kubeconfig = yaml.safe_load(stream)
            context_auth.fetch_auth_data(kubeconfig)
        except Exception as e:
            logging.error("impossible to read kubeconfig file in: {}".format(
                kubeconfig_path))
            raise e

    kubeconfig_str = set_kubeconfig_auth(kubeconfig, context_auth)

    with open(engine_args.input, "r") as stream:
        input_file = yaml.safe_load(stream)
        if "input_list" in input_file and isinstance(input_file["input_list"], list):
            for index, _ in enumerate(input_file["input_list"]):
                if isinstance(input_file["input_list"][index], dict):
                    input_file["input_list"][index]["kubeconfig"] = kubeconfig_str
        else:
            input_file["kubeconfig"] = kubeconfig_str
        stream.close()
    with open(engine_args.input, "w") as stream:
        yaml.safe_dump(input_file, stream)

    with open(engine_args.config, "r") as stream:
        config_file = yaml.safe_load(stream)
        if config_file["deployer"]["type"] == "kubernetes":
            kube_connection = set_kubernetes_deployer_auth(config_file["deployer"]["connection"], context_auth)
            config_file["deployer"]["connection"] = kube_connection
    with open(engine_args.config, "w") as stream:
        yaml.safe_dump(config_file, stream, explicit_start=True, width=4096)


def set_kubernetes_deployer_auth(deployer: any, context_auth: ContextAuth) -> any:
    if context_auth.clusterHost is not None:
        deployer["host"] = context_auth.clusterHost
    if context_auth.clientCertificateData is not None:
        deployer["cert"] = context_auth.clientCertificateData
    if context_auth.clientKeyData is not None:
        deployer["key"] = context_auth.clientKeyData
    if context_auth.clusterCertificateData is not None:
        deployer["cacert"] = context_auth.clusterCertificateData
    if context_auth.username is not None:
        deployer["username"] = context_auth.username
    if context_auth.password is not None:
        deployer["password"] = context_auth.password
    if context_auth.bearerToken is not None:
        deployer["bearerToken"] = context_auth.bearerToken
    return deployer


def set_kubeconfig_auth(kubeconfig: any, context_auth: ContextAuth) -> str:
    """
    Builds an arcaflow-compatible kubeconfig representation and returns it as a string.
    In order to run arcaflow plugins in kubernetes/openshift the kubeconfig must contain client certificate/key
    and server certificate base64 encoded within the kubeconfig file itself in *-data fields. That is not always the
    case, in fact the kubeconfig may contain filesystem paths to those files; this function builds an arcaflow-compatible
    kubeconfig file and returns it as a string that can be safely included in input.yaml
    """

    if "current-context" not in kubeconfig.keys():
        raise Exception(
            "invalid kubeconfig file, impossible to determine current-context"
        )
    user_id = None
    cluster_id = None
    user_name = None
    cluster_name = None
    current_context = kubeconfig["current-context"]
    for context in kubeconfig["contexts"]:
        if context["name"] == current_context:
            user_name = context["context"]["user"]
            cluster_name = context["context"]["cluster"]
    if user_name is None:
        raise Exception(
            "user not set for context {} in kubeconfig file".format(current_context)
        )
    if cluster_name is None:
        raise Exception(
            "cluster not set for context {} in kubeconfig file".format(current_context)
        )

    for index, user in enumerate(kubeconfig["users"]):
        if user["name"] == user_name:
            user_id = index
    for index, cluster in enumerate(kubeconfig["clusters"]):
        if cluster["name"] == cluster_name:
            cluster_id = index

    if cluster_id is None:
        raise Exception(
            "no cluster {} found in kubeconfig users".format(cluster_name)
        )
    if "client-certificate" in kubeconfig["users"][user_id]["user"]:
        kubeconfig["users"][user_id]["user"]["client-certificate-data"] = context_auth.clientCertificateDataBase64
        del kubeconfig["users"][user_id]["user"]["client-certificate"]

    if "client-key" in kubeconfig["users"][user_id]["user"]:
        kubeconfig["users"][user_id]["user"]["client-key-data"] = context_auth.clientKeyDataBase64
        del kubeconfig["users"][user_id]["user"]["client-key"]

    if "certificate-authority" in kubeconfig["clusters"][cluster_id]["cluster"]:
        kubeconfig["clusters"][cluster_id]["cluster"]["certificate-authority-data"] = context_auth.clusterCertificateDataBase64
        del kubeconfig["clusters"][cluster_id]["cluster"]["certificate-authority"]
    kubeconfig_str = yaml.dump(kubeconfig)
    return kubeconfig_str
142  kraken/arcaflow_plugin/context_auth.py  Normal file
@@ -0,0 +1,142 @@
import yaml
import os
import base64


class ContextAuth:
    clusterCertificate: str = None
    clusterCertificateData: str = None
    clusterHost: str = None
    clientCertificate: str = None
    clientCertificateData: str = None
    clientKey: str = None
    clientKeyData: str = None
    clusterName: str = None
    username: str = None
    password: str = None
    bearerToken: str = None
    # TODO: integrate in krkn-lib-kubernetes in the next iteration

    @property
    def clusterCertificateDataBase64(self):
        if self.clusterCertificateData is not None:
            return base64.b64encode(bytes(self.clusterCertificateData, 'utf8')).decode("ascii")
        return

    @property
    def clientCertificateDataBase64(self):
        if self.clientCertificateData is not None:
            return base64.b64encode(bytes(self.clientCertificateData, 'utf8')).decode("ascii")
        return

    @property
    def clientKeyDataBase64(self):
        if self.clientKeyData is not None:
            return base64.b64encode(bytes(self.clientKeyData, "utf-8")).decode("ascii")
        return

    def fetch_auth_data(self, kubeconfig: any):
        context_username = None
        current_context = kubeconfig["current-context"]
        if current_context is None:
            raise Exception("no current-context found in kubeconfig")

        for context in kubeconfig["contexts"]:
            if context["name"] == current_context:
                context_username = context["context"]["user"]
                self.clusterName = context["context"]["cluster"]
        if context_username is None:
            raise Exception("user not found for context {0}".format(current_context))
        if self.clusterName is None:
            raise Exception("cluster not found for context {0}".format(current_context))
        cluster_id = None
        user_id = None
        for index, user in enumerate(kubeconfig["users"]):
            if user["name"] == context_username:
                user_id = index
        if user_id is None:
            raise Exception("user {0} not found in kubeconfig users".format(context_username))

        for index, cluster in enumerate(kubeconfig["clusters"]):
            if cluster["name"] == self.clusterName:
                cluster_id = index

        if cluster_id is None:
            raise Exception(
                "no cluster {} found in kubeconfig users".format(self.clusterName)
            )

        user = kubeconfig["users"][user_id]["user"]
        cluster = kubeconfig["clusters"][cluster_id]["cluster"]
        # sets cluster api URL
        self.clusterHost = cluster["server"]
        # client certificates

        if "client-key" in user:
            try:
                self.clientKey = user["client-key"]
                self.clientKeyData = self.read_file(user["client-key"])
            except Exception as e:
                raise e

        if "client-key-data" in user:
            try:
                self.clientKeyData = base64.b64decode(user["client-key-data"]).decode('utf-8')
            except Exception as e:
                raise Exception("impossible to decode client-key-data")

        if "client-certificate" in user:
            try:
                self.clientCertificate = user["client-certificate"]
                self.clientCertificateData = self.read_file(user["client-certificate"])
            except Exception as e:
                raise e

        if "client-certificate-data" in user:
            try:
                self.clientCertificateData = base64.b64decode(user["client-certificate-data"]).decode('utf-8')
            except Exception as e:
                raise Exception("impossible to decode client-certificate-data")

        # cluster certificate authority

        if "certificate-authority" in cluster:
            try:
                self.clusterCertificate = cluster["certificate-authority"]
                self.clusterCertificateData = self.read_file(cluster["certificate-authority"])
            except Exception as e:
                raise e

        if "certificate-authority-data" in cluster:
            try:
                self.clusterCertificateData = base64.b64decode(cluster["certificate-authority-data"]).decode('utf-8')
            except Exception as e:
                raise Exception("impossible to decode certificate-authority-data")

        if "username" in user:
            self.username = user["username"]

        if "password" in user:
            self.password = user["password"]

        if "token" in user:
            self.bearerToken = user["token"]

    def read_file(self, filename: str) -> str:
        if not os.path.exists(filename):
            raise Exception("file not found {0} ".format(filename))
        with open(filename, "rb") as file_stream:
            return file_stream.read().decode('utf-8')
19  kraken/arcaflow_plugin/fixtures/ca.crt  Normal file
@@ -0,0 +1,19 @@
-----BEGIN CERTIFICATE-----
MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p
a3ViZUNBMB4XDTIzMDMxMzE1NDAxM1oXDTMzMDMxMTE1NDAxM1owFTETMBEGA1UE
AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMnz
U/gIbJBRGOgNYVKX2fV03ANOwnM4VjquR28QMAdxURqgOFZ6IxYNysHEyxxE9I+I
DAm9hi4vQPbOX7FlxUezuzw+ExEfa6RRJ+n+AGJOV1lezCVph6OaJxB1+L1UqaDZ
eM3B4cUf/iCc5Y4bs927+CBG3MJL/jmCVPCO+MiSn/l73PXSFNJAYMvRj42zkXqD
CVG9CwY2vWgZnnzl01l7jNGtie871AmV2uqKakJrQ2ILhD+8fZk4jE5JBDTCZnqQ
pXIc+vERNKLUS8cvjO6Ux8dMv/Z7+xonpXOU59LlpUdHWP9jgCvMTwiOriwqGjJ+
pQJWpX9Dm+oxJiVOJzsCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW
MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
BBQU9pDMtbayJdNM6bp0IG8dcs15qTANBgkqhkiG9w0BAQsFAAOCAQEAtl9TVKPA
hTnPODqv0AGTqreS9kLg4WUUjZRaPUkPWmtCoTh2Yf55nRWdHOHeZnCWDSg24x42
lpt+13IdqKew1RKTpKCTkicMFi090A01bYu/w39Cm6nOAA5h8zkgSkV5czvQotuV
SoN2vB+nbuY28ah5PkdqjMHEZbNwa59cgEke8wB1R1DWFQ/pqflrH2v9ACAuY+5Q
i673tA6CXrb1YfaCQnVBzcfvjGS1MqShPKpOLMF+/GccPczNimaBxMnKvYLvf3pN
qEUrJC00mAcein8HmxR2Xz8wredbMUUyrQxW29pZJwfGE5GU0olnlsA0lZLbTwio
xoolo5y+fsK/dA==
-----END CERTIFICATE-----
19  kraken/arcaflow_plugin/fixtures/client.crt  Normal file
@@ -0,0 +1,19 @@
-----BEGIN CERTIFICATE-----
MIIDITCCAgmgAwIBAgIBAjANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p
a3ViZUNBMB4XDTIzMDUwMTA4NTc0N1oXDTI2MDUwMTA4NTc0N1owMTEXMBUGA1UE
ChMOc3lzdGVtOm1hc3RlcnMxFjAUBgNVBAMTDW1pbmlrdWJlLXVzZXIwggEiMA0G
CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC0b7uy9nQYrh7uC5NODve7dFNLAgo5
pWRS6Kx13ULA55gOpieZiI5/1jwUBjOz0Hhl5QAdHC1HDNu5wf4MmwIEheuq3kMA
mfuvNxW2BnWSDuXyUMlBfqlwg5o6W8ndEWaK33D7wd2WQsSsAnhQPJSjnzWKvWKq
+Kbcygc4hdss/ZWN+SXLTahNpHBw0sw8AcJqddNeXs2WI5GdZmbXL4QZI36EaNUm
m4xKmKRKYIP9wYkmXOV/D2h1meM44y4lul5v2qvo6I+umJ84q4W1/W1vVmAzyVfL
v1TQCUx8cpKMHzw3ma6CTBCtU3Oq9HKHBnf8GyHZicmV7ESzf/phJu4ZAgMBAAGj
YDBeMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUH
AwIwDAYDVR0TAQH/BAIwADAfBgNVHSMEGDAWgBQU9pDMtbayJdNM6bp0IG8dcs15
qTANBgkqhkiG9w0BAQsFAAOCAQEABNzEQQMYUcLsBASHladEjr46avKn7gREfaDl
Y5PBvgCPP42q/sW/9iCNY3UpT9TJZWM6s01+0p6I96jYbRQER1NX7O4OgQYHmFw2
PF6UOG2vMo54w11OvL7sbr4d+nkE6ItdM9fLDIJ3fEOYJZkSoxhOL/U3jSjIl7Wu
KCIlpM/M/gcZ4w2IvcLrWtvswbFNUd+dwQfBGcQTmSQDOLE7MqSvzYAkeNv73GLB
ieba7gs/PmoTFsf9nW60iXymDDF4MtODn15kqT/y1uD6coujmiEiIomBfxqAkUCU
0ciP/KF5oOEMmMedm7/peQxaRTMdRSk4yu7vbj/BxnTcj039Qg==
-----END CERTIFICATE-----
27  kraken/arcaflow_plugin/fixtures/client.key  Normal file
@@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAtG+7svZ0GK4e7guTTg73u3RTSwIKOaVkUuisdd1CwOeYDqYn
mYiOf9Y8FAYzs9B4ZeUAHRwtRwzbucH+DJsCBIXrqt5DAJn7rzcVtgZ1kg7l8lDJ
QX6pcIOaOlvJ3RFmit9w+8HdlkLErAJ4UDyUo581ir1iqvim3MoHOIXbLP2Vjfkl
y02oTaRwcNLMPAHCanXTXl7NliORnWZm1y+EGSN+hGjVJpuMSpikSmCD/cGJJlzl
fw9odZnjOOMuJbpeb9qr6OiPrpifOKuFtf1tb1ZgM8lXy79U0AlMfHKSjB88N5mu
gkwQrVNzqvRyhwZ3/Bsh2YnJlexEs3/6YSbuGQIDAQABAoIBAQCdJxPb8zt6o2zc
98f8nJy378D7+3LccmjGrVBH98ZELXIKkDy9RGqYfQcmiaBOZKv4U1OeBwSIdXKK
f6O9ZuSC/AEeeSbyRysmmFuYhlewNrmgKyyelqsNDBIv8fIHUTh2i9Xj8B4G2XBi
QGR5vcnYGLqRdBGTx63Nb0iKuksDCwPAuPA/e0ySz9HdWL1j4bqpVSYsOIXsqTDr
CVnxUeSIL0fFQnRm3IASXQD7zdq9eEFX7vESeleZoz8qNcKb4Na/C3N6crScjgH7
qyNZ2zNLfy1LT84k8uc1TMX2KcEVEmfdDv5cCnUH2ic12CwXMZ0vgId5LJTaHx4x
ytIQIe5hAoGBANB+TsRXP4KzcjZlUUfiAp/pWUM4kVktbsfZa1R2NEuIGJUxPk3P
7WS0WX5W75QKRg+UWTubg5kfd0f9fklLgofmliBnY/HrpgdyugJmUZBgzIxmy0k+
aCe0biD1gULfyyrKtfe8k5wRFstzhfGszlOf2ebR87sSVNBuF2lEwPTvAoGBAN2M
0/XrsodGU4B9Mj86Go2gb2k2WU2izI0cO+tm2S5U5DvKmVEnmjXfPRaOFj2UUQjo
cljnDAinbN+O0+Inc35qsEeYdAIepNAPglzcpfTHagja9mhx2idLYTXGhbZLL+Ei
TRzMyP27NF+GVVfYU/cA86ns6NboG6spohmnqh13AoGAKPc4aNGv0/GIVnHP56zb
0SnbdR7PSFNp+fCZay4Slmi2U9IqKMXbIjdhgjZ4uoDORU9jvReQYuzQ1h9TyfkB
O8yt4M4P0D/6DmqXa9NI4XJznn6wIMMXWf3UybsTW913IQBVgsjVxAuDjBQ11Eec
/sdg3D6SgkZWzeFjzjZJJ5cCgYBSYVg7fE3hERxhjawOaJuRCBQFSklAngVzfwkk
yhR9ruFC/l2uGIy19XFwnprUgP700gIa3qbR3PeV1TUiRcsjOaacqKqSUzSzjODL
iNxIvZHHAyxWv+b/b38REOWNWD3QeAG2cMtX1bFux7OaO31VPkxcZhRaPOp05cE5
yudtlwKBgDBbR7RLYn03OPm3NDBLLjTybhD8Iu8Oj7UeNCiEWAdZpqIKYnwSxMzQ
kdo4aTENA/seEwq+XDV7TwbUIFFJg5gDXIhkcK2c9kiO2bObCAmKpBlQCcrp0a5X
NSBk1N/ZG/Qhqns7z8k01KN4LNcdpRoNiYYPgY+p3xbY8+nWhv+q
-----END RSA PRIVATE KEY-----
100  kraken/arcaflow_plugin/test_context_auth.py  Normal file
@@ -0,0 +1,100 @@
import os
import unittest

from context_auth import ContextAuth


class TestCurrentContext(unittest.TestCase):

    def get_kubeconfig_with_data(self) -> str:
        """
        This function returns a test kubeconfig file as a string.

        :return: a test kubeconfig file in string format (for unit testing purposes)
        """  # NOQA
        return """apiVersion: v1
clusters:
- cluster:
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM5ekNDQWQrZ0F3SUJBZ0lVV01PTVBNMVUrRi9uNXN6TSthYzlMcGZISHB3d0RRWUpLb1pJaHZjTkFRRUwKQlFBd0hqRWNNQm9HQTFVRUF3d1RhM1ZpZFc1MGRTNXNiMk5oYkdSdmJXRnBiakFlRncweU1URXlNRFl4T0RBdwpNRFJhRncwek1URXlNRFF4T0RBd01EUmFNQjR4SERBYUJnTlZCQU1NRTJ0MVluVnVkSFV1Ykc5allXeGtiMjFoCmFXNHdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDNExhcG00SDB0T1NuYTNXVisKdzI4a0tOWWRwaHhYOUtvNjUwVGlOK2c5ZFNQU3VZK0V6T1JVOWVONlgyWUZkMEJmVFNodno4Y25rclAvNysxegpETEoxQ3MwRi9haEV3ZDQxQXN5UGFjbnRiVE80dGRLWm9POUdyODR3YVdBN1hSZmtEc2ZxRGN1YW5UTmVmT1hpCkdGbmdDVzU5Q285M056alB1eEFrakJxdVF6eE5GQkgwRlJPbXJtVFJ4cnVLZXo0aFFuUW1OWEFUNnp0M21udzMKWUtWTzU4b2xlcUxUcjVHNlRtVFQyYTZpVGdtdWY2N0cvaVZlalJGbkw3YkNHWmgzSjlCSTNMcVpqRzE4dWxvbgpaVDdQcGQrQTlnaTJOTm9UZlI2TVB5SndxU1BCL0xZQU5ZNGRoZDVJYlVydDZzbmViTlRZSHV2T0tZTDdNTWRMCmVMSzFBZ01CQUFHakxUQXJNQWtHQTFVZEV3UUNNQUF3SGdZRFZSMFJCQmN3RllJVGEzVmlkVzUwZFM1c2IyTmgKYkdSdmJXRnBiakFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBQTVqUHVpZVlnMExySE1PSkxYY0N4d3EvVzBDNApZeFpncVd3VHF5VHNCZjVKdDlhYTk0SkZTc2dHQWdzUTN3NnA2SlBtL0MyR05MY3U4ZWxjV0E4UXViQWxueXRRCnF1cEh5WnYrZ08wMG83TXdrejZrTUxqQVZ0QllkRzJnZ21FRjViTEk5czBKSEhjUGpHUkl1VHV0Z0tHV1dPWHgKSEg4T0RzaG9wZHRXMktrR2c2aThKaEpYaWVIbzkzTHptM00xRUNGcXAvMEdtNkN1RFphVVA2SGpJMWRrYllLdgpsSHNVZ1U1SmZjSWhNYmJLdUllTzRkc1YvT3FHcm9iNW5vcmRjaExBQmRDTnc1cmU5T1NXZGZ1VVhSK0ViZVhrCjVFM0tFYzA1RGNjcGV2a1NTdlJ4SVQrQzNMOTltWGcxL3B5NEw3VUhvNFFLTXlqWXJXTWlLRlVKV1E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
    server: https://127.0.0.1:6443
  name: default
contexts:
- context:
    cluster: default
    namespace: default
    user: testuser
  name: default
current-context: default
kind: Config
preferences: {}
users:
- name: testuser
  user:
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM5ekNDQWQrZ0F3SUJBZ0lVV01PTVBNMVUrRi9uNXN6TSthYzlMcGZISHB3d0RRWUpLb1pJaHZjTkFRRUwKQlFBd0hqRWNNQm9HQTFVRUF3d1RhM1ZpZFc1MGRTNXNiMk5oYkdSdmJXRnBiakFlRncweU1URXlNRFl4T0RBdwpNRFJhRncwek1URXlNRFF4T0RBd01EUmFNQjR4SERBYUJnTlZCQU1NRTJ0MVluVnVkSFV1Ykc5allXeGtiMjFoCmFXNHdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDNExhcG00SDB0T1NuYTNXVisKdzI4a0tOWWRwaHhYOUtvNjUwVGlOK2c5ZFNQU3VZK0V6T1JVOWVONlgyWUZkMEJmVFNodno4Y25rclAvNysxegpETEoxQ3MwRi9haEV3ZDQxQXN5UGFjbnRiVE80dGRLWm9POUdyODR3YVdBN1hSZmtEc2ZxRGN1YW5UTmVmT1hpCkdGbmdDVzU5Q285M056alB1eEFrakJxdVF6eE5GQkgwRlJPbXJtVFJ4cnVLZXo0aFFuUW1OWEFUNnp0M21udzMKWUtWTzU4b2xlcUxUcjVHNlRtVFQyYTZpVGdtdWY2N0cvaVZlalJGbkw3YkNHWmgzSjlCSTNMcVpqRzE4dWxvbgpaVDdQcGQrQTlnaTJOTm9UZlI2TVB5SndxU1BCL0xZQU5ZNGRoZDVJYlVydDZzbmViTlRZSHV2T0tZTDdNTWRMCmVMSzFBZ01CQUFHakxUQXJNQWtHQTFVZEV3UUNNQUF3SGdZRFZSMFJCQmN3RllJVGEzVmlkVzUwZFM1c2IyTmgKYkdSdmJXRnBiakFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBQTVqUHVpZVlnMExySE1PSkxYY0N4d3EvVzBDNApZeFpncVd3VHF5VHNCZjVKdDlhYTk0SkZTc2dHQWdzUTN3NnA2SlBtL0MyR05MY3U4ZWxjV0E4UXViQWxueXRRCnF1cEh5WnYrZ08wMG83TXdrejZrTUxqQVZ0QllkRzJnZ21FRjViTEk5czBKSEhjUGpHUkl1VHV0Z0tHV1dPWHgKSEg4T0RzaG9wZHRXMktrR2c2aThKaEpYaWVIbzkzTHptM00xRUNGcXAvMEdtNkN1RFphVVA2SGpJMWRrYllLdgpsSHNVZ1U1SmZjSWhNYmJLdUllTzRkc1YvT3FHcm9iNW5vcmRjaExBQmRDTnc1cmU5T1NXZGZ1VVhSK0ViZVhrCjVFM0tFYzA1RGNjcGV2a1NTdlJ4SVQrQzNMOTltWGcxL3B5NEw3VUhvNFFLTXlqWXJXTWlLRlVKV1E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
client-key-data: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2QUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktZd2dnU2lBZ0VBQW9JQkFRQzRMYXBtNEgwdE9TbmEKM1dWK3cyOGtLTllkcGh4WDlLbzY1MFRpTitnOWRTUFN1WStFek9SVTllTjZYMllGZDBCZlRTaHZ6OGNua3JQLwo3KzF6RExKMUNzMEYvYWhFd2Q0MUFzeVBhY250YlRPNHRkS1pvTzlHcjg0d2FXQTdYUmZrRHNmcURjdWFuVE5lCmZPWGlHRm5nQ1c1OUNvOTNOempQdXhBa2pCcXVRenhORkJIMEZST21ybVRSeHJ1S2V6NGhRblFtTlhBVDZ6dDMKbW53M1lLVk81OG9sZXFMVHI1RzZUbVRUMmE2aVRnbXVmNjdHL2lWZWpSRm5MN2JDR1poM0o5QkkzTHFaakcxOAp1bG9uWlQ3UHBkK0E5Z2kyTk5vVGZSNk1QeUp3cVNQQi9MWUFOWTRkaGQ1SWJVcnQ2c25lYk5UWUh1dk9LWUw3Ck1NZExlTEsxQWdNQkFBRUNnZ0VBQ28rank4NW5ueVk5L2l6ZjJ3cjkzb2J3OERaTVBjYnIxQURhOUZYY1hWblEKT2c4bDZhbU9Ga2tiU0RNY09JZ0VDdkx6dEtXbmQ5OXpydU5sTEVtNEdmb0trNk5kK01OZEtKRUdoZHE5RjM1Qgpqdi91R1owZTIyRE5ZLzFHNVdDTE5DcWMwQkVHY2RFOTF0YzJuMlppRVBTNWZ6WVJ6L1k4cmJ5K1NqbzJkWE9RCmRHYWRlUFplbi9UbmlHTFlqZWhrbXZNQjJvU0FDbVMycTd2OUNrcmdmR1RZbWJzeGVjSU1QK0JONG9KS3BOZ28KOUpnRWJ5SUxkR1pZS2pQb2lLaHNjMVhmSy8zZStXSmxuYjJBaEE5Y1JMUzhMcDdtcEYySWp4SjNSNE93QTg3WQpNeGZvZWFGdnNuVUFHWUdFWFo4Z3BkWmhQMEoxNWRGdERjajIrcngrQVFLQmdRRDFoSE9nVGdFbERrVEc5bm5TCjE1eXYxRzUxYnJMQU1UaWpzNklEMU1qelhzck0xY2ZvazVaaUlxNVJsQ3dReTlYNDdtV1RhY0lZRGR4TGJEcXEKY0IydjR5Wm1YK1VleGJ3cDU1OWY0V05HdzF5YzQrQjdaNFF5aTRFelN4WmFjbldjMnBzcHJMUFVoOUFXRXVNcApOaW1vcXNiVGNnNGs5QWRxeUIrbWhIWmJRUUtCZ1FEQUNzU09qNXZMU1VtaVpxYWcrOVMySUxZOVNOdDZzS1VyCkprcjdCZEVpN3N2YmU5cldRR2RBb0xkQXNzcU94aENydmtPNkpSSHB1YjlRRjlYdlF4Riszc2ZpZm4yYkQ0ZloKMlVsclA1emF3RlNrNDNLbjdMZzRscURpaVUxVGlqTkJBL3dUcFlmbTB4dW5WeFRWNDZpNVViQW1XRk12TWV0bQozWUZYQmJkK2RRS0JnRGl6Q1B6cFpzeEcrazAwbUxlL2dYajl4ekNwaXZCbHJaM29teTdsVWk4YUloMmg5VlBaCjJhMzZNbVcyb1dLVG9HdW5xcCtibWU1eUxRRGlFcjVQdkJ0bGl2V3ppYmRNbFFMY2Nlcnpveml4WDA4QU5WUnEKZUpZdnIzdklDSGFFM25LRjdiVjNJK1NlSk1ra1BYL0QrV1R4WTQ5clZLYm1FRnh4c1JXRW04ekJBb0dBWEZ3UgpZanJoQTZqUW1DRmtYQ0loa0NJMVkwNEorSHpDUXZsY3NGT0EzSnNhUWduVUdwekl5OFUvdlFiLzhpQ0IzZ2RZCmpVck16YXErdnVkbnhYVnRFYVpWWGJIVitPQkVSdHFBdStyUkprZS9yYm1SNS84cUxsVUxOVWd4ZjA4RkRXeTgKTERxOUhKOUZPbnJnRTJvMU9FTjRRMGpSWU81U041dXFXODd0REEwQ2dZQXpXbk1KSFgrbmlyMjhRRXFyVnJKRAo4ZUEwOHIwWTJRMDhMRlcvMjNIVWQ4WU12VnhTUTdwcUwzaE41RXVJQ2dCbEpGVFI3TndBREo3eDY2M002akFMCm1DNlI4dWxSZStwa08xN2Y0UUs3MnVRanJGZEhESnlXQmdDL0RKSkV6d1dwY0Q4VVNPK3A5bVVIbllLTUJTOEsKTVB1ejYrZ3h0VEtsRU5pZUVacXhxZz09Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
username: testuser
password: testpassword
token: sha256~fFyEqjf1xxFMO0tbEyGRvWeNOd7QByuEgS4hyEq_A9o
""" # NOQA

def get_kubeconfig_with_paths(self) -> str:
"""
This function returns a test kubeconfig file as a string.

:return: a test kubeconfig file in string format (for unit testing purposes)
""" # NOQA
return """apiVersion: v1
clusters:
- cluster:
certificate-authority: fixtures/ca.crt
server: https://127.0.0.1:6443
name: default
contexts:
- context:
cluster: default
namespace: default
user: testuser
name: default
current-context: default
kind: Config
preferences: {}
users:
- name: testuser
user:
client-certificate: fixtures/client.crt
client-key: fixtures/client.key
username: testuser
password: testpassword
token: sha256~fFyEqjf1xxFMO0tbEyGRvWeNOd7QByuEgS4hyEq_A9o
""" # NOQA

def test_current_context(self):
cwd = os.getcwd()
current_context_data = ContextAuth()
current_context_data.fetch_auth_data(self.get_kubeconfig_with_data())
self.assertIsNotNone(current_context_data.clusterCertificateData)
self.assertIsNotNone(current_context_data.clientCertificateData)
self.assertIsNotNone(current_context_data.clientKeyData)
self.assertIsNotNone(current_context_data.username)
self.assertIsNotNone(current_context_data.password)
self.assertIsNotNone(current_context_data.bearerToken)
self.assertIsNotNone(current_context_data.clusterHost)

current_context_no_data = ContextAuth()
current_context_no_data.fetch_auth_data(self.get_kubeconfig_with_paths())
self.assertIsNotNone(current_context_no_data.clusterCertificate)
self.assertIsNotNone(current_context_no_data.clusterCertificateData)
self.assertIsNotNone(current_context_no_data.clientCertificate)
self.assertIsNotNone(current_context_no_data.clientCertificateData)
self.assertIsNotNone(current_context_no_data.clientKey)
self.assertIsNotNone(current_context_no_data.clientKeyData)
self.assertIsNotNone(current_context_no_data.username)
self.assertIsNotNone(current_context_no_data.password)
self.assertIsNotNone(current_context_no_data.bearerToken)
self.assertIsNotNone(current_context_no_data.clusterHost)

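For reference, a minimal sketch of the ContextAuth surface these tests exercise; the field and method names come from the assertions above, while the parsing and file-loading behavior is an assumption, not the module's actual code:

import yaml

class ContextAuth:
    clusterHost = None
    clusterCertificate = None
    clusterCertificateData = None
    clientCertificate = None
    clientCertificateData = None
    clientKey = None
    clientKeyData = None
    username = None
    password = None
    bearerToken = None

    def fetch_auth_data(self, kubeconfig_str: str):
        # Resolve the current context, then copy the matching cluster and
        # user credentials onto the fields above.
        cfg = yaml.safe_load(kubeconfig_str)
        ctx = next(c["context"] for c in cfg["contexts"] if c["name"] == cfg["current-context"])
        cluster = next(c["cluster"] for c in cfg["clusters"] if c["name"] == ctx["cluster"])
        user = next(u["user"] for u in cfg["users"] if u["name"] == ctx["user"])
        self.clusterHost = cluster.get("server")
        self.clusterCertificate = cluster.get("certificate-authority")
        self.clusterCertificateData = cluster.get("certificate-authority-data")
        self.clientCertificate = user.get("client-certificate")
        self.clientCertificateData = user.get("client-certificate-data")
        self.clientKey = user.get("client-key")
        self.clientKeyData = user.get("client-key-data")
        self.username = user.get("username")
        self.password = user.get("password")
        self.bearerToken = user.get("token")
        # The tests expect the *Data fields to be populated even for the
        # path-based kubeconfig, so the real class presumably also reads the
        # referenced files; sketched here for the CA certificate only.
        if self.clusterCertificate and not self.clusterCertificateData:
            with open(self.clusterCertificate) as f:
                self.clusterCertificateData = f.read()
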
@@ -3,7 +3,10 @@ import logging
import urllib.request
import shutil
import sys
import requests
import tempfile
import kraken.prometheus.client as prometheus
from urllib.parse import urlparse


def setup(url):
@@ -40,7 +43,7 @@ def scrape_metrics(
distribution, prometheus_url, prometheus_bearer_token
)
else:
logging.error("Looks like proemtheus url is not defined, exiting")
logging.error("Looks like prometheus url is not defined, exiting")
sys.exit(1)
command = (
"./kube-burner index --uuid "
@@ -72,6 +75,14 @@ def alerts(distribution, prometheus_url, prometheus_bearer_token, start_time, en
Scrapes metrics defined in the profile from Prometheus and alerts based on the severity defined
"""

is_url = urlparse(alert_profile)
if is_url.scheme and is_url.netloc:
response = requests.get(alert_profile)
temp_alerts = tempfile.NamedTemporaryFile()
temp_alerts.write(response.content)
temp_alerts.flush()
alert_profile = temp_alerts.name

if not prometheus_url:
if distribution == "openshift":
logging.info("Looks like prometheus_url is not defined, trying to use the default instance on the cluster")
@@ -79,7 +90,7 @@ def alerts(distribution, prometheus_url, prometheus_bearer_token, start_time, en
distribution, prometheus_url, prometheus_bearer_token
)
else:
logging.error("Looks like proemtheus url is not defined, exiting")
logging.error("Looks like prometheus url is not defined, exiting")
sys.exit(1)
command = (
"./kube-burner check-alerts "
@@ -96,7 +107,10 @@ def alerts(distribution, prometheus_url, prometheus_bearer_token, start_time, en
)
try:
logging.info("Running kube-burner to capture the metrics: %s" % command)
subprocess.run(command, shell=True, universal_newlines=True)
output = subprocess.run(command, shell=True, universal_newlines=True)
if output.returncode != 0:
logging.error("command exited with a non-zero rc, please check the logs for errors or critical alerts")
sys.exit(output.returncode)
except Exception as e:
logging.error("Failed to run kube-burner, error: %s" % (e))
sys.exit(1)

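The hunk above lets alert_profile be either a local path or a URL; the download-to-tempfile pattern it uses reduces to roughly this standalone sketch (the helper name is illustrative, and delete=False stands in for the original's keeping the file object alive):

import tempfile
import requests
from urllib.parse import urlparse

def resolve_alert_profile(alert_profile: str) -> str:
    # If the profile looks like a URL, fetch it into a temp file and return
    # the temp path; otherwise return the local path unchanged.
    parsed = urlparse(alert_profile)
    if parsed.scheme and parsed.netloc:
        response = requests.get(alert_profile)
        tmp = tempfile.NamedTemporaryFile(delete=False)
        tmp.write(response.content)
        tmp.flush()
        return tmp.name
    return alert_profile
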
@@ -24,14 +24,16 @@ def initialize_clients(kubeconfig_path):
global dyn_client
global custom_object_client
try:
config.load_kube_config(kubeconfig_path)
cli = client.CoreV1Api()
batch_cli = client.BatchV1Api()
watch_resource = watch.Watch()
if kubeconfig_path:
config.load_kube_config(kubeconfig_path)
else:
config.load_incluster_config()
api_client = client.ApiClient()
custom_object_client = client.CustomObjectsApi()
k8s_client = config.new_client_from_config()
dyn_client = DynamicClient(k8s_client)
cli = client.CoreV1Api(api_client)
batch_cli = client.BatchV1Api(api_client)
custom_object_client = client.CustomObjectsApi(api_client)
dyn_client = DynamicClient(api_client)
watch_resource = watch.Watch()
except ApiException as e:
logging.error("Failed to initialize kubernetes client: %s\n" % e)
sys.exit(1)
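The rewritten initializer supports both an explicit kubeconfig and in-cluster service-account credentials, and builds every API object from one shared ApiClient; a standalone sketch of that bootstrap (the function name and return shape are illustrative):

from kubernetes import client, config, watch

def build_clients(kubeconfig_path=None):
    # Load credentials from a kubeconfig when given, otherwise fall back to
    # the in-cluster service account (for runs inside a pod).
    if kubeconfig_path:
        config.load_kube_config(kubeconfig_path)
    else:
        config.load_incluster_config()
    api_client = client.ApiClient()  # one shared transport for all APIs
    return {
        "core": client.CoreV1Api(api_client),
        "batch": client.BatchV1Api(api_client),
        "custom": client.CustomObjectsApi(api_client),
        "watch": watch.Watch(),
    }
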
@@ -1,5 +1,5 @@
import kraken.invoke.command as runcommand
import kraken.kubernetes.client as kubecli
import krkn_lib_kubernetes
import logging
import time
import sys
@@ -8,8 +8,16 @@ import yaml
import kraken.cerberus.setup as cerberus


# krkn_lib_kubernetes
# Inject litmus scenarios defined in the config
def run(scenarios_list, config, litmus_uninstall, wait_duration, litmus_namespace):
def run(
scenarios_list,
config,
litmus_uninstall,
wait_duration,
litmus_namespace,
kubecli: krkn_lib_kubernetes.KrknLibKubernetes
):
# Loop to run the scenarios starts here
for l_scenario in scenarios_list:
start_time = int(time.time())
@@ -35,16 +43,16 @@ def run(scenarios_list, config, litmus_uninstall, wait_duration, litmus_namespac
sys.exit(1)
for expr in experiment_names:
expr_name = expr["name"]
experiment_result = check_experiment(engine_name, expr_name, litmus_namespace)
experiment_result = check_experiment(engine_name, expr_name, litmus_namespace, kubecli)
if experiment_result:
logging.info("Scenario: %s has been successfully injected!" % item)
else:
logging.info("Scenario: %s was not successfully injected, please check" % item)
if litmus_uninstall:
delete_chaos(litmus_namespace)
delete_chaos(litmus_namespace, kubecli)
sys.exit(1)
if litmus_uninstall:
delete_chaos(litmus_namespace)
delete_chaos(litmus_namespace, kubecli)
logging.info("Waiting for the specified duration: %s" % wait_duration)
time.sleep(wait_duration)
end_time = int(time.time())
@@ -86,7 +94,8 @@ def deploy_all_experiments(version_string, namespace):
)


def wait_for_initialized(engine_name, experiment_name, namespace):
# krkn_lib_kubernetes
def wait_for_initialized(engine_name, experiment_name, namespace, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):

chaos_engine = kubecli.get_litmus_chaos_object(kind='chaosengine', name=engine_name,
namespace=namespace).engineStatus
@@ -110,10 +119,17 @@ def wait_for_initialized(engine_name, experiment_name, namespace):
return True


def wait_for_status(engine_name, expected_status, experiment_name, namespace):
# krkn_lib_kubernetes
def wait_for_status(
engine_name,
expected_status,
experiment_name,
namespace,
kubecli: krkn_lib_kubernetes.KrknLibKubernetes
):

if expected_status == "running":
response = wait_for_initialized(engine_name, experiment_name, namespace)
response = wait_for_initialized(engine_name, experiment_name, namespace, kubecli)
if not response:
logging.info("Chaos engine never initialized, exiting")
return False
@@ -140,12 +156,13 @@ def wait_for_status(engine_name, expected_status, experiment_name, namespace):


# Check status of experiment
def check_experiment(engine_name, experiment_name, namespace):
# krkn_lib_kubernetes
def check_experiment(engine_name, experiment_name, namespace, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):

wait_response = wait_for_status(engine_name, "running", experiment_name, namespace)
wait_response = wait_for_status(engine_name, "running", experiment_name, namespace, kubecli)

if wait_response:
wait_for_status(engine_name, "completed", experiment_name, namespace)
wait_for_status(engine_name, "completed", experiment_name, namespace, kubecli)
else:
sys.exit(1)

@@ -166,7 +183,8 @@ def check_experiment(engine_name, experiment_name, namespace):


# Delete all chaos engines in a given namespace
def delete_chaos_experiments(namespace):
# krkn_lib_kubernetes
def delete_chaos_experiments(namespace, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):

if kubecli.check_if_namespace_exists(namespace):
chaos_exp_exists = runcommand.invoke_no_exit("kubectl get chaosexperiment")
@@ -176,7 +194,8 @@ def delete_chaos_experiments(namespace):


# Delete all chaos engines in a given namespace
def delete_chaos(namespace):
# krkn_lib_kubernetes
def delete_chaos(namespace, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):

if kubecli.check_if_namespace_exists(namespace):
logging.info("Deleting all litmus run objects")
@@ -190,7 +209,8 @@ def delete_chaos(namespace):
logging.info(namespace + " namespace doesn't exist")


def uninstall_litmus(version, litmus_namespace):
# krkn_lib_kubernetes
def uninstall_litmus(version, litmus_namespace, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):

if kubecli.check_if_namespace_exists(litmus_namespace):
logging.info("Uninstalling Litmus operator")

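The litmus helpers now poll the ChaosEngine object through the injected client; the call shape (get_litmus_chaos_object and its engineStatus attribute) is taken from the diff, while the loop and backoff values below are illustrative:

import time

def wait_for_engine_status(kubecli, engine_name, namespace, expected, timeout=120, interval=5):
    # Poll the ChaosEngine CR until it reports the expected status or the
    # timeout elapses.
    deadline = time.time() + timeout
    while time.time() < deadline:
        status = kubecli.get_litmus_chaos_object(
            kind='chaosengine', name=engine_name, namespace=namespace
        ).engineStatus
        if status == expected:
            return True
        time.sleep(interval)
    return False
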
@@ -1,10 +1,15 @@
import random
import logging
import kraken.kubernetes.client as kubecli

import krkn_lib_kubernetes

# krkn_lib_kubernetes
# Pick a random managedcluster with specified label selector
def get_managedcluster(managedcluster_name, label_selector, instance_kill_count):
def get_managedcluster(
managedcluster_name,
label_selector,
instance_kill_count,
kubecli: krkn_lib_kubernetes.KrknLibKubernetes):

if managedcluster_name in kubecli.list_killable_managedclusters():
return [managedcluster_name]
elif managedcluster_name:
@@ -25,10 +30,12 @@ def get_managedcluster(managedcluster_name, label_selector, instance_kill_count)


# Wait until the managedcluster status becomes Available
def wait_for_available_status(managedcluster, timeout):
# krkn_lib_kubernetes
def wait_for_available_status(managedcluster, timeout, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
kubecli.watch_managedcluster_status(managedcluster, "True", timeout)


# Wait until the managedcluster status becomes Not Available
def wait_for_unavailable_status(managedcluster, timeout):
# krkn_lib_kubernetes
def wait_for_unavailable_status(managedcluster, timeout, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
kubecli.watch_managedcluster_status(managedcluster, "Unknown", timeout)

@@ -5,7 +5,7 @@ import logging
import sys
import yaml
import html
import kraken.kubernetes.client as kubecli
import krkn_lib_kubernetes
import kraken.managedcluster_scenarios.common_managedcluster_functions as common_managedcluster_functions


@@ -13,9 +13,11 @@ class GENERAL:
def __init__(self):
pass


# krkn_lib_kubernetes
class managedcluster_scenarios():
def __init__(self):
kubecli: krkn_lib_kubernetes.KrknLibKubernetes
def __init__(self, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
self.kubecli = kubecli
self.general = GENERAL()

# managedcluster scenario to start the managedcluster
@@ -31,16 +33,16 @@ class managedcluster_scenarios():
args="""kubectl scale deployment.apps/klusterlet --replicas 3 &
kubectl scale deployment.apps/klusterlet-registration-agent --replicas 1 -n open-cluster-management-agent""")
)
kubecli.create_manifestwork(body, managedcluster)
self.kubecli.create_manifestwork(body, managedcluster)
logging.info("managedcluster_start_scenario has been successfully injected!")
logging.info("Waiting for the specified timeout: %s" % timeout)
common_managedcluster_functions.wait_for_available_status(managedcluster, timeout)
common_managedcluster_functions.wait_for_available_status(managedcluster, timeout, self.kubecli)
except Exception as e:
logging.error("managedcluster scenario exiting due to Exception %s" % e)
sys.exit(1)
finally:
logging.info("Deleting manifestworks")
kubecli.delete_manifestwork(managedcluster)
self.kubecli.delete_manifestwork(managedcluster)

# managedcluster scenario to stop the managedcluster
def managedcluster_stop_scenario(self, instance_kill_count, managedcluster, timeout):
@@ -55,16 +57,16 @@ class managedcluster_scenarios():
args="""kubectl scale deployment.apps/klusterlet --replicas 0 &&
kubectl scale deployment.apps/klusterlet-registration-agent --replicas 0 -n open-cluster-management-agent""")
)
kubecli.create_manifestwork(body, managedcluster)
self.kubecli.create_manifestwork(body, managedcluster)
logging.info("managedcluster_stop_scenario has been successfully injected!")
logging.info("Waiting for the specified timeout: %s" % timeout)
common_managedcluster_functions.wait_for_unavailable_status(managedcluster, timeout)
common_managedcluster_functions.wait_for_unavailable_status(managedcluster, timeout, self.kubecli)
except Exception as e:
logging.error("managedcluster scenario exiting due to Exception %s" % e)
sys.exit(1)
finally:
logging.info("Deleting manifestworks")
kubecli.delete_manifestwork(managedcluster)
self.kubecli.delete_manifestwork(managedcluster)

# managedcluster scenario to stop and then start the managedcluster
def managedcluster_stop_start_scenario(self, instance_kill_count, managedcluster, timeout):
@@ -94,7 +96,7 @@ class managedcluster_scenarios():
template.render(managedcluster_name=managedcluster,
args="""kubectl scale deployment.apps/klusterlet --replicas 3""")
)
kubecli.create_manifestwork(body, managedcluster)
self.kubecli.create_manifestwork(body, managedcluster)
logging.info("start_klusterlet_scenario has been successfully injected!")
time.sleep(30) # until https://github.com/open-cluster-management-io/OCM/issues/118 gets solved
except Exception as e:
@@ -102,7 +104,7 @@ class managedcluster_scenarios():
sys.exit(1)
finally:
logging.info("Deleting manifestworks")
kubecli.delete_manifestwork(managedcluster)
self.kubecli.delete_manifestwork(managedcluster)

# managedcluster scenario to stop the klusterlet
def stop_klusterlet_scenario(self, instance_kill_count, managedcluster, timeout):
@@ -116,7 +118,7 @@ class managedcluster_scenarios():
template.render(managedcluster_name=managedcluster,
args="""kubectl scale deployment.apps/klusterlet --replicas 0""")
)
kubecli.create_manifestwork(body, managedcluster)
self.kubecli.create_manifestwork(body, managedcluster)
logging.info("stop_klusterlet_scenario has been successfully injected!")
time.sleep(30) # until https://github.com/open-cluster-management-io/OCM/issues/118 gets solved
except Exception as e:
@@ -124,7 +126,7 @@ class managedcluster_scenarios():
sys.exit(1)
finally:
logging.info("Deleting manifestworks")
kubecli.delete_manifestwork(managedcluster)
self.kubecli.delete_manifestwork(managedcluster)

# managedcluster scenario to stop and start the klusterlet
def stop_start_klusterlet_scenario(self, instance_kill_count, managedcluster, timeout):

@@ -1,26 +1,29 @@
import yaml
import logging
import time
import krkn_lib_kubernetes
from kraken.managedcluster_scenarios.managedcluster_scenarios import managedcluster_scenarios
import kraken.managedcluster_scenarios.common_managedcluster_functions as common_managedcluster_functions
import kraken.cerberus.setup as cerberus


# Get the managedcluster scenarios object of specfied cloud type
def get_managedcluster_scenario_object(managedcluster_scenario):
return managedcluster_scenarios()
# krkn_lib_kubernetes
def get_managedcluster_scenario_object(managedcluster_scenario, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
return managedcluster_scenarios(kubecli)

# Run defined scenarios
def run(scenarios_list, config, wait_duration):
# krkn_lib_kubernetes
def run(scenarios_list, config, wait_duration, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
for managedcluster_scenario_config in scenarios_list:
with open(managedcluster_scenario_config, "r") as f:
managedcluster_scenario_config = yaml.full_load(f)
for managedcluster_scenario in managedcluster_scenario_config["managedcluster_scenarios"]:
managedcluster_scenario_object = get_managedcluster_scenario_object(managedcluster_scenario)
managedcluster_scenario_object = get_managedcluster_scenario_object(managedcluster_scenario, kubecli)
if managedcluster_scenario["actions"]:
for action in managedcluster_scenario["actions"]:
start_time = int(time.time())
inject_managedcluster_scenario(action, managedcluster_scenario, managedcluster_scenario_object)
inject_managedcluster_scenario(action, managedcluster_scenario, managedcluster_scenario_object, kubecli)
logging.info("Waiting for the specified duration: %s" % (wait_duration))
time.sleep(wait_duration)
end_time = int(time.time())
@@ -29,7 +32,8 @@ def run(scenarios_list, config, wait_duration):


# Inject the specified managedcluster scenario
def inject_managedcluster_scenario(action, managedcluster_scenario, managedcluster_scenario_object):
# krkn_lib_kubernetes
def inject_managedcluster_scenario(action, managedcluster_scenario, managedcluster_scenario_object, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
# Get the managedcluster scenario configurations
run_kill_count = managedcluster_scenario.get("runs", 1)
instance_kill_count = managedcluster_scenario.get("instance_count", 1)
@@ -42,7 +46,7 @@ def inject_managedcluster_scenario(action, managedcluster_scenario, managedclust
else:
managedcluster_name_list = [managedcluster_name]
for single_managedcluster_name in managedcluster_name_list:
managedclusters = common_managedcluster_functions.get_managedcluster(single_managedcluster_name, label_selector, instance_kill_count)
managedclusters = common_managedcluster_functions.get_managedcluster(single_managedcluster_name, label_selector, instance_kill_count, kubecli)
for single_managedcluster in managedclusters:
if action == "managedcluster_start_scenario":
managedcluster_scenario_object.managedcluster_start_scenario(run_kill_count, single_managedcluster, timeout)

@@ -1,80 +1,113 @@
import time
import random
import logging
import kraken.kubernetes.client as kubecli
import krkn_lib_kubernetes
import kraken.cerberus.setup as cerberus
import kraken.post_actions.actions as post_actions
import yaml
import sys
from krkn_lib_kubernetes import ScenarioTelemetry, KrknTelemetry


def run(scenarios_list, config, wait_duration, failed_post_scenarios, kubeconfig_path):
# krkn_lib_kubernetes
def run(
scenarios_list,
config,
wait_duration,
failed_post_scenarios,
kubeconfig_path,
kubecli: krkn_lib_kubernetes.KrknLibKubernetes,
telemetry: KrknTelemetry
) -> (list[str], list[ScenarioTelemetry]):
scenario_telemetries: list[ScenarioTelemetry] = []
failed_scenarios = []
for scenario_config in scenarios_list:
if len(scenario_config) > 1:
pre_action_output = post_actions.run(kubeconfig_path, scenario_config[1])
else:
pre_action_output = ""
with open(scenario_config[0], "r") as f:
scenario_config_yaml = yaml.full_load(f)
for scenario in scenario_config_yaml["scenarios"]:
scenario_namespace = scenario.get("namespace", "")
scenario_label = scenario.get("label_selector", "")
if scenario_namespace is not None and scenario_namespace.strip() != "":
if scenario_label is not None and scenario_label.strip() != "":
logging.error("You can only have namespace or label set in your namespace scenario")
logging.error(
"Current scenario config has namespace '%s' and label selector '%s'"
% (scenario_namespace, scenario_label)
)
logging.error(
"Please set either namespace to blank ('') or label_selector to blank ('') to continue"
)
sys.exit(1)
delete_count = scenario.get("delete_count", 1)
run_count = scenario.get("runs", 1)
run_sleep = scenario.get("sleep", 10)
wait_time = scenario.get("wait_time", 30)
killed_namespaces = []
start_time = int(time.time())
for i in range(run_count):
namespaces = kubecli.check_namespaces([scenario_namespace], scenario_label)
for j in range(delete_count):
if len(namespaces) == 0:
scenario_telemetry = ScenarioTelemetry()
scenario_telemetry.scenario = scenario_config[0]
scenario_telemetry.startTimeStamp = time.time()
telemetry.set_parameters_base64(scenario_telemetry, scenario_config[0])
try:
if len(scenario_config) > 1:
pre_action_output = post_actions.run(kubeconfig_path, scenario_config[1])
else:
pre_action_output = ""
with open(scenario_config[0], "r") as f:
scenario_config_yaml = yaml.full_load(f)
for scenario in scenario_config_yaml["scenarios"]:
scenario_namespace = scenario.get("namespace", "")
scenario_label = scenario.get("label_selector", "")
if scenario_namespace is not None and scenario_namespace.strip() != "":
if scenario_label is not None and scenario_label.strip() != "":
logging.error("You can only have namespace or label set in your namespace scenario")
logging.error(
"Couldn't delete %s namespaces, not enough namespaces matching %s with label %s"
% (str(run_count), scenario_namespace, str(scenario_label))
"Current scenario config has namespace '%s' and label selector '%s'"
% (scenario_namespace, scenario_label)
)
sys.exit(1)
selected_namespace = namespaces[random.randint(0, len(namespaces) - 1)]
killed_namespaces.append(selected_namespace)
try:
kubecli.delete_namespace(selected_namespace)
logging.info("Delete on namespace %s was successful" % str(selected_namespace))
except Exception as e:
logging.info("Delete on namespace %s was unsuccessful" % str(selected_namespace))
logging.info("Namespace action error: " + str(e))
sys.exit(1)
namespaces.remove(selected_namespace)
logging.info("Waiting %s seconds between namespace deletions" % str(run_sleep))
time.sleep(run_sleep)

logging.info("Waiting for the specified duration: %s" % wait_duration)
time.sleep(wait_duration)
if len(scenario_config) > 1:
try:
failed_post_scenarios = post_actions.check_recovery(
kubeconfig_path, scenario_config, failed_post_scenarios, pre_action_output
logging.error(
"Please set either namespace to blank ('') or label_selector to blank ('') to continue"
)
# removed_exit
# sys.exit(1)
raise RuntimeError()
delete_count = scenario.get("delete_count", 1)
run_count = scenario.get("runs", 1)
run_sleep = scenario.get("sleep", 10)
wait_time = scenario.get("wait_time", 30)
killed_namespaces = []
start_time = int(time.time())
for i in range(run_count):
namespaces = kubecli.check_namespaces([scenario_namespace], scenario_label)
for j in range(delete_count):
if len(namespaces) == 0:
logging.error(
"Couldn't delete %s namespaces, not enough namespaces matching %s with label %s"
% (str(run_count), scenario_namespace, str(scenario_label))
)
# removed_exit
# sys.exit(1)
raise RuntimeError()
selected_namespace = namespaces[random.randint(0, len(namespaces) - 1)]
killed_namespaces.append(selected_namespace)
try:
kubecli.delete_namespace(selected_namespace)
logging.info("Delete on namespace %s was successful" % str(selected_namespace))
except Exception as e:
logging.error("Failed to run post action checks: %s" % e)
sys.exit(1)
else:
failed_post_scenarios = check_active_namespace(killed_namespaces, wait_time)
end_time = int(time.time())
cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
logging.info("Delete on namespace %s was unsuccessful" % str(selected_namespace))
logging.info("Namespace action error: " + str(e))
# removed_exit
# sys.exit(1)
raise RuntimeError()
namespaces.remove(selected_namespace)
logging.info("Waiting %s seconds between namespace deletions" % str(run_sleep))
time.sleep(run_sleep)

logging.info("Waiting for the specified duration: %s" % wait_duration)
time.sleep(wait_duration)
if len(scenario_config) > 1:
try:
failed_post_scenarios = post_actions.check_recovery(
kubeconfig_path, scenario_config, failed_post_scenarios, pre_action_output
)
except Exception as e:
logging.error("Failed to run post action checks: %s" % e)
# removed_exit
# sys.exit(1)
raise RuntimeError()
else:
failed_post_scenarios = check_active_namespace(killed_namespaces, wait_time, kubecli)
end_time = int(time.time())
cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
except (Exception, RuntimeError):
scenario_telemetry.exitStatus = 1
failed_scenarios.append(scenario_config[0])
telemetry.log_exception(scenario_config[0])
else:
scenario_telemetry.exitStatus = 0
scenario_telemetry.endTimeStamp = time.time()
scenario_telemetries.append(scenario_telemetry)
return failed_scenarios, scenario_telemetries

def check_active_namespace(killed_namespaces, wait_time):
# krkn_lib_kubernetes
def check_active_namespace(killed_namespaces, wait_time, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
active_namespace = []
timer = 0
while timer < wait_time and killed_namespaces:

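The restructuring above wraps each scenario in a ScenarioTelemetry record, replaces sys.exit calls with RuntimeError, and collects failures instead of aborting the run; stripped of the namespace logic, the pattern reduces to roughly this sketch (the wrapper and its callable argument are illustrative):

import time
from krkn_lib_kubernetes import ScenarioTelemetry, KrknTelemetry

def run_with_telemetry(scenario_file: str, telemetry: KrknTelemetry, body):
    scenario_telemetry = ScenarioTelemetry()
    scenario_telemetry.scenario = scenario_file
    scenario_telemetry.startTimeStamp = time.time()
    telemetry.set_parameters_base64(scenario_telemetry, scenario_file)
    failed = False
    try:
        body()  # the scenario body raises RuntimeError on failure, never sys.exit
    except (RuntimeError, Exception):
        scenario_telemetry.exitStatus = 1
        telemetry.log_exception(scenario_file)
        failed = True
    else:
        scenario_telemetry.exitStatus = 0
    scenario_telemetry.endTimeStamp = time.time()
    return scenario_telemetry, failed
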
@@ -4,94 +4,112 @@ import time
import sys
import os
import random
import krkn_lib_kubernetes
from jinja2 import Environment, FileSystemLoader
import kraken.cerberus.setup as cerberus
import kraken.kubernetes.client as kubecli
import kraken.node_actions.common_node_functions as common_node_functions
from krkn_lib_kubernetes import ScenarioTelemetry, KrknTelemetry


# krkn_lib_kubernetes
# Reads the scenario config and introduces traffic variations in Node's host network interface.
def run(scenarios_list, config, wait_duration):
def run(scenarios_list, config, wait_duration, kubecli: krkn_lib_kubernetes.KrknLibKubernetes, telemetry: KrknTelemetry) -> (list[str], list[ScenarioTelemetry]):
failed_post_scenarios = ""
logging.info("Runing the Network Chaos tests")
failed_post_scenarios = ""
scenario_telemetries: list[ScenarioTelemetry] = []
failed_scenarios = []
for net_config in scenarios_list:
with open(net_config, "r") as file:
param_lst = ["latency", "loss", "bandwidth"]
test_config = yaml.safe_load(file)
test_dict = test_config["network_chaos"]
test_duration = int(test_dict.get("duration", 300))
test_interface = test_dict.get("interfaces", [])
test_node = test_dict.get("node_name", "")
test_node_label = test_dict.get("label_selector", "node-role.kubernetes.io/master")
test_execution = test_dict.get("execution", "serial")
test_instance_count = test_dict.get("instance_count", 1)
test_egress = test_dict.get("egress", {"bandwidth": "100mbit"})
if test_node:
node_name_list = test_node.split(",")
else:
node_name_list = [test_node]
nodelst = []
for single_node_name in node_name_list:
nodelst.extend(common_node_functions.get_node(single_node_name, test_node_label, test_instance_count))
file_loader = FileSystemLoader(os.path.abspath(os.path.dirname(__file__)))
env = Environment(loader=file_loader, autoescape=True)
pod_template = env.get_template("pod.j2")
test_interface = verify_interface(test_interface, nodelst, pod_template)
joblst = []
egress_lst = [i for i in param_lst if i in test_egress]
chaos_config = {
"network_chaos": {
"duration": test_duration,
"interfaces": test_interface,
"node_name": ",".join(nodelst),
"execution": test_execution,
"instance_count": test_instance_count,
"egress": test_egress,
scenario_telemetry = ScenarioTelemetry()
scenario_telemetry.scenario = net_config
scenario_telemetry.startTimeStamp = time.time()
telemetry.set_parameters_base64(scenario_telemetry, net_config)
try:
with open(net_config, "r") as file:
param_lst = ["latency", "loss", "bandwidth"]
test_config = yaml.safe_load(file)
test_dict = test_config["network_chaos"]
test_duration = int(test_dict.get("duration", 300))
test_interface = test_dict.get("interfaces", [])
test_node = test_dict.get("node_name", "")
test_node_label = test_dict.get("label_selector", "node-role.kubernetes.io/master")
test_execution = test_dict.get("execution", "serial")
test_instance_count = test_dict.get("instance_count", 1)
test_egress = test_dict.get("egress", {"bandwidth": "100mbit"})
if test_node:
node_name_list = test_node.split(",")
else:
node_name_list = [test_node]
nodelst = []
for single_node_name in node_name_list:
nodelst.extend(common_node_functions.get_node(single_node_name, test_node_label, test_instance_count, kubecli))
file_loader = FileSystemLoader(os.path.abspath(os.path.dirname(__file__)))
env = Environment(loader=file_loader, autoescape=True)
pod_template = env.get_template("pod.j2")
test_interface = verify_interface(test_interface, nodelst, pod_template, kubecli)
joblst = []
egress_lst = [i for i in param_lst if i in test_egress]
chaos_config = {
"network_chaos": {
"duration": test_duration,
"interfaces": test_interface,
"node_name": ",".join(nodelst),
"execution": test_execution,
"instance_count": test_instance_count,
"egress": test_egress,
}
}
}
logging.info("Executing network chaos with config \n %s" % yaml.dump(chaos_config))
job_template = env.get_template("job.j2")
try:
for i in egress_lst:
for node in nodelst:
exec_cmd = get_egress_cmd(
test_execution, test_interface, i, test_dict["egress"], duration=test_duration
)
logging.info("Executing %s on node %s" % (exec_cmd, node))
job_body = yaml.safe_load(
job_template.render(jobname=i + str(hash(node))[:5], nodename=node, cmd=exec_cmd)
)
joblst.append(job_body["metadata"]["name"])
api_response = kubecli.create_job(job_body)
if api_response is None:
raise Exception("Error creating job")
if test_execution == "serial":
logging.info("Waiting for serial job to finish")
logging.info("Executing network chaos with config \n %s" % yaml.dump(chaos_config))
job_template = env.get_template("job.j2")
try:
for i in egress_lst:
for node in nodelst:
exec_cmd = get_egress_cmd(
test_execution, test_interface, i, test_dict["egress"], duration=test_duration
)
logging.info("Executing %s on node %s" % (exec_cmd, node))
job_body = yaml.safe_load(
job_template.render(jobname=i + str(hash(node))[:5], nodename=node, cmd=exec_cmd)
)
joblst.append(job_body["metadata"]["name"])
api_response = kubecli.create_job(job_body)
if api_response is None:
raise Exception("Error creating job")
if test_execution == "serial":
logging.info("Waiting for serial job to finish")
start_time = int(time.time())
wait_for_job(joblst[:], kubecli, test_duration + 300)
logging.info("Waiting for wait_duration %s" % wait_duration)
time.sleep(wait_duration)
end_time = int(time.time())
cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
if test_execution == "parallel":
break
if test_execution == "parallel":
logging.info("Waiting for parallel job to finish")
start_time = int(time.time())
wait_for_job(joblst[:], test_duration + 300)
wait_for_job(joblst[:], kubecli, test_duration + 300)
logging.info("Waiting for wait_duration %s" % wait_duration)
time.sleep(wait_duration)
end_time = int(time.time())
cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
if test_execution == "parallel":
break
if test_execution == "parallel":
logging.info("Waiting for parallel job to finish")
start_time = int(time.time())
wait_for_job(joblst[:], test_duration + 300)
logging.info("Waiting for wait_duration %s" % wait_duration)
time.sleep(wait_duration)
end_time = int(time.time())
cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
except Exception as e:
logging.error("Network Chaos exiting due to Exception %s" % e)
sys.exit(1)
finally:
logging.info("Deleting jobs")
delete_job(joblst[:])
except Exception as e:
logging.error("Network Chaos exiting due to Exception %s" % e)
raise RuntimeError()
finally:
logging.info("Deleting jobs")
delete_job(joblst[:], kubecli)
except (RuntimeError, Exception):
scenario_telemetry.exitStatus = 1
failed_scenarios.append(net_config)
telemetry.log_exception(net_config)
else:
scenario_telemetry.exitStatus = 0
scenario_telemetries.append(scenario_telemetry)
return failed_scenarios, scenario_telemetries


def verify_interface(test_interface, nodelst, template):
# krkn_lib_kubernetes
def verify_interface(test_interface, nodelst, template, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
pod_index = random.randint(0, len(nodelst) - 1)
pod_body = yaml.safe_load(template.render(nodename=nodelst[pod_index]))
logging.info("Creating pod to query interface on node %s" % nodelst[pod_index])
@@ -108,21 +126,24 @@ def verify_interface(test_interface, nodelst, template):
for interface in test_interface:
if interface not in interface_lst:
logging.error("Interface %s not found in node %s interface list %s" % (interface, nodelst[pod_index], interface_lst))
sys.exit(1)
#sys.exit(1)
raise RuntimeError()
return test_interface
finally:
logging.info("Deleteing pod to query interface on node")
kubecli.delete_pod("fedtools", "default")


def get_job_pods(api_response):
# krkn_lib_kubernetes
def get_job_pods(api_response, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
controllerUid = api_response.metadata.labels["controller-uid"]
pod_label_selector = "controller-uid=" + controllerUid
pods_list = kubecli.list_pods(label_selector=pod_label_selector, namespace="default")
return pods_list[0]


def wait_for_job(joblst, timeout=300):
# krkn_lib_kubernetes
def wait_for_job(joblst, kubecli: krkn_lib_kubernetes.KrknLibKubernetes, timeout=300):
waittime = time.time() + timeout
count = 0
joblen = len(joblst)
@@ -134,26 +155,27 @@ def wait_for_job(joblst, timeout=300):
count += 1
joblst.remove(jobname)
except Exception:
logging.warn("Exception in getting job status")
logging.warning("Exception in getting job status")
if time.time() > waittime:
raise Exception("Starting pod failed")
time.sleep(5)


def delete_job(joblst):
# krkn_lib_kubernetes
def delete_job(joblst, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
for jobname in joblst:
try:
api_response = kubecli.get_job_status(jobname, namespace="default")
if api_response.status.failed is not None:
pod_name = get_job_pods(api_response)
pod_name = get_job_pods(api_response, kubecli)
pod_stat = kubecli.read_pod(name=pod_name, namespace="default")
logging.error(pod_stat.status.container_statuses)
pod_log_response = kubecli.get_pod_log(name=pod_name, namespace="default")
pod_log = pod_log_response.data.decode("utf-8")
logging.error(pod_log)
except Exception:
logging.warn("Exception in getting job status")
api_response = kubecli.delete_job(name=jobname, namespace="default")
logging.warning("Exception in getting job status")
kubecli.delete_job(name=jobname, namespace="default")


def get_egress_cmd(execution, test_interface, mod, vallst, duration=30):

@@ -2,10 +2,13 @@ import sys
import logging
import kraken.invoke.command as runcommand
import kraken.node_actions.common_node_functions as nodeaction
import krkn_lib_kubernetes


# krkn_lib_kubernetes
class abstract_node_scenarios:

kubecli: krkn_lib_kubernetes.KrknLibKubernetes
def __init__(self, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
self.kubecli = kubecli
# Node scenario to start the node
def node_start_scenario(self, instance_kill_count, node, timeout):
pass
@@ -42,7 +45,7 @@ class abstract_node_scenarios:
logging.info("Starting stop_kubelet_scenario injection")
logging.info("Stopping the kubelet of the node %s" % (node))
runcommand.run("oc debug node/" + node + " -- chroot /host systemctl stop kubelet")
nodeaction.wait_for_unknown_status(node, timeout)
nodeaction.wait_for_unknown_status(node, timeout, self.kubecli)
logging.info("The kubelet of the node %s has been stopped" % (node))
logging.info("stop_kubelet_scenario has been successfuly injected!")
except Exception as e:

@@ -1,5 +1,6 @@
import sys
import time
import krkn_lib_kubernetes
from aliyunsdkcore.client import AcsClient
from aliyunsdkecs.request.v20140526 import DescribeInstancesRequest, DeleteInstanceRequest
from aliyunsdkecs.request.v20140526 import StopInstanceRequest, StartInstanceRequest, RebootInstanceRequest
@@ -179,9 +180,9 @@ class Alibaba:
logging.info("ECS %s is released" % instance_id)
return True


# krkn_lib_kubernetes
class alibaba_node_scenarios(abstract_node_scenarios):
def __init__(self):
def __init__(self,kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
self.alibaba = Alibaba()

# Node scenario to start the node
@@ -193,7 +194,7 @@ class alibaba_node_scenarios(abstract_node_scenarios):
logging.info("Starting the node %s with instance ID: %s " % (node, vm_id))
self.alibaba.start_instances(vm_id)
self.alibaba.wait_until_running(vm_id, timeout)
nodeaction.wait_for_ready_status(node, timeout)
nodeaction.wait_for_ready_status(node, timeout, self.kubecli)
logging.info("Node with instance ID: %s is in running state" % node)
logging.info("node_start_scenario has been successfully injected!")
except Exception as e:
@@ -213,7 +214,7 @@ class alibaba_node_scenarios(abstract_node_scenarios):
self.alibaba.stop_instances(vm_id)
self.alibaba.wait_until_stopped(vm_id, timeout)
logging.info("Node with instance ID: %s is in stopped state" % vm_id)
nodeaction.wait_for_unknown_status(node, timeout)
nodeaction.wait_for_unknown_status(node, timeout, self.kubecli)
except Exception as e:
logging.error("Failed to stop node instance. Encountered following exception: %s. " "Test Failed" % e)
logging.error("node_stop_scenario injection failed!")
@@ -248,8 +249,8 @@ class alibaba_node_scenarios(abstract_node_scenarios):
instance_id = self.alibaba.get_instance_id(node)
logging.info("Rebooting the node with instance ID: %s " % (instance_id))
self.alibaba.reboot_instances(instance_id)
nodeaction.wait_for_unknown_status(node, timeout)
nodeaction.wait_for_ready_status(node, timeout)
nodeaction.wait_for_unknown_status(node, timeout, self.kubecli)
nodeaction.wait_for_ready_status(node, timeout, self.kubecli)
logging.info("Node with instance ID: %s has been rebooted" % (instance_id))
logging.info("node_reboot_scenario has been successfully injected!")
except Exception as e:

@@ -2,7 +2,7 @@ import sys
import time
import boto3
import logging
import kraken.kubernetes.client as kubecli
import krkn_lib_kubernetes
import kraken.node_actions.common_node_functions as nodeaction
from kraken.node_actions.abstract_node_scenarios import abstract_node_scenarios

@@ -27,7 +27,9 @@ class AWS:
logging.error(
"Failed to start node instance %s. Encountered following " "exception: %s." % (instance_id, e)
)
sys.exit(1)
# removed_exit
# sys.exit(1)
raise RuntimeError()

# Stop the node instance
def stop_instances(self, instance_id):
@@ -36,7 +38,9 @@ class AWS:
logging.info("EC2 instance: " + str(instance_id) + " stopped")
except Exception as e:
logging.error("Failed to stop node instance %s. Encountered following " "exception: %s." % (instance_id, e))
sys.exit(1)
# removed_exit
# sys.exit(1)
raise RuntimeError()

# Terminate the node instance
def terminate_instances(self, instance_id):
@@ -47,7 +51,9 @@ class AWS:
logging.error(
"Failed to terminate node instance %s. Encountered following " "exception: %s." % (instance_id, e)
)
sys.exit(1)
# removed_exit
# sys.exit(1)
raise RuntimeError()

# Reboot the node instance
def reboot_instances(self, instance_id):
@@ -58,7 +64,9 @@ class AWS:
logging.error(
"Failed to reboot node instance %s. Encountered following " "exception: %s." % (instance_id, e)
)
sys.exit(1)
# removed_exit
# sys.exit(1)
raise RuntimeError()

# Below functions poll EC2.Client.describe_instances() every 15 seconds
# until a successful state is reached. An error is returned after 40 failed checks
@@ -102,7 +110,9 @@ class AWS:
"Failed to create the default network_acl: %s"
"Make sure you have aws cli configured on the host and set for the region of your vpc/subnet" % (e)
)
sys.exit(1)
# removed_exit
# sys.exit(1)
raise RuntimeError()
return acl_id

# Replace network acl association
@@ -114,7 +124,9 @@ class AWS:
new_association_id = status["NewAssociationId"]
except Exception as e:
logging.error("Failed to replace network acl association: %s" % (e))
sys.exit(1)
# removed_exit
# sys.exit(1)
raise RuntimeError()
return new_association_id

# Describe network acl
@@ -131,7 +143,9 @@ class AWS:
"Failed to describe network acl: %s."
"Make sure you have aws cli configured on the host and set for the region of your vpc/subnet" % (e)
)
sys.exit(1)
# removed_exit
# sys.exit(1)
raise RuntimeError()
associations = response["NetworkAcls"][0]["Associations"]
# grab the current network_acl in use
original_acl_id = response["NetworkAcls"][0]["Associations"][0]["NetworkAclId"]
@@ -148,11 +162,14 @@ class AWS:
"Make sure you have aws cli configured on the host and set for the region of your vpc/subnet"
% (acl_id, e)
)
sys.exit(1)

# removed_exit
# sys.exit(1)
raise RuntimeError()

# krkn_lib_kubernetes
class aws_node_scenarios(abstract_node_scenarios):
def __init__(self):
def __init__(self, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
super().__init__(kubecli)
self.aws = AWS()

# Node scenario to start the node
@@ -164,7 +181,7 @@ class aws_node_scenarios(abstract_node_scenarios):
logging.info("Starting the node %s with instance ID: %s " % (node, instance_id))
self.aws.start_instances(instance_id)
self.aws.wait_until_running(instance_id)
nodeaction.wait_for_ready_status(node, timeout)
nodeaction.wait_for_ready_status(node, timeout, self.kubecli)
logging.info("Node with instance ID: %s is in running state" % (instance_id))
logging.info("node_start_scenario has been successfully injected!")
except Exception as e:
@@ -172,7 +189,9 @@ class aws_node_scenarios(abstract_node_scenarios):
"Failed to start node instance. Encountered following " "exception: %s. Test Failed" % (e)
)
logging.error("node_start_scenario injection failed!")
sys.exit(1)
# removed_exit
# sys.exit(1)
raise RuntimeError()

# Node scenario to stop the node
def node_stop_scenario(self, instance_kill_count, node, timeout):
@@ -184,11 +203,13 @@ class aws_node_scenarios(abstract_node_scenarios):
self.aws.stop_instances(instance_id)
self.aws.wait_until_stopped(instance_id)
logging.info("Node with instance ID: %s is in stopped state" % (instance_id))
nodeaction.wait_for_unknown_status(node, timeout)
nodeaction.wait_for_unknown_status(node, timeout, self.kubecli)
except Exception as e:
logging.error("Failed to stop node instance. Encountered following exception: %s. " "Test Failed" % (e))
logging.error("node_stop_scenario injection failed!")
sys.exit(1)
# removed_exit
# sys.exit(1)
raise RuntimeError()

# Node scenario to terminate the node
def node_termination_scenario(self, instance_kill_count, node, timeout):
@@ -200,10 +221,10 @@ class aws_node_scenarios(abstract_node_scenarios):
self.aws.terminate_instances(instance_id)
self.aws.wait_until_terminated(instance_id)
for _ in range(timeout):
if node not in kubecli.list_nodes():
if node not in self.kubecli.list_nodes():
break
time.sleep(1)
if node in kubecli.list_nodes():
if node in self.kubecli.list_nodes():
raise Exception("Node could not be terminated")
logging.info("Node with instance ID: %s has been terminated" % (instance_id))
logging.info("node_termination_scenario has been successfuly injected!")
@@ -212,7 +233,9 @@ class aws_node_scenarios(abstract_node_scenarios):
"Failed to terminate node instance. Encountered following exception:" " %s. Test Failed" % (e)
)
logging.error("node_termination_scenario injection failed!")
sys.exit(1)
# removed_exit
# sys.exit(1)
raise RuntimeError()

# Node scenario to reboot the node
def node_reboot_scenario(self, instance_kill_count, node, timeout):
@@ -222,8 +245,8 @@ class aws_node_scenarios(abstract_node_scenarios):
instance_id = self.aws.get_instance_id(node)
logging.info("Rebooting the node %s with instance ID: %s " % (node, instance_id))
self.aws.reboot_instances(instance_id)
nodeaction.wait_for_unknown_status(node, timeout)
nodeaction.wait_for_ready_status(node, timeout)
nodeaction.wait_for_unknown_status(node, timeout, self.kubecli)
nodeaction.wait_for_ready_status(node, timeout, self.kubecli)
logging.info("Node with instance ID: %s has been rebooted" % (instance_id))
logging.info("node_reboot_scenario has been successfuly injected!")
except Exception as e:
@@ -231,4 +254,6 @@ class aws_node_scenarios(abstract_node_scenarios):
"Failed to reboot node instance. Encountered following exception:" " %s. Test Failed" % (e)
)
logging.error("node_reboot_scenario injection failed!")
sys.exit(1)
# removed_exit
# sys.exit(1)
raise RuntimeError()

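The AWS class's comment describes polling EC2.Client.describe_instances() every 15 seconds with up to 40 checks; boto3's built-in waiters express the same wait directly, as in this sketch (not the file's actual code):

import boto3

ec2 = boto3.client("ec2")

def wait_until_running(instance_id: str):
    # Equivalent to polling describe_instances every 15 s, at most 40 times.
    waiter = ec2.get_waiter("instance_running")
    waiter.wait(InstanceIds=[instance_id],
                WaiterConfig={"Delay": 15, "MaxAttempts": 40})
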
kraken/node_actions/az_node_scenarios.py
@@ -3,7 +3,7 @@ import time
 from azure.mgmt.compute import ComputeManagementClient
 from azure.identity import DefaultAzureCredential
 import logging
-import kraken.kubernetes.client as kubecli
+import krkn_lib_kubernetes
 import kraken.node_actions.common_node_functions as nodeaction
 from kraken.node_actions.abstract_node_scenarios import abstract_node_scenarios
 import kraken.invoke.command as runcommand
@@ -39,7 +39,9 @@ class Azure:
             logging.info("vm name " + str(vm_name) + " started")
         except Exception as e:
             logging.error("Failed to start node instance %s. Encountered following " "exception: %s." % (vm_name, e))
-            sys.exit(1)
+            # removed_exit
+            # sys.exit(1)
+            raise RuntimeError()

     # Stop the node instance
     def stop_instances(self, group_name, vm_name):
@@ -48,7 +50,9 @@ class Azure:
             logging.info("vm name " + str(vm_name) + " stopped")
         except Exception as e:
             logging.error("Failed to stop node instance %s. Encountered following " "exception: %s." % (vm_name, e))
-            sys.exit(1)
+            # removed_exit
+            # sys.exit(1)
+            raise RuntimeError()

     # Terminate the node instance
     def terminate_instances(self, group_name, vm_name):
@@ -59,7 +63,9 @@ class Azure:
             logging.error(
                 "Failed to terminate node instance %s. Encountered following " "exception: %s." % (vm_name, e)
             )
-            sys.exit(1)
+            # removed_exit
+            # sys.exit(1)
+            raise RuntimeError()

     # Reboot the node instance
     def reboot_instances(self, group_name, vm_name):
@@ -68,7 +74,9 @@ class Azure:
             logging.info("vm name " + str(vm_name) + " rebooted")
         except Exception as e:
             logging.error("Failed to reboot node instance %s. Encountered following " "exception: %s." % (vm_name, e))
-            sys.exit(1)
+            # removed_exit
+            # sys.exit(1)
+            raise RuntimeError()

     def get_vm_status(self, resource_group, vm_name):
         statuses = self.compute_client.virtual_machines.instance_view(resource_group, vm_name).statuses
@@ -121,9 +129,10 @@ class Azure:
         logging.info("Vm %s is terminated" % vm_name)
         return True


+# krkn_lib_kubernetes
 class azure_node_scenarios(abstract_node_scenarios):
-    def __init__(self):
+    def __init__(self, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
+        super().__init__(kubecli)
         logging.info("init in azure")
         self.azure = Azure()

@@ -136,7 +145,7 @@ class azure_node_scenarios(abstract_node_scenarios):
                 logging.info("Starting the node %s with instance ID: %s " % (vm_name, resource_group))
                 self.azure.start_instances(resource_group, vm_name)
                 self.azure.wait_until_running(resource_group, vm_name, timeout)
-                nodeaction.wait_for_ready_status(vm_name, timeout)
+                nodeaction.wait_for_ready_status(vm_name, timeout,self.kubecli)
                 logging.info("Node with instance ID: %s is in running state" % node)
                 logging.info("node_start_scenario has been successfully injected!")
             except Exception as e:
@@ -144,7 +153,9 @@ class azure_node_scenarios(abstract_node_scenarios):
                     "Failed to start node instance. Encountered following " "exception: %s. Test Failed" % (e)
                 )
                 logging.error("node_start_scenario injection failed!")
-                sys.exit(1)
+                # removed_exit
+                # sys.exit(1)
+                raise RuntimeError()

     # Node scenario to stop the node
     def node_stop_scenario(self, instance_kill_count, node, timeout):
@@ -156,11 +167,13 @@ class azure_node_scenarios(abstract_node_scenarios):
                 self.azure.stop_instances(resource_group, vm_name)
                 self.azure.wait_until_stopped(resource_group, vm_name, timeout)
                 logging.info("Node with instance ID: %s is in stopped state" % vm_name)
-                nodeaction.wait_for_unknown_status(vm_name, timeout)
+                nodeaction.wait_for_unknown_status(vm_name, timeout, self.kubecli)
             except Exception as e:
                 logging.error("Failed to stop node instance. Encountered following exception: %s. " "Test Failed" % e)
                 logging.error("node_stop_scenario injection failed!")
-                sys.exit(1)
+                # removed_exit
+                # sys.exit(1)
+                raise RuntimeError()

     # Node scenario to terminate the node
     def node_termination_scenario(self, instance_kill_count, node, timeout):
@@ -172,10 +185,10 @@ class azure_node_scenarios(abstract_node_scenarios):
                 self.azure.terminate_instances(resource_group, vm_name)
                 self.azure.wait_until_terminated(resource_group, vm_name, timeout)
                 for _ in range(timeout):
-                    if vm_name not in kubecli.list_nodes():
+                    if vm_name not in self.kubecli.list_nodes():
                         break
                     time.sleep(1)
-                if vm_name in kubecli.list_nodes():
+                if vm_name in self.kubecli.list_nodes():
                     raise Exception("Node could not be terminated")
                 logging.info("Node with instance ID: %s has been terminated" % node)
                 logging.info("node_termination_scenario has been successfully injected!")
@@ -184,7 +197,9 @@ class azure_node_scenarios(abstract_node_scenarios):
                     "Failed to terminate node instance. Encountered following exception:" " %s. Test Failed" % (e)
                 )
                 logging.error("node_termination_scenario injection failed!")
-                sys.exit(1)
+                # removed_exit
+                # sys.exit(1)
+                raise RuntimeError()

     # Node scenario to reboot the node
     def node_reboot_scenario(self, instance_kill_count, node, timeout):
@@ -194,8 +209,8 @@ class azure_node_scenarios(abstract_node_scenarios):
                 vm_name, resource_group = self.azure.get_instance_id(node)
                 logging.info("Rebooting the node %s with instance ID: %s " % (vm_name, resource_group))
                 self.azure.reboot_instances(resource_group, vm_name)
-                nodeaction.wait_for_unknown_status(vm_name, timeout)
-                nodeaction.wait_for_ready_status(vm_name, timeout)
+                nodeaction.wait_for_unknown_status(vm_name, timeout, self.kubecli)
+                nodeaction.wait_for_ready_status(vm_name, timeout, self.kubecli)
                 logging.info("Node with instance ID: %s has been rebooted" % (vm_name))
                 logging.info("node_reboot_scenario has been successfully injected!")
             except Exception as e:
@@ -203,4 +218,6 @@ class azure_node_scenarios(abstract_node_scenarios):
                     "Failed to reboot node instance. Encountered following exception:" " %s. Test Failed" % (e)
                 )
                 logging.error("node_reboot_scenario injection failed!")
-                sys.exit(1)
+                # removed_exit
+                # sys.exit(1)
+                raise RuntimeError()
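Every subclass now calls super().__init__(kubecli), which is what makes the self.kubecli references above resolve. The abstract base itself is not shown in this diff; a rough sketch of the shape it is assumed to have:

    import krkn_lib_kubernetes

    class abstract_node_scenarios:
        # assumed: the base class simply stores the injected client
        def __init__(self, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
            self.kubecli = kubecli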
kraken/node_actions/bm_node_scenarios.py
@@ -1,5 +1,6 @@
 import kraken.node_actions.common_node_functions as nodeaction
 from kraken.node_actions.abstract_node_scenarios import abstract_node_scenarios
+import krkn_lib_kubernetes
 import logging
 import openshift as oc
 import pyipmi
@@ -104,9 +105,10 @@ class BM:
         while self.get_ipmi_connection(bmc_addr, node_name).get_chassis_status().power_on:
             time.sleep(1)


+# krkn_lib_kubernetes
 class bm_node_scenarios(abstract_node_scenarios):
-    def __init__(self, bm_info, user, passwd):
+    def __init__(self, bm_info, user, passwd, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
+        super().__init__(kubecli)
         self.bm = BM(bm_info, user, passwd)

     # Node scenario to start the node
@@ -118,7 +120,7 @@ class bm_node_scenarios(abstract_node_scenarios):
                 logging.info("Starting the node %s with bmc address: %s " % (node, bmc_addr))
                 self.bm.start_instances(bmc_addr, node)
                 self.bm.wait_until_running(bmc_addr, node)
-                nodeaction.wait_for_ready_status(node, timeout)
+                nodeaction.wait_for_ready_status(node, timeout, self.kubecli)
                 logging.info("Node with bmc address: %s is in running state" % (bmc_addr))
                 logging.info("node_start_scenario has been successfully injected!")
             except Exception as e:
@@ -140,7 +142,7 @@ class bm_node_scenarios(abstract_node_scenarios):
                 self.bm.stop_instances(bmc_addr, node)
                 self.bm.wait_until_stopped(bmc_addr, node)
                 logging.info("Node with bmc address: %s is in stopped state" % (bmc_addr))
-                nodeaction.wait_for_unknown_status(node, timeout)
+                nodeaction.wait_for_unknown_status(node, timeout, self.kubecli)
             except Exception as e:
                 logging.error(
                     "Failed to stop node instance. Encountered following exception: %s. "
@@ -163,8 +165,8 @@ class bm_node_scenarios(abstract_node_scenarios):
                 logging.info("BMC Addr: %s" % (bmc_addr))
                 logging.info("Rebooting the node %s with bmc address: %s " % (node, bmc_addr))
                 self.bm.reboot_instances(bmc_addr, node)
-                nodeaction.wait_for_unknown_status(node, timeout)
-                nodeaction.wait_for_ready_status(node, timeout)
+                nodeaction.wait_for_unknown_status(node, timeout, self.kubecli)
+                nodeaction.wait_for_ready_status(node, timeout, self.kubecli)
                 logging.info("Node with bmc address: %s has been rebooted" % (bmc_addr))
                 logging.info("node_reboot_scenario has been successfuly injected!")
             except Exception as e:
kraken/node_actions/common_node_functions.py
@@ -2,14 +2,14 @@ import time
 import random
 import logging
 import paramiko
-import kraken.kubernetes.client as kubecli
+import krkn_lib_kubernetes
 import kraken.invoke.command as runcommand

 node_general = False


 # Pick a random node with specified label selector
-def get_node(node_name, label_selector, instance_kill_count):
+def get_node(node_name, label_selector, instance_kill_count, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
     if node_name in kubecli.list_killable_nodes():
         return [node_name]
     elif node_name:
@@ -29,20 +29,21 @@ def get_node(node_name, label_selector, instance_kill_count):
     return nodes_to_return


+# krkn_lib_kubernetes
 # Wait until the node status becomes Ready
-def wait_for_ready_status(node, timeout):
+def wait_for_ready_status(node, timeout, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
     resource_version = kubecli.get_node_resource_version(node)
     kubecli.watch_node_status(node, "True", timeout, resource_version)


+# krkn_lib_kubernetes
 # Wait until the node status becomes Not Ready
-def wait_for_not_ready_status(node, timeout):
+def wait_for_not_ready_status(node, timeout, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
     resource_version = kubecli.get_node_resource_version(node)
     kubecli.watch_node_status(node, "False", timeout, resource_version)


+# krkn_lib_kubernetes
 # Wait until the node status becomes Unknown
-def wait_for_unknown_status(node, timeout):
+def wait_for_unknown_status(node, timeout, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
     resource_version = kubecli.get_node_resource_version(node)
     kubecli.watch_node_status(node, "Unknown", timeout, resource_version)
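These three helpers are the targets of every nodeaction.wait_for_* call in the cloud modules above; the only behavioral change is that the Kubernetes client arrives as an explicit argument instead of a module-level import, which also makes them straightforward to drive in isolation. A hedged usage sketch (the KrknLibKubernetes constructor is assumed, check krkn-lib for the real signature):

    import krkn_lib_kubernetes
    import kraken.node_actions.common_node_functions as nodeaction

    # assumed constructor argument; the actual signature may differ
    kubecli = krkn_lib_kubernetes.KrknLibKubernetes(kubeconfig_path="~/.kube/config")
    nodeaction.wait_for_ready_status("worker-0", 300, kubecli)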
kraken/node_actions/docker_node_scenarios.py
@@ -1,5 +1,6 @@
 import kraken.node_actions.common_node_functions as nodeaction
 from kraken.node_actions.abstract_node_scenarios import abstract_node_scenarios
+import krkn_lib_kubernetes
 import logging
 import sys
 import docker
@@ -36,7 +37,8 @@ class Docker:


 class docker_node_scenarios(abstract_node_scenarios):
-    def __init__(self):
+    def __init__(self, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
+        super().__init__(kubecli)
         self.docker = Docker()

     # Node scenario to start the node
@@ -47,7 +49,7 @@ class docker_node_scenarios(abstract_node_scenarios):
                 container_id = self.docker.get_container_id(node)
                 logging.info("Starting the node %s with container ID: %s " % (node, container_id))
                 self.docker.start_instances(node)
-                nodeaction.wait_for_ready_status(node, timeout)
+                nodeaction.wait_for_ready_status(node, timeout, self.kubecli)
                 logging.info("Node with container ID: %s is in running state" % (container_id))
                 logging.info("node_start_scenario has been successfully injected!")
             except Exception as e:
@@ -66,7 +68,7 @@ class docker_node_scenarios(abstract_node_scenarios):
                 logging.info("Stopping the node %s with container ID: %s " % (node, container_id))
                 self.docker.stop_instances(node)
                 logging.info("Node with container ID: %s is in stopped state" % (container_id))
-                nodeaction.wait_for_unknown_status(node, timeout)
+                nodeaction.wait_for_unknown_status(node, timeout, self.kubecli)
             except Exception as e:
                 logging.error("Failed to stop node instance. Encountered following exception: %s. " "Test Failed" % (e))
                 logging.error("node_stop_scenario injection failed!")
@@ -97,8 +99,8 @@ class docker_node_scenarios(abstract_node_scenarios):
                 container_id = self.docker.get_container_id(node)
                 logging.info("Rebooting the node %s with container ID: %s " % (node, container_id))
                 self.docker.reboot_instances(node)
-                nodeaction.wait_for_unknown_status(node, timeout)
-                nodeaction.wait_for_ready_status(node, timeout)
+                nodeaction.wait_for_unknown_status(node, timeout, self.kubecli)
+                nodeaction.wait_for_ready_status(node, timeout, self.kubecli)
                 logging.info("Node with container ID: %s has been rebooted" % (container_id))
                 logging.info("node_reboot_scenario has been successfuly injected!")
             except Exception as e:
kraken/node_actions/gcp_node_scenarios.py
@@ -1,7 +1,7 @@
 import sys
 import time
 import logging
-import kraken.kubernetes.client as kubecli
+import krkn_lib_kubernetes
 import kraken.node_actions.common_node_functions as nodeaction
 from kraken.node_actions.abstract_node_scenarios import abstract_node_scenarios
 from googleapiclient import discovery
@@ -45,7 +45,9 @@ class GCP:
             logging.error(
                 "Failed to start node instance %s. Encountered following " "exception: %s." % (instance_id, e)
             )
-            sys.exit(1)
+            # removed_exit
+            # sys.exit(1)
+            raise RuntimeError()

     # Stop the node instance
     def stop_instances(self, zone, instance_id):
@@ -54,7 +56,9 @@ class GCP:
             logging.info("vm name " + str(instance_id) + " stopped")
         except Exception as e:
             logging.error("Failed to stop node instance %s. Encountered following " "exception: %s." % (instance_id, e))
-            sys.exit(1)
+            # removed_exit
+            # sys.exit(1)
+            raise RuntimeError()

     # Start the node instance
     def suspend_instances(self, zone, instance_id):
@@ -65,7 +69,9 @@ class GCP:
             logging.error(
                 "Failed to suspend node instance %s. Encountered following " "exception: %s." % (instance_id, e)
             )
-            sys.exit(1)
+            # removed_exit
+            # sys.exit(1)
+            raise RuntimeError()

     # Terminate the node instance
     def terminate_instances(self, zone, instance_id):
@@ -76,7 +82,9 @@ class GCP:
             logging.error(
                 "Failed to start node instance %s. Encountered following " "exception: %s." % (instance_id, e)
             )
-            sys.exit(1)
+            # removed_exit
+            # sys.exit(1)
+            raise RuntimeError()

     # Reboot the node instance
     def reboot_instances(self, zone, instance_id):
@@ -87,7 +95,9 @@ class GCP:
             logging.error(
                 "Failed to start node instance %s. Encountered following " "exception: %s." % (instance_id, e)
             )
-            sys.exit(1)
+            # removed_exit
+            # sys.exit(1)
+            raise RuntimeError()

     # Get instance status
     def get_instance_status(self, zone, instance_id, expected_status, timeout):
@@ -133,8 +143,10 @@ class GCP:
         return True


+# krkn_lib_kubernetes
 class gcp_node_scenarios(abstract_node_scenarios):
-    def __init__(self):
+    def __init__(self, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
+        super().__init__(kubecli)
         self.gcp = GCP()

     # Node scenario to start the node
@@ -146,7 +158,7 @@ class gcp_node_scenarios(abstract_node_scenarios):
                 logging.info("Starting the node %s with instance ID: %s " % (node, instance_id))
                 self.gcp.start_instances(zone, instance_id)
                 self.gcp.wait_until_running(zone, instance_id, timeout)
-                nodeaction.wait_for_ready_status(node, timeout)
+                nodeaction.wait_for_ready_status(node, timeout, self.kubecli)
                 logging.info("Node with instance ID: %s is in running state" % instance_id)
                 logging.info("node_start_scenario has been successfully injected!")
             except Exception as e:
@@ -154,7 +166,9 @@ class gcp_node_scenarios(abstract_node_scenarios):
                     "Failed to start node instance. Encountered following " "exception: %s. Test Failed" % (e)
                 )
                 logging.error("node_start_scenario injection failed!")
-                sys.exit(1)
+                # removed_exit
+                # sys.exit(1)
+                raise RuntimeError()

     # Node scenario to stop the node
     def node_stop_scenario(self, instance_kill_count, node, timeout):
@@ -167,11 +181,13 @@ class gcp_node_scenarios(abstract_node_scenarios):
                 self.gcp.stop_instances(zone, instance_id)
                 self.gcp.wait_until_stopped(zone, instance_id, timeout)
                 logging.info("Node with instance ID: %s is in stopped state" % instance_id)
-                nodeaction.wait_for_unknown_status(node, timeout)
+                nodeaction.wait_for_unknown_status(node, timeout, self.kubecli)
             except Exception as e:
                 logging.error("Failed to stop node instance. Encountered following exception: %s. " "Test Failed" % (e))
                 logging.error("node_stop_scenario injection failed!")
-                sys.exit(1)
+                # removed_exit
+                # sys.exit(1)
+                raise RuntimeError()

     # Node scenario to terminate the node
     def node_termination_scenario(self, instance_kill_count, node, timeout):
@@ -183,10 +199,10 @@ class gcp_node_scenarios(abstract_node_scenarios):
                 self.gcp.terminate_instances(zone, instance_id)
                 self.gcp.wait_until_terminated(zone, instance_id, timeout)
                 for _ in range(timeout):
-                    if node not in kubecli.list_nodes():
+                    if node not in self.kubecli.list_nodes():
                         break
                     time.sleep(1)
-                if node in kubecli.list_nodes():
+                if node in self.kubecli.list_nodes():
                     raise Exception("Node could not be terminated")
                 logging.info("Node with instance ID: %s has been terminated" % instance_id)
                 logging.info("node_termination_scenario has been successfuly injected!")
@@ -195,7 +211,9 @@ class gcp_node_scenarios(abstract_node_scenarios):
                     "Failed to terminate node instance. Encountered following exception:" " %s. Test Failed" % e
                 )
                 logging.error("node_termination_scenario injection failed!")
-                sys.exit(1)
+                # removed_exit
+                # sys.exit(1)
+                raise RuntimeError()

     # Node scenario to reboot the node
     def node_reboot_scenario(self, instance_kill_count, node, timeout):
@@ -205,7 +223,7 @@ class gcp_node_scenarios(abstract_node_scenarios):
                 instance_id, zone = self.gcp.get_instance_id(node)
                 logging.info("Rebooting the node %s with instance ID: %s " % (node, instance_id))
                 self.gcp.reboot_instances(zone, instance_id)
-                nodeaction.wait_for_ready_status(node, timeout)
+                nodeaction.wait_for_ready_status(node, timeout, self.kubecli)
                 logging.info("Node with instance ID: %s has been rebooted" % instance_id)
                 logging.info("node_reboot_scenario has been successfuly injected!")
             except Exception as e:
@@ -213,4 +231,6 @@ class gcp_node_scenarios(abstract_node_scenarios):
                     "Failed to reboot node instance. Encountered following exception:" " %s. Test Failed" % (e)
                 )
                 logging.error("node_reboot_scenario injection failed!")
-                sys.exit(1)
+                # removed_exit
+                # sys.exit(1)
+                raise RuntimeError()
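With the client injected rather than imported, a scenario class can also be exercised on its own, outside the run loop; a hedged sketch (constructor as assumed above, node name illustrative):

    import krkn_lib_kubernetes
    from kraken.node_actions.gcp_node_scenarios import gcp_node_scenarios

    kubecli = krkn_lib_kubernetes.KrknLibKubernetes(kubeconfig_path="~/.kube/config")  # assumed ctor
    scenario = gcp_node_scenarios(kubecli)
    scenario.node_reboot_scenario(1, "worker-0", 300)  # instance_kill_count, node, timeout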
kraken/node_actions/general_cloud_node_scenarios.py
@@ -1,4 +1,5 @@
 import logging
+import krkn_lib_kubernetes
 from kraken.node_actions.abstract_node_scenarios import abstract_node_scenarios


@@ -6,9 +7,10 @@ class GENERAL:
     def __init__(self):
         pass


+# krkn_lib_kubernetes
 class general_node_scenarios(abstract_node_scenarios):
-    def __init__(self):
+    def __init__(self, kubecli: krkn_lib_kubernetes.KrknLibKubernetes ):
+        super().__init__(kubecli)
         self.general = GENERAL()

     # Node scenario to start the node
kraken/node_actions/openstack_node_scenarios.py
@@ -1,6 +1,7 @@
 import sys
 import time
 import logging
+import krkn_lib_kubernetes
 import kraken.invoke.command as runcommand
 import kraken.node_actions.common_node_functions as nodeaction
 from kraken.node_actions.abstract_node_scenarios import abstract_node_scenarios
@@ -23,7 +24,9 @@ class OPENSTACKCLOUD:
             logging.info("Instance: " + str(node) + " started")
         except Exception as e:
             logging.error("Failed to start node instance %s. Encountered following " "exception: %s." % (node, e))
-            sys.exit(1)
+            # removed_exit
+            # sys.exit(1)
+            raise RuntimeError()

     # Stop the node instance
     def stop_instances(self, node):
@@ -32,7 +35,9 @@ class OPENSTACKCLOUD:
             logging.info("Instance: " + str(node) + " stopped")
         except Exception as e:
             logging.error("Failed to stop node instance %s. Encountered following " "exception: %s." % (node, e))
-            sys.exit(1)
+            # removed_exit
+            # sys.exit(1)
+            raise RuntimeError()

     # Reboot the node instance
     def reboot_instances(self, node):
@@ -41,7 +46,9 @@ class OPENSTACKCLOUD:
             logging.info("Instance: " + str(node) + " rebooted")
         except Exception as e:
             logging.error("Failed to reboot node instance %s. Encountered following " "exception: %s." % (node, e))
-            sys.exit(1)
+            # removed_exit
+            # sys.exit(1)
+            raise RuntimeError()

     # Wait until the node instance is running
     def wait_until_running(self, node, timeout):
@@ -86,9 +93,9 @@ class OPENSTACKCLOUD:
             return node_name
         counter += 1


+# krkn_lib_kubernetes
 class openstack_node_scenarios(abstract_node_scenarios):
-    def __init__(self):
+    def __init__(self, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
         self.openstackcloud = OPENSTACKCLOUD()

     # Node scenario to start the node
@@ -100,7 +107,7 @@ class openstack_node_scenarios(abstract_node_scenarios):
                 openstack_node_name = self.openstackcloud.get_instance_id(node)
                 self.openstackcloud.start_instances(openstack_node_name)
                 self.openstackcloud.wait_until_running(openstack_node_name, timeout)
-                nodeaction.wait_for_ready_status(node, timeout)
+                nodeaction.wait_for_ready_status(node, timeout, self.kubecli)
                 logging.info("Node with instance ID: %s is in running state" % (node))
                 logging.info("node_start_scenario has been successfully injected!")
             except Exception as e:
@@ -108,7 +115,9 @@ class openstack_node_scenarios(abstract_node_scenarios):
                     "Failed to start node instance. Encountered following " "exception: %s. Test Failed" % (e)
                 )
                 logging.error("node_start_scenario injection failed!")
-                sys.exit(1)
+                # removed_exit
+                # sys.exit(1)
+                raise RuntimeError()

     # Node scenario to stop the node
     def node_stop_scenario(self, instance_kill_count, node, timeout):
@@ -120,11 +129,13 @@ class openstack_node_scenarios(abstract_node_scenarios):
                 self.openstackcloud.stop_instances(openstack_node_name)
                 self.openstackcloud.wait_until_stopped(openstack_node_name, timeout)
                 logging.info("Node with instance name: %s is in stopped state" % (node))
-                nodeaction.wait_for_ready_status(node, timeout)
+                nodeaction.wait_for_ready_status(node, timeout, self.kubecli)
             except Exception as e:
                 logging.error("Failed to stop node instance. Encountered following exception: %s. " "Test Failed" % (e))
                 logging.error("node_stop_scenario injection failed!")
-                sys.exit(1)
+                # removed_exit
+                # sys.exit(1)
+                raise RuntimeError()

     # Node scenario to reboot the node
     def node_reboot_scenario(self, instance_kill_count, node, timeout):
@@ -134,8 +145,8 @@ class openstack_node_scenarios(abstract_node_scenarios):
                 logging.info("Rebooting the node %s" % (node))
                 openstack_node_name = self.openstackcloud.get_instance_id(node)
                 self.openstackcloud.reboot_instances(openstack_node_name)
-                nodeaction.wait_for_unknown_status(node, timeout)
-                nodeaction.wait_for_ready_status(node, timeout)
+                nodeaction.wait_for_unknown_status(node, timeout, self.kubecli)
+                nodeaction.wait_for_ready_status(node, timeout, self.kubecli)
                 logging.info("Node with instance name: %s has been rebooted" % (node))
                 logging.info("node_reboot_scenario has been successfuly injected!")
             except Exception as e:
@@ -143,7 +154,9 @@ class openstack_node_scenarios(abstract_node_scenarios):
                     "Failed to reboot node instance. Encountered following exception:" " %s. Test Failed" % (e)
                 )
                 logging.error("node_reboot_scenario injection failed!")
-                sys.exit(1)
+                # removed_exit
+                # sys.exit(1)
+                raise RuntimeError()

     # Node scenario to start the node
     def helper_node_start_scenario(self, instance_kill_count, node_ip, timeout):
@@ -161,7 +174,9 @@ class openstack_node_scenarios(abstract_node_scenarios):
                     "Failed to start node instance. Encountered following " "exception: %s. Test Failed" % (e)
                 )
                 logging.error("helper_node_start_scenario injection failed!")
-                sys.exit(1)
+                # removed_exit
+                # sys.exit(1)
+                raise RuntimeError()

     # Node scenario to stop the node
     def helper_node_stop_scenario(self, instance_kill_count, node_ip, timeout):
@@ -176,7 +191,9 @@ class openstack_node_scenarios(abstract_node_scenarios):
             except Exception as e:
                 logging.error("Failed to stop node instance. Encountered following exception: %s. " "Test Failed" % (e))
                 logging.error("helper_node_stop_scenario injection failed!")
-                sys.exit(1)
+                # removed_exit
+                # sys.exit(1)
+                raise RuntimeError()

     def helper_node_service_status(self, node_ip, service, ssh_private_key, timeout):
         try:
@@ -187,4 +204,6 @@ class openstack_node_scenarios(abstract_node_scenarios):
         except Exception as e:
             logging.error("Failed to check service status. Encountered following exception:" " %s. Test Failed" % (e))
             logging.error("helper_node_service_status injection failed!")
-            sys.exit(1)
+            # removed_exit
+            # sys.exit(1)
+            raise RuntimeError()
kraken/node_actions/run.py
@@ -2,6 +2,7 @@ import yaml
 import logging
 import sys
 import time
+import krkn_lib_kubernetes
 from kraken.node_actions.aws_node_scenarios import aws_node_scenarios
 from kraken.node_actions.general_cloud_node_scenarios import general_node_scenarios
 from kraken.node_actions.az_node_scenarios import azure_node_scenarios
@@ -12,33 +13,35 @@ from kraken.node_actions.bm_node_scenarios import bm_node_scenarios
 from kraken.node_actions.docker_node_scenarios import docker_node_scenarios
 import kraken.node_actions.common_node_functions as common_node_functions
 import kraken.cerberus.setup as cerberus

+from krkn_lib_kubernetes import ScenarioTelemetry, KrknTelemetry

 node_general = False


 # Get the node scenarios object of specfied cloud type
-def get_node_scenario_object(node_scenario):
+# krkn_lib_kubernetes
+def get_node_scenario_object(node_scenario, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
     if "cloud_type" not in node_scenario.keys() or node_scenario["cloud_type"] == "generic":
         global node_general
         node_general = True
-        return general_node_scenarios()
+        return general_node_scenarios(kubecli)
     if node_scenario["cloud_type"] == "aws":
-        return aws_node_scenarios()
+        return aws_node_scenarios(kubecli)
     elif node_scenario["cloud_type"] == "gcp":
-        return gcp_node_scenarios()
+        return gcp_node_scenarios(kubecli)
     elif node_scenario["cloud_type"] == "openstack":
-        return openstack_node_scenarios()
+        return openstack_node_scenarios(kubecli)
     elif node_scenario["cloud_type"] == "azure" or node_scenario["cloud_type"] == "az":
-        return azure_node_scenarios()
+        return azure_node_scenarios(kubecli)
     elif node_scenario["cloud_type"] == "alibaba" or node_scenario["cloud_type"] == "alicloud":
-        return alibaba_node_scenarios()
+        return alibaba_node_scenarios(kubecli)
     elif node_scenario["cloud_type"] == "bm":
         return bm_node_scenarios(
-            node_scenario.get("bmc_info"), node_scenario.get("bmc_user", None), node_scenario.get("bmc_password", None)
+            node_scenario.get("bmc_info"), node_scenario.get("bmc_user", None), node_scenario.get("bmc_password", None),
+            kubecli
         )
     elif node_scenario["cloud_type"] == "docker":
-        return docker_node_scenarios()
+        return docker_node_scenarios(kubecli)
     else:
         logging.error(
             "Cloud type " + node_scenario["cloud_type"] + " is not currently supported; "
@@ -49,25 +52,44 @@ def get_node_scenario_object(node_scenario):


 # Run defined scenarios
-def run(scenarios_list, config, wait_duration):
+# krkn_lib_kubernetes
+def run(scenarios_list, config, wait_duration, kubecli: krkn_lib_kubernetes.KrknLibKubernetes, telemetry: KrknTelemetry) -> (list[str], list[ScenarioTelemetry]):
+    scenario_telemetries: list[ScenarioTelemetry] = []
+    failed_scenarios = []
     for node_scenario_config in scenarios_list:
+        scenario_telemetry = ScenarioTelemetry()
+        scenario_telemetry.scenario = node_scenario_config
+        scenario_telemetry.startTimeStamp = time.time()
+        telemetry.set_parameters_base64(scenario_telemetry, node_scenario_config)
        with open(node_scenario_config, "r") as f:
            node_scenario_config = yaml.full_load(f)
            for node_scenario in node_scenario_config["node_scenarios"]:
-                node_scenario_object = get_node_scenario_object(node_scenario)
+                node_scenario_object = get_node_scenario_object(node_scenario, kubecli)
                if node_scenario["actions"]:
                    for action in node_scenario["actions"]:
                        start_time = int(time.time())
-                        inject_node_scenario(action, node_scenario, node_scenario_object)
-                        logging.info("Waiting for the specified duration: %s" % (wait_duration))
-                        time.sleep(wait_duration)
-                        end_time = int(time.time())
-                        cerberus.get_status(config, start_time, end_time)
-                        logging.info("")
+                        try:
+                            inject_node_scenario(action, node_scenario, node_scenario_object, kubecli)
+                            logging.info("Waiting for the specified duration: %s" % (wait_duration))
+                            time.sleep(wait_duration)
+                            end_time = int(time.time())
+                            cerberus.get_status(config, start_time, end_time)
+                            logging.info("")
+                        except (RuntimeError, Exception) as e:
+                            scenario_telemetry.exitStatus = 1
+                            failed_scenarios.append(node_scenario_config)
+                            telemetry.log_exception(node_scenario_config)
+                        else:
+                            scenario_telemetry.exitStatus = 0

+        scenario_telemetry.endTimeStamp = time.time()
+        scenario_telemetries.append(scenario_telemetry)

+    return failed_scenarios, scenario_telemetries


 # Inject the specified node scenario
-def inject_node_scenario(action, node_scenario, node_scenario_object):
+def inject_node_scenario(action, node_scenario, node_scenario_object, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
     generic_cloud_scenarios = ("stop_kubelet_scenario", "node_crash_scenario")
     # Get the node scenario configurations
     run_kill_count = node_scenario.get("runs", 1)
@@ -83,7 +105,7 @@ def inject_node_scenario(action, node_scenario, node_scenario_object):
     else:
         node_name_list = [node_name]
     for single_node_name in node_name_list:
-        nodes = common_node_functions.get_node(single_node_name, label_selector, instance_kill_count)
+        nodes = common_node_functions.get_node(single_node_name, label_selector, instance_kill_count, kubecli)
         for single_node in nodes:
             if node_general and action not in generic_cloud_scenarios:
                 logging.info("Scenario: " + action + " is not set up for generic cloud type, skipping action")
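run() loads each YAML file, walks its node_scenarios list, resolves cloud_type through the factory above, and records one ScenarioTelemetry per file, with exitStatus set from the try/except around the injection. An example of a file this loop would accept, inferred only from the keys read in this diff (node_name, label_selector, timeout and the exact action names should be checked against the krkn scenario docs):

    node_scenarios:
      - cloud_type: aws
        actions:
          - node_reboot_scenario
        label_selector: node-role.kubernetes.io/worker
        runs: 1
        timeout: 360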
kraken/plugins/__init__.py
@@ -7,10 +7,13 @@ import time

 from arcaflow_plugin_sdk import schema, serialization, jsonschema
 from arcaflow_plugin_kill_pod import kill_pods, wait_for_pods

-import kraken.plugins.vmware.vmware_plugin as vmware_plugin
+import kraken.plugins.node_scenarios.vmware_plugin as vmware_plugin
+import kraken.plugins.node_scenarios.ibmcloud_plugin as ibmcloud_plugin
 from kraken.plugins.run_python_plugin import run_python_file
 from kraken.plugins.network.ingress_shaping import network_chaos
 from kraken.plugins.pod_network_outage.pod_network_outage_plugin import pod_outage
+from kraken.plugins.pod_network_outage.pod_network_outage_plugin import pod_egress_shaping
+from krkn_lib_kubernetes import ScenarioTelemetry, KrknTelemetry


 @dataclasses.dataclass
@@ -41,7 +44,7 @@ class Plugins:
             )
         self.steps_by_id[step.schema.id] = step

-    def run(self, file: str, kubeconfig_path: str):
+    def run(self, file: str, kubeconfig_path: str, kraken_config: str):
         """
         Run executes a series of steps
         """
@@ -88,6 +91,8 @@ class Plugins:
             unserialized_input = step.schema.input.unserialize(entry["config"])
             if "kubeconfig_path" in step.schema.input.properties:
                 unserialized_input.kubeconfig_path = kubeconfig_path
+            if "kraken_config" in step.schema.input.properties:
+                unserialized_input.kraken_config = kraken_config
             output_id, output_data = step.schema(unserialized_input)
             logging.info(step.render_output(output_id, output_data) + "\n")
             if output_id in step.error_output_ids:
@@ -182,25 +187,64 @@ PLUGINS = Plugins(
         ]
     ),
     PluginStep(
-        network_chaos,
+        ibmcloud_plugin.node_start,
         [
             "error"
         ]
-    )
+    ),
+    PluginStep(
+        ibmcloud_plugin.node_stop,
+        [
+            "error"
+        ]
+    ),
+    PluginStep(
+        ibmcloud_plugin.node_reboot,
+        [
+            "error"
+        ]
+    ),
+    PluginStep(
+        ibmcloud_plugin.node_terminate,
+        [
+            "error"
+        ]
+    ),
+    PluginStep(
+        pod_outage,
+        [
+            "error"
+        ]
+    ),
+    PluginStep(
+        pod_egress_shaping,
+        [
+            "error"
+        ]
+    )
 ]
 )


-def run(scenarios: List[str], kubeconfig_path: str, failed_post_scenarios: List[str], wait_duration: int) -> List[str]:
+def run(scenarios: List[str], kubeconfig_path: str, kraken_config: str, failed_post_scenarios: List[str], wait_duration: int, telemetry: KrknTelemetry) -> (List[str], list[ScenarioTelemetry]):
+    scenario_telemetries: list[ScenarioTelemetry] = []
     for scenario in scenarios:
+        scenario_telemetry = ScenarioTelemetry()
+        scenario_telemetry.scenario = scenario
+        scenario_telemetry.startTimeStamp = time.time()
+        telemetry.set_parameters_base64(scenario_telemetry, scenario)
+        logging.info('scenario '+ str(scenario))
         try:
-            PLUGINS.run(scenario, kubeconfig_path)
+            PLUGINS.run(scenario, kubeconfig_path, kraken_config)
         except Exception as e:
+            scenario_telemetry.exitStatus = 1
             failed_post_scenarios.append(scenario)
             logging.error("Error while running {}: {}".format(scenario, e))
-            return failed_post_scenarios
-        logging.info("Waiting for the specified duration: %s" % (wait_duration))
-        time.sleep(wait_duration)
+            telemetry.log_exception(scenario)
+        else:
+            scenario_telemetry.exitStatus = 0
+            logging.info("Waiting for the specified duration: %s" % (wait_duration))
+            time.sleep(wait_duration)
+        scenario_telemetries.append(scenario_telemetry)
+        scenario_telemetry.endTimeStamp = time.time()

-    return failed_post_scenarios
+    return failed_post_scenarios, scenario_telemetries
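The kraken_config parameter is threaded through Plugins.run by duck typing: any step whose input dataclass declares a kraken_config property gets it assigned, exactly as already done for kubeconfig_path. A sketch of a step input that would opt in (only the probed field name comes from this diff; the rest is illustrative):

    import typing
    from dataclasses import dataclass, field

    @dataclass
    class MyStepInput:
        # Plugins.run sees "kraken_config" among the input schema's
        # properties and injects the config path before the step runs
        kraken_config: typing.Optional[str] = field(default=None)
        kubeconfig_path: typing.Optional[str] = field(default=None)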
kraken/plugins/node_scenarios/ibmcloud_plugin.py (new file, 563 lines)
@@ -0,0 +1,563 @@
#!/usr/bin/env python
import sys
import time
import typing
from os import environ
from dataclasses import dataclass, field
import random
from traceback import format_exc
import logging
from kraken.plugins.node_scenarios import kubernetes_functions as kube_helper
from arcaflow_plugin_sdk import validation, plugin
from kubernetes import client, watch
from ibm_vpc import VpcV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_cloud_sdk_core import ApiException
import requests
import sys


class IbmCloud:
    def __init__(self):
        """
        Initialize the ibm cloud client by using the env variables:
            'IBMC_APIKEY' 'IBMC_URL'
        """
        apiKey = environ.get("IBMC_APIKEY")
        service_url = environ.get("IBMC_URL")
        if not apiKey:
            raise Exception(
                "Environmental variable 'IBMC_APIKEY' is not set"
            )
        if not service_url:
            raise Exception(
                "Environmental variable 'IBMC_URL' is not set"
            )
        try:
            authenticator = IAMAuthenticator(apiKey)
            self.service = VpcV1(authenticator=authenticator)

            self.service.set_service_url(service_url)
        except Exception as e:
            logging.error("error authenticating" + str(e))
            sys.exit(1)

    def delete_instance(self, instance_id):
        """
        Deletes the Instance whose name is given by 'instance_id'
        """
        try:
            self.service.delete_instance(instance_id)
            logging.info("Deleted Instance -- '{}'".format(instance_id))
        except Exception as e:
            logging.info(
                "Instance '{}' could not be deleted. ".format(
                    instance_id
                )
            )
            return False

    def reboot_instances(self, instance_id):
        """
        Reboots the Instance whose name is given by 'instance_id'. Returns True if successful, or
        returns False if the Instance is not powered on
        """

        try:
            self.service.create_instance_action(
                instance_id,
                type='reboot',
            )
            logging.info("Reset Instance -- '{}'".format(instance_id))
            return True
        except Exception as e:
            logging.info(
                "Instance '{}' could not be rebooted".format(
                    instance_id
                )
            )
            return False

    def stop_instances(self, instance_id):
        """
        Stops the Instance whose name is given by 'instance_id'. Returns True if successful, or
        returns False if the Instance is already stopped
        """

        try:
            self.service.create_instance_action(
                instance_id,
                type='stop',
            )
            logging.info("Stopped Instance -- '{}'".format(instance_id))
            return True
        except Exception as e:
            logging.info(
                "Instance '{}' could not be stopped".format(instance_id)
            )
            logging.info("error" + str(e))
            return False

    def start_instances(self, instance_id):
        """
        Starts the Instance whose name is given by 'instance_id'. Returns True if successful, or
        returns False if the Instance is already running
        """

        try:
            self.service.create_instance_action(
                instance_id,
                type='start',
            )
            logging.info("Started Instance -- '{}'".format(instance_id))
            return True
        except Exception as e:
            logging.info("Instance '{}' could not start running".format(instance_id))
            return False

    def list_instances(self):
        """
        Returns a list of Instances present in the datacenter
        """
        instance_names = []
        try:
            instances_result = self.service.list_instances().get_result()
            instances_list = instances_result['instances']
            for vpc in instances_list:
                instance_names.append({"vpc_name": vpc['name'], "vpc_id": vpc['id']})
            starting_count = instances_result['total_count']
            while instances_result['total_count'] == instances_result['limit']:
                instances_result = self.service.list_instances(start=starting_count).get_result()
                instances_list = instances_result['instances']
                starting_count += instances_result['total_count']
                for vpc in instances_list:
                    instance_names.append({"vpc_name": vpc.name, "vpc_id": vpc.id})
        except Exception as e:
            logging.error("Error listing out instances: " + str(e))
            sys.exit(1)
        return instance_names

    def find_id_in_list(self, name, vpc_list):
        for vpc in vpc_list:
            if vpc['vpc_name'] == name:
                return vpc['vpc_id']

    def get_instance_status(self, instance_id):
        """
        Returns the status of the Instance whose name is given by 'instance_id'
        """

        try:
            instance = self.service.get_instance(instance_id).get_result()
            state = instance['status']
            return state
        except Exception as e:
            logging.error(
                "Failed to get node instance status %s. Encountered following "
                "exception: %s." % (instance_id, e)
            )
            return None

    def wait_until_deleted(self, instance_id, timeout):
        """
        Waits until the instance is deleted or until the timeout. Returns True if
        the instance is successfully deleted, else returns False
        """

        time_counter = 0
        vpc = self.get_instance_status(instance_id)
        while vpc is not None:
            vpc = self.get_instance_status(instance_id)
            logging.info(
                "Instance %s is still being deleted, sleeping for 5 seconds" % instance_id
            )
            time.sleep(5)
            time_counter += 5
            if time_counter >= timeout:
                logging.info(
                    "Instance %s is still not deleted in allotted time" % instance_id
                )
                return False
        return True

    def wait_until_running(self, instance_id, timeout):
        """
        Waits until the Instance switches to running state or until the timeout.
        Returns True if the Instance switches to running, else returns False
        """

        time_counter = 0
        status = self.get_instance_status(instance_id)
        while status != "running":
            status = self.get_instance_status(instance_id)
            logging.info(
                "Instance %s is still not running, sleeping for 5 seconds" % instance_id
            )
            time.sleep(5)
            time_counter += 5
            if time_counter >= timeout:
                logging.info("Instance %s is still not ready in allotted time" % instance_id)
                return False
        return True

    def wait_until_stopped(self, instance_id, timeout):
        """
        Waits until the Instance switches to stopped state or until the timeout.
        Returns True if the Instance switches to stopped, else returns False
        """

        time_counter = 0
        status = self.get_instance_status(instance_id)
        while status != "stopped":
            status = self.get_instance_status(instance_id)
            logging.info(
                "Instance %s is still not stopped, sleeping for 5 seconds" % instance_id
            )
            time.sleep(5)
            time_counter += 5
            if time_counter >= timeout:
                logging.info("Instance %s is still not stopped in allotted time" % instance_id)
                return False
        return True

    def wait_until_rebooted(self, instance_id, timeout):
        """
        Waits until the Instance switches to restarting state and then running state or until the timeout.
        Returns True if the Instance switches back to running, else returns False
        """

        time_counter = 0
        status = self.get_instance_status(instance_id)
        while status == "starting":
            status = self.get_instance_status(instance_id)
            logging.info(
                "Instance %s is still restarting, sleeping for 5 seconds" % instance_id
            )
            time.sleep(5)
            time_counter += 5
            if time_counter >= timeout:
                logging.info("Instance %s is still restarting after allotted time" % instance_id)
                return False
        self.wait_until_running(instance_id, timeout)
        return True

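The client is configured entirely from the environment, so both variables must be set before any ibmcloud-* step runs; a hedged usage sketch (the endpoint is an example regional URL, verify against the IBM Cloud VPC docs):

    from os import environ

    environ["IBMC_APIKEY"] = "<api key>"                            # normally exported by the shell or CI
    environ["IBMC_URL"] = "https://us-south.iaas.cloud.ibm.com/v1"  # example region endpoint

    ibm = IbmCloud()
    print(ibm.list_instances())  # [{"vpc_name": ..., "vpc_id": ...}, ...]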
@dataclass
class Node:
    name: str


@dataclass
class NodeScenarioSuccessOutput:

    nodes: typing.Dict[int, Node] = field(
        metadata={
            "name": "Nodes started/stopped/terminated/rebooted",
            "description": """Map between timestamps and the pods started/stopped/terminated/rebooted.
                The timestamp is provided in nanoseconds""",
        }
    )
    action: kube_helper.Actions = field(
        metadata={
            "name": "The action performed on the node",
            "description": """The action performed or attempted to be performed on the node. Possible values
                are : Start, Stop, Terminate, Reboot""",
        }
    )


@dataclass
class NodeScenarioErrorOutput:

    error: str
    action: kube_helper.Actions = field(
        metadata={
            "name": "The action performed on the node",
            "description": """The action attempted to be performed on the node. Possible values are : Start
                Stop, Terminate, Reboot""",
        }
    )


@dataclass
class NodeScenarioConfig:

    name: typing.Annotated[
        typing.Optional[str],
        validation.required_if_not("label_selector"),
        validation.required_if("skip_openshift_checks"),
    ] = field(
        default=None,
        metadata={
            "name": "Name",
            "description": "Name(s) for target nodes. Required if label_selector is not set.",
        },
    )

    runs: typing.Annotated[typing.Optional[int], validation.min(1)] = field(
        default=1,
        metadata={
            "name": "Number of runs per node",
            "description": "Number of times to inject each scenario under actions (will perform on same node each time)",
        },
    )

    label_selector: typing.Annotated[
        typing.Optional[str],
        validation.min(1),
        validation.required_if_not("name")
    ] = field(
        default=None,
        metadata={
            "name": "Label selector",
            "description": "Kubernetes label selector for the target nodes. Required if name is not set.\n"
                           "See https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for details.",
        },
    )

    timeout: typing.Annotated[typing.Optional[int], validation.min(1)] = field(
        default=180,
        metadata={
            "name": "Timeout",
            "description": "Timeout to wait for the target pod(s) to be removed in seconds.",
        },
    )

    instance_count: typing.Annotated[typing.Optional[int], validation.min(1)] = field(
        default=1,
        metadata={
            "name": "Instance Count",
            "description": "Number of nodes to perform action/select that match the label selector.",
        },
    )

    skip_openshift_checks: typing.Optional[bool] = field(
        default=False,
        metadata={
            "name": "Skip Openshift Checks",
            "description": "Skip checking the status of the openshift nodes.",
        },
    )

    kubeconfig_path: typing.Optional[str] = field(
        default=None,
        metadata={
            "name": "Kubeconfig path",
            "description": "Path to your Kubeconfig file. Defaults to ~/.kube/config.\n"
                           "See https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/ for "
                           "details.",
        },
    )

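NodeScenarioConfig is the schema the arcaflow SDK validates each step's config block against: either name or label_selector must be present, everything else has a default. A hedged example of a plugin scenario entry using it (the id/config envelope is inferred from Plugins.run reading entry["config"], so verify against an existing krkn plugin scenario file):

    - id: ibmcloud-node-stop
      config:
        name: worker-0
        runs: 1
        timeout: 180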
@plugin.step(
    id="ibmcloud-node-start",
    name="Start the node",
    description="Start the node(s) by starting the Ibmcloud Instance on which the node is configured",
    outputs={"success": NodeScenarioSuccessOutput, "error": NodeScenarioErrorOutput},
)
def node_start(
    cfg: NodeScenarioConfig,
) -> typing.Tuple[
    str, typing.Union[NodeScenarioSuccessOutput, NodeScenarioErrorOutput]
]:
    with kube_helper.setup_kubernetes(None) as cli:
        ibmcloud = IbmCloud()
        core_v1 = client.CoreV1Api(cli)
        watch_resource = watch.Watch()
        node_list = kube_helper.get_node_list(cfg, kube_helper.Actions.START, core_v1)
        node_name_id_list = ibmcloud.list_instances()
        nodes_started = {}
        for name in node_list:
            try:
                for _ in range(cfg.runs):
                    logging.info("Starting node_start_scenario injection")
                    logging.info("Starting the node %s " % (name))
                    instance_id = ibmcloud.find_id_in_list(name, node_name_id_list)
                    if instance_id:
                        vm_started = ibmcloud.start_instances(instance_id)
                        if vm_started:
                            ibmcloud.wait_until_running(instance_id, cfg.timeout)
                            if not cfg.skip_openshift_checks:
                                kube_helper.wait_for_ready_status(
                                    name, cfg.timeout, watch_resource, core_v1
                                )
                        nodes_started[int(time.time_ns())] = Node(name=name)
                        logging.info("Node with instance ID: %s is in running state" % name)
                        logging.info("node_start_scenario has been successfully injected!")
                    else:
                        logging.error("Failed to find node that matched instances on ibm cloud in region")
                        return "error", NodeScenarioErrorOutput(
                            "No matching vpc with node name " + name, kube_helper.Actions.START
                        )
            except Exception as e:
                logging.error("Failed to start node instance. Test Failed")
                logging.error("node_start_scenario injection failed!")
                return "error", NodeScenarioErrorOutput(
                    format_exc(), kube_helper.Actions.START
                )

    return "success", NodeScenarioSuccessOutput(
        nodes_started, kube_helper.Actions.START
    )


@plugin.step(
    id="ibmcloud-node-stop",
    name="Stop the node",
    description="Stop the node(s) by starting the Ibmcloud Instance on which the node is configured",
    outputs={"success": NodeScenarioSuccessOutput, "error": NodeScenarioErrorOutput},
)
def node_stop(
    cfg: NodeScenarioConfig,
) -> typing.Tuple[
    str, typing.Union[NodeScenarioSuccessOutput, NodeScenarioErrorOutput]
]:
    with kube_helper.setup_kubernetes(None) as cli:
        ibmcloud = IbmCloud()
        core_v1 = client.CoreV1Api(cli)
        watch_resource = watch.Watch()
        logging.info('set up done')
        node_list = kube_helper.get_node_list(cfg, kube_helper.Actions.STOP, core_v1)
        logging.info("set node list" + str(node_list))
        node_name_id_list = ibmcloud.list_instances()
        logging.info('node names' + str(node_name_id_list))
        nodes_stopped = {}
        for name in node_list:
            try:
                for _ in range(cfg.runs):
                    logging.info("Starting node_stop_scenario injection")
                    logging.info("Stopping the node %s " % (name))
                    instance_id = ibmcloud.find_id_in_list(name, node_name_id_list)
                    if instance_id:
                        vm_stopped = ibmcloud.stop_instances(instance_id)
                        if vm_stopped:
                            ibmcloud.wait_until_stopped(instance_id, cfg.timeout)
                            if not cfg.skip_openshift_checks:
                                kube_helper.wait_for_ready_status(
                                    name, cfg.timeout, watch_resource, core_v1
                                )
                        nodes_stopped[int(time.time_ns())] = Node(name=name)
                        logging.info("Node with instance ID: %s is in stopped state" % name)
                        logging.info("node_stop_scenario has been successfully injected!")
                    else:
                        logging.error("Failed to find node that matched instances on ibm cloud in region")
                        return "error", NodeScenarioErrorOutput(
                            "No matching vpc with node name " + name, kube_helper.Actions.STOP
                        )
            except Exception as e:
                logging.error("Failed to stop node instance. Test Failed")
                logging.error("node_stop_scenario injection failed!")
                return "error", NodeScenarioErrorOutput(
                    format_exc(), kube_helper.Actions.STOP
                )

    return "success", NodeScenarioSuccessOutput(
        nodes_stopped, kube_helper.Actions.STOP
    )


@plugin.step(
    id="ibmcloud-node-reboot",
    name="Reboot Ibmcloud Instance",
    description="Reboot the node(s) by starting the Ibmcloud Instance on which the node is configured",
    outputs={"success": NodeScenarioSuccessOutput, "error": NodeScenarioErrorOutput},
)
def node_reboot(
    cfg: NodeScenarioConfig,
) -> typing.Tuple[
    str, typing.Union[NodeScenarioSuccessOutput, NodeScenarioErrorOutput]
]:
    with kube_helper.setup_kubernetes(None) as cli:
        ibmcloud = IbmCloud()
        core_v1 = client.CoreV1Api(cli)
        watch_resource = watch.Watch()
        node_list = kube_helper.get_node_list(cfg, kube_helper.Actions.REBOOT, core_v1)
        node_name_id_list = ibmcloud.list_instances()
        nodes_rebooted = {}
        for name in node_list:
            try:
                for _ in range(cfg.runs):
                    logging.info("Starting node_reboot_scenario injection")
                    logging.info("Rebooting the node %s " % (name))
                    instance_id = ibmcloud.find_id_in_list(name, node_name_id_list)
                    if instance_id:
                        ibmcloud.reboot_instances(instance_id)
                        ibmcloud.wait_until_rebooted(instance_id, cfg.timeout)
                        if not cfg.skip_openshift_checks:
                            kube_helper.wait_for_unknown_status(
                                name, cfg.timeout, watch_resource, core_v1
                            )
                            kube_helper.wait_for_ready_status(
                                name, cfg.timeout, watch_resource, core_v1
                            )
                        nodes_rebooted[int(time.time_ns())] = Node(name=name)
                        logging.info(
                            "Node with instance ID: %s has rebooted successfully" % name
                        )
                        logging.info("node_reboot_scenario has been successfully injected!")
                    else:
                        logging.error("Failed to find node that matched instances on ibm cloud in region")
                        return "error", NodeScenarioErrorOutput(
                            "No matching vpc with node name " + name, kube_helper.Actions.REBOOT
                        )
            except Exception as e:
                logging.error("Failed to reboot node instance. Test Failed")
                logging.error("node_reboot_scenario injection failed!")
                return "error", NodeScenarioErrorOutput(
                    format_exc(), kube_helper.Actions.REBOOT
                )

    return "success", NodeScenarioSuccessOutput(
        nodes_rebooted, kube_helper.Actions.REBOOT
    )


@plugin.step(
    id="ibmcloud-node-terminate",
    name="Reboot Ibmcloud Instance",
    description="Wait for node to be deleted",
    outputs={"success": NodeScenarioSuccessOutput, "error": NodeScenarioErrorOutput},
)
def node_terminate(
    cfg: NodeScenarioConfig,
) -> typing.Tuple[
    str, typing.Union[NodeScenarioSuccessOutput, NodeScenarioErrorOutput]
]:
    with kube_helper.setup_kubernetes(None) as cli:
        ibmcloud = IbmCloud()
        core_v1 = client.CoreV1Api(cli)
        node_list = kube_helper.get_node_list(
            cfg, kube_helper.Actions.TERMINATE, core_v1
        )
        node_name_id_list = ibmcloud.list_instances()
        nodes_terminated = {}
        for name in node_list:
            try:
                for _ in range(cfg.runs):
                    logging.info(
                        "Starting node_termination_scenario injection by first stopping the node"
                    )
                    instance_id = ibmcloud.find_id_in_list(name, node_name_id_list)
                    logging.info("Deleting the node with instance ID: %s " % (name))
                    if instance_id:
                        ibmcloud.delete_instance(instance_id)
                        ibmcloud.wait_until_deleted(name, cfg.timeout)
                        nodes_terminated[int(time.time_ns())] = Node(name=name)
                        logging.info("Node with instance ID: %s has been released" % name)
                        logging.info("node_terminate_scenario has been successfully injected!")
                    else:
                        logging.error("Failed to find instances that matched the node specifications on ibm cloud in the set region")
                        return "error", NodeScenarioErrorOutput(
                            "No matching vpc with node name " + name, kube_helper.Actions.TERMINATE
                        )
            except Exception as e:
                logging.error("Failed to terminate node instance. Test Failed")
                logging.error("node_terminate_scenario injection failed!")
                return "error", NodeScenarioErrorOutput(
                    format_exc(), kube_helper.Actions.TERMINATE
                )

    return "success", NodeScenarioSuccessOutput(
        nodes_terminated, kube_helper.Actions.TERMINATE
    )
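All four steps share one contract: they return a tuple of an output id and the matching dataclass, and Plugins.run treats any id listed in the step's error_output_ids (here "error") as a failure. A minimal sketch of calling one step directly under that contract (requires the IBMC_* variables and cluster access described above):

    output_id, output_data = node_reboot(NodeScenarioConfig(name="worker-0"))
    if output_id == "error":
        print(output_data.error)   # traceback captured via format_exc()
    else:
        print(output_data.nodes)   # {timestamp_ns: Node(name=...)}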
kraken/plugins/node_scenarios/vmware_plugin.py
@@ -7,7 +7,6 @@ import typing
 from dataclasses import dataclass, field
 from os import environ
 from traceback import format_exc
-
 import requests
 from arcaflow_plugin_sdk import plugin, validation
 from com.vmware.vapi.std.errors_client import (AlreadyInDesiredState,
@@ -17,7 +16,7 @@ from com.vmware.vcenter_client import VM, ResourcePool
 from kubernetes import client, watch
 from vmware.vapi.vsphere.client import create_vsphere_client

-from kraken.plugins.vmware import kubernetes_functions as kube_helper
+from kraken.plugins.node_scenarios import kubernetes_functions as kube_helper


 class vSphere:
@@ -534,7 +533,7 @@ class NodeScenarioConfig:


 @plugin.step(
-    id="node_start_scenario",
+    id="vmware-node-start",
     name="Start the node",
     description="Start the node(s) by starting the VMware VM "
                 "on which the node is configured",
@@ -593,7 +592,7 @@ def node_start(


 @plugin.step(
-    id="node_stop_scenario",
+    id="vmware-node-stop",
     name="Stop the node",
     description="Stop the node(s) by starting the VMware VM "
                 "on which the node is configured",
@@ -652,7 +651,7 @@ def node_stop(


 @plugin.step(
-    id="node_reboot_scenario",
+    id="vmware-node-reboot",
     name="Reboot VMware VM",
     description="Reboot the node(s) by starting the VMware VM "
                 "on which the node is configured",
@@ -713,13 +712,10 @@ def node_reboot(


 @plugin.step(
-    id="node_terminate_scenario",
+    id="vmware-node-terminate",
     name="Reboot VMware VM",
-    description="Wait for the specified number of pods to be present",
-    outputs={
-        "success": NodeScenarioSuccessOutput,
-        "error": NodeScenarioErrorOutput
-    },
+    description="Wait for the node to be terminated",
+    outputs={"success": NodeScenarioSuccessOutput, "error": NodeScenarioErrorOutput},
 )
 def node_terminate(
     cfg: NodeScenarioConfig,
kraken/plugins/pod_network_outage/cerberus.py (new file, 157 lines)
@@ -0,0 +1,157 @@
import logging
import requests
import sys
import json


def get_status(config, start_time, end_time):
    """
    Function to get Cerberus status

    Args:
        config
            - Kraken config dictionary

        start_time
            - The time when chaos is injected

        end_time
            - The time when chaos is removed

    Returns:
        Cerberus status
    """

    cerberus_status = True
    check_application_routes = False
    application_routes_status = True
    if config["cerberus"]["cerberus_enabled"]:
        cerberus_url = config["cerberus"]["cerberus_url"]
        check_application_routes = config["cerberus"]["check_applicaton_routes"]
        if not cerberus_url:
            logging.error(
                "url where Cerberus publishes True/False signal is not provided.")
            sys.exit(1)
        cerberus_status = requests.get(cerberus_url, timeout=60).content
        cerberus_status = True if cerberus_status == b"True" else False

        # Fail if the application routes monitored by cerberus experience
        # downtime during the chaos
        if check_application_routes:
            application_routes_status, unavailable_routes = application_status(
                cerberus_url, start_time, end_time)
            if not application_routes_status:
                logging.error(
                    "Application routes: %s monitored by cerberus encountered downtime during the run, failing"
                    % unavailable_routes
                )
            else:
                logging.info(
                    "Application routes being monitored didn't encounter any downtime during the run!")

        if not cerberus_status:
            logging.error(
                "Received a no-go signal from Cerberus, looks like "
                "the cluster is unhealthy. Please check the Cerberus "
                "report for more details. Test failed."
            )

        if not application_routes_status or not cerberus_status:
            sys.exit(1)
        else:
            logging.info(
                "Received a go signal from Cerberus, the cluster is healthy. "
                "Test passed.")
    return cerberus_status


def publish_kraken_status(config, failed_post_scenarios, start_time, end_time):
    """
    Function to publish Kraken status to Cerberus

    Args:
        config
            - Kraken config dictionary

        failed_post_scenarios
            - String containing the failed post scenarios

        start_time
            - The time when chaos is injected

        end_time
            - The time when chaos is removed
    """

    cerberus_status = get_status(config, start_time, end_time)
    if not cerberus_status:
        if failed_post_scenarios:
            if config["kraken"]["exit_on_failure"]:
                logging.info(
                    "Cerberus status is not healthy and post action scenarios " "are still failing, exiting kraken run"
                )
                sys.exit(1)
            else:
                logging.info(
                    "Cerberus status is not healthy and post action scenarios "
                    "are still failing")
    else:
        if failed_post_scenarios:
            if config["kraken"]["exit_on_failure"]:
                logging.info(
                    "Cerberus status is healthy but post action scenarios " "are still failing, exiting kraken run"
                )
                sys.exit(1)
            else:
                logging.info(
                    "Cerberus status is healthy but post action scenarios "
                    "are still failing")


def application_status(cerberus_url, start_time, end_time):
    """
    Function to check application availability

    Args:
        cerberus_url
            - url where Cerberus publishes True/False signal

        start_time
            - The time when chaos is injected

        end_time
            - The time when chaos is removed

    Returns:
        Application status and failed routes
    """

    if not cerberus_url:
        logging.error(
            "url where Cerberus publishes True/False signal is not provided.")
        sys.exit(1)
    else:
        duration = (end_time - start_time) / 60
        url = cerberus_url + "/" + "history" + \
            "?" + "loopback=" + str(duration)
        logging.info(
            "Scraping the metrics for the test duration from cerberus url: %s" %
            url)
        try:
            failed_routes = []
            status = True
            metrics = requests.get(url, timeout=60).content
            metrics_json = json.loads(metrics)
            for entry in metrics_json["history"]["failures"]:
                if entry["component"] == "route":
                    name = entry["name"]
                    failed_routes.append(name)
                    status = False
                else:
                    continue
        except Exception as e:
            logging.error(
                "Failed to scrape metrics from cerberus API at %s: %s" %
                (url, e))
            sys.exit(1)
    return status, set(failed_routes)
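
A minimal usage sketch for the helpers above (illustrative only, not part of this diff; the config path and the chaos-injection step are assumptions):

# Hypothetical driver for publish_kraken_status():
import time
import yaml

with open("config/config.yaml") as f:   # assumed Kraken config location
    config = yaml.safe_load(f)

start_time = int(time.time())
# ... inject a chaos scenario here ...
end_time = int(time.time())

# Logs the Cerberus verdict for the window; exits the run when the cluster is
# unhealthy, or when post scenarios failed and exit_on_failure is set.
publish_kraken_status(config, [], start_time, end_time)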
kraken/plugins/pod_network_outage/job.j2 (new file, 25 lines)
@@ -0,0 +1,25 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: chaos-{{jobname}}
spec:
  template:
    spec:
      nodeName: {{nodename}}
      hostNetwork: true
      containers:
      - name: networkchaos
        image: docker.io/fedora/tools
        command: ["chroot", "/host", "/bin/sh", "-c", "{{cmd}}"]
        securityContext:
          privileged: true
        volumeMounts:
        - name: host
          mountPath: /host
      volumes:
      - name: host
        hostPath:
          path: /

      restartPolicy: Never
  backoffLimit: 0
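
For orientation, a sketch of how this job template would typically be rendered into an API body (illustrative values; the real wiring lives in pod_network_outage_plugin.py, whose diff is suppressed below):

# Hypothetical render step, using jinja2 (already pinned in requirements.txt):
import yaml
from jinja2 import Template

with open("kraken/plugins/pod_network_outage/job.j2") as f:
    template = Template(f.read())

rendered = template.render(
    jobname="egress-1",                                  # illustrative
    nodename="worker-0",
    cmd="tc qdisc add dev ens5 root netem loss 100%",    # illustrative
)
body = yaml.safe_load(rendered)
# `body` can be passed to create_job() from kubernetes_functions.py below.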
kraken/plugins/pod_network_outage/kubernetes_functions.py (new file, 263 lines)
@@ -0,0 +1,263 @@
from kubernetes import config, client
from kubernetes.client.rest import ApiException
from kubernetes.stream import stream
import sys
import time
import logging
import random


def setup_kubernetes(kubeconfig_path) -> client.ApiClient:
    """
    Sets up the Kubernetes client
    """
    if kubeconfig_path is None:
        kubeconfig_path = config.KUBE_CONFIG_DEFAULT_LOCATION
    client_config = config.load_kube_config(kubeconfig_path)
    return client.ApiClient(client_config)


def create_job(batch_cli, body, namespace="default"):
    """
    Function used to create a job from a YAML config
    """

    try:
        api_response = batch_cli.create_namespaced_job(
            body=body, namespace=namespace)
        return api_response
    except ApiException as api:
        logging.warn(
            "Exception when calling \
            BatchV1Api->create_job: %s"
            % api
        )
        if api.status == 409:
            logging.warn("Job already present")
    except Exception as e:
        logging.error(
            "Exception when calling \
            BatchV1Api->create_namespaced_job: %s"
            % e
        )
        raise


def delete_pod(cli, name, namespace):
    """
    Function that deletes a pod and waits until deletion is complete
    """

    try:
        cli.delete_namespaced_pod(name=name, namespace=namespace)
        while cli.read_namespaced_pod(name=name, namespace=namespace):
            time.sleep(1)
    except ApiException as e:
        if e.status == 404:
            logging.info("Pod deleted")
        else:
            logging.error("Failed to delete pod %s" % e)
            raise e


def create_pod(cli, body, namespace, timeout=120):
    """
    Function used to create a pod from a YAML config
    """

    try:
        pod_stat = None
        pod_stat = cli.create_namespaced_pod(body=body, namespace=namespace)
        end_time = time.time() + timeout
        while True:
            pod_stat = cli.read_namespaced_pod(
                name=body["metadata"]["name"], namespace=namespace)
            if pod_stat.status.phase == "Running":
                break
            if time.time() > end_time:
                raise Exception("Starting pod failed")
            time.sleep(1)
    except Exception as e:
        logging.error("Pod creation failed %s" % e)
        if pod_stat:
            logging.error(pod_stat.status.container_statuses)
        delete_pod(cli, body["metadata"]["name"], namespace)
        sys.exit(1)


def exec_cmd_in_pod(cli, command, pod_name, namespace, container=None):
    """
    Function used to execute a command in a running pod
    """

    exec_command = command
    try:
        if container:
            ret = stream(
                cli.connect_get_namespaced_pod_exec,
                pod_name,
                namespace,
                container=container,
                command=exec_command,
                stderr=True,
                stdin=False,
                stdout=True,
                tty=False,
            )
        else:
            ret = stream(
                cli.connect_get_namespaced_pod_exec,
                pod_name,
                namespace,
                command=exec_command,
                stderr=True,
                stdin=False,
                stdout=True,
                tty=False,
            )
    except BaseException:
        return False

    return ret


def list_pods(cli, namespace, label_selector=None):
    """
    Function used to list pods in a given namespace and having a certain label
    """

    pods = []
    try:
        if label_selector:
            ret = cli.list_namespaced_pod(
                namespace, pretty=True, label_selector=label_selector)
        else:
            ret = cli.list_namespaced_pod(namespace, pretty=True)
    except ApiException as e:
        logging.error(
            "Exception when calling \
            CoreV1Api->list_namespaced_pod: %s\n"
            % e
        )
        raise e
    for pod in ret.items:
        pods.append(pod.metadata.name)

    return pods


def get_job_status(batch_cli, name, namespace="default"):
    """
    Function that retrieves the status of a running job in a given namespace
    """

    try:
        return batch_cli.read_namespaced_job_status(
            name=name, namespace=namespace)
    except Exception as e:
        logging.error(
            "Exception when calling \
            BatchV1Api->read_namespaced_job_status: %s"
            % e
        )
        raise


def get_pod_log(cli, name, namespace="default"):
    """
    Function that retrieves the logs of a running pod in a given namespace
    """

    return cli.read_namespaced_pod_log(
        name=name, namespace=namespace, _return_http_data_only=True, _preload_content=False
    )


def read_pod(cli, name, namespace="default"):
    """
    Function that retrieves the info of a running pod in a given namespace
    """

    return cli.read_namespaced_pod(name=name, namespace=namespace)


def delete_job(batch_cli, name, namespace="default"):
    """
    Deletes a job with the input name and namespace
    """

    try:
        api_response = batch_cli.delete_namespaced_job(
            name=name,
            namespace=namespace,
            body=client.V1DeleteOptions(
                propagation_policy="Foreground", grace_period_seconds=0),
        )
        logging.debug("Job deleted. status='%s'" % str(api_response.status))
        return api_response
    except ApiException as api:
        logging.warn(
            "Exception when calling \
            BatchV1Api->create_namespaced_job: %s"
            % api
        )
        logging.warn("Job already deleted\n")
    except Exception as e:
        logging.error(
            "Exception when calling \
            BatchV1Api->delete_namespaced_job: %s\n"
            % e
        )
        sys.exit(1)


def list_ready_nodes(cli, label_selector=None):
    """
    Returns a list of ready nodes
    """

    nodes = []
    try:
        if label_selector:
            ret = cli.list_node(pretty=True, label_selector=label_selector)
        else:
            ret = cli.list_node(pretty=True)
    except ApiException as e:
        logging.error("Exception when calling CoreV1Api->list_node: %s\n" % e)
        raise e
    for node in ret.items:
        for cond in node.status.conditions:
            if str(cond.type) == "Ready" and str(cond.status) == "True":
                nodes.append(node.metadata.name)

    return nodes


def get_node(node_name, label_selector, instance_kill_count, cli):
    """
    Returns active node(s) on which the scenario can be performed
    """

    if node_name in list_ready_nodes(cli):
        return [node_name]
    elif node_name:
        logging.info(
            "Node with provided node_name does not exist or the node might "
            "be in NotReady state."
        )
    nodes = list_ready_nodes(cli, label_selector)
    if not nodes:
        raise Exception(
            "Ready nodes with the provided label selector do not exist")
    logging.info(
        "Ready nodes with the label selector %s: %s" % (label_selector, nodes)
    )
    number_of_nodes = len(nodes)
    if instance_kill_count == number_of_nodes:
        return nodes
    nodes_to_return = []
    for i in range(instance_kill_count):
        node_to_add = nodes[random.randint(0, len(nodes) - 1)]
        nodes_to_return.append(node_to_add)
        nodes.remove(node_to_add)
    return nodes_to_return
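
A short, illustrative driver for these helpers (the label selector and counts are assumptions, not part of the diff):

# Hypothetical usage of setup_kubernetes()/get_node():
from kubernetes import client

api_client = setup_kubernetes(None)     # falls back to ~/.kube/config
core_v1 = client.CoreV1Api(api_client)

targets = get_node(
    node_name=None,
    label_selector="node-role.kubernetes.io/worker",
    instance_kill_count=1,
    cli=core_v1,
)
print("chaos target:", targets)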
kraken/plugins/pod_network_outage/pod_module.j2 (new file, 30 lines)
@@ -0,0 +1,30 @@
apiVersion: v1
kind: Pod
metadata:
  name: modtools
spec:
  nodeName: {{nodename}}
  containers:
  - name: modtools
    image: docker.io/fedora/tools
    imagePullPolicy: IfNotPresent
    command:
    - /bin/sh
    - -c
    - "trap : TERM INT; sleep infinity & wait"
    tty: true
    stdin: true
    stdinOnce: true
    securityContext:
      privileged: true
    volumeMounts:
    - name: host
      mountPath: /host
  volumes:
  - name: host
    hostPath:
      path: /
  hostNetwork: true
  hostIPC: true
  hostPID: true
  restartPolicy: Never
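
Sketch of how this debug-pod template plausibly gets rendered and started with the helpers from kubernetes_functions.py above (illustrative node name and paths):

# Hypothetical wiring of pod_module.j2 with setup_kubernetes()/create_pod():
import yaml
from jinja2 import Template
from kubernetes import client

api_client = setup_kubernetes(None)           # falls back to ~/.kube/config
core_v1 = client.CoreV1Api(api_client)

with open("kraken/plugins/pod_network_outage/pod_module.j2") as f:
    pod_body = yaml.safe_load(Template(f.read()).render(nodename="worker-0"))

create_pod(core_v1, pod_body, "default")      # waits until the pod is Running
# exec_cmd_in_pod(core_v1, ["ls", "/host"], "modtools", "default") can then
# run commands inside the privileged "modtools" pod.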
kraken/plugins/pod_network_outage/pod_network_outage_plugin.py (new executable file, 1151 lines; diff suppressed because it is too large)
@@ -5,12 +5,12 @@ import arcaflow_plugin_kill_pod

import kraken.cerberus.setup as cerberus
import kraken.post_actions.actions as post_actions
import kraken.kubernetes.client as kubecli
import krkn_lib_kubernetes
import time
import yaml
import sys
import random

from krkn_lib_kubernetes import ScenarioTelemetry, KrknTelemetry

# Run pod based scenarios
def run(kubeconfig_path, scenarios_list, config, failed_post_scenarios, wait_duration):
@@ -66,9 +66,23 @@ def run(kubeconfig_path, scenarios_list, config, failed_post_scenarios, wait_dur
        cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
    return failed_post_scenarios

# krkn_lib_kubernetes
def container_run(kubeconfig_path,
                  scenarios_list,
                  config,
                  failed_post_scenarios,
                  wait_duration,
                  kubecli: krkn_lib_kubernetes.KrknLibKubernetes,
                  telemetry: KrknTelemetry) -> (list[str], list[ScenarioTelemetry]):

    failed_scenarios = []
    scenario_telemetries: list[ScenarioTelemetry] = []

def container_run(kubeconfig_path, scenarios_list, config, failed_post_scenarios, wait_duration):
    for container_scenario_config in scenarios_list:
        scenario_telemetry = ScenarioTelemetry()
        scenario_telemetry.scenario = container_scenario_config[0]
        scenario_telemetry.startTimeStamp = time.time()
        telemetry.set_parameters_base64(scenario_telemetry, container_scenario_config[0])
        if len(container_scenario_config) > 1:
            pre_action_output = post_actions.run(kubeconfig_path, container_scenario_config[1])
        else:
@@ -78,33 +92,44 @@ def container_run(kubeconfig_path, scenarios_list, config, failed_post_scenarios
        for cont_scenario in cont_scenario_config["scenarios"]:
            # capture start time
            start_time = int(time.time())
            killed_containers = container_killing_in_pod(cont_scenario)

            if len(container_scenario_config) > 1:
                try:
            try:
                killed_containers = container_killing_in_pod(cont_scenario, kubecli)
                if len(container_scenario_config) > 1:
                    failed_post_scenarios = post_actions.check_recovery(
                        kubeconfig_path, container_scenario_config, failed_post_scenarios, pre_action_output
                        kubeconfig_path,
                        container_scenario_config,
                        failed_post_scenarios,
                        pre_action_output
                    )
                except Exception as e:
                    logging.error("Failed to run post action checks: %s" % e)
                    sys.exit(1)
                else:
                    failed_post_scenarios = check_failed_containers(
                        killed_containers, cont_scenario.get("retry_wait", 120), kubecli
                    )

                logging.info("Waiting for the specified duration: %s" % (wait_duration))
                time.sleep(wait_duration)

                # capture end time
                end_time = int(time.time())

                # publish cerberus status
                cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
            except (RuntimeError, Exception):
                failed_scenarios.append(container_scenario_config[0])
                telemetry.log_exception(container_scenario_config[0])
                scenario_telemetry.exitStatus = 1
                # removed_exit
                # sys.exit(1)
            else:
                failed_post_scenarios = check_failed_containers(
                    killed_containers, cont_scenario.get("retry_wait", 120)
                )
                scenario_telemetry.exitStatus = 0
            scenario_telemetry.endTimeStamp = time.time()
            scenario_telemetries.append(scenario_telemetry)

        logging.info("Waiting for the specified duration: %s" % (wait_duration))
        time.sleep(wait_duration)

        # capture end time
        end_time = int(time.time())

        # publish cerberus status
        cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
    logging.info("")
    return failed_scenarios, scenario_telemetries


def container_killing_in_pod(cont_scenario):

def container_killing_in_pod(cont_scenario, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
    scenario_name = cont_scenario.get("name", "")
    namespace = cont_scenario.get("namespace", "*")
    label_selector = cont_scenario.get("label_selector", None)
@@ -114,7 +139,9 @@ def container_killing_in_pod(cont_scenario):
    kill_count = cont_scenario.get("count", 1)
    if type(pod_names) != list:
        logging.error("Please make sure your pod_names are in a list format")
        sys.exit(1)
        # removed_exit
        # sys.exit(1)
        raise RuntimeError()
    if len(pod_names) == 0:
        if namespace == "*":
            # returns double array of pod name and namespace
@@ -126,7 +153,9 @@ def container_killing_in_pod(cont_scenario):
        if namespace == "*":
            logging.error("You must specify the namespace to kill a container in a specific pod")
            logging.error("Scenario " + scenario_name + " failed")
            sys.exit(1)
            # removed_exit
            # sys.exit(1)
            raise RuntimeError()
        pods = pod_names
    # get container and pod name
    container_pod_list = []
@@ -147,17 +176,19 @@ def container_killing_in_pod(cont_scenario):
        if len(container_pod_list) == 0:
            logging.error("Trying to kill more containers than were found, try lowering kill count")
            logging.error("Scenario " + scenario_name + " failed")
            sys.exit(1)
            # removed_exit
            # sys.exit(1)
            raise RuntimeError()
        selected_container_pod = container_pod_list[random.randint(0, len(container_pod_list) - 1)]
        for c_name in selected_container_pod[2]:
            if container_name != "":
                if c_name == container_name:
                    killed_container_list.append([selected_container_pod[0], selected_container_pod[1], c_name])
                    retry_container_killing(kill_action, selected_container_pod[0], selected_container_pod[1], c_name)
                    retry_container_killing(kill_action, selected_container_pod[0], selected_container_pod[1], c_name, kubecli)
                    break
            else:
                killed_container_list.append([selected_container_pod[0], selected_container_pod[1], c_name])
                retry_container_killing(kill_action, selected_container_pod[0], selected_container_pod[1], c_name)
                retry_container_killing(kill_action, selected_container_pod[0], selected_container_pod[1], c_name, kubecli)
                break
        container_pod_list.remove(selected_container_pod)
        killed_count += 1
@@ -165,7 +196,7 @@ def container_killing_in_pod(cont_scenario):
    return killed_container_list


def retry_container_killing(kill_action, podname, namespace, container_name):
def retry_container_killing(kill_action, podname, namespace, container_name, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
    i = 0
    while i < 5:
        logging.info("Killing container %s in pod %s (ns %s)" % (str(container_name), str(podname), str(namespace)))
@@ -178,10 +209,11 @@ def retry_container_killing(kill_action, podname, namespace, container_name):
            time.sleep(2)
            continue
        else:
            logging.warning(response)
            continue


def check_failed_containers(killed_container_list, wait_time):
def check_failed_containers(killed_container_list, wait_time, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):

    container_ready = []
    timer = 0
@@ -1,5 +1,37 @@
import urllib3
import logging
import prometheus_api_client
import sys
import kraken.invoke.command as runcommand

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Initialize the client
def initialize_prom_client(distribution, prometheus_url, prometheus_bearer_token):
    global prom_cli
    prometheus_url, prometheus_bearer_token = instance(distribution, prometheus_url, prometheus_bearer_token)
    if prometheus_url and prometheus_bearer_token:
        bearer = "Bearer " + prometheus_bearer_token
        headers = {"Authorization": bearer}
        try:
            prom_cli = prometheus_api_client.PrometheusConnect(url=prometheus_url, headers=headers, disable_ssl=True)
        except Exception as e:
            logging.error("Not able to initialize the client %s" % e)
            sys.exit(1)
    else:
        prom_cli = None


# Process custom prometheus query
def process_prom_query(query):
    if prom_cli:
        try:
            return prom_cli.custom_query(query=query, params=None)
        except Exception as e:
            logging.error("Failed to get the metrics: %s" % e)
            sys.exit(1)
    else:
        logging.info("Skipping the prometheus query as the prometheus client couldn't " "be initialized\n")

# Get prometheus details
def instance(distribution, prometheus_url, prometheus_bearer_token):
@@ -10,7 +42,8 @@ def instance(distribution, prometheus_url, prometheus_bearer_token):
        prometheus_url = "https://" + url
    if distribution == "openshift" and not prometheus_bearer_token:
        prometheus_bearer_token = runcommand.invoke(
            "oc -n openshift-monitoring sa get-token prometheus-k8s "
            "|| oc create token -n openshift-monitoring prometheus-k8s"
            "oc create token -n openshift-monitoring prometheus-k8s --duration=12h "
            "|| oc -n openshift-monitoring sa get-token prometheus-k8s "
            "|| oc sa new-token -n openshift-monitoring prometheus-k8s"
        )
    return prometheus_url, prometheus_bearer_token
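
A brief, hedged usage sketch of the Prometheus helpers above (the PromQL query is an example, not from the diff; on OpenShift the URL and token are auto-discovered by instance()):

# Hypothetical usage of initialize_prom_client()/process_prom_query():
initialize_prom_client("openshift", prometheus_url="", prometheus_bearer_token="")
result = process_prom_query('sum(kube_node_status_condition{condition="Ready",status="true"})')
print(result)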


@@ -3,155 +3,237 @@ import random
import re
import sys
import time

import krkn_lib_kubernetes
import yaml

from ..cerberus import setup as cerberus
from ..kubernetes import client as kubecli
from krkn_lib_kubernetes import ScenarioTelemetry, KrknTelemetry


def run(scenarios_list, config):
# krkn_lib_kubernetes
def run(scenarios_list, config, kubecli: krkn_lib_kubernetes.KrknLibKubernetes, telemetry: KrknTelemetry) -> (list[str], list[ScenarioTelemetry]):
    """
    Reads the scenario config and creates a temp file to fill up the PVC
    """
    failed_post_scenarios = ""
    scenario_telemetries: list[ScenarioTelemetry] = []
    failed_scenarios = []
    for app_config in scenarios_list:
        if len(app_config) > 1:
            with open(app_config, "r") as f:
                config_yaml = yaml.full_load(f)
                scenario_config = config_yaml["pvc_scenario"]
                pvc_name = scenario_config.get("pvc_name", "")
                pod_name = scenario_config.get("pod_name", "")
                namespace = scenario_config.get("namespace", "")
                target_fill_percentage = scenario_config.get(
                    "fill_percentage", "50"
                )
                duration = scenario_config.get("duration", 60)
        scenario_telemetry = ScenarioTelemetry()
        scenario_telemetry.scenario = app_config
        scenario_telemetry.startTimeStamp = time.time()
        telemetry.set_parameters_base64(scenario_telemetry, app_config)
        try:
            if len(app_config) > 1:
                with open(app_config, "r") as f:
                    config_yaml = yaml.full_load(f)
                    scenario_config = config_yaml["pvc_scenario"]
                    pvc_name = scenario_config.get("pvc_name", "")
                    pod_name = scenario_config.get("pod_name", "")
                    namespace = scenario_config.get("namespace", "")
                    target_fill_percentage = scenario_config.get(
                        "fill_percentage", "50"
                    )
                    duration = scenario_config.get("duration", 60)

            logging.info(
                "Input params:\n"
                "pvc_name: '%s'\n"
                "pod_name: '%s'\n"
                "namespace: '%s'\n"
                "target_fill_percentage: '%s%%'\nduration: '%ss'"
                % (
                    str(pvc_name),
                    str(pod_name),
                    str(namespace),
                    str(target_fill_percentage),
                    str(duration)
                )
            )

            # Check input params
            if namespace is None:
                logging.error(
                    "You must specify the namespace where the PVC is"
                )
                sys.exit(1)
            if pvc_name is None and pod_name is None:
                logging.error(
                    "You must specify the pvc_name or the pod_name"
                )
                sys.exit(1)
            if pvc_name and pod_name:
                logging.info(
                    "pod_name will be ignored, pod_name used will be "
                    "retrieved from the pod used in the pvc_name"
                "Input params:\n"
                "pvc_name: '%s'\n"
                "pod_name: '%s'\n"
                "namespace: '%s'\n"
                "target_fill_percentage: '%s%%'\nduration: '%ss'"
                % (
                    str(pvc_name),
                    str(pod_name),
                    str(namespace),
                    str(target_fill_percentage),
                    str(duration)
                )
            )

            # Get pod name
            if pvc_name:
                if pod_name:
                    logging.info(
                        "pod_name '%s' will be overridden with one of "
                        "the pods mounted in the PVC" % (str(pod_name))
                    )
                pvc = kubecli.get_pvc_info(pvc_name, namespace)
                try:
                    # random generator not used for
                    # security/cryptographic purposes.
                    pod_name = random.choice(pvc.podNames)  # nosec
                    logging.info("Pod name: %s" % pod_name)
                except Exception:
            # Check input params
            if namespace is None:
                logging.error(
                    "Pod associated with %s PVC, on namespace %s, "
                    "not found" % (str(pvc_name), str(namespace))
                    "You must specify the namespace where the PVC is"
                )
                sys.exit(1)

            # Get volume name
            pod = kubecli.get_pod_info(name=pod_name, namespace=namespace)

            if pod is None:
                logging.error(
                    "Exiting as pod '%s' doesn't exist "
                    "in namespace '%s'" % (
                        str(pod_name),
                        str(namespace)
                #sys.exit(1)
                raise RuntimeError()
            if pvc_name is None and pod_name is None:
                logging.error(
                    "You must specify the pvc_name or the pod_name"
                )
                # sys.exit(1)
                raise RuntimeError()
            if pvc_name and pod_name:
                logging.info(
                    "pod_name will be ignored, pod_name used will be "
                    "retrieved from the pod used in the pvc_name"
                    )
                )
                sys.exit(1)

            for volume in pod.volumes:
                if volume.pvcName is not None:
                    volume_name = volume.name
                    pvc_name = volume.pvcName
            # Get pod name
            if pvc_name:
                if pod_name:
                    logging.info(
                        "pod_name '%s' will be overridden with one of "
                        "the pods mounted in the PVC" % (str(pod_name))
                    )
                    pvc = kubecli.get_pvc_info(pvc_name, namespace)
                    break
            if 'pvc' not in locals():
                logging.error(
                    "Pod '%s' in namespace '%s' does not use a pvc" % (
                try:
                    # random generator not used for
                    # security/cryptographic purposes.
                    pod_name = random.choice(pvc.podNames)  # nosec
                    logging.info("Pod name: %s" % pod_name)
                except Exception:
                    logging.error(
                        "Pod associated with %s PVC, on namespace %s, "
                        "not found" % (str(pvc_name), str(namespace))
                    )
                    # sys.exit(1)
                    raise RuntimeError()

            # Get volume name
            pod = kubecli.get_pod_info(name=pod_name, namespace=namespace)

            if pod is None:
                logging.error(
                    "Exiting as pod '%s' doesn't exist "
                    "in namespace '%s'" % (
                        str(pod_name),
                        str(namespace)
                    )
                )
                # sys.exit(1)
                raise RuntimeError()

            for volume in pod.volumes:
                if volume.pvcName is not None:
                    volume_name = volume.name
                    pvc_name = volume.pvcName
                    pvc = kubecli.get_pvc_info(pvc_name, namespace)
                    break
            if 'pvc' not in locals():
                logging.error(
                    "Pod '%s' in namespace '%s' does not use a pvc" % (
                        str(pod_name),
                        str(namespace)
                    )
                )
                # sys.exit(1)
                raise RuntimeError()
            logging.info("Volume name: %s" % volume_name)
            logging.info("PVC name: %s" % pvc_name)

            # Get container name and mount path
            for container in pod.containers:
                for vol in container.volumeMounts:
                    if vol.name == volume_name:
                        mount_path = vol.mountPath
                        container_name = container.name
                        break
            logging.info("Container path: %s" % container_name)
            logging.info("Mount path: %s" % mount_path)

            # Get PVC capacity and used bytes
            command = "df %s -B 1024 | sed 1d" % (str(mount_path))
            command_output = (
                kubecli.exec_cmd_in_pod(
                    command,
                    pod_name,
                    namespace,
                    container_name,
                    "sh"
                )
            ).split()
            pvc_used_kb = int(command_output[2])
            pvc_capacity_kb = pvc_used_kb + int(command_output[3])
            logging.info("PVC used: %s KB" % pvc_used_kb)
            logging.info("PVC capacity: %s KB" % pvc_capacity_kb)

            # Check valid fill percentage
            current_fill_percentage = pvc_used_kb / pvc_capacity_kb
            if not (
                current_fill_percentage * 100
                < float(target_fill_percentage)
                <= 99
            ):
                logging.error(
                    "Target fill percentage (%.2f%%) is lower than "
                    "current fill percentage (%.2f%%) "
                    "or higher than 99%%" % (
                        target_fill_percentage,
                        current_fill_percentage * 100
                    )
                )
                # sys.exit(1)
                raise RuntimeError()

            # Calculate file size
            file_size_kb = int(
                (
                    float(
                        target_fill_percentage / 100
                    ) * float(pvc_capacity_kb)
                ) - float(pvc_used_kb)
            )
            logging.debug("File size: %s KB" % file_size_kb)

            file_name = "kraken.tmp"
            logging.info(
                "Creating %s file, %s KB size, in pod %s at %s (ns %s)"
                % (
                    str(file_name),
                    str(file_size_kb),
                    str(pod_name),
                    str(mount_path),
                    str(namespace)
                )
            )
                sys.exit(1)
            logging.info("Volume name: %s" % volume_name)
            logging.info("PVC name: %s" % pvc_name)

            # Get container name and mount path
            for container in pod.containers:
                for vol in container.volumeMounts:
                    if vol.name == volume_name:
                        mount_path = vol.mountPath
                        container_name = container.name
                        break
            logging.info("Container path: %s" % container_name)
            logging.info("Mount path: %s" % mount_path)

            # Get PVC capacity and used bytes
            command = "df %s -B 1024 | sed 1d" % (str(mount_path))
            command_output = (
            start_time = int(time.time())
            # Create temp file in the PVC
            full_path = "%s/%s" % (str(mount_path), str(file_name))
            command = "fallocate -l $((%s*1024)) %s" % (
                str(file_size_kb),
                str(full_path)
            )
            logging.debug(
                "Create temp file in the PVC command:\n %s" % command
            )
            kubecli.exec_cmd_in_pod(
                command,
                pod_name,
                namespace,
                container_name,
                "sh"
                )
            ).split()
            pvc_used_kb = int(command_output[2])
            pvc_capacity_kb = pvc_used_kb + int(command_output[3])
            logging.info("PVC used: %s KB" % pvc_used_kb)
            logging.info("PVC capacity: %s KB" % pvc_capacity_kb)

            # Check valid fill percentage
            current_fill_percentage = pvc_used_kb / pvc_capacity_kb
            if not (
                current_fill_percentage * 100
                < float(target_fill_percentage)
                <= 99
            ):
                logging.error(
                    "Target fill percentage (%.2f%%) is lower than "
                    "current fill percentage (%.2f%%) "
                    "or higher than 99%%" % (
                        target_fill_percentage,
                        current_fill_percentage * 100
                    )
            # Check if file is created
            command = "ls -lh %s" % (str(mount_path))
            logging.debug("Check file is created command:\n %s" % command)
            response = kubecli.exec_cmd_in_pod(
                command, pod_name, namespace, container_name, "sh"
            )
                sys.exit(1)
            logging.info("\n" + str(response))
            if str(file_name).lower() in str(response).lower():
                logging.info(
                    "%s file successfully created" % (str(full_path))
                )
            else:
                logging.error(
                    "Failed to create tmp file with %s size" % (
                        str(file_size_kb)
                    )
                )
                remove_temp_file(
                    file_name,
                    full_path,
                    pod_name,
                    namespace,
                    container_name,
                    mount_path,
                    file_size_kb,
                    kubecli
                )
                # sys.exit(1)
                raise RuntimeError()

            # Calculate file size
            file_size_kb = int(
@@ -186,26 +268,25 @@ def run(scenarios_list, config):
                "Create temp file in the PVC command:\n %s" % command
            )
            kubecli.exec_cmd_in_pod(
                command, pod_name, namespace, container_name, "sh"
                command, pod_name, namespace, container_name
            )

            # Check if file is created
            command = "ls -lh %s" % (str(mount_path))
            logging.debug("Check file is created command:\n %s" % command)
            response = kubecli.exec_cmd_in_pod(
                command, pod_name, namespace, container_name, "sh"
                command, pod_name, namespace, container_name
            )
            logging.info("\n" + str(response))
            if str(file_name).lower() in str(response).lower():
                logging.info(
                    "%s file successfully created" % (str(full_path))
                )
            else:
                logging.error(
                    "Failed to create tmp file with %s size" % (
                        str(file_size_kb)
                    "Waiting for the specified duration in the config: %ss" % (
                        duration
                    )
                )
                time.sleep(duration)
                logging.info("Finish waiting")

                remove_temp_file(
                    file_name,
                    full_path,
@@ -213,38 +294,31 @@ def run(scenarios_list, config):
                    namespace,
                    container_name,
                    mount_path,
                    file_size_kb
                    file_size_kb,
                    kubecli
                )
                sys.exit(1)

            # Wait for the specified duration
            logging.info(
                "Waiting for the specified duration in the config: %ss" % (
                    duration
                end_time = int(time.time())
                cerberus.publish_kraken_status(
                    config,
                    failed_post_scenarios,
                    start_time,
                    end_time
                )
            )
            time.sleep(duration)
            logging.info("Finish waiting")
        except (RuntimeError, Exception):
            scenario_telemetry.exitStatus = 1
            failed_scenarios.append(app_config)
            telemetry.log_exception(app_config)
        else:
            scenario_telemetry.exitStatus = 0
        scenario_telemetries.append(scenario_telemetry)

        remove_temp_file(
            file_name,
            full_path,
            pod_name,
            namespace,
            container_name,
            mount_path,
            file_size_kb
        )

        end_time = int(time.time())
        cerberus.publish_kraken_status(
            config,
            failed_post_scenarios,
            start_time,
            end_time
        )
    return failed_scenarios, scenario_telemetries




# krkn_lib_kubernetes
def remove_temp_file(
    file_name,
    full_path,
@@ -252,19 +326,19 @@ def remove_temp_file(
    namespace,
    container_name,
    mount_path,
    file_size_kb
    file_size_kb,
    kubecli: krkn_lib_kubernetes.KrknLibKubernetes
):
    command = "rm -f %s" % (str(full_path))
    logging.debug("Remove temp file from the PVC command:\n %s" % command)
    kubecli.exec_cmd_in_pod(command, pod_name, namespace, container_name, "sh")
    kubecli.exec_cmd_in_pod(command, pod_name, namespace, container_name)
    command = "ls -lh %s" % (str(mount_path))
    logging.debug("Check temp file is removed command:\n %s" % command)
    response = kubecli.exec_cmd_in_pod(
        command,
        pod_name,
        namespace,
        container_name,
        "sh"
        container_name
    )
    logging.info("\n" + str(response))
    if not (str(file_name).lower() in str(response).lower()):
@@ -273,7 +347,7 @@ def remove_temp_file(
        logging.error(
            "Failed to delete tmp file with %s size" % (str(file_size_kb))
        )
        sys.exit(1)
        raise RuntimeError()


def toKbytes(value):
@@ -282,7 +356,7 @@ def toKbytes(value):
            "PVC capacity %s does not match expression "
            "regexp '^[0-9]+[K|M|G|T]i$'"
        )
        sys.exit(1)
        raise RuntimeError()
    unit = {"K": 0, "M": 1, "G": 2, "T": 3}
    base = 1024 if ("i" in value) else 1000
    exp = unit[value[-2:-1]]
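
Worked example of the unit math above (values are illustrative; the function's return statement falls outside the hunk shown):

# "10Gi"  -> base 1024, exp unit["G"] = 2, scale factor 1024**2 = 1048576
# "512Mi" -> base 1024, exp unit["M"] = 1, scale factor 1024**1 = 1024
unit = {"K": 0, "M": 1, "G": 2, "T": 3}
for value in ("10Gi", "512Mi"):
    base = 1024 if ("i" in value) else 1000
    exp = unit[value[-2:-1]]
    print(value, "scales by", base ** exp)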


@@ -1,19 +1,19 @@
#!/usr/bin/env python

import os
import sys
import yaml
import logging
import time
import krkn_lib_kubernetes
from multiprocessing.pool import ThreadPool

from ..cerberus import setup as cerberus
from ..kubernetes import client as kubecli
from ..post_actions import actions as post_actions
from ..node_actions.aws_node_scenarios import AWS
from ..node_actions.openstack_node_scenarios import OPENSTACKCLOUD
from ..node_actions.az_node_scenarios import Azure
from ..node_actions.gcp_node_scenarios import GCP

from krkn_lib_kubernetes import ScenarioTelemetry, KrknTelemetry

def multiprocess_nodes(cloud_object_function, nodes):
    try:
@@ -40,7 +40,8 @@ def multiprocess_nodes(cloud_object_function, nodes):


# Inject the cluster shut down scenario
def cluster_shut_down(shut_down_config):
# krkn_lib_kubernetes
def cluster_shut_down(shut_down_config, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
    runs = shut_down_config["runs"]
    shut_down_duration = shut_down_config["shut_down_duration"]
    cloud_type = shut_down_config["cloud_type"]
@@ -58,7 +59,9 @@ def cluster_shut_down(shut_down_config):
            "Cloud type %s is not currently supported for cluster shut down" %
            cloud_type
        )
        sys.exit(1)
        # removed_exit
        # sys.exit(1)
        raise RuntimeError()

    nodes = kubecli.list_nodes()
    node_id = []
@@ -125,10 +128,18 @@ def cluster_shut_down(shut_down_config):

    logging.info("Successfully injected cluster_shut_down scenario!")

# krkn_lib_kubernetes

def run(scenarios_list, config, wait_duration):
def run(scenarios_list, config, wait_duration, kubecli: krkn_lib_kubernetes.KrknLibKubernetes, telemetry: KrknTelemetry) -> (list[str], list[ScenarioTelemetry]):
    failed_post_scenarios = []
    failed_scenarios = []
    scenario_telemetries: list[ScenarioTelemetry] = []

    for shut_down_config in scenarios_list:
        scenario_telemetry = ScenarioTelemetry()
        scenario_telemetry.scenario = shut_down_config
        scenario_telemetry.startTimeStamp = time.time()
        telemetry.set_parameters_base64(scenario_telemetry, shut_down_config[0])
        if len(shut_down_config) > 1:
            pre_action_output = post_actions.run("", shut_down_config[1])
        else:
@@ -138,18 +149,32 @@ def run(scenarios_list, config, wait_duration):
        shut_down_config_scenario = \
            shut_down_config_yaml["cluster_shut_down_scenario"]
        start_time = int(time.time())
        cluster_shut_down(shut_down_config_scenario)
        logging.info(
            "Waiting for the specified duration: %s" % (wait_duration)
        )
        time.sleep(wait_duration)
        failed_post_scenarios = post_actions.check_recovery(
            "", shut_down_config, failed_post_scenarios, pre_action_output
        )
        end_time = int(time.time())
        cerberus.publish_kraken_status(
            config,
            failed_post_scenarios,
            start_time,
            end_time
        )
        try:
            cluster_shut_down(shut_down_config_scenario, kubecli)
            logging.info(
                "Waiting for the specified duration: %s" % (wait_duration)
            )
            time.sleep(wait_duration)
            failed_post_scenarios = post_actions.check_recovery(
                "", shut_down_config, failed_post_scenarios, pre_action_output
            )
            end_time = int(time.time())
            cerberus.publish_kraken_status(
                config,
                failed_post_scenarios,
                start_time,
                end_time
            )

        except (RuntimeError, Exception):
            telemetry.log_exception(shut_down_config[0])
            failed_scenarios.append(shut_down_config[0])
            scenario_telemetry.exitStatus = 1
        else:
            scenario_telemetry.exitStatus = 0

        scenario_telemetry.endTimeStamp = time.time()
        scenario_telemetries.append(scenario_telemetry)

    return failed_scenarios, scenario_telemetries
@@ -5,14 +5,13 @@ import re
import sys
import yaml
import random

import krkn_lib_kubernetes
from ..cerberus import setup as cerberus
from ..kubernetes import client as kubecli
from ..invoke import command as runcommand
from krkn_lib_kubernetes import ScenarioTelemetry, KrknTelemetry


def pod_exec(pod_name, command, namespace, container_name):
    i = 0
# krkn_lib_kubernetes
def pod_exec(pod_name, command, namespace, container_name, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
    for i in range(5):
        response = kubecli.exec_cmd_in_pod(
            command,
@@ -41,7 +40,8 @@ def node_debug(node_name, command):
    return response


def get_container_name(pod_name, namespace, container_name=""):
# krkn_lib_kubernetes
def get_container_name(pod_name, namespace, kubecli: krkn_lib_kubernetes.KrknLibKubernetes, container_name=""):

    container_names = kubecli.get_containers_in_pod(pod_name, namespace)
    if container_name != "":
@@ -63,7 +63,8 @@ def get_container_name(pod_name, namespace, container_name=""):
    return container_name


def skew_time(scenario):
# krkn_lib_kubernetes
def skew_time(scenario, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
    skew_command = "date --set "
    if scenario["action"] == "skew_date":
        skewed_date = "00-01-01"
@@ -93,7 +94,9 @@ def skew_time(scenario):
        for name in scenario["object_name"]:
            if "namespace" not in scenario.keys():
                logging.error("Need to set namespace when using pod name")
                sys.exit(1)
                # removed_exit
                # sys.exit(1)
                raise RuntimeError()
            pod_names.append([name, scenario["namespace"]])
    elif "namespace" in scenario.keys() and scenario["namespace"]:
        if "label_selector" not in scenario.keys():
@@ -127,20 +130,26 @@ def skew_time(scenario):
                "Cannot find pods matching the namespace/label_selector, "
                "please check"
            )
            sys.exit(1)
            # removed_exit
            # sys.exit(1)
            raise RuntimeError()
        pod_counter = 0
        for pod in pod_names:
            if len(pod) > 1:
                selected_container_name = get_container_name(
                    pod[0],
                    pod[1],
                    container_name
                    kubecli,
                    container_name,

                )
                pod_exec_response = pod_exec(
                    pod[0],
                    skew_command,
                    pod[1],
                    selected_container_name
                    selected_container_name,
                    kubecli,

                )
                if pod_exec_response is False:
                    logging.error(
@@ -148,19 +157,23 @@ def skew_time(scenario):
                        "in pod %s in namespace %s"
                        % (selected_container_name, pod[0], pod[1])
                    )
                    sys.exit(1)
                    # removed_exit
                    # sys.exit(1)
                    raise RuntimeError()
                pod_names[pod_counter].append(selected_container_name)
            else:
                selected_container_name = get_container_name(
                    pod,
                    scenario["namespace"],
                    kubecli,
                    container_name
                )
                pod_exec_response = pod_exec(
                    pod,
                    skew_command,
                    scenario["namespace"],
                    selected_container_name
                    selected_container_name,
                    kubecli
                )
                if pod_exec_response is False:
                    logging.error(
@@ -172,7 +185,9 @@ def skew_time(scenario):
                            scenario["namespace"]
                        )
                    )
                    sys.exit(1)
                    # removed_exit
                    # sys.exit(1)
                    raise RuntimeError()
                pod_names[pod_counter].append(selected_container_name)
                logging.info("Reset date/time on pod " + str(pod[0]))
            pod_counter += 1
@@ -216,7 +231,8 @@ def string_to_date(obj_datetime):
    return datetime.datetime(datetime.MINYEAR, 1, 1)


def check_date_time(object_type, names):
# krkn_lib_kubernetes
def check_date_time(object_type, names, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
    skew_command = "date"
    not_reset = []
    max_retries = 30
@@ -256,7 +272,8 @@ def check_date_time(object_type, names):
                pod_name[0],
                skew_command,
                pod_name[1],
                pod_name[2]
                pod_name[2],
                kubecli
            )
            pod_datetime = string_to_date(pod_datetime_string)
            while not (
@@ -271,7 +288,8 @@ def check_date_time(object_type, names):
                    pod_name[0],
                    skew_command,
                    pod_name[1],
                    pod_name[2]
                    pod_name[2],
                    kubecli
                )
                pod_datetime = string_to_date(pod_datetime)
                counter += 1
@@ -289,24 +307,42 @@ def check_date_time(object_type, names):
    return not_reset


def run(scenarios_list, config, wait_duration):
# krkn_lib_kubernetes
def run(scenarios_list, config, wait_duration, kubecli: krkn_lib_kubernetes.KrknLibKubernetes, telemetry: KrknTelemetry) -> (list[str], list[ScenarioTelemetry]):
    failed_scenarios = []
    scenario_telemetries: list[ScenarioTelemetry] = []
    for time_scenario_config in scenarios_list:
        with open(time_scenario_config, "r") as f:
            scenario_config = yaml.full_load(f)
            for time_scenario in scenario_config["time_scenarios"]:
                start_time = int(time.time())
                object_type, object_names = skew_time(time_scenario)
                not_reset = check_date_time(object_type, object_names)
                if len(not_reset) > 0:
                    logging.info("Object times were not reset")
                logging.info(
                    "Waiting for the specified duration: %s" % (wait_duration)
                )
                time.sleep(wait_duration)
                end_time = int(time.time())
                cerberus.publish_kraken_status(
                    config,
                    not_reset,
                    start_time,
                    end_time
                )
        scenario_telemetry = ScenarioTelemetry()
        scenario_telemetry.scenario = time_scenario_config
        scenario_telemetry.startTimeStamp = time.time()
        telemetry.set_parameters_base64(scenario_telemetry, time_scenario_config)
        try:
            with open(time_scenario_config, "r") as f:
                scenario_config = yaml.full_load(f)
                for time_scenario in scenario_config["time_scenarios"]:
                    start_time = int(time.time())
                    object_type, object_names = skew_time(time_scenario, kubecli)
                    not_reset = check_date_time(object_type, object_names, kubecli)
                    if len(not_reset) > 0:
                        logging.info("Object times were not reset")
                    logging.info(
                        "Waiting for the specified duration: %s" % (wait_duration)
                    )
                    time.sleep(wait_duration)
                    end_time = int(time.time())
                    cerberus.publish_kraken_status(
                        config,
                        not_reset,
                        start_time,
                        end_time
                    )
        except (RuntimeError, Exception):
            scenario_telemetry.exitStatus = 1
            telemetry.log_exception(time_scenario_config)
            failed_scenarios.append(time_scenario_config)
        else:
            scenario_telemetry.exitStatus = 0
        scenario_telemetry.endTimeStamp = time.time()
        scenario_telemetries.append(scenario_telemetry)

    return failed_scenarios, scenario_telemetries
@@ -1,100 +1,119 @@
import yaml
import sys
import logging
import time
from ..node_actions.aws_node_scenarios import AWS
from ..cerberus import setup as cerberus
from krkn_lib_kubernetes import ScenarioTelemetry, KrknTelemetry


def run(scenarios_list, config, wait_duration):
def run(scenarios_list, config, wait_duration, telemetry: KrknTelemetry) -> (list[str], list[ScenarioTelemetry]):
    """
    filters the subnet of interest and applies the network acl
    to create zone outage
    """
    failed_post_scenarios = ""
    scenario_telemetries: list[ScenarioTelemetry] = []
    failed_scenarios = []

    for zone_outage_config in scenarios_list:
        if len(zone_outage_config) > 1:
            with open(zone_outage_config, "r") as f:
                zone_outage_config_yaml = yaml.full_load(f)
                scenario_config = zone_outage_config_yaml["zone_outage"]
                vpc_id = scenario_config["vpc_id"]
                subnet_ids = scenario_config["subnet_id"]
                duration = scenario_config["duration"]
                cloud_type = scenario_config["cloud_type"]
                ids = {}
                acl_ids_created = []
        scenario_telemetry = ScenarioTelemetry()
        scenario_telemetry.scenario = zone_outage_config
        scenario_telemetry.startTimeStamp = time.time()
        telemetry.set_parameters_base64(scenario_telemetry, zone_outage_config)
        try:
            if len(zone_outage_config) > 1:
                with open(zone_outage_config, "r") as f:
                    zone_outage_config_yaml = yaml.full_load(f)
                    scenario_config = zone_outage_config_yaml["zone_outage"]
                    vpc_id = scenario_config["vpc_id"]
                    subnet_ids = scenario_config["subnet_id"]
                    duration = scenario_config["duration"]
                    cloud_type = scenario_config["cloud_type"]
                    ids = {}
                    acl_ids_created = []

            if cloud_type.lower() == "aws":
                cloud_object = AWS()
            else:
                logging.error(
                    "Cloud type %s is not currently supported for "
                    "zone outage scenarios"
                    % cloud_type
                )
                sys.exit(1)

            start_time = int(time.time())

            for subnet_id in subnet_ids:
                logging.info("Targeting subnet_id")
                network_association_ids = []
                associations, original_acl_id = \
                    cloud_object.describe_network_acls(vpc_id, subnet_id)
                for entry in associations:
                    if entry["SubnetId"] == subnet_id:
                        network_association_ids.append(
                            entry["NetworkAclAssociationId"]
                        )
                logging.info(
                    "Network association ids associated with "
                    "the subnet %s: %s"
                    % (subnet_id, network_association_ids)
                )
                acl_id = cloud_object.create_default_network_acl(vpc_id)
                new_association_id = \
                    cloud_object.replace_network_acl_association(
                        network_association_ids[0], acl_id
                if cloud_type.lower() == "aws":
                    cloud_object = AWS()
                else:
                    logging.error(
                        "Cloud type %s is not currently supported for "
                        "zone outage scenarios"
                        % cloud_type
                    )
                    # removed_exit
                    # sys.exit(1)
                    raise RuntimeError()

                # capture the original_acl_id, created_acl_id and
                # new association_id to use during the recovery
                ids[new_association_id] = original_acl_id
                acl_ids_created.append(acl_id)
                start_time = int(time.time())

            # wait for the specified duration
            logging.info(
                "Waiting for the specified duration "
                "in the config: %s" % (duration)
            )
            time.sleep(duration)
                for subnet_id in subnet_ids:
                    logging.info("Targeting subnet_id")
                    network_association_ids = []
                    associations, original_acl_id = \
                        cloud_object.describe_network_acls(vpc_id, subnet_id)
                    for entry in associations:
                        if entry["SubnetId"] == subnet_id:
                            network_association_ids.append(
                                entry["NetworkAclAssociationId"]
                            )
                    logging.info(
                        "Network association ids associated with "
                        "the subnet %s: %s"
                        % (subnet_id, network_association_ids)
                    )
                    acl_id = cloud_object.create_default_network_acl(vpc_id)
                    new_association_id = \
                        cloud_object.replace_network_acl_association(
                            network_association_ids[0], acl_id
                        )

            # replace the applied acl with the previous acl in use
            for new_association_id, original_acl_id in ids.items():
                cloud_object.replace_network_acl_association(
                    new_association_id,
                    original_acl_id
                    # capture the original_acl_id, created_acl_id and
                    # new association_id to use during the recovery
                    ids[new_association_id] = original_acl_id
                    acl_ids_created.append(acl_id)

                # wait for the specified duration
                logging.info(
                    "Waiting for the specified duration "
                    "in the config: %s" % (duration)
                )
            logging.info(
                "Waiting for 60 seconds to make sure "
                "the changes are in place"
            )
            time.sleep(60)
                time.sleep(duration)

            # delete the network acl created for the run
            for acl_id in acl_ids_created:
                cloud_object.delete_network_acl(acl_id)
                # replace the applied acl with the previous acl in use
                for new_association_id, original_acl_id in ids.items():
                    cloud_object.replace_network_acl_association(
                        new_association_id,
                        original_acl_id
                    )
                logging.info(
                    "Waiting for 60 seconds to make sure "
                    "the changes are in place"
                )
                time.sleep(60)

            logging.info(
                "End of scenario. "
                "Waiting for the specified duration: %s" % (wait_duration)
            )
            time.sleep(wait_duration)
                # delete the network acl created for the run
                for acl_id in acl_ids_created:
                    cloud_object.delete_network_acl(acl_id)

                logging.info(
                    "End of scenario. "
                    "Waiting for the specified duration: %s" % (wait_duration)
                )
                time.sleep(wait_duration)

                end_time = int(time.time())
                cerberus.publish_kraken_status(
                    config,
                    failed_post_scenarios,
                    start_time,
                    end_time
                )
        except (RuntimeError, Exception):
            scenario_telemetry.exitStatus = 1
            failed_scenarios.append(zone_outage_config)
            telemetry.log_exception(zone_outage_config)
        else:
            scenario_telemetry.exitStatus = 0
        scenario_telemetry.endTimeStamp = time.time()
        scenario_telemetries.append(scenario_telemetry)
    return failed_scenarios, scenario_telemetries

    end_time = int(time.time())
    cerberus.publish_kraken_status(
        config,
        failed_post_scenarios,
        start_time,
        end_time
    )
|
||||
|
||||
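For reference, a minimal sketch of the scenario file this run() parses; the keys mirror the lookups above (zone_outage, vpc_id, subnet_id, duration, cloud_type), while the file path and the AWS ids are hypothetical placeholders, not values from this change:

# Minimal sketch of a zone_outage scenario file as parsed by run() above.
# The ids and output path are hypothetical, for illustration only.
import yaml

scenario = {
    "zone_outage": {
        "cloud_type": "aws",  # the only cloud handled by this code path
        "duration": 600,  # seconds the blocking ACL stays in place
        "vpc_id": "vpc-0123456789abcdef0",  # hypothetical VPC id
        # note: the key is singular, but the code iterates it as a list
        "subnet_id": ["subnet-0123456789abcdef0"],
    }
}

with open("zone_outage.yaml", "w") as f:
    yaml.safe_dump(scenario, f, default_flow_style=False)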
requirements.txt
@@ -21,7 +21,7 @@ docker-compose
 docker
 jinja2==3.0.3
 itsdangerous==2.0.1
-werkzeug==2.0.3
+werkzeug==2.2.3
 lxml >= 4.3.0
 pyVmomi >= 6.7
 zope.interface==5.4.0
@@ -31,4 +31,10 @@ arcaflow-plugin-sdk>=0.9.0
 wheel
 service_identity
 git+https://github.com/vmware/vsphere-automation-sdk-python.git@v8.0.0.0
-git+https://github.com/arcalot/arcaflow-plugin-kill-pod.git@main
+git+https://github.com/redhat-chaos/arcaflow-plugin-kill-pod.git
+arcaflow >= 0.4.1
+prometheus_api_client
+ibm_cloud_sdk_core
+ibm_vpc
+pytest
+krkn-lib-kubernetes >= 0.1.3
run_kraken.py (269 changed lines)
@@ -8,7 +8,6 @@ import optparse
 import pyfiglet
 import uuid
 import time
-import kraken.kubernetes.client as kubecli
 import kraken.litmus.common_litmus as common_litmus
 import kraken.time_actions.common_time_functions as time_actions
 import kraken.performance_dashboards.setup as performance_dashboards
@@ -22,14 +21,17 @@ import kraken.zone_outage.actions as zone_outages
 import kraken.application_outage.actions as application_outage
 import kraken.pvc.pvc_scenario as pvc_scenario
 import kraken.network_chaos.actions as network_chaos
+import kraken.arcaflow_plugin as arcaflow_plugin
 import server as server
 import kraken.prometheus.client as promcli
 from kraken import plugins
+from krkn_lib_kubernetes import KrknLibKubernetes, KrknTelemetry, ChaosRunTelemetry, SafeLogger

 KUBE_BURNER_URL = (
     "https://github.com/cloud-bulldozer/kube-burner/"
     "releases/download/v{version}/kube-burner-{version}-Linux-x86_64.tar.gz"
 )
-KUBE_BURNER_VERSION = "0.9.1"
+KUBE_BURNER_VERSION = "1.7.0"


 # Main function
@@ -42,13 +44,14 @@ def main(cfg):
     if os.path.isfile(cfg):
         with open(cfg, "r") as f:
             config = yaml.full_load(f)
-        global kubeconfig_path, wait_duration
+        global kubeconfig_path, wait_duration, kraken_config
         distribution = config["kraken"].get("distribution", "openshift")
-        kubeconfig_path = os.path.expanduser(config["kraken"].get("kubeconfig_path", ""))
-        chaos_scenarios = config["kraken"].get("chaos_scenarios", [])
-        publish_running_status = config["kraken"].get(
-            "publish_kraken_status", False
-        )
+        kubeconfig_path = os.path.expanduser(
+            config["kraken"].get("kubeconfig_path", "")
+        )
+        kraken_config = cfg
+        chaos_scenarios = config["kraken"].get("chaos_scenarios", [])
+        publish_running_status = config["kraken"].get("publish_kraken_status", False)
         port = config["kraken"].get("port")
         signal_address = config["kraken"].get("signal_address")
         run_signal = config["kraken"].get("signal_state", "RUN")
@@ -65,15 +68,12 @@ def main(cfg):
             "deploy_dashboards", False
         )
         dashboard_repo = config["performance_monitoring"].get(
-            "repo",
-            "https://github.com/cloud-bulldozer/performance-dashboards.git"
-        )
-        capture_metrics = config["performance_monitoring"].get(
-            "capture_metrics", False
+            "repo", "https://github.com/cloud-bulldozer/performance-dashboards.git"
         )
+        capture_metrics = config["performance_monitoring"].get("capture_metrics", False)
         kube_burner_url = config["performance_monitoring"].get(
             "kube_burner_binary_url",
-            KUBE_BURNER_URL.format(version=KUBE_BURNER_VERSION)
+            KUBE_BURNER_URL.format(version=KUBE_BURNER_VERSION),
         )
         config_path = config["performance_monitoring"].get(
             "config_path", "config/kube_burner.yaml"
@@ -81,44 +81,60 @@
         metrics_profile = config["performance_monitoring"].get(
             "metrics_profile_path", "config/metrics-aggregated.yaml"
         )
-        prometheus_url = config["performance_monitoring"].get(
-            "prometheus_url", ""
-        )
+        prometheus_url = config["performance_monitoring"].get("prometheus_url", "")
         prometheus_bearer_token = config["performance_monitoring"].get(
             "prometheus_bearer_token", ""
         )
         run_uuid = config["performance_monitoring"].get("uuid", "")
-        enable_alerts = config["performance_monitoring"].get(
-            "enable_alerts", False
-        )
-        alert_profile = config["performance_monitoring"].get(
-            "alert_profile", ""
-        )
+        enable_alerts = config["performance_monitoring"].get("enable_alerts", False)
+        alert_profile = config["performance_monitoring"].get("alert_profile", "")
+        check_critical_alerts = config["performance_monitoring"].get("check_critical_alerts", False)

         # Initialize clients
-        if not os.path.isfile(kubeconfig_path):
+        if (not os.path.isfile(kubeconfig_path) and
+                not os.path.isfile("/var/run/secrets/kubernetes.io/serviceaccount/token")):
             logging.error(
-                "Cannot read the kubeconfig file at %s, please check" %
-                kubeconfig_path
+                "Cannot read the kubeconfig file at %s, please check" % kubeconfig_path
             )
             sys.exit(1)
         logging.info("Initializing client to talk to the Kubernetes cluster")
-        os.environ["KUBECONFIG"] = str(kubeconfig_path)
-        kubecli.initialize_clients(kubeconfig_path)

+        # Generate uuid for the run
+        if run_uuid:
+            logging.info(
+                "Using the uuid defined by the user for the run: %s" % run_uuid
+            )
+        else:
+            run_uuid = str(uuid.uuid4())
+            logging.info("Generated a uuid for the run: %s" % run_uuid)
+
+        # request_id for telemetry is generated once here and used everywhere
+        telemetry_request_id = f"{int(time.time())}-{run_uuid}"
+        if config["telemetry"].get("run_tag"):
+            telemetry_request_id = f"{telemetry_request_id}-{config['telemetry']['run_tag']}"
+        telemetry_log_file = f'{config["telemetry"]["archive_path"]}/{telemetry_request_id}.log'
+        safe_logger = SafeLogger(filename=telemetry_log_file)
+
+        try:
+            kubeconfig_path
+            os.environ["KUBECONFIG"] = str(kubeconfig_path)
+            # krkn-lib-kubernetes init
+            kubecli = KrknLibKubernetes(kubeconfig_path=kubeconfig_path)
+        except:
+            kubecli.initialize_clients(None)
+
+        # KrknTelemetry init
+        telemetry = KrknTelemetry(safe_logger, kubecli)
+
+        # find node kraken might be running on
+        kubecli.find_kraken_node()

         # Set up kraken url to track signal
         if not 0 <= int(port) <= 65535:
-            logging.error(
-                "%s isn't a valid port number, please check" % (port)
-            )
+            logging.error("%s isn't a valid port number, please check" % (port))
             sys.exit(1)
         if not signal_address:
-            logging.error(
-                "Please set the signal address in the config"
-            )
+            logging.error("Please set the signal address in the config")
             sys.exit(1)
         address = (signal_address, port)

@@ -128,12 +144,8 @@ def main(cfg):
             server_address = address[0]
             port = address[1]
-            logging.info(
-                "Publishing kraken status at http://%s:%s" % (
-                    server_address,
-                    port
-                )
-            )
+            logging.info("Publishing kraken status at http://%s:%s" % (server_address, port))
             server.start_server(address, run_signal)

         # Cluster info
@@ -150,14 +162,7 @@ def main(cfg):
         if deploy_performance_dashboards:
             performance_dashboards.setup(dashboard_repo, distribution)

-        # Generate uuid for the run
-        if run_uuid:
-            logging.info(
-                "Using the uuid defined by the user for the run: %s" % run_uuid
-            )
-        else:
-            run_uuid = str(uuid.uuid4())
-            logging.info("Generated a uuid for the run: %s" % run_uuid)

         # Initialize the start iteration to 0
         iteration = 0
@@ -165,15 +170,13 @@ def main(cfg):
         # Set the number of iterations to loop to infinity if daemon mode is
         # enabled or else set it to the provided iterations count in the config
         if daemon_mode:
-            logging.info(
-                "Daemon mode enabled, kraken will cause chaos forever\n"
-            )
+            logging.info("Daemon mode enabled, kraken will cause chaos forever\n")
             logging.info("Ignoring the iterations set")
             iterations = float("inf")
         else:
             logging.info(
-                "Daemon mode not enabled, will run through %s iterations\n" %
-                str(iterations)
+                "Daemon mode not enabled, will run through %s iterations\n"
+                % str(iterations)
             )
             iterations = int(iterations)

@@ -182,7 +185,8 @@ def main(cfg):
         # Capture the start time
         start_time = int(time.time())
         litmus_installed = False
-
+        chaos_telemetry = ChaosRunTelemetry()
+        chaos_telemetry.run_uuid = run_uuid
         # Loop to run the chaos starts here
         while int(iteration) < iterations and run_signal != "STOP":
             # Inject chaos scenarios specified in the config
@@ -195,8 +199,7 @@ def main(cfg):
             while publish_running_status and run_signal == "PAUSE":
                 logging.info(
                     "Pausing Kraken run, waiting for %s seconds"
-                    " and will re-poll signal"
-                    % str(wait_duration)
+                    " and will re-poll signal" % str(wait_duration)
                 )
                 time.sleep(wait_duration)
                 run_signal = server.get_status(address)
@@ -214,52 +217,58 @@ def main(cfg):
                             "kill-pods configuration instead."
                         )
                         sys.exit(1)
+                    elif scenario_type == "arcaflow_scenarios":
+                        failed_post_scenarios, scenario_telemetries = arcaflow_plugin.run(
+                            scenarios_list, kubeconfig_path, telemetry
+                        )
+                        chaos_telemetry.scenarios.extend(scenario_telemetries)

                     elif scenario_type == "plugin_scenarios":
-                        failed_post_scenarios = plugins.run(
+                        failed_post_scenarios, scenario_telemetries = plugins.run(
                             scenarios_list,
                             kubeconfig_path,
                             kraken_config,
                             failed_post_scenarios,
-                            wait_duration
+                            wait_duration,
+                            telemetry
                         )
+                        chaos_telemetry.scenarios.extend(scenario_telemetries)
+                    # krkn_lib_kubernetes
                     elif scenario_type == "container_scenarios":
                         logging.info("Running container scenarios")
-                        failed_post_scenarios = \
-                            pod_scenarios.container_run(
-                                kubeconfig_path,
-                                scenarios_list,
-                                config,
-                                failed_post_scenarios,
-                                wait_duration
-                            )
+                        failed_post_scenarios, scenario_telemetries = pod_scenarios.container_run(
+                            kubeconfig_path,
+                            scenarios_list,
+                            config,
+                            failed_post_scenarios,
+                            wait_duration,
+                            kubecli,
+                            telemetry
+                        )
+                        chaos_telemetry.scenarios.extend(scenario_telemetries)

                     # Inject node chaos scenarios specified in the config
+                    # krkn_lib_kubernetes
                     elif scenario_type == "node_scenarios":
                         logging.info("Running node scenarios")
-                        nodeaction.run(
-                            scenarios_list,
-                            config,
-                            wait_duration
-                        )
+                        failed_post_scenarios, scenario_telemetries = nodeaction.run(scenarios_list, config, wait_duration, kubecli, telemetry)
+                        chaos_telemetry.scenarios.extend(scenario_telemetries)
                     # Inject managedcluster chaos scenarios specified in the config
+                    # krkn_lib_kubernetes
                     elif scenario_type == "managedcluster_scenarios":
                         logging.info("Running managedcluster scenarios")
                         managedcluster_scenarios.run(
-                            scenarios_list,
-                            config,
-                            wait_duration
+                            scenarios_list, config, wait_duration, kubecli
                         )

                     # Inject time skew chaos scenarios specified
                     # in the config
+                    # krkn_lib_kubernetes
                     elif scenario_type == "time_scenarios":
                         if distribution == "openshift":
                             logging.info("Running time skew scenarios")
-                            time_actions.run(
-                                scenarios_list,
-                                config,
-                                wait_duration
-                            )
+                            failed_post_scenarios, scenario_telemetries = time_actions.run(scenarios_list, config, wait_duration, kubecli, telemetry)
+                            chaos_telemetry.scenarios.extend(scenario_telemetries)
                         else:
                             logging.error(
                                 "Litmus scenarios are currently "
@@ -275,24 +284,20 @@ def main(cfg):
                         if litmus_install:
                             # Remove Litmus resources
                             # before running the scenarios
-                            common_litmus.delete_chaos(
-                                litmus_namespace
-                            )
+                            common_litmus.delete_chaos(litmus_namespace, kubecli)
                             common_litmus.delete_chaos_experiments(
-                                litmus_namespace
+                                litmus_namespace,
+                                kubecli
                             )
                             if litmus_uninstall_before_run:
                                 common_litmus.uninstall_litmus(
-                                    litmus_version,
-                                    litmus_namespace
+                                    litmus_version, litmus_namespace, kubecli
                                 )
                             common_litmus.install_litmus(
-                                litmus_version,
-                                litmus_namespace
+                                litmus_version, litmus_namespace
                             )
                             common_litmus.deploy_all_experiments(
-                                litmus_version,
-                                litmus_namespace
+                                litmus_version, litmus_namespace
                             )
                             litmus_installed = True
                             common_litmus.run(
@@ -301,6 +306,7 @@ def main(cfg):
                                 litmus_uninstall,
                                 wait_duration,
                                 litmus_namespace,
+                                kubecli
                             )
                         else:
                             logging.error(
@@ -310,59 +316,83 @@ def main(cfg):
                             sys.exit(1)

                     # Inject cluster shutdown scenarios
+                    # krkn_lib_kubernetes
                     elif scenario_type == "cluster_shut_down_scenarios":
-                        shut_down.run(
-                            scenarios_list,
-                            config,
-                            wait_duration
-                        )
+                        failed_post_scenarios, scenario_telemetries = shut_down.run(scenarios_list, config, wait_duration, kubecli, telemetry)
+                        chaos_telemetry.scenarios.extend(scenario_telemetries)

                     # Inject namespace chaos scenarios
+                    # krkn_lib_kubernetes
                     elif scenario_type == "namespace_scenarios":
                         logging.info("Running namespace scenarios")
-                        namespace_actions.run(
+                        failed_post_scenarios, scenario_telemetries = namespace_actions.run(
                             scenarios_list,
                             config,
                             wait_duration,
                             failed_post_scenarios,
-                            kubeconfig_path
+                            kubeconfig_path,
+                            kubecli,
+                            telemetry
                         )
+                        chaos_telemetry.scenarios.extend(scenario_telemetries)

                     # Inject zone failures
                     elif scenario_type == "zone_outages":
                         logging.info("Inject zone outages")
-                        zone_outages.run(
-                            scenarios_list,
-                            config,
-                            wait_duration
-                        )
-
+                        failed_post_scenarios, scenario_telemetries = zone_outages.run(scenarios_list, config, wait_duration, telemetry)
+                        chaos_telemetry.scenarios.extend(scenario_telemetries)
                     # Application outages
                     elif scenario_type == "application_outages":
                         logging.info("Injecting application outage")
-                        application_outage.run(
-                            scenarios_list,
-                            config,
-                            wait_duration
-                        )
+                        failed_post_scenarios, scenario_telemetries = application_outage.run(
+                            scenarios_list, config, wait_duration, telemetry)
+                        chaos_telemetry.scenarios.extend(scenario_telemetries)

                     # PVC scenarios
+                    # krkn_lib_kubernetes
                     elif scenario_type == "pvc_scenarios":
                         logging.info("Running PVC scenario")
-                        pvc_scenario.run(scenarios_list, config)
+                        failed_post_scenarios, scenario_telemetries = pvc_scenario.run(scenarios_list, config, kubecli, telemetry)
+                        chaos_telemetry.scenarios.extend(scenario_telemetries)

                     # Network scenarios
+                    # krkn_lib_kubernetes
                     elif scenario_type == "network_chaos":
                         logging.info("Running Network Chaos")
-                        network_chaos.run(
-                            scenarios_list,
-                            config,
-                            wait_duration
-                        )
+                        failed_post_scenarios, scenario_telemetries = network_chaos.run(scenarios_list, config, wait_duration, kubecli, telemetry)

+                    # Check for critical alerts when enabled
+                    if check_critical_alerts:
+                        logging.info("Checking for critical alerts firing post chaos")
+                        promcli.initialize_prom_client(distribution, prometheus_url, prometheus_bearer_token)
+                        query = r"""ALERTS{severity="critical"}"""
+                        critical_alerts = promcli.process_prom_query(query)
+                        critical_alerts_count = len(critical_alerts)
+                        if critical_alerts_count > 0:
+                            logging.error("Critical alerts are firing: %s", critical_alerts)
+                            logging.error("Please check, exiting")
+                            sys.exit(1)
+                        else:
+                            logging.info("No critical alerts are firing!!")

             iteration += 1
             logging.info("")

+        # telemetry
+        if config["telemetry"]["enabled"]:
+            logging.info(f"telemetry data will be stored on s3 bucket folder: {telemetry_request_id}")
+            logging.info(f"telemetry upload log: {safe_logger.log_file_name}")
+            try:
+                telemetry.send_telemetry(config["telemetry"], telemetry_request_id, chaos_telemetry)
+                safe_logger.info("archives download started:")
+                prometheus_archive_files = telemetry.get_ocp_prometheus_data(config["telemetry"], telemetry_request_id)
+                safe_logger.info("archives upload started:")
+                telemetry.put_ocp_prometheus_data(config["telemetry"], prometheus_archive_files, telemetry_request_id)
+            except Exception as e:
+                logging.error(f"failed to send telemetry data: {str(e)}")
+        else:
+            logging.info("telemetry collection disabled, skipping.")

         # Capture the end time
         end_time = int(time.time())

@@ -397,11 +427,11 @@ def main(cfg):
         else:
             logging.error("Alert profile is not defined")
             sys.exit(1)

         if litmus_uninstall and litmus_installed:
-            common_litmus.delete_chaos(litmus_namespace)
-            common_litmus.delete_chaos_experiments(litmus_namespace)
-            common_litmus.uninstall_litmus(litmus_version, litmus_namespace)
+            common_litmus.delete_chaos(litmus_namespace, kubecli)
+            common_litmus.delete_chaos_experiments(litmus_namespace, kubecli)
+            common_litmus.uninstall_litmus(litmus_version, litmus_namespace, kubecli)

         if failed_post_scenarios:
             logging.error(
@@ -412,8 +442,7 @@ def main(cfg):
             run_dir = os.getcwd() + "/kraken.report"
             logging.info(
                 "Successfully finished running Kraken. UUID for the run: "
-                "%s. Report generated at %s. Exiting"
-                % (run_uuid, run_dir)
+                "%s. Report generated at %s. Exiting" % (run_uuid, run_dir)
             )
     else:
         logging.error("Cannot find a config at %s, please check" % (cfg))
@@ -436,7 +465,7 @@ if __name__ == "__main__":
         format="%(asctime)s [%(levelname)s] %(message)s",
         handlers=[
             logging.FileHandler("kraken.report", mode="w"),
-            logging.StreamHandler()
+            logging.StreamHandler(),
         ],
     )
     if options.cfg is None:
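The telemetry request-id scheme introduced above (one id per run, reused for the s3 bucket folder, the upload log, and the prometheus archives) can be illustrated standalone; the run_tag and archive_path values below are hypothetical, not taken from this change:

# Standalone illustration of the telemetry request-id construction used above.
# run_tag and archive_path stand in for config["telemetry"] values.
import time
import uuid

run_uuid = str(uuid.uuid4())
run_tag = "nightly"  # hypothetical config["telemetry"]["run_tag"]
archive_path = "/tmp/kraken-telemetry"  # hypothetical config["telemetry"]["archive_path"]

telemetry_request_id = f"{int(time.time())}-{run_uuid}"
if run_tag:
    telemetry_request_id = f"{telemetry_request_id}-{run_tag}"

telemetry_log_file = f"{archive_path}/{telemetry_request_id}.log"
print(telemetry_log_file)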
scenarios/arcaflow/cpu-hog/config.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
---
deployer:
  connection: {}
  type: kubernetes
log:
  level: debug
logged_outputs:
  error:
    level: error
  success:
    level: debug
scenarios/arcaflow/cpu-hog/input.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
input_list:
  - cpu_count: 1
    cpu_load_percentage: 80
    cpu_method: all
    duration: 30s
    node_selector: {}
    # node selector example
    # node_selector:
    #   kubernetes.io/hostname: master
    kubeconfig: ""
    namespace: default

# duplicate this section to run simultaneous stressors in the same run
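As the closing comment suggests, additional simultaneous stressors come from duplicating the input_list entry. A hedged helper for doing that programmatically (not part of the repo; the second entry's load value is an arbitrary example):

# Sketch: clone the single input_list entry to run two simultaneous CPU
# stressors, as the comment in input.yaml suggests. Not krkn code.
import copy
import yaml

with open("scenarios/arcaflow/cpu-hog/input.yaml") as f:
    doc = yaml.safe_load(f)

clone = copy.deepcopy(doc["input_list"][0])
clone["cpu_load_percentage"] = 50  # arbitrary second load level
doc["input_list"].append(clone)

with open("scenarios/arcaflow/cpu-hog/input.yaml", "w") as f:
    yaml.safe_dump(doc, f, default_flow_style=False)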
scenarios/arcaflow/cpu-hog/sub-workflow.yaml (new file, 93 lines)
@@ -0,0 +1,93 @@
input:
  root: RootObject
  objects:
    RootObject:
      id: RootObject
      properties:
        kubeconfig:
          display:
            description: The complete kubeconfig file as a string
            name: Kubeconfig file contents
          type:
            type_id: string
          required: true
        namespace:
          display:
            description: The namespace where the container will be deployed
            name: Namespace
          type:
            type_id: string
          required: true
        node_selector:
          display:
            description: kubernetes node name where the plugin must be deployed
          type:
            type_id: map
            values:
              type_id: string
            keys:
              type_id: string
          required: true
        duration:
          display:
            name: duration of the scenario expressed in seconds
            description: stop stress test after T seconds. One can also specify the units of time in seconds, minutes, hours, days or years with the suffix s, m, h, d or y
          type:
            type_id: string
          required: true
        cpu_count:
          display:
            description: Number of CPU cores to be used (0 means all)
            name: number of CPUs
          type:
            type_id: integer
          required: true
        cpu_method:
          display:
            description: CPU stress method
            name: fine grained control of which cpu stressors to use (ackermann, cfloat etc.)
          type:
            type_id: string
          required: true
        cpu_load_percentage:
          display:
            description: load CPU by percentage
            name: CPU load
          type:
            type_id: integer
          required: true

steps:
  kubeconfig:
    plugin: quay.io/arcalot/arcaflow-plugin-kubeconfig:latest
    input:
      kubeconfig: !expr $.input.kubeconfig
  stressng:
    plugin: quay.io/arcalot/arcaflow-plugin-stressng:latest
    step: workload
    input:
      StressNGParams:
        timeout: !expr $.input.duration
        cleanup: "true"
        items:
          - stressor: cpu
            cpu_count: !expr $.input.cpu_count
            cpu_method: !expr $.input.cpu_method
            cpu_load: !expr $.input.cpu_load_percentage
    deploy:
      type: kubernetes
      connection: !expr $.steps.kubeconfig.outputs.success.connection
      pod:
        metadata:
          namespace: !expr $.input.namespace
          labels:
            arcaflow: stressng
        spec:
          nodeSelector: !expr $.input.node_selector
          pluginContainer:
            imagePullPolicy: Always

outputs:
  success:
    stressng: !expr $.steps.stressng.outputs.success
scenarios/arcaflow/cpu-hog/workflow.yaml (new file, 76 lines)
@@ -0,0 +1,76 @@
input:
  root: RootObject
  objects:
    RootObject:
      id: RootObject
      properties:
        input_list:
          type:
            type_id: list
            items:
              id: input_item
              type_id: object
              properties:
                kubeconfig:
                  display:
                    description: The complete kubeconfig file as a string
                    name: Kubeconfig file contents
                  type:
                    type_id: string
                  required: true
                namespace:
                  display:
                    description: The namespace where the container will be deployed
                    name: Namespace
                  type:
                    type_id: string
                  required: true
                node_selector:
                  display:
                    description: kubernetes node name where the plugin must be deployed
                  type:
                    type_id: map
                    values:
                      type_id: string
                    keys:
                      type_id: string
                  required: true
                duration:
                  display:
                    name: duration of the scenario expressed in seconds
                    description: stop stress test after T seconds. One can also specify the units of time in seconds, minutes, hours, days or years with the suffix s, m, h, d or y
                  type:
                    type_id: string
                  required: true
                cpu_count:
                  display:
                    description: Number of CPU cores to be used (0 means all)
                    name: number of CPUs
                  type:
                    type_id: integer
                  required: true
                cpu_method:
                  display:
                    description: CPU stress method
                    name: fine grained control of which cpu stressors to use (ackermann, cfloat etc.)
                  type:
                    type_id: string
                  required: true
                cpu_load_percentage:
                  display:
                    description: load CPU by percentage
                    name: CPU load
                  type:
                    type_id: integer
                  required: true
steps:
  workload_loop:
    kind: foreach
    items: !expr $.input.input_list
    workflow: sub-workflow.yaml
    parallelism: 1000
outputs:
  success:
    workloads: !expr $.steps.workload_loop.outputs.success.data
scenarios/arcaflow/memory-hog/config.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
---
deployer:
  connection: {}
  type: kubernetes
log:
  level: debug
logged_outputs:
  error:
    level: error
  success:
    level: debug
scenarios/arcaflow/memory-hog/input.yaml (new file, 13 lines)
@@ -0,0 +1,13 @@
input_list:
  - duration: 30s
    vm_bytes: 10%
    vm_workers: 2
    node_selector: { }
    # node selector example
    # node_selector:
    #   kubernetes.io/hostname: master
    kubeconfig: ""
    namespace: default

# duplicate this section to run simultaneous stressors in the same run
scenarios/arcaflow/memory-hog/sub-workflow.yaml (new file, 85 lines)
@@ -0,0 +1,85 @@
input:
  root: RootObject
  objects:
    RootObject:
      id: RootObject
      properties:
        kubeconfig:
          display:
            description: The complete kubeconfig file as a string
            name: Kubeconfig file contents
          type:
            type_id: string
          required: true
        namespace:
          display:
            description: The namespace where the container will be deployed
            name: Namespace
          type:
            type_id: string
          required: true
        node_selector:
          display:
            description: kubernetes node name where the plugin must be deployed
          type:
            type_id: map
            values:
              type_id: string
            keys:
              type_id: string
          required: true
        duration:
          display:
            name: duration of the scenario expressed in seconds
            description: stop stress test after T seconds. One can also specify the units of time in seconds, minutes, hours, days or years with the suffix s, m, h, d or y
          type:
            type_id: string
          required: true
        vm_workers:
          display:
            description: Number of VM stressors to be run (0 means 1 stressor per CPU)
            name: Number of VM stressors
          type:
            type_id: integer
          required: true
        vm_bytes:
          display:
            description: N bytes per vm process, the default is 256MB. The size can be expressed in units of Bytes, KBytes, MBytes and GBytes using the suffix b, k, m or g.
            name: Bytes per VM stressor
          type:
            type_id: string
          required: true

steps:
  kubeconfig:
    plugin: quay.io/arcalot/arcaflow-plugin-kubeconfig:latest
    input:
      kubeconfig: !expr $.input.kubeconfig
  stressng:
    plugin: quay.io/arcalot/arcaflow-plugin-stressng:latest
    step: workload
    input:
      StressNGParams:
        timeout: !expr $.input.duration
        cleanup: "true"
        items:
          - stressor: vm
            vm: !expr $.input.vm_workers
            vm_bytes: !expr $.input.vm_bytes
    deploy:
      type: kubernetes
      connection: !expr $.steps.kubeconfig.outputs.success.connection
      pod:
        metadata:
          namespace: !expr $.input.namespace
          labels:
            arcaflow: stressng
        spec:
          nodeSelector: !expr $.input.node_selector
          pluginContainer:
            imagePullPolicy: Always

outputs:
  success:
    stressng: !expr $.steps.stressng.outputs.success
scenarios/arcaflow/memory-hog/workflow.yaml (new file, 72 lines)
@@ -0,0 +1,72 @@
input:
  root: RootObject
  objects:
    RootObject:
      id: RootObject
      properties:
        input_list:
          type:
            type_id: list
            items:
              id: input_item
              type_id: object
              properties:
                kubeconfig:
                  display:
                    description: The complete kubeconfig file as a string
                    name: Kubeconfig file contents
                  type:
                    type_id: string
                  required: true
                namespace:
                  display:
                    description: The namespace where the container will be deployed
                    name: Namespace
                  type:
                    type_id: string
                  required: true
                node_selector:
                  display:
                    description: kubernetes node name where the plugin must be deployed
                  type:
                    type_id: map
                    values:
                      type_id: string
                    keys:
                      type_id: string
                  required: true
                duration:
                  display:
                    name: duration of the scenario expressed in seconds
                    description: stop stress test after T seconds. One can also specify the units of time in seconds, minutes, hours, days or years with the suffix s, m, h, d or y
                  type:
                    type_id: string
                  required: true
                vm_workers:
                  display:
                    description: Number of VM stressors to be run (0 means 1 stressor per CPU)
                    name: Number of VM stressors
                  type:
                    type_id: integer
                  required: true
                vm_bytes:
                  display:
                    description: N bytes per vm process, the default is 256MB. The size can be expressed in units of Bytes, KBytes, MBytes and GBytes using the suffix b, k, m or g.
                    name: Bytes per VM stressor
                  type:
                    type_id: string
                  required: true
steps:
  workload_loop:
    kind: foreach
    items: !expr $.input.input_list
    workflow: sub-workflow.yaml
    parallelism: 1000
outputs:
  success:
    workloads: !expr $.steps.workload_loop.outputs.success.data
@@ -5,4 +5,4 @@ scenarios:
     container_name: "etcd"
     action: "kill 1"
     count: 1
-    retry_wait: 60
+    expected_recovery_time: 60
scenarios/openshift/ibmcloud_node_scenarios.yml (new file, 9 lines)
@@ -0,0 +1,9 @@
# yaml-language-server: $schema=../plugin.schema.json
- id: <ibmcloud-node-terminate/ibmcloud-node-reboot/ibmcloud-node-stop/ibmcloud-node-start>
  config:
    name: ""
    label_selector: "node-role.kubernetes.io/worker"  # When node_name is not specified, a node with matching label_selector is selected for node chaos scenario injection
    runs: 1  # Number of times to inject each scenario under actions (will perform on same node each time)
    instance_count: 1  # Number of nodes to perform action/select that match the label selector
    timeout: 30  # Duration to wait for completion of node scenario injection
    skip_openshift_checks: False  # Set to True if you don't want to wait for the status of the nodes to change on OpenShift before passing the scenario
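To make the selection semantics concrete, here is a hedged sketch of how a label_selector like the one above resolves to candidate nodes; it uses the official kubernetes Python client and is illustrative only, not part of this change:

# Illustrative sketch (not krkn code): resolving a node label_selector to
# candidate nodes with the official kubernetes Python client.
from kubernetes import client, config

config.load_kube_config()  # assumes a reachable kubeconfig
v1 = client.CoreV1Api()
nodes = v1.list_node(label_selector="node-role.kubernetes.io/worker")
candidates = [n.metadata.name for n in nodes.items]
print(candidates[:1])  # instance_count: 1 would act on one matching node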
scenarios/openshift/pod_network_outage.yml (new file, 15 lines)
@@ -0,0 +1,15 @@
# yaml-language-server: $schema=../plugin.schema.json
- id: pod_network_outage
  config:
    namespace: <namespace>  # Required - Namespace of the pod to which the filter needs to be applied
    direction:  # Optional - List of directions to apply filters
      - <egress/ingress>  # Default both egress and ingress
    ingress_ports:  # Optional - List of ports to block traffic on
      - <port number>  # Default [], i.e. all ports
    egress_ports:  # Optional - List of ports to block traffic on
      - <port number>  # Default [], i.e. all ports
    pod_name: <pod name>  # When label_selector is not specified, the pod matching the name will be selected for the chaos scenario
    label_selector: <label_selector>  # When pod_name is not specified, a pod with matching label_selector is selected for the chaos scenario
    instance_count: <number>  # Number of pods to perform action/select that match the label selector
    wait_duration: <time_duration>  # Default is 300. Ensure that it is at least about twice test_duration
    test_duration: <time_duration>  # Default is 120
scenarios/openshift/pod_network_shaping.yml (new file, 14 lines)
@@ -0,0 +1,14 @@
# yaml-language-server: $schema=../plugin.schema.json
- id: pod_egress_shaping
  config:
    namespace: <namespace>  # Required - Namespace of the pod to which traffic shaping needs to be applied
    label_selector: <label_selector>  # When pod_name is not specified, a pod with matching label_selector is selected for the chaos scenario
    pod_name: <pod name>  # When label_selector is not specified, the pod matching the name will be selected for the chaos scenario
    network_params:  # latency, loss and bandwidth are the three supported network parameters to alter for the chaos test
      latency: <time>  # Value is a string. For example: 50ms
      loss: <fraction>  # Loss is a fraction between 0 and 1. It has to be enclosed in quotes to treat it as a string. For example, '0.02%' (not 0.02%)
      bandwidth: <rate>  # Value is a string. For example: 100mbit
    execution_type: <serial/parallel>  # Used to specify whether you want to apply filters on interfaces one at a time or all at once. Default is 'parallel'
    instance_count: <number>  # Number of pods to perform action/select that match the label selector
    wait_duration: <time_duration>  # Default is 300. Ensure that it is at least about twice test_duration
    test_duration: <time_duration>  # Default is 120
Some files were not shown because too many files have changed in this diff.