Mirror of https://github.com/krkn-chaos/krkn.git (synced 2026-02-19 20:40:33 +00:00)

Compare commits (26 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 4f7c58106d | |
| | a7e5ae6c80 | |
| | aa030a21d3 | |
| | 631f12bdff | |
| | 2525982c55 | |
| | 9760d7d97d | |
| | 720488c159 | |
| | 487a9f464c | |
| | d9e137e85a | |
| | d6c8054275 | |
| | 462f93ad87 | |
| | c200f0774f | |
| | f2d7f88cb8 | |
| | 93f1f19411 | |
| | 83c6058816 | |
| | ee34d08f41 | |
| | 41f9573563 | |
| | c00328cc2b | |
| | c2431d548f | |
| | b03511850b | |
| | 82db2fca75 | |
| | afe8d817a9 | |
| | dbf02a6c22 | |
| | 94bec8dc9b | |
| | 2111bab9a4 | |
| | b734f1dd05 | |
.github/workflows/docker-image.yml (vendored): 15 lines changed
@@ -12,14 +12,25 @@ jobs:
    - name: Check out code
      uses: actions/checkout@v3
    - name: Build the Docker images
      run: docker build --no-cache -t quay.io/redhat-chaos/krkn containers/
      run: |
        docker build --no-cache -t quay.io/krkn-chaos/krkn containers/
        docker tag quay.io/krkn-chaos/krkn quay.io/redhat-chaos/krkn
    - name: Login in quay
      if: github.ref == 'refs/heads/main' && github.event_name == 'push'
      run: docker login quay.io -u ${QUAY_USER} -p ${QUAY_TOKEN}
      env:
        QUAY_USER: ${{ secrets.QUAY_USERNAME }}
        QUAY_TOKEN: ${{ secrets.QUAY_PASSWORD }}
    - name: Push the KrknChaos Docker images
      if: github.ref == 'refs/heads/main' && github.event_name == 'push'
      run: docker push quay.io/krkn-chaos/krkn
    - name: Login in to redhat-chaos quay
      if: github.ref == 'refs/heads/main' && github.event_name == 'push'
      run: docker login quay.io -u ${QUAY_USER} -p ${QUAY_TOKEN}
      env:
        QUAY_USER: ${{ secrets.QUAY_USER_1 }}
        QUAY_TOKEN: ${{ secrets.QUAY_TOKEN_1 }}
    - name: Push the Docker images
    - name: Push the RedHat Chaos Docker images
      if: github.ref == 'refs/heads/main' && github.event_name == 'push'
      run: docker push quay.io/redhat-chaos/krkn
    - name: Rebuild krkn-hub
.github/workflows/functional_tests.yaml (vendored): 64 lines changed
@@ -44,47 +44,65 @@ jobs:
    - name: Install python 3.9
      uses: actions/setup-python@v4
      with:
        python-version: '3.9'
        python-version: '3.9'
    - name: Setup kraken dependencies
      run: pip install -r requirements.txt
    - name: Create Workdir & export the path
      run: |
        mkdir workdir
        echo "WORKDIR_PATH=`pwd`/workdir" >> $GITHUB_ENV
    - name: Generate run id
      run: |
        echo "RUN_ID=`date +%s`" > $GITHUB_ENV
        echo "Run Id: ${RUN_ID}"
    - name: Write Pull Secret
      env:
        PULLSECRET_BASE64: ${{ secrets.PS_64 }}
      run: |
        echo "$PULLSECRET_BASE64" | base64 --decode > pullsecret.txt
    - name: Write Boot Private Key
      env:
        BOOT_KEY: ${{ secrets.CRC_KEY_FILE }}
      run: |
        echo -n "$BOOT_KEY" > key.txt
    - name: Teardown CRC (Post Action)
      uses: webiny/action-post-run@3.0.0
      id: post-run-command
      with:
        # currently using image coming from tsebastiani quay.io repo
        # waiting that a fix is merged in the upstream one
        # post action run cannot (apparently) be properly indented
        run: docker run -v "${{ env.WORKDIR_PATH }}:/workdir" -e WORKING_MODE=T -e AWS_ACCESS_KEY_ID=${{ secrets.AWS_ACCESS_KEY_ID }} -e AWS_SECRET_ACCESS_KEY=${{ secrets.AWS_SECRET_ACCESS_KEY }} -e AWS_DEFAULT_REGION=us-west-2 -e TEARDOWN_RUN_ID=crc quay.io/tsebastiani/crc-cloud
    - name: Run CRC
      # currently using image coming from tsebastiani quay.io repo
      # waiting that a fix is merged in the upstream one
      run: podman run --rm -v "${{ github.workspace }}:/workspace:z" -e AWS_ACCESS_KEY_ID="${{ secrets.AWS_ACCESS_KEY_ID }}" -e AWS_SECRET_ACCESS_KEY="${{ secrets.AWS_SECRET_ACCESS_KEY }}" -e AWS_DEFAULT_REGION=us-west-2 quay.io/crcont/crc-cloud:v0.0.2 destroy --project-name "chaos-funtest-${{ env.RUN_ID }}" --backed-url "s3://krkn-crc-state/${{ env.RUN_ID }}" --provider "aws"
    - name: Create cluster
      run: |
        docker run -v "${{ env.WORKDIR_PATH }}:/workdir" \
          -e WORKING_MODE=C \
          -e PULL_SECRET="${{ secrets.PULL_SECRET }}" \
          -e AWS_ACCESS_KEY_ID="${{ secrets.AWS_ACCESS_KEY_ID }}" \
          -e AWS_SECRET_ACCESS_KEY="${{ secrets.AWS_SECRET_ACCESS_KEY }}" \
          -e AWS_DEFAULT_REGION=us-west-2 \
          -e CREATE_RUN_ID=crc \
          -e PASS_KUBEADMIN="${{ secrets.KUBEADMIN_PWD }}" \
          -e PASS_REDHAT="${{ secrets.REDHAT_PWD }}" \
          -e PASS_DEVELOPER="${{ secrets.DEVELOPER_PWD }}" \
          quay.io/tsebastiani/crc-cloud
    - name: OpenShift login and example deployment, GitHub Action env init
        podman run --name crc-cloud-create --rm \
          -v ${PWD}:/workspace:z \
          -e AWS_ACCESS_KEY_ID="${{ secrets.AWS_ACCESS_KEY_ID }}" \
          -e AWS_SECRET_ACCESS_KEY="${{ secrets.AWS_SECRET_ACCESS_KEY }}" \
          -e AWS_DEFAULT_REGION="us-west-2" \
          quay.io/crcont/crc-cloud:v0.0.2 \
          create aws \
          --project-name "chaos-funtest-${RUN_ID}" \
          --backed-url "s3://krkn-crc-state/${RUN_ID}" \
          --output "/workspace" \
          --aws-ami-id "ami-00f5eaf98cf42ef9f" \
          --pullsecret-filepath /workspace/pullsecret.txt \
          --key-filepath /workspace/key.txt

    - name: Setup kubeconfig
      continue-on-error: true
      run: |
        ssh -o StrictHostKeyChecking=no -i id_rsa core@$(cat host) "cat /opt/kubeconfig" > kubeconfig
        sed -i "s/https:\/\/api.crc.testing:6443/https:\/\/`cat host`.nip.io:6443/g" kubeconfig
        echo "KUBECONFIG=${PWD}/kubeconfig" > $GITHUB_ENV

    - name: Example deployment, GitHub Action env init
      env:
        NAMESPACE: test-namespace
        DEPLOYMENT_NAME: test-nginx
        KUBEADMIN_PWD: '${{ secrets.KUBEADMIN_PWD }}'
      run: ./CI/CRC/init_github_action.sh
    - name: Setup test suite
      run: |
        yq -i '.kraken.port="8081"' CI/config/common_test_config.yaml
        yq -i '.kraken.signal_address="0.0.0.0"' CI/config/common_test_config.yaml

        yq -i '.kraken.kubeconfig_path="'${KUBECONFIG}'"' CI/config/common_test_config.yaml
        echo "test_app_outages_gh" > ./CI/tests/my_tests
        echo "test_container" >> ./CI/tests/my_tests
        echo "test_namespace" >> ./CI/tests/my_tests
@@ -106,6 +124,6 @@ jobs:
        echo "# Test results" > $GITHUB_STEP_SUMMARY
        cat CI/results.markdown >> $GITHUB_STEP_SUMMARY
        echo "# Test coverage" >> $GITHUB_STEP_SUMMARY
        python -m coverage report --format=markdown >> $GITHUB_STEP_SUMMARY
        python -m coverage report --format=markdown >> $GITHUB_STEP_SUMMARY
@@ -1,16 +1,11 @@
#!/bin/bash
SCRIPT_PATH=./CI/CRC
DEPLOYMENT_PATH=$SCRIPT_PATH/deployment.yaml
CLUSTER_INFO=cluster_infos.json

[[ -z $WORKDIR_PATH ]] && echo "[ERROR] please set \$WORKDIR_PATH environment variable" && exit 1
CLUSTER_INFO_PATH=$WORKDIR_PATH/crc/$CLUSTER_INFO

[[ ! -f $DEPLOYMENT_PATH ]] && echo "[ERROR] please run $0 from GitHub action root directory" && exit 1
[[ -z $KUBEADMIN_PWD ]] && echo "[ERROR] kubeadmin password not set, please check the repository secrets" && exit 1
[[ -z $DEPLOYMENT_NAME ]] && echo "[ERROR] please set \$DEPLOYMENT_NAME environment variable" && exit 1
[[ -z $NAMESPACE ]] && echo "[ERROR] please set \$NAMESPACE environment variable" && exit 1
[[ ! -f $CLUSTER_INFO_PATH ]] && echo "[ERROR] cluster_info.json not found in $CLUSTER_INFO_PATH" && exit 1

OPENSSL=`which openssl 2>/dev/null`
[[ $? != 0 ]] && echo "[ERROR]: openssl missing, please install it and try again" && exit 1
@@ -23,20 +18,11 @@ JQ=`which jq 2>/dev/null`
ENVSUBST=`which envsubst 2>/dev/null`
[[ $? != 0 ]] && echo "[ERROR]: envsubst missing, please install it and try again" && exit 1

API_ADDRESS="$($JQ -r '.api.address' $CLUSTER_INFO_PATH)"
API_PORT="$($JQ -r '.api.port' $CLUSTER_INFO_PATH)"
BASE_HOST=`$JQ -r '.api.address' $CLUSTER_INFO_PATH | sed -r 's#https:\/\/api\.(.+\.nip\.io)#\1#'`
FQN=$DEPLOYMENT_NAME.apps.$BASE_HOST

echo "[INF] logging on $API_ADDRESS:$API_PORT"
COUNTER=1
until `$OC login --insecure-skip-tls-verify -u kubeadmin -p $KUBEADMIN_PWD $API_ADDRESS:$API_PORT > /dev/null 2>&1`
do
  echo "[INF] login attempt $COUNTER"
  [[ $COUNTER == 20 ]] && echo "[ERR] maximum login attempts exceeded, failing" && exit 1
  ((COUNTER++))
  sleep 10
done
API_PORT="6443"
API_ADDRESS="https://api.`cat host`.nip.io:${API_PORT}"
FQN=$DEPLOYMENT_NAME.apps.$API_ADDRESS

echo "[INF] deploying example deployment: $DEPLOYMENT_NAME in namespace: $NAMESPACE"
$ENVSUBST < $DEPLOYMENT_PATH | $OC apply -f - > /dev/null 2>&1
@@ -15,15 +15,13 @@ cerberus:
performance_monitoring:
  deploy_dashboards: False # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift.
  repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
  kube_burner_binary_url: "https://github.com/cloud-bulldozer/kube-burner/releases/download/v0.9.1/kube-burner-0.9.1-Linux-x86_64.tar.gz"
  capture_metrics: False
  config_path: config/kube_burner.yaml # Define the Elasticsearch url and index name in this config.
  metrics_profile_path: config/metrics-aggregated.yaml
  prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
  prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
  uuid: # uuid for the run is generated by default if not set.
  enable_alerts: False # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error.
  alert_profile: config/alerts # Path to alert profile with the prometheus queries.
  alert_profile: config/alerts.yaml # Path to alert profile with the prometheus queries.

tunings:
  wait_duration: 6 # Duration to wait between each chaos scenario.
@@ -1,34 +0,0 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: nginx-chaos
  namespace: litmus
spec:
  # It can be true/false
  annotationCheck: 'false'
  # It can be active/stop
  engineState: 'active'
  chaosServiceAccount: litmus-sa
  monitoring: false
  # It can be delete/retain
  jobCleanUpPolicy: 'delete'
  experiments:
    - name: node-cpu-hog
      spec:
        components:
          env:
            # set chaos duration (in sec) as desired
            - name: TOTAL_CHAOS_DURATION
              value: '10'

            # Number of cores of node CPU to be consumed
            - name: NODE_CPU_CORE
              value: '1'

            # percentage of total nodes to target
            - name: NODES_AFFECTED_PERC
              value: '30'

            # ENTER THE COMMA SEPARATED TARGET NODES NAME
            - name: TARGET_NODES
              value: $WORKER_NODE
@@ -1,34 +0,0 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: nginx-chaos
  namespace: litmus
spec:
  # It can be true/false
  annotationCheck: 'false'
  # It can be active/stop
  engineState: 'active'
  chaosServiceAccount: litmus-sa
  monitoring: false
  # It can be delete/retain
  jobCleanUpPolicy: 'delete'
  experiments:
    - name: node-cpu-hog
      spec:
        components:
          env:
            # set chaos duration (in sec) as desired
            - name: TOTAL_CHAOS_DURATION
              value: '10'

            # Number of cores of node CPU to be consumed
            - name: NODE_CPU_CORE
              value: '1'

            # percentage of total nodes to target
            - name: NODES_AFFECTED_PERC
              value: '30'

            # ENTER THE COMMA SEPARATED TARGET NODES NAME
            - name: TARGET_NODES
              value:
@@ -1,35 +0,0 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: nginx-chaos
  namespace: litmus
spec:
  # It can be delete/retain
  jobCleanUpPolicy: 'retain'
  # It can be active/stop
  engineState: 'active'
  chaosServiceAccount: litmus-sa
  experiments:
    - name: node-io-stress
      spec:
        components:
          env:
            # set chaos duration (in sec) as desired
            - name: TOTAL_CHAOS_DURATION
              value: '10'

            ## specify the size as percentage of free space on the file system
            - name: FILESYSTEM_UTILIZATION_PERCENTAGE
              value: '100'

            ## Number of core of CPU
            - name: CPU
              value: '1'

            ## Total number of workers default value is 4
            - name: NUMBER_OF_WORKERS
              value: '3'

            ## enter the comma separated target nodes name
            - name: TARGET_NODES
              value: $WORKER_NODE
@@ -1,35 +0,0 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: nginx-chaos
  namespace: litmus
spec:
  # It can be delete/retain
  jobCleanUpPolicy: 'retain'
  # It can be active/stop
  engineState: 'active'
  chaosServiceAccount: litmus-sa
  experiments:
    - name: node-io-stress
      spec:
        components:
          env:
            # set chaos duration (in sec) as desired
            - name: TOTAL_CHAOS_DURATION
              value: '10'

            ## specify the size as percentage of free space on the file system
            - name: FILESYSTEM_UTILIZATION_PERCENTAGE
              value: '100'

            ## Number of core of CPU
            - name: CPU
              value: '1'

            ## Total number of workers default value is 4
            - name: NUMBER_OF_WORKERS
              value: '3'

            ## enter the comma separated target nodes name
            - name: TARGET_NODES
              value:
@@ -1,28 +0,0 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: nginx-chaos
  namespace: litmus
spec:
  # It can be delete/retain
  jobCleanUpPolicy: 'retain'
  # It can be active/stop
  engineState: 'active'
  chaosServiceAccount: litmus-sa
  experiments:
    - name: node-memory-hog
      spec:
        components:
          env:
            # set chaos duration (in sec) as desired
            - name: TOTAL_CHAOS_DURATION
              value: '10'

            ## Specify the size as percent of total node capacity Ex: '30'
            ## Note: For consuming memory in mebibytes change the variable to MEMORY_CONSUMPTION_MEBIBYTES
            - name: MEMORY_CONSUMPTION_PERCENTAGE
              value: '30'

            # ENTER THE COMMA SEPARATED TARGET NODES NAME
            - name: TARGET_NODES
              value: $WORKER_NODE
@@ -1,28 +0,0 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: nginx-chaos
  namespace: litmus
spec:
  # It can be delete/retain
  jobCleanUpPolicy: 'retain'
  # It can be active/stop
  engineState: 'active'
  chaosServiceAccount: litmus-sa
  experiments:
    - name: node-memory-hog
      spec:
        components:
          env:
            # set chaos duration (in sec) as desired
            - name: TOTAL_CHAOS_DURATION
              value: '10'

            ## Specify the size as percent of total node capacity Ex: '30'
            ## Note: For consuming memory in mebibytes change the variable to MEMORY_CONSUMPTION_MEBIBYTES
            - name: MEMORY_CONSUMPTION_PERCENTAGE
              value: '30'

            # ENTER THE COMMA SEPARATED TARGET NODES NAME
            - name: TARGET_NODES
              value:
@@ -1,20 +0,0 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT


function functional_test_litmus_cpu {

  export scenario_type="litmus_scenarios"
  export scenario_file="- scenarios/templates/litmus-rbac.yaml"
  export post_config="- CI/scenarios/node_cpu_hog_engine_node.yaml"
  envsubst < CI/config/common_test_config.yaml > CI/config/litmus_config.yaml
  envsubst < CI/scenarios/node_cpu_hog_engine.yaml > CI/scenarios/node_cpu_hog_engine_node.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/litmus_config.yaml
  echo "Litmus scenario test: Success"
}

functional_test_litmus_cpu
@@ -1,20 +0,0 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT


function functional_test_litmus_cpu {
  [ -z $NODE_NAME ] && echo "[ERR] NODE_NAME variable not set, failing." && exit 1
  yq -i ' .spec.experiments = [{"name": "node-cpu-hog", "spec":{"components":{"env":[{"name":"TOTAL_CHAOS_DURATION","value":"10"},{"name":"NODE_CPU_CORE","value":"1"},{"name":"NODES_AFFECTED_PERC","value":"30"},{"name":"TARGET_NODES","value":"'$NODE_NAME'"}]}}}]' CI/scenarios/node_cpu_hog_engine_node.yaml

  cp CI/config/common_test_config.yaml CI/config/litmus_config.yaml
  yq '.kraken.chaos_scenarios = [{"litmus_scenarios":[["scenarios/openshift/templates/litmus-rbac.yaml","CI/scenarios/node_cpu_hog_engine_node.yaml"]]}]' -i CI/config/litmus_config.yaml

  python3 -m coverage run -a run_kraken.py -c CI/config/litmus_config.yaml
  echo "Litmus scenario test: Success"
}

functional_test_litmus_cpu
@@ -1,20 +0,0 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT


function functional_test_litmus_io {

  export scenario_type="litmus_scenarios"
  export scenario_file="- scenarios/templates/litmus-rbac.yaml"
  export post_config="- CI/scenarios/node_io_engine_node.yaml"
  envsubst < CI/config/common_test_config.yaml > CI/config/litmus_config.yaml
  envsubst < CI/scenarios/node_io_engine.yaml > CI/scenarios/node_io_engine_node.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/litmus_config.yaml
  echo "Litmus scenario test: Success"
}

functional_test_litmus_io
@@ -1,19 +0,0 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT


function functional_test_litmus_io {
  [ -z $NODE_NAME ] && echo "[ERR] NODE_NAME variable not set, failing." && exit 1
  yq -i ' .spec.experiments = [{"name": "node-io-stress", "spec":{"components":{"env":[{"name":"TOTAL_CHAOS_DURATION","value":"10"},{"name":"FILESYSTEM_UTILIZATION_PERCENTAGE","value":"100"},{"name":"CPU","value":"1"},{"name":"NUMBER_OF_WORKERS","value":"3"},{"name":"TARGET_NODES","value":"'$NODE_NAME'"}]}}}]' CI/scenarios/node_io_engine_node.yaml
  cp CI/config/common_test_config.yaml CI/config/litmus_config.yaml
  yq '.kraken.chaos_scenarios = [{"litmus_scenarios":[["scenarios/openshift/templates/litmus-rbac.yaml","CI/scenarios/node_io_engine_node.yaml"]]}]' -i CI/config/litmus_config.yaml

  python3 -m coverage run -a run_kraken.py -c CI/config/litmus_config.yaml
  echo "Litmus scenario test: Success"
}

functional_test_litmus_io
@@ -1,20 +0,0 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT


function functional_test_litmus_mem {

  export scenario_type="litmus_scenarios"
  export scenario_file="- scenarios/templates/litmus-rbac.yaml"
  export post_config="- CI/scenarios/node_mem_engine_node.yaml"
  envsubst < CI/config/common_test_config.yaml > CI/config/litmus_config.yaml
  envsubst < CI/scenarios/node_mem_engine.yaml > CI/scenarios/node_mem_engine_node.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/litmus_config.yaml
  echo "Litmus scenario $1 test: Success"
}

functional_test_litmus_mem "- CI/scenarios/node_mem_engine.yaml"
@@ -1,19 +0,0 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT


function functional_test_litmus_mem {
  [ -z $NODE_NAME ] && echo "[ERR] NODE_NAME variable not set, failing." && exit 1
  yq -i ' .spec.experiments = [{"name": "node-io-stress", "spec":{"components":{"env":[{"name":"TOTAL_CHAOS_DURATION","value":"10"},{"name":"CPU","value":"1"},{"name":"TARGET_NODES","value":"'$NODE_NAME'"}]}}}]' CI/scenarios/node_mem_engine_node.yaml
  cp CI/config/common_test_config.yaml CI/config/litmus_config.yaml
  yq '.kraken.chaos_scenarios = [{"litmus_scenarios":[["scenarios/openshift/templates/litmus-rbac.yaml","CI/scenarios/node_mem_engine_node.yaml"]]}]' -i CI/config/litmus_config.yaml

  python3 -m coverage run -a run_kraken.py -c CI/config/litmus_config.yaml
  echo "Litmus scenario test: Success"
}

functional_test_litmus_mem
@@ -1,21 +1,50 @@
# Contributor Covenant Code of Conduct
## CNCF Community Code of Conduct v1.3

## Our Pledge
Other languages available:
- [Arabic/العربية](code-of-conduct-languages/ar.md)
- [Bulgarian/Български](code-of-conduct-languages/bg.md)
- [Chinese/中文](code-of-conduct-languages/zh.md)
- [Czech/Česky](code-of-conduct-languages/cs.md)
- [Farsi/فارسی](code-of-conduct-languages/fa.md)
- [French/Français](code-of-conduct-languages/fr.md)
- [German/Deutsch](code-of-conduct-languages/de.md)
- [Hindi/हिन्दी](code-of-conduct-languages/hi.md)
- [Indonesian/Bahasa Indonesia](code-of-conduct-languages/id.md)
- [Italian/Italiano](code-of-conduct-languages/it.md)
- [Japanese/日本語](code-of-conduct-languages/jp.md)
- [Korean/한국어](code-of-conduct-languages/ko.md)
- [Polish/Polski](code-of-conduct-languages/pl.md)
- [Portuguese/Português](code-of-conduct-languages/pt.md)
- [Russian/Русский](code-of-conduct-languages/ru.md)
- [Spanish/Español](code-of-conduct-languages/es.md)
- [Turkish/Türkçe](code-of-conduct-languages/tr.md)
- [Ukrainian/Українська](code-of-conduct-languages/uk.md)
- [Vietnamese/Tiếng Việt](code-of-conduct-languages/vi.md)

We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
### Community Code of Conduct

We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
As contributors, maintainers, and participants in the CNCF community, and in the interest of fostering
an open and welcoming community, we pledge to respect all people who participate or contribute
through reporting issues, posting feature requests, updating documentation,
submitting pull requests or patches, attending conferences or events, or engaging in other community or project activities.

We are committed to making participation in the CNCF community a harassment-free experience for everyone, regardless of age, body size, caste, disability, ethnicity, level of experience, family status, gender, gender identity and expression, marital status, military or veteran status, nationality, personal appearance, race, religion, sexual orientation, socioeconomic status, tribe, or any other dimension of diversity.

## Scope

This code of conduct applies:
* within project and community spaces,
* in other spaces when an individual CNCF community participant's words or actions are directed at or are about a CNCF project, the CNCF community, or another CNCF community participant.

### CNCF Events

CNCF events that are produced by the Linux Foundation with professional events staff are governed by the Linux Foundation [Events Code of Conduct](https://events.linuxfoundation.org/code-of-conduct/) available on the event page. This is designed to be used in conjunction with the CNCF Code of Conduct.

## Our Standards

Examples of behavior that contributes to a positive environment for our
community include:
The CNCF Community is open, inclusive and respectful. Every member of our community has the right to have their identity respected.

Examples of behavior that contributes to a positive environment include but are not limited to:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
@@ -24,104 +53,52 @@ community include:
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
* Using welcoming and inclusive language

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or
advances of any kind
Examples of unacceptable behavior include but are not limited to:

* The use of sexualized language or imagery
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Public or private harassment in any form
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Violence, threatening violence, or encouraging others to engage in violent behavior
* Stalking or following someone without their consent
* Unwelcome physical contact
* Unwelcome sexual or romantic attention or advances
* Other conduct which could reasonably be considered inappropriate in a
professional setting

## Enforcement Responsibilities
The following behaviors are also prohibited:
* Providing knowingly false or misleading information in connection with a Code of Conduct investigation or otherwise intentionally tampering with an investigation.
* Retaliating against a person because they reported an incident or provided information about an incident as a witness.

Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct.
By adopting this Code of Conduct, project maintainers commit themselves to fairly and consistently applying these principles to every aspect
of managing a CNCF project.
Project maintainers who do not follow or enforce the Code of Conduct may be temporarily or permanently removed from the project team.

Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Reporting

## Scope
For incidents occurring in the Kubernetes community, contact the [Kubernetes Code of Conduct Committee](https://git.k8s.io/community/committee-code-of-conduct) via <conduct@kubernetes.io>. You can expect a response within three business days.

This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
For other projects, or for incidents that are project-agnostic or impact multiple CNCF projects, please contact the [CNCF Code of Conduct Committee](https://www.cncf.io/conduct/committee/) via <conduct@cncf.io>. Alternatively, you can contact any of the individual members of the [CNCF Code of Conduct Committee](https://www.cncf.io/conduct/committee/) to submit your report. For more detailed instructions on how to submit a report, including how to submit a report anonymously, please see our [Incident Resolution Procedures](https://github.com/cncf/foundation/blob/main/code-of-conduct/coc-incident-resolution-procedures.md). You can expect a response within three business days.

For incidents occurring at CNCF event that is produced by the Linux Foundation, please contact <eventconduct@cncf.io>.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement.
All complaints will be reviewed and investigated promptly and fairly.
Upon review and investigation of a reported incident, the CoC response team that has jurisdiction will determine what action is appropriate based on this Code of Conduct and its related documentation.

All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
For information about which Code of Conduct incidents are handled by project leadership, which incidents are handled by the CNCF Code of Conduct Committee, and which incidents are handled by the Linux Foundation (including its events team), see our [Jurisdiction Policy](https://github.com/cncf/foundation/blob/main/code-of-conduct/coc-committee-jurisdiction-policy.md).

## Enforcement Guidelines
## Amendments

Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
Consistent with the CNCF Charter, any substantive changes to this Code of Conduct must be approved by the Technical Oversight Committee.

### 1. Correction
## Acknowledgements

**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series
of actions.

**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within
the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.

Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
This Code of Conduct is adapted from the Contributor Covenant
(http://contributor-covenant.org), version 2.0 available at
http://contributor-covenant.org/version/2/0/code_of_conduct/
README.md: 53 lines changed
@@ -4,8 +4,8 @@

![Workflow-Diagram](media/kraken-workflow.png)

Chaos and resiliency testing tool for Kubernetes and OpenShift.
Kraken injects deliberate failures into Kubernetes/OpenShift clusters to check if it is resilient to turbulent conditions.
Chaos and resiliency testing tool for Kubernetes.
Kraken injects deliberate failures into Kubernetes clusters to check if it is resilient to turbulent conditions.

### Workflow
@@ -18,13 +18,13 @@ Kraken injects deliberate failures into Kubernetes/OpenShift clusters to check i
### Chaos Testing Guide
[Guide](docs/index.md) encapsulates:
- Test methodology that needs to be embraced.
- Best practices that an OpenShift cluster, platform and applications running on top of it should take into account for best user experience, performance, resilience and reliability.
- Best practices that a Kubernetes cluster, platform and applications running on top of it should take into account for best user experience, performance, resilience and reliability.
- Tooling.
- Scenarios supported.
- Test environment recommendations as to how and where to run chaos tests.
- Chaos testing in practice.

The guide is hosted at https://redhat-chaos.github.io/krkn.
The guide is hosted at https://krkn-chaos.github.io/krkn.
### How to Get Started
@@ -38,7 +38,7 @@ After installation, refer back to the below sections for supported scenarios and

#### Running Kraken with minimal configuration tweaks
For cases where you want to run Kraken with minimal configuration changes, refer to [Kraken-hub](https://github.com/redhat-chaos/krkn-hub). One use case is CI integration where you do not want to carry around different configuration files for the scenarios.
For cases where you want to run Kraken with minimal configuration changes, refer to [Kraken-hub](https://github.com/redhat-chaos/krknChaos-hub). One use case is CI integration where you do not want to carry around different configuration files for the scenarios.

### Setting up infrastructure dependencies
Kraken indexes the metrics specified in the profile into Elasticsearch in addition to leveraging Cerberus for understanding the health of the Kubernetes/OpenShift cluster under test. More information on the features is documented below. The infrastructure pieces can be easily installed and uninstalled by running:
@@ -57,30 +57,30 @@ This will manage the Cerberus and Elasticsearch containers on the host on which
Instructions on how to setup the config and the options supported can be found at [Config](docs/config.md).

### Kubernetes/OpenShift chaos scenarios supported
### Kubernetes chaos scenarios supported

Scenario type | Kubernetes | OpenShift
--------------------------- | ------------- |--------------------|
[Pod Scenarios](docs/pod_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: |
[Pod Network Scenarios](docs/pod_network_scenarios.md) | :x: | :heavy_check_mark: |
[Container Scenarios](docs/container_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: |
[Node Scenarios](docs/node_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: |
[Time Scenarios](docs/time_scenarios.md) | :x: | :heavy_check_mark: |
[Hog Scenarios: CPU, Memory](docs/arcaflow_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: |
[Cluster Shut Down Scenarios](docs/cluster_shut_down_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: |
[Service Disruption Scenarios](docs/service_disruption_scenarios.md.md) | :heavy_check_mark: | :heavy_check_mark: |
[Zone Outage Scenarios](docs/zone_outage.md) | :heavy_check_mark: | :heavy_check_mark: |
[Application_outages](docs/application_outages.md) | :heavy_check_mark: | :heavy_check_mark: |
[PVC scenario](docs/pvc_scenario.md) | :heavy_check_mark: | :heavy_check_mark: |
[Network_Chaos](docs/network_chaos.md) | :heavy_check_mark: | :heavy_check_mark: |
[ManagedCluster Scenarios](docs/managedcluster_scenarios.md) | :heavy_check_mark: | :question: |
Scenario type | Kubernetes
--------------------------- | ------------- |
[Pod Scenarios](docs/pod_scenarios.md) | :heavy_check_mark: |
[Pod Network Scenarios](docs/pod_network_scenarios.md) | :x: |
[Container Scenarios](docs/container_scenarios.md) | :heavy_check_mark: |
[Node Scenarios](docs/node_scenarios.md) | :heavy_check_mark: |
[Time Scenarios](docs/time_scenarios.md) | :x: |
[Hog Scenarios: CPU, Memory](docs/arcaflow_scenarios.md) | :heavy_check_mark: |
[Cluster Shut Down Scenarios](docs/cluster_shut_down_scenarios.md) | :heavy_check_mark: |
[Service Disruption Scenarios](docs/service_disruption_scenarios.md.md) | :heavy_check_mark: |
[Zone Outage Scenarios](docs/zone_outage.md) | :heavy_check_mark: |
[Application_outages](docs/application_outages.md) | :heavy_check_mark: |
[PVC scenario](docs/pvc_scenario.md) | :heavy_check_mark: |
[Network_Chaos](docs/network_chaos.md) | :heavy_check_mark: |
[ManagedCluster Scenarios](docs/managedcluster_scenarios.md) | :heavy_check_mark: |

### Kraken scenario pass/fail criteria and report
It is important to make sure to check if the targeted component recovered from the chaos injection and also if the Kubernetes/OpenShift cluster is healthy as failures in one component can have an adverse impact on other components. Kraken does this by:
- Having built in checks for pod and node based scenarios to ensure the expected number of replicas and nodes are up. It also supports running custom scripts with the checks.
- Leveraging [Cerberus](https://github.com/openshift-scale/cerberus) to monitor the cluster under test and consuming the aggregated go/no-go signal to determine pass/fail post chaos. It is highly recommended to turn on the Cerberus health check feature available in Kraken. Instructions on installing and setting up Cerberus can be found [here](https://github.com/openshift-scale/cerberus#installation) or can be installed from Kraken using the [instructions](https://github.com/redhat-chaos/krkn#setting-up-infrastructure-dependencies). Once Cerberus is up and running, set cerberus_enabled to True and cerberus_url to the url where Cerberus publishes go/no-go signal in the Kraken config file. Cerberus can monitor [application routes](https://github.com/redhat-chaos/cerberus/blob/main/docs/config.md#watch-routes) during the chaos and fails the run if it encounters downtime as it is a potential downtime in a customers, or users environment as well. It is especially important during the control plane chaos scenarios including the API server, Etcd, Ingress etc. It can be enabled by setting `check_applicaton_routes: True` in the [Kraken config](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml) provided application routes are being monitored in the [cerberus config](https://github.com/redhat-chaos/krkn/blob/main/config/cerberus.yaml).
- Leveraging [kube-burner](docs/alerts.md) alerting feature to fail the runs in case of critical alerts.
- Leveraging [Cerberus](https://github.com/redhat-chaos/cerberus) to monitor the cluster under test and consuming the aggregated go/no-go signal to determine pass/fail post chaos. It is highly recommended to turn on the Cerberus health check feature available in Kraken. Instructions on installing and setting up Cerberus can be found [here](https://github.com/openshift-scale/cerberus#installation) or can be installed from Kraken using the [instructions](https://github.com/redhat-chaos/krkn#setting-up-infrastructure-dependencies). Once Cerberus is up and running, set cerberus_enabled to True and cerberus_url to the url where Cerberus publishes go/no-go signal in the Kraken config file. Cerberus can monitor [application routes](https://github.com/redhat-chaos/cerberus/blob/main/docs/config.md#watch-routes) during the chaos and fails the run if it encounters downtime as it is a potential downtime in a customers, or users environment as well. It is especially important during the control plane chaos scenarios including the API server, Etcd, Ingress etc. It can be enabled by setting `check_applicaton_routes: True` in the [Kraken config](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml) provided application routes are being monitored in the [cerberus config](https://github.com/redhat-chaos/krkn/blob/main/config/cerberus.yaml).
- Leveraging built-in alert collection feature to fail the runs in case of critical alerts (a sketch of the related config keys follows this list).
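A minimal sketch of the related settings in the Kraken config, assuming the key names used in the prose above and in the config files later in this diff (values are illustrative, not defaults):

```yaml
cerberus:
  cerberus_enabled: True             # consume the Cerberus go/no-go signal for pass/fail
  cerberus_url: http://0.0.0.0:8080  # illustrative; the URL where Cerberus publishes the signal
  check_applicaton_routes: True      # fail the run on application route downtime (key spelled as in the README)
performance_monitoring:
  enable_alerts: True                # evaluate the alert profile, exit non-zero on severity=error
  alert_profile: config/alerts.yaml  # Prometheus queries used for the checks
  check_critical_alerts: False       # optionally check for critical alerts firing post chaos
```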

### Signaling
In CI runs or any external job it is useful to stop Kraken once a certain test or state gets reached. We created a way to signal to kraken to pause the chaos or stop it completely using a signal posted to a port of your choice.
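The signal-related keys appear in the config files later in this diff; a minimal sketch of that part of the kraken section (values as shown in those configs):

```yaml
kraken:
  signal_state: RUN            # set to PAUSE to make Kraken wait for a RUN signal before starting scenarios
  signal_address: 0.0.0.0      # address the signal listener binds to
  port: 8081                   # signal port
  publish_kraken_status: True  # run status can then be read at http://0.0.0.0:8081
```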
@@ -94,10 +94,6 @@ More detailed information on enabling and leveraging this feature can be found [
Monitoring the Kubernetes/OpenShift cluster to observe the impact of Kraken chaos scenarios on various components is key to find out the bottlenecks, as it is important to make sure the cluster is healthy in terms of both recovery as well as performance during/after the failure has been injected. Instructions on enabling it can be found [here](docs/performance_dashboards.md).

### Scraping and storing metrics long term
Kraken supports capturing metrics for the duration of the scenarios defined in the config and indexes them into Elasticsearch to be able to store and evaluate the state of the runs long term. The indexed metrics can be visualized with the help of Grafana. It uses [Kube-burner](https://github.com/cloud-bulldozer/kube-burner) under the hood. The metrics to capture need to be defined in a metrics profile which Kraken consumes to query prometheus ( installed by default in OpenShift ) with the start and end timestamp of the run. Information on enabling and leveraging this feature can be found [here](docs/metrics.md).
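A minimal sketch of the performance_monitoring keys that drive this feature, taken from the config files elsewhere in this diff (values illustrative):

```yaml
performance_monitoring:
  capture_metrics: True                                  # capture metrics for the duration of the run
  config_path: config/kube_burner.yaml                   # Elasticsearch url and index name are defined here
  metrics_profile_path: config/metrics-aggregated.yaml   # which metrics to query from Prometheus
  prometheus_url:          # auto-detected on OpenShift; set explicitly on plain Kubernetes
  prometheus_bearer_token: # auto-detected on OpenShift; needed to authenticate with Prometheus
```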

### SLOs validation during and post chaos
- In addition to checking the recovery and health of the cluster and components under test, Kraken takes in a profile with the Prometheus expressions to validate and alerts, and exits with a non-zero return code depending on the severity set. This feature can be used to determine pass/fail or alert on abnormalities observed in the cluster based on the metrics (an example alert entry is shown after this list).
- Kraken also provides the ability to check if any critical alerts are firing in the cluster post chaos and passes or fails the run accordingly.
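The alert profile referenced here is a list of Prometheus expressions, each with a description and a severity; one entry from config/alerts.yaml (added later in this diff) illustrates the shape:

```yaml
- expr: avg_over_time(histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[2m]))[10m:]) > 1
  description: 10 minutes avg. 99th etcd fsync latency on {{$labels.pod}} higher than 1s. {{$value}}s
  severity: error  # severity=error makes the run exit with a non-zero return code
```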
@@ -116,7 +112,8 @@ Kraken supports injecting faults into [Open Cluster Management (OCM)](https://op
- Blog post emphasizing the importance of making Chaos part of Performance and Scale runs to mimic the production environments: https://www.openshift.com/blog/making-chaos-part-of-kubernetes/openshift-performance-and-scalability-tests
- Blog post on findings from Chaos test runs: https://cloud.redhat.com/blog/openshift/kubernetes-chaos-stories
- Discussion with CNCF TAG App Delivery on Krkn workflow, features and addition to CNCF sandbox: [Github](https://github.com/cncf/sandbox/issues/44), [Tracker](https://github.com/cncf/tag-app-delivery/issues/465), [recording](https://www.youtube.com/watch?v=nXQkBFK_MWc&t=722s)

- Blog post on supercharging chaos testing using AI integration in Krkn: https://www.redhat.com/en/blog/supercharging-chaos-testing-using-ai
- Blog post announcing Krkn joining CNCF Sandbox: https://www.redhat.com/en/blog/krknchaos-joining-cncf-sandbox

### Roadmap
Enhancements being planned can be found in the [roadmap](ROADMAP.md).
config/alerts.yaml (new file): 90 lines
@@ -0,0 +1,90 @@
# etcd

- expr: avg_over_time(histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[2m]))[10m:]) > 0.01
  description: 10 minutes avg. 99th etcd fsync latency on {{$labels.pod}} higher than 10ms. {{$value}}s
  severity: warning

- expr: avg_over_time(histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[2m]))[10m:]) > 1
  description: 10 minutes avg. 99th etcd fsync latency on {{$labels.pod}} higher than 1s. {{$value}}s
  severity: error

- expr: avg_over_time(histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket[2m]))[10m:]) > 0.007
  description: 10 minutes avg. 99th etcd commit latency on {{$labels.pod}} higher than 30ms. {{$value}}s
  severity: warning

- expr: rate(etcd_server_leader_changes_seen_total[2m]) > 0
  description: etcd leader changes observed
  severity: warning

- expr: (last_over_time(etcd_mvcc_db_total_size_in_bytes[5m]) / last_over_time(etcd_server_quota_backend_bytes[5m]))*100 > 95
  description: etcd cluster database is running full.
  severity: critical

- expr: (last_over_time(etcd_mvcc_db_total_size_in_use_in_bytes[5m]) / last_over_time(etcd_mvcc_db_total_size_in_bytes[5m])) < 0.5
  description: etcd database size in use is less than 50% of the actual allocated storage.
  severity: warning

- expr: rate(etcd_server_proposals_failed_total{job=~".*etcd.*"}[15m]) > 5
  description: etcd cluster has high number of proposal failures.
  severity: warning

- expr: histogram_quantile(0.99, rate(etcd_network_peer_round_trip_time_seconds_bucket{job=~".*etcd.*"}[5m])) > 0.15
  description: etcd cluster member communication is slow.
  severity: warning

- expr: histogram_quantile(0.99, sum(rate(grpc_server_handling_seconds_bucket{job=~".*etcd.*", grpc_method!="Defragment", grpc_type="unary"}[5m])) without(grpc_type)) > 0.15
  description: etcd grpc requests are slow.
  severity: critical

- expr: 100 * sum(rate(grpc_server_handled_total{job=~".*etcd.*", grpc_code=~"Unknown|FailedPrecondition|ResourceExhausted|Internal|Unavailable|DataLoss|DeadlineExceeded"}[5m])) without (grpc_type, grpc_code) / sum(rate(grpc_server_handled_total{job=~".*etcd.*"}[5m])) without (grpc_type, grpc_code) > 5
  description: etcd cluster has high number of failed grpc requests.
  severity: critical

- expr: etcd_server_has_leader{job=~".*etcd.*"} == 0
  description: etcd cluster has no leader.
  severity: warning

- expr: sum(up{job=~".*etcd.*"} == bool 1) without (instance) < ((count(up{job=~".*etcd.*"}) without (instance) + 1) / 2)
  description: etcd cluster has insufficient number of members.
  severity: warning

- expr: max without (endpoint) ( sum without (instance) (up{job=~".*etcd.*"} == bool 0) or count without (To) ( sum without (instance) (rate(etcd_network_peer_sent_failures_total{job=~".*etcd.*"}[120s])) > 0.01 )) > 0
  description: etcd cluster members are down.
  severity: warning

# API server
- expr: avg_over_time(histogram_quantile(0.99, sum(irate(apiserver_request_duration_seconds_bucket{apiserver="kube-apiserver", verb=~"POST|PUT|DELETE|PATCH", subresource!~"log|exec|portforward|attach|proxy"}[2m])) by (le, resource, verb))[10m:]) > 1
  description: 10 minutes avg. 99th mutating API call latency for {{$labels.verb}}/{{$labels.resource}} higher than 1 second. {{$value}}s
  severity: error

- expr: avg_over_time(histogram_quantile(0.99, sum(irate(apiserver_request_duration_seconds_bucket{apiserver="kube-apiserver", verb=~"LIST|GET", subresource!~"log|exec|portforward|attach|proxy", scope="resource"}[2m])) by (le, resource, verb, scope))[5m:]) > 1
  description: 5 minutes avg. 99th read-only API call latency for {{$labels.verb}}/{{$labels.resource}} in scope {{$labels.scope}} higher than 1 second. {{$value}}s
  severity: error

- expr: avg_over_time(histogram_quantile(0.99, sum(irate(apiserver_request_duration_seconds_bucket{apiserver="kube-apiserver", verb=~"LIST|GET", subresource!~"log|exec|portforward|attach|proxy", scope="namespace"}[2m])) by (le, resource, verb, scope))[5m:]) > 5
  description: 5 minutes avg. 99th read-only API call latency for {{$labels.verb}}/{{$labels.resource}} in scope {{$labels.scope}} higher than 5 seconds. {{$value}}s
  severity: error

- expr: avg_over_time(histogram_quantile(0.99, sum(irate(apiserver_request_duration_seconds_bucket{apiserver="kube-apiserver", verb=~"LIST|GET", subresource!~"log|exec|portforward|attach|proxy", scope="cluster"}[2m])) by (le, resource, verb, scope))[5m:]) > 30
  description: 5 minutes avg. 99th read-only API call latency for {{$labels.verb}}/{{$labels.resource}} in scope {{$labels.scope}} higher than 30 seconds. {{$value}}s
  severity: error

# Control plane pods

- expr: up{job=~"crio|kubelet"} == 0
  description: "{{$labels.node}}/{{$labels.job}} down"
  severity: warning

- expr: up{job="ovnkube-node"} == 0
  description: "{{$labels.instance}}/{{$labels.pod}} {{$labels.job}} down"
  severity: warning

# Service sync latency
- expr: histogram_quantile(0.99, sum(rate(kubeproxy_network_programming_duration_seconds_bucket[2m])) by (le)) > 10
  description: 99th Kubeproxy network programming latency higher than 10 seconds. {{$value}}s
  severity: warning

# Prometheus alerts
- expr: ALERTS{severity="critical", alertstate="firing"} > 0
  description: Critical prometheus alert. {{$labels.alertname}}
  severity: warning
@@ -51,15 +51,11 @@ cerberus:
performance_monitoring:
  deploy_dashboards: False # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift
  repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
  kube_burner_binary_url: "https://github.com/cloud-bulldozer/kube-burner/releases/download/v1.7.0/kube-burner-1.7.0-Linux-x86_64.tar.gz"
  capture_metrics: False
  config_path: config/kube_burner.yaml # Define the Elasticsearch url and index name in this config
  metrics_profile_path: config/metrics-aggregated.yaml
  prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
  prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
  uuid: # uuid for the run is generated by default if not set
  enable_alerts: False # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
  alert_profile: config/alerts # Path or URL to alert profile with the prometheus queries
  alert_profile: config/alerts.yaml # Path or URL to alert profile with the prometheus queries
  check_critical_alerts: False # When enabled will check prometheus for critical alerts firing post chaos
tunings:
  wait_duration: 60 # Duration to wait between each chaos scenario
@@ -90,3 +86,6 @@ telemetry:
  oc_cli_path: /usr/bin/oc # optional, if not specified will be search in $PATH
  events_backup: True # enables/disables cluster events collection
@@ -6,11 +6,7 @@ kraken:
  publish_kraken_status: True # Can be accessed at http://0.0.0.0:8081
  signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer docs/signal.md for more details
  signal_address: 0.0.0.0 # Signal listening address
  litmus_install: True # Installs specified version, set to False if it's already setup
  litmus_version: v1.13.6 # Litmus version to install
  litmus_uninstall: False # If you want to uninstall litmus if failure
  litmus_uninstall_before_run: True # If you want to uninstall litmus before a new run starts
  chaos_scenarios: # List of policies/chaos scenarios to load
  chaos_scenarios: # List of policies/chaos scenarios to load
    - plugin_scenarios:
      - scenarios/kind/scheduler.yml
    - node_scenarios:
@@ -24,15 +20,11 @@ cerberus:
performance_monitoring:
  deploy_dashboards: False # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift
  repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
  kube_burner_binary_url: "https://github.com/cloud-bulldozer/kube-burner/releases/download/v0.9.1/kube-burner-0.9.1-Linux-x86_64.tar.gz"
  capture_metrics: False
  config_path: config/kube_burner.yaml # Define the Elasticsearch url and index name in this config
  metrics_profile_path: config/metrics-aggregated.yaml
  prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
  prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
  uuid: # uuid for the run is generated by default if not set
  enable_alerts: False # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
  alert_profile: config/alerts # Path to alert profile with the prometheus queries
  alert_profile: config/alerts.yaml # Path to alert profile with the prometheus queries

tunings:
  wait_duration: 60 # Duration to wait between each chaos scenario
@@ -5,10 +5,6 @@ kraken:
  port: 8081
  publish_kraken_status: True # Can be accessed at http://0.0.0.0:8081
  signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer docs/signal.md for more details
  litmus_install: True # Installs specified version, set to False if it's already setup
  litmus_version: v1.13.6 # Litmus version to install
  litmus_uninstall: False # If you want to uninstall litmus if failure
  litmus_uninstall_before_run: True # If you want to uninstall litmus before a new run starts
  chaos_scenarios: # List of policies/chaos scenarios to load
    - container_scenarios: # List of chaos pod scenarios to load
      - - scenarios/kube/container_dns.yml
@@ -23,15 +19,11 @@ cerberus:
performance_monitoring:
  deploy_dashboards: False # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift
  repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
  kube_burner_binary_url: "https://github.com/cloud-bulldozer/kube-burner/releases/download/v0.9.1/kube-burner-0.9.1-Linux-x86_64.tar.gz"
  capture_metrics: False
  config_path: config/kube_burner.yaml # Define the Elasticsearch url and index name in this config
  metrics_profile_path: config/metrics-aggregated.yaml
  prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
  prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
  uuid: # uuid for the run is generated by default if not set
  enable_alerts: False # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
  alert_profile: config/alerts # Path to alert profile with the prometheus queries
  alert_profile: config/alerts.yaml # Path to alert profile with the prometheus queries
  check_critical_alerts: False # When enabled will check prometheus for critical alerts firing post chaos after soak time for the cluster to settle down
tunings:
  wait_duration: 60 # Duration to wait between each chaos scenario
@@ -6,9 +6,6 @@ kraken:
|
||||
signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer docs/signal.md for more details
|
||||
signal_address: 0.0.0.0 # Signal listening address
|
||||
port: 8081 # Signal port
|
||||
litmus_version: v1.13.6 # Litmus version to install
|
||||
litmus_uninstall: False # If you want to uninstall litmus if failure
|
||||
litmus_uninstall_before_run: True # If you want to uninstall litmus before a new run starts
|
||||
chaos_scenarios: # List of policies/chaos scenarios to load
|
||||
- plugin_scenarios: # List of chaos pod scenarios to load
|
||||
- scenarios/openshift/etcd.yml
|
||||
@@ -44,15 +41,13 @@ cerberus:
|
||||
performance_monitoring:
|
||||
deploy_dashboards: True # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift
|
||||
repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
|
||||
kube_burner_binary_url: "https://github.com/cloud-bulldozer/kube-burner/releases/download/v0.9.1/kube-burner-0.9.1-Linux-x86_64.tar.gz"
|
||||
capture_metrics: True
|
||||
config_path: config/kube_burner.yaml # Define the Elasticsearch url and index name in this config
|
||||
metrics_profile_path: config/metrics-aggregated.yaml
|
||||
prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
|
||||
prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
|
||||
uuid: # uuid for the run is generated by default if not set
|
||||
enable_alerts: True # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
|
||||
alert_profile: config/alerts # Path to alert profile with the prometheus queries
|
||||
alert_profile: config/alerts.yaml # Path to alert profile with the prometheus queries
|
||||
|
||||
tunings:
|
||||
wait_duration: 60 # Duration to wait between each chaos scenario
|
||||
|
||||
@@ -1,15 +0,0 @@
|
||||
---
|
||||
|
||||
global:
|
||||
writeToFile: true
|
||||
metricsDirectory: collected-metrics
|
||||
measurements:
|
||||
- name: podLatency
|
||||
esIndex: kraken
|
||||
|
||||
indexerConfig:
|
||||
enabled: true
|
||||
esServers: [http://0.0.0.0:9200] # Please change this to the respective Elasticsearch in use if you haven't run the podman-compose command to setup the infrastructure containers
|
||||
insecureSkipVerify: true
|
||||
defaultIndex: kraken
|
||||
type: elastic
|
||||
@@ -4,8 +4,6 @@ FROM mcr.microsoft.com/azure-cli:latest as azure-cli
|
||||
|
||||
FROM registry.access.redhat.com/ubi8/ubi:latest
|
||||
|
||||
LABEL org.opencontainers.image.authors="Red Hat OpenShift Chaos Engineering"
|
||||
|
||||
ENV KUBECONFIG /root/.kube/config
|
||||
|
||||
# Copy azure client binary from azure-cli image
|
||||
@@ -14,7 +12,7 @@ COPY --from=azure-cli /usr/local/bin/az /usr/bin/az
|
||||
# Install dependencies
|
||||
RUN yum install -y git python39 python3-pip jq gettext wget && \
|
||||
python3.9 -m pip install -U pip && \
|
||||
git clone https://github.com/redhat-chaos/krkn.git --branch v1.5.0 /root/kraken && \
|
||||
git clone https://github.com/krkn-chaos/krkn.git --branch v1.5.4 /root/kraken && \
|
||||
mkdir -p /root/.kube && cd /root/kraken && \
|
||||
pip3.9 install -r requirements.txt && \
|
||||
pip3.9 install virtualenv && \
|
||||
|
||||
@@ -14,7 +14,7 @@ COPY --from=azure-cli /usr/local/bin/az /usr/bin/az
|
||||
# Install dependencies
|
||||
RUN yum install -y git python39 python3-pip jq gettext wget && \
|
||||
python3.9 -m pip install -U pip && \
|
||||
git clone https://github.com/redhat-chaos/krkn.git --branch v1.5.0 /root/kraken && \
|
||||
git clone https://github.com/redhat-chaos/krkn.git --branch v1.5.4 /root/kraken && \
|
||||
mkdir -p /root/.kube && cd /root/kraken && \
|
||||
pip3.9 install -r requirements.txt && \
|
||||
pip3.9 install virtualenv && \
|
||||
|
||||
@@ -11,19 +11,18 @@ performance_monitoring:
```

### Validation and alerting based on the queries defined by the user during chaos
Takes PromQL queries as input and modifies the return code of the run to determine pass/fail. It's especially useful in case of automated runs in CI where the user won't be able to monitor the system. It uses [Kube-burner](https://kube-burner.readthedocs.io/en/latest/) under the hood. This feature can be enabled in the [config](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml) by setting the following:
Takes PromQL queries as input and modifies the return code of the run to determine pass/fail. It's especially useful in case of automated runs in CI where the user won't be able to monitor the system. This feature can be enabled in the [config](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml) by setting the following:

```
performance_monitoring:
    kube_burner_binary_url: "https://github.com/cloud-bulldozer/kube-burner/releases/download/v0.9.1/kube-burner-0.9.1-Linux-x86_64.tar.gz"
    prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
    prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
    enable_alerts: True # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error.
    alert_profile: config/alerts # Path to alert profile with the prometheus queries.
    alert_profile: config/alerts.yaml # Path to alert profile with the prometheus queries.
```
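To make the pass/fail mechanism concrete, here is a minimal illustrative sketch of evaluating such an alert profile directly against the Prometheus HTTP API and turning `severity=error` hits into a non-zero exit code. It is not krkn's actual code path (which delegates the evaluation to kube-burner); the profile format and endpoint shown are assumptions for illustration only:

```
# Illustrative sketch only: turn severity=error alert hits into a non-zero exit code.
import sys
import requests
import yaml


def evaluate_alerts(prometheus_url, bearer_token, alert_profile_path):
    with open(alert_profile_path) as f:
        alerts = yaml.safe_load(f)  # assumed format: list of {expr, description, severity}
    headers = {"Authorization": f"Bearer {bearer_token}"} if bearer_token else {}
    rc = 0
    for alert in alerts:
        resp = requests.get(
            f"{prometheus_url}/api/v1/query",
            params={"query": alert["expr"]},
            headers=headers,
        )
        result = resp.json().get("data", {}).get("result", [])
        if result and alert.get("severity") == "error":
            print("critical alert fired: %s" % alert.get("description", alert["expr"]))
            rc = 1
    return rc


if __name__ == "__main__":
    sys.exit(evaluate_alerts("http://localhost:9090", None, "config/alerts.yaml"))
```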

#### Alert profile
A couple of [alert profiles](https://github.com/redhat-chaos/krkn/tree/main/config) [alerts](https://github.com/redhat-chaos/krkn/blob/main/config/alerts) are shipped by default and can be tweaked to add more queries to alert on. The user can provide a URL or path to the file in the [config](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml). The following are a few alert examples:
A couple of [alert profiles](https://github.com/redhat-chaos/krkn/tree/main/config) [alerts](https://github.com/redhat-chaos/krkn/blob/main/config/alerts.yaml) are shipped by default and can be tweaked to add more queries to alert on. The user can provide a URL or path to the file in the [config](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml). The following are a few alert examples:

```
- expr: avg_over_time(histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[2m]))[5m:]) > 0.01
```
@@ -62,7 +62,7 @@ If changes go into the main repository while you're working on your code it is b

If not already configured, set the upstream url for kraken.
```
git remote add upstream https://github.com/cloud-bulldozer/kraken.git
git remote add upstream https://github.com/redhat-chaos/krkn.git
```

Rebase to upstream master branch.
@@ -48,7 +48,7 @@ Failures in production are costly. To help mitigate risk to service health, cons


### Best Practices
Now that we understand the test methodology, let us take a look at the best practices for an OpenShift cluster. On that platform there are user applications and cluster workloads that need to be designed for stability and to provide the best user experience possible:
Now that we understand the test methodology, let us take a look at the best practices for a Kubernetes cluster. On that platform there are user applications and cluster workloads that need to be designed for stability and to provide the best user experience possible:

- Alerts with appropriate severity should get fired.
  - Alerts are key to identify when a component starts degrading, and can help focus the investigation effort on affected system components.
@@ -77,11 +77,11 @@ We want to look at this in terms of CPU, Memory, Disk, Throughput, Network etc.
|
||||
- The controller watching the component should recognize a failure as soon as possible. The component needs to have minimal initialization time to avoid extended downtime or overloading the replicas if it is a highly available configuration. The cause of failure can be because of issues with the infrastructure on top of which it is running, application failures, or because of service failures that it depends on.
|
||||
|
||||
- High Availability deployment strategy.
|
||||
- There should be multiple replicas ( both OpenShift and application control planes ) running preferably in different availability zones to survive outages while still serving the user/system requests. Avoid single points of failure.
|
||||
- There should be multiple replicas ( both Kubernetes and application control planes ) running preferably in different availability zones to survive outages while still serving the user/system requests. Avoid single points of failure.
|
||||
- Backed by persistent storage
|
||||
- It is important to have the system/application backed by persistent storage. This is especially important in cases where the application is a database or a stateful application given that a node, pod, or container failure will wipe off the data.
|
||||
|
||||
- There should be fallback routes to the backend in case of using CDN, for example, Akamai in case of console.redhat.com - a managed service deployed on top of OpenShift dedicated:
|
||||
- There should be fallback routes to the backend in case of using CDN, for example, Akamai in case of console.redhat.com - a managed service deployed on top of Kubernetes dedicated:
|
||||
- Content delivery networks (CDNs) are commonly used to host resources such as images, JavaScript files, and CSS. The average web page is nearly 2 MB in size, and offloading heavy resources to third-parties is extremely effective for reducing backend server traffic and latency. However, this makes each CDN an additional point of failure for every site that relies on it. If the CDN fails, its customers could also fail.
|
||||
- To test how the application reacts to failures, drop all network traffic between the system and CDN. The application should still serve the content to the user irrespective of the failure.
|
||||
|
||||
@@ -92,10 +92,10 @@ We want to look at this in terms of CPU, Memory, Disk, Throughput, Network etc.


### Tooling
Now that we have looked at the best practices, in this section we will go through how [Kraken](https://github.com/redhat-chaos/krkn) - a chaos testing framework - can help test the resilience of OpenShift and make sure the applications and services are following the best practices.
Now that we have looked at the best practices, in this section we will go through how [Kraken](https://github.com/redhat-chaos/krkn) - a chaos testing framework - can help test the resilience of Kubernetes and make sure the applications and services are following the best practices.

#### Workflow
Let us start by understanding the workflow of kraken: the user will start by running kraken pointed at a specific OpenShift cluster using a kubeconfig to be able to talk to the platform on top of which the OpenShift cluster is hosted. This can be done by either the oc/kubectl API or the cloud API. Based on the configuration of kraken, it will inject specific chaos scenarios as shown below, talk to [Cerberus](https://github.com/redhat-chaos/cerberus) to get the go/no-go signal representing the overall health of the cluster ( optional - can be turned off ), scrape metrics from in-cluster prometheus given a metrics profile with the promql queries and store them long term in the configured Elasticsearch ( optional - can be turned off ), evaluate the promql expressions specified in the alerts profile ( optional - can be turned off ) and aggregate everything to set the pass/fail, i.e. exit 0 or 1. More about the metrics collection, cerberus and metrics evaluation can be found in the next section.
Let us start by understanding the workflow of kraken: the user will start by running kraken pointed at a specific Kubernetes cluster using a kubeconfig to be able to talk to the platform on top of which the Kubernetes cluster is hosted. This can be done by either the oc/kubectl API or the cloud API. Based on the configuration of kraken, it will inject specific chaos scenarios as shown below, talk to [Cerberus](https://github.com/redhat-chaos/cerberus) to get the go/no-go signal representing the overall health of the cluster ( optional - can be turned off ), scrape metrics from in-cluster prometheus given a metrics profile with the promql queries and store them long term in the configured Elasticsearch ( optional - can be turned off ), evaluate the promql expressions specified in the alerts profile ( optional - can be turned off ) and aggregate everything to set the pass/fail, i.e. exit 0 or 1. More about the metrics collection, cerberus and metrics evaluation can be found in the next section.


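Before looking at the individual pieces, here is a hypothetical, heavily simplified sketch of that run loop. Every helper below is a no-op stand-in rather than krkn's real API, and the config keys are only rough approximations of krkn's config layout:

```
# Hypothetical sketch of the kraken run loop described above; helpers are no-op stand-ins.
import sys
import time


def inject_scenario(scenario, config):
    pass  # chaos injection would happen here


def cerberus_go_no_go(config, start, end):
    return True  # go/no-go signal obtained from Cerberus


def scrape_metrics(config, start, end):
    pass  # metrics scraped from prometheus and indexed into Elasticsearch


def evaluate_alerts(config, start, end):
    return True  # promql expressions from the alert profile


def run(config, scenarios):
    failed = False
    for scenario in scenarios:
        start = int(time.time())
        inject_scenario(scenario, config)
        end = int(time.time())
        perf = config.get("performance_monitoring", {})
        if config.get("cerberus", {}).get("cerberus_enabled"):
            failed |= not cerberus_go_no_go(config, start, end)
        if perf.get("capture_metrics"):
            scrape_metrics(config, start, end)
        if perf.get("enable_alerts"):
            failed |= not evaluate_alerts(config, start, end)
    sys.exit(1 if failed else 0)  # aggregated pass/fail, i.e. exit 0 or 1


run({"performance_monitoring": {}}, ["scenarios/openshift/etcd.yml"])
```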
@@ -112,15 +112,15 @@ If the monitoring tool, cerberus is enabled it will consume the signal and conti
|
||||
|
||||
### Scenarios
|
||||
|
||||
Let us take a look at how to run the chaos scenarios on your OpenShift clusters using Kraken-hub - a lightweight wrapper around Kraken to ease the runs by providing the ability to run them by just running container images using podman with parameters set as environment variables. This eliminates the need to carry around and edit configuration files and makes it easy for any CI framework integration. Here are the scenarios supported:
|
||||
Let us take a look at how to run the chaos scenarios on your Kubernetes clusters using Kraken-hub - a lightweight wrapper around Kraken to ease the runs by providing the ability to run them by just running container images using podman with parameters set as environment variables. This eliminates the need to carry around and edit configuration files and makes it easy for any CI framework integration. Here are the scenarios supported:
|
||||
|
||||
- Pod Scenarios ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/pod-scenarios.md))
|
||||
- Disrupts OpenShift/Kubernetes and applications deployed as pods:
|
||||
- Disrupts Kubernetes and applications deployed as pods:
|
||||
- Helps understand the availability of the application, the initialization timing and recovery status.
|
||||
- [Demo](https://asciinema.org/a/452351?speed=3&theme=solarized-dark)
|
||||
|
||||
- Container Scenarios ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/container-scenarios.md))
|
||||
- Disrupts OpenShift/Kubernetes and applications deployed as containers running as part of a pod(s) using a specified kill signal to mimic failures:
|
||||
- Disrupts Kubernetes and applications deployed as containers running as part of a pod(s) using a specified kill signal to mimic failures:
|
||||
- Helps understand the impact and recovery timing when the program/process running in the containers are disrupted - hangs, paused, killed etc., using various kill signals, i.e. SIGHUP, SIGTERM, SIGKILL etc.
|
||||
- [Demo](https://asciinema.org/a/BXqs9JSGDSEKcydTIJ5LpPZBM?speed=3&theme=solarized-dark)
|
||||
|
||||
@@ -134,8 +134,8 @@ Let us take a look at how to run the chaos scenarios on your OpenShift clusters
|
||||
- [Demo](https://asciinema.org/a/ANZY7HhPdWTNaWt4xMFanF6Q5)
|
||||
|
||||
- Zone Outages ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/zone-outages.md))
|
||||
- Creates outage of availability zone(s) in a targeted region in the public cloud where the OpenShift cluster is running by tweaking the network acl of the zone to simulate the failure, and that in turn will stop both ingress and egress traffic from all nodes in a particular zone for the specified duration and reverts it back to the previous state.
|
||||
- Helps understand the impact on both Kubernetes/OpenShift control plane as well as applications and services running on the worker nodes in that zone.
|
||||
- Creates outage of availability zone(s) in a targeted region in the public cloud where the Kubernetes cluster is running by tweaking the network acl of the zone to simulate the failure, and that in turn will stop both ingress and egress traffic from all nodes in a particular zone for the specified duration and reverts it back to the previous state.
|
||||
- Helps understand the impact on both the Kubernetes control plane as well as applications and services running on the worker nodes in that zone.
|
||||
- Currently, only set up for AWS cloud platform: 1 VPC and multiples subnets within the VPC can be specified.
|
||||
- [Demo](https://asciinema.org/a/452672?speed=3&theme=solarized-dark)
|
||||
|
||||
@@ -200,7 +200,7 @@ Let us take a look at few recommendations on how and where to run the chaos test
- Enable Observability:
  - Chaos Engineering Without Observability ... Is Just Chaos.
  - Make sure to have logging and monitoring installed on the cluster to help with understanding the behaviour as to why it is happening. In case of running the tests in the CI where it is not humanly possible to monitor the cluster all the time, it is recommended to leverage Cerberus to capture the state during the runs and metrics collection in Kraken to store metrics long term even after the cluster is gone.
  - Kraken ships with dashboards that will help understand API, Etcd and OpenShift cluster level stats and performance metrics.
  - Kraken ships with dashboards that will help understand API, Etcd and Kubernetes cluster level stats and performance metrics.
  - Pay attention to Prometheus alerts. Check if they are firing as expected.

- Run multiple chaos tests at once to mimic the production outages:
@@ -1,51 +0,0 @@
## Scraping and storing metrics for the run

There are cases where the state of the cluster and metrics on the cluster during the chaos test run need to be stored long term to review after the cluster is terminated, for example CI and automation test runs. To help with this, Kraken supports capturing metrics for the duration of the scenarios defined in the config and indexes them into Elasticsearch. The indexed metrics can be visualized with the help of Grafana.

It uses [Kube-burner](https://github.com/cloud-bulldozer/kube-burner) under the hood. The metrics to capture need to be defined in a metrics profile which Kraken consumes to query prometheus ( installed by default in OpenShift ) with the start and end timestamp of the run. Each run has a unique identifier ( uuid ) and all the metrics/documents in Elasticsearch will be associated with it. The uuid is generated automatically if not set in the config. This feature can be enabled in the [config](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml) by setting the following:

```
performance_monitoring:
    kube_burner_binary_url: "https://github.com/cloud-bulldozer/kube-burner/releases/download/v0.9.1/kube-burner-0.9.1-Linux-x86_64.tar.gz"
    capture_metrics: True
    config_path: config/kube_burner.yaml # Define the Elasticsearch url and index name in this config.
    metrics_profile_path: config/metrics-aggregated.yaml
    prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
    prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
    uuid: # uuid for the run is generated by default if not set.
```
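As a small illustration of how these options fit together, the sketch below shows a run uuid being generated and the scenario's start/end timestamps being handed to the indexing step. `run_scenario` and `index_metrics` are hypothetical placeholders; krkn itself shells out to `kube-burner index` with the same uuid, start, end and config arguments, as the Python module shown later in this diff does:

```
# Sketch: associate a run uuid with the captured time window before indexing.
# run_scenario and index_metrics are hypothetical placeholders, not krkn's API.
import time
import uuid


def run_scenario():
    time.sleep(1)  # stand-in for an actual chaos scenario


def index_metrics(run_uuid, start_time, end_time):
    # krkn performs this step by invoking `kube-burner index` with the same arguments
    print("indexing metrics for uuid=%s window=(%s, %s)" % (run_uuid, start_time, end_time))


run_uuid = str(uuid.uuid4())  # generated automatically when not set in the config
start_time = int(time.time())
run_scenario()
end_time = int(time.time())
index_metrics(run_uuid, start_time, end_time)
```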
### Metrics profile
|
||||
A couple of [metric profiles](https://github.com/redhat-chaos/krkn/tree/main/config), [metrics.yaml](https://github.com/redhat-chaos/krkn/blob/main/config/metrics.yaml), and [metrics-aggregated.yaml](https://github.com/redhat-chaos/krkn/blob/main/config/metrics-aggregated.yaml) are shipped by default and can be tweaked to add more metrics to capture during the run. The following are the API server metrics for example:
|
||||
|
||||
```
|
||||
metrics:
|
||||
# API server
|
||||
- query: histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{apiserver="kube-apiserver", verb!~"WATCH", subresource!="log"}[2m])) by (verb,resource,subresource,instance,le)) > 0
|
||||
metricName: API99thLatency
|
||||
|
||||
- query: sum(irate(apiserver_request_total{apiserver="kube-apiserver",verb!="WATCH",subresource!="log"}[2m])) by (verb,instance,resource,code) > 0
|
||||
metricName: APIRequestRate
|
||||
|
||||
- query: sum(apiserver_current_inflight_requests{}) by (request_kind) > 0
|
||||
metricName: APIInflightRequests
|
||||
```
|
||||
|
||||
### Indexing
|
||||
Define the Elasticsearch and index to store the metrics/documents in the kube_burner config:
|
||||
|
||||
```
|
||||
global:
|
||||
writeToFile: true
|
||||
metricsDirectory: collected-metrics
|
||||
measurements:
|
||||
- name: podLatency
|
||||
esIndex: kube-burner
|
||||
|
||||
indexerConfig:
|
||||
enabled: true
|
||||
esServers: [https://elastic.example.com:9200]
|
||||
insecureSkipVerify: true
|
||||
defaultIndex: kraken
|
||||
type: elastic
|
||||
```
|
||||
@@ -12,9 +12,9 @@ network_chaos: # Scenario to create an outage
- "ens5" # Interface name would be the Kernel host network interface name.
execution: serial|parallel # Execute each of the egress options as a single scenario(parallel) or as separate scenario(serial).
egress:
latency: 50ms
loss: 0.02 # percentage
bandwidth: 100mbit
latency: 500ms
loss: 50% # percentage
bandwidth: 10mbit
```
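For intuition, the updated egress values above translate roughly into a single netem invocation on the chosen interface. The sketch below builds that command string using the same latency→delay, loss→loss, bandwidth→rate mapping that the plugin code later in this diff uses; the exact command krkn issues (including setup, teardown and serial vs. parallel handling) differs:

```
# Rough illustration only: the egress parameters above rendered as a tc netem command.
param_map = {"latency": "delay", "loss": "loss", "bandwidth": "rate"}
egress = {"latency": "500ms", "loss": "50%", "bandwidth": "10mbit"}

cmd = "tc qdisc replace dev ens5 root netem " + " ".join(
    "%s %s" % (param_map[key], value) for key, value in egress.items()
)
print(cmd)  # tc qdisc replace dev ens5 root netem delay 500ms loss 50% rate 10mbit
```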
##### Sample scenario config for ingress traffic shaping (using a plugin)
|
||||
@@ -30,9 +30,9 @@ network_chaos: # Scenario to create an outage
|
||||
kubeconfig_path: ~/.kube/config # Path to kubernetes config file. If not specified, it defaults to ~/.kube/config
|
||||
execution_type: parallel # Execute each of the ingress options as a single scenario(parallel) or as separate scenario(serial).
|
||||
network_params:
|
||||
latency: 50ms
|
||||
loss: '0.02'
|
||||
bandwidth: 100mbit
|
||||
latency: 500ms
|
||||
loss: '50%'
|
||||
bandwidth: 10mbit
|
||||
wait_duration: 120
|
||||
test_duration: 60
|
||||
```
|
||||
|
||||
@@ -27,6 +27,15 @@ Scenario to introduce network latency, packet loss, and bandwidth restriction in
|
||||
network_params:
|
||||
latency: 500ms # Add 500ms latency to egress traffic from the pod.
|
||||
```
|
||||
##### Sample scenario config for ingress traffic shaping (using plugin)
|
||||
```
|
||||
- id: pod_ingress_shaping
|
||||
config:
|
||||
namespace: openshift-console # Required - Namespace of the pod to which filter need to be applied.
|
||||
label_selector: 'component=ui' # Applies traffic shaping to access openshift console.
|
||||
network_params:
|
||||
latency: 500ms # Add 500ms latency to egress traffic from the pod.
|
||||
```
|
||||
|
||||
##### Steps
|
||||
- Pick the pods to introduce the network anomaly either from label_selector or pod_name.
|
||||
|
||||
@@ -96,9 +96,9 @@ def set_arca_kubeconfig(engine_args: arcaflow.EngineArgs, kubeconfig_path: str):
|
||||
|
||||
with open(engine_args.config, "r") as stream:
|
||||
config_file = yaml.safe_load(stream)
|
||||
if config_file["deployer"]["type"] == "kubernetes":
|
||||
kube_connection = set_kubernetes_deployer_auth(config_file["deployer"]["connection"], context_auth)
|
||||
config_file["deployer"]["connection"]=kube_connection
|
||||
if config_file["deployers"]["image"]["deployer_name"] == "kubernetes":
|
||||
kube_connection = set_kubernetes_deployer_auth(config_file["deployers"]["image"]["connection"], context_auth)
|
||||
config_file["deployers"]["image"]["connection"]=kube_connection
|
||||
with open(engine_args.config, "w") as stream:
|
||||
yaml.safe_dump(config_file, stream,explicit_start=True, width=4096)
|
||||
|
||||
|
||||
@@ -1,116 +0,0 @@
|
||||
import subprocess
|
||||
import logging
|
||||
import urllib.request
|
||||
import shutil
|
||||
import sys
|
||||
import requests
|
||||
import tempfile
|
||||
import kraken.prometheus.client as prometheus
|
||||
from urllib.parse import urlparse
|
||||
|
||||
|
||||
def setup(url):
|
||||
"""
|
||||
Downloads and unpacks kube-burner binary
|
||||
"""
|
||||
|
||||
filename = "kube_burner.tar"
|
||||
try:
|
||||
logging.info("Fetching kube-burner binary")
|
||||
urllib.request.urlretrieve(url, filename)
|
||||
except Exception as e:
|
||||
logging.error("Failed to download kube-burner binary located at %s" % url, e)
|
||||
sys.exit(1)
|
||||
try:
|
||||
logging.info("Unpacking kube-burner tar ball")
|
||||
shutil.unpack_archive(filename)
|
||||
except Exception as e:
|
||||
logging.error("Failed to unpack the kube-burner binary tarball: %s" % e)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def scrape_metrics(
|
||||
distribution, uuid, prometheus_url, prometheus_bearer_token, start_time, end_time, config_path, metrics_profile
|
||||
):
|
||||
"""
|
||||
Scrapes metrics defined in the profile from Prometheus and indexes them into Elasticsearch
|
||||
"""
|
||||
|
||||
if not prometheus_url:
|
||||
if distribution == "openshift":
|
||||
logging.info("Looks like prometheus_url is not defined, trying to use the default instance on the cluster")
|
||||
prometheus_url, prometheus_bearer_token = prometheus.instance(
|
||||
distribution, prometheus_url, prometheus_bearer_token
|
||||
)
|
||||
else:
|
||||
logging.error("Looks like prometheus url is not defined, exiting")
|
||||
sys.exit(1)
|
||||
command = (
|
||||
"./kube-burner index --uuid "
|
||||
+ str(uuid)
|
||||
+ " -u "
|
||||
+ str(prometheus_url)
|
||||
+ " -t "
|
||||
+ str(prometheus_bearer_token)
|
||||
+ " -m "
|
||||
+ str(metrics_profile)
|
||||
+ " --start "
|
||||
+ str(start_time)
|
||||
+ " --end "
|
||||
+ str(end_time)
|
||||
+ " -c "
|
||||
+ str(config_path)
|
||||
)
|
||||
try:
|
||||
logging.info("Running kube-burner to capture the metrics: %s" % command)
|
||||
logging.info("UUID for the run: %s" % uuid)
|
||||
subprocess.run(command, shell=True, universal_newlines=True)
|
||||
except Exception as e:
|
||||
logging.error("Failed to run kube-burner, error: %s" % (e))
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def alerts(distribution, prometheus_url, prometheus_bearer_token, start_time, end_time, alert_profile):
|
||||
"""
|
||||
Scrapes metrics defined in the profile from Prometheus and alerts based on the severity defined
|
||||
"""
|
||||
|
||||
is_url = urlparse(alert_profile)
|
||||
if is_url.scheme and is_url.netloc:
|
||||
response = requests.get(alert_profile)
|
||||
temp_alerts = tempfile.NamedTemporaryFile()
|
||||
temp_alerts.write(response.content)
|
||||
temp_alerts.flush()
|
||||
alert_profile = temp_alerts.name
|
||||
|
||||
if not prometheus_url:
|
||||
if distribution == "openshift":
|
||||
logging.info("Looks like prometheus_url is not defined, trying to use the default instance on the cluster")
|
||||
prometheus_url, prometheus_bearer_token = prometheus.instance(
|
||||
distribution, prometheus_url, prometheus_bearer_token
|
||||
)
|
||||
else:
|
||||
logging.error("Looks like prometheus url is not defined, exiting")
|
||||
sys.exit(1)
|
||||
command = (
|
||||
"./kube-burner check-alerts "
|
||||
+ " -u "
|
||||
+ str(prometheus_url)
|
||||
+ " -t "
|
||||
+ str(prometheus_bearer_token)
|
||||
+ " -a "
|
||||
+ str(alert_profile)
|
||||
+ " --start "
|
||||
+ str(start_time)
|
||||
+ " --end "
|
||||
+ str(end_time)
|
||||
)
|
||||
try:
|
||||
logging.info("Running kube-burner to capture the metrics: %s" % command)
|
||||
output = subprocess.run(command, shell=True, universal_newlines=True)
|
||||
if output.returncode != 0:
|
||||
logging.error("command exited with a non-zero rc, please check the logs for errors or critical alerts")
|
||||
sys.exit(output.returncode)
|
||||
except Exception as e:
|
||||
logging.error("Failed to run kube-burner, error: %s" % (e))
|
||||
sys.exit(1)
|
||||
@@ -1,221 +0,0 @@
|
||||
import kraken.invoke.command as runcommand
|
||||
import logging
|
||||
import time
|
||||
import sys
|
||||
import requests
|
||||
import yaml
|
||||
import kraken.cerberus.setup as cerberus
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
|
||||
# krkn_lib
|
||||
# Inject litmus scenarios defined in the config
|
||||
def run(
|
||||
scenarios_list,
|
||||
config,
|
||||
litmus_uninstall,
|
||||
wait_duration,
|
||||
litmus_namespace,
|
||||
kubecli: KrknKubernetes
|
||||
):
|
||||
# Loop to run the scenarios starts here
|
||||
for l_scenario in scenarios_list:
|
||||
start_time = int(time.time())
|
||||
try:
|
||||
for item in l_scenario:
|
||||
runcommand.invoke("kubectl apply -f %s -n %s" % (item, litmus_namespace))
|
||||
if "http" in item:
|
||||
f = requests.get(item)
|
||||
yaml_item = list(yaml.safe_load_all(f.content))[0]
|
||||
else:
|
||||
with open(item, "r") as f:
|
||||
yaml_item = list(yaml.safe_load_all(f))[0]
|
||||
|
||||
if yaml_item["kind"] == "ChaosEngine":
|
||||
engine_name = yaml_item["metadata"]["name"]
|
||||
experiment_names = yaml_item["spec"]["experiments"]
|
||||
experiment_namespace = yaml_item["metadata"]["namespace"]
|
||||
if experiment_namespace != "litmus":
|
||||
logging.error(
|
||||
"Specified namespace: %s in the scenario: %s is not supported, please switch it to litmus"
|
||||
% (experiment_namespace, l_scenario)
|
||||
)
|
||||
sys.exit(1)
|
||||
for expr in experiment_names:
|
||||
expr_name = expr["name"]
|
||||
experiment_result = check_experiment(engine_name, expr_name, litmus_namespace, kubecli)
|
||||
if experiment_result:
|
||||
logging.info("Scenario: %s has been successfully injected!" % item)
|
||||
else:
|
||||
logging.info("Scenario: %s was not successfully injected, please check" % item)
|
||||
if litmus_uninstall:
|
||||
delete_chaos(litmus_namespace, kubecli)
|
||||
sys.exit(1)
|
||||
if litmus_uninstall:
|
||||
delete_chaos(litmus_namespace, kubecli)
|
||||
logging.info("Waiting for the specified duration: %s" % wait_duration)
|
||||
time.sleep(wait_duration)
|
||||
end_time = int(time.time())
|
||||
cerberus.get_status(config, start_time, end_time)
|
||||
except Exception as e:
|
||||
logging.error("Failed to run litmus scenario: %s. Encountered " "the following exception: %s" % (item, e))
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
# Install litmus and wait until pod is running
|
||||
def install_litmus(version, namespace):
|
||||
logging.info("Installing version %s of litmus in namespace %s" % (version, namespace))
|
||||
litmus_install = runcommand.invoke(
|
||||
"kubectl -n %s apply -f " "https://litmuschaos.github.io/litmus/litmus-operator-%s.yaml" % (namespace, version)
|
||||
)
|
||||
if "unable" in litmus_install:
|
||||
logging.info("Unable to install litmus because " + str(litmus_install))
|
||||
sys.exit(1)
|
||||
|
||||
runcommand.invoke(
|
||||
"oc patch -n %s deployment.apps/chaos-operator-ce --type=json --patch ' "
|
||||
'[ { "op": "add", "path": "/spec/template/spec/containers/0/env/-", '
|
||||
'"value": { "name": "ANALYTICS", "value": "FALSE" } } ]\'' % namespace
|
||||
)
|
||||
logging.info("Waiting for litmus operator to become available")
|
||||
runcommand.invoke("oc wait deploy -n %s chaos-operator-ce --for=condition=Available" % namespace)
|
||||
|
||||
|
||||
def deploy_all_experiments(version_string, namespace):
|
||||
|
||||
if not version_string.startswith("v"):
|
||||
logging.error("Incorrect version string for litmus, needs to start with 'v' " "followed by a number")
|
||||
sys.exit(1)
|
||||
version = version_string[1:]
|
||||
logging.info("Installing all litmus experiments")
|
||||
runcommand.invoke(
|
||||
"kubectl -n %s apply -f "
|
||||
"https://hub.litmuschaos.io/api/chaos/%s?file=charts/generic/experiments.yaml" % (namespace, version)
|
||||
)
|
||||
|
||||
|
||||
# krkn_lib
|
||||
def wait_for_initialized(engine_name, experiment_name, namespace, kubecli: KrknKubernetes):
|
||||
|
||||
chaos_engine = kubecli.get_litmus_chaos_object(kind='chaosengine', name=engine_name,
|
||||
namespace=namespace).engineStatus
|
||||
engine_status = chaos_engine.strip()
|
||||
max_tries = 30
|
||||
engine_counter = 0
|
||||
while engine_status.lower() != "initialized":
|
||||
time.sleep(10)
|
||||
logging.info("Waiting for " + experiment_name + " to be initialized")
|
||||
chaos_engine = kubecli.get_litmus_chaos_object(kind='chaosengine', name=engine_name,
|
||||
namespace=namespace).engineStatus
|
||||
engine_status = chaos_engine.strip()
|
||||
if engine_counter >= max_tries:
|
||||
logging.error("Chaos engine " + experiment_name + " took longer than 5 minutes to be initialized")
|
||||
return False
|
||||
engine_counter += 1
|
||||
# need to see if error in run
|
||||
if "notfound" in engine_status.lower():
|
||||
logging.info("Chaos engine was not found")
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
# krkn_lib
|
||||
def wait_for_status(
|
||||
engine_name,
|
||||
expected_status,
|
||||
experiment_name,
|
||||
namespace,
|
||||
kubecli: KrknKubernetes
|
||||
):
|
||||
|
||||
if expected_status == "running":
|
||||
response = wait_for_initialized(engine_name, experiment_name, namespace, kubecli)
|
||||
if not response:
|
||||
logging.info("Chaos engine never initialized, exiting")
|
||||
return False
|
||||
chaos_engine = kubecli.get_litmus_chaos_object(kind='chaosengine', name=engine_name,
|
||||
namespace=namespace).expStatus
|
||||
engine_status = chaos_engine.strip()
|
||||
max_tries = 30
|
||||
engine_counter = 0
|
||||
while engine_status.lower() != expected_status:
|
||||
time.sleep(10)
|
||||
logging.info("Waiting for " + experiment_name + " to be " + expected_status)
|
||||
chaos_engine = kubecli.get_litmus_chaos_object(kind='chaosengine', name=engine_name,
|
||||
namespace=namespace).expStatus
|
||||
engine_status = chaos_engine.strip()
|
||||
if engine_counter >= max_tries:
|
||||
logging.error("Chaos engine " + experiment_name + " took longer than 5 minutes to be " + expected_status)
|
||||
return False
|
||||
engine_counter += 1
|
||||
# need to see if error in run
|
||||
if "notfound" in engine_status.lower():
|
||||
logging.info("Chaos engine was not found")
|
||||
return False
|
||||
return True
|
||||
|
||||
|
||||
# Check status of experiment
|
||||
# krkn_lib
|
||||
def check_experiment(engine_name, experiment_name, namespace, kubecli: KrknKubernetes):
|
||||
|
||||
wait_response = wait_for_status(engine_name, "running", experiment_name, namespace, kubecli)
|
||||
|
||||
if wait_response:
|
||||
wait_for_status(engine_name, "completed", experiment_name, namespace, kubecli)
|
||||
else:
|
||||
sys.exit(1)
|
||||
|
||||
chaos_result = kubecli.get_litmus_chaos_object(kind='chaosresult', name=engine_name+'-'+experiment_name,
|
||||
namespace=namespace).verdict
|
||||
if chaos_result == "Pass":
|
||||
logging.info("Engine " + str(engine_name) + " finished with status " + str(chaos_result))
|
||||
return True
|
||||
else:
|
||||
chaos_result = kubecli.get_litmus_chaos_object(kind='chaosresult', name=engine_name+'-'+experiment_name,
|
||||
namespace=namespace).failStep
|
||||
logging.info("Chaos scenario:" + engine_name + " failed with error: " + str(chaos_result))
|
||||
logging.info(
|
||||
"See 'kubectl get chaosresult %s"
|
||||
"-%s -n %s -o yaml' for full results" % (engine_name, experiment_name, namespace)
|
||||
)
|
||||
return False
|
||||
|
||||
|
||||
# Delete all chaos engines in a given namespace
|
||||
# krkn_lib
|
||||
def delete_chaos_experiments(namespace, kubecli: KrknKubernetes):
|
||||
|
||||
if kubecli.check_if_namespace_exists(namespace):
|
||||
chaos_exp_exists = runcommand.invoke_no_exit("kubectl get chaosexperiment")
|
||||
if "returned non-zero exit status 1" not in chaos_exp_exists:
|
||||
logging.info("Deleting all litmus experiments")
|
||||
runcommand.invoke("kubectl delete chaosexperiment --all -n " + str(namespace))
|
||||
|
||||
|
||||
# Delete all chaos engines in a given namespace
|
||||
# krkn_lib
|
||||
def delete_chaos(namespace, kubecli:KrknKubernetes):
|
||||
|
||||
if kubecli.check_if_namespace_exists(namespace):
|
||||
logging.info("Deleting all litmus run objects")
|
||||
chaos_engine_exists = runcommand.invoke_no_exit("kubectl get chaosengine")
|
||||
if "returned non-zero exit status 1" not in chaos_engine_exists:
|
||||
runcommand.invoke("kubectl delete chaosengine --all -n " + str(namespace))
|
||||
chaos_result_exists = runcommand.invoke_no_exit("kubectl get chaosresult")
|
||||
if "returned non-zero exit status 1" not in chaos_result_exists:
|
||||
runcommand.invoke("kubectl delete chaosresult --all -n " + str(namespace))
|
||||
else:
|
||||
logging.info(namespace + " namespace doesn't exist")
|
||||
|
||||
|
||||
# krkn_lib
|
||||
def uninstall_litmus(version, litmus_namespace, kubecli: KrknKubernetes):
|
||||
|
||||
if kubecli.check_if_namespace_exists(litmus_namespace):
|
||||
logging.info("Uninstalling Litmus operator")
|
||||
runcommand.invoke_no_exit(
|
||||
"kubectl delete -n %s -f "
|
||||
"https://litmuschaos.github.io/litmus/litmus-operator-%s.yaml" % (litmus_namespace, version)
|
||||
)
|
||||
logging.info("Deleting litmus crd")
|
||||
runcommand.invoke_no_exit("kubectl get crds | grep litmus | awk '{print $1}' | xargs -I {} oc delete crd/{}")
|
||||
@@ -15,6 +15,8 @@ import kraken.cerberus.setup as cerberus
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
|
||||
from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from krkn_lib.utils.functions import get_yaml_item_value
|
||||
|
||||
node_general = False
|
||||
|
||||
|
||||
|
||||
@@ -14,6 +14,7 @@ from kraken.plugins.network.ingress_shaping import network_chaos
|
||||
from kraken.plugins.pod_network_outage.pod_network_outage_plugin import pod_outage
|
||||
from kraken.plugins.pod_network_outage.pod_network_outage_plugin import pod_egress_shaping
|
||||
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
|
||||
from kraken.plugins.pod_network_outage.pod_network_outage_plugin import pod_ingress_shaping
|
||||
from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from krkn_lib.utils.functions import log_exception
|
||||
|
||||
@@ -223,7 +224,13 @@ PLUGINS = Plugins(
|
||||
[
|
||||
"error"
|
||||
]
|
||||
)
|
||||
),
|
||||
PluginStep(
|
||||
pod_ingress_shaping,
|
||||
[
|
||||
"error"
|
||||
]
|
||||
)
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
@@ -269,6 +269,85 @@ def apply_outage_policy(
|
||||
return job_list
|
||||
|
||||
|
||||
def apply_ingress_policy(
|
||||
mod: str,
|
||||
node: str,
|
||||
ips: typing.List[str],
|
||||
job_template,
|
||||
pod_template,
|
||||
network_params: typing.Dict[str, str],
|
||||
duration: str,
|
||||
bridge_name: str,
|
||||
kubecli: KrknKubernetes,
|
||||
test_execution: str,
|
||||
) -> typing.List[str]:
|
||||
"""
|
||||
Function that applies ingress traffic shaping to pod interface.
|
||||
|
||||
Args:
|
||||
|
||||
mod (String)
|
||||
- Traffic shaping filter to apply
|
||||
|
||||
node (String)
|
||||
- node associated with the pod
|
||||
|
||||
ips (List)
|
||||
- IPs of pods found in the node
|
||||
|
||||
job_template (jinja2.environment.Template)
|
||||
- The YAML template used to instantiate a job to apply and remove
|
||||
the filters on the interfaces
|
||||
|
||||
pod_template (jinja2.environment.Template)
|
||||
- The YAML template used to instantiate a pod to query
|
||||
the node's interface
|
||||
|
||||
network_params (Dictionary with key and value as string)
|
||||
- Loss/Delay/Bandwidth and their corresponding value
|
||||
|
||||
duration (string)
|
||||
- Duration for which the traffic control is to be done
|
||||
|
||||
bridge_name (string):
|
||||
- bridge to which filter rules need to be applied
|
||||
|
||||
kubecli (KrknKubernetes)
|
||||
- Object to interact with Kubernetes Python client
|
||||
|
||||
test_execution (String)
|
||||
- The order in which the filters are applied
|
||||
|
||||
Returns:
|
||||
The name of the job created that executes the traffic shaping
|
||||
filter
|
||||
"""
|
||||
|
||||
job_list = []
|
||||
|
||||
create_virtual_interfaces(kubecli, len(ips), node, pod_template)
|
||||
|
||||
for count, pod_ip in enumerate(set(ips)):
|
||||
pod_inf = get_pod_interface(
|
||||
node, pod_ip, pod_template, bridge_name, kubecli)
|
||||
exec_cmd = get_ingress_cmd(
|
||||
test_execution, pod_inf, mod, count, network_params, duration
|
||||
)
|
||||
logging.info("Executing %s on pod %s in node %s" %
|
||||
(exec_cmd, pod_ip, node))
|
||||
job_body = yaml.safe_load(
|
||||
job_template.render(jobname=mod + str(pod_ip),
|
||||
nodename=node, cmd=exec_cmd)
|
||||
)
|
||||
job_list.append(job_body["metadata"]["name"])
|
||||
api_response = kubecli.create_job(job_body)
|
||||
if api_response is None:
|
||||
raise Exception("Error creating job")
|
||||
if pod_ip == node:
|
||||
break
|
||||
return job_list
|
||||
|
||||
|
||||
def apply_net_policy(
|
||||
mod: str,
|
||||
node: str,
|
||||
@@ -325,7 +404,7 @@ def apply_net_policy(
|
||||
|
||||
job_list = []
|
||||
|
||||
for pod_ip in ips:
|
||||
for pod_ip in set(ips):
|
||||
pod_inf = get_pod_interface(
|
||||
node, pod_ip, pod_template, bridge_name, kubecli)
|
||||
exec_cmd = get_egress_cmd(
|
||||
@@ -344,6 +423,64 @@ def apply_net_policy(
|
||||
return job_list
|
||||
|
||||
|
||||
def get_ingress_cmd(
|
||||
execution: str,
|
||||
test_interface: str,
|
||||
mod: str,
|
||||
count: int,
|
||||
vallst: typing.List[str],
|
||||
duration: str,
|
||||
) -> str:
|
||||
"""
|
||||
Function generates ingress filter to apply on pod
|
||||
|
||||
Args:
|
||||
execution (str):
|
||||
- The order in which the filters are applied
|
||||
|
||||
test_interface (str):
|
||||
- Pod interface
|
||||
|
||||
mod (str):
|
||||
- Filter to apply
|
||||
|
||||
count (int):
|
||||
- IFB device number
|
||||
|
||||
vallst (typing.List[str]):
|
||||
- List of filters to apply
|
||||
|
||||
duration (str):
|
||||
- Duration for which the traffic control is to be done
|
||||
|
||||
Returns:
|
||||
str: ingress filter
|
||||
"""
|
||||
ifb_dev = 'ifb{0}'.format(count)
|
||||
tc_set = tc_unset = tc_ls = ""
|
||||
param_map = {"latency": "delay", "loss": "loss", "bandwidth": "rate"}
|
||||
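# netem can only shape egress traffic, so ingress shaping is done indirectly:
# attach an ingress qdisc to the pod interface, redirect (mirred) its traffic to
# an IFB device, and apply the netem qdisc on the IFB device instead.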
tc_set = "tc qdisc add dev {0} ingress ;".format(test_interface)
|
||||
tc_set = "{0} tc filter add dev {1} ingress matchall action mirred egress redirect dev {2} ;".format(
|
||||
tc_set, test_interface, ifb_dev)
|
||||
tc_set = "{0} tc qdisc replace dev {1} root netem".format(
|
||||
tc_set, ifb_dev)
|
||||
tc_unset = "{0} tc qdisc del dev {1} root ;".format(
|
||||
tc_unset, ifb_dev)
|
||||
tc_unset = "{0} tc qdisc del dev {1} ingress".format(
|
||||
tc_unset, test_interface)
|
||||
tc_ls = "{0} tc qdisc ls dev {1} ;".format(tc_ls, ifb_dev)
|
||||
if execution == "parallel":
|
||||
for val in vallst.keys():
|
||||
tc_set += " {0} {1} ".format(param_map[val], vallst[val])
|
||||
tc_set += ";"
|
||||
else:
|
||||
tc_set += " {0} {1} ;".format(param_map[mod], vallst[mod])
|
||||
exec_cmd = "{0} {1} sleep {2};{3}".format(
|
||||
tc_set, tc_ls, duration, tc_unset)
|
||||
|
||||
return exec_cmd
|
||||
|
||||
|
||||
def get_egress_cmd(
|
||||
execution: str,
|
||||
test_interface: str,
|
||||
@@ -392,6 +529,124 @@ def get_egress_cmd(
|
||||
return exec_cmd
|
||||
|
||||
|
||||
def create_virtual_interfaces(
|
||||
kubecli: KrknKubernetes,
|
||||
number: int,
|
||||
node: str,
|
||||
pod_template
|
||||
) -> None:
|
||||
"""
|
||||
Function that creates a privileged pod and uses it to create
|
||||
virtual interfaces on the node
|
||||
|
||||
Args:
|
||||
kubecli (KrknKubernetes)
- Object to interact with the Kubernetes Python client

number (int)
- The number of virtual (IFB) interfaces to create on the node
|
||||
|
||||
node (string)
|
||||
- The node on which the virtual interfaces are created
|
||||
|
||||
pod_template (jinja2.environment.Template))
|
||||
- The YAML template used to instantiate a pod to create
|
||||
virtual interfaces on the node
|
||||
"""
|
||||
pod_body = yaml.safe_load(
|
||||
pod_template.render(nodename=node)
|
||||
)
|
||||
kubecli.create_pod(pod_body, "default", 300)
|
||||
logging.info(
|
||||
"Creating {0} virtual interfaces on node {1} using a pod".format(
|
||||
number,
|
||||
node
|
||||
)
|
||||
)
|
||||
create_ifb(kubecli, number, 'modtools')
|
||||
logging.info("Deleting pod used to create virtual interfaces")
|
||||
kubecli.delete_pod("modtools", "default")
|
||||
|
||||
|
||||
def delete_virtual_interfaces(
|
||||
kubecli: KrknKubernetes,
|
||||
node_list: typing.List[str],
|
||||
pod_template
|
||||
):
|
||||
"""
|
||||
Function that creates a privileged pod and uses it to delete all
|
||||
virtual interfaces on the specified nodes
|
||||
|
||||
Args:
|
||||
kubecli (KrknKubernetes)
- Object to interact with the Kubernetes Python client
|
||||
|
||||
node_list (List of strings)
|
||||
- The list of nodes on which the list of virtual interfaces are
|
||||
to be deleted
|
||||
|
||||
|
||||
|
||||
pod_template (jinja2.environment.Template))
|
||||
- The YAML template used to instantiate a pod to delete
|
||||
virtual interfaces on the node
|
||||
"""
|
||||
|
||||
for node in node_list:
|
||||
pod_body = yaml.safe_load(
|
||||
pod_template.render(nodename=node)
|
||||
)
|
||||
kubecli.create_pod(pod_body, "default", 300)
|
||||
logging.info(
|
||||
"Deleting all virtual interfaces on node {0}".format(node)
|
||||
)
|
||||
delete_ifb(kubecli, 'modtools')
|
||||
kubecli.delete_pod("modtools", "default")
|
||||
|
||||
|
||||
def create_ifb(kubecli: KrknKubernetes, number: int, pod_name: str):
|
||||
"""
|
||||
Function that creates virtual interfaces in a pod.
|
||||
Makes use of modprobe commands
|
||||
"""
|
||||
|
||||
exec_command = [
|
||||
'/host',
|
||||
'modprobe', 'ifb', 'numifbs=' + str(number)
|
||||
]
|
||||
kubecli.exec_cmd_in_pod(
|
||||
exec_command,
|
||||
pod_name,
|
||||
'default',
|
||||
base_command="chroot")
|
||||
|
||||
for i in range(0, number):
|
||||
exec_command = ['/host', 'ip', 'link', 'set', 'dev']
|
||||
exec_command += ['ifb' + str(i), 'up']
|
||||
kubecli.exec_cmd_in_pod(
|
||||
exec_command,
|
||||
pod_name,
|
||||
'default',
|
||||
base_command="chroot"
|
||||
)
|
||||
|
||||
|
||||
def delete_ifb(kubecli: KrknKubernetes, pod_name: str):
|
||||
"""
|
||||
Function that deletes all virtual interfaces in a pod.
|
||||
Makes use of modprobe command
|
||||
"""
|
||||
|
||||
exec_command = ['/host', 'modprobe', '-r', 'ifb']
|
||||
kubecli.exec_cmd_in_pod(
|
||||
exec_command,
|
||||
pod_name,
|
||||
'default',
|
||||
base_command="chroot")
|
||||
|
||||
|
||||
def list_bridges(
|
||||
node: str, pod_template, kubecli: KrknKubernetes
|
||||
) -> typing.List[str]:
|
||||
@@ -424,7 +679,7 @@ def list_bridges(
|
||||
)
|
||||
|
||||
if not output:
|
||||
logging.error("Exception occurred while executing command in pod")
|
||||
logging.error(f"Exception occurred while executing command {cmd} in pod")
|
||||
sys.exit(1)
|
||||
|
||||
bridges = output.split("\n")
|
||||
@@ -483,7 +738,7 @@ def check_cookie(
|
||||
)
|
||||
|
||||
if not output:
|
||||
logging.error("Exception occurred while executing command in pod")
|
||||
logging.error(f"Exception occurred while executing command {cmd} in pod")
|
||||
sys.exit(1)
|
||||
|
||||
flow_list = output.split("\n")
|
||||
@@ -525,50 +780,41 @@ def get_pod_interface(
|
||||
pod_body = yaml.safe_load(pod_template.render(nodename=node))
|
||||
logging.info("Creating pod to query pod interface on node %s" % node)
|
||||
kubecli.create_pod(pod_body, "default", 300)
|
||||
inf = ""
|
||||
|
||||
try:
|
||||
if br_name == "br-int":
|
||||
find_ip = f"external-ids:ip_addresses={ip}/23"
|
||||
else:
|
||||
find_ip = f"external-ids:ip={ip}"
|
||||
|
||||
cmd = [
|
||||
"/host",
|
||||
"ovs-ofctl",
|
||||
"-O",
|
||||
"OpenFlow13",
|
||||
"dump-flows",
|
||||
br_name,
|
||||
f"ip,nw_src={ip}",
|
||||
"ovs-vsctl",
|
||||
"--bare",
|
||||
"--columns=name",
|
||||
"find",
|
||||
"interface",
|
||||
find_ip,
|
||||
]
|
||||
|
||||
output = kubecli.exec_cmd_in_pod(
|
||||
cmd, "modtools", "default", base_command="chroot"
|
||||
)
|
||||
if not output:
|
||||
logging.error("Exception occurred while executing command in pod")
|
||||
sys.exit(1)
|
||||
|
||||
flow_lists = output.split("\n")
|
||||
port = ""
|
||||
inf = ""
|
||||
for flow in flow_lists:
|
||||
match = re.search(r".*in_port=(.*),nw_src=.*", flow)
|
||||
if match is not None:
|
||||
port = match.group(1)
|
||||
exit
|
||||
if not re.findall("\\D", port):
|
||||
cmd = ["/host", "ovs-ofctl", "-O",
|
||||
"OpenFlow13", "dump-ports-desc", br_name]
|
||||
cmd= [
|
||||
"/host",
|
||||
"ip",
|
||||
"addr",
|
||||
"show"
|
||||
]
|
||||
output = kubecli.exec_cmd_in_pod(
|
||||
cmd, "modtools", "default", base_command="chroot"
|
||||
)
|
||||
if not output:
|
||||
logging.error(
|
||||
"Exception occurred while executing command in pod")
|
||||
sys.exit(1)
|
||||
ports_desc = output.split("\n")
|
||||
for desc in ports_desc:
|
||||
match = re.search(rf".*{port}\((.*)\):.*", desc)
|
||||
if match is not None:
|
||||
inf = match.group(1)
|
||||
exit
|
||||
cmd, "modtools", "default", base_command="chroot")
|
||||
for if_str in output.split("\n"):
|
||||
if re.search(ip,if_str):
|
||||
inf = if_str.split(' ')[-1]
|
||||
else:
|
||||
inf = port
|
||||
inf = output
|
||||
finally:
|
||||
logging.info("Deleting pod to query interface on node")
|
||||
kubecli.delete_pod("modtools", "default")
|
||||
@@ -1098,7 +1344,7 @@ def pod_egress_shaping(
|
||||
|
||||
for mod in mod_lst:
|
||||
for node, ips in node_dict.items():
|
||||
job_list = apply_net_policy(
|
||||
job_list.extend( apply_net_policy(
|
||||
mod,
|
||||
node,
|
||||
ips,
|
||||
@@ -1109,20 +1355,20 @@ def pod_egress_shaping(
|
||||
br_name,
|
||||
kubecli,
|
||||
params.execution_type,
|
||||
)
|
||||
if params.execution_type == "serial":
|
||||
logging.info("Waiting for serial job to finish")
|
||||
start_time = int(time.time())
|
||||
wait_for_job(job_list[:], kubecli,
|
||||
params.test_duration + 20)
|
||||
logging.info("Waiting for wait_duration %s" %
|
||||
params.test_duration)
|
||||
time.sleep(params.test_duration)
|
||||
end_time = int(time.time())
|
||||
if publish:
|
||||
cerberus.publish_kraken_status(
|
||||
config, failed_post_scenarios, start_time, end_time
|
||||
)
|
||||
))
|
||||
if params.execution_type == "serial":
|
||||
logging.info("Waiting for serial job to finish")
|
||||
start_time = int(time.time())
|
||||
wait_for_job(job_list[:], kubecli,
|
||||
params.test_duration + 20)
|
||||
logging.info("Waiting for wait_duration %s" %
|
||||
params.test_duration)
|
||||
time.sleep(params.test_duration)
|
||||
end_time = int(time.time())
|
||||
if publish:
|
||||
cerberus.publish_kraken_status(
|
||||
config, failed_post_scenarios, start_time, end_time
|
||||
)
|
||||
if params.execution_type == "parallel":
|
||||
break
|
||||
if params.execution_type == "parallel":
|
||||
@@ -1149,3 +1395,281 @@ def pod_egress_shaping(
|
||||
finally:
|
||||
logging.info("Deleting jobs(if any)")
|
||||
delete_jobs(kubecli, job_list[:])
|
||||
|
||||
|
||||
@dataclass
|
||||
class IngressParams:
|
||||
"""
|
||||
This is the data structure for the input parameters of the step defined below.
|
||||
"""
|
||||
|
||||
namespace: typing.Annotated[str, validation.min(1)] = field(
|
||||
metadata={
|
||||
"name": "Namespace",
|
||||
"description": "Namespace of the pod to which filter need to be applied"
|
||||
"for details.",
|
||||
}
|
||||
)
|
||||
|
||||
network_params: typing.Dict[str, str] = field(
|
||||
metadata={
|
||||
"name": "Network Parameters",
|
||||
"description": "The network filters that are applied on the interface. "
|
||||
"The currently supported filters are latency, "
|
||||
"loss and bandwidth",
|
||||
},
|
||||
)
|
||||
|
||||
kubeconfig_path: typing.Optional[str] = field(
|
||||
default=None,
|
||||
metadata={
|
||||
"name": "Kubeconfig path",
|
||||
"description": "Kubeconfig file as string\n"
|
||||
"See https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/ for "
|
||||
"details.",
|
||||
},
|
||||
)
|
||||
pod_name: typing.Annotated[
|
||||
typing.Optional[str],
|
||||
validation.required_if_not("label_selector"),
|
||||
] = field(
|
||||
default=None,
|
||||
metadata={
|
||||
"name": "Pod name",
|
||||
"description": "When label_selector is not specified, pod matching the name will be"
|
||||
"selected for the chaos scenario",
|
||||
},
|
||||
)
|
||||
|
||||
label_selector: typing.Annotated[
|
||||
typing.Optional[str], validation.required_if_not("pod_name")
|
||||
] = field(
|
||||
default=None,
|
||||
metadata={
|
||||
"name": "Label selector",
|
||||
"description": "Kubernetes label selector for the target pod. "
|
||||
"When pod_name is not specified, pod with matching label_selector is selected for chaos scenario",
|
||||
},
|
||||
)
|
||||
|
||||
kraken_config: typing.Optional[str] = field(
|
||||
default=None,
|
||||
metadata={
|
||||
"name": "Kraken Config",
|
||||
"description": "Path to the config file of Kraken. "
|
||||
"Set this field if you wish to publish status onto Cerberus",
|
||||
},
|
||||
)
|
||||
|
||||
test_duration: typing.Annotated[typing.Optional[int], validation.min(1)] = field(
|
||||
default=90,
|
||||
metadata={
|
||||
"name": "Test duration",
|
||||
"description": "Duration for which each step of the ingress chaos testing "
|
||||
"is to be performed.",
|
||||
},
|
||||
)
|
||||
|
||||
wait_duration: typing.Annotated[typing.Optional[int], validation.min(1)] = field(
|
||||
default=300,
|
||||
metadata={
|
||||
"name": "Wait Duration",
|
||||
"description": "Wait duration for finishing a test and its cleanup."
|
||||
"Ensure that it is significantly greater than wait_duration",
|
||||
},
|
||||
)
|
||||
|
||||
instance_count: typing.Annotated[typing.Optional[int], validation.min(1)] = field(
|
||||
default=1,
|
||||
metadata={
|
||||
"name": "Instance Count",
|
||||
"description": "Number of pods to perform action/select that match "
|
||||
"the label selector.",
|
||||
},
|
||||
)
|
||||
|
||||
execution_type: typing.Optional[str] = field(
|
||||
default="parallel",
|
||||
metadata={
|
||||
"name": "Execution Type",
|
||||
"description": "The order in which the ingress filters are applied. "
|
||||
"Execution type can be 'serial' or 'parallel'",
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class PodIngressNetShapingSuccessOutput:
|
||||
"""
|
||||
This is the output data structure for the success case.
|
||||
"""
|
||||
|
||||
test_pods: typing.List[str] = field(
|
||||
metadata={
|
||||
"name": "Test pods",
|
||||
"description": "List of test pods where the selected for chaos scenario",
|
||||
}
|
||||
)
|
||||
|
||||
network_parameters: typing.Dict[str, str] = field(
|
||||
metadata={
|
||||
"name": "Network Parameters",
|
||||
"description": "The network filters that are applied on the interfaces",
|
||||
}
|
||||
)
|
||||
|
||||
execution_type: str = field(
|
||||
metadata={
|
||||
"name": "Execution Type",
|
||||
"description": "The order in which the filters are applied",
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class PodIngressNetShapingErrorOutput:
|
||||
error: str = field(
|
||||
metadata={
|
||||
"name": "Error",
|
||||
"description": "Error message when there is a run-time error during "
|
||||
"the execution of the scenario",
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
@plugin.step(
|
||||
id="pod_ingress_shaping",
|
||||
name="Pod ingress network Shaping",
|
||||
description="Does ingress network traffic shaping at pod level",
|
||||
outputs={
|
||||
"success": PodIngressNetShapingSuccessOutput,
|
||||
"error": PodIngressNetShapingErrorOutput,
|
||||
},
|
||||
)
|
||||
def pod_ingress_shaping(
|
||||
params: IngressParams,
|
||||
) -> typing.Tuple[
|
||||
str, typing.Union[PodIngressNetShapingSuccessOutput,
|
||||
PodIngressNetShapingErrorOutput]
|
||||
]:
|
||||
"""
|
||||
Function that performs ingress pod traffic shaping based
|
||||
on the provided configuration
|
||||
|
||||
Args:
|
||||
params (IngressParams,)
|
||||
- The object containing the configuration for the scenario
|
||||
|
||||
Returns
|
||||
A 'success' or 'error' message along with their details
|
||||
"""
|
||||
|
||||
file_loader = FileSystemLoader(os.path.abspath(os.path.dirname(__file__)))
|
||||
env = Environment(loader=file_loader)
|
||||
job_template = env.get_template("job.j2")
|
||||
pod_module_template = env.get_template("pod_module.j2")
|
||||
test_namespace = params.namespace
|
||||
test_label_selector = params.label_selector
|
||||
test_pod_name = params.pod_name
|
||||
job_list = []
|
||||
publish = False
|
||||
|
||||
if params.kraken_config:
|
||||
failed_post_scenarios = ""
|
||||
try:
|
||||
with open(params.kraken_config, "r") as f:
|
||||
config = yaml.full_load(f)
|
||||
except Exception:
|
||||
logging.error("Error reading Kraken config from %s" %
|
||||
params.kraken_config)
|
||||
return "error", PodIngressNetShapingErrorOutput(format_exc())
|
||||
publish = True
|
||||
|
||||
try:
|
||||
ip_set = set()
|
||||
node_dict = {}
|
||||
label_set = set()
|
||||
param_lst = ["latency", "loss", "bandwidth"]
|
||||
mod_lst = [i for i in param_lst if i in params.network_params]
|
||||
|
||||
kubecli = KrknKubernetes(kubeconfig_path=params.kubeconfig_path)
|
||||
api_ext = client.ApiextensionsV1Api(kubecli.api_client)
|
||||
custom_obj = client.CustomObjectsApi(kubecli.api_client)
|
||||
|
||||
br_name = get_bridge_name(api_ext, custom_obj)
|
||||
pods_list = get_test_pods(
|
||||
test_pod_name, test_label_selector, test_namespace, kubecli
|
||||
)
|
||||
|
||||
while not len(pods_list) <= params.instance_count:
|
||||
pods_list.pop(random.randint(0, len(pods_list) - 1))
|
||||
for pod_name in pods_list:
|
||||
pod_stat = kubecli.read_pod(pod_name, test_namespace)
|
||||
ip_set.add(pod_stat.status.pod_ip)
|
||||
node_dict.setdefault(pod_stat.spec.node_name, [])
|
||||
node_dict[pod_stat.spec.node_name].append(pod_stat.status.pod_ip)
|
||||
for key, value in pod_stat.metadata.labels.items():
|
||||
label_set.add("%s=%s" % (key, value))
|
||||
|
||||
check_bridge_interface(
|
||||
list(node_dict.keys())[0], pod_module_template, br_name, kubecli
|
||||
)
|
||||
|
||||
for mod in mod_lst:
|
||||
for node, ips in node_dict.items():
|
||||
job_list.extend(apply_ingress_policy(
|
||||
mod,
|
||||
node,
|
||||
ips,
|
||||
job_template,
|
||||
pod_module_template,
|
||||
params.network_params,
|
||||
params.test_duration,
|
||||
br_name,
|
||||
kubecli,
|
||||
params.execution_type,
|
||||
))
|
||||
if params.execution_type == "serial":
|
||||
logging.info("Waiting for serial job to finish")
|
||||
start_time = int(time.time())
|
||||
wait_for_job(job_list[:], kubecli,
|
||||
params.test_duration + 20)
|
||||
logging.info("Waiting for wait_duration %s" %
|
||||
params.test_duration)
|
||||
time.sleep(params.test_duration)
|
||||
end_time = int(time.time())
|
||||
if publish:
|
||||
cerberus.publish_kraken_status(
|
||||
config, failed_post_scenarios, start_time, end_time
|
||||
)
|
||||
if params.execution_type == "parallel":
|
||||
break
|
||||
if params.execution_type == "parallel":
|
||||
logging.info("Waiting for parallel job to finish")
|
||||
start_time = int(time.time())
|
||||
wait_for_job(job_list[:], kubecli, params.test_duration + 300)
|
||||
logging.info("Waiting for wait_duration %s" % params.test_duration)
|
||||
time.sleep(params.test_duration)
|
||||
end_time = int(time.time())
|
||||
if publish:
|
||||
cerberus.publish_kraken_status(
|
||||
config, failed_post_scenarios, start_time, end_time
|
||||
)
|
||||
|
||||
return "success", PodIngressNetShapingSuccessOutput(
|
||||
test_pods=pods_list,
|
||||
network_parameters=params.network_params,
|
||||
execution_type=params.execution_type,
|
||||
)
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Pod network Shaping scenario exiting due to Exception - %s" % e)
|
||||
return "error", PodIngressNetShapingErrorOutput(format_exc())
|
||||
finally:
|
||||
delete_virtual_interfaces(
|
||||
kubecli,
|
||||
node_dict.keys(),
|
||||
pod_module_template
|
||||
)
|
||||
logging.info("Deleting jobs(if any)")
|
||||
delete_jobs(kubecli, job_list[:])
|
||||
|
||||
@@ -0,0 +1 @@
from .client import *
@@ -1,49 +1,30 @@
import datetime
import os.path
import urllib3
import logging
import prometheus_api_client
import sys
import kraken.invoke.command as runcommand

import yaml
from krkn_lib.prometheus.krkn_prometheus import KrknPrometheus
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def alerts(prom_cli: KrknPrometheus, start_time, end_time, alert_profile):

# Initialize the client
def initialize_prom_client(distribution, prometheus_url, prometheus_bearer_token):
global prom_cli
prometheus_url, prometheus_bearer_token = instance(distribution, prometheus_url, prometheus_bearer_token)
if prometheus_url and prometheus_bearer_token:
bearer = "Bearer " + prometheus_bearer_token
headers = {"Authorization": bearer}
try:
prom_cli = prometheus_api_client.PrometheusConnect(url=prometheus_url, headers=headers, disable_ssl=True)
except Exception as e:
logging.error("Not able to initialize the client %s" % e)
if alert_profile is None or os.path.exists(alert_profile) is False:
logging.error(f"{alert_profile} alert profile does not exist")
sys.exit(1)

with open(alert_profile) as profile:
profile_yaml = yaml.safe_load(profile)
if not isinstance(profile_yaml, list):
logging.error(f"{alert_profile} wrong file format, alert profile must be "
f"a valid yaml file containing a list of items with 3 properties: "
f"expr, description, severity" )
sys.exit(1)
else:
prom_cli = None

for alert in profile_yaml:
if list(alert.keys()).sort() != ["expr", "description", "severity"].sort():
logging.error(f"wrong alert {alert}, skipping")

# Process custom prometheus query
def process_prom_query(query):
if prom_cli:
try:
return prom_cli.custom_query(query=query, params=None)
except Exception as e:
logging.error("Failed to get the metrics: %s" % e)
sys.exit(1)
else:
logging.info("Skipping the prometheus query as the prometheus client couldn't " "be initialized\n")

# Get prometheus details
def instance(distribution, prometheus_url, prometheus_bearer_token):
if distribution == "openshift" and not prometheus_url:
url = runcommand.invoke(
r"""oc get routes -n openshift-monitoring -o=jsonpath='{.items[?(@.metadata.name=="prometheus-k8s")].spec.host}'"""  # noqa
)
prometheus_url = "https://" + url
if distribution == "openshift" and not prometheus_bearer_token:
prometheus_bearer_token = runcommand.invoke(
"oc create token -n openshift-monitoring prometheus-k8s --duration=12h "
"|| oc -n openshift-monitoring sa get-token prometheus-k8s "
"|| oc sa new-token -n openshift-monitoring prometheus-k8s"
)
return prometheus_url, prometheus_bearer_token
prom_cli.process_alert(alert,
datetime.datetime.fromtimestamp(start_time),
datetime.datetime.fromtimestamp(end_time))
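For reference, the alert profile validated above is expected to be a YAML list of entries with exactly the three properties expr, description and severity. A minimal profile, with purely illustrative PromQL expressions and severities, might look like:

```yaml
# Hypothetical alert profile; expressions and thresholds are examples only.
- expr: up{job="apiserver"} == 0
  description: API server target is down
  severity: critical
- expr: rate(http_requests_total{code=~"5.."}[5m]) > 1
  description: Elevated 5xx error rate
  severity: warning
```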
@@ -135,15 +135,22 @@ def run(scenarios_list, config, wait_duration, kubecli: KrknKubernetes, telemetr
scenario_telemetries: list[ScenarioTelemetry] = []

for shut_down_config in scenarios_list:
config_path = shut_down_config
pre_action_output = ""
if isinstance(shut_down_config, list) :
if len(shut_down_config) == 0:
raise Exception("bad config file format for shutdown scenario")

config_path = shut_down_config[0]
if len(shut_down_config) > 1:
pre_action_output = post_actions.run("", shut_down_config[1])

scenario_telemetry = ScenarioTelemetry()
scenario_telemetry.scenario = shut_down_config
scenario_telemetry.scenario = config_path
scenario_telemetry.startTimeStamp = time.time()
telemetry.set_parameters_base64(scenario_telemetry, shut_down_config[0])
if len(shut_down_config) > 1:
pre_action_output = post_actions.run("", shut_down_config[1])
else:
pre_action_output = ""
with open(shut_down_config[0], "r") as f:
telemetry.set_parameters_base64(scenario_telemetry, config_path)

with open(config_path, "r") as f:
shut_down_config_yaml = yaml.full_load(f)
shut_down_config_scenario = \
shut_down_config_yaml["cluster_shut_down_scenario"]
@@ -166,8 +173,8 @@ def run(scenarios_list, config, wait_duration, kubecli: KrknKubernetes, telemetr
)

except (RuntimeError, Exception):
log_exception(shut_down_config[0])
failed_scenarios.append(shut_down_config[0])
log_exception(config_path)
failed_scenarios.append(config_path)
scenario_telemetry.exitStatus = 1
else:
scenario_telemetry.exitStatus = 0

@@ -2,14 +2,18 @@ import datetime
import time
import logging
import re

import yaml
import random

from krkn_lib import utils
from kubernetes.client import ApiException

from ..cerberus import setup as cerberus
from ..invoke import command as runcommand
from krkn_lib.k8s import KrknKubernetes
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
from krkn_lib.models.telemetry import ScenarioTelemetry
from krkn_lib.utils.functions import get_yaml_item_value, log_exception
from krkn_lib.utils.functions import get_yaml_item_value, log_exception, get_random_string


# krkn_lib
@@ -35,13 +39,6 @@ def pod_exec(pod_name, command, namespace, container_name, kubecli:KrknKubernete
return response


def node_debug(node_name, command):
response = runcommand.invoke(
"oc debug node/" + node_name + " -- chroot /host " + command
)
return response


# krkn_lib
def get_container_name(pod_name, namespace, kubecli:KrknKubernetes, container_name=""):

@@ -65,15 +62,46 @@ def get_container_name(pod_name, namespace, kubecli:KrknKubernetes, container_na
return container_name



def skew_node(node_name: str, action: str, kubecli: KrknKubernetes):
pod_namespace = "default"
status_pod_name = f"time-skew-pod-{get_random_string(5)}"
skew_pod_name = f"time-skew-pod-{get_random_string(5)}"
ntp_enabled = True
logging.info(f'Creating pod to skew {"time" if action == "skew_time" else "date"} on node {node_name}')
status_command = ["timedatectl"]
param = "2001-01-01"
skew_command = ["timedatectl", "set-time"]
if action == "skew_time":
skew_command.append("01:01:01")
else:
skew_command.append("2001-01-01")

try:
status_response = kubecli.exec_command_on_node(node_name, status_command, status_pod_name, pod_namespace)
if "Network time on: no" in status_response:
ntp_enabled = False

logging.warning(f'ntp inactive on node {node_name}, skewing {"time" if action == "skew_time" else "date"} to {param}')
pod_exec(skew_pod_name, skew_command, pod_namespace, None, kubecli)
else:
logging.info(f'ntp active in cluster node, {"time" if action == "skew_time" else "date"} skewing will have no effect, skipping')
except ApiException:
pass
except Exception as e:
logging.error(f"failed to execute skew command in pod: {e}")
finally:
kubecli.delete_pod(status_pod_name, pod_namespace)
if not ntp_enabled :
kubecli.delete_pod(skew_pod_name, pod_namespace)



# krkn_lib
def skew_time(scenario, kubecli:KrknKubernetes):
skew_command = "date --date "
if scenario["action"] == "skew_date":
skewed_date = "00-01-01"
skew_command += skewed_date
elif scenario["action"] == "skew_time":
skewed_time = "01:01:01"
skew_command += skewed_time
if scenario["action"] not in ["skew_date","skew_time"]:
raise RuntimeError(f'{scenario["action"]} is not a valid time skew action')

if "node" in scenario["object_type"]:
node_names = []
if "object_name" in scenario.keys() and scenario["object_name"]:
@@ -83,13 +111,19 @@ def skew_time(scenario, kubecli:KrknKubernetes):
scenario["label_selector"]
):
node_names = kubecli.list_nodes(scenario["label_selector"])

for node in node_names:
node_debug(node, skew_command)
skew_node(node, scenario["action"], kubecli)
logging.info("Reset date/time on node " + str(node))
return "node", node_names

elif "pod" in scenario["object_type"]:
skew_command = "date --date "
if scenario["action"] == "skew_date":
skewed_date = "00-01-01"
skew_command += skewed_date
elif scenario["action"] == "skew_time":
skewed_time = "01:01:01"
skew_command += skewed_time
container_name = get_yaml_item_value(scenario, "container_name", "")
pod_names = []
if "object_name" in scenario.keys() and scenario["object_name"]:
@@ -241,7 +275,8 @@ def check_date_time(object_type, names, kubecli:KrknKubernetes):
if object_type == "node":
for node_name in names:
first_date_time = datetime.datetime.utcnow()
node_datetime_string = node_debug(node_name, skew_command)
check_pod_name = f"time-skew-pod-{get_random_string(5)}"
node_datetime_string = kubecli.exec_command_on_node(node_name, [skew_command], check_pod_name)
node_datetime = string_to_date(node_datetime_string)
counter = 0
while not (
@@ -252,7 +287,8 @@ def check_date_time(object_type, names, kubecli:KrknKubernetes):
"Date/time on node %s still not reset, "
"waiting 10 seconds and retrying" % node_name
)
node_datetime_string = node_debug(node_name, skew_command)

node_datetime_string = kubecli.exec_cmd_in_pod([skew_command], check_pod_name, "default")
node_datetime = string_to_date(node_datetime_string)
counter += 1
if counter > max_retries:
@@ -266,6 +302,8 @@ def check_date_time(object_type, names, kubecli:KrknKubernetes):
logging.info(
"Date in node " + str(node_name) + " reset properly"
)
kubecli.delete_pod(check_pod_name)

elif object_type == "pod":
for pod_name in names:
first_date_time = datetime.datetime.utcnow()
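Based on the keys this module reads (action, object_type, object_name, label_selector, container_name), a time-skew scenario file might look like the sketch below; the layout and values are illustrative, not taken from this change:

```yaml
# Hypothetical time skew scenario; keys mirror those consumed by skew_time()/skew_node().
time_scenarios:
  - action: skew_time                  # or skew_date
    object_type: node                  # or pod
    label_selector: node-role.kubernetes.io/worker
  - action: skew_date
    object_type: pod
    object_name: example-pod           # alternatively select by label_selector
    container_name: ""                 # optional; defaults to the first container when empty
```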
Binary file not shown (image changed: 248 KiB before, 283 KiB after).
@@ -1,8 +1,8 @@
PyYAML>=5.1
aliyun-python-sdk-core==2.13.36
aliyun-python-sdk-ecs==4.24.25
arcaflow >= 0.8.0
arcaflow-plugin-sdk >= 0.10.0
arcaflow==0.9.0
arcaflow-plugin-sdk==0.10.0
azure-identity
azure-keyvault
azure-mgmt-compute
@@ -18,15 +18,14 @@ google-api-python-client
ibm_cloud_sdk_core
ibm_vpc
itsdangerous==2.0.1
jinja2==3.0.3
krkn-lib>=1.4.1
jinja2==3.1.3
krkn-lib >= 1.4.6
kubernetes
lxml >= 4.3.0
oauth2client>=4.1.3
openshift-client
paramiko
podman-compose
prometheus_api_client
pyVmomi >= 6.7
pyfiglet
pytest
run_kraken.py (152 changed lines)
@@ -1,4 +1,5 @@
#!/usr/bin/env python
import datetime
import json
import os
import sys
@@ -8,7 +9,7 @@ import optparse
import pyfiglet
import uuid
import time
import kraken.litmus.common_litmus as common_litmus
from krkn_lib.prometheus.krkn_prometheus import KrknPrometheus
import kraken.time_actions.common_time_functions as time_actions
import kraken.performance_dashboards.setup as performance_dashboards
import kraken.pod_scenarios.setup as pod_scenarios
@@ -16,16 +17,14 @@ import kraken.service_disruption.common_service_disruption_functions as service_
import kraken.shut_down.common_shut_down_func as shut_down
import kraken.node_actions.run as nodeaction
import kraken.managedcluster_scenarios.run as managedcluster_scenarios
import kraken.kube_burner.client as kube_burner
import kraken.zone_outage.actions as zone_outages
import kraken.application_outage.actions as application_outage
import kraken.pvc.pvc_scenario as pvc_scenario
import kraken.network_chaos.actions as network_chaos
import kraken.arcaflow_plugin as arcaflow_plugin
import kraken.prometheus as prometheus_plugin
import server as server
import kraken.prometheus.client as promcli
from kraken import plugins

from krkn_lib.k8s import KrknKubernetes
from krkn_lib.ocp import KrknOpenshift
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
@@ -34,11 +33,7 @@ from krkn_lib.models.telemetry import ChaosRunTelemetry
from krkn_lib.utils import SafeLogger
from krkn_lib.utils.functions import get_yaml_item_value

KUBE_BURNER_URL = (
"https://github.com/cloud-bulldozer/kube-burner/"
"releases/download/v{version}/kube-burner-{version}-Linux-x86_64.tar.gz"
)
KUBE_BURNER_VERSION = "1.7.0"


# Main function
@@ -71,18 +66,6 @@ def main(cfg):
run_signal = get_yaml_item_value(
config["kraken"], "signal_state", "RUN"
)
litmus_install = get_yaml_item_value(
config["kraken"], "litmus_install", False
)
litmus_version = get_yaml_item_value(
config["kraken"], "litmus_version", "v1.9.1"
)
litmus_uninstall = get_yaml_item_value(
config["kraken"], "litmus_uninstall", True
)
litmus_uninstall_before_run = get_yaml_item_value(
config["kraken"], "litmus_uninstall_before_run", True
)
wait_duration = get_yaml_item_value(
config["tunings"], "wait_duration", 60
)
@@ -97,21 +80,7 @@ def main(cfg):
config["performance_monitoring"], "repo",
"https://github.com/cloud-bulldozer/performance-dashboards.git"
)
capture_metrics = get_yaml_item_value(
config["performance_monitoring"], "capture_metrics", False
)
kube_burner_url = get_yaml_item_value(
config["performance_monitoring"], "kube_burner_binary_url",
KUBE_BURNER_URL.format(version=KUBE_BURNER_VERSION),
)
config_path = get_yaml_item_value(
config["performance_monitoring"], "config_path",
"config/kube_burner.yaml"
)
metrics_profile = get_yaml_item_value(
config["performance_monitoring"], "metrics_profile_path",
"config/metrics-aggregated.yaml"
)

prometheus_url = config["performance_monitoring"].get("prometheus_url")
prometheus_bearer_token = config["performance_monitoring"].get(
"prometheus_bearer_token"
@@ -124,7 +93,8 @@ def main(cfg):
check_critical_alerts = get_yaml_item_value(
config["performance_monitoring"], "check_critical_alerts", False
)

telemetry_api_url = config["telemetry"].get("api_url")

# Initialize clients
if (not os.path.isfile(kubeconfig_path) and
not os.path.isfile("/var/run/secrets/kubernetes.io/serviceaccount/token")):
@@ -159,9 +129,6 @@ def main(cfg):
except:
kubecli.initialize_clients(None)

# KrknTelemetry init
telemetry_k8s = KrknTelemetryKubernetes(safe_logger, kubecli)
telemetry_ocp = KrknTelemetryOpenshift(safe_logger, ocpcli)


# find node kraken might be running on
@@ -191,11 +158,23 @@ def main(cfg):
cv = ""
if config["kraken"]["distribution"] == "openshift":
cv = ocpcli.get_clusterversion_string()
if prometheus_url is None:
connection_data = ocpcli.get_prometheus_api_connection_data()
prometheus_url = connection_data.endpoint
prometheus_bearer_token = connection_data.token
if cv != "":
logging.info(cv)
else:
logging.info("Cluster version CRD not detected, skipping")

# KrknTelemetry init
telemetry_k8s = KrknTelemetryKubernetes(safe_logger, kubecli)
telemetry_ocp = KrknTelemetryOpenshift(safe_logger, ocpcli)


if enable_alerts:
prometheus = KrknPrometheus(prometheus_url, prometheus_bearer_token)

logging.info("Server URL: %s" % kubecli.get_host())

# Deploy performance dashboards
@@ -224,7 +203,7 @@ def main(cfg):

# Capture the start time
start_time = int(time.time())
litmus_installed = False

chaos_telemetry = ChaosRunTelemetry()
chaos_telemetry.run_uuid = run_uuid
# Loop to run the chaos starts here
@@ -305,56 +284,9 @@ def main(cfg):
# in the config
# krkn_lib
elif scenario_type == "time_scenarios":
if distribution == "openshift":
logging.info("Running time skew scenarios")
failed_post_scenarios, scenario_telemetries = time_actions.run(scenarios_list, config, wait_duration, kubecli, telemetry_k8s)
chaos_telemetry.scenarios.extend(scenario_telemetries)
else:
logging.error(
"Litmus scenarios are currently "
"supported only on openshift"
)
sys.exit(1)

# Inject litmus based chaos scenarios
elif scenario_type == "litmus_scenarios":
if distribution == "openshift":
logging.info("Running litmus scenarios")
litmus_namespace = "litmus"
if litmus_install:
# Remove Litmus resources
# before running the scenarios
common_litmus.delete_chaos(litmus_namespace, kubecli)
common_litmus.delete_chaos_experiments(
litmus_namespace,
kubecli
)
if litmus_uninstall_before_run:
common_litmus.uninstall_litmus(
litmus_version, litmus_namespace, kubecli
)
common_litmus.install_litmus(
litmus_version, litmus_namespace
)
common_litmus.deploy_all_experiments(
litmus_version, litmus_namespace
)
litmus_installed = True
common_litmus.run(
scenarios_list,
config,
litmus_uninstall,
wait_duration,
litmus_namespace,
kubecli
)
else:
logging.error(
"Litmus scenarios are currently "
"only supported on openshift"
)
sys.exit(1)

# Inject cluster shutdown scenarios
# krkn_lib
elif scenario_type == "cluster_shut_down_scenarios":
@@ -402,11 +334,18 @@ def main(cfg):
failed_post_scenarios, scenario_telemetries = network_chaos.run(scenarios_list, config, wait_duration, kubecli, telemetry_k8s)

# Check for critical alerts when enabled
if check_critical_alerts:
if enable_alerts and check_critical_alerts :
logging.info("Checking for critical alerts firing post chaos")
promcli.initialize_prom_client(distribution, prometheus_url, prometheus_bearer_token)

##PROM
query = r"""ALERTS{severity="critical"}"""
critical_alerts = promcli.process_prom_query(query)
end_time = datetime.datetime.now()
critical_alerts = prometheus.process_prom_query_in_range(
query,
start_time = datetime.datetime.fromtimestamp(start_time),
end_time = end_time

)
critical_alerts_count = len(critical_alerts)
if critical_alerts_count > 0:
logging.error("Critical alerts are firing: %s", critical_alerts)
@@ -436,7 +375,7 @@ def main(cfg):
logging.info(f"Telemetry data:\n{decoded_chaos_run_telemetry.to_json()}")

if config["telemetry"]["enabled"]:
logging.info(f"telemetry data will be stored on s3 bucket folder: {telemetry_request_id}")
logging.info(f"telemetry data will be stored on s3 bucket folder: {telemetry_api_url}/download/{telemetry_request_id}")
logging.info(f"telemetry upload log: {safe_logger.log_file_name}")
try:
telemetry_k8s.send_telemetry(config["telemetry"], telemetry_request_id, chaos_telemetry)
@@ -454,33 +393,13 @@ def main(cfg):
else:
logging.info("telemetry collection disabled, skipping.")

# Capture the end time


# Capture metrics for the run
if capture_metrics:
logging.info("Capturing metrics")
kube_burner.setup(kube_burner_url)
kube_burner.scrape_metrics(
distribution,
run_uuid,
prometheus_url,
prometheus_bearer_token,
start_time,
end_time,
config_path,
metrics_profile,
)

# Check for the alerts specified
if enable_alerts:
logging.info("Alerts checking is enabled")
kube_burner.setup(kube_burner_url)
if alert_profile:
kube_burner.alerts(
distribution,
prometheus_url,
prometheus_bearer_token,
prometheus_plugin.alerts(
prometheus,
start_time,
end_time,
alert_profile,
@@ -489,11 +408,6 @@ def main(cfg):
logging.error("Alert profile is not defined")
sys.exit(1)

if litmus_uninstall and litmus_installed:
common_litmus.delete_chaos(litmus_namespace, kubecli)
common_litmus.delete_chaos_experiments(litmus_namespace, kubecli)
common_litmus.uninstall_litmus(litmus_version, litmus_namespace, kubecli)

if failed_post_scenarios:
logging.error(
"Post scenarios are still failing at the end of all iterations"
@@ -1,7 +1,8 @@
---
deployer:
connection: {}
type: kubernetes
deployers:
image:
connection: {}
deployer_name: kubernetes
log:
level: debug
logged_outputs:

@@ -2,13 +2,7 @@ input_list:
- cpu_count: 1
cpu_load_percentage: 80
cpu_method: all
duration: 30s
node_selector: {}
# node selector example
# node_selector:
#   kubernetes.io/hostname: master
kubeconfig: ""
duration: 1s
kubeconfig: ''
namespace: default

# duplicate this section to run simultaneous stressors in the same run

node_selector: {}

@@ -1,4 +1,4 @@
version: v0.1.0
version: v0.2.0
input:
root: RootObject
objects:
@@ -61,11 +61,15 @@ input:

steps:
kubeconfig:
plugin: quay.io/arcalot/arcaflow-plugin-kubeconfig:0.2.0
plugin:
src: quay.io/arcalot/arcaflow-plugin-kubeconfig:0.2.0
deployment_type: image
input:
kubeconfig: !expr $.input.kubeconfig
stressng:
plugin: quay.io/arcalot/arcaflow-plugin-stressng:0.5.0
plugin:
src: quay.io/arcalot/arcaflow-plugin-stressng:0.5.0
deployment_type: image
step: workload
input:
cleanup: "true"
@@ -77,7 +81,7 @@ steps:
cpu_method: !expr $.input.cpu_method
cpu_load: !expr $.input.cpu_load_percentage
deploy:
type: kubernetes
deployer_name: kubernetes
connection: !expr $.steps.kubeconfig.outputs.success.connection
pod:
metadata:

@@ -1,4 +1,4 @@
version: v0.1.0
version: v0.2.0
input:
root: RootObject
objects:

@@ -1,6 +1,7 @@
deployer:
connection: {}
type: kubernetes
deployers:
image:
connection: {}
deployer_name: kubernetes
log:
level: debug
logged_outputs:

@@ -1,4 +1,4 @@
version: v0.1.0
version: v0.2.0
input:
root: RootObject
objects:
@@ -96,11 +96,15 @@ input:

steps:
kubeconfig:
plugin: quay.io/arcalot/arcaflow-plugin-kubeconfig:0.2.0
plugin:
src: quay.io/arcalot/arcaflow-plugin-kubeconfig:0.2.0
deployment_type: image
input:
kubeconfig: !expr $.input.kubeconfig
stressng:
plugin: quay.io/arcalot/arcaflow-plugin-stressng:0.5.0
plugin:
src: quay.io/arcalot/arcaflow-plugin-stressng:0.5.0
deployment_type: image
step: workload
input:
cleanup: "true"
@@ -114,7 +118,7 @@ steps:
hdd_write_size: !expr $.input.io_block_size

deploy:
type: kubernetes
deployer_name: kubernetes
connection: !expr $.steps.kubeconfig.outputs.success.connection
pod:
metadata:

@@ -1,4 +1,4 @@
version: v0.1.0
version: v0.2.0
input:
root: RootObject
objects:

@@ -1,7 +1,8 @@
---
deployer:
connection: {}
type: kubernetes
deployers:
image:
connection: {}
deployer_name: kubernetes
log:
level: debug
logged_outputs:

@@ -1,4 +1,4 @@
version: v0.1.0
version: v0.2.0
input:
root: RootObject
objects:
@@ -53,11 +53,15 @@ input:

steps:
kubeconfig:
plugin: quay.io/arcalot/arcaflow-plugin-kubeconfig:0.2.0
plugin:
src: quay.io/arcalot/arcaflow-plugin-kubeconfig:0.2.0
deployment_type: image
input:
kubeconfig: !expr $.input.kubeconfig
stressng:
plugin: quay.io/arcalot/arcaflow-plugin-stressng:0.5.0
plugin:
src: quay.io/arcalot/arcaflow-plugin-stressng:0.5.0
deployment_type: image
step: workload
input:
cleanup: "true"
@@ -68,7 +72,7 @@ steps:
vm: !expr $.input.vm_workers
vm_bytes: !expr $.input.vm_bytes
deploy:
type: kubernetes
deployer_name: kubernetes
connection: !expr $.steps.kubeconfig.outputs.success.connection
pod:
metadata:

@@ -1,4 +1,4 @@
version: v0.1.0
version: v0.2.0
input:
root: RootObject
objects:
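Consolidating the added lines in the hunks above, a workflow step in the new Arcaflow 0.2.0 syntax declares its plugin as an image deployment; for example:

```yaml
steps:
  kubeconfig:
    plugin:
      src: quay.io/arcalot/arcaflow-plugin-kubeconfig:0.2.0
      deployment_type: image
    input:
      kubeconfig: !expr $.input.kubeconfig
```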
@@ -1,34 +0,0 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: nginx-chaos
  namespace: litmus
spec:
  # It can be true/false
  annotationCheck: 'false'
  # It can be active/stop
  engineState: 'active'
  chaosServiceAccount: litmus-sa
  monitoring: false
  # It can be delete/retain
  jobCleanUpPolicy: 'delete'
  experiments:
    - name: node-cpu-hog
      spec:
        components:
          env:
            # set chaos duration (in sec) as desired
            - name: TOTAL_CHAOS_DURATION
              value: '60'

            # Number of cores of node CPU to be consumed
            - name: NODE_CPU_CORE
              value: '1'

            # percentage of total nodes to target
            - name: NODES_AFFECTED_PERC
              value: ''

            # ENTER THE COMMA SEPARATED TARGET NODES NAME
            - name: TARGET_NODES
              value: '<node_name>'
@@ -1,35 +0,0 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: nginx-chaos
  namespace: litmus
spec:
  # It can be delete/retain
  jobCleanUpPolicy: 'retain'
  # It can be active/stop
  engineState: 'active'
  chaosServiceAccount: litmus-sa
  experiments:
    - name: node-io-stress
      spec:
        components:
          env:
            # set chaos duration (in sec) as desired
            - name: TOTAL_CHAOS_DURATION
              value: '40'

            ## specify the size as percentage of free space on the file system
            - name: FILESYSTEM_UTILIZATION_PERCENTAGE
              value: '100'

            ## Number of core of CPU
            - name: CPU
              value: '1'

            ## Total number of workers default value is 4
            - name: NUMBER_OF_WORKERS
              value: '3'

            ## enter the comma separated target nodes name
            - name: TARGET_NODES
              value: '<node_name>'
@@ -1,28 +0,0 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
  name: nginx-chaos
  namespace: litmus
spec:
  # It can be delete/retain
  jobCleanUpPolicy: 'retain'
  # It can be active/stop
  engineState: 'active'
  chaosServiceAccount: litmus-sa
  experiments:
    - name: node-memory-hog
      spec:
        components:
          env:
            # set chaos duration (in sec) as desired
            - name: TOTAL_CHAOS_DURATION
              value: '30'

            ## Specify the size as percent of total node capacity Ex: '30'
            ## Note: For consuming memory in mebibytes change the variable to MEMORY_CONSUMPTION_MEBIBYTES
            - name: MEMORY_CONSUMPTION_PERCENTAGE
              value: '30'

            # ENTER THE COMMA SEPARATED TARGET NODES NAME
            - name: TARGET_NODES
              value: '<node_name>'
scenarios/openshift/pod_ingress_shaping.yml (new file, 14 lines)
@@ -0,0 +1,14 @@
# yaml-language-server: $schema=../plugin.schema.json
- id: pod_ingress_shaping
  config:
    namespace: <namespace> # Required - Namespace of the pod to which traffic shaping needs to be applied
    label_selector: <label_selector> # When pod_name is not specified, pod with matching label_selector is selected for chaos scenario
    pod_name: <pod name> # When label_selector is not specified, pod matching the name will be selected for the chaos scenario
    network_params: # latency, loss and bandwidth are the three supported network parameters to alter for the chaos test
      latency: <time> # Value is a string. For example: 50ms
      loss: <fraction> # Loss is a fraction between 0 and 1. It has to be enclosed in quotes to treat it as a string. For example, '0.02%' (not 0.02%)
      bandwidth: <rate> # Value is a string. For example: 100mbit
    execution_type: <serial/parallel> # Used to specify whether you want to apply filters on interfaces one at a time or all at once. Default is 'parallel'
    instance_count: <number> # Number of pods to perform action/select that match the label selector
    wait_duration: <time_duration> # Default is 300. Ensure that it is at least about twice of test_duration
    test_duration: <time_duration> # Default is 90
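As a concrete illustration of the template above, a filled-in scenario (all values are examples only) could be:

```yaml
# Illustrative values; adjust the namespace, selector and rates to your cluster.
- id: pod_ingress_shaping
  config:
    namespace: test-namespace
    label_selector: app=nginx
    network_params:
      latency: 50ms
      loss: '0.02%'
      bandwidth: 100mbit
    execution_type: parallel
    instance_count: 1
    wait_duration: 300
    test_duration: 90
```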
@@ -2413,7 +2413,168 @@
"id",
"config"
]
},
{
"type": "object",
"title": "pod_ingress_shaping Arcaflow scenarios",
"properties": {
"id": {
"type": "string",
"const": "pod_ingress_shaping"
},
"config": {
"$defs": {
"IngressParams": {
"type": "object",
"properties": {
"namespace": {
"type": "string",
"minLength": 1,
"title": "Namespace",
"description": "Namespace of the pod to which the filter needs to be applied."
},
"network_params": {
"type": "object",
"propertyNames": {},
"additionalProperties": {
"type": "string"
},
"title": "Network Parameters",
"description": "The network filters that are applied on the interface. The currently supported filters are latency, loss and bandwidth"
},
"kubeconfig_path": {
"type": "string",
"title": "Kubeconfig path",
"description": "Kubeconfig file as string\nSee https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/ for details."
},
"pod_name": {
"type": "string",
"title": "Pod name",
"description": "When label_selector is not specified, pod matching the name will be selected for the chaos scenario"
},
"label_selector": {
"type": "string",
"title": "Label selector",
"description": "Kubernetes label selector for the target pod. When pod_name is not specified, pod with matching label_selector is selected for chaos scenario"
},
"kraken_config": {
"type": "string",
"title": "Kraken Config",
"description": "Path to the config file of Kraken. Set this field if you wish to publish status onto Cerberus"
},
"test_duration": {
"type": "integer",
"minimum": 1,
"default": 90,
"title": "Test duration",
"description": "Duration for which each step of the ingress chaos testing is to be performed."
},
"wait_duration": {
"type": "integer",
"minimum": 1,
"default": 300,
"title": "Wait Duration",
"description": "Wait duration for finishing a test and its cleanup. Ensure that it is significantly greater than test_duration"
},
"instance_count": {
"type": "integer",
"minimum": 1,
"default": 1,
"title": "Instance Count",
"description": "Number of pods to perform action/select that match the label selector."
},
"execution_type": {
"type": "string",
"default": "parallel",
"title": "Execution Type",
"description": "The order in which the ingress filters are applied. Execution type can be 'serial' or 'parallel'"
}
},
"required": [
"namespace"
],
"additionalProperties": false,
"dependentRequired": {}
}
},
"type": "object",
"properties": {
"namespace": {
"type": "string",
"minLength": 1,
"title": "Namespace",
"description": "Namespace of the pod to which the filter needs to be applied."
},
"network_params": {
"type": "object",
"propertyNames": {},
"additionalProperties": {
"type": "string"
},
"title": "Network Parameters",
"description": "The network filters that are applied on the interface. The currently supported filters are latency, loss and bandwidth"
},
"kubeconfig_path": {
"type": "string",
"title": "Kubeconfig path",
"description": "Kubeconfig file as string\nSee https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/ for details."
},
"pod_name": {
"type": "string",
"title": "Pod name",
"description": "When label_selector is not specified, pod matching the name will be selected for the chaos scenario"
},
"label_selector": {
"type": "string",
"title": "Label selector",
"description": "Kubernetes label selector for the target pod. When pod_name is not specified, pod with matching label_selector is selected for chaos scenario"
},
"kraken_config": {
"type": "string",
"title": "Kraken Config",
"description": "Path to the config file of Kraken. Set this field if you wish to publish status onto Cerberus"
},
"test_duration": {
"type": "integer",
"minimum": 1,
"default": 90,
"title": "Test duration",
"description": "Duration for which each step of the ingress chaos testing is to be performed."
},
"wait_duration": {
"type": "integer",
"minimum": 1,
"default": 300,
"title": "Wait Duration",
"description": "Wait duration for finishing a test and its cleanup. Ensure that it is significantly greater than test_duration"
},
"instance_count": {
"type": "integer",
"minimum": 1,
"default": 1,
"title": "Instance Count",
"description": "Number of pods to perform action/select that match the label selector."
},
"execution_type": {
"type": "string",
"default": "parallel",
"title": "Execution Type",
"description": "The order in which the ingress filters are applied. Execution type can be 'serial' or 'parallel'"
}
},
"required": [
"namespace"
],
"additionalProperties": false,
"dependentRequired": {}
}
},
"required": [
"id",
"config"
]
}

]
}
}
@@ -15,12 +15,12 @@ This tool profiles an application and gathers telemetry data such as CPU, Memory
1. To run

```
$ python3 -m venv chaos
$ python3.9 -m venv chaos
$ source chaos/bin/activate
$ git clone https://github.com/redhat-chaos/krkn.git
$ cd krkn
$ pip3 install -r requirements.txt
$ python3 utils/chaos_recommender/chaos_recommender.py
$ python3.9 utils/chaos_recommender/chaos_recommender.py
```

2. Follow the prompts to provide the required information.