Compare commits

...

10 Commits

Author SHA1 Message Date
Paige Rubendall
f154bcb692 adding krkn report location
Signed-off-by: Paige Rubendall <prubenda@redhat.com>
2024-01-25 10:45:01 -05:00
Naga Ravi Chaitanya Elluri
60ece4b1b8 Use 0.38.0 wheel version to fix security vulnerability
Reported by https://snyk.io/

Signed-off-by: Naga Ravi Chaitanya Elluri <nelluri@redhat.com>
2024-01-25 09:51:19 -05:00
Naga Ravi Chaitanya Elluri
d660542a40 Add CNCF trademark guidelines and update community members (#560)
Signed-off-by: Naga Ravi Chaitanya Elluri <nelluri@redhat.com>
2024-01-24 14:13:53 -05:00
Naga Ravi Chaitanya Elluri
2e651798fa Update redhat-chaos references with krkn-chaos
The tools are now hosted under https://github.com/krkn-chaos

Signed-off-by: Naga Ravi Chaitanya Elluri <nelluri@redhat.com>
2024-01-24 13:40:39 -05:00
Tullio Sebastiani
f801dfce54 functional tests pointing to real scenario config files
Signed-off-by: Tullio Sebastiani <tsebasti@redhat.com>

typo

Signed-off-by: Tullio Sebastiani <tsebasti@redhat.com>

app_outage fix

Signed-off-by: Tullio Sebastiani <tsebasti@redhat.com>

typo

Signed-off-by: Tullio Sebastiani <tsebasti@redhat.com>

typo

Signed-off-by: Tullio Sebastiani <tsebasti@redhat.com>
2024-01-18 12:54:39 -05:00
Tullio Sebastiani
8b95458444 Dockerfile v1.5.5 (#558)
Signed-off-by: Tullio Sebastiani <tsebasti@redhat.com>
Co-authored-by: Naga Ravi Chaitanya Elluri <nelluri@redhat.com>
2024-01-17 17:06:51 +01:00
Naga Ravi Chaitanya Elluri
ce1ae78f1f Update new references in the docs
This commit also updates the support matrix docs for the time scenarios.

Signed-off-by: Naga Ravi Chaitanya Elluri <nelluri@redhat.com>
2024-01-17 10:47:49 -05:00
Tullio Sebastiani
967753489b arcaflow hog scenarios + app outage functional tests
Signed-off-by: Tullio Sebastiani <tsebasti@redhat.com>
2024-01-17 10:40:33 -05:00
Tullio Sebastiani
aa16cb1bf2 fixed io-hog scenario (#555)
Signed-off-by: Tullio Sebastiani <tsebasti@redhat.com>
2024-01-17 16:05:35 +01:00
Tullio Sebastiani
ac47e215d8 Functional Tests porting to kubernetes (#553)
* Functional Tests porting to kubernetes

Signed-off-by: Tullio Sebastiani <tsebasti@redhat.com>
2024-01-17 09:48:43 +01:00
55 changed files with 388 additions and 466 deletions

View File

@@ -1,51 +0,0 @@
name: Build Krkn
on:
  pull_request:
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Check out code
        uses: actions/checkout@v3
      - name: Create multi-node KinD cluster
        uses: redhat-chaos/actions/kind@main
      - name: Install Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.9'
          architecture: 'x64'
      - name: Install environment
        run: |
          sudo apt-get install build-essential python3-dev
          pip install --upgrade pip
          pip install -r requirements.txt
      - name: Run unit tests
        run: python -m coverage run -a -m unittest discover -s tests -v
      - name: Run CI
        run: |
          ./CI/run.sh
          cat ./CI/results.markdown >> $GITHUB_STEP_SUMMARY
          echo >> $GITHUB_STEP_SUMMARY
      - name: Upload CI logs
        uses: actions/upload-artifact@v3
        with:
          name: ci-logs
          path: CI/out
          if-no-files-found: error
      - name: Collect coverage report
        run: |
          python -m coverage html
      - name: Publish coverage report to job summary
        run: |
          pip install html2text
          html2text --ignore-images --ignore-links -b 0 htmlcov/index.html >> $GITHUB_STEP_SUMMARY
      - name: Upload coverage data
        uses: actions/upload-artifact@v3
        with:
          name: coverage
          path: htmlcov
          if-no-files-found: error
      - name: Check CI results
        run: grep Fail CI/results.markdown && false || true

View File

@@ -1,129 +0,0 @@
on: issue_comment
jobs:
  check_user:
    # This job only runs for pull request comments
    name: Check User Authorization
    env:
      USERS: ${{vars.USERS}}
    if: contains(github.event.comment.body, '/funtest') && contains(github.event.comment.html_url, '/pull/')
    runs-on: ubuntu-latest
    steps:
      - name: Check User
        run: |
          for name in `echo $USERS`
          do
            name="${name//$'\r'/}"
            name="${name//$'\n'/}"
            if [ $name == "${{github.event.sender.login}}" ]
            then
              echo "user ${{github.event.sender.login}} authorized, action started..."
              exit 0
            fi
          done
          echo "user ${{github.event.sender.login}} is not allowed to run functional tests Action"
          exit 1
  pr_commented:
    # This job only runs for pull request comments containing /functional
    name: Functional Tests
    if: contains(github.event.comment.body, '/funtest') && contains(github.event.comment.html_url, '/pull/')
    runs-on: ubuntu-latest
    needs:
      - check_user
    steps:
      - name: Check out Kraken
        uses: actions/checkout@v3
      - name: Checkout Pull Request
        run: gh pr checkout ${{ github.event.issue.number }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Install OC CLI
        uses: redhat-actions/oc-installer@v1
        with:
          oc_version: latest
      - name: Install python 3.9
        uses: actions/setup-python@v4
        with:
          python-version: '3.9'
      - name: Setup kraken dependencies
        run: pip install -r requirements.txt
      - name: Create Workdir & export the path
        run: |
          mkdir workdir
          echo "WORKDIR_PATH=`pwd`/workdir" >> $GITHUB_ENV
      - name: Generate run id
        run: |
          echo "RUN_ID=`date +%s`" > $GITHUB_ENV
          echo "Run Id: ${RUN_ID}"
      - name: Write Pull Secret
        env:
          PULLSECRET_BASE64: ${{ secrets.PS_64 }}
        run: |
          echo "$PULLSECRET_BASE64" | base64 --decode > pullsecret.txt
      - name: Write Boot Private Key
        env:
          BOOT_KEY: ${{ secrets.CRC_KEY_FILE }}
        run: |
          echo -n "$BOOT_KEY" > key.txt
      - name: Teardown CRC (Post Action)
        uses: webiny/action-post-run@3.0.0
        id: post-run-command
        with:
          run: podman run --rm -v "${{ github.workspace }}:/workspace:z" -e AWS_ACCESS_KEY_ID="${{ secrets.AWS_ACCESS_KEY_ID }}" -e AWS_SECRET_ACCESS_KEY="${{ secrets.AWS_SECRET_ACCESS_KEY }}" -e AWS_DEFAULT_REGION=us-west-2 quay.io/crcont/crc-cloud:v0.0.2 destroy --project-name "chaos-funtest-${{ env.RUN_ID }}" --backed-url "s3://krkn-crc-state/${{ env.RUN_ID }}" --provider "aws"
      - name: Create cluster
        run: |
          podman run --name crc-cloud-create --rm \
            -v ${PWD}:/workspace:z \
            -e AWS_ACCESS_KEY_ID="${{ secrets.AWS_ACCESS_KEY_ID }}" \
            -e AWS_SECRET_ACCESS_KEY="${{ secrets.AWS_SECRET_ACCESS_KEY }}" \
            -e AWS_DEFAULT_REGION="us-west-2" \
            quay.io/crcont/crc-cloud:v0.0.2 \
            create aws \
            --project-name "chaos-funtest-${RUN_ID}" \
            --backed-url "s3://krkn-crc-state/${RUN_ID}" \
            --output "/workspace" \
            --aws-ami-id "ami-00f5eaf98cf42ef9f" \
            --pullsecret-filepath /workspace/pullsecret.txt \
            --key-filepath /workspace/key.txt
      - name: Setup kubeconfig
        continue-on-error: true
        run: |
          ssh -o StrictHostKeyChecking=no -i id_rsa core@$(cat host) "cat /opt/kubeconfig" > kubeconfig
          sed -i "s/https:\/\/api.crc.testing:6443/https:\/\/`cat host`.nip.io:6443/g" kubeconfig
          echo "KUBECONFIG=${PWD}/kubeconfig" > $GITHUB_ENV
      - name: Example deployment, GitHub Action env init
        env:
          NAMESPACE: test-namespace
          DEPLOYMENT_NAME: test-nginx
        run: ./CI/CRC/init_github_action.sh
      - name: Setup test suite
        run: |
          yq -i '.kraken.port="8081"' CI/config/common_test_config.yaml
          yq -i '.kraken.signal_address="0.0.0.0"' CI/config/common_test_config.yaml
          yq -i '.kraken.kubeconfig_path="'${KUBECONFIG}'"' CI/config/common_test_config.yaml
          echo "test_app_outages_gh" > ./CI/tests/my_tests
          echo "test_container" >> ./CI/tests/my_tests
          echo "test_namespace" >> ./CI/tests/my_tests
          echo "test_net_chaos" >> ./CI/tests/my_tests
          echo "test_time" >> ./CI/tests/my_tests
      - name: Print affected config files
        run: |
          echo -e "## CI/config/common_test_config.yaml\n\n"
          cat CI/config/common_test_config.yaml
      - name: Running test suite
        run: |
          ./CI/run.sh
      - name: Print test output
        run: cat CI/out/*
      - name: Create coverage report
        run: |
          echo "# Test results" > $GITHUB_STEP_SUMMARY
          cat CI/results.markdown >> $GITHUB_STEP_SUMMARY
          echo "# Test coverage" >> $GITHUB_STEP_SUMMARY
          python -m coverage report --format=markdown >> $GITHUB_STEP_SUMMARY

.github/workflows/tests.yml (new file, 107 lines)
View File

@@ -0,0 +1,107 @@
name: Functional & Unit Tests
on:
  pull_request:
jobs:
  tests:
    name: Functional & Unit Tests
    runs-on: ubuntu-latest
    steps:
      - name: Check out code
        uses: actions/checkout@v3
      - name: Create multi-node KinD cluster
        uses: redhat-chaos/actions/kind@main
      - name: Install Helm & add repos
        run: |
          curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
          helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
          helm repo add stable https://charts.helm.sh/stable
          helm repo update
      - name: Deploy prometheus & Port Forwarding
        run: |
          kubectl create namespace prometheus-k8s
          helm install \
            --wait --timeout 360s \
            kind-prometheus \
            prometheus-community/kube-prometheus-stack \
            --namespace prometheus-k8s \
            --set prometheus.service.nodePort=30000 \
            --set prometheus.service.type=NodePort \
            --set grafana.service.nodePort=31000 \
            --set grafana.service.type=NodePort \
            --set alertmanager.service.nodePort=32000 \
            --set alertmanager.service.type=NodePort \
            --set prometheus-node-exporter.service.nodePort=32001 \
            --set prometheus-node-exporter.service.type=NodePort
          SELECTOR=`kubectl -n prometheus-k8s get service kind-prometheus-kube-prome-prometheus -o wide --no-headers=true | awk '{ print $7 }'`
          POD_NAME=`kubectl -n prometheus-k8s get pods --selector="$SELECTOR" --no-headers=true | awk '{ print $1 }'`
          kubectl -n prometheus-k8s port-forward $POD_NAME 9090:9090 &
          sleep 5
      - name: Install Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.9'
          architecture: 'x64'
      - name: Install environment
        run: |
          sudo apt-get install build-essential python3-dev
          pip install --upgrade pip
          pip install -r requirements.txt
      # - name: Run unit tests
      #   run: python -m coverage run -a -m unittest discover -s tests -v
      - name: Deploy test workloads
        run: |
          kubectl apply -f CI/templates/outage_pod.yaml
          kubectl wait --for=condition=ready pod -l scenario=outage --timeout=300s
          kubectl apply -f CI/templates/container_scenario_pod.yaml
          kubectl wait --for=condition=ready pod -l scenario=container --timeout=300s
          kubectl create namespace namespace-scenario
          kubectl apply -f CI/templates/time_pod.yaml
          kubectl wait --for=condition=ready pod -l scenario=time-skew --timeout=300s
      - name: Get Kind nodes
        run: |
          kubectl get nodes --show-labels=true
      - name: Setup Functional Tests
        run: |
          yq -i '.kraken.distribution="kubernetes"' CI/config/common_test_config.yaml
          yq -i '.kraken.port="8081"' CI/config/common_test_config.yaml
          yq -i '.kraken.signal_address="0.0.0.0"' CI/config/common_test_config.yaml
          yq -i '.kraken.performance_monitoring="localhost:9090"' CI/config/common_test_config.yaml
          echo "test_app_outages" > ./CI/tests/functional_tests
          echo "test_container" >> ./CI/tests/functional_tests
          echo "test_namespace" >> ./CI/tests/functional_tests
          echo "test_net_chaos" >> ./CI/tests/functional_tests
          echo "test_time" >> ./CI/tests/functional_tests
          echo "test_arca_cpu_hog" >> ./CI/tests/functional_tests
          echo "test_arca_memory_hog" >> ./CI/tests/functional_tests
          echo "test_arca_io_hog" >> ./CI/tests/functional_tests
      - name: Run Functional tests
        run: |
          ./CI/run.sh
          cat ./CI/results.markdown >> $GITHUB_STEP_SUMMARY
          echo >> $GITHUB_STEP_SUMMARY
      - name: Run Unit tests
        run: python -m coverage run -a -m unittest discover -s tests -v
      - name: Upload CI logs
        uses: actions/upload-artifact@v3
        with:
          name: ci-logs
          path: CI/out
          if-no-files-found: error
      - name: Collect coverage report
        run: |
          python -m coverage html
      - name: Publish coverage report to job summary
        run: |
          pip install html2text
          html2text --ignore-images --ignore-links -b 0 htmlcov/index.html >> $GITHUB_STEP_SUMMARY
      - name: Upload coverage data
        uses: actions/upload-artifact@v3
        with:
          name: coverage
          path: htmlcov
          if-no-files-found: error
      - name: Check CI results
        run: grep Fail CI/results.markdown && false || true

.gitignore (2 changed lines)
View File

@@ -61,7 +61,7 @@ inspect.local.*
!CI/config/common_test_config.yaml
CI/out/*
CI/ci_results
CI/scenarios/*node.yaml
CI/legacy/*node.yaml
CI/results.markdown
#env

View File

@@ -1,44 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  name: $NAMESPACE
---
apiVersion: v1
kind: Service
metadata:
  name: $DEPLOYMENT_NAME-service
  namespace: $NAMESPACE
spec:
  selector:
    app: $DEPLOYMENT_NAME
  ports:
    - name: http
      port: 80
      targetPort: 8080
  type: ClusterIP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: $NAMESPACE
  name: $DEPLOYMENT_NAME-deployment
spec:
  replicas: 3
  selector:
    matchLabels:
      app: $DEPLOYMENT_NAME
  template:
    metadata:
      labels:
        app: $DEPLOYMENT_NAME
    spec:
      containers:
        - name: $DEPLOYMENT_NAME
          image: nginxinc/nginx-unprivileged:stable-alpine
          ports:
            - name: http
              containerPort: 8080

View File

@@ -1,58 +0,0 @@
#!/bin/bash
SCRIPT_PATH=./CI/CRC
DEPLOYMENT_PATH=$SCRIPT_PATH/deployment.yaml
[[ ! -f $DEPLOYMENT_PATH ]] && echo "[ERROR] please run $0 from GitHub action root directory" && exit 1
[[ -z $DEPLOYMENT_NAME ]] && echo "[ERROR] please set \$DEPLOYMENT_NAME environment variable" && exit 1
[[ -z $NAMESPACE ]] && echo "[ERROR] please set \$NAMESPACE environment variable" && exit 1
OPENSSL=`which openssl 2>/dev/null`
[[ $? != 0 ]] && echo "[ERROR]: openssl missing, please install it and try again" && exit 1
OC=`which oc 2>/dev/null`
[[ $? != 0 ]] && echo "[ERROR]: oc missing, please install it and try again" && exit 1
SED=`which sed 2>/dev/null`
[[ $? != 0 ]] && echo "[ERROR]: sed missing, please install it and try again" && exit 1
JQ=`which jq 2>/dev/null`
[[ $? != 0 ]] && echo "[ERROR]: jq missing, please install it and try again" && exit 1
ENVSUBST=`which envsubst 2>/dev/null`
[[ $? != 0 ]] && echo "[ERROR]: envsubst missing, please install it and try again" && exit 1
API_PORT="6443"
API_ADDRESS="https://api.`cat host`.nip.io:${API_PORT}"
FQN=$DEPLOYMENT_NAME.apps.$API_ADDRESS
echo "[INF] deploying example deployment: $DEPLOYMENT_NAME in namespace: $NAMESPACE"
$ENVSUBST < $DEPLOYMENT_PATH | $OC apply -f - > /dev/null 2>&1
echo "[INF] creating SSL self-signed certificates for route https://$FQN"
$OPENSSL genrsa -out servercakey.pem > /dev/null 2>&1
$OPENSSL req -new -x509 -key servercakey.pem -out serverca.crt -subj "/CN=$FQN/O=Red Hat Inc./C=US" > /dev/null 2>&1
$OPENSSL genrsa -out server.key > /dev/null 2>&1
$OPENSSL req -new -key server.key -out server_reqout.txt -subj "/CN=$FQN/O=Red Hat Inc./C=US" > /dev/null 2>&1
$OPENSSL x509 -req -in server_reqout.txt -days 3650 -sha256 -CAcreateserial -CA serverca.crt -CAkey servercakey.pem -out server.crt > /dev/null 2>&1
echo "[INF] creating deployment: $DEPLOYMENT_NAME public route: https://$FQN"
$OC create route --namespace $NAMESPACE edge --service=$DEPLOYMENT_NAME-service --cert=server.crt --key=server.key --ca-cert=serverca.crt --hostname="$FQN" > /dev/null 2>&1
echo "[INF] setting github action environment variables"
NODE_NAME="`$OC get nodes -o json | $JQ -r '.items[0].metadata.name'`"
COVERAGE_FILE="`pwd`/coverage.md"
echo "DEPLOYMENT_NAME=$DEPLOYMENT_NAME" >> $GITHUB_ENV
echo "DEPLOYMENT_FQN=$FQN" >> $GITHUB_ENV
echo "API_ADDRESS=$API_ADDRESS" >> $GITHUB_ENV
echo "API_PORT=$API_PORT" >> $GITHUB_ENV
echo "NODE_NAME=$NODE_NAME" >> $GITHUB_ENV
echo "NAMESPACE=$NAMESPACE" >> $GITHUB_ENV
echo "COVERAGE_FILE=$COVERAGE_FILE" >> $GITHUB_ENV
echo "[INF] deployment fully qualified name will be available in \${{ env.DEPLOYMENT_NAME }} with value $DEPLOYMENT_NAME"
echo "[INF] deployment name will be available in \${{ env.DEPLOYMENT_FQN }} with value $FQN"
echo "[INF] OCP API address will be available in \${{ env.API_ADDRESS }} with value $API_ADDRESS"
echo "[INF] OCP API port will be available in \${{ env.API_PORT }} with value $API_PORT"
echo "[INF] OCP node name will be available in \${{ env.NODE_NAME }} with value $NODE_NAME"
echo "[INF] coverage file will ve available in \${{ env.COVERAGE_FILE }} with value $COVERAGE_FILE"

View File

@@ -1,7 +1,7 @@
## CI Tests
### First steps
Edit [my_tests](tests/my_tests) with tests you want to run
Edit [functional_tests](tests/functional_tests) with tests you want to run
### How to run
```./CI/run.sh```
@@ -11,7 +11,7 @@ This will run kraken using python, make sure python3 is set up and configured pr
### Adding a test case
1. Add in simple scenario yaml file to execute under [../CI/scenarios/](scenarios)
1. Add in simple scenario yaml file to execute under [../CI/scenarios/](legacy)
2. Copy [test_application_outages.sh](tests/test_app_outages.sh) for example on how to get started
@@ -27,7 +27,7 @@ This will run kraken using python, make sure python3 is set up and configured pr
e. 15: Make sure name of config in line 14 matches what you pass on this line
4. Add test name to [my_tests](../CI/tests/my_tests) file
4. Add test name to [functional_tests](../CI/tests/functional_tests) file
a. This will be the name of the file without ".sh"

View File

@@ -1,15 +1,14 @@
#!/bin/bash
set -x
MAX_RETRIES=60
OC=`which oc 2>/dev/null`
[[ $? != 0 ]] && echo "[ERROR]: oc missing, please install it and try again" && exit 1
KUBECTL=`which kubectl 2>/dev/null`
[[ $? != 0 ]] && echo "[ERROR]: kubectl missing, please install it and try again" && exit 1
wait_cluster_become_ready() {
COUNT=1
until `$OC get namespace > /dev/null 2>&1`
until `$KUBECTL get namespace > /dev/null 2>&1`
do
echo "[INF] waiting OpenShift to become ready, after $COUNT check"
echo "[INF] waiting Kubernetes to become ready, after $COUNT check"
sleep 3
[[ $COUNT == $MAX_RETRIES ]] && echo "[ERR] max retries exceeded, failing" && exit 1
((COUNT++))
@@ -18,9 +17,9 @@ wait_cluster_become_ready() {
ci_tests_loc="CI/tests/my_tests"
ci_tests_loc="CI/tests/functional_tests"
echo "running test suit consisting of ${ci_tests}"
echo -e "********* Running Functional Tests Suite *********\n\n"
rm -rf CI/out
@@ -37,9 +36,31 @@ echo 'Test | Result | Duration' >> $results
echo '-----------------------|--------|---------' >> $results
# Run each test
for test_name in `cat CI/tests/my_tests`
failed_tests=()
for test_name in `cat CI/tests/functional_tests`
do
wait_cluster_become_ready
./CI/run_test.sh $test_name $results
return_value=`./CI/run_test.sh $test_name $results`
if [[ $return_value == 1 ]]
then
echo "Failed"
failed_tests+=("$test_name")
fi
wait_cluster_become_ready
done
if (( ${#failed_tests[@]}>0 ))
then
echo -e "\n\n======================================================================"
echo -e "\n FUNCTIONAL TESTS FAILED ${failed_tests[*]} ABORTING"
echo -e "\n======================================================================\n\n"
for test in "${failed_tests[@]}"
do
echo -e "\n********** $test KRKN RUN OUTPUT **********\n"
cat "CI/out/$test.out"
echo -e "\n********************************************\n\n\n\n"
done
exit 1
fi

View File

@@ -1,5 +1,4 @@
#!/bin/bash
set -x
readonly SECONDS_PER_HOUR=3600
readonly SECONDS_PER_MINUTE=60
function get_time_format() {
@@ -14,9 +13,7 @@ ci_test=`echo $1`
results_file=$2
echo -e "\n======================================================================"
echo -e " CI test for ${ci_test} "
echo -e "======================================================================\n"
echo -e "test: ${ci_test}" >&2
ci_results="CI/out/$ci_test.out"
# Test ci
@@ -28,13 +25,16 @@ then
# if the test passes update the results and complete
duration=$SECONDS
duration=$(get_time_format $duration)
echo "$ci_test: Successful"
echo -e "> $ci_test: Successful\n" >&2
echo "$ci_test | Pass | $duration" >> $results_file
count=$retries
# return value for run.sh
echo 0
else
duration=$SECONDS
duration=$(get_time_format $duration)
echo "$ci_test: Failed"
echo -e "> $ci_test: Failed\n" >&2
echo "$ci_test | Fail | $duration" >> $results_file
echo "Logs for "$ci_test
# return value for run.sh
echo 1
fi
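
The refactored run_test.sh keeps its human-readable progress on stderr (the `>&2` redirections above) and reserves stdout for a single 0/1 status that run.sh captures with command substitution. The sketch below only illustrates that calling convention; it is not the repository script itself.
```
#!/bin/bash
# Sketch of the status-over-stdout convention used between run.sh and run_test.sh.
# Log text goes to stderr so it still reaches the console/CI log, while stdout
# carries only the machine-readable result.
run_one_test() {
  echo "> running $1" >&2   # diagnostic output: stderr only
  # ... execute the scenario here ...
  echo 1                    # status: stdout only (1 = failed, 0 = passed)
}

status=$(run_one_test "test_example")   # captures stdout; stderr passes through
if [[ $status == 1 ]]; then
  echo "test_example failed" >&2
fi
```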

View File

@@ -1,5 +0,0 @@
application_outage: # Scenario to create an outage of an application by blocking traffic
  duration: 10 # Duration in seconds after which the routes will be accessible
  namespace: openshift-monitoring # Namespace to target - all application routes will go inaccessible if pod selector is empty
  pod_selector: {} # Pods to target
  block: [Ingress, Egress] # It can be Ingress or Egress or Ingress, Egress

View File

@@ -1,8 +0,0 @@
scenarios:
  - name: "kill machine config container"
    namespace: "openshift-machine-config-operator"
    label_selector: "k8s-app=machine-config-server"
    container_name: "hello-openshift"
    action: "kill 1"
    count: 1
    retry_wait: 60

View File

@@ -1,6 +0,0 @@
network_chaos: # Scenario to create an outage by simulating random variations in the network.
  duration: 10 # seconds
  instance_count: 1
  execution: serial
  egress:
    bandwidth: 100mbit

View File

@@ -1,7 +0,0 @@
scenarios:
  - action: delete
    namespace: "^$openshift-network-diagnostics$"
    label_selector:
    runs: 1
    sleep: 15
    wait_time: 30

View File

@@ -1,5 +0,0 @@
time_scenarios:
  - action: skew_time
    object_type: pod
    label_selector: k8s-app=etcd
    container_name: ""

View File

@@ -0,0 +1,16 @@
apiVersion: v1
kind: Pod
metadata:
  name: container
  labels:
    scenario: container
spec:
  hostNetwork: true
  containers:
    - name: fedtools
      image: docker.io/fedora/tools
      command:
        - /bin/sh
        - -c
        - |
          sleep infinity

View File

@@ -0,0 +1,16 @@
apiVersion: v1
kind: Pod
metadata:
  name: outage
  labels:
    scenario: outage
spec:
  hostNetwork: true
  containers:
    - name: fedtools
      image: docker.io/fedora/tools
      command:
        - /bin/sh
        - -c
        - |
          sleep infinity

View File

@@ -0,0 +1,16 @@
apiVersion: v1
kind: Pod
metadata:
  name: time-skew
  labels:
    scenario: time-skew
spec:
  hostNetwork: true
  containers:
    - name: fedtools
      image: docker.io/fedora/tools
      command:
        - /bin/sh
        - -c
        - |
          sleep infinity

View File

@@ -13,6 +13,6 @@ function error {
}
function get_node {
worker_node=$(oc get nodes --no-headers | grep worker | head -n 1)
worker_node=$(kubectl get nodes --no-headers | grep worker | head -n 1)
export WORKER_NODE=$worker_node
}

View File

@@ -0,0 +1 @@

View File

@@ -1 +0,0 @@
test_net_chaos

View File

@@ -7,9 +7,11 @@ trap finish EXIT
function functional_test_app_outage {
yq -i '.application_outage.duration=10' scenarios/openshift/app_outage.yaml
yq -i '.application_outage.pod_selector={"scenario":"outage"}' scenarios/openshift/app_outage.yaml
yq -i '.application_outage.namespace="default"' scenarios/openshift/app_outage.yaml
export scenario_type="application_outages"
export scenario_file="CI/scenarios/app_outage.yaml"
export scenario_file="scenarios/openshift/app_outage.yaml"
export post_config=""
envsubst < CI/config/common_test_config.yaml > CI/config/app_outage.yaml
python3 -m coverage run -a run_kraken.py -c CI/config/app_outage.yaml

View File

@@ -1,21 +0,0 @@
set -xeEo pipefail
source CI/tests/common.sh
trap error ERR
trap finish EXIT
function functional_test_app_outage {
[ -z $DEPLOYMENT_NAME ] && echo "[ERR] DEPLOYMENT_NAME variable not set, failing." && exit 1
yq -i '.application_outage.pod_selector={"app":"'$DEPLOYMENT_NAME'"}' CI/scenarios/app_outage.yaml
yq -i '.application_outage.namespace="'$NAMESPACE'"' CI/scenarios/app_outage.yaml
export scenario_type="application_outages"
export scenario_file="CI/scenarios/app_outage.yaml"
export post_config=""
envsubst < CI/config/common_test_config.yaml > CI/config/app_outage.yaml
python3 -m coverage run -a run_kraken.py -c CI/config/app_outage.yaml
echo "App outage scenario test: Success"
}
functional_test_app_outage

View File

@@ -0,0 +1,19 @@
set -xeEo pipefail
source CI/tests/common.sh
trap error ERR
trap finish EXIT
function functional_test_arca_cpu_hog {
yq -i '.input_list[0].node_selector={"kubernetes.io/hostname":"kind-worker2"}' scenarios/arcaflow/cpu-hog/input.yaml
export scenario_type="arcaflow_scenarios"
export scenario_file="scenarios/arcaflow/cpu-hog/input.yaml"
export post_config=""
envsubst < CI/config/common_test_config.yaml > CI/config/arca_cpu_hog.yaml
python3 -m coverage run -a run_kraken.py -c CI/config/arca_cpu_hog.yaml
echo "Arcaflow CPU Hog: Success"
}
functional_test_arca_cpu_hog

View File

@@ -0,0 +1,19 @@
set -xeEo pipefail
source CI/tests/common.sh
trap error ERR
trap finish EXIT
function functional_test_arca_io_hog {
yq -i '.input_list[0].node_selector={"kubernetes.io/hostname":"kind-worker2"}' scenarios/arcaflow/io-hog/input.yaml
export scenario_type="arcaflow_scenarios"
export scenario_file="scenarios/arcaflow/io-hog/input.yaml"
export post_config=""
envsubst < CI/config/common_test_config.yaml > CI/config/arca_io_hog.yaml
python3 -m coverage run -a run_kraken.py -c CI/config/arca_io_hog.yaml
echo "Arcaflow IO Hog: Success"
}
functional_test_arca_io_hog

View File

@@ -0,0 +1,19 @@
set -xeEo pipefail
source CI/tests/common.sh
trap error ERR
trap finish EXIT
function functional_test_arca_memory_hog {
yq -i '.input_list[0].node_selector={"kubernetes.io/hostname":"kind-worker2"}' scenarios/arcaflow/memory-hog/input.yaml
export scenario_type="arcaflow_scenarios"
export scenario_file="scenarios/arcaflow/memory-hog/input.yaml"
export post_config=""
envsubst < CI/config/common_test_config.yaml > CI/config/arca_memory_hog.yaml
python3 -m coverage run -a run_kraken.py -c CI/config/arca_memory_hog.yaml
echo "Arcaflow Memory Hog: Success"
}
functional_test_arca_memory_hog

View File

@@ -8,9 +8,11 @@ trap finish EXIT
pod_file="CI/scenarios/hello_pod.yaml"
function functional_test_container_crash {
yq -i '.scenarios[0].namespace="default"' scenarios/openshift/app_outage.yaml
yq -i '.scenarios[0].label_selector="scenario=container"' scenarios/openshift/app_outage.yaml
yq -i '.scenarios[0].container_name="fedtools"' scenarios/openshift/app_outage.yaml
export scenario_type="container_scenarios"
export scenario_file="- CI/scenarios/container_scenario.yml"
export scenario_file="- scenarios/openshift/app_outage.yaml"
export post_config=""
envsubst < CI/config/common_test_config.yaml > CI/config/container_config.yaml

View File

@@ -7,12 +7,13 @@ trap finish EXIT
function funtional_test_namespace_deletion {
export scenario_type="namespace_scenarios"
export scenario_file="- CI/scenarios/network_diagnostics_namespace.yaml"
export scenario_file="- scenarios/openshift/ingress_namespace.yaml"
export post_config=""
yq '.scenarios.[0].namespace="^openshift-network-diagnostics$"' -i CI/scenarios/network_diagnostics_namespace.yaml
yq '.scenarios[0].namespace="^namespace-scenario$"' -i scenarios/openshift/ingress_namespace.yaml
yq '.scenarios[0].wait_time=30' -i scenarios/openshift/ingress_namespace.yaml
yq '.scenarios[0].action="delete"' -i scenarios/openshift/ingress_namespace.yaml
envsubst < CI/config/common_test_config.yaml > CI/config/namespace_config.yaml
python3 -m coverage run -a run_kraken.py -c CI/config/namespace_config.yaml
echo $?
echo "Namespace scenario test: Success"
}

View File

@@ -7,9 +7,16 @@ trap finish EXIT
function functional_test_network_chaos {
yq -i '.network_chaos.duration=10' scenarios/openshift/network_chaos.yaml
yq -i '.network_chaos.node_name="kind-worker2"' scenarios/openshift/network_chaos.yaml
yq -i '.network_chaos.egress.bandwidth="100mbit"' scenarios/openshift/network_chaos.yaml
yq -i 'del(.network_chaos.interfaces)' scenarios/openshift/network_chaos.yaml
yq -i 'del(.network_chaos.label_selector)' scenarios/openshift/network_chaos.yaml
yq -i 'del(.network_chaos.egress.latency)' scenarios/openshift/network_chaos.yaml
yq -i 'del(.network_chaos.egress.loss)' scenarios/openshift/network_chaos.yaml
export scenario_type="network_chaos"
export scenario_file="CI/scenarios/network_chaos.yaml"
export scenario_file="scenarios/openshift/network_chaos.yaml"
export post_config=""
envsubst < CI/config/common_test_config.yaml > CI/config/network_chaos.yaml
python3 -m coverage run -a run_kraken.py -c CI/config/network_chaos.yaml

View File

@@ -7,8 +7,12 @@ trap finish EXIT
function functional_test_time_scenario {
yq -i '.time_scenarios[0].label_selector="scenario=time-skew"' scenarios/openshift/time_scenarios_example.yml
yq -i '.time_scenarios[0].container_name=""' scenarios/openshift/time_scenarios_example.yml
yq -i '.time_scenarios[0].namespace="default"' scenarios/openshift/time_scenarios_example.yml
yq -i '.time_scenarios[1].label_selector="kubernetes.io/hostname=kind-worker2"' scenarios/openshift/time_scenarios_example.yml
export scenario_type="time_scenarios"
export scenario_file="CI/scenarios/time_scenarios.yml"
export scenario_file="scenarios/openshift/time_scenarios_example.yml"
export post_config=""
envsubst < CI/config/common_test_config.yaml > CI/config/time_config.yaml

View File

@@ -1,6 +1,6 @@
# Krkn aka Kraken
[![Docker Repository on Quay](https://quay.io/repository/redhat-chaos/krkn/status "Docker Repository on Quay")](https://quay.io/repository/redhat-chaos/krkn?tab=tags&tag=latest)
![Workflow-Status](https://github.com/redhat-chaos/krkn/actions/workflows/docker-image.yml/badge.svg)
[![Docker Repository on Quay](https://quay.io/repository/krkn-chaos/krkn/status "Docker Repository on Quay")](https://quay.io/repository/krkn-chaos/krkn?tab=tags&tag=latest)
![Workflow-Status](https://github.com/krkn-chaos/krkn/actions/workflows/docker-image.yml/badge.svg)
![Krkn logo](media/logo.png)
@@ -38,10 +38,10 @@ After installation, refer back to the below sections for supported scenarios and
#### Running Kraken with minimal configuration tweaks
For cases where you want to run Kraken with minimal configuration changes, refer to [Kraken-hub](https://github.com/redhat-chaos/krknChaos-hub). One use case is CI integration where you do not want to carry around different configuration files for the scenarios.
For cases where you want to run Kraken with minimal configuration changes, refer to [krkn-hub](https://github.com/krkn-chaos/krkn-hub). One use case is CI integration where you do not want to carry around different configuration files for the scenarios.
### Setting up infrastructure dependencies
Kraken indexes the metrics specified in the profile into Elasticsearch in addition to leveraging Cerberus for understanding the health of the Kubernetes/OpenShift cluster under test. More information on the features is documented below. The infrastructure pieces can be easily installed and uninstalled by running:
Kraken indexes the metrics specified in the profile into Elasticsearch in addition to leveraging Cerberus for understanding the health of the Kubernetes cluster under test. More information on the features is documented below. The infrastructure pieces can be easily installed and uninstalled by running:
```
$ cd kraken
@@ -65,7 +65,7 @@ Scenario type | Kubernetes
[Pod Network Scenarios](docs/pod_network_scenarios.md) | :x: |
[Container Scenarios](docs/container_scenarios.md) | :heavy_check_mark: |
[Node Scenarios](docs/node_scenarios.md) | :heavy_check_mark: |
[Time Scenarios](docs/time_scenarios.md) | :x: |
[Time Scenarios](docs/time_scenarios.md) | :heavy_check_mark: |
[Hog Scenarios: CPU, Memory](docs/arcaflow_scenarios.md) | :heavy_check_mark: |
[Cluster Shut Down Scenarios](docs/cluster_shut_down_scenarios.md) | :heavy_check_mark: |
[Service Disruption Scenarios](docs/service_disruption_scenarios.md.md) | :heavy_check_mark: |
@@ -77,9 +77,9 @@ Scenario type | Kubernetes
### Kraken scenario pass/fail criteria and report
It is important to make sure to check if the targeted component recovered from the chaos injection and also if the Kubernetes/OpenShift cluster is healthy as failures in one component can have an adverse impact on other components. Kraken does this by:
It is important to make sure to check if the targeted component recovered from the chaos injection and also if the Kubernetes cluster is healthy as failures in one component can have an adverse impact on other components. Kraken does this by:
- Having built in checks for pod and node based scenarios to ensure the expected number of replicas and nodes are up. It also supports running custom scripts with the checks.
- Leveraging [Cerberus](https://github.com/redhat-chaos/cerberus) to monitor the cluster under test and consuming the aggregated go/no-go signal to determine pass/fail post chaos. It is highly recommended to turn on the Cerberus health check feature available in Kraken. Instructions on installing and setting up Cerberus can be found [here](https://github.com/openshift-scale/cerberus#installation) or can be installed from Kraken using the [instructions](https://github.com/redhat-chaos/krkn#setting-up-infrastructure-dependencies). Once Cerberus is up and running, set cerberus_enabled to True and cerberus_url to the url where Cerberus publishes go/no-go signal in the Kraken config file. Cerberus can monitor [application routes](https://github.com/redhat-chaos/cerberus/blob/main/docs/config.md#watch-routes) during the chaos and fails the run if it encounters downtime as it is a potential downtime in a customers, or users environment as well. It is especially important during the control plane chaos scenarios including the API server, Etcd, Ingress etc. It can be enabled by setting `check_applicaton_routes: True` in the [Kraken config](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml) provided application routes are being monitored in the [cerberus config](https://github.com/redhat-chaos/krkn/blob/main/config/cerberus.yaml).
- Leveraging [Cerberus](https://github.com/krkn-chaos/cerberus) to monitor the cluster under test and consuming the aggregated go/no-go signal to determine pass/fail post chaos. It is highly recommended to turn on the Cerberus health check feature available in Kraken. Instructions on installing and setting up Cerberus can be found [here](https://github.com/openshift-scale/cerberus#installation) or can be installed from Kraken using the [instructions](https://github.com/krkn-chaos/krkn#setting-up-infrastructure-dependencies). Once Cerberus is up and running, set cerberus_enabled to True and cerberus_url to the url where Cerberus publishes go/no-go signal in the Kraken config file. Cerberus can monitor [application routes](https://github.com/redhat-chaos/cerberus/blob/main/docs/config.md#watch-routes) during the chaos and fails the run if it encounters downtime as it is a potential downtime in a customers, or users environment as well. It is especially important during the control plane chaos scenarios including the API server, Etcd, Ingress etc. It can be enabled by setting `check_applicaton_routes: True` in the [Kraken config](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml) provided application routes are being monitored in the [cerberus config](https://github.com/redhat-chaos/krkn/blob/main/config/cerberus.yaml).
- Leveraging built-in alert collection feature to fail the runs in case of critical alerts.
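
As a quick illustration of the Cerberus hookup described above, the snippet below sets the relevant options with yq, the same way the CI scripts in this change edit scenario files. The key paths (`.cerberus.cerberus_enabled`, `.cerberus.cerberus_url`, `.cerberus.check_applicaton_routes`) are assumptions inferred from the option names mentioned here; check config/config.yaml for the authoritative layout.
```
# Illustrative only: enable the Cerberus health signal in a local copy of the config.
yq -i '.cerberus.cerberus_enabled=true' config/config.yaml
yq -i '.cerberus.cerberus_url="http://0.0.0.0:8080"' config/config.yaml   # where Cerberus publishes go/no-go
yq -i '.cerberus.check_applicaton_routes=true' config/config.yaml         # requires routes to be watched in the cerberus config
```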
### Signaling
@@ -103,7 +103,7 @@ Information on enabling and leveraging this feature can be found [here](docs/SLO
### OCM / ACM integration
Kraken supports injecting faults into [Open Cluster Management (OCM)](https://open-cluster-management.io/) and [Red Hat Advanced Cluster Management for Kubernetes (ACM)](https://www.redhat.com/en/technologies/management/advanced-cluster-management) managed clusters through [ManagedCluster Scenarios](docs/managedcluster_scenarios.md).
Kraken supports injecting faults into [Open Cluster Management (OCM)](https://open-cluster-management.io/) and [Red Hat Advanced Cluster Management for Kubernetes (ACM)](https://www.krkn.com/en/technologies/management/advanced-cluster-management) managed clusters through [ManagedCluster Scenarios](docs/managedcluster_scenarios.md).
### Blogs and other useful resources
@@ -129,6 +129,7 @@ Please read [this file]((CI/README.md#adding-a-test-case)) for more information
### Community
Key Members(slack_usernames/full name): paigerube14/Paige Rubendall, mffiedler/Mike Fiedler, ravielluri/Naga Ravi Chaitanya Elluri.
* [**#krkn on Kubernetes Slack**](https://kubernetes.slack.com)
* [**#forum-chaos on CoreOS Slack internal to Red Hat**](https://coreos.slack.com)
Key Members(slack_usernames/full name): paigerube14/Paige Rubendall, mffiedler/Mike Fiedler, tsebasti/Tullio Sebastiani, yogi/Yogananth Subramanian, sahil/Sahil Shah, pradeep/Pradeep Surisetty and ravielluri/Naga Ravi Chaitanya Elluri.
* [**#krkn on Kubernetes Slack**](https://kubernetes.slack.com/messages/C05SFMHRWK1)
The Linux Foundation® (TLF) has registered trademarks and uses trademarks. For a list of TLF trademarks, see [Trademark Usage](https://www.linuxfoundation.org/legal/trademark-usage).

View File

@@ -2,14 +2,14 @@
Following are a list of enhancements that we are planning to work on adding support in Krkn. Of course any help/contributions are greatly appreciated.
- [ ] [Ability to run multiple chaos scenarios in parallel under load to mimic real world outages](https://github.com/redhat-chaos/krkn/issues/424)
- [x] [Centralized storage for chaos experiments artifacts](https://github.com/redhat-chaos/krkn/issues/423)
- [ ] [Support for causing DNS outages](https://github.com/redhat-chaos/krkn/issues/394)
- [x] [Chaos recommender](https://github.com/redhat-chaos/krkn/tree/main/utils/chaos-recommender) to suggest scenarios having probability of impacting the service under test using profiling results
- [ ] [Ability to run multiple chaos scenarios in parallel under load to mimic real world outages](https://github.com/krkn-chaos/krkn/issues/424)
- [x] [Centralized storage for chaos experiments artifacts](https://github.com/krkn-chaos/krkn/issues/423)
- [ ] [Support for causing DNS outages](https://github.com/krkn-chaos/krkn/issues/394)
- [x] [Chaos recommender](https://github.com/krkn-chaos/krkn/tree/main/utils/chaos-recommender) to suggest scenarios having probability of impacting the service under test using profiling results
- [ ] Chaos AI integration to improve and automate test coverage
- [x] [Support for pod level network traffic shaping](https://github.com/redhat-chaos/krkn/issues/393)
- [ ] [Ability to visualize the metrics that are being captured by Kraken and stored in Elasticsearch](https://github.com/redhat-chaos/krkn/issues/124)
- [ ] Support for running all the scenarios of Kraken on Kubernetes distribution - see https://github.com/redhat-chaos/krkn/issues/185, https://github.com/redhat-chaos/krkn/issues/186
- [ ] Continue to improve [Chaos Testing Guide](https://redhat-chaos.github.io/krkn) in terms of adding best practices, test environment recommendations and scenarios to make sure the OpenShift platform, as well the applications running on top it, are resilient and performant under chaotic conditions.
- [ ] [Switch documentation references to Kubernetes](https://github.com/redhat-chaos/krkn/issues/495)
- [ ] [OCP and Kubernetes functionalities segregation](https://github.com/redhat-chaos/krkn/issues/497)
- [x] [Support for pod level network traffic shaping](https://github.com/krkn-chaos/krkn/issues/393)
- [ ] [Ability to visualize the metrics that are being captured by Kraken and stored in Elasticsearch](https://github.com/krkn-chaos/krkn/issues/124)
- [ ] Support for running all the scenarios of Kraken on Kubernetes distribution - see https://github.com/krkn-chaos/krkn/issues/185, https://github.com/redhat-chaos/krkn/issues/186
- [ ] Continue to improve [Chaos Testing Guide](https://krkn-chaos.github.io/krkn) in terms of adding best practices, test environment recommendations and scenarios to make sure the OpenShift platform, as well the applications running on top it, are resilient and performant under chaotic conditions.
- [ ] [Switch documentation references to Kubernetes](https://github.com/krkn-chaos/krkn/issues/495)
- [ ] [OCP and Kubernetes functionalities segregation](https://github.com/krkn-chaos/krkn/issues/497)

View File

@@ -12,7 +12,7 @@ COPY --from=azure-cli /usr/local/bin/az /usr/bin/az
# Install dependencies
RUN yum install -y git python39 python3-pip jq gettext wget && \
python3.9 -m pip install -U pip && \
git clone https://github.com/krkn-chaos/krkn.git --branch v1.5.4 /root/kraken && \
git clone https://github.com/krkn-chaos/krkn.git --branch v1.5.5 /root/kraken && \
mkdir -p /root/.kube && cd /root/kraken && \
pip3.9 install -r requirements.txt && \
pip3.9 install virtualenv && \

View File

@@ -14,7 +14,7 @@ COPY --from=azure-cli /usr/local/bin/az /usr/bin/az
# Install dependencies
RUN yum install -y git python39 python3-pip jq gettext wget && \
python3.9 -m pip install -U pip && \
git clone https://github.com/redhat-chaos/krkn.git --branch v1.5.4 /root/kraken && \
git clone https://github.com/redhat-chaos/krkn.git --branch v1.5.5 /root/kraken && \
mkdir -p /root/.kube && cd /root/kraken && \
pip3.9 install -r requirements.txt && \
pip3.9 install virtualenv && \

View File

@@ -1,5 +1,5 @@
#### Kubernetes/OpenShift cluster shut down scenario
Scenario to shut down all the nodes including the masters and restart them after specified duration. Cluster shut down scenario can be injected by placing the shut_down config file under cluster_shut_down_scenario option in the kraken config. Refer to [cluster_shut_down_scenario](https://github.com/redhat-chaos/krkn/blob/main/scenarios/cluster_shut_down_scenario.yml) config file.
#### Kubernetes cluster shut down scenario
Scenario to shut down all the nodes including the masters and restart them after specified duration. Cluster shut down scenario can be injected by placing the shut_down config file under cluster_shut_down_scenario option in the kraken config. Refer to [cluster_shut_down_scenario](https://github.com/krkn-chaos/krkn/blob/main/scenarios/cluster_shut_down_scenario.yml) config file.
Refer to [cloud setup](cloud_setup.md) to configure your cli properly for the cloud provider of the cluster you want to shut down.

View File

@@ -4,7 +4,7 @@ This can be based on the pods namespace or labels. If you know the exact object
These scenarios are in a simple yaml format that you can manipulate to run your specific tests or use the pre-existing scenarios to see how it works.
#### Example Config
The following are the components of Kubernetes/OpenShift for which a basic chaos scenario config exists today.
The following are the components of Kubernetes for which a basic chaos scenario config exists today.
```
scenarios:
@@ -25,7 +25,7 @@ In all scenarios we do a post chaos check to wait and verify the specific compon
Here there are two options:
1. Pass a custom script in the main config scenario list that will run before the chaos and verify the output matches post chaos scenario.
See [scenarios/post_action_etcd_container.py](https://github.com/redhat-chaos/krkn/blob/main/scenarios/post_action_etcd_container.py) for an example.
See [scenarios/post_action_etcd_container.py](https://github.com/krkn-chaos/krkn/blob/main/scenarios/post_action_etcd_container.py) for an example.
```
- container_scenarios: # List of chaos pod scenarios to load.
- - scenarios/container_etcd.yml

View File

@@ -62,7 +62,7 @@ If changes go into the main repository while you're working on your code it is b
If not already configured, set the upstream url for kraken.
```
git remote add upstream https://github.com/redhat-chaos/krkn.git
git remote add upstream https://github.com/krkn-chaos/krkn.git
```
Rebase to upstream master branch.

View File

@@ -3,13 +3,13 @@
The following ways are supported to run Kraken:
- Standalone python program through Git.
- Containerized version using either Podman or Docker as the runtime via [Krkn-hub](https://github.com/redhat-chaos/krkn-hub)
- Containerized version using either Podman or Docker as the runtime via [Krkn-hub](https://github.com/krkn-chaos/krkn-hub)
- Kubernetes or OpenShift deployment ( unsupported )
**NOTE**: It is recommended to run Kraken external to the cluster ( Standalone or Containerized ) hitting the Kubernetes/OpenShift API as running it internal to the cluster might be disruptive to itself and also might not report back the results if the chaos leads to cluster's API server instability.
**NOTE**: To run Kraken on Power (ppc64le) architecture, build and run a containerized version by following the
instructions given [here](https://github.com/redhat-chaos/krkn/blob/main/containers/build_own_image-README.md).
instructions given [here](https://github.com/krkn-chaos/krkn/blob/main/containers/build_own_image-README.md).
**NOTE**: Helper functions for interactions in Krkn are part of [krkn-lib](https://github.com/redhat-chaos/krkn-lib).
Please feel free to reuse and expand them as you see fit when adding a new scenario or expanding
@@ -19,9 +19,9 @@ the capabilities of the current supported scenarios.
### Git
#### Clone the repository
Pick the latest stable release to install [here](https://github.com/redhat-chaos/krkn/releases).
Pick the latest stable release to install [here](https://github.com/krkn-chaos/krkn/releases).
```
$ git clone https://github.com/redhat-chaos/krkn.git --branch <release version>
$ git clone https://github.com/krkn-chaos/krkn.git --branch <release version>
$ cd kraken
```
@@ -40,13 +40,13 @@ $ python3.9 run_kraken.py --config <config_file_location>
```
### Run containerized version
[Krkn-hub](https://github.com/redhat-chaos/krkn-hub) is a wrapper that allows running Krkn chaos scenarios via podman or docker runtime with scenario parameters/configuration defined as environment variables.
[Krkn-hub](https://github.com/krkn-chaos/krkn-hub) is a wrapper that allows running Krkn chaos scenarios via podman or docker runtime with scenario parameters/configuration defined as environment variables.
Refer [instructions](https://github.com/redhat-chaos/krkn-hub#supported-chaos-scenarios) to get started.
Refer [instructions](https://github.com/krkn-chaos/krkn-hub#supported-chaos-scenarios) to get started.
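
For context on the wrapper mentioned above, a krkn-hub scenario is typically launched as a container with scenario parameters passed as environment variables and the kubeconfig mounted in. The image tag and variable name below are illustrative assumptions; the krkn-hub instructions linked above document the real ones per scenario.
```
# Hypothetical krkn-hub invocation (image tag and env vars are assumptions)
podman run --rm --net=host \
  -v "$HOME/.kube/config:/root/.kube/config:Z" \
  -e NAMESPACE="default" \
  quay.io/krkn-chaos/krkn-hub:container-scenarios
```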
### Run Kraken as a Kubernetes deployment ( unsupported option - standalone or containerized deployers are recommended )
Refer [Instructions](https://github.com/redhat-chaos/krkn/blob/main/containers/README.md) on how to deploy and run Kraken as a Kubernetes/OpenShift deployment.
Refer [Instructions](https://github.com/krkn-chaos/krkn/blob/main/containers/README.md) on how to deploy and run Kraken as a Kubernetes/OpenShift deployment.
Refer to the [chaos-kraken chart manpage](https://artifacthub.io/packages/helm/startx/chaos-kraken)

View File

@@ -16,7 +16,7 @@ Set to '^.*$' and label_selector to "" to randomly select any namespace in your
**sleep:** Number of seconds to wait between each iteration/count of killing namespaces. Defaults to 10 seconds if not set
Refer to [namespace_scenarios_example](https://github.com/redhat-chaos/krkn/blob/main/scenarios/regex_namespace.yaml) config file.
Refer to [namespace_scenarios_example](https://github.com/krkn-chaos/krkn/blob/main/scenarios/regex_namespace.yaml) config file.
```
scenarios:

View File

@@ -16,7 +16,7 @@ Configuration Options:
**object_name:** List of the names of pods or nodes you want to skew.
Refer to [time_scenarios_example](https://github.com/redhat-chaos/krkn/blob/main/scenarios/time_scenarios_example.yml) config file.
Refer to [time_scenarios_example](https://github.com/krkn-chaos/krkn/blob/main/scenarios/time_scenarios_example.yml) config file.
```
time_scenarios:

View File

@@ -11,7 +11,7 @@ coverage
datetime
docker
docker-compose
git+https://github.com/redhat-chaos/arcaflow-plugin-kill-pod.git
git+https://github.com/krkn-chaos/arcaflow-plugin-kill-pod.git
git+https://github.com/vmware/vsphere-automation-sdk-python.git@v8.0.0.0
gitpython
google-api-python-client
@@ -35,6 +35,6 @@ requests
service_identity
setuptools==65.5.1
werkzeug==3.0.1
wheel
wheel>=0.38.0
zope.interface==5.4.0
pandas<2.0.0
pandas>=2.2.0

View File

@@ -34,7 +34,7 @@ from krkn_lib.utils import SafeLogger
from krkn_lib.utils.functions import get_yaml_item_value
report_file = ""
# Main function
def main(cfg):
@@ -414,10 +414,9 @@ def main(cfg):
)
sys.exit(1)
run_dir = os.getcwd() + "/kraken.report"
logging.info(
"Successfully finished running Kraken. UUID for the run: "
"%s. Report generated at %s. Exiting" % (run_uuid, run_dir)
"%s. Report generated at %s. Exiting" % (run_uuid, report_file)
)
else:
logging.error("Cannot find a config at %s, please check" % (cfg))
@@ -434,12 +433,21 @@ if __name__ == "__main__":
help="config location",
default="config/config.yaml",
)
parser.add_option(
"-o",
"--output",
dest="output",
help="output report location",
default="kraken.report",
)
(options, args) = parser.parse_args()
report_file = options.output
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[
logging.FileHandler("kraken.report", mode="w"),
logging.FileHandler(report_file, mode="w"),
logging.StreamHandler(),
],
)
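
With the -o/--output option added in this change, the report path used by the logging FileHandler can be overridden at invocation time. A sample invocation (the output path is arbitrary):
```
# Write the run report somewhere other than the default ./kraken.report
python3.9 run_kraken.py -c CI/config/common_test_config.yaml -o /tmp/krkn-run.report
```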

View File

@@ -1,8 +1,9 @@
input_list:
- cpu_count: 1
cpu_load_percentage: 80
cpu_method: all
duration: 1s
kubeconfig: ''
namespace: default
node_selector: {}
- cpu_count: 1
cpu_load_percentage: 80
cpu_method: all
duration: 1s
kubeconfig: ''
namespace: default
node_selector:
kubernetes.io/hostname: kind-worker2

View File

@@ -2,6 +2,22 @@ version: v0.2.0
input:
root: RootObject
objects:
hostPath:
id: HostPathVolumeSource
properties:
path:
type:
type_id: string
Volume:
id: Volume
properties:
name:
type:
type_id: string
hostPath:
type:
id: hostPath
type_id: ref
RootObject:
id: input_item
properties:
@@ -73,25 +89,8 @@ input:
description: the volume that will be attached to the pod. In order to stress
the node storage only hosPath mode is currently supported
type:
type_id: object
id: k8s_volume
properties:
name:
display:
description: name of the volume (must match the name in pod definition)
type:
type_id: string
required: true
hostPath:
display:
description: hostPath options expressed as string map (key-value)
type:
type_id: map
values:
type_id: string
keys:
type_id: string
required: true
type_id: ref
id: Volume
required: true
steps:

View File

@@ -2,6 +2,22 @@ version: v0.2.0
input:
root: RootObject
objects:
hostPath:
id: HostPathVolumeSource
properties:
path:
type:
type_id: string
Volume:
id: Volume
properties:
name:
type:
type_id: string
hostPath:
type:
id: hostPath
type_id: ref
RootObject:
id: RootObject
properties:
@@ -80,25 +96,8 @@ input:
description: the volume that will be attached to the pod. In order to stress
the node storage only hosPath mode is currently supported
type:
type_id: object
id: k8s_volume
properties:
name:
display:
description: name of the volume (must match the name in pod definition)
type:
type_id: string
required: true
hostPath:
display:
description: hostPath options expressed as string map (key-value)
type:
type_id: map
values:
type_id: string
keys:
type_id: string
required: true
type_id: ref
id: Volume
required: true
steps:
workload_loop:

View File

@@ -3,6 +3,6 @@ scenarios:
namespace: "kube-system"
label_selector: "k8s-app=kube-dns"
container_name: ""
action: "kill 1"
action: 1
count: 1
retry_wait: 60

View File

@@ -1,12 +1,11 @@
network_chaos: # Scenario to create an outage by simulating random variations in the network.
duration: 300 # seconds
node_name: # node on which scenario has to be injected;
label_selector: <label_selector> # when node_name is not specified, a node with matching label_selector is selected for running the scenario.
network_chaos: # Scenario to create an outage by simulating random variations in the network.
duration: 300 # seconds
node_name: # node on which scenario has to be injected;
label_selector: <label_selector> # when node_name is not specified, a node with matching label_selector is selected for running the scenario.
instance_count: 1
interfaces: # Interface name would be the Kernel host network interface name.
- "<interface_name>"
interfaces: # Interface name would be the Kernel host network interface name.
- "<interface_name>"
execution: serial
egress:
latency: 50ms # 50ms
loss: 0.02 # percentage
bandwidth: 100mbit
latency: 50ms # 50ms
loss: 0.02 # percentage

View File

@@ -17,7 +17,7 @@ This tool profiles an application and gathers telemetry data such as CPU, Memory
```
$ python3.9 -m venv chaos
$ source chaos/bin/activate
$ git clone https://github.com/redhat-chaos/krkn.git
$ git clone https://github.com/krkn-chaos/krkn.git
$ cd krkn
$ pip3 install -r requirements.txt
$ python3.9 utils/chaos_recommender/chaos_recommender.py
@@ -89,7 +89,7 @@ If you provide the input values through command-line arguments, the correspondin
## Podman & Docker image
To run the recommender image please visit the [krkn-hub](https://github.com/redhat-chaos/krkn-hub for further infos.
To run the recommender image please visit the [krkn-hub](https://github.com/krkn-chaos/krkn-hub for further infos.
## How it works