Mirror of https://github.com/krkn-chaos/krkn.git (synced 2026-02-19 20:40:33 +00:00)

Compare commits (109 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | 32142cc159 |  |
|  | 34bfc0d3d9 |  |
|  | 736c90e937 |  |
|  | 5e7938ba4a |  |
|  | b525f83261 |  |
|  | 26460a0dce |  |
|  | 7968c2a776 |  |
|  | 6186555c15 |  |
|  | 9cd086f59c |  |
|  | 1057917731 |  |
|  | 5484828b67 |  |
|  | d18b6332e5 |  |
|  | 89a0e166f1 |  |
|  | 624f50acd1 |  |
|  | e02c6d1287 |  |
|  | 04425a8d8a |  |
|  | f3933f0e62 |  |
|  | 56ff0a8c72 |  |
|  | 9378cd74cd |  |
|  | 4d3491da0f |  |
|  | d6ce66160b |  |
|  | ef1a55438b |  |
|  | d8f54b83a2 |  |
|  | 4870c86515 |  |
|  | 6ae17cf678 |  |
|  | ce9f8aa050 |  |
|  | 05148317c1 |  |
|  | 5f836f294b |  |
|  | cfa1bb09a0 |  |
|  | 5ddfff5a85 |  |
|  | 7d18487228 |  |
|  | 08de42c91a |  |
|  | dc7d5bb01b |  |
|  | ea3444d375 |  |
|  | 7b660a0878 |  |
|  | 5fe0655f22 |  |
|  | 5df343c183 |  |
|  | f364e9f283 |  |
|  | 86a7427606 |  |
|  | 31266fbc3e |  |
|  | 57de3769e7 |  |
|  | 42fc8eea40 |  |
|  | 22d56e2cdc |  |
|  | a259b68221 |  |
|  | 052f83e7d9 |  |
|  | fb3bbe4e26 |  |
|  | 96ba9be4b8 |  |
|  | 58d5d1d8dc |  |
|  | 3fe22a0d8f |  |
|  | 21b89a32a7 |  |
|  | dbe3ea9718 |  |
|  | a142f6e7a4 |  |
|  | 2610a7af67 |  |
|  | f827f65132 |  |
|  | aa6cbbc11a |  |
|  | e17354e54d |  |
|  | 2dfa5cb0cd |  |
|  | 0799008cd5 |  |
|  | 2327531e46 |  |
|  | 2c14c48a63 |  |
|  | ab98e416a6 |  |
|  | 19ad2d1a3d |  |
|  | 804d7cbf58 |  |
|  | 54af2fc6ff |  |
|  | b79e526cfd |  |
|  | a5efd7d06c |  |
|  | a1b81bd382 |  |
|  | 782440c8c4 |  |
|  | 7e2755cbb7 |  |
|  | 2babb53d6e |  |
|  | 85f76e9193 |  |
|  | 8bf21392f1 |  |
|  | 606fb60811 |  |
|  | fac7c3c6fb |  |
|  | 8dd9b30030 |  |
|  | 2d99f17aaf |  |
|  | 50742a793c |  |
|  | ba6a844544 |  |
|  | 7e7a917dba |  |
|  | b9c0bb39c7 |  |
|  | 706a886151 |  |
|  | a1cf9e2c00 |  |
|  | 0f5dfcb823 |  |
|  | 1e1015e6e7 |  |
|  | c71ce31779 |  |
|  | 1298f220a6 |  |
|  | 24059fb731 |  |
|  | ab951adb78 |  |
|  | a9a7fb7e51 |  |
|  | 5a8d5b0fe1 |  |
|  | c440dc4b51 |  |
|  | b174c51ee0 |  |
|  | fec0434ce1 |  |
|  | 1067d5ec8d |  |
|  | 85ea1ef7e1 |  |
|  | 2e38b8b033 |  |
|  | c7ea366756 |  |
|  | 67d4ee9fa2 |  |
|  | fa59834bae |  |
|  | f154bcb692 |  |
|  | 60ece4b1b8 |  |
|  | d660542a40 |  |
|  | 2e651798fa |  |
|  | f801dfce54 |  |
|  | 8b95458444 |  |
|  | ce1ae78f1f |  |
|  | 967753489b |  |
|  | aa16cb1bf2 |  |
|  | ac47e215d8 |  |
.github/workflows/build.yml (51 changed lines, vendored)

@@ -1,51 +0,0 @@
name: Build Krkn
on:
  pull_request:

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Check out code
        uses: actions/checkout@v3
      - name: Create multi-node KinD cluster
        uses: redhat-chaos/actions/kind@main
      - name: Install Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.9'
          architecture: 'x64'
      - name: Install environment
        run: |
          sudo apt-get install build-essential python3-dev
          pip install --upgrade pip
          pip install -r requirements.txt
      - name: Run unit tests
        run: python -m coverage run -a -m unittest discover -s tests -v
      - name: Run CI
        run: |
          ./CI/run.sh
          cat ./CI/results.markdown >> $GITHUB_STEP_SUMMARY
          echo >> $GITHUB_STEP_SUMMARY
      - name: Upload CI logs
        uses: actions/upload-artifact@v3
        with:
          name: ci-logs
          path: CI/out
          if-no-files-found: error
      - name: Collect coverage report
        run: |
          python -m coverage html
      - name: Publish coverage report to job summary
        run: |
          pip install html2text
          html2text --ignore-images --ignore-links -b 0 htmlcov/index.html >> $GITHUB_STEP_SUMMARY
      - name: Upload coverage data
        uses: actions/upload-artifact@v3
        with:
          name: coverage
          path: htmlcov
          if-no-files-found: error
      - name: Check CI results
        run: grep Fail CI/results.markdown && false || true
.github/workflows/docker-image.yml (36 changed lines, vendored)

@@ -1,8 +1,7 @@
name: Docker Image CI
on:
  push:
    branches:
      - main
    tags: ['v[0-9].[0-9]+.[0-9]+']
  pull_request:

jobs:
@@ -12,30 +11,43 @@ jobs:
      - name: Check out code
        uses: actions/checkout@v3
      - name: Build the Docker images
        if: startsWith(github.ref, 'refs/tags')
        run: |
          docker build --no-cache -t quay.io/krkn-chaos/krkn containers/
          docker build --no-cache -t quay.io/krkn-chaos/krkn containers/ --build-arg TAG=${GITHUB_REF#refs/tags/}
          docker tag quay.io/krkn-chaos/krkn quay.io/redhat-chaos/krkn
          docker tag quay.io/krkn-chaos/krkn quay.io/krkn-chaos/krkn:${GITHUB_REF#refs/tags/}
          docker tag quay.io/krkn-chaos/krkn quay.io/redhat-chaos/krkn:${GITHUB_REF#refs/tags/}

      - name: Test Build the Docker images
        if: ${{ github.event_name == 'pull_request' }}
        run: |
          docker build --no-cache -t quay.io/krkn-chaos/krkn containers/ --build-arg PR_NUMBER=${{ github.event.pull_request.number }}
      - name: Login in quay
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        if: startsWith(github.ref, 'refs/tags')
        run: docker login quay.io -u ${QUAY_USER} -p ${QUAY_TOKEN}
        env:
          QUAY_USER: ${{ secrets.QUAY_USERNAME }}
          QUAY_TOKEN: ${{ secrets.QUAY_PASSWORD }}
      - name: Push the KrknChaos Docker images
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        run: docker push quay.io/krkn-chaos/krkn
        if: startsWith(github.ref, 'refs/tags')
        run: |
          docker push quay.io/krkn-chaos/krkn
          docker push quay.io/krkn-chaos/krkn:${GITHUB_REF#refs/tags/}
      - name: Login in to redhat-chaos quay
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        if: startsWith(github.ref, 'refs/tags/v')
        run: docker login quay.io -u ${QUAY_USER} -p ${QUAY_TOKEN}
        env:
          QUAY_USER: ${{ secrets.QUAY_USER_1 }}
          QUAY_TOKEN: ${{ secrets.QUAY_TOKEN_1 }}
      - name: Push the RedHat Chaos Docker images
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        run: docker push quay.io/redhat-chaos/krkn
        if: startsWith(github.ref, 'refs/tags')
        run: |
          docker push quay.io/redhat-chaos/krkn
          docker push quay.io/redhat-chaos/krkn:${GITHUB_REF#refs/tags/}
      - name: Rebuild krkn-hub
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        if: startsWith(github.ref, 'refs/tags')
        uses: redhat-chaos/actions/krkn-hub@main
        with:
          QUAY_USER: ${{ secrets.QUAY_USER_1 }}
          QUAY_TOKEN: ${{ secrets.QUAY_TOKEN_1 }}
          QUAY_USER: ${{ secrets.QUAY_USERNAME }}
          QUAY_TOKEN: ${{ secrets.QUAY_PASSWORD }}
          AUTOPUSH: ${{ secrets.AUTOPUSH }}
.github/workflows/functional_tests.yaml (129 changed lines, vendored)

@@ -1,129 +0,0 @@
on: issue_comment

jobs:
  check_user:
    # This job only runs for pull request comments
    name: Check User Authorization
    env:
      USERS: ${{vars.USERS}}
    if: contains(github.event.comment.body, '/funtest') && contains(github.event.comment.html_url, '/pull/')
    runs-on: ubuntu-latest
    steps:
      - name: Check User
        run: |
          for name in `echo $USERS`
          do
            name="${name//$'\r'/}"
            name="${name//$'\n'/}"
            if [ $name == "${{github.event.sender.login}}" ]
            then
              echo "user ${{github.event.sender.login}} authorized, action started..."
              exit 0
            fi
          done
          echo "user ${{github.event.sender.login}} is not allowed to run functional tests Action"
          exit 1
  pr_commented:
    # This job only runs for pull request comments containing /functional
    name: Functional Tests
    if: contains(github.event.comment.body, '/funtest') && contains(github.event.comment.html_url, '/pull/')
    runs-on: ubuntu-latest
    needs:
      - check_user
    steps:
      - name: Check out Kraken
        uses: actions/checkout@v3
      - name: Checkout Pull Request
        run: gh pr checkout ${{ github.event.issue.number }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Install OC CLI
        uses: redhat-actions/oc-installer@v1
        with:
          oc_version: latest
      - name: Install python 3.9
        uses: actions/setup-python@v4
        with:
          python-version: '3.9'
      - name: Setup kraken dependencies
        run: pip install -r requirements.txt
      - name: Create Workdir & export the path
        run: |
          mkdir workdir
          echo "WORKDIR_PATH=`pwd`/workdir" >> $GITHUB_ENV
      - name: Generate run id
        run: |
          echo "RUN_ID=`date +%s`" > $GITHUB_ENV
          echo "Run Id: ${RUN_ID}"
      - name: Write Pull Secret
        env:
          PULLSECRET_BASE64: ${{ secrets.PS_64 }}
        run: |
          echo "$PULLSECRET_BASE64" | base64 --decode > pullsecret.txt
      - name: Write Boot Private Key
        env:
          BOOT_KEY: ${{ secrets.CRC_KEY_FILE }}
        run: |
          echo -n "$BOOT_KEY" > key.txt
      - name: Teardown CRC (Post Action)
        uses: webiny/action-post-run@3.0.0
        id: post-run-command
        with:
          run: podman run --rm -v "${{ github.workspace }}:/workspace:z" -e AWS_ACCESS_KEY_ID="${{ secrets.AWS_ACCESS_KEY_ID }}" -e AWS_SECRET_ACCESS_KEY="${{ secrets.AWS_SECRET_ACCESS_KEY }}" -e AWS_DEFAULT_REGION=us-west-2 quay.io/crcont/crc-cloud:v0.0.2 destroy --project-name "chaos-funtest-${{ env.RUN_ID }}" --backed-url "s3://krkn-crc-state/${{ env.RUN_ID }}" --provider "aws"
      - name: Create cluster
        run: |
          podman run --name crc-cloud-create --rm \
            -v ${PWD}:/workspace:z \
            -e AWS_ACCESS_KEY_ID="${{ secrets.AWS_ACCESS_KEY_ID }}" \
            -e AWS_SECRET_ACCESS_KEY="${{ secrets.AWS_SECRET_ACCESS_KEY }}" \
            -e AWS_DEFAULT_REGION="us-west-2" \
            quay.io/crcont/crc-cloud:v0.0.2 \
            create aws \
            --project-name "chaos-funtest-${RUN_ID}" \
            --backed-url "s3://krkn-crc-state/${RUN_ID}" \
            --output "/workspace" \
            --aws-ami-id "ami-00f5eaf98cf42ef9f" \
            --pullsecret-filepath /workspace/pullsecret.txt \
            --key-filepath /workspace/key.txt

      - name: Setup kubeconfig
        continue-on-error: true
        run: |
          ssh -o StrictHostKeyChecking=no -i id_rsa core@$(cat host) "cat /opt/kubeconfig" > kubeconfig
          sed -i "s/https:\/\/api.crc.testing:6443/https:\/\/`cat host`.nip.io:6443/g" kubeconfig
          echo "KUBECONFIG=${PWD}/kubeconfig" > $GITHUB_ENV

      - name: Example deployment, GitHub Action env init
        env:
          NAMESPACE: test-namespace
          DEPLOYMENT_NAME: test-nginx
        run: ./CI/CRC/init_github_action.sh
      - name: Setup test suite
        run: |
          yq -i '.kraken.port="8081"' CI/config/common_test_config.yaml
          yq -i '.kraken.signal_address="0.0.0.0"' CI/config/common_test_config.yaml
          yq -i '.kraken.kubeconfig_path="'${KUBECONFIG}'"' CI/config/common_test_config.yaml
          echo "test_app_outages_gh" > ./CI/tests/my_tests
          echo "test_container" >> ./CI/tests/my_tests
          echo "test_namespace" >> ./CI/tests/my_tests
          echo "test_net_chaos" >> ./CI/tests/my_tests
          echo "test_time" >> ./CI/tests/my_tests

      - name: Print affected config files
        run: |
          echo -e "## CI/config/common_test_config.yaml\n\n"
          cat CI/config/common_test_config.yaml

      - name: Running test suite
        run: |
          ./CI/run.sh
      - name: Print test output
        run: cat CI/out/*
      - name: Create coverage report
        run: |
          echo "# Test results" > $GITHUB_STEP_SUMMARY
          cat CI/results.markdown >> $GITHUB_STEP_SUMMARY
          echo "# Test coverage" >> $GITHUB_STEP_SUMMARY
          python -m coverage report --format=markdown >> $GITHUB_STEP_SUMMARY
.github/workflows/tests.yml (198 lines, vendored, new file)

@@ -0,0 +1,198 @@
name: Functional & Unit Tests
on:
  pull_request:
  push:
    branches:
      - main
jobs:
  tests:
    # Common steps
    name: Functional & Unit Tests
    runs-on: ubuntu-latest
    steps:
      - name: Check out code
        uses: actions/checkout@v3
      - name: Create multi-node KinD cluster
        uses: redhat-chaos/actions/kind@main
      - name: Install Helm & add repos
        run: |
          curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
          helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
          helm repo add stable https://charts.helm.sh/stable
          helm repo update
      - name: Deploy prometheus & Port Forwarding
        run: |
          kubectl create namespace prometheus-k8s
          helm install \
            --wait --timeout 360s \
            kind-prometheus \
            prometheus-community/kube-prometheus-stack \
            --namespace prometheus-k8s \
            --set prometheus.service.nodePort=30000 \
            --set prometheus.service.type=NodePort \
            --set grafana.service.nodePort=31000 \
            --set grafana.service.type=NodePort \
            --set alertmanager.service.nodePort=32000 \
            --set alertmanager.service.type=NodePort \
            --set prometheus-node-exporter.service.nodePort=32001 \
            --set prometheus-node-exporter.service.type=NodePort

          SELECTOR=`kubectl -n prometheus-k8s get service kind-prometheus-kube-prome-prometheus -o wide --no-headers=true | awk '{ print $7 }'`
          POD_NAME=`kubectl -n prometheus-k8s get pods --selector="$SELECTOR" --no-headers=true | awk '{ print $1 }'`
          kubectl -n prometheus-k8s port-forward $POD_NAME 9090:9090 &
          sleep 5
      - name: Install Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.9'
          architecture: 'x64'
      - name: Install environment
        run: |
          sudo apt-get install build-essential python3-dev
          pip install --upgrade pip
          pip install -r requirements.txt

      - name: Deploy test workloads
        run: |
          kubectl apply -f CI/templates/outage_pod.yaml
          kubectl wait --for=condition=ready pod -l scenario=outage --timeout=300s
          kubectl apply -f CI/templates/container_scenario_pod.yaml
          kubectl wait --for=condition=ready pod -l scenario=container --timeout=300s
          kubectl create namespace namespace-scenario
          kubectl apply -f CI/templates/time_pod.yaml
          kubectl wait --for=condition=ready pod -l scenario=time-skew --timeout=300s
          kubectl apply -f CI/templates/service_hijacking.yaml
          kubectl wait --for=condition=ready pod -l "app.kubernetes.io/name=proxy" --timeout=300s
      - name: Get Kind nodes
        run: |
          kubectl get nodes --show-labels=true
      # Pull request only steps
      - name: Run unit tests
        if: github.event_name == 'pull_request'
        run: python -m coverage run -a -m unittest discover -s tests -v

      - name: Setup Pull Request Functional Tests
        if: |
          github.event_name == 'pull_request'
        run: |
          yq -i '.kraken.port="8081"' CI/config/common_test_config.yaml
          yq -i '.kraken.signal_address="0.0.0.0"' CI/config/common_test_config.yaml
          yq -i '.kraken.performance_monitoring="localhost:9090"' CI/config/common_test_config.yaml
          echo "test_service_hijacking" > ./CI/tests/functional_tests
          echo "test_app_outages" >> ./CI/tests/functional_tests
          echo "test_container" >> ./CI/tests/functional_tests
          echo "test_namespace" >> ./CI/tests/functional_tests
          echo "test_net_chaos" >> ./CI/tests/functional_tests
          echo "test_time" >> ./CI/tests/functional_tests
          echo "test_arca_cpu_hog" >> ./CI/tests/functional_tests
          echo "test_arca_memory_hog" >> ./CI/tests/functional_tests
          echo "test_arca_io_hog" >> ./CI/tests/functional_tests

      # Push on main only steps + all other functional to collect coverage
      # for the badge
      - name: Configure AWS Credentials
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region : ${{ secrets.AWS_REGION }}
      - name: Setup Post Merge Request Functional Tests
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        run: |
          yq -i '.kraken.port="8081"' CI/config/common_test_config.yaml
          yq -i '.kraken.signal_address="0.0.0.0"' CI/config/common_test_config.yaml
          yq -i '.kraken.performance_monitoring="localhost:9090"' CI/config/common_test_config.yaml
          yq -i '.telemetry.username="${{secrets.TELEMETRY_USERNAME}}"' CI/config/common_test_config.yaml
          yq -i '.telemetry.password="${{secrets.TELEMETRY_PASSWORD}}"' CI/config/common_test_config.yaml
          echo "test_telemetry" > ./CI/tests/functional_tests
          echo "test_service_hijacking" >> ./CI/tests/functional_tests
          echo "test_app_outages" >> ./CI/tests/functional_tests
          echo "test_container" >> ./CI/tests/functional_tests
          echo "test_namespace" >> ./CI/tests/functional_tests
          echo "test_net_chaos" >> ./CI/tests/functional_tests
          echo "test_time" >> ./CI/tests/functional_tests
          echo "test_arca_cpu_hog" >> ./CI/tests/functional_tests
          echo "test_arca_memory_hog" >> ./CI/tests/functional_tests
          echo "test_arca_io_hog" >> ./CI/tests/functional_tests

      # Final common steps
      - name: Run Functional tests
        env:
          AWS_BUCKET: ${{ secrets.AWS_BUCKET }}
        run: |
          ./CI/run.sh
          cat ./CI/results.markdown >> $GITHUB_STEP_SUMMARY
          echo >> $GITHUB_STEP_SUMMARY
      - name: Upload CI logs
        uses: actions/upload-artifact@v3
        with:
          name: ci-logs
          path: CI/out
          if-no-files-found: error
      - name: Collect coverage report
        run: |
          python -m coverage html
          python -m coverage json
      - name: Publish coverage report to job summary
        run: |
          pip install html2text
          html2text --ignore-images --ignore-links -b 0 htmlcov/index.html >> $GITHUB_STEP_SUMMARY
      - name: Upload coverage data
        uses: actions/upload-artifact@v3
        with:
          name: coverage
          path: htmlcov
          if-no-files-found: error
      - name: Upload json coverage
        uses: actions/upload-artifact@v3
        with:
          name: coverage.json
          path: coverage.json
          if-no-files-found: error
      - name: Check CI results
        run: grep Fail CI/results.markdown && false || true
  badge:
    permissions:
      contents: write
    name: Generate Coverage Badge
    runs-on: ubuntu-latest
    needs:
      - tests
    if: github.ref == 'refs/heads/main' && github.event_name == 'push'
    steps:
      - name: Check out doc repo
        uses: actions/checkout@master
        with:
          repository: krkn-chaos/krkn-lib-docs
          path: krkn-lib-docs
          ssh-key: ${{ secrets.KRKN_LIB_DOCS_PRIV_KEY }}
      - name: Download json coverage
        uses: actions/download-artifact@v4.1.7
        with:
          name: coverage.json
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: 3.9
      - name: Copy badge on GitHub Page Repo
        env:
          COLOR: yellow
        run: |
          # generate coverage badge on previously calculated total coverage
          # and copy in the docs page
          export TOTAL=$(python -c "import json;print(json.load(open('coverage.json'))['totals']['percent_covered_display'])")
          [[ $TOTAL > 40 ]] && COLOR=green
          echo "TOTAL: $TOTAL"
          echo "COLOR: $COLOR"
          curl "https://img.shields.io/badge/coverage-$TOTAL%25-$COLOR" > ./krkn-lib-docs/coverage_badge_krkn.svg
      - name: Push updated Coverage Badge
        run: |
          cd krkn-lib-docs
          git add .
          git config user.name "krkn-chaos"
          git config user.email "<>"
          git commit -m "[KRKN] Coverage Badge ${GITHUB_REF##*/}" || echo "no changes to commit"
          git push
.gitignore (3 changed lines, vendored)

@@ -16,6 +16,7 @@ __pycache__/*
*.out
kube-burner*
kube_burner*
recommender_*.json

# Project files
.ropeproject
@@ -61,7 +62,7 @@ inspect.local.*
!CI/config/common_test_config.yaml
CI/out/*
CI/ci_results
CI/scenarios/*node.yaml
CI/legacy/*node.yaml
CI/results.markdown

#env
@@ -1,44 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  name: $NAMESPACE

---

apiVersion: v1
kind: Service
metadata:
  name: $DEPLOYMENT_NAME-service
  namespace: $NAMESPACE
spec:
  selector:
    app: $DEPLOYMENT_NAME
  ports:
    - name: http
      port: 80
      targetPort: 8080
  type: ClusterIP

---

apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: $NAMESPACE
  name: $DEPLOYMENT_NAME-deployment
spec:
  replicas: 3
  selector:
    matchLabels:
      app: $DEPLOYMENT_NAME
  template:
    metadata:
      labels:
        app: $DEPLOYMENT_NAME
    spec:
      containers:
        - name: $DEPLOYMENT_NAME
          image: nginxinc/nginx-unprivileged:stable-alpine
          ports:
            - name: http
              containerPort: 8080
@@ -1,58 +0,0 @@
#!/bin/bash
SCRIPT_PATH=./CI/CRC
DEPLOYMENT_PATH=$SCRIPT_PATH/deployment.yaml

[[ ! -f $DEPLOYMENT_PATH ]] && echo "[ERROR] please run $0 from GitHub action root directory" && exit 1
[[ -z $DEPLOYMENT_NAME ]] && echo "[ERROR] please set \$DEPLOYMENT_NAME environment variable" && exit 1
[[ -z $NAMESPACE ]] && echo "[ERROR] please set \$NAMESPACE environment variable" && exit 1

OPENSSL=`which openssl 2>/dev/null`
[[ $? != 0 ]] && echo "[ERROR]: openssl missing, please install it and try again" && exit 1
OC=`which oc 2>/dev/null`
[[ $? != 0 ]] && echo "[ERROR]: oc missing, please install it and try again" && exit 1
SED=`which sed 2>/dev/null`
[[ $? != 0 ]] && echo "[ERROR]: sed missing, please install it and try again" && exit 1
JQ=`which jq 2>/dev/null`
[[ $? != 0 ]] && echo "[ERROR]: jq missing, please install it and try again" && exit 1
ENVSUBST=`which envsubst 2>/dev/null`
[[ $? != 0 ]] && echo "[ERROR]: envsubst missing, please install it and try again" && exit 1

API_PORT="6443"
API_ADDRESS="https://api.`cat host`.nip.io:${API_PORT}"
FQN=$DEPLOYMENT_NAME.apps.$API_ADDRESS

echo "[INF] deploying example deployment: $DEPLOYMENT_NAME in namespace: $NAMESPACE"
$ENVSUBST < $DEPLOYMENT_PATH | $OC apply -f - > /dev/null 2>&1

echo "[INF] creating SSL self-signed certificates for route https://$FQN"
$OPENSSL genrsa -out servercakey.pem > /dev/null 2>&1
$OPENSSL req -new -x509 -key servercakey.pem -out serverca.crt -subj "/CN=$FQN/O=Red Hat Inc./C=US" > /dev/null 2>&1
$OPENSSL genrsa -out server.key > /dev/null 2>&1
$OPENSSL req -new -key server.key -out server_reqout.txt -subj "/CN=$FQN/O=Red Hat Inc./C=US" > /dev/null 2>&1
$OPENSSL x509 -req -in server_reqout.txt -days 3650 -sha256 -CAcreateserial -CA serverca.crt -CAkey servercakey.pem -out server.crt > /dev/null 2>&1
echo "[INF] creating deployment: $DEPLOYMENT_NAME public route: https://$FQN"
$OC create route --namespace $NAMESPACE edge --service=$DEPLOYMENT_NAME-service --cert=server.crt --key=server.key --ca-cert=serverca.crt --hostname="$FQN" > /dev/null 2>&1

echo "[INF] setting github action environment variables"

NODE_NAME="`$OC get nodes -o json | $JQ -r '.items[0].metadata.name'`"
COVERAGE_FILE="`pwd`/coverage.md"
echo "DEPLOYMENT_NAME=$DEPLOYMENT_NAME" >> $GITHUB_ENV
echo "DEPLOYMENT_FQN=$FQN" >> $GITHUB_ENV
echo "API_ADDRESS=$API_ADDRESS" >> $GITHUB_ENV
echo "API_PORT=$API_PORT" >> $GITHUB_ENV
echo "NODE_NAME=$NODE_NAME" >> $GITHUB_ENV
echo "NAMESPACE=$NAMESPACE" >> $GITHUB_ENV
echo "COVERAGE_FILE=$COVERAGE_FILE" >> $GITHUB_ENV

echo "[INF] deployment fully qualified name will be available in \${{ env.DEPLOYMENT_NAME }} with value $DEPLOYMENT_NAME"
echo "[INF] deployment name will be available in \${{ env.DEPLOYMENT_FQN }} with value $FQN"
echo "[INF] OCP API address will be available in \${{ env.API_ADDRESS }} with value $API_ADDRESS"
echo "[INF] OCP API port will be available in \${{ env.API_PORT }} with value $API_PORT"
echo "[INF] OCP node name will be available in \${{ env.NODE_NAME }} with value $NODE_NAME"
echo "[INF] coverage file will ve available in \${{ env.COVERAGE_FILE }} with value $COVERAGE_FILE"
@@ -1,7 +1,7 @@
## CI Tests

### First steps
Edit [my_tests](tests/my_tests) with tests you want to run
Edit [functional_tests](tests/functional_tests) with tests you want to run

### How to run
```./CI/run.sh```
@@ -11,7 +11,7 @@ This will run kraken using python, make sure python3 is set up and configured pr

### Adding a test case

1. Add in simple scenario yaml file to execute under [../CI/scenarios/](scenarios)
1. Add in simple scenario yaml file to execute under [../CI/scenarios/](legacy)

2. Copy [test_application_outages.sh](tests/test_app_outages.sh) for example on how to get started

@@ -27,7 +27,7 @@ This will run kraken using python, make sure python3 is set up and configured pr

   e. 15: Make sure name of config in line 14 matches what you pass on this line

4. Add test name to [my_tests](../CI/tests/my_tests) file
4. Add test name to [functional_tests](../CI/tests/functional_tests) file

   a. This will be the name of the file without ".sh"
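The test scripts touched in this compare all share the same skeleton, so a new case is mostly boilerplate around a scenario file. A minimal sketch of that skeleton, reusing the application-outage scenario that already appears in this diff (a new test would swap in its own scenario type, scenario file, and rendered config name):

```bash
#!/bin/bash
# Sketch of a CI functional test, modelled on the CI/tests/*.sh scripts in this compare.
set -xeEo pipefail

source CI/tests/common.sh   # provides the error/finish traps shared by all tests

trap error ERR
trap finish EXIT

function functional_test_example {
  # Point krkn at a scenario; here the app-outage scenario used by test_app_outages.sh.
  export scenario_type="application_outages"
  export scenario_file="scenarios/openshift/app_outage.yaml"
  export post_config=""
  # Render the templated common config into a per-test config and run krkn against it.
  envsubst < CI/config/common_test_config.yaml > CI/config/example.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/example.yaml
  echo "Example scenario test: Success"
}

functional_test_example
```

The new script name (minus ".sh") then goes into CI/tests/functional_tests so that CI/run.sh picks it up.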
@@ -1,5 +1,5 @@
kraken:
    distribution: openshift # Distribution can be kubernetes or openshift.
    distribution: kubernetes # Distribution can be kubernetes or openshift.
    kubeconfig_path: ~/.kube/config # Path to kubeconfig.
    exit_on_failure: False # Exit when a post action scenario fails.
    litmus_version: v1.13.6 # Litmus version to install.
@@ -29,9 +29,12 @@ tunings:
    daemon_mode: False # Iterations are set to infinity which means that the kraken will cause chaos forever.
telemetry:
    enabled: False # enable/disables the telemetry collection feature
    api_url: https://ulnmf9xv7j.execute-api.us-west-2.amazonaws.com/production #telemetry service endpoint
    username: username # telemetry service username
    password: password # telemetry service password
    api_url: https://yvnn4rfoi7.execute-api.us-west-2.amazonaws.com/test #telemetry service endpoint
    username: $TELEMETRY_USERNAME # telemetry service username
    password: $TELEMETRY_PASSWORD # telemetry service password
    prometheus_namespace: 'prometheus-k8s' # prometheus namespace
    prometheus_pod_name: 'prometheus-kind-prometheus-kube-prome-prometheus-0' # prometheus pod_name
    prometheus_container_name: 'prometheus'
    prometheus_backup: True # enables/disables prometheus data collection
    full_prometheus_backup: False # if is set to False only the /prometheus/wal folder will be downloaded.
    backup_threads: 5 # number of telemetry download/upload threads
@@ -39,3 +42,23 @@ telemetry:
    max_retries: 0 # maximum number of upload retries (if 0 will retry forever)
    run_tag: '' # if set, this will be appended to the run folder in the bucket (useful to group the runs)
    archive_size: 10000 # the size of the prometheus data archive size in KB. The lower the size of archive is
    logs_backup: True
    logs_filter_patterns:
        - "(\\w{3}\\s\\d{1,2}\\s\\d{2}:\\d{2}:\\d{2}\\.\\d+).+" # Sep 9 11:20:36.123425532
        - "kinit (\\d+/\\d+/\\d+\\s\\d{2}:\\d{2}:\\d{2})\\s+" # kinit 2023/09/15 11:20:36 log
        - "(\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d+Z).+" # 2023-09-15T11:20:36.123425532Z log
    oc_cli_path: /usr/bin/oc # optional, if not specified will be search in $PATH
    events_backup: True # enables/disables cluster events collection
    telemetry_group: "funtests"
elastic:
    enable_elastic: True
    collect_metrics: False
    collect_alerts: False
    verify_certs: False
    elastic_url: "https://192.168.39.196" # To track results in elasticsearch, give url to server here; will post telemetry details when url and index not blank
    elastic_port: 32766
    username: "elastic"
    password: "test"
    metrics_index: "krkn-metrics"
    alerts_index: "krkn-alerts"
    telemetry_index: "krkn-telemetry"
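The config above is a template rather than a finished file: telemetry.username and telemetry.password are left as $TELEMETRY_USERNAME and $TELEMETRY_PASSWORD, individual keys are overridden with yq by the workflow and test scripts, and each test renders its own copy with envsubst. A rough sketch of that flow (the exported values are illustrative; in CI they come from GitHub secrets, and the scenario_* variables feed the scenarios section of the template that is not shown in these hunks):

```bash
# Sketch: how the CI pieces in this compare consume CI/config/common_test_config.yaml.
export TELEMETRY_USERNAME="user"      # placeholder; secrets.TELEMETRY_USERNAME in the workflow
export TELEMETRY_PASSWORD="secret"    # placeholder; secrets.TELEMETRY_PASSWORD in the workflow
export scenario_type="application_outages"
export scenario_file="scenarios/openshift/app_outage.yaml"
export post_config=""

# Override individual keys in place, as the workflow and test scripts do.
yq -i '.kraken.port="8081"' CI/config/common_test_config.yaml
yq -i '.kraken.performance_monitoring="localhost:9090"' CI/config/common_test_config.yaml

# Render the template into a per-test config; the test script then passes it to run_kraken.py.
envsubst < CI/config/common_test_config.yaml > CI/config/app_outage.yaml
```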
CI/run.sh (42 changed lines)

@@ -1,15 +1,14 @@
#!/bin/bash
set -x
MAX_RETRIES=60

OC=`which oc 2>/dev/null`
[[ $? != 0 ]] && echo "[ERROR]: oc missing, please install it and try again" && exit 1
KUBECTL=`which kubectl 2>/dev/null`
[[ $? != 0 ]] && echo "[ERROR]: kubectl missing, please install it and try again" && exit 1

wait_cluster_become_ready() {
  COUNT=1
  until `$OC get namespace > /dev/null 2>&1`
  until `$KUBECTL get namespace > /dev/null 2>&1`
  do
    echo "[INF] waiting OpenShift to become ready, after $COUNT check"
    echo "[INF] waiting Kubernetes to become ready, after $COUNT check"
    sleep 3
    [[ $COUNT == $MAX_RETRIES ]] && echo "[ERR] max retries exceeded, failing" && exit 1
    ((COUNT++))
@@ -18,9 +17,9 @@ wait_cluster_become_ready() {

ci_tests_loc="CI/tests/my_tests"
ci_tests_loc="CI/tests/functional_tests"

echo "running test suit consisting of ${ci_tests}"
echo -e "********* Running Functional Tests Suite *********\n\n"

rm -rf CI/out

@@ -37,9 +36,32 @@ echo 'Test | Result | Duration' >> $results
echo '-----------------------|--------|---------' >> $results

# Run each test
for test_name in `cat CI/tests/my_tests`
failed_tests=()
for test_name in `cat CI/tests/functional_tests`
do
  wait_cluster_become_ready
  ./CI/run_test.sh $test_name $results
  #wait_cluster_become_ready
  return_value=`./CI/run_test.sh $test_name $results`
  if [[ $return_value == 1 ]]
  then
    echo "Failed"
    failed_tests+=("$test_name")
  fi
  wait_cluster_become_ready
done

if (( ${#failed_tests[@]}>0 ))
then
  echo -e "\n\n======================================================================"
  echo -e "\n FUNCTIONAL TESTS FAILED ${failed_tests[*]} ABORTING"
  echo -e "\n======================================================================\n\n"

  for test in "${failed_tests[@]}"
  do
    echo -e "\n********** $test KRKN RUN OUTPUT **********\n"
    cat "CI/out/$test.out"
    echo -e "\n********************************************\n\n\n\n"
  done

  exit 1
fi
@@ -1,5 +1,4 @@
#!/bin/bash
set -x
readonly SECONDS_PER_HOUR=3600
readonly SECONDS_PER_MINUTE=60
function get_time_format() {
@@ -14,9 +13,7 @@ ci_test=`echo $1`

results_file=$2

echo -e "\n======================================================================"
echo -e " CI test for ${ci_test} "
echo -e "======================================================================\n"
echo -e "test: ${ci_test}" >&2

ci_results="CI/out/$ci_test.out"
# Test ci
@@ -28,13 +25,16 @@ then
  # if the test passes update the results and complete
  duration=$SECONDS
  duration=$(get_time_format $duration)
  echo "$ci_test: Successful"
  echo -e "> $ci_test: Successful\n" >&2
  echo "$ci_test | Pass | $duration" >> $results_file
  count=$retries
  # return value for run.sh
  echo 0
else
  duration=$SECONDS
  duration=$(get_time_format $duration)
  echo "$ci_test: Failed"
  echo -e "> $ci_test: Failed\n" >&2
  echo "$ci_test | Fail | $duration" >> $results_file
  echo "Logs for "$ci_test
  # return value for run.sh
  echo 1
fi
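One detail worth calling out in the reworked run.sh/run_test.sh pair above: run_test.sh now reports pass/fail to its caller by echoing "0" or "1" on stdout, which run.sh captures with command substitution, while all human-readable progress is sent to stderr with `>&2` so it cannot pollute the captured value. A stripped-down sketch of that pattern:

```bash
#!/bin/bash
# Sketch of the stdout-as-return-value pattern used by CI/run.sh and CI/run_test.sh above.
run_one_test() {
  echo "test: $1" >&2   # progress goes to stderr, outside the captured value
  # ... run the actual test here ...
  echo 0                # 0 = pass, 1 = fail; the only thing written to stdout
}

return_value=$(run_one_test "example")   # command substitution captures stdout only
if [[ $return_value == 1 ]]
then
  echo "Failed"
fi
```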
@@ -1,5 +0,0 @@
application_outage: # Scenario to create an outage of an application by blocking traffic
  duration: 10 # Duration in seconds after which the routes will be accessible
  namespace: openshift-monitoring # Namespace to target - all application routes will go inaccessible if pod selector is empty
  pod_selector: {} # Pods to target
  block: [Ingress, Egress] # It can be Ingress or Egress or Ingress, Egress
@@ -1,8 +0,0 @@
scenarios:
  - name: "kill machine config container"
    namespace: "openshift-machine-config-operator"
    label_selector: "k8s-app=machine-config-server"
    container_name: "hello-openshift"
    action: "kill 1"
    count: 1
    retry_wait: 60
@@ -1,6 +0,0 @@
network_chaos: # Scenario to create an outage by simulating random variations in the network.
  duration: 10 # seconds
  instance_count: 1
  execution: serial
  egress:
    bandwidth: 100mbit
@@ -1,7 +0,0 @@
scenarios:
  - action: delete
    namespace: "^$openshift-network-diagnostics$"
    label_selector:
    runs: 1
    sleep: 15
    wait_time: 30
@@ -1,5 +0,0 @@
time_scenarios:
  - action: skew_time
    object_type: pod
    label_selector: k8s-app=etcd
    container_name: ""
CI/templates/container_scenario_pod.yaml (16 lines, new file)

@@ -0,0 +1,16 @@
apiVersion: v1
kind: Pod
metadata:
  name: container
  labels:
    scenario: container
spec:
  hostNetwork: true
  containers:
    - name: fedtools
      image: docker.io/fedora/tools
      command:
        - /bin/sh
        - -c
        - |
          sleep infinity

CI/templates/outage_pod.yaml (16 lines, new file)

@@ -0,0 +1,16 @@
apiVersion: v1
kind: Pod
metadata:
  name: outage
  labels:
    scenario: outage
spec:
  hostNetwork: true
  containers:
    - name: fedtools
      image: docker.io/fedora/tools
      command:
        - /bin/sh
        - -c
        - |
          sleep infinity

CI/templates/service_hijacking.yaml (29 lines, new file)

@@ -0,0 +1,29 @@
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    app.kubernetes.io/name: proxy
spec:
  containers:
    - name: nginx
      image: nginx:stable
      ports:
        - containerPort: 80
          name: http-web-svc

---
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  selector:
    app.kubernetes.io/name: proxy
  type: NodePort
  ports:
    - name: name-of-service-port
      protocol: TCP
      port: 80
      targetPort: http-web-svc
      nodePort: 30036

CI/templates/time_pod.yaml (16 lines, new file)

@@ -0,0 +1,16 @@
apiVersion: v1
kind: Pod
metadata:
  name: time-skew
  labels:
    scenario: time-skew
spec:
  hostNetwork: true
  containers:
    - name: fedtools
      image: docker.io/fedora/tools
      command:
        - /bin/sh
        - -c
        - |
          sleep infinity
@@ -1,18 +1,26 @@
ERRORED=false

function finish {
  if [ $? -eq 1 ] && [ $ERRORED != "true" ]
  if [ $? != 0 ] && [ $ERRORED != "true" ]
  then
    error
  fi
}

function error {
  echo "Error caught."
  ERRORED=true
  exit_code=$?
  if [ $exit_code == 1 ]
  then
    echo "Error caught."
    ERRORED=true
  elif [ $exit_code == 2 ]
  then
    echo "Run with exit code 2 detected, it is expected, wrapping the exit code with 0 to avoid pipeline failure"
    exit 0
  fi
}

function get_node {
  worker_node=$(oc get nodes --no-headers | grep worker | head -n 1)
  worker_node=$(kubectl get nodes --no-headers | grep worker | head -n 1)
  export WORKER_NODE=$worker_node
}
CI/tests/functional_tests (1 line, new file)

@@ -0,0 +1 @@

@@ -1 +0,0 @@
test_net_chaos
@@ -7,9 +7,11 @@ trap finish EXIT

function functional_test_app_outage {

  yq -i '.application_outage.duration=10' scenarios/openshift/app_outage.yaml
  yq -i '.application_outage.pod_selector={"scenario":"outage"}' scenarios/openshift/app_outage.yaml
  yq -i '.application_outage.namespace="default"' scenarios/openshift/app_outage.yaml
  export scenario_type="application_outages"
  export scenario_file="CI/scenarios/app_outage.yaml"
  export scenario_file="scenarios/openshift/app_outage.yaml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/app_outage.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/app_outage.yaml
@@ -1,21 +0,0 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT

function functional_test_app_outage {
  [ -z $DEPLOYMENT_NAME ] && echo "[ERR] DEPLOYMENT_NAME variable not set, failing." && exit 1
  yq -i '.application_outage.pod_selector={"app":"'$DEPLOYMENT_NAME'"}' CI/scenarios/app_outage.yaml
  yq -i '.application_outage.namespace="'$NAMESPACE'"' CI/scenarios/app_outage.yaml
  export scenario_type="application_outages"
  export scenario_file="CI/scenarios/app_outage.yaml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/app_outage.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/app_outage.yaml
  echo "App outage scenario test: Success"
}

functional_test_app_outage
CI/tests/test_arca_cpu_hog.sh (19 lines, new file)

@@ -0,0 +1,19 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT

function functional_test_arca_cpu_hog {
  yq -i '.input_list[0].node_selector={"kubernetes.io/hostname":"kind-worker2"}' scenarios/arcaflow/cpu-hog/input.yaml
  export scenario_type="arcaflow_scenarios"
  export scenario_file="scenarios/arcaflow/cpu-hog/input.yaml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/arca_cpu_hog.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/arca_cpu_hog.yaml
  echo "Arcaflow CPU Hog: Success"
}

functional_test_arca_cpu_hog

CI/tests/test_arca_io_hog.sh (19 lines, new file)

@@ -0,0 +1,19 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT

function functional_test_arca_io_hog {
  yq -i '.input_list[0].node_selector={"kubernetes.io/hostname":"kind-worker2"}' scenarios/arcaflow/io-hog/input.yaml
  export scenario_type="arcaflow_scenarios"
  export scenario_file="scenarios/arcaflow/io-hog/input.yaml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/arca_io_hog.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/arca_io_hog.yaml
  echo "Arcaflow IO Hog: Success"
}

functional_test_arca_io_hog

CI/tests/test_arca_memory_hog.sh (19 lines, new file)

@@ -0,0 +1,19 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT

function functional_test_arca_memory_hog {
  yq -i '.input_list[0].node_selector={"kubernetes.io/hostname":"kind-worker2"}' scenarios/arcaflow/memory-hog/input.yaml
  export scenario_type="arcaflow_scenarios"
  export scenario_file="scenarios/arcaflow/memory-hog/input.yaml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/arca_memory_hog.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/arca_memory_hog.yaml
  echo "Arcaflow Memory Hog: Success"
}

functional_test_arca_memory_hog
@@ -8,9 +8,11 @@ trap finish EXIT
pod_file="CI/scenarios/hello_pod.yaml"

function functional_test_container_crash {

  yq -i '.scenarios[0].namespace="default"' scenarios/openshift/container_etcd.yml
  yq -i '.scenarios[0].label_selector="scenario=container"' scenarios/openshift/container_etcd.yml
  yq -i '.scenarios[0].container_name="fedtools"' scenarios/openshift/container_etcd.yml
  export scenario_type="container_scenarios"
  export scenario_file="- CI/scenarios/container_scenario.yml"
  export scenario_file="- scenarios/openshift/container_etcd.yml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/container_config.yaml
@@ -7,12 +7,13 @@ trap finish EXIT

function funtional_test_namespace_deletion {
  export scenario_type="namespace_scenarios"
  export scenario_file="- CI/scenarios/network_diagnostics_namespace.yaml"
  export scenario_file="- scenarios/openshift/ingress_namespace.yaml"
  export post_config=""
  yq '.scenarios.[0].namespace="^openshift-network-diagnostics$"' -i CI/scenarios/network_diagnostics_namespace.yaml
  yq '.scenarios[0].namespace="^namespace-scenario$"' -i scenarios/openshift/ingress_namespace.yaml
  yq '.scenarios[0].wait_time=30' -i scenarios/openshift/ingress_namespace.yaml
  yq '.scenarios[0].action="delete"' -i scenarios/openshift/ingress_namespace.yaml
  envsubst < CI/config/common_test_config.yaml > CI/config/namespace_config.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/namespace_config.yaml
  echo $?
  echo "Namespace scenario test: Success"
}
@@ -7,9 +7,16 @@ trap finish EXIT

function functional_test_network_chaos {
  yq -i '.network_chaos.duration=10' scenarios/openshift/network_chaos.yaml
  yq -i '.network_chaos.node_name="kind-worker2"' scenarios/openshift/network_chaos.yaml
  yq -i '.network_chaos.egress.bandwidth="100mbit"' scenarios/openshift/network_chaos.yaml
  yq -i 'del(.network_chaos.interfaces)' scenarios/openshift/network_chaos.yaml
  yq -i 'del(.network_chaos.label_selector)' scenarios/openshift/network_chaos.yaml
  yq -i 'del(.network_chaos.egress.latency)' scenarios/openshift/network_chaos.yaml
  yq -i 'del(.network_chaos.egress.loss)' scenarios/openshift/network_chaos.yaml

  export scenario_type="network_chaos"
  export scenario_file="CI/scenarios/network_chaos.yaml"
  export scenario_file="scenarios/openshift/network_chaos.yaml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/network_chaos.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/network_chaos.yaml
CI/tests/test_service_hijacking.sh (114 lines, new file)

@@ -0,0 +1,114 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT
# port mapping has been configured in kind-config.yml
SERVICE_URL=http://localhost:8888
PAYLOAD_GET_1="{ \
  \"status\":\"internal server error\" \
}"
STATUS_CODE_GET_1=500

PAYLOAD_PATCH_1="resource patched"
STATUS_CODE_PATCH_1=201

PAYLOAD_POST_1="{ \
  \"status\": \"unauthorized\" \
}"
STATUS_CODE_POST_1=401

PAYLOAD_GET_2="{ \
  \"status\":\"resource created\" \
}"
STATUS_CODE_GET_2=201

PAYLOAD_PATCH_2="bad request"
STATUS_CODE_PATCH_2=400

PAYLOAD_POST_2="not found"
STATUS_CODE_POST_2=404

JSON_MIME="application/json"
TEXT_MIME="text/plain; charset=utf-8"

function functional_test_service_hijacking {

  export scenario_type="service_hijacking"
  export scenario_file="scenarios/kube/service_hijacking.yaml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/service_hijacking.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/service_hijacking.yaml > /dev/null 2>&1 &
  PID=$!
  #Waiting the hijacking to have effect
  COUNTER=0
  while [ `curl -X GET -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/list/index.php` == 404 ]
  do
    echo "waiting scenario to kick in."
    sleep 1
    COUNTER=$((COUNTER+1))
    [ $COUNTER -eq "100" ] && echo "maximum number of retry reached, test failed" && exit 1
  done

  #Checking Step 1 GET on /list/index.php
  OUT_GET="`curl -X GET -s $SERVICE_URL/list/index.php`"
  OUT_CONTENT=`curl -X GET -s -o /dev/null -I -w "%{content_type}" $SERVICE_URL/list/index.php`
  OUT_STATUS_CODE=`curl -X GET -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/list/index.php`
  [ "${PAYLOAD_GET_1//[$'\t\r\n ']}" == "${OUT_GET//[$'\t\r\n ']}" ] && echo "Step 1 GET Payload OK" || (echo "Payload did not match. Test failed." && exit 1)
  [ "$OUT_STATUS_CODE" == "$STATUS_CODE_GET_1" ] && echo "Step 1 GET Status Code OK" || (echo " Step 1 GET status code did not match. Test failed." && exit 1)
  [ "$OUT_CONTENT" == "$JSON_MIME" ] && echo "Step 1 GET MIME OK" || (echo " Step 1 GET MIME did not match. Test failed." && exit 1)

  #Checking Step 1 POST on /list/index.php
  OUT_POST="`curl -s -X POST $SERVICE_URL/list/index.php`"
  OUT_STATUS_CODE=`curl -X POST -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/list/index.php`
  OUT_CONTENT=`curl -X POST -s -o /dev/null -I -w "%{content_type}" $SERVICE_URL/list/index.php`
  [ "${PAYLOAD_POST_1//[$'\t\r\n ']}" == "${OUT_POST//[$'\t\r\n ']}" ] && echo "Step 1 POST Payload OK" || (echo "Payload did not match. Test failed." && exit 1)
  [ "$OUT_STATUS_CODE" == "$STATUS_CODE_POST_1" ] && echo "Step 1 POST Status Code OK" || (echo "Step 1 POST status code did not match. Test failed." && exit 1)
  [ "$OUT_CONTENT" == "$JSON_MIME" ] && echo "Step 1 POST MIME OK" || (echo " Step 1 POST MIME did not match. Test failed." && exit 1)

  #Checking Step 1 PATCH on /patch
  OUT_PATCH="`curl -s -X PATCH $SERVICE_URL/patch`"
  OUT_STATUS_CODE=`curl -X PATCH -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/patch`
  OUT_CONTENT=`curl -X PATCH -s -o /dev/null -I -w "%{content_type}" $SERVICE_URL/patch`
  [ "${PAYLOAD_PATCH_1//[$'\t\r\n ']}" == "${OUT_PATCH//[$'\t\r\n ']}" ] && echo "Step 1 PATCH Payload OK" || (echo "Payload did not match. Test failed." && exit 1)
  [ "$OUT_STATUS_CODE" == "$STATUS_CODE_PATCH_1" ] && echo "Step 1 PATCH Status Code OK" || (echo "Step 1 PATCH status code did not match. Test failed." && exit 1)
  [ "$OUT_CONTENT" == "$TEXT_MIME" ] && echo "Step 1 PATCH MIME OK" || (echo " Step 1 PATCH MIME did not match. Test failed." && exit 1)
  # wait for the next step
  sleep 16

  #Checking Step 2 GET on /list/index.php
  OUT_GET="`curl -X GET -s $SERVICE_URL/list/index.php`"
  OUT_CONTENT=`curl -X GET -s -o /dev/null -I -w "%{content_type}" $SERVICE_URL/list/index.php`
  OUT_STATUS_CODE=`curl -X GET -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/list/index.php`
  [ "${PAYLOAD_GET_2//[$'\t\r\n ']}" == "${OUT_GET//[$'\t\r\n ']}" ] && echo "Step 2 GET Payload OK" || (echo "Step 2 GET Payload did not match. Test failed." && exit 1)
  [ "$OUT_STATUS_CODE" == "$STATUS_CODE_GET_2" ] && echo "Step 2 GET Status Code OK" || (echo "Step 2 GET status code did not match. Test failed." && exit 1)
  [ "$OUT_CONTENT" == "$JSON_MIME" ] && echo "Step 2 GET MIME OK" || (echo " Step 2 GET MIME did not match. Test failed." && exit 1)

  #Checking Step 2 POST on /list/index.php
  OUT_POST="`curl -s -X POST $SERVICE_URL/list/index.php`"
  OUT_CONTENT=`curl -X POST -s -o /dev/null -I -w "%{content_type}" $SERVICE_URL/list/index.php`
  OUT_STATUS_CODE=`curl -X POST -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/list/index.php`
  [ "${PAYLOAD_POST_2//[$'\t\r\n ']}" == "${OUT_POST//[$'\t\r\n ']}" ] && echo "Step 2 POST Payload OK" || (echo "Step 2 POST Payload did not match. Test failed." && exit 1)
  [ "$OUT_STATUS_CODE" == "$STATUS_CODE_POST_2" ] && echo "Step 2 POST Status Code OK" || (echo "Step 2 POST status code did not match. Test failed." && exit 1)
  [ "$OUT_CONTENT" == "$TEXT_MIME" ] && echo "Step 2 POST MIME OK" || (echo " Step 2 POST MIME did not match. Test failed." && exit 1)

  #Checking Step 2 PATCH on /patch
  OUT_PATCH="`curl -s -X PATCH $SERVICE_URL/patch`"
  OUT_CONTENT=`curl -X PATCH -s -o /dev/null -I -w "%{content_type}" $SERVICE_URL/patch`
  OUT_STATUS_CODE=`curl -X PATCH -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/patch`
  [ "${PAYLOAD_PATCH_2//[$'\t\r\n ']}" == "${OUT_PATCH//[$'\t\r\n ']}" ] && echo "Step 2 PATCH Payload OK" || (echo "Step 2 PATCH Payload did not match. Test failed." && exit 1)
  [ "$OUT_STATUS_CODE" == "$STATUS_CODE_PATCH_2" ] && echo "Step 2 PATCH Status Code OK" || (echo "Step 2 PATCH status code did not match. Test failed." && exit 1)
  [ "$OUT_CONTENT" == "$TEXT_MIME" ] && echo "Step 2 PATCH MIME OK" || (echo " Step 2 PATCH MIME did not match. Test failed." && exit 1)
  wait $PID

  # now checking if service has been restore correctly and nginx responds correctly
  curl -s $SERVICE_URL | grep nginx! && echo "BODY: Service restored!" || (echo "BODY: failed to restore service" && exit 1)
  OUT_STATUS_CODE=`curl -X GET -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL`
  [ "$OUT_STATUS_CODE" == "200" ] && echo "STATUS_CODE: Service restored!" || (echo "STATUS_CODE: failed to restore service" && exit 1)

  echo "Service Hijacking Chaos test: Success"
}

functional_test_service_hijacking
CI/tests/test_telemetry.sh (37 lines, new file)

@@ -0,0 +1,37 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT

function functional_test_telemetry {
  AWS_CLI=`which aws`
  [ -z "$AWS_CLI" ]&& echo "AWS cli not found in path" && exit 1
  [ -z "$AWS_BUCKET" ] && echo "AWS bucket not set in environment" && exit 1

  export RUN_TAG="funtest-telemetry"
  yq -i '.telemetry.enabled=True' CI/config/common_test_config.yaml
  yq -i '.telemetry.full_prometheus_backup=True' CI/config/common_test_config.yaml
  yq -i '.performance_monitoring.check_critical_alerts=True' CI/config/common_test_config.yaml
  yq -i '.performance_monitoring.prometheus_url="http://localhost:9090"' CI/config/common_test_config.yaml
  yq -i '.telemetry.run_tag=env(RUN_TAG)' CI/config/common_test_config.yaml

  export scenario_type="arcaflow_scenarios"
  export scenario_file="scenarios/arcaflow/cpu-hog/input.yaml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/telemetry.yaml
  retval=$(python3 -m coverage run -a run_kraken.py -c CI/config/telemetry.yaml)
  RUN_FOLDER=`cat CI/out/test_telemetry.out | grep amazonaws.com | sed -rn "s#.*https:\/\/.*\/files/(.*)#\1#p"`
  $AWS_CLI s3 ls "s3://$AWS_BUCKET/$RUN_FOLDER/" | awk '{ print $4 }' > s3_remote_files
  echo "checking if telemetry files are uploaded on s3"
  cat s3_remote_files | grep events-00.json || ( echo "FAILED: events-00.json not uploaded" && exit 1 )
  cat s3_remote_files | grep critical-alerts-00.log || ( echo "FAILED: critical-alerts-00.log not uploaded" && exit 1 )
  cat s3_remote_files | grep prometheus-00.tar || ( echo "FAILED: prometheus backup not uploaded" && exit 1 )
  cat s3_remote_files | grep telemetry.json || ( echo "FAILED: telemetry.json not uploaded" && exit 1 )
  echo "all files uploaded!"
  echo "Telemetry Collection: Success"
}

functional_test_telemetry
@@ -7,8 +7,12 @@ trap finish EXIT

function functional_test_time_scenario {
  yq -i '.time_scenarios[0].label_selector="scenario=time-skew"' scenarios/openshift/time_scenarios_example.yml
  yq -i '.time_scenarios[0].container_name=""' scenarios/openshift/time_scenarios_example.yml
  yq -i '.time_scenarios[0].namespace="default"' scenarios/openshift/time_scenarios_example.yml
  yq -i '.time_scenarios[1].label_selector="kubernetes.io/hostname=kind-worker2"' scenarios/openshift/time_scenarios_example.yml
  export scenario_type="time_scenarios"
  export scenario_file="CI/scenarios/time_scenarios.yml"
  export scenario_file="scenarios/openshift/time_scenarios_example.yml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/time_config.yaml
README.md (36 changed lines)

@@ -1,6 +1,7 @@
# Krkn aka Kraken
[](https://quay.io/repository/redhat-chaos/krkn?tab=tags&tag=latest)
(build, test, and coverage badge images)

@@ -38,19 +39,7 @@ After installation, refer back to the below sections for supported scenarios and

#### Running Kraken with minimal configuration tweaks
For cases where you want to run Kraken with minimal configuration changes, refer to [Kraken-hub](https://github.com/redhat-chaos/krknChaos-hub). One use case is CI integration where you do not want to carry around different configuration files for the scenarios.

### Setting up infrastructure dependencies
Kraken indexes the metrics specified in the profile into Elasticsearch in addition to leveraging Cerberus for understanding the health of the Kubernetes/OpenShift cluster under test. More information on the features is documented below. The infrastructure pieces can be easily installed and uninstalled by running:

```
$ cd kraken
$ podman-compose up or $ docker-compose up # Spins up the containers specified in the docker-compose.yml file present in the run directory.
$ podman-compose down or $ docker-compose down # Delete the containers installed.
```
This will manage the Cerberus and Elasticsearch containers on the host on which you are running Kraken.

**NOTE**: Make sure you have enough resources (memory and disk) on the machine on top of which the containers are running as Elasticsearch is resource intensive. Cerberus monitors the system components by default, the [config](config/cerberus.yaml) can be tweaked to add applications namespaces, routes and other components to monitor as well. The command will keep running until killed since detached mode is not supported as of now.
For cases where you want to run Kraken with minimal configuration changes, refer to [krkn-hub](https://github.com/krkn-chaos/krkn-hub). One use case is CI integration where you do not want to carry around different configuration files for the scenarios.

### Config
@@ -65,7 +54,7 @@ Scenario type | Kubernetes
[Pod Network Scenarios](docs/pod_network_scenarios.md) | :x: |
[Container Scenarios](docs/container_scenarios.md) | :heavy_check_mark: |
[Node Scenarios](docs/node_scenarios.md) | :heavy_check_mark: |
[Time Scenarios](docs/time_scenarios.md) | :x: |
[Time Scenarios](docs/time_scenarios.md) | :heavy_check_mark: |
[Hog Scenarios: CPU, Memory](docs/arcaflow_scenarios.md) | :heavy_check_mark: |
[Cluster Shut Down Scenarios](docs/cluster_shut_down_scenarios.md) | :heavy_check_mark: |
[Service Disruption Scenarios](docs/service_disruption_scenarios.md.md) | :heavy_check_mark: |
@@ -74,12 +63,14 @@ Scenario type | Kubernetes
[PVC scenario](docs/pvc_scenario.md) | :heavy_check_mark: |
[Network_Chaos](docs/network_chaos.md) | :heavy_check_mark: |
[ManagedCluster Scenarios](docs/managedcluster_scenarios.md) | :heavy_check_mark: |
[Service Hijacking Scenarios](docs/service_hijacking_scenarios.md) | :heavy_check_mark: |
[SYN Flood Scenarios](docs/syn_flood_scenarios.md) | :heavy_check_mark: |

### Kraken scenario pass/fail criteria and report
It is important to make sure to check if the targeted component recovered from the chaos injection and also if the Kubernetes/OpenShift cluster is healthy as failures in one component can have an adverse impact on other components. Kraken does this by:
It is important to make sure to check if the targeted component recovered from the chaos injection and also if the Kubernetes cluster is healthy as failures in one component can have an adverse impact on other components. Kraken does this by:
- Having built in checks for pod and node based scenarios to ensure the expected number of replicas and nodes are up. It also supports running custom scripts with the checks.
- Leveraging [Cerberus](https://github.com/redhat-chaos/cerberus) to monitor the cluster under test and consuming the aggregated go/no-go signal to determine pass/fail post chaos. It is highly recommended to turn on the Cerberus health check feature available in Kraken. Instructions on installing and setting up Cerberus can be found [here](https://github.com/openshift-scale/cerberus#installation) or can be installed from Kraken using the [instructions](https://github.com/redhat-chaos/krkn#setting-up-infrastructure-dependencies). Once Cerberus is up and running, set cerberus_enabled to True and cerberus_url to the url where Cerberus publishes go/no-go signal in the Kraken config file. Cerberus can monitor [application routes](https://github.com/redhat-chaos/cerberus/blob/main/docs/config.md#watch-routes) during the chaos and fails the run if it encounters downtime as it is a potential downtime in a customers, or users environment as well. It is especially important during the control plane chaos scenarios including the API server, Etcd, Ingress etc. It can be enabled by setting `check_applicaton_routes: True` in the [Kraken config](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml) provided application routes are being monitored in the [cerberus config](https://github.com/redhat-chaos/krkn/blob/main/config/cerberus.yaml).
- Leveraging [Cerberus](https://github.com/krkn-chaos/cerberus) to monitor the cluster under test and consuming the aggregated go/no-go signal to determine pass/fail post chaos. It is highly recommended to turn on the Cerberus health check feature available in Kraken. Instructions on installing and setting up Cerberus can be found [here](https://github.com/openshift-scale/cerberus#installation) or can be installed from Kraken using the [instructions](https://github.com/krkn-chaos/krkn#setting-up-infrastructure-dependencies). Once Cerberus is up and running, set cerberus_enabled to True and cerberus_url to the url where Cerberus publishes go/no-go signal in the Kraken config file. Cerberus can monitor [application routes](https://github.com/redhat-chaos/cerberus/blob/main/docs/config.md#watch-routes) during the chaos and fails the run if it encounters downtime as it is a potential downtime in a customers, or users environment as well. It is especially important during the control plane chaos scenarios including the API server, Etcd, Ingress etc. It can be enabled by setting `check_applicaton_routes: True` in the [Kraken config](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml) provided application routes are being monitored in the [cerberus config](https://github.com/redhat-chaos/krkn/blob/main/config/cerberus.yaml).
|
||||
- Leveraging built-in alert collection feature to fail the runs in case of critical alerts.
|
||||
|
||||
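For reference, a minimal sketch of what the Cerberus settings described above could look like in the Kraken config. Key names are taken from the README text above and from the config excerpt later in this compare; the URL value and the placement of `check_applicaton_routes` under the `cerberus` section are assumptions, not part of this change.

```yaml
cerberus:
  cerberus_enabled: True              # turn on the Cerberus health check described above
  cerberus_url: http://0.0.0.0:8080   # illustrative endpoint where Cerberus publishes the go/no-go signal
  check_applicaton_routes: False      # set True to fail the run on route downtime, provided the routes
                                      # are also being watched in config/cerberus.yaml (spelling as in the README)
```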
### Signaling
|
||||
@@ -103,7 +94,7 @@ Information on enabling and leveraging this feature can be found [here](docs/SLO
|
||||
|
||||
### OCM / ACM integration
|
||||
|
||||
Kraken supports injecting faults into [Open Cluster Management (OCM)](https://open-cluster-management.io/) and [Red Hat Advanced Cluster Management for Kubernetes (ACM)](https://www.redhat.com/en/technologies/management/advanced-cluster-management) managed clusters through [ManagedCluster Scenarios](docs/managedcluster_scenarios.md).
|
||||
Kraken supports injecting faults into [Open Cluster Management (OCM)](https://open-cluster-management.io/) and [Red Hat Advanced Cluster Management for Kubernetes (ACM)](https://www.krkn.com/en/technologies/management/advanced-cluster-management) managed clusters through [ManagedCluster Scenarios](docs/managedcluster_scenarios.md).
|
||||
|
||||
|
||||
### Blogs and other useful resources
|
||||
@@ -129,6 +120,7 @@ Please read [this file]((CI/README.md#adding-a-test-case)) for more information
|
||||
|
||||
|
||||
### Community
|
||||
Key Members(slack_usernames/full name): paigerube14/Paige Rubendall, mffiedler/Mike Fiedler, ravielluri/Naga Ravi Chaitanya Elluri.
|
||||
* [**#krkn on Kubernetes Slack**](https://kubernetes.slack.com)
|
||||
* [**#forum-chaos on CoreOS Slack internal to Red Hat**](https://coreos.slack.com)
|
||||
Key Members(slack_usernames/full name): paigerube14/Paige Rubendall, mffiedler/Mike Fiedler, tsebasti/Tullio Sebastiani, yogi/Yogananth Subramanian, sahil/Sahil Shah, pradeep/Pradeep Surisetty and ravielluri/Naga Ravi Chaitanya Elluri.
|
||||
* [**#krkn on Kubernetes Slack**](https://kubernetes.slack.com/messages/C05SFMHRWK1)
|
||||
|
||||
The Linux Foundation® (TLF) has registered trademarks and uses trademarks. For a list of TLF trademarks, see [Trademark Usage](https://www.linuxfoundation.org/legal/trademark-usage).
|
||||
|
||||
20
ROADMAP.md
@@ -2,14 +2,14 @@
|
||||
|
||||
Following are a list of enhancements that we are planning to work on adding support in Krkn. Of course any help/contributions are greatly appreciated.
|
||||
|
||||
- [ ] [Ability to run multiple chaos scenarios in parallel under load to mimic real world outages](https://github.com/redhat-chaos/krkn/issues/424)
|
||||
- [x] [Centralized storage for chaos experiments artifacts](https://github.com/redhat-chaos/krkn/issues/423)
|
||||
- [ ] [Support for causing DNS outages](https://github.com/redhat-chaos/krkn/issues/394)
|
||||
- [x] [Chaos recommender](https://github.com/redhat-chaos/krkn/tree/main/utils/chaos-recommender) to suggest scenarios having probability of impacting the service under test using profiling results
|
||||
- [ ] [Ability to run multiple chaos scenarios in parallel under load to mimic real world outages](https://github.com/krkn-chaos/krkn/issues/424)
|
||||
- [x] [Centralized storage for chaos experiments artifacts](https://github.com/krkn-chaos/krkn/issues/423)
|
||||
- [ ] [Support for causing DNS outages](https://github.com/krkn-chaos/krkn/issues/394)
|
||||
- [x] [Chaos recommender](https://github.com/krkn-chaos/krkn/tree/main/utils/chaos-recommender) to suggest scenarios having probability of impacting the service under test using profiling results
|
||||
- [ ] Chaos AI integration to improve and automate test coverage
|
||||
- [x] [Support for pod level network traffic shaping](https://github.com/redhat-chaos/krkn/issues/393)
|
||||
- [ ] [Ability to visualize the metrics that are being captured by Kraken and stored in Elasticsearch](https://github.com/redhat-chaos/krkn/issues/124)
|
||||
- [ ] Support for running all the scenarios of Kraken on Kubernetes distribution - see https://github.com/redhat-chaos/krkn/issues/185, https://github.com/redhat-chaos/krkn/issues/186
|
||||
- [ ] Continue to improve [Chaos Testing Guide](https://redhat-chaos.github.io/krkn) in terms of adding best practices, test environment recommendations and scenarios to make sure the OpenShift platform, as well the applications running on top it, are resilient and performant under chaotic conditions.
|
||||
- [ ] [Switch documentation references to Kubernetes](https://github.com/redhat-chaos/krkn/issues/495)
|
||||
- [ ] [OCP and Kubernetes functionalities segregation](https://github.com/redhat-chaos/krkn/issues/497)
|
||||
- [x] [Support for pod level network traffic shaping](https://github.com/krkn-chaos/krkn/issues/393)
|
||||
- [ ] [Ability to visualize the metrics that are being captured by Kraken and stored in Elasticsearch](https://github.com/krkn-chaos/krkn/issues/124)
|
||||
- [ ] Support for running all the scenarios of Kraken on Kubernetes distribution - see https://github.com/krkn-chaos/krkn/issues/185, https://github.com/redhat-chaos/krkn/issues/186
|
||||
- [ ] Continue to improve [Chaos Testing Guide](https://krkn-chaos.github.io/krkn) in terms of adding best practices, test environment recommendations and scenarios to make sure the OpenShift platform, as well the applications running on top it, are resilient and performant under chaotic conditions.
|
||||
- [ ] [Switch documentation references to Kubernetes](https://github.com/krkn-chaos/krkn/issues/495)
|
||||
- [ ] [OCP and Kubernetes functionalities segregation](https://github.com/krkn-chaos/krkn/issues/497)
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
description: 10 minutes avg. 99th etcd fsync latency on {{$labels.pod}} higher than 1s. {{$value}}s
|
||||
severity: error
|
||||
|
||||
- expr: avg_over_time(histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket[2m]))[10m:]) > 0.007
|
||||
- expr: avg_over_time(histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket[2m]))[10m:]) > 0.03
|
||||
description: 10 minutes avg. 99th etcd commit latency on {{$labels.pod}} higher than 30ms. {{$value}}s
|
||||
severity: warning
|
||||
|
||||
@@ -88,3 +88,42 @@
|
||||
- expr: ALERTS{severity="critical", alertstate="firing"} > 0
|
||||
description: Critical prometheus alert. {{$labels.alertname}}
|
||||
severity: warning
|
||||
|
||||
# etcd CPU and usage increase
|
||||
- expr: sum(rate(container_cpu_usage_seconds_total{image!='', namespace='openshift-etcd', container='etcd'}[1m])) * 100 / sum(machine_cpu_cores) > 5
|
||||
description: Etcd CPU usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
# etcd memory usage increase
|
||||
- expr: sum(deriv(container_memory_usage_bytes{image!='', namespace='openshift-etcd', container='etcd'}[5m])) * 100 / sum(node_memory_MemTotal_bytes) > 5
|
||||
description: Etcd memory usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
# Openshift API server CPU and memory usage increase
|
||||
- expr: sum(rate(container_cpu_usage_seconds_total{image!='', namespace='openshift-apiserver', container='openshift-apiserver'}[1m])) * 100 / sum(machine_cpu_cores) > 5
|
||||
description: openshift apiserver cpu usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
- expr: (sum(deriv(container_memory_usage_bytes{namespace='openshift-apiserver', container='openshift-apiserver'}[5m]))) * 100 / sum(node_memory_MemTotal_bytes) > 5
|
||||
description: openshift apiserver memory usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
# Openshift kube API server CPU and memory usage increase
|
||||
- expr: sum(rate(container_cpu_usage_seconds_total{image!='', namespace='openshift-kube-apiserver', container='kube-apiserver'}[1m])) * 100 / sum(machine_cpu_cores) > 5
|
||||
description: openshift apiserver cpu usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
- expr: (sum(deriv(container_memory_usage_bytes{namespace='openshift-kube-apiserver', container='kube-apiserver'}[5m]))) * 100 / sum(node_memory_MemTotal_bytes) > 5
|
||||
description: openshift apiserver memory usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
# Master node CPU usage increase
|
||||
- expr: (sum((sum(deriv(pod:container_cpu_usage:sum{container="",pod!=""}[5m])) BY (namespace, pod) * on(pod, namespace) group_left(node) (node_namespace_pod:kube_pod_info:) ) * on(node) group_left(role) (max by (node) (kube_node_role{role="master"})))) * 100 / sum(machine_cpu_cores) > 5
|
||||
description: master nodes cpu usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
# Master nodes memory usage increase
|
||||
- expr: (sum((sum(deriv(container_memory_usage_bytes{container="",pod!=""}[5m])) BY (namespace, pod) * on(pod, namespace) group_left(node) (node_namespace_pod:kube_pod_info:) ) * on(node) group_left(role) (max by (node) (kube_node_role{role="master"})))) * 100 / sum(node_memory_MemTotal_bytes) > 5
|
||||
description: master nodes memory usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
|
||||
@@ -99,3 +99,41 @@
|
||||
- expr: ALERTS{severity="critical", alertstate="firing"} > 0
|
||||
description: Critical prometheus alert. {{$labels.alertname}}
|
||||
severity: warning
|
||||
|
||||
# etcd CPU and usage increase
|
||||
- expr: sum(rate(container_cpu_usage_seconds_total{image!='', namespace='openshift-etcd', container='etcd'}[1m])) * 100 / sum(machine_cpu_cores) > 5
|
||||
description: Etcd CPU usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
# etcd memory usage increase
|
||||
- expr: sum(deriv(container_memory_usage_bytes{image!='', namespace='openshift-etcd', container='etcd'}[5m])) * 100 / sum(node_memory_MemTotal_bytes) > 5
|
||||
description: Etcd memory usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
# Openshift API server CPU and memory usage increase
|
||||
- expr: sum(rate(container_cpu_usage_seconds_total{image!='', namespace='openshift-apiserver', container='openshift-apiserver'}[1m])) * 100 / sum(machine_cpu_cores) > 5
|
||||
description: openshift apiserver cpu usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
- expr: (sum(deriv(container_memory_usage_bytes{namespace='openshift-apiserver', container='openshift-apiserver'}[5m]))) * 100 / sum(node_memory_MemTotal_bytes) > 5
|
||||
description: openshift apiserver memory usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
# Openshift kube API server CPU and memory usage increase
|
||||
- expr: sum(rate(container_cpu_usage_seconds_total{image!='', namespace='openshift-kube-apiserver', container='kube-apiserver'}[1m])) * 100 / sum(machine_cpu_cores) > 5
|
||||
description: openshift apiserver cpu usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
- expr: (sum(deriv(container_memory_usage_bytes{namespace='openshift-kube-apiserver', container='kube-apiserver'}[5m]))) * 100 / sum(node_memory_MemTotal_bytes) > 5
|
||||
description: openshift apiserver memory usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
# Master node CPU usage increase
|
||||
- expr: (sum((sum(deriv(pod:container_cpu_usage:sum{container="",pod!=""}[5m])) BY (namespace, pod) * on(pod, namespace) group_left(node) (node_namespace_pod:kube_pod_info:) ) * on(node) group_left(role) (max by (node) (kube_node_role{role="master"})))) * 100 / sum(machine_cpu_cores) > 5
|
||||
description: master nodes cpu usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
# Master nodes memory usage increase
|
||||
- expr: (sum((sum(deriv(container_memory_usage_bytes{container="",pod!=""}[5m])) BY (namespace, pod) * on(pod, namespace) group_left(node) (node_namespace_pod:kube_pod_info:) ) * on(node) group_left(role) (max by (node) (kube_node_role{role="master"})))) * 100 / sum(node_memory_MemTotal_bytes) > 5
|
||||
description: master nodes memory usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
kraken:
|
||||
distribution: openshift # Distribution can be kubernetes or openshift
|
||||
distribution: kubernetes # Distribution can be kubernetes or openshift
|
||||
kubeconfig_path: ~/.kube/config # Path to kubeconfig
|
||||
exit_on_failure: False # Exit when a post action scenario fails
|
||||
publish_kraken_status: True # Can be accessed at http://0.0.0.0:8081
|
||||
@@ -15,7 +15,7 @@ kraken:
|
||||
- application_outages:
|
||||
- scenarios/openshift/app_outage.yaml
|
||||
- container_scenarios: # List of chaos pod scenarios to load
|
||||
- - scenarios/openshift/container_etcd.yml
|
||||
- - scenarios/openshift/container_etcd.yml
|
||||
- plugin_scenarios:
|
||||
- scenarios/openshift/etcd.yml
|
||||
- scenarios/openshift/regex_openshift_pod_kill.yml
|
||||
@@ -23,7 +23,7 @@ kraken:
|
||||
- scenarios/openshift/network_chaos_ingress.yml
|
||||
- scenarios/openshift/prom_kill.yml
|
||||
- node_scenarios: # List of chaos node scenarios to load
|
||||
- scenarios/openshift/node_scenarios_example.yml
|
||||
- scenarios/openshift/aws_node_scenarios.yml
|
||||
- plugin_scenarios:
|
||||
- scenarios/openshift/openshift-apiserver.yml
|
||||
- scenarios/openshift/openshift-kube-apiserver.yml
|
||||
@@ -42,6 +42,10 @@ kraken:
|
||||
- scenarios/openshift/pvc_scenario.yaml
|
||||
- network_chaos:
|
||||
- scenarios/openshift/network_chaos.yaml
|
||||
- service_hijacking:
|
||||
- scenarios/kube/service_hijacking.yaml
|
||||
- syn_flood:
|
||||
- scenarios/kube/syn_flood.yaml
|
||||
|
||||
cerberus:
|
||||
cerberus_enabled: False # Enable it when cerberus is previously installed
|
||||
@@ -51,12 +55,27 @@ cerberus:
|
||||
performance_monitoring:
|
||||
deploy_dashboards: False # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift
|
||||
repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
|
||||
prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
|
||||
prometheus_url: '' # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
|
||||
prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
|
||||
uuid: # uuid for the run is generated by default if not set
|
||||
enable_alerts: False # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
|
||||
enable_metrics: False
|
||||
alert_profile: config/alerts.yaml # Path or URL to alert profile with the prometheus queries
|
||||
metrics_profile: config/metrics.yaml
|
||||
check_critical_alerts: False # When enabled will check prometheus for critical alerts firing post chaos
|
||||
elastic:
|
||||
enable_elastic: False
|
||||
collect_metrics: False
|
||||
collect_alerts: False
|
||||
verify_certs: False
|
||||
elastic_url: "" # To track results in elasticsearch, give url to server here; will post telemetry details when url and index not blank
|
||||
elastic_port: 32766
|
||||
username: "elastic"
|
||||
password: "test"
|
||||
metrics_index: "krkn-metrics"
|
||||
alerts_index: "krkn-alerts"
|
||||
telemetry_index: "krkn-telemetry"
|
||||
|
||||
tunings:
|
||||
wait_duration: 60 # Duration to wait between each chaos scenario
|
||||
iterations: 1 # Number of times to execute the scenarios
|
||||
@@ -65,14 +84,19 @@ telemetry:
|
||||
enabled: False # enable/disables the telemetry collection feature
|
||||
api_url: https://ulnmf9xv7j.execute-api.us-west-2.amazonaws.com/production #telemetry service endpoint
|
||||
username: username # telemetry service username
|
||||
password: password # telemetry service password
|
||||
password: password # telemetry service password
|
||||
prometheus_backup: True # enables/disables prometheus data collection
|
||||
prometheus_namespace: "" # namespace where prometheus is deployed (if distribution is kubernetes)
|
||||
prometheus_container_name: "" # name of the prometheus container name (if distribution is kubernetes)
|
||||
prometheus_pod_name: "" # name of the prometheus pod (if distribution is kubernetes)
|
||||
full_prometheus_backup: False # if is set to False only the /prometheus/wal folder will be downloaded.
|
||||
backup_threads: 5 # number of telemetry download/upload threads
|
||||
archive_path: /tmp # local path where the archive files will be temporarly stored
|
||||
max_retries: 0 # maximum number of upload retries (if 0 will retry forever)
|
||||
run_tag: '' # if set, this will be appended to the run folder in the bucket (useful to group the runs)
|
||||
archive_size: 500000 # the size of the prometheus data archive size in KB. The lower the size of archive is
|
||||
archive_size: 500000
|
||||
telemetry_group: '' # if set will archive the telemetry in the S3 bucket on a folder named after the value, otherwise will use "default"
|
||||
# the size of the prometheus data archive size in KB. The lower the size of archive is
|
||||
# the higher the number of archive files will be produced and uploaded (and processed by backup_threads
|
||||
# simultaneously).
|
||||
# For unstable/slow connection is better to keep this value low
|
||||
@@ -89,3 +113,4 @@ telemetry:
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -77,3 +77,8 @@ telemetry:
|
||||
- "kinit (\\d+/\\d+/\\d+\\s\\d{2}:\\d{2}:\\d{2})\\s+" # kinit 2023/09/15 11:20:36 log
|
||||
- "(\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d+Z).+" # 2023-09-15T11:20:36.123425532Z log
|
||||
oc_cli_path: /usr/bin/oc # optional, if not specified will be search in $PATH
|
||||
elastic:
|
||||
elastic_url: "" # To track results in elasticsearch, give url to server here; will post telemetry details when url and index not blank
|
||||
elastic_index: "" # Elastic search index pattern to post results to
|
||||
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
application: openshift-etcd
|
||||
namespace: openshift-etcd
|
||||
namespaces: openshift-etcd
|
||||
labels: app=openshift-etcd
|
||||
kubeconfig: ~/.kube/config.yaml
|
||||
prometheus_endpoint: <Prometheus_Endpoint>
|
||||
@@ -7,6 +7,8 @@ auth_token: <Auth_Token>
|
||||
scrape_duration: 10m
|
||||
chaos_library: "kraken"
|
||||
log_level: INFO
|
||||
json_output_file: False
|
||||
json_output_folder_path:
|
||||
|
||||
# for output purpose only do not change if not needed
|
||||
chaos_tests:
|
||||
@@ -26,4 +28,8 @@ chaos_tests:
|
||||
- pod_network_chaos
|
||||
MEM:
|
||||
- node_memory_hog
|
||||
- pvc_disk_fill
|
||||
- pvc_disk_fill
|
||||
|
||||
threshold: .7
|
||||
cpu_threshold: .5
|
||||
mem_threshold: .5
|
||||
|
||||
@@ -1,28 +1,55 @@
|
||||
# Dockerfile for kraken
|
||||
|
||||
FROM mcr.microsoft.com/azure-cli:latest as azure-cli
|
||||
|
||||
FROM registry.access.redhat.com/ubi8/ubi:latest
|
||||
|
||||
ENV KUBECONFIG /root/.kube/config
|
||||
|
||||
# Copy azure client binary from azure-cli image
|
||||
COPY --from=azure-cli /usr/local/bin/az /usr/bin/az
|
||||
|
||||
# Install dependencies
|
||||
RUN yum install -y git python39 python3-pip jq gettext wget && \
|
||||
python3.9 -m pip install -U pip && \
|
||||
git clone https://github.com/krkn-chaos/krkn.git --branch v1.5.4 /root/kraken && \
|
||||
mkdir -p /root/.kube && cd /root/kraken && \
|
||||
pip3.9 install -r requirements.txt && \
|
||||
pip3.9 install virtualenv && \
|
||||
wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O /usr/bin/yq && chmod +x /usr/bin/yq
|
||||
|
||||
# Get Kubernetes and OpenShift clients from stable releases
|
||||
# oc build
|
||||
FROM golang:1.22.5 AS oc-build
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends libkrb5-dev
|
||||
WORKDIR /tmp
|
||||
RUN wget https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable/openshift-client-linux.tar.gz && tar -xvf openshift-client-linux.tar.gz && cp oc /usr/local/bin/oc && cp kubectl /usr/local/bin/kubectl
|
||||
RUN git clone --branch release-4.18 https://github.com/openshift/oc.git
|
||||
WORKDIR /tmp/oc
|
||||
RUN go mod edit -go 1.22.5 &&\
|
||||
go get github.com/moby/buildkit@v0.12.5 &&\
|
||||
go get github.com/containerd/containerd@v1.7.11&&\
|
||||
go get github.com/docker/docker@v25.0.6&&\
|
||||
go get github.com/opencontainers/runc@v1.1.14&&\
|
||||
go mod tidy && go mod vendor
|
||||
RUN make GO_REQUIRED_MIN_VERSION:= oc
|
||||
|
||||
WORKDIR /root/kraken
|
||||
FROM fedora:40
|
||||
ARG PR_NUMBER
|
||||
ARG TAG
|
||||
RUN groupadd -g 1001 krkn && useradd -m -u 1001 -g krkn krkn
|
||||
RUN dnf update -y
|
||||
|
||||
ENV KUBECONFIG /home/krkn/.kube/config
|
||||
|
||||
# install kubectl
|
||||
RUN curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" &&\
|
||||
cp kubectl /usr/local/bin/kubectl && chmod +x /usr/local/bin/kubectl &&\
|
||||
cp kubectl /usr/bin/kubectl && chmod +x /usr/bin/kubectl
|
||||
|
||||
# This overwrites any existing configuration in /etc/yum.repos.d/kubernetes.repo
|
||||
RUN dnf update && dnf install -y --setopt=install_weak_deps=False \
|
||||
git python39 jq yq gettext wget which &&\
|
||||
dnf clean all
|
||||
|
||||
# copy oc client binary from oc-build image
|
||||
COPY --from=oc-build /tmp/oc/oc /usr/bin/oc
|
||||
|
||||
# krkn build
|
||||
RUN git clone https://github.com/krkn-chaos/krkn.git /home/krkn/kraken && \
|
||||
mkdir -p /home/krkn/.kube
|
||||
|
||||
WORKDIR /home/krkn/kraken
|
||||
|
||||
# default behaviour will be to build main
|
||||
# if it is a PR trigger the PR itself will be checked out
|
||||
RUN if [ -n "$PR_NUMBER" ]; then git fetch origin pull/${PR_NUMBER}/head:pr-${PR_NUMBER} && git checkout pr-${PR_NUMBER};fi
|
||||
# if it is a TAG trigger checkout the tag
|
||||
RUN if [ -n "$TAG" ]; then git checkout "$TAG";fi
|
||||
|
||||
RUN python3.9 -m ensurepip
|
||||
RUN pip3.9 install -r requirements.txt
|
||||
RUN pip3.9 install jsonschema
|
||||
|
||||
RUN chown -R krkn:krkn /home/krkn && chmod 755 /home/krkn
|
||||
USER krkn
|
||||
ENTRYPOINT ["python3.9", "run_kraken.py"]
|
||||
CMD ["--config=config/config.yaml"]
|
||||
|
||||
@@ -1,29 +0,0 @@
|
||||
# Dockerfile for kraken
|
||||
|
||||
FROM ppc64le/centos:8
|
||||
|
||||
FROM mcr.microsoft.com/azure-cli:latest as azure-cli
|
||||
|
||||
LABEL org.opencontainers.image.authors="Red Hat OpenShift Chaos Engineering"
|
||||
|
||||
ENV KUBECONFIG /root/.kube/config
|
||||
|
||||
# Copy azure client binary from azure-cli image
|
||||
COPY --from=azure-cli /usr/local/bin/az /usr/bin/az
|
||||
|
||||
# Install dependencies
|
||||
RUN yum install -y git python39 python3-pip jq gettext wget && \
|
||||
python3.9 -m pip install -U pip && \
|
||||
git clone https://github.com/redhat-chaos/krkn.git --branch v1.5.4 /root/kraken && \
|
||||
mkdir -p /root/.kube && cd /root/kraken && \
|
||||
pip3.9 install -r requirements.txt && \
|
||||
pip3.9 install virtualenv && \
|
||||
wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O /usr/bin/yq && chmod +x /usr/bin/yq
|
||||
|
||||
# Get Kubernetes and OpenShift clients from stable releases
|
||||
WORKDIR /tmp
|
||||
RUN wget https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable/openshift-client-linux.tar.gz && tar -xvf openshift-client-linux.tar.gz && cp oc /usr/local/bin/oc && cp kubectl /usr/local/bin/kubectl
|
||||
|
||||
WORKDIR /root/kraken
|
||||
|
||||
ENTRYPOINT python3.9 run_kraken.py --config=config/config.yaml
|
||||
@@ -12,35 +12,3 @@ Refer [instructions](https://github.com/redhat-chaos/krkn/blob/main/docs/install
|
||||
### Run Custom Kraken Image
|
||||
|
||||
Refer to [instructions](https://github.com/redhat-chaos/krkn/blob/main/containers/build_own_image-README.md) for information on how to run a custom containerized version of kraken using podman.
|
||||
|
||||
|
||||
### Kraken as a KubeApp ( Unsupported and not recommended )
|
||||
|
||||
#### GENERAL NOTES:
|
||||
|
||||
- It is not generally recommended to run Kraken internal to the cluster as the pod which is running Kraken might get disrupted, the suggested use case to run kraken from inside k8s/OpenShift is to target **another** cluster (eg. to bypass network restrictions or to leverage cluster's computational resources)
|
||||
|
||||
- your kubeconfig might contain several cluster contexts and credentials so be sure, before creating the ConfigMap, to keep **only** the credentials related to the destination cluster. Please refer to the [Kubernetes documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) for more details
|
||||
- to add privileges to the service account you must be logged in the cluster with an highly privileged account (ideally kubeadmin)
|
||||
|
||||
|
||||
|
||||
To run containerized Kraken as a Kubernetes/OpenShift Deployment, follow these steps:
|
||||
|
||||
1. Configure the [config.yaml](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml) file according to your requirements.
|
||||
|
||||
**NOTE**: both the scenarios ConfigMaps are needed regardless you're running kraken in Kubernetes or OpenShift
|
||||
|
||||
2. Create a namespace under which you want to run the kraken pod using `kubectl create ns <namespace>`.
|
||||
3. Switch to `<namespace>` namespace:
|
||||
- In Kubernetes, use `kubectl config set-context --current --namespace=<namespace>`
|
||||
- In OpenShift, use `oc project <namespace>`
|
||||
|
||||
4. Create a ConfigMap named kube-config using `kubectl create configmap kube-config --from-file=<path_to_kubeconfig>` *(eg. ~/.kube/config)*
|
||||
5. Create a ConfigMap named kraken-config using `kubectl create configmap kraken-config --from-file=<path_to_kraken>/config`
|
||||
6. Create a ConfigMap named scenarios-config using `kubectl create configmap scenarios-config --from-file=<path_to_kraken>/scenarios`
|
||||
7. Create a ConfigMap named scenarios-openshift-config using `kubectl create configmap scenarios-openshift-config --from-file=<path_to_kraken>/scenarios/openshift`
|
||||
8. Create a ConfigMap named scenarios-kube-config using `kubectl create configmap scenarios-kube-config --from-file=<path_to_kraken>/scenarios/kube`
|
||||
9. Create a service account to run the kraken pod `kubectl create serviceaccount useroot`.
|
||||
10. In Openshift, add privileges to service account and execute `oc adm policy add-scc-to-user privileged -z useroot`.
|
||||
11. Create a Job using `kubectl apply -f <path_to_kraken>/containers/kraken.yml` and monitor the status using `oc get jobs` and `oc get pods`.
|
||||
|
||||
@@ -1,49 +0,0 @@
|
||||
---
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: kraken
|
||||
spec:
|
||||
parallelism: 1
|
||||
completions: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
tool: Kraken
|
||||
spec:
|
||||
serviceAccountName: useroot
|
||||
containers:
|
||||
- name: kraken
|
||||
securityContext:
|
||||
privileged: true
|
||||
image: quay.io/redhat-chaos/krkn
|
||||
command: ["/bin/sh", "-c"]
|
||||
args: ["python3.9 run_kraken.py -c config/config.yaml"]
|
||||
volumeMounts:
|
||||
- mountPath: "/root/.kube"
|
||||
name: config
|
||||
- mountPath: "/root/kraken/config"
|
||||
name: kraken-config
|
||||
- mountPath: "/root/kraken/scenarios"
|
||||
name: scenarios-config
|
||||
- mountPath: "/root/kraken/scenarios/openshift"
|
||||
name: scenarios-openshift-config
|
||||
- mountPath: "/root/kraken/scenarios/kube"
|
||||
name: scenarios-kube-config
|
||||
restartPolicy: Never
|
||||
volumes:
|
||||
- name: config
|
||||
configMap:
|
||||
name: kube-config
|
||||
- name: kraken-config
|
||||
configMap:
|
||||
name: kraken-config
|
||||
- name: scenarios-config
|
||||
configMap:
|
||||
name: scenarios-config
|
||||
- name: scenarios-openshift-config
|
||||
configMap:
|
||||
name: scenarios-openshift-config
|
||||
- name: scenarios-kube-config
|
||||
configMap:
|
||||
name: scenarios-kube-config
|
||||
@@ -1,31 +0,0 @@
|
||||
version: "3"
|
||||
services:
|
||||
elastic:
|
||||
image: docker.elastic.co/elasticsearch/elasticsearch:7.13.2
|
||||
deploy:
|
||||
replicas: 1
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
network_mode: host
|
||||
environment:
|
||||
discovery.type: single-node
|
||||
kibana:
|
||||
image: docker.elastic.co/kibana/kibana:7.13.2
|
||||
deploy:
|
||||
replicas: 1
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
network_mode: host
|
||||
environment:
|
||||
ELASTICSEARCH_HOSTS: "http://0.0.0.0:9200"
|
||||
cerberus:
|
||||
image: quay.io/openshift-scale/cerberus:latest
|
||||
privileged: true
|
||||
deploy:
|
||||
replicas: 1
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
network_mode: host
|
||||
volumes:
|
||||
- ./config/cerberus.yaml:/root/cerberus/config/config.yaml:Z # Modify the config in case of the need to monitor additional components
|
||||
- ${HOME}/.kube/config:/root/.kube/config:Z
|
||||
@@ -27,14 +27,12 @@ After creating the service account you will need to enable the account using the
|
||||
|
||||
## Azure
|
||||
|
||||
**NOTE**: For Azure node killing scenarios, make sure [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) is installed.
|
||||
|
||||
You will also need to create a service principal and give it the correct access, see [here](https://docs.openshift.com/container-platform/4.5/installing/installing_azure/installing-azure-account.html) for creating the service principal and setting the proper permissions.
|
||||
**NOTE**: You will need to create a service principal and give it the correct access, see [here](https://docs.openshift.com/container-platform/4.5/installing/installing_azure/installing-azure-account.html) for creating the service principal and setting the proper permissions.
|
||||
|
||||
To properly run the service principal requires “Azure Active Directory Graph/Application.ReadWrite.OwnedBy” api permission granted and “User Access Administrator”.
|
||||
|
||||
Before running you will need to set the following:
|
||||
1. Login using ```az login```
|
||||
1. ```export AZURE_SUBSCRIPTION_ID=<subscription_id>```
|
||||
|
||||
2. ```export AZURE_TENANT_ID=<tenant_id>```
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#### Kubernetes/OpenShift cluster shut down scenario
|
||||
Scenario to shut down all the nodes including the masters and restart them after specified duration. Cluster shut down scenario can be injected by placing the shut_down config file under cluster_shut_down_scenario option in the kraken config. Refer to [cluster_shut_down_scenario](https://github.com/redhat-chaos/krkn/blob/main/scenarios/cluster_shut_down_scenario.yml) config file.
|
||||
#### Kubernetes cluster shut down scenario
|
||||
Scenario to shut down all the nodes including the masters and restart them after specified duration. Cluster shut down scenario can be injected by placing the shut_down config file under cluster_shut_down_scenario option in the kraken config. Refer to [cluster_shut_down_scenario](https://github.com/krkn-chaos/krkn/blob/main/scenarios/cluster_shut_down_scenario.yml) config file.
|
||||
|
||||
Refer to [cloud setup](cloud_setup.md) to configure your cli properly for the cloud provider of the cluster you want to shut down.
|
||||
|
||||
|
||||
@@ -4,7 +4,7 @@ This can be based on the pods namespace or labels. If you know the exact object
|
||||
These scenarios are in a simple yaml format that you can manipulate to run your specific tests or use the pre-existing scenarios to see how it works.
|
||||
|
||||
#### Example Config
|
||||
The following are the components of Kubernetes/OpenShift for which a basic chaos scenario config exists today.
|
||||
The following are the components of Kubernetes for which a basic chaos scenario config exists today.
|
||||
|
||||
```
|
||||
scenarios:
|
||||
@@ -25,7 +25,7 @@ In all scenarios we do a post chaos check to wait and verify the specific compon
|
||||
Here there are two options:
|
||||
1. Pass a custom script in the main config scenario list that will run before the chaos and verify the output matches post chaos scenario.
|
||||
|
||||
See [scenarios/post_action_etcd_container.py](https://github.com/redhat-chaos/krkn/blob/main/scenarios/post_action_etcd_container.py) for an example.
|
||||
See [scenarios/post_action_etcd_container.py](https://github.com/krkn-chaos/krkn/blob/main/scenarios/post_action_etcd_container.py) for an example.
|
||||
```
|
||||
- container_scenarios: # List of chaos pod scenarios to load.
|
||||
- - scenarios/container_etcd.yml
|
||||
|
||||
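# A hedged sketch, not part of this diff: the post-action script mentioned above
# can be listed together with the scenario file; the exact nested-list layout
# below is an assumption based on the surrounding text.
- - scenarios/container_etcd.yml
  - scenarios/post_action_etcd_container.py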
@@ -62,7 +62,7 @@ If changes go into the main repository while you're working on your code it is b
|
||||
|
||||
If not already configured, set the upstream url for kraken.
|
||||
```
|
||||
git remote add upstream https://github.com/redhat-chaos/krkn.git
|
||||
git remote add upstream https://github.com/krkn-chaos/krkn.git
|
||||
```
|
||||
|
||||
Rebase to upstream master branch.
|
||||
|
||||
@@ -14,11 +14,7 @@ For example, for adding a pod level scenario for a new application, refer to the
|
||||
namespace_pattern: ^<namespace>$
|
||||
label_selector: <pod label>
|
||||
kill: <number of pods to kill>
|
||||
- id: wait-for-pods
|
||||
config:
|
||||
namespace_pattern: ^<namespace>$
|
||||
label_selector: <pod label>
|
||||
count: <expected number of pods that match namespace and label>
|
||||
krkn_pod_recovery_time: <expected time for the pod to become ready>
|
||||
```
|
||||
|
||||
#### Node Scenario Yaml Template
|
||||
|
||||
@@ -3,13 +3,13 @@
|
||||
The following ways are supported to run Kraken:
|
||||
|
||||
- Standalone python program through Git.
|
||||
- Containerized version using either Podman or Docker as the runtime via [Krkn-hub](https://github.com/redhat-chaos/krkn-hub)
|
||||
- Containerized version using either Podman or Docker as the runtime via [Krkn-hub](https://github.com/krkn-chaos/krkn-hub)
|
||||
- Kubernetes or OpenShift deployment ( unsupported )
|
||||
|
||||
**NOTE**: It is recommended to run Kraken external to the cluster ( Standalone or Containerized ) hitting the Kubernetes/OpenShift API as running it internal to the cluster might be disruptive to itself and also might not report back the results if the chaos leads to cluster's API server instability.
|
||||
|
||||
**NOTE**: To run Kraken on Power (ppc64le) architecture, build and run a containerized version by following the
|
||||
instructions given [here](https://github.com/redhat-chaos/krkn/blob/main/containers/build_own_image-README.md).
|
||||
instructions given [here](https://github.com/krkn-chaos/krkn/blob/main/containers/build_own_image-README.md).
|
||||
|
||||
**NOTE**: Helper functions for interactions in Krkn are part of [krkn-lib](https://github.com/redhat-chaos/krkn-lib).
|
||||
Please feel free to reuse and expand them as you see fit when adding a new scenario or expanding
|
||||
@@ -19,10 +19,10 @@ the capabilities of the current supported scenarios.
|
||||
### Git
|
||||
|
||||
#### Clone the repository
|
||||
Pick the latest stable release to install [here](https://github.com/redhat-chaos/krkn/releases).
|
||||
Pick the latest stable release to install [here](https://github.com/krkn-chaos/krkn/releases).
|
||||
```
|
||||
$ git clone https://github.com/redhat-chaos/krkn.git --branch <release version>
|
||||
$ cd kraken
|
||||
$ git clone https://github.com/krkn-chaos/krkn.git --branch <release version>
|
||||
$ cd krkn
|
||||
```
|
||||
|
||||
#### Install the dependencies
|
||||
@@ -40,15 +40,6 @@ $ python3.9 run_kraken.py --config <config_file_location>
|
||||
```
|
||||
|
||||
### Run containerized version
|
||||
[Krkn-hub](https://github.com/redhat-chaos/krkn-hub) is a wrapper that allows running Krkn chaos scenarios via podman or docker runtime with scenario parameters/configuration defined as environment variables.
|
||||
[Krkn-hub](https://github.com/krkn-chaos/krkn-hub) is a wrapper that allows running Krkn chaos scenarios via podman or docker runtime with scenario parameters/configuration defined as environment variables.
|
||||
|
||||
Refer [instructions](https://github.com/redhat-chaos/krkn-hub#supported-chaos-scenarios) to get started.
|
||||
|
||||
|
||||
### Run Kraken as a Kubernetes deployment ( unsupported option - standalone or containerized deployers are recommended )
|
||||
Refer [Instructions](https://github.com/redhat-chaos/krkn/blob/main/containers/README.md) on how to deploy and run Kraken as a Kubernetes/OpenShift deployment.
|
||||
|
||||
|
||||
Refer to the [chaos-kraken chart manpage](https://artifacthub.io/packages/helm/startx/chaos-kraken)
|
||||
and especially the [kraken configuration values](https://artifacthub.io/packages/helm/startx/chaos-kraken#chaos-kraken-values-dictionary)
|
||||
for details on how to configure this chart.
|
||||
Refer [instructions](https://github.com/krkn-chaos/krkn-hub#supported-chaos-scenarios) to get started.
|
||||
|
||||
@@ -9,24 +9,28 @@ The following node chaos scenarios are supported:
|
||||
5. **node_reboot_scenario**: Scenario to reboot the node instance.
|
||||
6. **stop_kubelet_scenario**: Scenario to stop the kubelet of the node instance.
|
||||
7. **stop_start_kubelet_scenario**: Scenario to stop and start the kubelet of the node instance.
|
||||
8. **node_crash_scenario**: Scenario to crash the node instance.
|
||||
9. **stop_start_helper_node_scenario**: Scenario to stop and start the helper node and check service status.
|
||||
8. **restart_kubelet_scenario**: Scenario to restart the kubelet of the node instance.
|
||||
9. **node_crash_scenario**: Scenario to crash the node instance.
|
||||
10. **stop_start_helper_node_scenario**: Scenario to stop and start the helper node and check service status.
|
||||
|
||||
|
||||
**NOTE**: If the node does not recover from the node_crash_scenario injection, reboot the node to get it back to Ready state.
|
||||
|
||||
**NOTE**: node_start_scenario, node_stop_scenario, node_stop_start_scenario, node_termination_scenario
|
||||
, node_reboot_scenario and stop_start_kubelet_scenario are supported only on AWS, Azure, OpenStack, BareMetal, GCP
|
||||
, VMware and Alibaba as of now.
|
||||
|
||||
**NOTE**: Node scenarios are supported only when running the standalone version of Kraken until https://github.com/redhat-chaos/krkn/issues/106 gets fixed.
|
||||
, node_reboot_scenario and stop_start_kubelet_scenario are supported on AWS, Azure, OpenStack, BareMetal, GCP
|
||||
, VMware and Alibaba.
|
||||
|
||||
|
||||
#### AWS
|
||||
|
||||
How to set up AWS cli to run node scenarios is defined [here](cloud_setup.md#aws).
|
||||
Cloud setup instructions can be found [here](cloud_setup.md#aws). Sample scenario config can be found [here](https://github.com/krkn-chaos/krkn/blob/main/scenarios/openshift/aws_node_scenarios.yml).
|
||||
|
||||
|
||||
|
||||
#### Baremetal
|
||||
|
||||
Sample scenario config can be found [here](https://github.com/krkn-chaos/krkn/blob/main/scenarios/openshift/baremetal_node_scenarios.yml).
|
||||
|
||||
**NOTE**: Baremetal requires setting the IPMI user and password to power on, off, and reboot nodes, using the config options `bm_user` and `bm_password`. It can either be set in the root of the entry in the scenarios config, or it can be set per machine.
|
||||
|
||||
If no per-machine addresses are specified, kraken attempts to use the BMC value in the BareMetalHost object. To list them, you can do 'oc get bmh -o wide --all-namespaces'. If the BMC values are blank, you must specify them per-machine using the config option 'bmc_addr' as specified below.
|
||||
@@ -38,6 +42,8 @@ See the example node scenario or the example below.
|
||||
|
||||
**NOTE**: Baremetal machines are fragile. Some node actions can occasionally corrupt the filesystem if it does not shut down properly, and sometimes the kubelet does not start properly.
|
||||
|
||||
|
||||
|
||||
#### Docker
|
||||
|
||||
The Docker provider can be used to run node scenarios against kind clusters.
|
||||
@@ -46,8 +52,11 @@ The Docker provider can be used to run node scenarios against kind clusters.
|
||||
|
||||
kind was primarily designed for testing Kubernetes itself, but may be used for local development or CI.
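A minimal sketch, not taken from this diff, of a node scenario pointed at a kind cluster. The field names follow the node_scenarios example shown later in this document; the `cloud_type: docker` value and the other values are assumptions.

```yaml
node_scenarios:
  - actions:
      - node_stop_start_scenario
    node_name: ""                                   # leave empty to select nodes by label instead
    label_selector: node-role.kubernetes.io/worker  # label used to pick target nodes when node_name is empty
    instance_count: 1
    runs: 1
    timeout: 120
    cloud_type: docker                              # assumed value for the Docker/kind provider
```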
|
||||
|
||||
|
||||
|
||||
#### GCP
|
||||
How to set up GCP cli to run node scenarios is defined [here](cloud_setup.md#gcp).
|
||||
Cloud setup instructions can be found [here](cloud_setup.md#gcp). Sample scenario config can be found [here](https://github.com/krkn-chaos/krkn/blob/main/scenarios/openshift/gcp_node_scenarios.yml).
|
||||
|
||||
|
||||
#### Openstack
|
||||
|
||||
@@ -60,9 +69,11 @@ The supported node level chaos scenarios on an OPENSTACK cloud are `node_stop_st
|
||||
To execute the scenario, ensure the value for `ssh_private_key` in the node scenarios config file is set with the correct private key file path for ssh connection to the helper node. Ensure passwordless ssh is configured on the host running Kraken and the helper node to avoid connection errors.
|
||||
|
||||
|
||||
|
||||
#### Azure
|
||||
|
||||
How to set up Azure cli to run node scenarios is defined [here](cloud_setup.md#azure).
|
||||
Cloud setup instructions can be found [here](cloud_setup.md#azure). Sample scenario config can be found [here](https://github.com/krkn-chaos/krkn/blob/main/scenarios/openshift/azure_node_scenarios.yml).
|
||||
|
||||
|
||||
|
||||
#### Alibaba
|
||||
@@ -73,43 +84,28 @@ How to set up Alibaba cli to run node scenarios is defined [here](cloud_setup.md
|
||||
. Releasing a node is 2 steps, stopping the node and then releasing it.
|
||||
|
||||
|
||||
|
||||
#### VMware
|
||||
How to set up VMware vSphere to run node scenarios is defined [here](cloud_setup.md#vmware)
|
||||
|
||||
This cloud type uses a different configuration style, see actions below and [example config file](../scenarios/openshift/vmware_node_scenarios.yml)
|
||||
|
||||
*vmware-node-terminate, vmware-node-reboot, vmware-node-stop, vmware-node-start*
|
||||
- vmware-node-terminate
|
||||
- vmware-node-reboot
|
||||
- vmware-node-stop
|
||||
- vmware-node-start
|
||||
|
||||
|
||||
|
||||
#### IBMCloud
|
||||
How to set up IBMCloud to run node scenarios is defined [here](cloud_setup.md#ibmcloud)
|
||||
|
||||
This cloud type uses a different configuration style, see actions below and [example config file](../scenarios/openshift/ibmcloud_node_scenarios.yml)
|
||||
|
||||
*ibmcloud-node-terminate, ibmcloud-node-reboot, ibmcloud-node-stop, ibmcloud-node-start
|
||||
*
|
||||
|
||||
|
||||
#### IBMCloud and Vmware example
|
||||
|
||||
|
||||
```
|
||||
- id: ibmcloud-node-stop
|
||||
config:
|
||||
name: "<node_name>"
|
||||
label_selector: "node-role.kubernetes.io/worker" # When node_name is not specified, a node with matching label_selector is selected for node chaos scenario injection
|
||||
runs: 1 # Number of times to inject each scenario under actions (will perform on same node each time)
|
||||
instance_count: 1 # Number of nodes to perform action/select that match the label selector
|
||||
timeout: 30 # Duration to wait for completion of node scenario injection
|
||||
skip_openshift_checks: False # Set to True if you don't want to wait for the status of the nodes to change on OpenShift before passing the scenario
|
||||
- id: ibmcloud-node-start
|
||||
config:
|
||||
name: "<node_name>" #Same name as before
|
||||
label_selector: "node-role.kubernetes.io/worker" # When node_name is not specified, a node with matching label_selector is selected for node chaos scenario injection
|
||||
runs: 1 # Number of times to inject each scenario under actions (will perform on same node each time)
|
||||
instance_count: 1 # Number of nodes to perform action/select that match the label selector
|
||||
timeout: 30 # Duration to wait for completion of node scenario injection
|
||||
skip_openshift_checks: False # Set to True if you don't want to wait for the status of the nodes to change on OpenShift before passing the scenario
|
||||
```
|
||||
- ibmcloud-node-terminate
|
||||
- ibmcloud-node-reboot
|
||||
- ibmcloud-node-stop
|
||||
- ibmcloud-node-start
|
||||
|
||||
|
||||
|
||||
@@ -118,60 +114,3 @@ This cloud type uses a different configuration style, see actions below and [exa
|
||||
**NOTE**: The `node_crash_scenario` and `stop_kubelet_scenario` scenario is supported independent of the cloud platform.
|
||||
|
||||
Use 'generic' or do not add the 'cloud_type' key to your scenario if your cluster is not set up using one of the current supported cloud types.
|
||||
|
||||
Node scenarios can be injected by placing the node scenarios config files under node_scenarios option in the kraken config. Refer to [node_scenarios_example](https://github.com/redhat-chaos/krkn/blob/main/scenarios/node_scenarios_example.yml) config file.
|
||||
|
||||
|
||||
```
|
||||
node_scenarios:
|
||||
- actions: # Node chaos scenarios to be injected.
|
||||
- node_stop_start_scenario
|
||||
- stop_start_kubelet_scenario
|
||||
- node_crash_scenario
|
||||
node_name: # Node on which scenario has to be injected.
|
||||
label_selector: node-role.kubernetes.io/worker # When node_name is not specified, a node with matching label_selector is selected for node chaos scenario injection.
|
||||
instance_count: 1 # Number of nodes to perform action/select that match the label selector.
|
||||
runs: 1 # Number of times to inject each scenario under actions (will perform on same node each time).
|
||||
timeout: 120 # Duration to wait for completion of node scenario injection.
|
||||
cloud_type: aws # Cloud type on which Kubernetes/OpenShift runs.
|
||||
- actions:
|
||||
- node_reboot_scenario
|
||||
node_name:
|
||||
label_selector: node-role.kubernetes.io/infra
|
||||
instance_count: 1
|
||||
timeout: 120
|
||||
cloud_type: azure
|
||||
- actions:
|
||||
- node_crash_scenario
|
||||
node_name:
|
||||
label_selector: node-role.kubernetes.io/infra
|
||||
instance_count: 1
|
||||
timeout: 120
|
||||
- actions:
|
||||
- stop_start_helper_node_scenario # Node chaos scenario for helper node.
|
||||
instance_count: 1
|
||||
timeout: 120
|
||||
helper_node_ip: # ip address of the helper node.
|
||||
service: # Check status of the services on the helper node.
|
||||
- haproxy
|
||||
- dhcpd
|
||||
- named
|
||||
ssh_private_key: /root/.ssh/id_rsa # ssh key to access the helper node.
|
||||
cloud_type: openstack
|
||||
- actions:
|
||||
- node_stop_start_scenario
|
||||
node_name:
|
||||
label_selector: node-role.kubernetes.io/worker
|
||||
instance_count: 1
|
||||
timeout: 120
|
||||
cloud_type: bm
|
||||
bmc_user: defaultuser # For baremetal (bm) cloud type. The default IPMI username. Optional if specified for all machines.
|
||||
bmc_password: defaultpass # For baremetal (bm) cloud type. The default IPMI password. Optional if specified for all machines.
|
||||
bmc_info: # This section is here to specify baremetal per-machine info, so it is optional if there is no per-machine info.
|
||||
node-1: # The node name for the baremetal machine
|
||||
bmc_addr: mgmt-machine1.example.com # Optional. For baremetal nodes with the IPMI BMC address missing from 'oc get bmh'.
|
||||
node-2:
|
||||
bmc_addr: mgmt-machine2.example.com
|
||||
bmc_user: user # The baremetal IPMI user. Overrides the default IPMI user specified above. Optional if the default is set.
|
||||
bmc_password: pass # The baremetal IPMI password. Overrides the default IPMI user specified above. Optional if the default is set.
|
||||
```
|
||||
|
||||
@@ -17,11 +17,8 @@ You can then create the scenario file with the following contents:
|
||||
config:
|
||||
namespace_pattern: ^kube-system$
|
||||
label_selector: k8s-app=kube-scheduler
|
||||
- id: wait-for-pods
|
||||
config:
|
||||
namespace_pattern: ^kube-system$
|
||||
label_selector: k8s-app=kube-scheduler
|
||||
count: 3
|
||||
krkn_pod_recovery_time: 120
|
||||
|
||||
```
|
||||
|
||||
Please adjust the schema reference to point to the [schema file](../scenarios/plugin.schema.json). This file will give you code completion and documentation for the available options in your IDE.
|
||||
|
||||
@@ -16,7 +16,7 @@ Set to '^.*$' and label_selector to "" to randomly select any namespace in your
|
||||
|
||||
**sleep:** Number of seconds to wait between each iteration/count of killing namespaces. Defaults to 10 seconds if not set
|
||||
|
||||
Refer to [namespace_scenarios_example](https://github.com/redhat-chaos/krkn/blob/main/scenarios/regex_namespace.yaml) config file.
|
||||
Refer to [namespace_scenarios_example](https://github.com/krkn-chaos/krkn/blob/main/scenarios/regex_namespace.yaml) config file.
|
||||
|
||||
```
|
||||
scenarios:
|
||||
|
||||
80
docs/service_hijacking_scenarios.md
Normal file
@@ -0,0 +1,80 @@

### Service Hijacking Scenarios

Service Hijacking Scenarios aim to simulate fake HTTP responses from a workload targeted by a `Service` already deployed in the cluster. This scenario is executed by deploying a custom-made web service and modifying the target `Service` selector to direct traffic to this web service for a specified duration.

The web service's source code is available [here](https://github.com/krkn-chaos/krkn-service-hijacking). It employs a time-based test plan from the scenario configuration file, which specifies the behavior of resources during the chaos scenario as follows:

```yaml
service_target_port: http-web-svc # The port of the service to be hijacked (can be named or numeric, based on the workload and service configuration).
service_name: nginx-service      # The name of the service that will be hijacked.
service_namespace: default       # The namespace where the target service is located.
image: quay.io/krkn-chaos/krkn-service-hijacking:v0.1.3 # Image of the krkn web service to be deployed to receive traffic.
chaos_duration: 30               # Total duration of the chaos scenario in seconds.

plan:
  - resource: "/list/index.php"  # Specifies the resource or path to respond to in the scenario. For paths, both the path and query parameters are captured but ignored. For resources, only query parameters are captured.

    steps:                       # A time-based plan consisting of steps can be defined for each resource.
      GET:                       # One or more HTTP methods can be specified for each step. Note: Non-standard methods are supported for fully custom web services (e.g., using NONEXISTENT instead of POST).

        - duration: 15           # Duration in seconds for this step before moving to the next one, if defined. Otherwise, this step will continue until the chaos scenario ends.
          status: 500            # HTTP status code to be returned in this step.
          mime_type: "application/json" # MIME type of the response for this step.
          payload: |             # The response payload for this step.
            {
              "status": "internal server error"
            }
        - duration: 15
          status: 201
          mime_type: "application/json"
          payload: |
            {
              "status": "resource created"
            }
      POST:
        - duration: 15
          status: 401
          mime_type: "application/json"
          payload: |
            {
              "status": "unauthorized"
            }
        - duration: 15
          status: 404
          mime_type: "text/plain"
          payload: "not found"
```

The scenario will focus on the `service_name` within the `service_namespace`, substituting the selector with a randomly generated one, which is added as a label in the mock service manifest. This allows multiple scenarios to be executed in the same namespace, each targeting different services without causing conflicts.

The newly deployed mock web service will expose a `service_target_port`, which can be either a named or numeric port based on the service configuration. This ensures that the `Service` correctly routes HTTP traffic to the mock web service during the chaos run.

Each step will last for `duration` seconds from the deployment of the mock web service in the cluster. For each HTTP resource, defined as a top-level YAML property of the plan (it could be a specific resource, e.g., `/list/index.php`, or a path-based resource typical in MVC frameworks), one or more HTTP request methods can be specified. Both standard and custom request methods are supported.

During this time frame, the web service will respond with:

- `status`: The [HTTP status code](https://datatracker.ietf.org/doc/html/rfc7231#section-6) (can be standard or custom).
- `mime_type`: The [MIME type](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types) (can be standard or custom).
- `payload`: The response body to be returned to the client.

At the end of the step `duration`, the web service will proceed to the next step (if available) until the global `chaos_duration` concludes. At this point, the original service will be restored, and the custom web service and its resources will be undeployed.

__NOTE__: Some clients (e.g., cURL, jQuery) may optimize queries using lightweight methods (like HEAD or OPTIONS) to probe API behavior. If these methods are not defined in the test plan, the web service may respond with a `405` or `404` status code. If you encounter unexpected behavior, consider this use case.
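As an illustration, the hijacked resource can be polled while the scenario runs to confirm that each step of the plan is being served. The sketch below is not part of Krkn; the in-cluster URL, path, and expected status codes simply mirror the example configuration above:

```python
import time
import requests

# Hypothetical in-cluster URL of the hijacked service from the example plan above.
BASE_URL = "http://nginx-service.default.svc:80"

# Expected GET behaviour per the example plan: 15s of HTTP 500, then 15s of HTTP 201.
EXPECTED_GET_STEPS = [(15, 500), (15, 201)]


def probe_plan() -> None:
    """Poll the hijacked resource and log whether the returned status matches the active step."""
    for duration, expected_status in EXPECTED_GET_STEPS:
        deadline = time.time() + duration
        while time.time() < deadline:
            resp = requests.get(f"{BASE_URL}/list/index.php", timeout=5)
            print(f"expected={expected_status} got={resp.status_code} body={resp.text.strip()}")
            time.sleep(3)


if __name__ == "__main__":
    probe_plan()
```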
docs/syn_flood_scenarios.md (new file, 33 lines)
@@ -0,0 +1,33 @@

### SYN Flood Scenarios

This scenario generates a substantial amount of TCP traffic directed at one or more Kubernetes services within the cluster to test the server's resiliency under extreme traffic conditions. It can also target hosts outside the cluster by specifying a reachable IP address or hostname. The scenario leverages the distributed nature of Kubernetes clusters to instantiate multiple instances of the same pod against a single host, significantly increasing the effectiveness of the attack. The configuration also allows for the specification of multiple node selectors, enabling Kubernetes to schedule the attacker pods on a user-defined subset of nodes to make the test more realistic.

```yaml
packet-size: 120           # hping3 packet size
window-size: 64            # hping3 TCP window size
duration: 10               # chaos scenario duration
namespace: default         # namespace where the target service(s) are deployed
target-service: target-svc # target service name (if set, target-service-label must be empty)
target-port: 80            # target service TCP port
target-service-label: ""   # target service label; can be used to target multiple services at the same time
                           # if they share the same label (if set, target-service must be empty)
number-of-pods: 2          # number of attacker pods instantiated per target
image: quay.io/krkn-chaos/krkn-syn-flood # SYN flood attacker container image
attacker-nodes:            # sets the node affinity used to schedule the attacker pods. For each node label selector,
                           # multiple values can be specified so the kube-scheduler can place the attacker pods
                           # in the best way possible based on the provided labels. Multiple labels can be specified.
  kubernetes.io/hostname:
    - host_1
    - host_2
  kubernetes.io/os:
    - linux
```

The attacker container source code is available [here](https://github.com/krkn-chaos/krkn-syn-flood).
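While the flood is running, it can be useful to track whether the target service still answers within an acceptable time. The sketch below is a hypothetical client-side probe; the URL and timeouts are assumptions derived from the example configuration above and are not part of Krkn:

```python
import time
import requests

# Hypothetical in-cluster URL built from the example configuration above
# (target-service: target-svc, namespace: default, target-port: 80).
TARGET_URL = "http://target-svc.default.svc:80"


def probe_during_flood(duration_s: int = 10, interval_s: int = 1) -> None:
    """Record response times (or failures) for the duration of the chaos run."""
    deadline = time.time() + duration_s
    while time.time() < deadline:
        started = time.time()
        try:
            resp = requests.get(TARGET_URL, timeout=3)
            print(f"{resp.status_code} in {time.time() - started:.2f}s")
        except requests.RequestException as err:
            print(f"request failed after {time.time() - started:.2f}s: {err}")
        time.sleep(interval_s)


if __name__ == "__main__":
    probe_during_flood()
```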
@@ -16,7 +16,7 @@ Configuration Options:

**object_name:** List of the names of pods or nodes you want to skew.

-Refer to [time_scenarios_example](https://github.com/redhat-chaos/krkn/blob/main/scenarios/time_scenarios_example.yml) config file.
+Refer to [time_scenarios_example](https://github.com/krkn-chaos/krkn/blob/main/scenarios/time_scenarios_example.yml) config file.

```
time_scenarios:
```
```
@@ -2,6 +2,9 @@ kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
  extraPortMappings:
  - containerPort: 30036
    hostPort: 8888
- role: control-plane
- role: control-plane
- role: worker
```
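The added mapping forwards port 8888 on the host running kind to NodePort 30036 on the control-plane node. A minimal, hypothetical check from the host is below; it assumes a NodePort service listening on 30036 has been deployed inside the cluster, which is not shown in this diff:

```python
import requests

# Hypothetical probe: the kind config above maps containerPort 30036 (a NodePort
# on the control-plane node) to port 8888 on the machine running the cluster.
resp = requests.get("http://localhost:8888", timeout=5)
print(resp.status_code)
```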
```
@@ -1,25 +1,32 @@
import yaml
import logging
import time

from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift

import kraken.cerberus.setup as cerberus
from jinja2 import Template
import kraken.invoke.command as runcommand
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
from krkn_lib.models.telemetry import ScenarioTelemetry
from krkn_lib.utils.functions import get_yaml_item_value, log_exception

from kraken import utils


# Reads the scenario config, applies and deletes a network policy to
# block the traffic for the specified duration
def run(scenarios_list, config, wait_duration, telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]):
def run(scenarios_list,
        config,
        wait_duration,
        telemetry: KrknTelemetryOpenshift,
        telemetry_request_id: str) -> (list[str], list[ScenarioTelemetry]):
    failed_post_scenarios = ""
    scenario_telemetries: list[ScenarioTelemetry] = []
    failed_scenarios = []
    for app_outage_config in scenarios_list:
        scenario_telemetry = ScenarioTelemetry()
        scenario_telemetry.scenario = app_outage_config
        scenario_telemetry.startTimeStamp = time.time()
        telemetry.set_parameters_base64(scenario_telemetry, app_outage_config)
        scenario_telemetry.start_timestamp = time.time()
        parsed_scenario_config = telemetry.set_parameters_base64(scenario_telemetry, app_outage_config)
        if len(app_outage_config) > 1:
            try:
                with open(app_outage_config, "r") as f:
@@ -49,25 +56,22 @@ spec:
  podSelector:
    matchLabels: {{ pod_selector }}
  policyTypes: {{ traffic_type }}
"""
"""
                t = Template(network_policy_template)
                rendered_spec = t.render(pod_selector=pod_selector, traffic_type=traffic_type)
                # Write the rendered template to a file
                with open("kraken_network_policy.yaml", "w") as f:
                    f.write(rendered_spec)
                yaml_spec = yaml.safe_load(rendered_spec)
                # Block the traffic by creating network policy
                logging.info("Creating the network policy")
                runcommand.invoke(
                    "kubectl create -f %s -n %s --validate=false" % ("kraken_network_policy.yaml", namespace)
                )

                telemetry.kubecli.create_net_policy(yaml_spec, namespace)

                # wait for the specified duration
                logging.info("Waiting for the specified duration in the config: %s" % (duration))
                time.sleep(duration)

                # unblock the traffic by deleting the network policy
                logging.info("Deleting the network policy")
                runcommand.invoke("kubectl delete -f %s -n %s" % ("kraken_network_policy.yaml", namespace))
                telemetry.kubecli.delete_net_policy("kraken-deny", namespace)

                logging.info("End of scenario. Waiting for the specified duration: %s" % (wait_duration))
                time.sleep(wait_duration)
@@ -75,12 +79,22 @@ spec:
                end_time = int(time.time())
                cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
            except Exception as e:
                scenario_telemetry.exitStatus = 1
                scenario_telemetry.exit_status = 1
                failed_scenarios.append(app_outage_config)
                log_exception(app_outage_config)
            else:
                scenario_telemetry.exitStatus = 0
                scenario_telemetry.endTimeStamp = time.time()
                scenario_telemetry.exit_status = 0
                scenario_telemetry.end_timestamp = time.time()
                utils.collect_and_put_ocp_logs(telemetry,
                                               parsed_scenario_config,
                                               telemetry_request_id,
                                               int(scenario_telemetry.start_timestamp),
                                               int(scenario_telemetry.end_timestamp))
                utils.populate_cluster_events(scenario_telemetry,
                                              parsed_scenario_config,
                                              telemetry.kubecli,
                                              int(scenario_telemetry.start_timestamp),
                                              int(scenario_telemetry.end_timestamp))
        scenario_telemetries.append(scenario_telemetry)
    return failed_scenarios, scenario_telemetries
```
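The second hunk above replaces the kubectl round-trip (writing `kraken_network_policy.yaml` to disk and shelling out) with an in-memory flow: the Jinja template is rendered, parsed with `yaml.safe_load`, and the resulting dict is handed to `telemetry.kubecli.create_net_policy`. The sketch below illustrates only that render-and-parse step; the `spec` fields and the `kraken-deny` name come from the diff, while the rest of the manifest is reconstructed for illustration and is not quoted from the repository:

```python
import yaml
from jinja2 import Template

# Illustrative template: only podSelector/policyTypes and the kraken-deny name
# are taken from the diff above; the surrounding manifest fields are standard
# NetworkPolicy boilerplate added here for completeness.
network_policy_template = """
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: kraken-deny
spec:
  podSelector:
    matchLabels: {{ pod_selector }}
  policyTypes: {{ traffic_type }}
"""


def render_policy(pod_selector: dict, traffic_type: list) -> dict:
    """Render the Jinja template and parse it into the dict that krkn-lib expects."""
    rendered_spec = Template(network_policy_template).render(
        pod_selector=pod_selector, traffic_type=traffic_type
    )
    # The parsed dict is what the diff passes to
    # telemetry.kubecli.create_net_policy(yaml_spec, namespace).
    return yaml.safe_load(rendered_spec)


if __name__ == "__main__":
    print(render_policy({"app": "nginx"}, ["Ingress"]))
```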
```
@@ -5,23 +5,47 @@ import yaml
import logging
from pathlib import Path
from typing import List

from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift

from .context_auth import ContextAuth
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
from krkn_lib.models.telemetry import ScenarioTelemetry

from .. import utils


def run(scenarios_list: List[str], kubeconfig_path: str, telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]):

def run(scenarios_list: List[str],
        telemetry: KrknTelemetryOpenshift,
        telemetry_request_id: str
        ) -> (list[str], list[ScenarioTelemetry]):
    scenario_telemetries: list[ScenarioTelemetry] = []
    failed_post_scenarios = []
    for scenario in scenarios_list:
        scenario_telemetry = ScenarioTelemetry()
        scenario_telemetry.scenario = scenario
        scenario_telemetry.startTimeStamp = time.time()
        telemetry.set_parameters_base64(scenario_telemetry, scenario)
        start_time = time.time()
        scenario_telemetry.start_timestamp = start_time
        parsed_scenario_config = telemetry.set_parameters_base64(scenario_telemetry, scenario)
        engine_args = build_args(scenario)
        status_code = run_workflow(engine_args, kubeconfig_path)
        scenario_telemetry.endTimeStamp = time.time()
        scenario_telemetry.exitStatus = status_code
        status_code = run_workflow(engine_args, telemetry.kubecli.get_kubeconfig_path())
        end_time = time.time()
        scenario_telemetry.end_timestamp = end_time
        scenario_telemetry.exit_status = status_code

        utils.populate_cluster_events(scenario_telemetry,
                                      parsed_scenario_config,
                                      telemetry.kubecli,
                                      int(start_time),
                                      int(end_time))

        # this is the design proposal for the namespaced logs collection
        # check the krkn-lib latest commit to follow also the changes made here
        utils.collect_and_put_ocp_logs(telemetry,
                                       parsed_scenario_config,
                                       telemetry_request_id,
                                       int(start_time),
                                       int(end_time))

        scenario_telemetries.append(scenario_telemetry)
        if status_code != 0:
            failed_post_scenarios.append(scenario)
@@ -36,9 +60,10 @@ def run_workflow(engine_args: arcaflow.EngineArgs, kubeconfig_path: str) -> int:

def build_args(input_file: str) -> arcaflow.EngineArgs:
    """sets the kubeconfig parsed by setArcaKubeConfig as an input to the arcaflow workflow"""
    context = Path(input_file).parent
    workflow = "{}/workflow.yaml".format(context)
    config = "{}/config.yaml".format(context)
    current_path = Path().resolve()
    context = f"{current_path}/{Path(input_file).parent}"
    workflow = f"{context}/workflow.yaml"
    config = f"{context}/config.yaml"
    if not os.path.exists(context):
        raise Exception(
            "context folder for arcaflow workflow not found: {}".format(
@@ -61,7 +86,8 @@ def build_args(input_file: str) -> arcaflow.EngineArgs:
    engine_args = arcaflow.EngineArgs()
    engine_args.context = context
    engine_args.config = config
    engine_args.input = input_file
    engine_args.workflow = workflow
    engine_args.input = f"{current_path}/{input_file}"
    return engine_args
```
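The net effect of the `build_args` change is easiest to see with concrete paths: the workflow context, the workflow/config files, and the input file are now all resolved to absolute paths under the current working directory instead of the bare relative parent path. A minimal sketch, with a hypothetical scenario path:

```python
from pathlib import Path

# Illustrative only: mirrors the path resolution shown in the diff above.
input_file = "scenarios/arcaflow/cpu-hog/input.yaml"  # hypothetical scenario file
current_path = Path().resolve()
context = f"{current_path}/{Path(input_file).parent}"
print(context)
print(f"{context}/workflow.yaml")
print(f"{context}/config.yaml")
print(f"{current_path}/{input_file}")
```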
@@ -4,13 +4,10 @@ import pandas as pd
|
||||
import kraken.chaos_recommender.kraken_tests as kraken_tests
|
||||
import time
|
||||
|
||||
threshold = .7 # Adjust the threshold as needed
|
||||
heatmap_cpu_threshold = .5
|
||||
heatmap_mem_threshold = .5
|
||||
|
||||
KRAKEN_TESTS_PATH = "./kraken_chaos_tests.txt"
|
||||
|
||||
#Placeholder, this should be done with topology
|
||||
|
||||
# Placeholder, this should be done with topology
|
||||
def return_critical_services():
|
||||
return ["web", "cart"]
|
||||
|
||||
@@ -19,15 +16,18 @@ def load_telemetry_data(file_path):
|
||||
data = pd.read_csv(file_path, delimiter=r"\s+")
|
||||
return data
|
||||
|
||||
|
||||
def calculate_zscores(data):
|
||||
zscores = pd.DataFrame()
|
||||
zscores["Namespace"] = data["namespace"]
|
||||
zscores["Service"] = data["service"]
|
||||
zscores["CPU"] = (data["CPU"] - data["CPU"].mean()) / data["CPU"].std()
|
||||
zscores["Memory"] = (data["MEM"] - data["MEM"].mean()) / data["MEM"].std()
|
||||
zscores["Network"] = (data["NETWORK"] - data["NETWORK"].mean()) / data["NETWORK"].std()
|
||||
return zscores
|
||||
|
||||
def identify_outliers(data):
|
||||
|
||||
def identify_outliers(data, threshold):
|
||||
outliers_cpu = data[data["CPU"] > threshold]["Service"].tolist()
|
||||
outliers_memory = data[data["Memory"] > threshold]["Service"].tolist()
|
||||
outliers_network = data[data["Network"] > threshold]["Service"].tolist()
|
||||
@@ -47,44 +47,85 @@ def get_services_above_heatmap_threshold(dataframe, cpu_threshold, mem_threshold
|
||||
return cpu_services, mem_services
|
||||
|
||||
|
||||
def analysis(file_path, chaos_tests_config):
|
||||
def analysis(file_path, namespaces, chaos_tests_config, threshold,
|
||||
heatmap_cpu_threshold, heatmap_mem_threshold):
|
||||
# Load the telemetry data from file
|
||||
logging.info("Fetching the Telemetry data...")
|
||||
data = load_telemetry_data(file_path)
|
||||
|
||||
# Calculate Z-scores for CPU, Memory, and Network columns
|
||||
zscores = calculate_zscores(data)
|
||||
# Dict for saving analysis data -- key is the namespace
|
||||
analysis_data = {}
|
||||
|
||||
# Identify outliers
|
||||
outliers_cpu, outliers_memory, outliers_network = identify_outliers(zscores)
|
||||
cpu_services, mem_services = get_services_above_heatmap_threshold(data, heatmap_cpu_threshold, heatmap_mem_threshold)
|
||||
# Identify outliers for each namespace
|
||||
for namespace in namespaces:
|
||||
|
||||
# Display the identified outliers
|
||||
logging.info("======================== Profiling ==================================")
|
||||
logging.info(f"CPU outliers: {outliers_cpu}")
|
||||
logging.info(f"Memory outliers: {outliers_memory}")
|
||||
logging.info(f"Network outliers: {outliers_network}")
|
||||
logging.info("===================== HeatMap Analysis ==============================")
|
||||
logging.info(f"Identifying outliers for namespace {namespace}...")
|
||||
|
||||
namespace_zscores = zscores.loc[zscores["Namespace"] == namespace]
|
||||
namespace_data = data.loc[data["namespace"] == namespace]
|
||||
outliers_cpu, outliers_memory, outliers_network = identify_outliers(
|
||||
namespace_zscores, threshold)
|
||||
cpu_services, mem_services = get_services_above_heatmap_threshold(
|
||||
namespace_data, heatmap_cpu_threshold, heatmap_mem_threshold)
|
||||
|
||||
analysis_data[namespace] = analysis_json(outliers_cpu, outliers_memory,
|
||||
outliers_network,
|
||||
cpu_services, mem_services,
|
||||
chaos_tests_config)
|
||||
|
||||
if cpu_services:
|
||||
logging.info(f"These services use significant CPU compared to "
|
||||
f"their assigned limits: {cpu_services}")
|
||||
else:
|
||||
logging.info("There are no services that are using significant "
|
||||
"CPU compared to their assigned limits "
|
||||
"(infinite in case no limits are set).")
|
||||
if mem_services:
|
||||
logging.info(f"These services use significant MEMORY compared to "
|
||||
f"their assigned limits: {mem_services}")
|
||||
else:
|
||||
logging.info("There are no services that are using significant "
|
||||
"MEMORY compared to their assigned limits "
|
||||
"(infinite in case no limits are set).")
|
||||
time.sleep(2)
|
||||
|
||||
logging.info("Please check data in utilisation.txt for further analysis")
|
||||
|
||||
return analysis_data
|
||||
|
||||
|
||||
def analysis_json(outliers_cpu, outliers_memory, outliers_network,
|
||||
cpu_services, mem_services, chaos_tests_config):
|
||||
|
||||
profiling = {
|
||||
"cpu_outliers": outliers_cpu,
|
||||
"memory_outliers": outliers_memory,
|
||||
"network_outliers": outliers_network
|
||||
}
|
||||
|
||||
heatmap = {
|
||||
"services_with_cpu_heatmap_above_threshold": cpu_services,
|
||||
"services_with_mem_heatmap_above_threshold": mem_services
|
||||
}
|
||||
|
||||
recommendations = {}
|
||||
|
||||
if cpu_services:
|
||||
logging.info("Services with CPU_HEATMAP above threshold:", cpu_services)
|
||||
else:
|
||||
logging.info("There are no services that are using siginificant CPU compared to their assigned limits (infinite in case no limits are set).")
|
||||
cpu_recommend = {"services": cpu_services,
|
||||
"tests": chaos_tests_config['CPU']}
|
||||
recommendations["cpu_services_recommendations"] = cpu_recommend
|
||||
|
||||
if mem_services:
|
||||
logging.info("Services with MEM_HEATMAP above threshold:", mem_services)
|
||||
else:
|
||||
logging.info("There are no services that are using siginificant MEMORY compared to their assigned limits (infinite in case no limits are set).")
|
||||
time.sleep(2)
|
||||
logging.info("======================= Recommendations =============================")
|
||||
if cpu_services:
|
||||
logging.info(f"Recommended tests for {str(cpu_services)} :\n {chaos_tests_config['CPU']}")
|
||||
logging.info("\n")
|
||||
if mem_services:
|
||||
logging.info(f"Recommended tests for {str(mem_services)} :\n {chaos_tests_config['MEM']}")
|
||||
logging.info("\n")
|
||||
mem_recommend = {"services": mem_services,
|
||||
"tests": chaos_tests_config['MEM']}
|
||||
recommendations["mem_services_recommendations"] = mem_recommend
|
||||
|
||||
if outliers_network:
|
||||
logging.info(f"Recommended tests for str(outliers_network) :\n {chaos_tests_config['NETWORK']}")
|
||||
logging.info("\n")
|
||||
outliers_network_recommend = {"outliers_networks": outliers_network,
|
||||
"tests": chaos_tests_config['NETWORK']}
|
||||
recommendations["outliers_network_recommendations"] = (
|
||||
outliers_network_recommend)
|
||||
|
||||
logging.info("\n")
|
||||
logging.info("Please check data in utilisation.txt for further analysis")
|
||||
return [profiling, heatmap, recommendations]
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
import logging
|
||||
|
||||
import pandas
|
||||
from prometheus_api_client import PrometheusConnect
|
||||
import pandas as pd
|
||||
import urllib3
|
||||
@@ -8,6 +7,7 @@ import urllib3
|
||||
|
||||
saved_metrics_path = "./utilisation.txt"
|
||||
|
||||
|
||||
def convert_data_to_dataframe(data, label):
|
||||
df = pd.DataFrame()
|
||||
df['service'] = [item['metric']['pod'] for item in data]
|
||||
@@ -17,29 +17,60 @@ def convert_data_to_dataframe(data, label):
|
||||
|
||||
|
||||
def convert_data(data, service):
|
||||
|
||||
result = {}
|
||||
for entry in data:
|
||||
pod_name = entry['metric']['pod']
|
||||
value = entry['value'][1]
|
||||
result[pod_name] = value
|
||||
return result.get(service, '100000000000') # for those pods whose limits are not defined they can take as much resources, there assigning a very high value
|
||||
|
||||
def save_utilization_to_file(cpu_data, cpu_limits_result, mem_data, mem_limits_result, network_data, filename):
|
||||
df_cpu = convert_data_to_dataframe(cpu_data, "CPU")
|
||||
merged_df = pd.DataFrame(columns=['service','CPU','CPU_LIMITS','MEM','MEM_LIMITS','NETWORK'])
|
||||
services = df_cpu.service.unique()
|
||||
logging.info(services)
|
||||
|
||||
for s in services:
|
||||
|
||||
new_row_df = pd.DataFrame( {"service": s, "CPU" : convert_data(cpu_data, s),
|
||||
"CPU_LIMITS" : convert_data(cpu_limits_result, s),
|
||||
"MEM" : convert_data(mem_data, s), "MEM_LIMITS" : convert_data(mem_limits_result, s),
|
||||
"NETWORK" : convert_data(network_data, s)}, index=[0])
|
||||
merged_df = pd.concat([merged_df, new_row_df], ignore_index=True)
|
||||
return result.get(service) # for those pods whose limits are not defined they can take as much resources, there assigning a very high value
|
||||
|
||||
|
||||
def convert_data_limits(data, node_data, service, prometheus):
|
||||
result = {}
|
||||
for entry in data:
|
||||
pod_name = entry['metric']['pod']
|
||||
value = entry['value'][1]
|
||||
result[pod_name] = value
|
||||
return result.get(service, get_node_capacity(node_data, service, prometheus)) # for those pods whose limits are not defined they can take as much resources, there assigning a very high value
|
||||
|
||||
def get_node_capacity(node_data, pod_name, prometheus ):
|
||||
|
||||
# Get the node name on which the pod is running
|
||||
query = f'kube_pod_info{{pod="{pod_name}"}}'
|
||||
result = prometheus.custom_query(query)
|
||||
if not result:
|
||||
return None
|
||||
|
||||
node_name = result[0]['metric']['node']
|
||||
|
||||
for item in node_data:
|
||||
if item['metric']['node'] == node_name:
|
||||
return item['value'][1]
|
||||
|
||||
return '1000000000'
|
||||
|
||||
|
||||
def save_utilization_to_file(utilization, filename, prometheus):
|
||||
|
||||
merged_df = pd.DataFrame(columns=['namespace', 'service', 'CPU', 'CPU_LIMITS', 'MEM', 'MEM_LIMITS', 'NETWORK'])
|
||||
for namespace in utilization:
|
||||
# Loading utilization_data[] for namespace
|
||||
# indexes -- 0 CPU, 1 CPU limits, 2 mem, 3 mem limits, 4 network
|
||||
utilization_data = utilization[namespace]
|
||||
df_cpu = convert_data_to_dataframe(utilization_data[0], "CPU")
|
||||
services = df_cpu.service.unique()
|
||||
logging.info(f"Services for namespace {namespace}: {services}")
|
||||
|
||||
for s in services:
|
||||
|
||||
new_row_df = pd.DataFrame({
|
||||
"namespace": namespace, "service": s,
|
||||
"CPU": convert_data(utilization_data[0], s),
|
||||
"CPU_LIMITS": convert_data_limits(utilization_data[1],utilization_data[5], s, prometheus),
|
||||
"MEM": convert_data(utilization_data[2], s),
|
||||
"MEM_LIMITS": convert_data_limits(utilization_data[3], utilization_data[6], s, prometheus),
|
||||
"NETWORK": convert_data(utilization_data[4], s)}, index=[0])
|
||||
merged_df = pd.concat([merged_df, new_row_df], ignore_index=True)
|
||||
|
||||
# Convert columns to string
|
||||
merged_df['CPU'] = merged_df['CPU'].astype(str)
|
||||
@@ -49,48 +80,65 @@ def save_utilization_to_file(cpu_data, cpu_limits_result, mem_data, mem_limits_r
|
||||
merged_df['NETWORK'] = merged_df['NETWORK'].astype(str)
|
||||
|
||||
# Extract integer part before the decimal point
|
||||
merged_df['CPU'] = merged_df['CPU'].str.split('.').str[0]
|
||||
merged_df['MEM'] = merged_df['MEM'].str.split('.').str[0]
|
||||
merged_df['CPU_LIMITS'] = merged_df['CPU_LIMITS'].str.split('.').str[0]
|
||||
merged_df['MEM_LIMITS'] = merged_df['MEM_LIMITS'].str.split('.').str[0]
|
||||
merged_df['NETWORK'] = merged_df['NETWORK'].str.split('.').str[0]
|
||||
#merged_df['CPU'] = merged_df['CPU'].str.split('.').str[0]
|
||||
#merged_df['MEM'] = merged_df['MEM'].str.split('.').str[0]
|
||||
#merged_df['CPU_LIMITS'] = merged_df['CPU_LIMITS'].str.split('.').str[0]
|
||||
#merged_df['MEM_LIMITS'] = merged_df['MEM_LIMITS'].str.split('.').str[0]
|
||||
#merged_df['NETWORK'] = merged_df['NETWORK'].str.split('.').str[0]
|
||||
|
||||
merged_df.to_csv(filename, sep='\t', index=False)
|
||||
|
||||
def fetch_utilization_from_prometheus(prometheus_endpoint, auth_token, namespace, scrape_duration):
|
||||
|
||||
def fetch_utilization_from_prometheus(prometheus_endpoint, auth_token,
|
||||
namespaces, scrape_duration):
|
||||
urllib3.disable_warnings()
|
||||
prometheus = PrometheusConnect(url=prometheus_endpoint, headers={'Authorization':'Bearer {}'.format(auth_token)}, disable_ssl=True)
|
||||
prometheus = PrometheusConnect(url=prometheus_endpoint, headers={
|
||||
'Authorization':'Bearer {}'.format(auth_token)}, disable_ssl=True)
|
||||
|
||||
# Fetch CPU utilization
|
||||
cpu_query = 'sum (rate (container_cpu_usage_seconds_total{image!="", namespace="%s"}[%s])) by (pod) *1000' % (namespace,scrape_duration)
|
||||
logging.info(cpu_query)
|
||||
cpu_result = prometheus.custom_query(cpu_query)
|
||||
cpu_data = cpu_result
|
||||
|
||||
|
||||
cpu_limits_query = '(sum by (pod) (kube_pod_container_resource_limits{resource="cpu", namespace="%s"}))*1000' %(namespace)
|
||||
logging.info(cpu_limits_query)
|
||||
cpu_limits_result = prometheus.custom_query(cpu_limits_query)
|
||||
|
||||
|
||||
mem_query = 'sum by (pod) (avg_over_time(container_memory_usage_bytes{image!="", namespace="%s"}[%s]))' % (namespace, scrape_duration)
|
||||
logging.info(mem_query)
|
||||
mem_result = prometheus.custom_query(mem_query)
|
||||
mem_data = mem_result
|
||||
|
||||
mem_limits_query = 'sum by (pod) (kube_pod_container_resource_limits{resource="memory", namespace="%s"}) ' %(namespace)
|
||||
logging.info(mem_limits_query)
|
||||
mem_limits_result = prometheus.custom_query(mem_limits_query)
|
||||
|
||||
|
||||
network_query = 'sum by (pod) ((avg_over_time(container_network_transmit_bytes_total{namespace="%s"}[%s])) + \
|
||||
(avg_over_time(container_network_receive_bytes_total{namespace="%s"}[%s])))' % (namespace, scrape_duration, namespace, scrape_duration)
|
||||
network_result = prometheus.custom_query(network_query)
|
||||
logging.info(network_query)
|
||||
network_data = network_result
|
||||
|
||||
|
||||
save_utilization_to_file(cpu_data, cpu_limits_result, mem_data, mem_limits_result, network_data, saved_metrics_path)
|
||||
return saved_metrics_path
|
||||
# Dicts for saving utilisation and queries -- key is namespace
|
||||
utilization = {}
|
||||
queries = {}
|
||||
|
||||
logging.info("Fetching utilization...")
|
||||
for namespace in namespaces:
|
||||
|
||||
# Fetch CPU utilization
|
||||
cpu_query = 'sum (rate (container_cpu_usage_seconds_total{image!="", namespace="%s"}[%s])) by (pod) *1000' % (namespace,scrape_duration)
|
||||
cpu_result = prometheus.custom_query(cpu_query)
|
||||
|
||||
cpu_limits_query = '(sum by (pod) (kube_pod_container_resource_limits{resource="cpu", namespace="%s"}))*1000' %(namespace)
|
||||
cpu_limits_result = prometheus.custom_query(cpu_limits_query)
|
||||
|
||||
node_cpu_limits_query = 'kube_node_status_capacity{resource="cpu", unit="core"}*1000'
|
||||
node_cpu_limits_result = prometheus.custom_query(node_cpu_limits_query)
|
||||
|
||||
mem_query = 'sum by (pod) (avg_over_time(container_memory_usage_bytes{image!="", namespace="%s"}[%s]))' % (namespace, scrape_duration)
|
||||
mem_result = prometheus.custom_query(mem_query)
|
||||
|
||||
mem_limits_query = 'sum by (pod) (kube_pod_container_resource_limits{resource="memory", namespace="%s"}) ' %(namespace)
|
||||
mem_limits_result = prometheus.custom_query(mem_limits_query)
|
||||
|
||||
node_mem_limits_query = 'kube_node_status_capacity{resource="memory", unit="byte"}'
|
||||
node_mem_limits_result = prometheus.custom_query(node_mem_limits_query)
|
||||
|
||||
network_query = 'sum by (pod) ((avg_over_time(container_network_transmit_bytes_total{namespace="%s"}[%s])) + \
|
||||
(avg_over_time(container_network_receive_bytes_total{namespace="%s"}[%s])))' % (namespace, scrape_duration, namespace, scrape_duration)
|
||||
network_result = prometheus.custom_query(network_query)
|
||||
|
||||
utilization[namespace] = [cpu_result, cpu_limits_result, mem_result, mem_limits_result, network_result, node_cpu_limits_result, node_mem_limits_result ]
|
||||
queries[namespace] = json_queries(cpu_query, cpu_limits_query, mem_query, mem_limits_query, network_query)
|
||||
|
||||
save_utilization_to_file(utilization, saved_metrics_path, prometheus)
|
||||
|
||||
return saved_metrics_path, queries
|
||||
|
||||
|
||||
def json_queries(cpu_query, cpu_limits_query, mem_query, mem_limits_query, network_query):
|
||||
queries = {
|
||||
"cpu_query": cpu_query,
|
||||
"cpu_limit_query": cpu_limits_query,
|
||||
"memory_query": mem_query,
|
||||
"memory_limit_query": mem_limits_query,
|
||||
"network_query": network_query
|
||||
}
|
||||
return queries
|
||||
|
||||
@@ -1,892 +0,0 @@
|
||||
import logging
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
|
||||
from kubernetes import client, config, utils, watch
|
||||
from kubernetes.client.rest import ApiException
|
||||
from kubernetes.dynamic.client import DynamicClient
|
||||
from kubernetes.stream import stream
|
||||
|
||||
from ..kubernetes.resources import (PVC, ChaosEngine, ChaosResult, Container,
|
||||
LitmusChaosObject, Pod, Volume,
|
||||
VolumeMount)
|
||||
|
||||
kraken_node_name = ""
|
||||
|
||||
|
||||
# Load kubeconfig and initialize kubernetes python client
|
||||
def initialize_clients(kubeconfig_path):
|
||||
global cli
|
||||
global batch_cli
|
||||
global watch_resource
|
||||
global api_client
|
||||
global dyn_client
|
||||
global custom_object_client
|
||||
try:
|
||||
if kubeconfig_path:
|
||||
config.load_kube_config(kubeconfig_path)
|
||||
else:
|
||||
config.load_incluster_config()
|
||||
api_client = client.ApiClient()
|
||||
cli = client.CoreV1Api(api_client)
|
||||
batch_cli = client.BatchV1Api(api_client)
|
||||
custom_object_client = client.CustomObjectsApi(api_client)
|
||||
dyn_client = DynamicClient(api_client)
|
||||
watch_resource = watch.Watch()
|
||||
except ApiException as e:
|
||||
logging.error("Failed to initialize kubernetes client: %s\n" % e)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def get_host() -> str:
|
||||
"""Returns the Kubernetes server URL"""
|
||||
return client.configuration.Configuration.get_default_copy().host
|
||||
|
||||
|
||||
def get_clusterversion_string() -> str:
|
||||
"""
|
||||
Returns clusterversion status text on OpenShift, empty string
|
||||
on other distributions
|
||||
"""
|
||||
try:
|
||||
cvs = custom_object_client.list_cluster_custom_object(
|
||||
"config.openshift.io",
|
||||
"v1",
|
||||
"clusterversions",
|
||||
)
|
||||
for cv in cvs["items"]:
|
||||
for condition in cv["status"]["conditions"]:
|
||||
if condition["type"] == "Progressing":
|
||||
return condition["message"]
|
||||
return ""
|
||||
except client.exceptions.ApiException as e:
|
||||
if e.status == 404:
|
||||
return ""
|
||||
else:
|
||||
raise
|
||||
|
||||
|
||||
# List all namespaces
|
||||
def list_namespaces(label_selector=None):
|
||||
namespaces = []
|
||||
try:
|
||||
if label_selector:
|
||||
ret = cli.list_namespace(
|
||||
pretty=True,
|
||||
label_selector=label_selector
|
||||
)
|
||||
else:
|
||||
ret = cli.list_namespace(pretty=True)
|
||||
except ApiException as e:
|
||||
logging.error(
|
||||
"Exception when calling CoreV1Api->list_namespaced_pod: %s\n" % e
|
||||
)
|
||||
raise e
|
||||
for namespace in ret.items:
|
||||
namespaces.append(namespace.metadata.name)
|
||||
return namespaces
|
||||
|
||||
|
||||
def get_namespace_status(namespace_name):
|
||||
"""Get status of a given namespace"""
|
||||
ret = ""
|
||||
try:
|
||||
ret = cli.read_namespace_status(namespace_name)
|
||||
except ApiException as e:
|
||||
logging.error(
|
||||
"Exception when calling CoreV1Api->read_namespace_status: %s\n" % e
|
||||
)
|
||||
return ret.status.phase
|
||||
|
||||
|
||||
def delete_namespace(namespace):
|
||||
"""Deletes a given namespace using kubernetes python client"""
|
||||
try:
|
||||
api_response = cli.delete_namespace(namespace)
|
||||
logging.debug(
|
||||
"Namespace deleted. status='%s'" % str(api_response.status)
|
||||
)
|
||||
return api_response
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Exception when calling \
|
||||
CoreV1Api->delete_namespace: %s\n"
|
||||
% e
|
||||
)
|
||||
|
||||
|
||||
def check_namespaces(namespaces, label_selectors=None):
|
||||
"""Check if all the watch_namespaces are valid"""
|
||||
try:
|
||||
valid_namespaces = list_namespaces(label_selectors)
|
||||
regex_namespaces = set(namespaces) - set(valid_namespaces)
|
||||
final_namespaces = set(namespaces) - set(regex_namespaces)
|
||||
valid_regex = set()
|
||||
if regex_namespaces:
|
||||
for namespace in valid_namespaces:
|
||||
for regex_namespace in regex_namespaces:
|
||||
if re.search(regex_namespace, namespace):
|
||||
final_namespaces.add(namespace)
|
||||
valid_regex.add(regex_namespace)
|
||||
break
|
||||
invalid_namespaces = regex_namespaces - valid_regex
|
||||
if invalid_namespaces:
|
||||
raise Exception(
|
||||
"There exists no namespaces matching: %s" %
|
||||
(invalid_namespaces)
|
||||
)
|
||||
return list(final_namespaces)
|
||||
except Exception as e:
|
||||
logging.info("%s" % (e))
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
# List nodes in the cluster
|
||||
def list_nodes(label_selector=None):
|
||||
nodes = []
|
||||
try:
|
||||
if label_selector:
|
||||
ret = cli.list_node(pretty=True, label_selector=label_selector)
|
||||
else:
|
||||
ret = cli.list_node(pretty=True)
|
||||
except ApiException as e:
|
||||
logging.error("Exception when calling CoreV1Api->list_node: %s\n" % e)
|
||||
raise e
|
||||
for node in ret.items:
|
||||
nodes.append(node.metadata.name)
|
||||
return nodes
|
||||
|
||||
|
||||
# List nodes in the cluster that can be killed
|
||||
def list_killable_nodes(label_selector=None):
|
||||
nodes = []
|
||||
try:
|
||||
if label_selector:
|
||||
ret = cli.list_node(pretty=True, label_selector=label_selector)
|
||||
else:
|
||||
ret = cli.list_node(pretty=True)
|
||||
except ApiException as e:
|
||||
logging.error("Exception when calling CoreV1Api->list_node: %s\n" % e)
|
||||
raise e
|
||||
for node in ret.items:
|
||||
if kraken_node_name != node.metadata.name:
|
||||
for cond in node.status.conditions:
|
||||
if str(cond.type) == "Ready" and str(cond.status) == "True":
|
||||
nodes.append(node.metadata.name)
|
||||
return nodes
|
||||
|
||||
|
||||
# List managedclusters attached to the hub that can be killed
|
||||
def list_killable_managedclusters(label_selector=None):
|
||||
managedclusters = []
|
||||
try:
|
||||
ret = custom_object_client.list_cluster_custom_object(
|
||||
group="cluster.open-cluster-management.io",
|
||||
version="v1",
|
||||
plural="managedclusters",
|
||||
label_selector=label_selector
|
||||
)
|
||||
except ApiException as e:
|
||||
logging.error("Exception when calling CustomObjectsApi->list_cluster_custom_object: %s\n" % e)
|
||||
raise e
|
||||
for managedcluster in ret['items']:
|
||||
conditions = managedcluster['status']['conditions']
|
||||
available = list(filter(lambda condition: condition['reason'] == 'ManagedClusterAvailable', conditions))
|
||||
if available and available[0]['status'] == 'True':
|
||||
managedclusters.append(managedcluster['metadata']['name'])
|
||||
return managedclusters
|
||||
|
||||
# List pods in the given namespace
|
||||
def list_pods(namespace, label_selector=None):
|
||||
pods = []
|
||||
try:
|
||||
if label_selector:
|
||||
ret = cli.list_namespaced_pod(
|
||||
namespace,
|
||||
pretty=True,
|
||||
label_selector=label_selector
|
||||
)
|
||||
else:
|
||||
ret = cli.list_namespaced_pod(namespace, pretty=True)
|
||||
except ApiException as e:
|
||||
logging.error(
|
||||
"Exception when calling \
|
||||
CoreV1Api->list_namespaced_pod: %s\n"
|
||||
% e
|
||||
)
|
||||
raise e
|
||||
for pod in ret.items:
|
||||
pods.append(pod.metadata.name)
|
||||
return pods
|
||||
|
||||
|
||||
def get_all_pods(label_selector=None):
|
||||
pods = []
|
||||
if label_selector:
|
||||
ret = cli.list_pod_for_all_namespaces(
|
||||
pretty=True,
|
||||
label_selector=label_selector
|
||||
)
|
||||
else:
|
||||
ret = cli.list_pod_for_all_namespaces(pretty=True)
|
||||
for pod in ret.items:
|
||||
pods.append([pod.metadata.name, pod.metadata.namespace])
|
||||
return pods
|
||||
|
||||
|
||||
# Execute command in pod
|
||||
def exec_cmd_in_pod(
|
||||
command,
|
||||
pod_name,
|
||||
namespace,
|
||||
container=None,
|
||||
base_command="bash"
|
||||
):
|
||||
|
||||
exec_command = [base_command, "-c", command]
|
||||
try:
|
||||
if container:
|
||||
ret = stream(
|
||||
cli.connect_get_namespaced_pod_exec,
|
||||
pod_name,
|
||||
namespace,
|
||||
container=container,
|
||||
command=exec_command,
|
||||
stderr=True,
|
||||
stdin=False,
|
||||
stdout=True,
|
||||
tty=False,
|
||||
)
|
||||
else:
|
||||
ret = stream(
|
||||
cli.connect_get_namespaced_pod_exec,
|
||||
pod_name,
|
||||
namespace,
|
||||
command=exec_command,
|
||||
stderr=True,
|
||||
stdin=False,
|
||||
stdout=True,
|
||||
tty=False,
|
||||
)
|
||||
except Exception:
|
||||
return False
|
||||
return ret
|
||||
|
||||
|
||||
def delete_pod(name, namespace):
|
||||
try:
|
||||
cli.delete_namespaced_pod(name=name, namespace=namespace)
|
||||
while cli.read_namespaced_pod(name=name, namespace=namespace):
|
||||
time.sleep(1)
|
||||
except ApiException as e:
|
||||
if e.status == 404:
|
||||
logging.info("Pod already deleted")
|
||||
else:
|
||||
logging.error("Failed to delete pod %s" % e)
|
||||
raise e
|
||||
|
||||
|
||||
def create_pod(body, namespace, timeout=120):
|
||||
try:
|
||||
pod_stat = None
|
||||
pod_stat = cli.create_namespaced_pod(body=body, namespace=namespace)
|
||||
end_time = time.time() + timeout
|
||||
while True:
|
||||
pod_stat = cli.read_namespaced_pod(
|
||||
name=body["metadata"]["name"],
|
||||
namespace=namespace
|
||||
)
|
||||
if pod_stat.status.phase == "Running":
|
||||
break
|
||||
if time.time() > end_time:
|
||||
raise Exception("Starting pod failed")
|
||||
time.sleep(1)
|
||||
except Exception as e:
|
||||
logging.error("Pod creation failed %s" % e)
|
||||
if pod_stat:
|
||||
logging.error(pod_stat.status.container_statuses)
|
||||
delete_pod(body["metadata"]["name"], namespace)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def read_pod(name, namespace="default"):
|
||||
return cli.read_namespaced_pod(name=name, namespace=namespace)
|
||||
|
||||
|
||||
def get_pod_log(name, namespace="default"):
|
||||
return cli.read_namespaced_pod_log(
|
||||
name=name,
|
||||
namespace=namespace,
|
||||
_return_http_data_only=True,
|
||||
_preload_content=False
|
||||
)
|
||||
|
||||
|
||||
def get_containers_in_pod(pod_name, namespace):
|
||||
pod_info = cli.read_namespaced_pod(pod_name, namespace)
|
||||
container_names = []
|
||||
|
||||
for cont in pod_info.spec.containers:
|
||||
container_names.append(cont.name)
|
||||
return container_names
|
||||
|
||||
|
||||
def delete_job(name, namespace="default"):
|
||||
try:
|
||||
api_response = batch_cli.delete_namespaced_job(
|
||||
name=name,
|
||||
namespace=namespace,
|
||||
body=client.V1DeleteOptions(
|
||||
propagation_policy="Foreground",
|
||||
grace_period_seconds=0
|
||||
),
|
||||
)
|
||||
logging.debug("Job deleted. status='%s'" % str(api_response.status))
|
||||
return api_response
|
||||
except ApiException as api:
|
||||
logging.warn(
|
||||
"Exception when calling \
|
||||
BatchV1Api->create_namespaced_job: %s"
|
||||
% api
|
||||
)
|
||||
logging.warn("Job already deleted\n")
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Exception when calling \
|
||||
BatchV1Api->delete_namespaced_job: %s\n"
|
||||
% e
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def create_job(body, namespace="default"):
|
||||
try:
|
||||
api_response = batch_cli.create_namespaced_job(
|
||||
body=body,
|
||||
namespace=namespace
|
||||
)
|
||||
return api_response
|
||||
except ApiException as api:
|
||||
logging.warn(
|
||||
"Exception when calling \
|
||||
BatchV1Api->create_job: %s"
|
||||
% api
|
||||
)
|
||||
if api.status == 409:
|
||||
logging.warn("Job already present")
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Exception when calling \
|
||||
BatchV1Api->create_namespaced_job: %s"
|
||||
% e
|
||||
)
|
||||
raise
|
||||
|
||||
|
||||
def create_manifestwork(body, namespace):
|
||||
try:
|
||||
api_response = custom_object_client.create_namespaced_custom_object(
|
||||
group="work.open-cluster-management.io",
|
||||
version="v1",
|
||||
plural="manifestworks",
|
||||
body=body,
|
||||
namespace=namespace
|
||||
)
|
||||
return api_response
|
||||
except ApiException as e:
|
||||
print("Exception when calling CustomObjectsApi->create_namespaced_custom_object: %s\n" % e)
|
||||
|
||||
|
||||
def delete_manifestwork(namespace):
|
||||
try:
|
||||
api_response = custom_object_client.delete_namespaced_custom_object(
|
||||
group="work.open-cluster-management.io",
|
||||
version="v1",
|
||||
plural="manifestworks",
|
||||
name="managedcluster-scenarios-template",
|
||||
namespace=namespace
|
||||
)
|
||||
return api_response
|
||||
except ApiException as e:
|
||||
print("Exception when calling CustomObjectsApi->delete_namespaced_custom_object: %s\n" % e)
|
||||
|
||||
def get_job_status(name, namespace="default"):
|
||||
try:
|
||||
return batch_cli.read_namespaced_job_status(
|
||||
name=name,
|
||||
namespace=namespace
|
||||
)
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Exception when calling \
|
||||
BatchV1Api->read_namespaced_job_status: %s"
|
||||
% e
|
||||
)
|
||||
raise
|
||||
|
||||
|
||||
# Monitor the status of the cluster nodes and set the status to true or false
|
||||
def monitor_nodes():
|
||||
nodes = list_nodes()
|
||||
notready_nodes = []
|
||||
node_kerneldeadlock_status = "False"
|
||||
for node in nodes:
|
||||
try:
|
||||
node_info = cli.read_node_status(node, pretty=True)
|
||||
except ApiException as e:
|
||||
logging.error(
|
||||
"Exception when calling \
|
||||
CoreV1Api->read_node_status: %s\n"
|
||||
% e
|
||||
)
|
||||
raise e
|
||||
for condition in node_info.status.conditions:
|
||||
if condition.type == "KernelDeadlock":
|
||||
node_kerneldeadlock_status = condition.status
|
||||
elif condition.type == "Ready":
|
||||
node_ready_status = condition.status
|
||||
else:
|
||||
continue
|
||||
if node_kerneldeadlock_status != "False" or node_ready_status != "True": # noqa # noqa
|
||||
notready_nodes.append(node)
|
||||
if len(notready_nodes) != 0:
|
||||
status = False
|
||||
else:
|
||||
status = True
|
||||
return status, notready_nodes
|
||||
|
||||
|
||||
# Monitor the status of the pods in the specified namespace
|
||||
# and set the status to true or false
|
||||
def monitor_namespace(namespace):
|
||||
pods = list_pods(namespace)
|
||||
notready_pods = []
|
||||
for pod in pods:
|
||||
try:
|
||||
pod_info = cli.read_namespaced_pod_status(
|
||||
pod,
|
||||
namespace,
|
||||
pretty=True
|
||||
)
|
||||
except ApiException as e:
|
||||
logging.error(
|
||||
"Exception when calling \
|
||||
CoreV1Api->read_namespaced_pod_status: %s\n"
|
||||
% e
|
||||
)
|
||||
raise e
|
||||
pod_status = pod_info.status.phase
|
||||
if (
|
||||
pod_status != "Running" and
|
||||
pod_status != "Completed" and
|
||||
pod_status != "Succeeded"
|
||||
):
|
||||
notready_pods.append(pod)
|
||||
if len(notready_pods) != 0:
|
||||
status = False
|
||||
else:
|
||||
status = True
|
||||
return status, notready_pods
|
||||
|
||||
|
||||
# Monitor component namespace
|
||||
def monitor_component(iteration, component_namespace):
|
||||
watch_component_status, failed_component_pods = \
|
||||
monitor_namespace(component_namespace)
|
||||
logging.info(
|
||||
"Iteration %s: %s: %s" % (
|
||||
iteration,
|
||||
component_namespace,
|
||||
watch_component_status
|
||||
)
|
||||
)
|
||||
return watch_component_status, failed_component_pods
|
||||
|
||||
|
||||
def apply_yaml(path, namespace='default'):
|
||||
"""
|
||||
Apply yaml config to create Kubernetes resources
|
||||
|
||||
Args:
|
||||
path (string)
|
||||
- Path to the YAML file
|
||||
namespace (string)
|
||||
- Namespace to create the resource
|
||||
|
||||
Returns:
|
||||
The object created
|
||||
"""
|
||||
|
||||
return utils.create_from_yaml(
|
||||
api_client,
|
||||
yaml_file=path,
|
||||
namespace=namespace
|
||||
)
|
||||
|
||||
|
||||
def get_pod_info(name: str, namespace: str = 'default') -> Pod:
|
||||
"""
|
||||
Function to retrieve information about a specific pod
|
||||
in a given namespace. The kubectl command is given by:
|
||||
kubectl get pods <name> -n <namespace>
|
||||
|
||||
Args:
|
||||
name (string)
|
||||
- Name of the pod
|
||||
|
||||
namespace (string)
|
||||
- Namespace to look for the pod
|
||||
|
||||
Returns:
|
||||
- Data class object of type Pod with the output of the above
|
||||
kubectl command in the given format if the pod exists
|
||||
- Returns None if the pod doesn't exist
|
||||
"""
|
||||
pod_exists = check_if_pod_exists(name=name, namespace=namespace)
|
||||
if pod_exists:
|
||||
response = cli.read_namespaced_pod(
|
||||
name=name,
|
||||
namespace=namespace,
|
||||
pretty='true'
|
||||
)
|
||||
container_list = []
|
||||
|
||||
# Create a list of containers present in the pod
|
||||
for container in response.spec.containers:
|
||||
volume_mount_list = []
|
||||
for volume_mount in container.volume_mounts:
|
||||
volume_mount_list.append(
|
||||
VolumeMount(
|
||||
name=volume_mount.name,
|
||||
mountPath=volume_mount.mount_path
|
||||
)
|
||||
)
|
||||
container_list.append(
|
||||
Container(
|
||||
name=container.name,
|
||||
image=container.image,
|
||||
volumeMounts=volume_mount_list
|
||||
)
|
||||
)
|
||||
|
||||
for i, container in enumerate(response.status.container_statuses):
|
||||
container_list[i].ready = container.ready
|
||||
|
||||
# Create a list of volumes associated with the pod
|
||||
volume_list = []
|
||||
for volume in response.spec.volumes:
|
||||
volume_name = volume.name
|
||||
pvc_name = (
|
||||
volume.persistent_volume_claim.claim_name
|
||||
if volume.persistent_volume_claim is not None
|
||||
else None
|
||||
)
|
||||
volume_list.append(Volume(name=volume_name, pvcName=pvc_name))
|
||||
|
||||
# Create the Pod data class object
|
||||
pod_info = Pod(
|
||||
name=response.metadata.name,
|
||||
podIP=response.status.pod_ip,
|
||||
namespace=response.metadata.namespace,
|
||||
containers=container_list,
|
||||
nodeName=response.spec.node_name,
|
||||
volumes=volume_list
|
||||
)
|
||||
return pod_info
|
||||
else:
|
||||
logging.error(
|
||||
"Pod '%s' doesn't exist in namespace '%s'" % (
|
||||
str(name),
|
||||
str(namespace)
|
||||
)
|
||||
)
|
||||
return None
|
||||
|
||||
|
||||
def get_litmus_chaos_object(
|
||||
kind: str,
|
||||
name: str,
|
||||
namespace: str
|
||||
) -> LitmusChaosObject:
|
||||
"""
|
||||
Function that returns an object of a custom resource type of
|
||||
the litmus project. Currently, only ChaosEngine and ChaosResult
|
||||
objects are supported.
|
||||
|
||||
Args:
|
||||
kind (string)
|
||||
- The custom resource type
|
||||
|
||||
namespace (string)
|
||||
- Namespace where the custom object is present
|
||||
|
||||
Returns:
|
||||
Data class object of a subclass of LitmusChaosObject
|
||||
"""
|
||||
|
||||
group = 'litmuschaos.io'
|
||||
version = 'v1alpha1'
|
||||
|
||||
if kind.lower() == 'chaosengine':
|
||||
plural = 'chaosengines'
|
||||
response = custom_object_client.get_namespaced_custom_object(
|
||||
group=group,
|
||||
plural=plural,
|
||||
version=version,
|
||||
namespace=namespace,
|
||||
name=name
|
||||
)
|
||||
try:
|
||||
engine_status = response['status']['engineStatus']
|
||||
exp_status = response['status']['experiments'][0]['status']
|
||||
except Exception:
|
||||
engine_status = 'Not Initialized'
|
||||
exp_status = 'Not Initialized'
|
||||
custom_object = ChaosEngine(
|
||||
kind='ChaosEngine',
|
||||
group=group,
|
||||
namespace=namespace,
|
||||
name=name,
|
||||
plural=plural,
|
||||
version=version,
|
||||
engineStatus=engine_status,
|
||||
expStatus=exp_status
|
||||
)
|
||||
elif kind.lower() == 'chaosresult':
|
||||
plural = 'chaosresults'
|
||||
response = custom_object_client.get_namespaced_custom_object(
|
||||
group=group,
|
||||
plural=plural,
|
||||
version=version,
|
||||
namespace=namespace,
|
||||
name=name
|
||||
)
|
||||
try:
|
||||
verdict = response['status']['experimentStatus']['verdict']
|
||||
fail_step = response['status']['experimentStatus']['failStep']
|
||||
except Exception:
|
||||
verdict = 'N/A'
|
||||
fail_step = 'N/A'
|
||||
custom_object = ChaosResult(
|
||||
kind='ChaosResult',
|
||||
group=group,
|
||||
namespace=namespace,
|
||||
name=name,
|
||||
plural=plural,
|
||||
version=version,
|
||||
verdict=verdict,
|
||||
failStep=fail_step
|
||||
)
|
||||
else:
|
||||
logging.error("Invalid litmus chaos custom resource name")
|
||||
custom_object = None
|
||||
return custom_object
|
||||
|
||||
|
||||
def check_if_namespace_exists(name: str) -> bool:
|
||||
"""
|
||||
Function that checks if a namespace exists by parsing through
|
||||
the list of projects.
|
||||
Args:
|
||||
name (string)
|
||||
- Namespace name
|
||||
|
||||
Returns:
|
||||
Boolean value indicating whether the namespace exists or not
|
||||
"""
|
||||
|
||||
v1_projects = dyn_client.resources.get(
|
||||
api_version='project.openshift.io/v1',
|
||||
kind='Project'
|
||||
)
|
||||
project_list = v1_projects.get()
|
||||
return True if name in str(project_list) else False
|
||||
|
||||
|
||||
def check_if_pod_exists(name: str, namespace: str) -> bool:
|
||||
"""
|
||||
Function that checks if a pod exists in the given namespace
|
||||
Args:
|
||||
name (string)
|
||||
- Pod name
|
||||
|
||||
namespace (string)
|
||||
- Namespace name
|
||||
|
||||
Returns:
|
||||
Boolean value indicating whether the pod exists or not
|
||||
"""
|
||||
|
||||
namespace_exists = check_if_namespace_exists(namespace)
|
||||
if namespace_exists:
|
||||
pod_list = list_pods(namespace=namespace)
|
||||
if name in pod_list:
|
||||
return True
|
||||
else:
|
||||
logging.error("Namespace '%s' doesn't exist" % str(namespace))
|
||||
return False
|
||||
|
||||
|
||||
def check_if_pvc_exists(name: str, namespace: str) -> bool:
|
||||
"""
|
||||
Function that checks if a namespace exists by parsing through
|
||||
the list of projects.
|
||||
Args:
|
||||
name (string)
|
||||
- PVC name
|
||||
|
||||
namespace (string)
|
||||
- Namespace name
|
||||
|
||||
Returns:
|
||||
Boolean value indicating whether the Persistent Volume Claim
|
||||
exists or not.
|
||||
"""
|
||||
namespace_exists = check_if_namespace_exists(namespace)
|
||||
if namespace_exists:
|
||||
response = cli.list_namespaced_persistent_volume_claim(
|
||||
namespace=namespace
|
||||
)
|
||||
pvc_list = [pvc.metadata.name for pvc in response.items]
|
||||
if name in pvc_list:
|
||||
return True
|
||||
else:
|
||||
logging.error("Namespace '%s' doesn't exist" % str(namespace))
|
||||
return False
|
||||
|
||||
|
||||
def get_pvc_info(name: str, namespace: str) -> PVC:
|
||||
"""
|
||||
Function to retrieve information about a Persistent Volume Claim in a
|
||||
given namespace
|
||||
|
||||
Args:
|
||||
name (string)
|
||||
- Name of the persistent volume claim
|
||||
|
||||
namespace (string)
|
||||
- Namespace where the persistent volume claim is present
|
||||
|
||||
Returns:
|
||||
- A PVC data class containing the name, capacity, volume name,
|
||||
namespace and associated pod names of the PVC if the PVC exists
|
||||
- Returns None if the PVC doesn't exist
|
||||
"""
|
||||
|
||||
pvc_exists = check_if_pvc_exists(name=name, namespace=namespace)
|
||||
if pvc_exists:
|
||||
pvc_info_response = cli.read_namespaced_persistent_volume_claim(
|
||||
name=name,
|
||||
namespace=namespace,
|
||||
pretty=True
|
||||
)
|
||||
pod_list_response = cli.list_namespaced_pod(namespace=namespace)
|
||||
|
||||
capacity = pvc_info_response.status.capacity['storage']
|
||||
volume_name = pvc_info_response.spec.volume_name
|
||||
|
||||
# Loop through all pods in the namespace to find associated PVCs
|
||||
pvc_pod_list = []
|
||||
for pod in pod_list_response.items:
|
||||
for volume in pod.spec.volumes:
|
||||
if (
|
||||
volume.persistent_volume_claim is not None
|
||||
and volume.persistent_volume_claim.claim_name == name
|
||||
):
|
||||
pvc_pod_list.append(pod.metadata.name)
|
||||
|
||||
pvc_info = PVC(
|
||||
name=name,
|
||||
capacity=capacity,
|
||||
volumeName=volume_name,
|
||||
podNames=pvc_pod_list,
|
||||
namespace=namespace
|
||||
)
|
||||
return pvc_info
|
||||
else:
|
||||
logging.error(
|
||||
"PVC '%s' doesn't exist in namespace '%s'" % (
|
||||
str(name),
|
||||
str(namespace)
|
||||
)
|
||||
)
|
||||
return None
|
||||
|
||||
|
||||
# Find the node kraken is deployed on
|
||||
# Set global kraken node to not delete
|
||||
def find_kraken_node():
|
||||
pods = get_all_pods()
|
||||
kraken_pod_name = None
|
||||
for pod in pods:
|
||||
if "kraken-deployment" in pod[0]:
|
||||
kraken_pod_name = pod[0]
|
||||
kraken_project = pod[1]
|
||||
break
|
||||
# have to switch to proper project
|
||||
|
||||
if kraken_pod_name:
|
||||
# get kraken-deployment pod, find node name
|
||||
try:
|
||||
node_name = get_pod_info(kraken_pod_name, kraken_project).nodeName
|
||||
global kraken_node_name
|
||||
kraken_node_name = node_name
|
||||
except Exception as e:
|
||||
logging.info("%s" % (e))
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
# Watch for a specific node status
|
||||
def watch_node_status(node, status, timeout, resource_version):
|
||||
count = timeout
|
||||
for event in watch_resource.stream(
|
||||
cli.list_node,
|
||||
field_selector=f"metadata.name={node}",
|
||||
timeout_seconds=timeout,
|
||||
resource_version=f"{resource_version}"
|
||||
):
|
||||
conditions = [
|
||||
status
|
||||
for status in event["object"].status.conditions
|
||||
if status.type == "Ready"
|
||||
]
|
||||
if conditions[0].status == status:
|
||||
watch_resource.stop()
|
||||
break
|
||||
else:
|
||||
count -= 1
|
||||
logging.info(
|
||||
"Status of node " + node + ": " + str(conditions[0].status)
|
||||
)
|
||||
if not count:
|
||||
watch_resource.stop()
|
||||
|
||||
|
||||
# Watch for a specific managedcluster status
|
||||
# TODO: Implement this with a watcher instead of polling
|
||||
def watch_managedcluster_status(managedcluster, status, timeout):
|
||||
elapsed_time = 0
|
||||
while True:
|
||||
conditions = custom_object_client.get_cluster_custom_object_status(
|
||||
"cluster.open-cluster-management.io", "v1", "managedclusters", managedcluster
|
||||
)['status']['conditions']
|
||||
available = list(filter(lambda condition: condition['reason'] == 'ManagedClusterAvailable', conditions))
|
||||
if status == "True":
|
||||
if available and available[0]['status'] == "True":
|
||||
logging.info("Status of managedcluster " + managedcluster + ": Available")
|
||||
return True
|
||||
else:
|
||||
if not available:
|
||||
logging.info("Status of managedcluster " + managedcluster + ": Unavailable")
|
||||
return True
|
||||
time.sleep(2)
|
||||
elapsed_time += 2
|
||||
if elapsed_time >= timeout:
|
||||
logging.info("Timeout waiting for managedcluster " + managedcluster + " to become: " + status)
|
||||
return False
|
||||
|
||||
|
||||
# Get the resource version for the specified node
|
||||
def get_node_resource_version(node):
|
||||
return cli.read_node(name=node).metadata.resource_version
|
||||
@@ -1,74 +0,0 @@
from dataclasses import dataclass
from typing import List


@dataclass(frozen=True, order=False)
class Volume:
    """Data class to hold information regarding volumes in a pod"""
    name: str
    pvcName: str


@dataclass(order=False)
class VolumeMount:
    """Data class to hold information regarding volume mounts"""
    name: str
    mountPath: str


@dataclass(frozen=True, order=False)
class PVC:
    """Data class to hold information regarding persistent volume claims"""
    name: str
    capacity: str
    volumeName: str
    podNames: List[str]
    namespace: str


@dataclass(order=False)
class Container:
    """Data class to hold information regarding containers in a pod"""
    image: str
    name: str
    volumeMounts: List[VolumeMount]
    ready: bool = False


@dataclass(frozen=True, order=False)
class Pod:
    """Data class to hold information regarding a pod"""
    name: str
    podIP: str
    namespace: str
    containers: List[Container]
    nodeName: str
    volumes: List[Volume]


@dataclass(frozen=True, order=False)
class LitmusChaosObject:
    """Data class to hold information regarding a custom object of litmus project"""
    kind: str
    group: str
    namespace: str
    name: str
    plural: str
    version: str


@dataclass(frozen=True, order=False)
class ChaosEngine(LitmusChaosObject):
    """Data class to hold information regarding a ChaosEngine object"""
    engineStatus: str
    expStatus: str


@dataclass(frozen=True, order=False)
class ChaosResult(LitmusChaosObject):
    """Data class to hold information regarding a ChaosResult object"""
    verdict: str
    failStep: str
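For reference, the removed model classes above composed into a pod representation as in this minimal sketch; every field value here is illustrative only:

# Illustrative only: how the removed data classes nested together.
nginx_container = Container(
    image="nginx:1.25",
    name="nginx",
    volumeMounts=[VolumeMount(name="data", mountPath="/var/lib/data")],
    ready=True,
)
example_pod = Pod(
    name="nginx-0",
    podIP="10.128.0.15",
    namespace="default",
    containers=[nginx_container],
    nodeName="worker-0",
    volumes=[Volume(name="data", pvcName="data-pvc")],
)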
@@ -3,19 +3,26 @@ import logging
import time
import os
import random

from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift

import kraken.cerberus.setup as cerberus
import kraken.node_actions.common_node_functions as common_node_functions
from jinja2 import Environment, FileSystemLoader
from krkn_lib.k8s import KrknKubernetes
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
from krkn_lib.models.telemetry import ScenarioTelemetry
from krkn_lib.utils.functions import get_yaml_item_value, log_exception

from kraken import utils


# krkn_lib
# Reads the scenario config and introduces traffic variations in Node's host network interface.
def run(scenarios_list, config, wait_duration, kubecli: KrknKubernetes, telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]):
    failed_post_scenarios = ""
def run(scenarios_list,
        config,
        wait_duration,
        telemetry: KrknTelemetryOpenshift,
        telemetry_request_id: str) -> (list[str], list[ScenarioTelemetry]):
    logging.info("Runing the Network Chaos tests")
    failed_post_scenarios = ""
    scenario_telemetries: list[ScenarioTelemetry] = []
@@ -23,8 +30,8 @@ def run(scenarios_list, config, wait_duration, kubecli: KrknKubernetes, telemetr
    for net_config in scenarios_list:
        scenario_telemetry = ScenarioTelemetry()
        scenario_telemetry.scenario = net_config
        scenario_telemetry.startTimeStamp = time.time()
        telemetry.set_parameters_base64(scenario_telemetry, net_config)
        scenario_telemetry.start_timestamp = time.time()
        parsed_scenario_config = telemetry.set_parameters_base64(scenario_telemetry, net_config)
        try:
            with open(net_config, "r") as file:
                param_lst = ["latency", "loss", "bandwidth"]
@@ -56,11 +63,11 @@ def run(scenarios_list, config, wait_duration, kubecli: KrknKubernetes, telemetr
                node_name_list = [test_node]
                nodelst = []
                for single_node_name in node_name_list:
                    nodelst.extend(common_node_functions.get_node(single_node_name, test_node_label, test_instance_count, kubecli))
                    nodelst.extend(common_node_functions.get_node(single_node_name, test_node_label, test_instance_count, telemetry.kubecli))
                file_loader = FileSystemLoader(os.path.abspath(os.path.dirname(__file__)))
                env = Environment(loader=file_loader, autoescape=True)
                pod_template = env.get_template("pod.j2")
                test_interface = verify_interface(test_interface, nodelst, pod_template, kubecli)
                test_interface = verify_interface(test_interface, nodelst, pod_template, telemetry.kubecli)
                joblst = []
                egress_lst = [i for i in param_lst if i in test_egress]
                chaos_config = {
@@ -86,13 +93,13 @@ def run(scenarios_list, config, wait_duration, kubecli: KrknKubernetes, telemetr
                        job_template.render(jobname=i + str(hash(node))[:5], nodename=node, cmd=exec_cmd)
                    )
                    joblst.append(job_body["metadata"]["name"])
                    api_response = kubecli.create_job(job_body)
                    api_response = telemetry.kubecli.create_job(job_body)
                    if api_response is None:
                        raise Exception("Error creating job")
                if test_execution == "serial":
                    logging.info("Waiting for serial job to finish")
                    start_time = int(time.time())
                    wait_for_job(joblst[:], kubecli, test_duration + 300)
                    wait_for_job(joblst[:], telemetry.kubecli, test_duration + 300)
                    logging.info("Waiting for wait_duration %s" % wait_duration)
                    time.sleep(wait_duration)
                    end_time = int(time.time())
@@ -102,7 +109,7 @@ def run(scenarios_list, config, wait_duration, kubecli: KrknKubernetes, telemetr
                if test_execution == "parallel":
                    logging.info("Waiting for parallel job to finish")
                    start_time = int(time.time())
                    wait_for_job(joblst[:], kubecli, test_duration + 300)
                    wait_for_job(joblst[:], telemetry.kubecli, test_duration + 300)
                    logging.info("Waiting for wait_duration %s" % wait_duration)
                    time.sleep(wait_duration)
                    end_time = int(time.time())
@@ -112,13 +119,24 @@ def run(scenarios_list, config, wait_duration, kubecli: KrknKubernetes, telemetr
                raise RuntimeError()
            finally:
                logging.info("Deleting jobs")
                delete_job(joblst[:], kubecli)
                delete_job(joblst[:], telemetry.kubecli)
        except (RuntimeError, Exception):
            scenario_telemetry.exitStatus = 1
            scenario_telemetry.exit_status = 1
            failed_scenarios.append(net_config)
            log_exception(net_config)
        else:
            scenario_telemetry.exitStatus = 0
            scenario_telemetry.exit_status = 0
        scenario_telemetry.end_timestamp = time.time()
        utils.collect_and_put_ocp_logs(telemetry,
                                       parsed_scenario_config,
                                       telemetry_request_id,
                                       int(scenario_telemetry.start_timestamp),
                                       int(scenario_telemetry.end_timestamp))
        utils.populate_cluster_events(scenario_telemetry,
                                      parsed_scenario_config,
                                      telemetry.kubecli,
                                      int(scenario_telemetry.start_timestamp),
                                      int(scenario_telemetry.end_timestamp))
        scenario_telemetries.append(scenario_telemetry)
    return failed_scenarios, scenario_telemetries

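The scenario above builds job manifests by rendering Jinja2 templates and submitting them through create_job. A minimal standalone sketch of that flow, assuming a local "job.j2" template with jobname, nodename and cmd placeholders (the template file name and sample values are illustrative, not taken from the repository):

import os
import yaml
from jinja2 import Environment, FileSystemLoader

# Render a Job manifest from a Jinja2 template, mirroring how the scenario builds job_body.
env = Environment(loader=FileSystemLoader(os.path.abspath(os.path.dirname(__file__))), autoescape=True)
job_template = env.get_template("job.j2")          # hypothetical template name
rendered = job_template.render(jobname="netem-abc12",
                               nodename="worker-0",
                               cmd="tc qdisc add dev ens5 root netem delay 50ms")
job_body = yaml.safe_load(rendered)                 # dict ready to hand to create_job()
print(job_body["metadata"]["name"])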
@@ -1,5 +1,6 @@
import sys
import logging
import time
import kraken.invoke.command as runcommand
import kraken.node_actions.common_node_functions as nodeaction
from krkn_lib.k8s import KrknKubernetes
@@ -18,9 +19,11 @@ class abstract_node_scenarios:
        pass

    # Node scenario to stop and then start the node
    def node_stop_start_scenario(self, instance_kill_count, node, timeout):
    def node_stop_start_scenario(self, instance_kill_count, node, timeout, duration):
        logging.info("Starting node_stop_start_scenario injection")
        self.node_stop_scenario(instance_kill_count, node, timeout)
        logging.info("Waiting for %s seconds before starting the node" % (duration))
        time.sleep(duration)
        self.node_start_scenario(instance_kill_count, node, timeout)
        logging.info("node_stop_start_scenario has been successfully injected!")

@@ -62,6 +65,26 @@ class abstract_node_scenarios:
        self.node_reboot_scenario(instance_kill_count, node, timeout)
        logging.info("stop_start_kubelet_scenario has been successfully injected!")


    # Node scenario to restart the kubelet
    def restart_kubelet_scenario(self, instance_kill_count, node, timeout):
        for _ in range(instance_kill_count):
            try:
                logging.info("Starting restart_kubelet_scenario injection")
                logging.info("Restarting the kubelet of the node %s" % (node))
                runcommand.run("oc debug node/" + node + " -- chroot /host systemctl restart kubelet &")
                nodeaction.wait_for_not_ready_status(node, timeout, self.kubecli)
                nodeaction.wait_for_ready_status(node, timeout, self.kubecli)
                logging.info("The kubelet of the node %s has been restarted" % (node))
                logging.info("restart_kubelet_scenario has been successfuly injected!")
            except Exception as e:
                logging.error(
                    "Failed to restart the kubelet of the node. Encountered following " "exception: %s. Test Failed" % (e)
                )
                logging.error("restart_kubelet_scenario injection failed!")
                sys.exit(1)


    # Node scenario to crash the node
    def node_crash_scenario(self, instance_kill_count, node, timeout):
        for _ in range(instance_kill_count):

@@ -13,7 +13,11 @@ class AWS:

    # Get the instance ID of the node
    def get_instance_id(self, node):
        return self.boto_client.describe_instances(Filters=[{"Name": "private-dns-name", "Values": [node]}])[
        instance = self.boto_client.describe_instances(Filters=[{"Name": "private-dns-name", "Values": [node]}])
        if len(instance['Reservations']) == 0:
            node = node[3:].replace('-','.')
            instance = self.boto_client.describe_instances(Filters=[{"Name": "private-ip-address", "Values": [node]}])
        return instance[
            "Reservations"
        ][0]["Instances"][0]["InstanceId"]

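The fallback above assumes AWS private DNS short names of the form ip-10-0-12-34 and derives the private IP used by the second describe_instances filter. A minimal sketch of that conversion, with an illustrative node name:

# Illustrative only: derive the private IP used by the fallback filter.
node = "ip-10-0-12-34"                      # hypothetical private DNS short name
private_ip = node[3:].replace('-', '.')     # strip the "ip-" prefix, dashes become dots
assert private_ip == "10.0.12.34"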
@@ -1,6 +1,6 @@

import time
import yaml
import os
import kraken.invoke.command as runcommand
import logging
import kraken.node_actions.common_node_functions as nodeaction
@@ -17,9 +17,9 @@ class Azure:
        # Acquire a credential object using CLI-based authentication.
        credentials = DefaultAzureCredential()
        logging.info("credential " + str(credentials))
        az_account = runcommand.invoke("az account list -o yaml")
        az_account_yaml = yaml.safe_load(az_account, Loader=yaml.FullLoader)
        subscription_id = az_account_yaml[0]["id"]
        # az_account = runcommand.invoke("az account list -o yaml")
        # az_account_yaml = yaml.safe_load(az_account, Loader=yaml.FullLoader)
        subscription_id = os.getenv("AZURE_SUBSCRIPTION_ID")
        self.compute_client = ComputeManagementClient(credentials, subscription_id)

    # Get the instance ID of the node
@@ -1,6 +1,8 @@
import os
import sys
import time
import logging
import json
import kraken.node_actions.common_node_functions as nodeaction
from kraken.node_actions.abstract_node_scenarios import abstract_node_scenarios
from googleapiclient import discovery
@@ -10,11 +12,19 @@ from krkn_lib.k8s import KrknKubernetes

class GCP:
    def __init__(self):
        try:
            gapp_creds = os.getenv("GOOGLE_APPLICATION_CREDENTIALS")
            with open(gapp_creds, "r") as f:
                f_str = f.read()
            self.project = json.loads(f_str)['project_id']
            #self.project = runcommand.invoke("gcloud config get-value project").split("/n")[0].strip()
            logging.info("project " + str(self.project) + "!")
            credentials = GoogleCredentials.get_application_default()
            self.client = discovery.build("compute", "v1", credentials=credentials, cache_discovery=False)

        self.project = runcommand.invoke("gcloud config get-value project").split("/n")[0].strip()
        logging.info("project " + str(self.project) + "!")
        credentials = GoogleCredentials.get_application_default()
        self.client = discovery.build("compute", "v1", credentials=credentials, cache_discovery=False)
        except Exception as e:
            logging.error("Error on setting up GCP connection: " + str(e))
            sys.exit(1)

    # Get the instance ID of the node
    def get_instance_id(self, node):

@@ -2,6 +2,10 @@ import yaml
import logging
import sys
import time

from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift

from kraken import utils
from kraken.node_actions.aws_node_scenarios import aws_node_scenarios
from kraken.node_actions.general_cloud_node_scenarios import general_node_scenarios
from kraken.node_actions.az_node_scenarios import azure_node_scenarios
@@ -15,7 +19,7 @@ import kraken.cerberus.setup as cerberus
from krkn_lib.k8s import KrknKubernetes
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
from krkn_lib.models.telemetry import ScenarioTelemetry
from krkn_lib.utils.functions import get_yaml_item_value
from krkn_lib.utils.functions import get_yaml_item_value, log_exception

node_general = False

@@ -55,36 +59,50 @@ def get_node_scenario_object(node_scenario, kubecli: KrknKubernetes):

# Run defined scenarios
# krkn_lib
def run(scenarios_list, config, wait_duration, kubecli: KrknKubernetes, telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]):
def run(scenarios_list,
        config,
        wait_duration,
        telemetry: KrknTelemetryOpenshift,
        telemetry_request_id: str) -> (list[str], list[ScenarioTelemetry]):
    scenario_telemetries: list[ScenarioTelemetry] = []
    failed_scenarios = []
    for node_scenario_config in scenarios_list:
        scenario_telemetry = ScenarioTelemetry()
        scenario_telemetry.scenario = node_scenario_config
        scenario_telemetry.startTimeStamp = time.time()
        telemetry.set_parameters_base64(scenario_telemetry, node_scenario_config)
        scenario_telemetry.start_timestamp = time.time()
        parsed_scenario_config = telemetry.set_parameters_base64(scenario_telemetry, node_scenario_config)
        with open(node_scenario_config, "r") as f:
            node_scenario_config = yaml.full_load(f)
            for node_scenario in node_scenario_config["node_scenarios"]:
                node_scenario_object = get_node_scenario_object(node_scenario, kubecli)
                node_scenario_object = get_node_scenario_object(node_scenario, telemetry.kubecli)
                if node_scenario["actions"]:
                    for action in node_scenario["actions"]:
                        start_time = int(time.time())
                        try:
                            inject_node_scenario(action, node_scenario, node_scenario_object, kubecli)
                            inject_node_scenario(action, node_scenario, node_scenario_object, telemetry.kubecli)
                            logging.info("Waiting for the specified duration: %s" % (wait_duration))
                            time.sleep(wait_duration)
                            end_time = int(time.time())
                            cerberus.get_status(config, start_time, end_time)
                            logging.info("")
                        except (RuntimeError, Exception) as e:
                            scenario_telemetry.exitStatus = 1
                            scenario_telemetry.exit_status = 1
                            failed_scenarios.append(node_scenario_config)
                            log_exception(node_scenario_config)
                        else:
                            scenario_telemetry.exitStatus = 0
                            scenario_telemetry.exit_status = 0

        scenario_telemetry.endTimeStamp = time.time()
        scenario_telemetry.end_timestamp = time.time()
        utils.collect_and_put_ocp_logs(telemetry,
                                       parsed_scenario_config,
                                       telemetry_request_id,
                                       int(scenario_telemetry.start_timestamp),
                                       int(scenario_telemetry.end_timestamp))
        utils.populate_cluster_events(scenario_telemetry,
                                      parsed_scenario_config,
                                      telemetry.kubecli,
                                      int(scenario_telemetry.start_timestamp),
                                      int(scenario_telemetry.end_timestamp))
        scenario_telemetries.append(scenario_telemetry)

    return failed_scenarios, scenario_telemetries
@@ -100,6 +118,8 @@ def inject_node_scenario(action, node_scenario, node_scenario_object, kubecli: K
    )
    node_name = get_yaml_item_value(node_scenario, "node_name", "")
    label_selector = get_yaml_item_value(node_scenario, "label_selector", "")
    if action == "node_stop_start_scenario":
        duration = get_yaml_item_value(node_scenario, "duration", 120)
    timeout = get_yaml_item_value(node_scenario, "timeout", 120)
    service = get_yaml_item_value(node_scenario, "service", "")
    ssh_private_key = get_yaml_item_value(
@@ -121,13 +141,15 @@ def inject_node_scenario(action, node_scenario, node_scenario_object, kubecli: K
        elif action == "node_stop_scenario":
            node_scenario_object.node_stop_scenario(run_kill_count, single_node, timeout)
        elif action == "node_stop_start_scenario":
            node_scenario_object.node_stop_start_scenario(run_kill_count, single_node, timeout)
            node_scenario_object.node_stop_start_scenario(run_kill_count, single_node, timeout, duration)
        elif action == "node_termination_scenario":
            node_scenario_object.node_termination_scenario(run_kill_count, single_node, timeout)
        elif action == "node_reboot_scenario":
            node_scenario_object.node_reboot_scenario(run_kill_count, single_node, timeout)
        elif action == "stop_start_kubelet_scenario":
            node_scenario_object.stop_start_kubelet_scenario(run_kill_count, single_node, timeout)
        elif action == "restart_kubelet_scenario":
            node_scenario_object.restart_kubelet_scenario(run_kill_count, single_node, timeout)
        elif action == "stop_kubelet_scenario":
            node_scenario_object.stop_kubelet_scenario(run_kill_count, single_node, timeout)
        elif action == "node_crash_scenario":

@@ -2,13 +2,18 @@ import dataclasses
import json
import logging
from os.path import abspath
from typing import List, Dict
from typing import List, Dict, Any
import time

from arcaflow_plugin_sdk import schema, serialization, jsonschema
from arcaflow_plugin_kill_pod import kill_pods, wait_for_pods
from krkn_lib.k8s import KrknKubernetes
from krkn_lib.k8s.pods_monitor_pool import PodsMonitorPool
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift

import kraken.plugins.node_scenarios.vmware_plugin as vmware_plugin
import kraken.plugins.node_scenarios.ibmcloud_plugin as ibmcloud_plugin
from kraken import utils
from kraken.plugins.run_python_plugin import run_python_file
from kraken.plugins.network.ingress_shaping import network_chaos
from kraken.plugins.pod_network_outage.pod_network_outage_plugin import pod_outage
@@ -47,11 +52,14 @@ class Plugins:
            )
        self.steps_by_id[step.schema.id] = step

    def run(self, file: str, kubeconfig_path: str, kraken_config: str):
    def unserialize_scenario(self, file: str) -> Any:
        return serialization.load_from_file(abspath(file))

    def run(self, file: str, kubeconfig_path: str, kraken_config: str, run_uuid:str):
        """
        Run executes a series of steps
        """
        data = serialization.load_from_file(abspath(file))
        data = self.unserialize_scenario(abspath(file))
        if not isinstance(data, list):
            raise Exception(
                "Invalid scenario configuration file: {} expected list, found {}".format(file, type(data).__name__)
@@ -96,7 +104,8 @@ class Plugins:
                unserialized_input.kubeconfig_path = kubeconfig_path
            if "kraken_config" in step.schema.input.properties:
                unserialized_input.kraken_config = kraken_config
            output_id, output_data = step.schema(unserialized_input)
            output_id, output_data = step.schema(params=unserialized_input, run_id=run_uuid)

            logging.info(step.render_output(output_id, output_data) + "\n")
            if output_id in step.error_output_ids:
                raise Exception(
@@ -213,6 +222,12 @@ PLUGINS = Plugins(
                "error"
            ]
        ),
        PluginStep(
            network_chaos,
            [
                "error"
            ]
        ),
        PluginStep(
            pod_outage,
            [
@@ -235,25 +250,83 @@ PLUGINS = Plugins(
)


def run(scenarios: List[str], kubeconfig_path: str, kraken_config: str, failed_post_scenarios: List[str], wait_duration: int, telemetry: KrknTelemetryKubernetes) -> (List[str], list[ScenarioTelemetry]):
def run(scenarios: List[str],
        kraken_config: str,
        failed_post_scenarios: List[str],
        wait_duration: int,
        telemetry: KrknTelemetryOpenshift,
        run_uuid: str,
        telemetry_request_id: str,
        ) -> (List[str], list[ScenarioTelemetry]):

    scenario_telemetries: list[ScenarioTelemetry] = []
    for scenario in scenarios:
        scenario_telemetry = ScenarioTelemetry()
        scenario_telemetry.scenario = scenario
        scenario_telemetry.startTimeStamp = time.time()
        telemetry.set_parameters_base64(scenario_telemetry, scenario)
        scenario_telemetry.start_timestamp = time.time()
        parsed_scenario_config = telemetry.set_parameters_base64(scenario_telemetry, scenario)
        logging.info('scenario ' + str(scenario))
        pool = PodsMonitorPool(telemetry.kubecli)
        kill_scenarios = [kill_scenario for kill_scenario in PLUGINS.unserialize_scenario(scenario) if kill_scenario["id"] == "kill-pods"]

        try:
            PLUGINS.run(scenario, kubeconfig_path, kraken_config)
            start_monitoring(pool, kill_scenarios)
            PLUGINS.run(scenario, telemetry.kubecli.get_kubeconfig_path(), kraken_config, run_uuid)
            result = pool.join()
            scenario_telemetry.affected_pods = result
            if result.error:
                raise Exception(f"unrecovered pods: {result.error}")

        except Exception as e:
            scenario_telemetry.exitStatus = 1
            logging.error(f"scenario exception: {str(e)}")
            scenario_telemetry.exit_status = 1
            pool.cancel()
            failed_post_scenarios.append(scenario)
            log_exception(scenario)
        else:
            scenario_telemetry.exitStatus = 0
            scenario_telemetry.exit_status = 0
            logging.info("Waiting for the specified duration: %s" % (wait_duration))
            time.sleep(wait_duration)
        scenario_telemetry.end_timestamp = time.time()
        utils.collect_and_put_ocp_logs(telemetry,
                                       parsed_scenario_config,
                                       telemetry_request_id,
                                       int(scenario_telemetry.start_timestamp),
                                       int(scenario_telemetry.end_timestamp))
        utils.populate_cluster_events(scenario_telemetry,
                                      parsed_scenario_config,
                                      telemetry.kubecli,
                                      int(scenario_telemetry.start_timestamp),
                                      int(scenario_telemetry.end_timestamp))

        scenario_telemetries.append(scenario_telemetry)
        scenario_telemetry.endTimeStamp = time.time()

    return failed_post_scenarios, scenario_telemetries


def start_monitoring(pool: PodsMonitorPool, scenarios: list[Any]):
    for kill_scenario in scenarios:
        recovery_time = kill_scenario["config"]["krkn_pod_recovery_time"]
        if ("namespace_pattern" in kill_scenario["config"] and
                "label_selector" in kill_scenario["config"]):
            namespace_pattern = kill_scenario["config"]["namespace_pattern"]
            label_selector = kill_scenario["config"]["label_selector"]
            pool.select_and_monitor_by_namespace_pattern_and_label(
                namespace_pattern=namespace_pattern,
                label_selector=label_selector,
                max_timeout=recovery_time)
            logging.info(
                f"waiting {recovery_time} seconds for pod recovery, "
                f"pod label selector: {label_selector} namespace pattern: {namespace_pattern}")

        elif ("namespace_pattern" in kill_scenario["config"] and
                "name_pattern" in kill_scenario["config"]):
            namespace_pattern = kill_scenario["config"]["namespace_pattern"]
            name_pattern = kill_scenario["config"]["name_pattern"]
            pool.select_and_monitor_by_name_pattern_and_namespace_pattern(pod_name_pattern=name_pattern,
                                                                          namespace_pattern=namespace_pattern,
                                                                          max_timeout=recovery_time)
            logging.info(f"waiting {recovery_time} seconds for pod recovery, "
                         f"pod name pattern: {name_pattern} namespace pattern: {namespace_pattern}")
        else:
            raise Exception(f"impossible to determine monitor parameters, check {kill_scenario} configuration")

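For reference, a kill-pods entry that start_monitoring above can consume might look like the following; the values are illustrative only, while the keys mirror the ones read by the function:

# Illustrative kill-pods scenario entry, matching the keys start_monitoring() reads.
kill_scenario_example = {
    "id": "kill-pods",
    "config": {
        "namespace_pattern": "^openshift-etcd$",
        "label_selector": "k8s-app=etcd",
        "krkn_pod_recovery_time": 120,
    },
}
# A name-pattern based selection would instead carry "name_pattern" in place of "label_selector".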
@@ -62,7 +62,7 @@ class NetworkScenarioConfig:
        typing.Optional[int],
        validation.min(1)
    ] = field(
        default=300,
        default=30,
        metadata={
            "name": "Wait Duration",
            "description":
@@ -864,7 +864,7 @@ def network_chaos(cfg: NetworkScenarioConfig) -> typing.Tuple[
            )
        logging.info("Waiting for parallel job to finish")
        start_time = int(time.time())
        wait_for_job(batch_cli, job_list[:], cfg.wait_duration)
        wait_for_job(batch_cli, job_list[:], cfg.test_duration+100)
        end_time = int(time.time())
        if publish:
            cerberus.publish_kraken_status(
@@ -893,7 +893,7 @@ def network_chaos(cfg: NetworkScenarioConfig) -> typing.Tuple[
            )
            logging.info("Waiting for serial job to finish")
            start_time = int(time.time())
            wait_for_job(batch_cli, job_list[:], cfg.wait_duration)
            wait_for_job(batch_cli, job_list[:], cfg.test_duration+100)
            logging.info("Deleting jobs")
            delete_jobs(cli, batch_cli, job_list[:])
            job_list = []

@@ -119,11 +119,11 @@ class vSphere:
        vm = self.get_vm(instance_id)
        try:
            self.client.vcenter.vm.Power.stop(vm)
            logging.info("Stopped VM -- '{}-({})'", instance_id, vm)
            logging.info(f"Stopped VM -- '{instance_id}-({vm})'")
            return True
        except AlreadyInDesiredState:
            logging.info(
                "VM '{}'-'({})' is already Powered Off", instance_id, vm
                f"VM '{instance_id}'-'({vm})' is already Powered Off"
            )
            return False

@@ -136,11 +136,11 @@ class vSphere:
        vm = self.get_vm(instance_id)
        try:
            self.client.vcenter.vm.Power.start(vm)
            logging.info("Started VM -- '{}-({})'", instance_id, vm)
            logging.info(f"Started VM -- '{instance_id}-({vm})'")
            return True
        except AlreadyInDesiredState:
            logging.info(
                "VM '{}'-'({})' is already Powered On", instance_id, vm
                f"VM '{instance_id}'-'({vm})' is already Powered On"
            )
            return False

@@ -318,12 +318,12 @@ class vSphere:
        try:
            vm = self.get_vm(instance_id)
            state = self.client.vcenter.vm.Power.get(vm).state
            logging.info("Check instance %s status", instance_id)
            logging.info(f"Check instance {instance_id} status")
            return state
        except Exception as e:
            logging.error(
                "Failed to get node instance status %s. Encountered following "
                "exception: %s.", instance_id, e
                f"Failed to get node instance status {instance_id}. Encountered following "
                f"exception: {str(e)}. "
            )
            return None

@@ -338,16 +338,14 @@ class vSphere:
        while vm is not None:
            vm = self.get_vm(instance_id)
            logging.info(
                "VM %s is still being deleted, "
                "sleeping for 5 seconds",
                instance_id
                f"VM {instance_id} is still being deleted, "
                f"sleeping for 5 seconds"
            )
            time.sleep(5)
            time_counter += 5
            if time_counter >= timeout:
                logging.info(
                    "VM %s is still not deleted in allotted time",
                    instance_id
                    f"VM {instance_id} is still not deleted in allotted time"
                )
                return False
        return True
@@ -371,8 +369,7 @@ class vSphere:
            time_counter += 5
            if time_counter >= timeout:
                logging.info(
                    "VM %s is still not ready in allotted time",
                    instance_id
                    f"VM {instance_id} is still not ready in allotted time"
                )
                return False
        return True
@@ -388,16 +385,14 @@ class vSphere:
        while status != Power.State.POWERED_OFF:
            status = self.get_vm_status(instance_id)
            logging.info(
                "VM %s is still not running, "
                "sleeping for 5 seconds",
                instance_id
                f"VM {instance_id} is still not running, "
                f"sleeping for 5 seconds"
            )
            time.sleep(5)
            time_counter += 5
            if time_counter >= timeout:
                logging.info(
                    "VM %s is still not ready in allotted time",
                    instance_id
                    f"VM {instance_id} is still not ready in allotted time"
                )
                return False
        return True
@@ -561,7 +556,7 @@ def node_start(
    try:
        for _ in range(cfg.runs):
            logging.info("Starting node_start_scenario injection")
            logging.info("Starting the node %s ", name)
            logging.info(f"Starting the node {name} ")
            vm_started = vsphere.start_instances(name)
            if vm_started:
                vsphere.wait_until_running(name, cfg.timeout)
@@ -571,7 +566,7 @@ def node_start(
                )
            nodes_started[int(time.time_ns())] = Node(name=name)
            logging.info(
                "Node with instance ID: %s is in running state", name
                f"Node with instance ID: {name} is in running state"
            )
            logging.info(
                "node_start_scenario has been successfully injected!"
@@ -579,8 +574,8 @@ def node_start(
    except Exception as e:
        logging.error("Failed to start node instance. Test Failed")
        logging.error(
            "node_start_scenario injection failed! "
            "Error was: %s", str(e)
            f"node_start_scenario injection failed! "
            f"Error was: {str(e)}"
        )
        return "error", NodeScenarioErrorOutput(
            format_exc(), kube_helper.Actions.START
@@ -620,7 +615,7 @@ def node_stop(
    try:
        for _ in range(cfg.runs):
            logging.info("Starting node_stop_scenario injection")
            logging.info("Stopping the node %s ", name)
            logging.info(f"Stopping the node {name} ")
            vm_stopped = vsphere.stop_instances(name)
            if vm_stopped:
                vsphere.wait_until_stopped(name, cfg.timeout)
@@ -630,7 +625,7 @@ def node_stop(
                )
            nodes_stopped[int(time.time_ns())] = Node(name=name)
            logging.info(
                "Node with instance ID: %s is in stopped state", name
                f"Node with instance ID: {name} is in stopped state"
            )
            logging.info(
                "node_stop_scenario has been successfully injected!"
@@ -638,8 +633,8 @@ def node_stop(
    except Exception as e:
        logging.error("Failed to stop node instance. Test Failed")
        logging.error(
            "node_stop_scenario injection failed! "
            "Error was: %s", str(e)
            f"node_stop_scenario injection failed! "
            f"Error was: {str(e)}"
        )
        return "error", NodeScenarioErrorOutput(
            format_exc(), kube_helper.Actions.STOP
@@ -679,7 +674,7 @@ def node_reboot(
    try:
        for _ in range(cfg.runs):
            logging.info("Starting node_reboot_scenario injection")
            logging.info("Rebooting the node %s ", name)
            logging.info(f"Rebooting the node {name} ")
            vsphere.reboot_instances(name)
            if not cfg.skip_openshift_checks:
                kube_helper.wait_for_unknown_status(
@@ -690,8 +685,8 @@ def node_reboot(
                )
            nodes_rebooted[int(time.time_ns())] = Node(name=name)
            logging.info(
                "Node with instance ID: %s has rebooted "
                "successfully", name
                f"Node with instance ID: {name} has rebooted "
                "successfully"
            )
            logging.info(
                "node_reboot_scenario has been successfully injected!"
@@ -699,8 +694,8 @@ def node_reboot(
    except Exception as e:
        logging.error("Failed to reboot node instance. Test Failed")
        logging.error(
            "node_reboot_scenario injection failed! "
            "Error was: %s", str(e)
            f"node_reboot_scenario injection failed! "
            f"Error was: {str(e)}"
        )
        return "error", NodeScenarioErrorOutput(
            format_exc(), kube_helper.Actions.REBOOT
@@ -739,13 +734,13 @@ def node_terminate(
            vsphere.stop_instances(name)
            vsphere.wait_until_stopped(name, cfg.timeout)
            logging.info(
                "Releasing the node with instance ID: %s ", name
                f"Releasing the node with instance ID: {name} "
            )
            vsphere.release_instances(name)
            vsphere.wait_until_released(name, cfg.timeout)
            nodes_terminated[int(time.time_ns())] = Node(name=name)
            logging.info(
                "Node with instance ID: %s has been released", name
                f"Node with instance ID: {name} has been released"
            )
            logging.info(
                "node_terminate_scenario has been "
@@ -754,8 +749,8 @@ def node_terminate(
    except Exception as e:
        logging.error("Failed to terminate node instance. Test Failed")
        logging.error(
            "node_terminate_scenario injection failed! "
            "Error was: %s", str(e)
            f"node_terminate_scenario injection failed! "
            f"Error was: {str(e)}"
        )
        return "error", NodeScenarioErrorOutput(
            format_exc(), kube_helper.Actions.TERMINATE

@@ -1,17 +1,23 @@
import logging
import time
from typing import Any

import yaml
import sys
import random
import arcaflow_plugin_kill_pod
from krkn_lib.k8s.pods_monitor_pool import PodsMonitorPool
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift

import kraken.cerberus.setup as cerberus
import kraken.post_actions.actions as post_actions
from krkn_lib.k8s import KrknKubernetes
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
from krkn_lib.models.telemetry import ScenarioTelemetry
from arcaflow_plugin_sdk import serialization
from krkn_lib.utils.functions import get_yaml_item_value, log_exception

from kraken import utils


# Run pod based scenarios
def run(kubeconfig_path, scenarios_list, config, failed_post_scenarios, wait_duration):
@@ -69,45 +75,41 @@ def run(kubeconfig_path, scenarios_list, config, failed_post_scenarios, wait_dur


# krkn_lib
def container_run(kubeconfig_path,
def container_run(
                  scenarios_list,
                  config,
                  failed_post_scenarios,
                  wait_duration,
                  kubecli: KrknKubernetes,
                  telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]):
                  telemetry: KrknTelemetryOpenshift,
                  telemetry_request_id: str
                  ) -> (list[str], list[ScenarioTelemetry]):

    failed_scenarios = []
    scenario_telemetries: list[ScenarioTelemetry] = []
    pool = PodsMonitorPool(telemetry.kubecli)

    for container_scenario_config in scenarios_list:
        scenario_telemetry = ScenarioTelemetry()
        scenario_telemetry.scenario = container_scenario_config[0]
        scenario_telemetry.startTimeStamp = time.time()
        telemetry.set_parameters_base64(scenario_telemetry, container_scenario_config[0])
        scenario_telemetry.start_timestamp = time.time()
        parsed_scenario_config = telemetry.set_parameters_base64(scenario_telemetry, container_scenario_config[0])
        if len(container_scenario_config) > 1:
            pre_action_output = post_actions.run(kubeconfig_path, container_scenario_config[1])
            pre_action_output = post_actions.run(telemetry.kubecli.get_kubeconfig_path(), container_scenario_config[1])
        else:
            pre_action_output = ""
        with open(container_scenario_config[0], "r") as f:
            cont_scenario_config = yaml.full_load(f)
            start_monitoring(kill_scenarios=cont_scenario_config["scenarios"], pool=pool)
            for cont_scenario in cont_scenario_config["scenarios"]:
                # capture start time
                start_time = int(time.time())
                try:
                    killed_containers = container_killing_in_pod(cont_scenario, kubecli)
                    if len(container_scenario_config) > 1:
                        failed_post_scenarios = post_actions.check_recovery(
                            kubeconfig_path,
                            container_scenario_config,
                            failed_post_scenarios,
                            pre_action_output
                        )
                    else:
                        failed_post_scenarios = check_failed_containers(
                            killed_containers, cont_scenario.get("retry_wait", 120), kubecli
                        )

                    killed_containers = container_killing_in_pod(cont_scenario, telemetry.kubecli)
                    logging.info(f"killed containers: {str(killed_containers)}")
                    result = pool.join()
                    if result.error:
                        raise Exception(f"pods failed to recovery: {result.error}")
                    scenario_telemetry.affected_pods = result
                    logging.info("Waiting for the specified duration: %s" % (wait_duration))
                    time.sleep(wait_duration)

@@ -117,18 +119,39 @@ def container_run(kubeconfig_path,
                    # publish cerberus status
                    cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
                except (RuntimeError, Exception):
                    pool.cancel()
                    failed_scenarios.append(container_scenario_config[0])
                    log_exception(container_scenario_config[0])
                    scenario_telemetry.exitStatus = 1
                    scenario_telemetry.exit_status = 1
                    # removed_exit
                    # sys.exit(1)
                else:
                    scenario_telemetry.exitStatus = 0
        scenario_telemetry.endTimeStamp = time.time()
                    scenario_telemetry.exit_status = 0
        scenario_telemetry.end_timestamp = time.time()
        utils.populate_cluster_events(scenario_telemetry,
                                      parsed_scenario_config,
                                      telemetry.kubecli,
                                      int(scenario_telemetry.start_timestamp),
                                      int(scenario_telemetry.end_timestamp))
        utils.collect_and_put_ocp_logs(telemetry,
                                       parsed_scenario_config,
                                       telemetry_request_id,
                                       int(scenario_telemetry.start_timestamp),
                                       int(scenario_telemetry.end_timestamp))
        scenario_telemetries.append(scenario_telemetry)

    return failed_scenarios, scenario_telemetries

def start_monitoring(kill_scenarios: list[Any], pool: PodsMonitorPool):
    for kill_scenario in kill_scenarios:
        namespace_pattern = f"^{kill_scenario['namespace']}$"
        label_selector = kill_scenario["label_selector"]
        recovery_time = kill_scenario["expected_recovery_time"]
        pool.select_and_monitor_by_namespace_pattern_and_label(
            namespace_pattern=namespace_pattern,
            label_selector=label_selector,
            max_timeout=recovery_time)

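A container scenario entry feeding the start_monitoring helper above could look like the following; the values are illustrative, while the keys (namespace, label_selector, expected_recovery_time) mirror the ones read by the function:

# Illustrative container-kill scenario entry, matching the keys read by start_monitoring().
container_scenario_example = {
    "name": "kill etcd container",
    "namespace": "openshift-etcd",
    "label_selector": "k8s-app=etcd",
    "expected_recovery_time": 60,
}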
def container_killing_in_pod(cont_scenario, kubecli: KrknKubernetes):
    scenario_name = get_yaml_item_value(cont_scenario, "name", "")

@@ -1,13 +1,30 @@
from __future__ import annotations

import datetime
import os.path
from typing import Optional, List, Dict, Any

import urllib3
import logging
import sys

import yaml
from krkn_lib.elastic.krkn_elastic import KrknElastic
from krkn_lib.models.elastic.models import ElasticAlert
from krkn_lib.models.krkn import ChaosRunAlertSummary, ChaosRunAlert
from krkn_lib.prometheus.krkn_prometheus import KrknPrometheus


urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def alerts(prom_cli: KrknPrometheus, start_time, end_time, alert_profile):
def alerts(prom_cli: KrknPrometheus,
           elastic: KrknElastic,
           run_uuid,
           start_time,
           end_time,
           alert_profile,
           elastic_collect_alerts,
           elastic_alerts_index
           ):

    if alert_profile is None or os.path.exists(alert_profile) is False:
        logging.error(f"{alert_profile} alert profile does not exist")
@@ -17,7 +34,7 @@ def alerts(prom_cli: KrknPrometheus, start_time, end_time, alert_profile):
        profile_yaml = yaml.safe_load(profile)
        if not isinstance(profile_yaml, list):
            logging.error(f"{alert_profile} wrong file format, alert profile must be "
                          f"a valid yaml file containing a list of items with 3 properties: "
                          f"a valid yaml file containing a list of items with at least 3 properties: "
                          f"expr, description, severity" )
            sys.exit(1)

@@ -25,6 +42,126 @@ def alerts(prom_cli: KrknPrometheus, start_time, end_time, alert_profile):
            if list(alert.keys()).sort() != ["expr", "description", "severity"].sort():
                logging.error(f"wrong alert {alert}, skipping")

            prom_cli.process_alert(alert,
            processed_alert = prom_cli.process_alert(alert,
                                   datetime.datetime.fromtimestamp(start_time),
                                   datetime.datetime.fromtimestamp(end_time))
                                   datetime.datetime.fromtimestamp(end_time))
            if processed_alert[0] and processed_alert[1] and elastic and elastic_collect_alerts:
                elastic_alert = ElasticAlert(run_uuid=run_uuid,
                                             severity=alert["severity"],
                                             alert=processed_alert[1],
                                             created_at=datetime.datetime.fromtimestamp(processed_alert[0])
                                             )
                result = elastic.push_alert(elastic_alert, elastic_alerts_index)
                if result == -1:
                    logging.error("failed to save alert on ElasticSearch")
            pass
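The alert profile consumed above is a YAML list of expr/description/severity entries; an equivalent entry, with illustrative values, expressed as a Python structure:

# Illustrative alert-profile entry with the three required keys.
alert_profile_entry = {
    "expr": "increase(kube_pod_container_status_restarts_total[10m]) > 5",
    "description": "Container restarted more than 5 times in the last 10 minutes",
    "severity": "warning",
}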
def critical_alerts(prom_cli: KrknPrometheus,
                    summary: ChaosRunAlertSummary,
                    run_id,
                    scenario,
                    start_time,
                    end_time):
    summary.scenario = scenario
    summary.run_id = run_id
    query = r"""ALERTS{severity="critical"}"""
    logging.info("Checking for critical alerts firing post chaos")

    during_critical_alerts = prom_cli.process_prom_query_in_range(
        query,
        start_time=datetime.datetime.fromtimestamp(start_time),
        end_time=end_time

    )

    for alert in during_critical_alerts:
        if "metric" in alert:
            alertname = alert["metric"]["alertname"] if "alertname" in alert["metric"] else "none"
            alertstate = alert["metric"]["alertstate"] if "alertstate" in alert["metric"] else "none"
            namespace = alert["metric"]["namespace"] if "namespace" in alert["metric"] else "none"
            severity = alert["metric"]["severity"] if "severity" in alert["metric"] else "none"
            alert = ChaosRunAlert(alertname, alertstate, namespace, severity)
            summary.chaos_alerts.append(alert)


    post_critical_alerts = prom_cli.process_query(
        query
    )

    for alert in post_critical_alerts:
        if "metric" in alert:
            alertname = alert["metric"]["alertname"] if "alertname" in alert["metric"] else "none"
            alertstate = alert["metric"]["alertstate"] if "alertstate" in alert["metric"] else "none"
            namespace = alert["metric"]["namespace"] if "namespace" in alert["metric"] else "none"
            severity = alert["metric"]["severity"] if "severity" in alert["metric"] else "none"
            alert = ChaosRunAlert(alertname, alertstate, namespace, severity)
            summary.post_chaos_alerts.append(alert)

    during_critical_alerts_count = len(during_critical_alerts)
    post_critical_alerts_count = len(post_critical_alerts)
    firing_alerts = False

    if during_critical_alerts_count > 0:
        firing_alerts = True

    if post_critical_alerts_count > 0:
        firing_alerts = True

    if not firing_alerts:
        logging.info("No critical alerts are firing!!")


def metrics(prom_cli: KrknPrometheus,
            elastic: KrknElastic,
            run_uuid,
            start_time,
            end_time,
            metrics_profile,
            elastic_collect_metrics,
            elastic_metrics_index
            ) -> list[dict[str, list[(int, float)] | str]]:
    metrics_list: list[dict[str, list[(int, float)] | str]] = []
    if metrics_profile is None or os.path.exists(metrics_profile) is False:
        logging.error(f"{metrics_profile} alert profile does not exist")
        sys.exit(1)
    with open(metrics_profile) as profile:
        profile_yaml = yaml.safe_load(profile)
        if not profile_yaml["metrics"] or not isinstance(profile_yaml["metrics"], list):
            logging.error(f"{metrics_profile} wrong file format, alert profile must be "
                          f"a valid yaml file containing a list of items with 3 properties: "
                          f"expr, description, severity" )
            sys.exit(1)

        for metric_query in profile_yaml["metrics"]:
            if list(metric_query.keys()).sort() != ["query", "metricName", "instant"].sort():
                logging.error(f"wrong alert {metric_query}, skipping")
            metrics_result = prom_cli.process_prom_query_in_range(
                metric_query["query"],
                start_time=datetime.datetime.fromtimestamp(start_time),
                end_time=datetime.datetime.fromtimestamp(end_time)

            )

            metric = {"name": metric_query["metricName"], "values":[]}
            for returned_metric in metrics_result:
                if "values" in returned_metric:
                    for value in returned_metric["values"]:
                        try:
                            metric["values"].append((value[0], float(value[1])))
                        except ValueError:
                            pass
                    metrics_list.append(metric)

        if elastic_collect_metrics and elastic:
            result = elastic.upload_metrics_to_elasticsearch(run_uuid=run_uuid, index=elastic_metrics_index, raw_data=metrics_list)
            if result == -1:
                logging.error("failed to save metrics on ElasticSearch")


    return metrics_list
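The metrics profile read above expects a top-level metrics list whose items carry query, metricName and instant keys; an equivalent entry, with illustrative values, as a Python structure:

# Illustrative metrics-profile entry, matching the keys checked by metrics().
metrics_profile_entry = {
    "query": "sum(rate(container_cpu_usage_seconds_total[2m]))",
    "metricName": "cluster_cpu_usage",
    "instant": False,
}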
@@ -3,15 +3,21 @@ import random
import re
import time
import yaml
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift

from .. import utils
from ..cerberus import setup as cerberus
from krkn_lib.k8s import KrknKubernetes
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
from krkn_lib.models.telemetry import ScenarioTelemetry
from krkn_lib.utils.functions import get_yaml_item_value, log_exception


# krkn_lib
def run(scenarios_list, config, kubecli: KrknKubernetes, telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]):
def run(scenarios_list,
        config,
        wait_duration,
        telemetry: KrknTelemetryOpenshift,
        telemetry_request_id: str) -> (list[str], list[ScenarioTelemetry]):
    """
    Reads the scenario config and creates a temp file to fill up the PVC
    """
@@ -21,8 +27,8 @@ def run(scenarios_list, config, kubecli: KrknKubernetes, telemetry: KrknTelemetr
    for app_config in scenarios_list:
        scenario_telemetry = ScenarioTelemetry()
        scenario_telemetry.scenario = app_config
        scenario_telemetry.startTimeStamp = time.time()
        telemetry.set_parameters_base64(scenario_telemetry, app_config)
        scenario_telemetry.start_timestamp = time.time()
        parsed_scenario_config = telemetry.set_parameters_base64(scenario_telemetry, app_config)
        try:
            if len(app_config) > 1:
                with open(app_config, "r") as f:
@@ -85,7 +91,7 @@ def run(scenarios_list, config, kubecli: KrknKubernetes, telemetry: KrknTelemetr
                        "pod_name '%s' will be overridden with one of "
                        "the pods mounted in the PVC" % (str(pod_name))
                    )
                    pvc = kubecli.get_pvc_info(pvc_name, namespace)
                    pvc = telemetry.kubecli.get_pvc_info(pvc_name, namespace)
                    try:
                        # random generator not used for
                        # security/cryptographic purposes.
@@ -100,7 +106,7 @@ def run(scenarios_list, config, kubecli: KrknKubernetes, telemetry: KrknTelemetr
                        raise RuntimeError()

                # Get volume name
                pod = kubecli.get_pod_info(name=pod_name, namespace=namespace)
                pod = telemetry.kubecli.get_pod_info(name=pod_name, namespace=namespace)

                if pod is None:
                    logging.error(
@@ -117,7 +123,7 @@ def run(scenarios_list, config, kubecli: KrknKubernetes, telemetry: KrknTelemetr
                    if volume.pvcName is not None:
                        volume_name = volume.name
                        pvc_name = volume.pvcName
                        pvc = kubecli.get_pvc_info(pvc_name, namespace)
                        pvc = telemetry.kubecli.get_pvc_info(pvc_name, namespace)
                        break
                if 'pvc' not in locals():
                    logging.error(
@@ -144,7 +150,7 @@ def run(scenarios_list, config, kubecli: KrknKubernetes, telemetry: KrknTelemetr
                # Get PVC capacity and used bytes
                command = "df %s -B 1024 | sed 1d" % (str(mount_path))
                command_output = (
                    kubecli.exec_cmd_in_pod(
                    telemetry.kubecli.exec_cmd_in_pod(
                        command,
                        pod_name,
                        namespace,
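The `df -B 1024 | sed 1d` output captured above is a single data line whose second and third columns are the 1K-block capacity and usage; a minimal parsing sketch with an illustrative sample line (this is an assumption about the shape of the output, not the exact parsing used by the scenario):

# Illustrative only: parse one line of `df <path> -B 1024 | sed 1d` output.
sample = "/dev/sda1 10485760 524288 9961472 5% /var/lib/data"
fields = sample.split()
pvc_capacity_kb = int(fields[1])   # total 1K blocks
pvc_used_kb = int(fields[2])       # used 1K blocks
print(pvc_capacity_kb, pvc_used_kb)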
@@ -206,7 +212,7 @@ def run(scenarios_list, config, kubecli: KrknKubernetes, telemetry: KrknTelemetr
                logging.debug(
                    "Create temp file in the PVC command:\n %s" % command
                )
                kubecli.exec_cmd_in_pod(
                telemetry.kubecli.exec_cmd_in_pod(
                    command,
                    pod_name,
                    namespace,
@@ -216,7 +222,7 @@ def run(scenarios_list, config, kubecli: KrknKubernetes, telemetry: KrknTelemetr
                # Check if file is created
                command = "ls -lh %s" % (str(mount_path))
                logging.debug("Check file is created command:\n %s" % command)
                response = kubecli.exec_cmd_in_pod(
                response = telemetry.kubecli.exec_cmd_in_pod(
                    command, pod_name, namespace, container_name
                )
                logging.info("\n" + str(response))
@@ -238,7 +244,7 @@ def run(scenarios_list, config, kubecli: KrknKubernetes, telemetry: KrknTelemetr
                        container_name,
                        mount_path,
                        file_size_kb,
                        kubecli
                        telemetry.kubecli
                    )
                    # sys.exit(1)
                    raise RuntimeError()
@@ -275,14 +281,14 @@ def run(scenarios_list, config, kubecli: KrknKubernetes, telemetry: KrknTelemetr
                logging.debug(
                    "Create temp file in the PVC command:\n %s" % command
                )
                kubecli.exec_cmd_in_pod(
                telemetry.kubecli.exec_cmd_in_pod(
                    command, pod_name, namespace, container_name
                )

                # Check if file is created
                command = "ls -lh %s" % (str(mount_path))
                logging.debug("Check file is created command:\n %s" % command)
                response = kubecli.exec_cmd_in_pod(
                response = telemetry.kubecli.exec_cmd_in_pod(
                    command, pod_name, namespace, container_name
                )
                logging.info("\n" + str(response))
@@ -303,9 +309,11 @@ def run(scenarios_list, config, kubecli: KrknKubernetes, telemetry: KrknTelemetr
                        container_name,
                        mount_path,
                        file_size_kb,
                        kubecli
                        telemetry.kubecli
                    )

                logging.info("End of scenario. Waiting for the specified duration: %s" % (wait_duration))
                time.sleep(wait_duration)

                end_time = int(time.time())
                cerberus.publish_kraken_status(
                    config,
@@ -314,11 +322,23 @@ def run(scenarios_list, config, kubecli: KrknKubernetes, telemetry: KrknTelemetr
                    end_time
                )
        except (RuntimeError, Exception):
            scenario_telemetry.exitStatus = 1
            scenario_telemetry.exit_status = 1
            failed_scenarios.append(app_config)
            log_exception(app_config)
        else:
            scenario_telemetry.exitStatus = 0
            scenario_telemetry.exit_status = 0

        scenario_telemetry.end_timestamp = time.time()
        utils.collect_and_put_ocp_logs(telemetry,
                                       parsed_scenario_config,
                                       telemetry_request_id,
                                       int(scenario_telemetry.start_timestamp),
                                       int(scenario_telemetry.end_timestamp))
        utils.populate_cluster_events(scenario_telemetry,
                                      parsed_scenario_config,
                                      telemetry.kubecli,
                                      int(scenario_telemetry.start_timestamp),
                                      int(scenario_telemetry.end_timestamp))
        scenario_telemetries.append(scenario_telemetry)

    return failed_scenarios, scenario_telemetries

@@ -1,14 +1,18 @@
import time
import random
import logging

from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift

import kraken.cerberus.setup as cerberus
import kraken.post_actions.actions as post_actions
import yaml
from krkn_lib.k8s import KrknKubernetes
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
from krkn_lib.models.telemetry import ScenarioTelemetry
from krkn_lib.utils.functions import get_yaml_item_value, log_exception

from kraken import utils


def delete_objects(kubecli, namespace):

@@ -156,20 +160,19 @@ def run(
    config,
    wait_duration,
    failed_post_scenarios,
    kubeconfig_path,
    kubecli: KrknKubernetes,
    telemetry: KrknTelemetryKubernetes
    telemetry: KrknTelemetryOpenshift,
    telemetry_request_id: str
) -> (list[str], list[ScenarioTelemetry]):
    scenario_telemetries: list[ScenarioTelemetry] = []
    failed_scenarios = []
    for scenario_config in scenarios_list:
        scenario_telemetry = ScenarioTelemetry()
        scenario_telemetry.scenario = scenario_config[0]
        scenario_telemetry.startTimeStamp = time.time()
        telemetry.set_parameters_base64(scenario_telemetry, scenario_config[0])
        scenario_telemetry.start_timestamp = time.time()
        parsed_scenario_config = telemetry.set_parameters_base64(scenario_telemetry, scenario_config[0])
        try:
            if len(scenario_config) > 1:
                pre_action_output = post_actions.run(kubeconfig_path, scenario_config[1])
                pre_action_output = post_actions.run(telemetry.kubecli.get_kubeconfig_path(), scenario_config[1])
            else:
                pre_action_output = ""
            with open(scenario_config[0], "r") as f:
@@ -206,7 +209,7 @@ def run(
                start_time = int(time.time())
                for i in range(run_count):
                    killed_namespaces = {}
                    namespaces = kubecli.check_namespaces([scenario_namespace], scenario_label)
                    namespaces = telemetry.kubecli.check_namespaces([scenario_namespace], scenario_label)
                    for j in range(delete_count):
                        if len(namespaces) == 0:
                            logging.error(
@@ -220,7 +223,7 @@ def run(
                        logging.info('Delete objects in selected namespace: ' + selected_namespace )
                        try:
                            # delete all pods in namespace
                            objects = delete_objects(kubecli,selected_namespace)
                            objects = delete_objects(telemetry.kubecli,selected_namespace)
                            killed_namespaces[selected_namespace] = objects
                            logging.info("Deleted all objects in namespace %s was successful" % str(selected_namespace))
                        except Exception as e:
@@ -236,7 +239,7 @@ def run(
                    if len(scenario_config) > 1:
                        try:
                            failed_post_scenarios = post_actions.check_recovery(
                                kubeconfig_path, scenario_config, failed_post_scenarios, pre_action_output
                                telemetry.kubecli.get_kubeconfig_path(), scenario_config, failed_post_scenarios, pre_action_output
                            )
                        except Exception as e:
                            logging.error("Failed to run post action checks: %s" % e)
@@ -244,17 +247,27 @@ def run(
                            # sys.exit(1)
                            raise RuntimeError()
                    else:
                        failed_post_scenarios = check_all_running_deployment(killed_namespaces, wait_time, kubecli)
                        failed_post_scenarios = check_all_running_deployment(killed_namespaces, wait_time, telemetry.kubecli)

                end_time = int(time.time())
                cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
        except (Exception, RuntimeError):
            scenario_telemetry.exitStatus = 1
            scenario_telemetry.exit_status = 1
            failed_scenarios.append(scenario_config[0])
            log_exception(scenario_config[0])
        else:
            scenario_telemetry.exitStatus = 0
        scenario_telemetry.endTimeStamp = time.time()
            scenario_telemetry.exit_status = 0
        scenario_telemetry.end_timestamp = time.time()
        utils.collect_and_put_ocp_logs(telemetry,
                                       parsed_scenario_config,
                                       telemetry_request_id,
                                       int(scenario_telemetry.start_timestamp),
                                       int(scenario_telemetry.end_timestamp))
        utils.populate_cluster_events(scenario_telemetry,
                                      parsed_scenario_config,
                                      telemetry.kubecli,
                                      int(scenario_telemetry.start_timestamp),
                                      int(scenario_telemetry.end_timestamp))
        scenario_telemetries.append(scenario_telemetry)
    return failed_scenarios, scenario_telemetries

kraken/service_hijacking/service_hijacking.py (new file, 103 lines)
@@ -0,0 +1,103 @@
import logging
import time
import yaml

from krkn_lib.models.telemetry import ScenarioTelemetry
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
from krkn_lib.utils import log_exception

from kraken import utils


def run(scenarios_list: list[str],
        wait_duration: int,
        telemetry: KrknTelemetryOpenshift,
        telemetry_request_id: str) -> (list[str], list[ScenarioTelemetry]):

    scenario_telemetries = list[ScenarioTelemetry]()
    failed_post_scenarios = []
    for scenario in scenarios_list:
        scenario_telemetry = ScenarioTelemetry()
        scenario_telemetry.scenario = scenario
        scenario_telemetry.start_timestamp = time.time()
        parsed_scenario_config = telemetry.set_parameters_base64(scenario_telemetry, scenario)
        with open(scenario) as stream:
            scenario_config = yaml.safe_load(stream)

        service_name = scenario_config['service_name']
        service_namespace = scenario_config['service_namespace']
        plan = scenario_config["plan"]
        image = scenario_config["image"]
        target_port = scenario_config["service_target_port"]
        chaos_duration = scenario_config["chaos_duration"]

        logging.info(f"checking service {service_name} in namespace: {service_namespace}")
        if not telemetry.kubecli.service_exists(service_name, service_namespace):
            logging.error(f"service: {service_name} not found in namespace: {service_namespace}, failed to run scenario.")
            fail_scenario_telemetry(scenario_telemetry)
            failed_post_scenarios.append(scenario)
            break
        try:
            logging.info(f"service: {service_name} found in namespace: {service_namespace}")
            logging.info(f"creating webservice and initializing test plan...")
            # both named ports and port numbers can be used
            if isinstance(target_port, int):
                logging.info(f"webservice will listen on port {target_port}")
                webservice = telemetry.kubecli.deploy_service_hijacking(service_namespace, plan, image, port_number=target_port)
            else:
                logging.info(f"traffic will be redirected to named port: {target_port}")
                webservice = telemetry.kubecli.deploy_service_hijacking(service_namespace, plan, image, port_name=target_port)
            logging.info(f"successfully deployed pod: {webservice.pod_name} "
                         f"in namespace:{service_namespace} with selector {webservice.selector}!"
                         )
            logging.info(f"patching service: {service_name} to hijack traffic towards: {webservice.pod_name}")
            original_service = telemetry.kubecli.replace_service_selector([webservice.selector], service_name, service_namespace)
            if original_service is None:
                logging.error(f"failed to patch service: {service_name}, namespace: {service_namespace} with selector {webservice.selector}")
                fail_scenario_telemetry(scenario_telemetry)
                failed_post_scenarios.append(scenario)
                break

            logging.info(f"service: {service_name} successfully patched!")
            logging.info(f"original service manifest:\n\n{yaml.dump(original_service)}")
            logging.info(f"waiting {chaos_duration} before restoring the service")
            time.sleep(chaos_duration)
            selectors = ["=".join([key, original_service["spec"]["selector"][key]]) for key in original_service["spec"]["selector"].keys()]
            logging.info(f"restoring the service selectors {selectors}")
            original_service = telemetry.kubecli.replace_service_selector(selectors, service_name, service_namespace)
            if original_service is None:
                logging.error(f"failed to restore original service: {service_name}, namespace: {service_namespace} with selectors: {selectors}")
                fail_scenario_telemetry(scenario_telemetry)
                failed_post_scenarios.append(scenario)
                break
            logging.info("selectors successfully restored")
            logging.info("undeploying service-hijacking resources...")
            telemetry.kubecli.undeploy_service_hijacking(webservice)

            logging.info("End of scenario. Waiting for the specified duration: %s" % (wait_duration))
            time.sleep(wait_duration)
            scenario_telemetry.exit_status = 0
            logging.info("success")
        except Exception as e:
            logging.error(f"scenario {scenario} failed with exception: {e}")
            fail_scenario_telemetry(scenario_telemetry)
            log_exception(scenario)

        scenario_telemetry.end_timestamp = time.time()
        utils.collect_and_put_ocp_logs(telemetry,
                                       parsed_scenario_config,
                                       telemetry_request_id,
                                       int(scenario_telemetry.start_timestamp),
                                       int(scenario_telemetry.end_timestamp))
        utils.populate_cluster_events(scenario_telemetry,
                                      parsed_scenario_config,
                                      telemetry.kubecli,
                                      int(scenario_telemetry.start_timestamp),
                                      int(scenario_telemetry.end_timestamp))
        scenario_telemetries.append(scenario_telemetry)

    return failed_post_scenarios, scenario_telemetries

def fail_scenario_telemetry(scenario_telemetry: ScenarioTelemetry):
|
||||
scenario_telemetry.exit_status = 1
|
||||
scenario_telemetry.end_timestamp = time.time()
|
||||
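For reference, the run() above expects six top-level keys in each scenario file. The sketch below is a hypothetical example only: the service name, image reference and plan steps are placeholders, and the exact schema of "plan" is owned by the krkn-lib webservice image rather than by this module.

import yaml

# hypothetical service_hijacking scenario; key names match the lookups in run() above
scenario = yaml.safe_load("""
service_name: payment-api                # placeholder service
service_namespace: default
service_target_port: http-web-svc        # named port or an integer port number
image: quay.io/krkn-chaos/krkn-service-hijacking:v0.1.3   # placeholder image reference
chaos_duration: 120                       # seconds before the original selectors are restored
plan:                                     # illustrative steps only, schema defined by the webservice
  - resource: /list/index.php
    steps:
      GET:
        - duration: 15
          status: 500
          mime_type: application/json
          payload: '{"status": "internal server error"}'
""")
assert {"service_name", "service_namespace", "plan", "image",
        "service_target_port", "chaos_duration"} <= scenario.keys()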
@@ -3,6 +3,10 @@ import yaml
import logging
import time
from multiprocessing.pool import ThreadPool

from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift

from .. import utils
from ..cerberus import setup as cerberus
from ..post_actions import actions as post_actions
from ..node_actions.aws_node_scenarios import AWS
@@ -10,15 +14,17 @@ from ..node_actions.openstack_node_scenarios import OPENSTACKCLOUD
from ..node_actions.az_node_scenarios import Azure
from ..node_actions.gcp_node_scenarios import GCP
from krkn_lib.k8s import KrknKubernetes
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
from krkn_lib.models.telemetry import ScenarioTelemetry
from krkn_lib.utils.functions import log_exception

def multiprocess_nodes(cloud_object_function, nodes):
def multiprocess_nodes(cloud_object_function, nodes, processes=0):
try:
# pool object with number of element

pool = ThreadPool(processes=len(nodes))
if processes == 0:
pool = ThreadPool(processes=len(nodes))
else:
pool = ThreadPool(processes=processes)
logging.info("nodes type " + str(type(nodes[0])))
if type(nodes[0]) is tuple:
node_id = []
@@ -45,10 +51,12 @@ def cluster_shut_down(shut_down_config, kubecli: KrknKubernetes):
shut_down_duration = shut_down_config["shut_down_duration"]
cloud_type = shut_down_config["cloud_type"]
timeout = shut_down_config["timeout"]
processes = 0
if cloud_type.lower() == "aws":
cloud_object = AWS()
elif cloud_type.lower() == "gcp":
cloud_object = GCP()
processes = 1
elif cloud_type.lower() == "openstack":
cloud_object = OPENSTACKCLOUD()
elif cloud_type.lower() in ["azure", "az"]:
@@ -71,7 +79,7 @@ def cluster_shut_down(shut_down_config, kubecli: KrknKubernetes):
for _ in range(runs):
logging.info("Starting cluster_shut_down scenario injection")
stopping_nodes = set(node_id)
multiprocess_nodes(cloud_object.stop_instances, node_id)
multiprocess_nodes(cloud_object.stop_instances, node_id, processes)
stopped_nodes = stopping_nodes.copy()
while len(stopping_nodes) > 0:
for node in stopping_nodes:
@@ -101,7 +109,7 @@ def cluster_shut_down(shut_down_config, kubecli: KrknKubernetes):
time.sleep(shut_down_duration)
logging.info("Restarting the nodes")
restarted_nodes = set(node_id)
multiprocess_nodes(cloud_object.start_instances, node_id)
multiprocess_nodes(cloud_object.start_instances, node_id, processes)
logging.info("Wait for each node to be running again")
not_running_nodes = restarted_nodes.copy()
while len(not_running_nodes) > 0:
@@ -129,7 +137,11 @@ def cluster_shut_down(shut_down_config, kubecli: KrknKubernetes):

# krkn_lib

def run(scenarios_list, config, wait_duration, kubecli: KrknKubernetes, telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]):
def run(scenarios_list,
config,
wait_duration,
telemetry: KrknTelemetryOpenshift,
telemetry_request_id: str) -> (list[str], list[ScenarioTelemetry]):
failed_post_scenarios = []
failed_scenarios = []
scenario_telemetries: list[ScenarioTelemetry] = []
@@ -147,8 +159,8 @@ def run(scenarios_list, config, wait_duration, kubecli: KrknKubernetes, telemetr

scenario_telemetry = ScenarioTelemetry()
scenario_telemetry.scenario = config_path
scenario_telemetry.startTimeStamp = time.time()
telemetry.set_parameters_base64(scenario_telemetry, config_path)
scenario_telemetry.start_timestamp = time.time()
parsed_scenario_config = telemetry.set_parameters_base64(scenario_telemetry, config_path)

with open(config_path, "r") as f:
shut_down_config_yaml = yaml.full_load(f)
@@ -156,7 +168,7 @@ def run(scenarios_list, config, wait_duration, kubecli: KrknKubernetes, telemetr
shut_down_config_yaml["cluster_shut_down_scenario"]
start_time = int(time.time())
try:
cluster_shut_down(shut_down_config_scenario, kubecli)
cluster_shut_down(shut_down_config_scenario, telemetry.kubecli)
logging.info(
"Waiting for the specified duration: %s" % (wait_duration)
)
@@ -175,11 +187,21 @@ def run(scenarios_list, config, wait_duration, kubecli: KrknKubernetes, telemetr
except (RuntimeError, Exception):
log_exception(config_path)
failed_scenarios.append(config_path)
scenario_telemetry.exitStatus = 1
scenario_telemetry.exit_status = 1
else:
scenario_telemetry.exitStatus = 0
scenario_telemetry.exit_status = 0

scenario_telemetry.endTimeStamp = time.time()
scenario_telemetry.end_timestamp = time.time()
utils.collect_and_put_ocp_logs(telemetry,
parsed_scenario_config,
telemetry_request_id,
int(scenario_telemetry.start_timestamp),
int(scenario_telemetry.end_timestamp))
utils.populate_cluster_events(scenario_telemetry,
parsed_scenario_config,
telemetry.kubecli,
int(scenario_telemetry.start_timestamp),
int(scenario_telemetry.end_timestamp))
scenario_telemetries.append(scenario_telemetry)

return failed_scenarios, scenario_telemetries
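The new processes argument to multiprocess_nodes lets cluster_shut_down cap the thread pool (it passes 1 for GCP so the instance calls run serially), while 0 keeps the previous one-thread-per-node behaviour. A minimal, self-contained sketch of that sizing logic with a stubbed cloud call:

from multiprocessing.pool import ThreadPool

def pool_map(cloud_call, nodes, processes=0):
    # processes == 0 -> one worker per node (the previous behaviour);
    # processes == 1 -> serialize calls, useful for rate-limited cloud APIs.
    pool = ThreadPool(processes=processes if processes > 0 else len(nodes))
    try:
        return pool.map(cloud_call, nodes)
    finally:
        pool.close()
        pool.join()

# usage with a stubbed stop_instances call
print(pool_map(lambda node: f"stopped {node}", ["worker-0", "worker-1"], processes=1))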
1 kraken/syn_flood/__init__.py Normal file
@@ -0,0 +1 @@
from .syn_flood import *
148 kraken/syn_flood/syn_flood.py Normal file
@@ -0,0 +1,148 @@
import logging
import os.path
import time
from typing import List

import krkn_lib.utils
import yaml
from krkn_lib.k8s import KrknKubernetes
from krkn_lib.models.telemetry import ScenarioTelemetry
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift

from kraken import utils


def run(scenarios_list: list[str],
telemetry: KrknTelemetryOpenshift,
telemetry_request_id: str
) -> (list[str], list[ScenarioTelemetry]):
scenario_telemetries: list[ScenarioTelemetry] = []
failed_post_scenarios = []
for scenario in scenarios_list:
scenario_telemetry = ScenarioTelemetry()
scenario_telemetry.scenario = scenario
scenario_telemetry.start_timestamp = time.time()
parsed_scenario_config = telemetry.set_parameters_base64(scenario_telemetry, scenario)

try:
pod_names = []
config = parse_config(scenario)
if config["target-service-label"]:
target_services = telemetry.kubecli.select_service_by_label(config["namespace"], config["target-service-label"])
else:
target_services = [config["target-service"]]

for target in target_services:
if not telemetry.kubecli.service_exists(target, config["namespace"]):
raise Exception(f"{target} service not found")
for i in range(config["number-of-pods"]):
pod_name = "syn-flood-" + krkn_lib.utils.get_random_string(10)
telemetry.kubecli.deploy_syn_flood(pod_name,
config["namespace"],
config["image"],
target,
config["target-port"],
config["packet-size"],
config["window-size"],
config["duration"],
config["attacker-nodes"]
)
pod_names.append(pod_name)

logging.info("waiting all the attackers to finish:")
did_finish = False
finished_pods = []
while not did_finish:
for pod_name in pod_names:
if not telemetry.kubecli.is_pod_running(pod_name, config["namespace"]):
finished_pods.append(pod_name)
if set(pod_names) == set(finished_pods):
did_finish = True
time.sleep(1)

except Exception as e:
logging.error(f"Failed to run syn flood scenario {scenario}: {e}")
failed_post_scenarios.append(scenario)
scenario_telemetry.exit_status = 1
else:
scenario_telemetry.exit_status = 0
scenario_telemetry.end_timestamp = time.time()
utils.collect_and_put_ocp_logs(telemetry,
parsed_scenario_config,
telemetry_request_id,
int(scenario_telemetry.start_timestamp),
int(scenario_telemetry.end_timestamp))
utils.populate_cluster_events(scenario_telemetry,
parsed_scenario_config,
telemetry.kubecli,
int(scenario_telemetry.start_timestamp),
int(scenario_telemetry.end_timestamp))
scenario_telemetries.append(scenario_telemetry)
return failed_post_scenarios, scenario_telemetries


def parse_config(scenario_file: str) -> dict[str,any]:
if not os.path.exists(scenario_file):
raise Exception(f"failed to load scenario file {scenario_file}")

try:
with open(scenario_file) as stream:
config = yaml.safe_load(stream)
except Exception:
raise Exception(f"{scenario_file} is not a valid yaml file")

missing = []
if not check_key_value(config, "packet-size"):
missing.append("packet-size")
if not check_key_value(config, "window-size"):
missing.append("window-size")
if not check_key_value(config, "duration"):
missing.append("duration")
if not check_key_value(config, "namespace"):
missing.append("namespace")
if not check_key_value(config, "number-of-pods"):
missing.append("number-of-pods")
if not check_key_value(config, "target-port"):
missing.append("target-port")
if not check_key_value(config, "image"):
missing.append("image")
if "target-service" not in config.keys():
missing.append("target-service")
if "target-service-label" not in config.keys():
missing.append("target-service-label")

if len(missing) > 0:
raise Exception(f"{(',').join(missing)} parameter(s) are missing")

if not config["target-service"] and not config["target-service-label"]:
raise Exception("you have either to set a target service or a label")
if config["target-service"] and config["target-service-label"]:
raise Exception("you cannot select both target-service and target-service-label")

if 'attacker-nodes' and not is_node_affinity_correct(config['attacker-nodes']):
raise Exception("attacker-nodes format is not correct")
return config

def check_key_value(dictionary, key):
if key in dictionary:
value = dictionary[key]
if value is not None and value != '':
return True
return False

def is_node_affinity_correct(obj) -> bool:
if not isinstance(obj, dict):
return False
for key in obj.keys():
if not isinstance(key, str):
return False
if not isinstance(obj[key], list):
return False
return True
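parse_config() above rejects a scenario file unless every key below is present, only one of target-service / target-service-label may be non-empty, and attacker-nodes must be a mapping of node label keys to lists of values (see is_node_affinity_correct). A hypothetical scenario file; the image reference and node names are placeholders:

import yaml

# hypothetical syn_flood scenario; keys mirror the checks in parse_config() above
config = yaml.safe_load("""
packet-size: 120              # bytes per SYN packet
window-size: 64               # TCP window size
duration: 60                  # seconds each attacker pod floods the target
namespace: default
number-of-pods: 2             # attacker pods per target service
target-service: elasticsearch # or leave empty and select services by label
target-service-label: ""
target-port: 9200
image: quay.io/krkn-chaos/krkn-syn-flood:latest    # placeholder image reference
attacker-nodes:               # node affinity, {label key: [values]}
  kubernetes.io/hostname:
    - worker-1
    - worker-2
""")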
@@ -6,12 +6,12 @@ import re
import yaml
import random

from krkn_lib import utils
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
from kubernetes.client import ApiException

from .. import utils
from ..cerberus import setup as cerberus
from krkn_lib.k8s import KrknKubernetes
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
from krkn_lib.models.telemetry import ScenarioTelemetry
from krkn_lib.utils.functions import get_yaml_item_value, log_exception, get_random_string

@@ -348,21 +348,25 @@ def check_date_time(object_type, names, kubecli:KrknKubernetes):

# krkn_lib
def run(scenarios_list, config, wait_duration, kubecli:KrknKubernetes, telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]):
def run(scenarios_list,
config,
wait_duration,
telemetry: KrknTelemetryOpenshift,
telemetry_request_id: str) -> (list[str], list[ScenarioTelemetry]):
failed_scenarios = []
scenario_telemetries: list[ScenarioTelemetry] = []
for time_scenario_config in scenarios_list:
scenario_telemetry = ScenarioTelemetry()
scenario_telemetry.scenario = time_scenario_config
scenario_telemetry.startTimeStamp = time.time()
telemetry.set_parameters_base64(scenario_telemetry, time_scenario_config)
scenario_telemetry.start_timestamp = time.time()
parsed_scenario_config = telemetry.set_parameters_base64(scenario_telemetry, time_scenario_config)
try:
with open(time_scenario_config, "r") as f:
scenario_config = yaml.full_load(f)
for time_scenario in scenario_config["time_scenarios"]:
start_time = int(time.time())
object_type, object_names = skew_time(time_scenario, kubecli)
not_reset = check_date_time(object_type, object_names, kubecli)
object_type, object_names = skew_time(time_scenario, telemetry.kubecli)
not_reset = check_date_time(object_type, object_names, telemetry.kubecli)
if len(not_reset) > 0:
logging.info("Object times were not reset")
logging.info(
@@ -377,12 +381,22 @@ def run(scenarios_list, config, wait_duration, kubecli:KrknKubernetes, telemetry
end_time
)
except (RuntimeError, Exception):
scenario_telemetry.exitStatus = 1
scenario_telemetry.exit_status = 1
log_exception(time_scenario_config)
failed_scenarios.append(time_scenario_config)
else:
scenario_telemetry.exitStatus = 0
scenario_telemetry.endTimeStamp = time.time()
scenario_telemetry.exit_status = 0
scenario_telemetry.end_timestamp = time.time()
utils.collect_and_put_ocp_logs(telemetry,
parsed_scenario_config,
telemetry_request_id,
int(scenario_telemetry.start_timestamp),
int(scenario_telemetry.end_timestamp))
utils.populate_cluster_events(scenario_telemetry,
parsed_scenario_config,
telemetry.kubecli,
int(scenario_telemetry.start_timestamp),
int(scenario_telemetry.end_timestamp))
scenario_telemetries.append(scenario_telemetry)

return failed_scenarios, scenario_telemetries
12 kraken/utils/TeeLogHandler.py Normal file
@@ -0,0 +1,12 @@
import logging
class TeeLogHandler(logging.Handler):
logs: list[str] = []
name = "TeeLogHandler"

def get_output(self) -> str:
return "\n".join(self.logs)

def emit(self, record):
self.logs.append(self.formatter.format(record))
def __del__(self):
pass
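TeeLogHandler simply keeps every formatted record in memory so the full run log can later be embedded in the JUnit test case (see the run_kraken.py changes further down). A minimal usage sketch, assuming the kraken package above is importable; basicConfig attaches the formatter that emit() relies on:

import logging
from kraken.utils import TeeLogHandler  # assumes kraken is on the import path

tee = TeeLogHandler()
logging.basicConfig(level=logging.INFO,
                    format="%(asctime)s [%(levelname)s] %(message)s",
                    handlers=[logging.StreamHandler(), tee])
logging.info("chaos scenario started")
print(tee.get_output())   # same lines, ready to be attached to a junit test case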
2 kraken/utils/__init__.py Normal file
@@ -0,0 +1,2 @@
from .TeeLogHandler import TeeLogHandler
from .functions import *
60 kraken/utils/functions.py Normal file
@@ -0,0 +1,60 @@
import krkn_lib.utils
from krkn_lib.k8s import KrknKubernetes
from krkn_lib.models.telemetry import ScenarioTelemetry
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
from tzlocal.unix import get_localzone


def populate_cluster_events(scenario_telemetry: ScenarioTelemetry,
scenario_config: dict,
kubecli: KrknKubernetes,
start_timestamp: int,
end_timestamp: int
):
events = []
namespaces = __retrieve_namespaces(scenario_config, kubecli)

if len(namespaces) == 0:
events.extend(kubecli.collect_and_parse_cluster_events(start_timestamp, end_timestamp, str(get_localzone())))
else:
for namespace in namespaces:
events.extend(kubecli.collect_and_parse_cluster_events(start_timestamp, end_timestamp, str(get_localzone()),
namespace=namespace))

scenario_telemetry.set_cluster_events(events)


def collect_and_put_ocp_logs(telemetry_ocp: KrknTelemetryOpenshift,
scenario_config: dict,
request_id: str,
start_timestamp: int,
end_timestamp: int,
):
if (
telemetry_ocp.krkn_telemetry_config and
telemetry_ocp.krkn_telemetry_config["enabled"] and
telemetry_ocp.krkn_telemetry_config["logs_backup"] and
not telemetry_ocp.kubecli.is_kubernetes()
):
namespaces = __retrieve_namespaces(scenario_config, telemetry_ocp.kubecli)
if len(namespaces) > 0:
for namespace in namespaces:
telemetry_ocp.put_ocp_logs(request_id,
telemetry_ocp.krkn_telemetry_config,
start_timestamp,
end_timestamp,
namespace)
else:
telemetry_ocp.put_ocp_logs(request_id,
telemetry_ocp.krkn_telemetry_config,
start_timestamp,
end_timestamp)


def __retrieve_namespaces(scenario_config: dict, kubecli: KrknKubernetes) -> set[str]:
namespaces = list()
namespaces.extend(krkn_lib.utils.deep_get_attribute("namespace", scenario_config))
namespace_patterns = krkn_lib.utils.deep_get_attribute("namespace_pattern", scenario_config)
for pattern in namespace_patterns:
namespaces.extend(kubecli.list_namespaces_by_regex(pattern))
return set(namespaces)
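__retrieve_namespaces walks the whole parsed scenario config, so a namespace or namespace_pattern key nested at any depth contributes to the set of namespaces whose events and logs get collected. A small illustration of the deep-lookup part only (the regex expansion needs a live cluster, so it is left out; the expected outputs in the comments are assumptions about deep_get_attribute's behaviour, not verified results):

import krkn_lib.utils

scenario_config = {
    "scenarios": [
        {"namespace": "payments", "action": "kill-pods"},
        {"filters": {"namespace_pattern": "^openshift-.*"}},
    ]
}
# deep_get_attribute collects every value stored under the given key, at any depth
print(krkn_lib.utils.deep_get_attribute("namespace", scenario_config))          # expected: ['payments']
print(krkn_lib.utils.deep_get_attribute("namespace_pattern", scenario_config))  # expected: ['^openshift-.*']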
@@ -1,13 +1,20 @@
import yaml
import logging
import time

from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift

from .. import utils
from ..node_actions.aws_node_scenarios import AWS
from ..cerberus import setup as cerberus
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
from krkn_lib.models.telemetry import ScenarioTelemetry
from krkn_lib.utils.functions import log_exception

def run(scenarios_list, config, wait_duration, telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]) :
def run(scenarios_list,
config,
wait_duration,
telemetry: KrknTelemetryOpenshift,
telemetry_request_id: str) -> (list[str], list[ScenarioTelemetry]) :
"""
filters the subnet of interest and applies the network acl
to create zone outage
@@ -19,8 +26,8 @@ def run(scenarios_list, config, wait_duration, telemetry: KrknTelemetryKubernete
for zone_outage_config in scenarios_list:
scenario_telemetry = ScenarioTelemetry()
scenario_telemetry.scenario = zone_outage_config
scenario_telemetry.startTimeStamp = time.time()
telemetry.set_parameters_base64(scenario_telemetry, zone_outage_config)
scenario_telemetry.start_timestamp = time.time()
parsed_scenario_config = telemetry.set_parameters_base64(scenario_telemetry, zone_outage_config)
try:
if len(zone_outage_config) > 1:
with open(zone_outage_config, "r") as f:
@@ -110,12 +117,22 @@ def run(scenarios_list, config, wait_duration, telemetry: KrknTelemetryKubernete
end_time
)
except (RuntimeError, Exception):
scenario_telemetry.exitStatus = 1
scenario_telemetry.exit_status = 1
failed_scenarios.append(zone_outage_config)
log_exception(zone_outage_config)
else:
scenario_telemetry.exitStatus = 0
scenario_telemetry.endTimeStamp = time.time()
scenario_telemetry.exit_status = 0
scenario_telemetry.end_timestamp = time.time()
utils.collect_and_put_ocp_logs(telemetry,
parsed_scenario_config,
telemetry_request_id,
int(scenario_telemetry.start_timestamp),
int(scenario_telemetry.end_timestamp))
utils.populate_cluster_events(scenario_telemetry,
parsed_scenario_config,
telemetry.kubecli,
int(scenario_telemetry.start_timestamp),
int(scenario_telemetry.end_timestamp))
scenario_telemetries.append(scenario_telemetry)
return failed_scenarios, scenario_telemetries
@@ -1,40 +1,41 @@
PyYAML>=5.1
aliyun-python-sdk-core==2.13.36
aliyun-python-sdk-ecs==4.24.25
arcaflow==0.9.0
arcaflow-plugin-sdk==0.10.0
azure-identity
azure-keyvault
azure-mgmt-compute
arcaflow-plugin-sdk==0.14.0
arcaflow==0.19.1
boto3==1.28.61
coverage
datetime
docker
docker-compose
git+https://github.com/redhat-chaos/arcaflow-plugin-kill-pod.git
git+https://github.com/vmware/vsphere-automation-sdk-python.git@v8.0.0.0
gitpython
google-api-python-client
ibm_cloud_sdk_core
ibm_vpc
azure-identity==1.16.1
azure-keyvault==4.2.0
azure-mgmt-compute==30.5.0
itsdangerous==2.0.1
jinja2==3.1.3
krkn-lib >= 1.4.6
kubernetes
lxml >= 4.3.0
oauth2client>=4.1.3
openshift-client
paramiko
podman-compose
pyVmomi >= 6.7
pyfiglet
pytest
python-ipmi
python-openstackclient
requests
service_identity
setuptools==65.5.1
werkzeug==3.0.1
wheel
coverage==7.4.1
datetime==5.4
docker==7.0.0
gitpython==3.1.41
google-api-python-client==2.116.0
ibm_cloud_sdk_core==3.18.0
ibm_vpc==0.20.0
jinja2==3.1.4
krkn-lib==3.1.0
lxml==5.1.0
kubernetes==28.1.0
numpy==1.26.4
oauth2client==4.1.3
pandas==2.2.0
openshift-client==1.0.21
paramiko==3.4.0
pyVmomi==8.0.2.0.1
pyfiglet==1.0.2
pytest==8.0.0
python-ipmi==0.5.4
python-openstackclient==6.5.0
requests==2.32.2
service_identity==24.1.0
PyYAML==6.0.1
setuptools==70.0.0
werkzeug==3.0.3
wheel==0.42.0
zope.interface==5.4.0
pandas<2.0.0

git+https://github.com/krkn-chaos/arcaflow-plugin-kill-pod.git@v0.1.0
git+https://github.com/vmware/vsphere-automation-sdk-python.git@v8.0.0.0
cryptography>=42.0.4 # not directly required, pinned by Snyk to avoid a vulnerability
429 run_kraken.py
@@ -9,7 +9,13 @@ import optparse
|
||||
import pyfiglet
|
||||
import uuid
|
||||
import time
|
||||
|
||||
from krkn_lib.elastic.krkn_elastic import KrknElastic
|
||||
from krkn_lib.models.elastic import ElasticChaosRunTelemetry
|
||||
from krkn_lib.models.krkn import ChaosRunOutput, ChaosRunAlertSummary
|
||||
from krkn_lib.prometheus.krkn_prometheus import KrknPrometheus
|
||||
from tzlocal.unix import get_localzone
|
||||
|
||||
import kraken.time_actions.common_time_functions as time_actions
|
||||
import kraken.performance_dashboards.setup as performance_dashboards
|
||||
import kraken.pod_scenarios.setup as pod_scenarios
|
||||
@@ -23,21 +29,23 @@ import kraken.pvc.pvc_scenario as pvc_scenario
|
||||
import kraken.network_chaos.actions as network_chaos
|
||||
import kraken.arcaflow_plugin as arcaflow_plugin
|
||||
import kraken.prometheus as prometheus_plugin
|
||||
import kraken.service_hijacking.service_hijacking as service_hijacking_plugin
|
||||
import server as server
|
||||
from kraken import plugins
|
||||
from kraken import plugins, syn_flood
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
from krkn_lib.ocp import KrknOpenshift
|
||||
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
|
||||
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
|
||||
from krkn_lib.models.telemetry import ChaosRunTelemetry
|
||||
from krkn_lib.utils import SafeLogger
|
||||
from krkn_lib.utils.functions import get_yaml_item_value
|
||||
|
||||
from krkn_lib.utils.functions import get_yaml_item_value, get_junit_test_case
|
||||
|
||||
from kraken.utils import TeeLogHandler
|
||||
|
||||
report_file = ""
|
||||
|
||||
# Main function
|
||||
def main(cfg):
|
||||
def main(cfg) -> int:
|
||||
# Start kraken
|
||||
print(pyfiglet.figlet_format("kraken"))
|
||||
logging.info("Starting kraken")
|
||||
@@ -89,11 +97,61 @@ def main(cfg):
enable_alerts = get_yaml_item_value(
config["performance_monitoring"], "enable_alerts", False
)
enable_metrics = get_yaml_item_value(
config["performance_monitoring"], "enable_metrics", False
)
# elastic search
enable_elastic = get_yaml_item_value(
config["elastic"], "enable_elastic", False
)
elastic_collect_metrics = get_yaml_item_value(
config["elastic"], "collect_metrics", False
)

elastic_colllect_alerts = get_yaml_item_value(
config["elastic"], "collect_alerts", False
)

elastic_url = get_yaml_item_value(
config["elastic"], "elastic_url", ""
)

elastic_verify_certs = get_yaml_item_value(
config["elastic"], "verify_certs", False
)

elastic_port = get_yaml_item_value(
config["elastic"], "elastic_port", 32766
)

elastic_username = get_yaml_item_value(
config["elastic"], "username", ""
)
elastic_password = get_yaml_item_value(
config["elastic"], "password", ""
)

elastic_metrics_index = get_yaml_item_value(
config["elastic"], "metrics_index", "krkn-metrics"
)

elastic_alerts_index = get_yaml_item_value(
config["elastic"], "alerts_index", "krkn-alerts"
)

elastic_telemetry_index = get_yaml_item_value(
config["elastic"], "telemetry_index", "krkn-telemetry"
)

alert_profile = config["performance_monitoring"].get("alert_profile")
metrics_profile = config["performance_monitoring"].get("metrics_profile")
check_critical_alerts = get_yaml_item_value(
config["performance_monitoring"], "check_critical_alerts", False
)
telemetry_api_url = config["telemetry"].get("api_url")

# Initialize clients
if (not os.path.isfile(kubeconfig_path) and
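The block above only reads keys; the matching "elastic" section of config.yaml is not shown in this diff, so the following is a hypothetical sketch that simply mirrors the key names and defaults passed to get_yaml_item_value above (the URL is a placeholder):

import yaml

# hypothetical config fragment; key names mirror the get_yaml_item_value() calls above
elastic_cfg = yaml.safe_load("""
elastic:
  enable_elastic: false
  collect_metrics: false
  collect_alerts: false
  elastic_url: ""            # e.g. https://elastic.example.com (placeholder)
  elastic_port: 32766
  verify_certs: false
  username: ""
  password: ""
  metrics_index: krkn-metrics
  alerts_index: krkn-alerts
  telemetry_index: krkn-telemetry
""")["elastic"]
print(elastic_cfg["telemetry_index"])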
@@ -101,7 +159,8 @@ def main(cfg):
|
||||
logging.error(
|
||||
"Cannot read the kubeconfig file at %s, please check" % kubeconfig_path
|
||||
)
|
||||
sys.exit(1)
|
||||
#sys.exit(1)
|
||||
return 1
|
||||
logging.info("Initializing client to talk to the Kubernetes cluster")
|
||||
|
||||
# Generate uuid for the run
|
||||
@@ -129,18 +188,18 @@ def main(cfg):
|
||||
except:
|
||||
kubecli.initialize_clients(None)
|
||||
|
||||
|
||||
|
||||
# find node kraken might be running on
|
||||
kubecli.find_kraken_node()
|
||||
|
||||
# Set up kraken url to track signal
|
||||
if not 0 <= int(port) <= 65535:
|
||||
logging.error("%s isn't a valid port number, please check" % (port))
|
||||
sys.exit(1)
|
||||
#sys.exit(1)
|
||||
return 1
|
||||
if not signal_address:
|
||||
logging.error("Please set the signal address in the config")
|
||||
sys.exit(1)
|
||||
#sys.exit(1)
|
||||
return 1
|
||||
address = (signal_address, port)
|
||||
|
||||
# If publish_running_status is False this should keep us going
|
||||
@@ -156,23 +215,42 @@ def main(cfg):
|
||||
# Cluster info
|
||||
logging.info("Fetching cluster info")
|
||||
cv = ""
|
||||
if config["kraken"]["distribution"] == "openshift":
|
||||
if distribution == "openshift":
|
||||
cv = ocpcli.get_clusterversion_string()
|
||||
if prometheus_url is None:
|
||||
connection_data = ocpcli.get_prometheus_api_connection_data()
|
||||
prometheus_url = connection_data.endpoint
|
||||
prometheus_bearer_token = connection_data.token
|
||||
if not prometheus_url:
|
||||
try:
|
||||
connection_data = ocpcli.get_prometheus_api_connection_data()
|
||||
if connection_data:
|
||||
prometheus_url = connection_data.endpoint
|
||||
prometheus_bearer_token = connection_data.token
|
||||
else:
|
||||
# If can't make a connection, set alerts to false
|
||||
enable_alerts = False
|
||||
critical_alerts = False
|
||||
except Exception:
|
||||
logging.error("invalid distribution selected, running openshift scenarios against kubernetes cluster."
|
||||
"Please set 'kubernetes' in config.yaml krkn.platform and try again")
|
||||
return 1
|
||||
if cv != "":
|
||||
logging.info(cv)
|
||||
else:
|
||||
logging.info("Cluster version CRD not detected, skipping")
|
||||
|
||||
# KrknTelemetry init
|
||||
telemetry_k8s = KrknTelemetryKubernetes(safe_logger, kubecli)
|
||||
telemetry_ocp = KrknTelemetryOpenshift(safe_logger, ocpcli)
|
||||
|
||||
|
||||
if enable_alerts:
|
||||
telemetry_k8s = KrknTelemetryKubernetes(safe_logger, kubecli, config["telemetry"])
|
||||
telemetry_ocp = KrknTelemetryOpenshift(safe_logger, ocpcli, config["telemetry"])
|
||||
if enable_elastic:
|
||||
elastic_search = KrknElastic(safe_logger,
|
||||
elastic_url,
|
||||
elastic_port,
|
||||
elastic_verify_certs,
|
||||
elastic_username,
|
||||
elastic_password
|
||||
)
|
||||
else:
|
||||
elastic_search = None
|
||||
summary = ChaosRunAlertSummary()
|
||||
if enable_metrics or enable_alerts or check_critical_alerts:
|
||||
prometheus = KrknPrometheus(prometheus_url, prometheus_bearer_token)
|
||||
|
||||
logging.info("Server URL: %s" % kubecli.get_host())
|
||||
@@ -203,7 +281,8 @@ def main(cfg):
|
||||
|
||||
# Capture the start time
|
||||
start_time = int(time.time())
|
||||
|
||||
post_critical_alerts = 0
|
||||
chaos_output = ChaosRunOutput()
|
||||
chaos_telemetry = ChaosRunTelemetry()
|
||||
chaos_telemetry.run_uuid = run_uuid
|
||||
# Loop to run the chaos starts here
|
||||
@@ -235,34 +314,36 @@ def main(cfg):
|
||||
"plugin_scenarios with the "
|
||||
"kill-pods configuration instead."
|
||||
)
|
||||
sys.exit(1)
|
||||
return 1
|
||||
elif scenario_type == "arcaflow_scenarios":
|
||||
failed_post_scenarios, scenario_telemetries = arcaflow_plugin.run(
|
||||
scenarios_list, kubeconfig_path, telemetry_k8s
|
||||
scenarios_list,
|
||||
telemetry_ocp,
|
||||
telemetry_request_id
|
||||
)
|
||||
chaos_telemetry.scenarios.extend(scenario_telemetries)
|
||||
|
||||
elif scenario_type == "plugin_scenarios":
|
||||
failed_post_scenarios, scenario_telemetries = plugins.run(
|
||||
scenarios_list,
|
||||
kubeconfig_path,
|
||||
kraken_config,
|
||||
failed_post_scenarios,
|
||||
wait_duration,
|
||||
telemetry_k8s
|
||||
telemetry_ocp,
|
||||
run_uuid,
|
||||
telemetry_request_id
|
||||
)
|
||||
chaos_telemetry.scenarios.extend(scenario_telemetries)
|
||||
# krkn_lib
|
||||
elif scenario_type == "container_scenarios":
|
||||
logging.info("Running container scenarios")
|
||||
failed_post_scenarios, scenario_telemetries = pod_scenarios.container_run(
|
||||
kubeconfig_path,
|
||||
scenarios_list,
|
||||
config,
|
||||
failed_post_scenarios,
|
||||
wait_duration,
|
||||
kubecli,
|
||||
telemetry_k8s
|
||||
telemetry_ocp,
|
||||
telemetry_request_id
|
||||
)
|
||||
chaos_telemetry.scenarios.extend(scenario_telemetries)
|
||||
|
||||
@@ -270,14 +351,21 @@ def main(cfg):
|
||||
# krkn_lib
|
||||
elif scenario_type == "node_scenarios":
|
||||
logging.info("Running node scenarios")
|
||||
failed_post_scenarios, scenario_telemetries = nodeaction.run(scenarios_list, config, wait_duration, kubecli, telemetry_k8s)
|
||||
failed_post_scenarios, scenario_telemetries = nodeaction.run(scenarios_list,
|
||||
config,
|
||||
wait_duration,
|
||||
telemetry_ocp,
|
||||
telemetry_request_id)
|
||||
chaos_telemetry.scenarios.extend(scenario_telemetries)
|
||||
# Inject managedcluster chaos scenarios specified in the config
|
||||
# krkn_lib
|
||||
elif scenario_type == "managedcluster_scenarios":
|
||||
logging.info("Running managedcluster scenarios")
|
||||
managedcluster_scenarios.run(
|
||||
scenarios_list, config, wait_duration, kubecli
|
||||
scenarios_list,
|
||||
config,
|
||||
wait_duration,
|
||||
kubecli
|
||||
)
|
||||
|
||||
# Inject time skew chaos scenarios specified
|
||||
@@ -285,12 +373,22 @@ def main(cfg):
|
||||
# krkn_lib
|
||||
elif scenario_type == "time_scenarios":
|
||||
logging.info("Running time skew scenarios")
|
||||
failed_post_scenarios, scenario_telemetries = time_actions.run(scenarios_list, config, wait_duration, kubecli, telemetry_k8s)
|
||||
failed_post_scenarios, scenario_telemetries = time_actions.run(scenarios_list,
|
||||
config,
|
||||
wait_duration,
|
||||
telemetry_ocp,
|
||||
telemetry_request_id
|
||||
)
|
||||
chaos_telemetry.scenarios.extend(scenario_telemetries)
|
||||
# Inject cluster shutdown scenarios
|
||||
# krkn_lib
|
||||
elif scenario_type == "cluster_shut_down_scenarios":
|
||||
failed_post_scenarios, scenario_telemetries = shut_down.run(scenarios_list, config, wait_duration, kubecli, telemetry_k8s)
|
||||
failed_post_scenarios, scenario_telemetries = shut_down.run(scenarios_list,
|
||||
config,
|
||||
wait_duration,
|
||||
telemetry_ocp,
|
||||
telemetry_request_id
|
||||
)
|
||||
chaos_telemetry.scenarios.extend(scenario_telemetries)
|
||||
|
||||
# Inject namespace chaos scenarios
|
||||
@@ -302,57 +400,87 @@ def main(cfg):
|
||||
config,
|
||||
wait_duration,
|
||||
failed_post_scenarios,
|
||||
kubeconfig_path,
|
||||
kubecli,
|
||||
telemetry_k8s
|
||||
telemetry_ocp,
|
||||
telemetry_request_id
|
||||
)
|
||||
chaos_telemetry.scenarios.extend(scenario_telemetries)
|
||||
|
||||
# Inject zone failures
|
||||
elif scenario_type == "zone_outages":
|
||||
logging.info("Inject zone outages")
|
||||
failed_post_scenarios, scenario_telemetries = zone_outages.run(scenarios_list, config, wait_duration, telemetry_k8s)
|
||||
failed_post_scenarios, scenario_telemetries = zone_outages.run(scenarios_list,
|
||||
config,
|
||||
wait_duration,
|
||||
telemetry_ocp,
|
||||
telemetry_request_id
|
||||
)
|
||||
chaos_telemetry.scenarios.extend(scenario_telemetries)
|
||||
# Application outages
|
||||
elif scenario_type == "application_outages":
|
||||
logging.info("Injecting application outage")
|
||||
failed_post_scenarios, scenario_telemetries = application_outage.run(
|
||||
scenarios_list, config, wait_duration, telemetry_k8s)
|
||||
scenarios_list,
|
||||
config,
|
||||
wait_duration,
|
||||
telemetry_ocp,
|
||||
telemetry_request_id
|
||||
)
|
||||
chaos_telemetry.scenarios.extend(scenario_telemetries)
|
||||
|
||||
# PVC scenarios
|
||||
# krkn_lib
|
||||
elif scenario_type == "pvc_scenarios":
|
||||
logging.info("Running PVC scenario")
|
||||
failed_post_scenarios, scenario_telemetries = pvc_scenario.run(scenarios_list, config, kubecli, telemetry_k8s)
|
||||
failed_post_scenarios, scenario_telemetries = pvc_scenario.run(scenarios_list,
|
||||
config,
|
||||
wait_duration,
|
||||
telemetry_ocp,
|
||||
telemetry_request_id
|
||||
)
|
||||
chaos_telemetry.scenarios.extend(scenario_telemetries)
|
||||
|
||||
# Network scenarios
|
||||
# krkn_lib
|
||||
elif scenario_type == "network_chaos":
|
||||
logging.info("Running Network Chaos")
|
||||
failed_post_scenarios, scenario_telemetries = network_chaos.run(scenarios_list, config, wait_duration, kubecli, telemetry_k8s)
|
||||
failed_post_scenarios, scenario_telemetries = network_chaos.run(scenarios_list,
|
||||
config,
|
||||
wait_duration,
|
||||
telemetry_ocp,
|
||||
telemetry_request_id
|
||||
)
|
||||
elif scenario_type == "service_hijacking":
|
||||
logging.info("Running Service Hijacking Chaos")
|
||||
failed_post_scenarios, scenario_telemetries = service_hijacking_plugin.run(scenarios_list,
|
||||
wait_duration,
|
||||
telemetry_ocp,
|
||||
telemetry_request_id
|
||||
)
|
||||
chaos_telemetry.scenarios.extend(scenario_telemetries)
|
||||
elif scenario_type == "syn_flood":
|
||||
logging.info("Running Syn Flood Chaos")
|
||||
failed_post_scenarios, scenario_telemetries = syn_flood.run(scenarios_list,
|
||||
telemetry_ocp,
|
||||
telemetry_request_id
|
||||
)
|
||||
chaos_telemetry.scenarios.extend(scenario_telemetries)
|
||||
|
||||
# Check for critical alerts when enabled
|
||||
if enable_alerts and check_critical_alerts :
|
||||
logging.info("Checking for critical alerts firing post choas")
|
||||
post_critical_alerts = 0
|
||||
if check_critical_alerts:
|
||||
prometheus_plugin.critical_alerts(prometheus,
|
||||
summary,
|
||||
run_uuid,
|
||||
scenario_type,
|
||||
start_time,
|
||||
datetime.datetime.now())
|
||||
|
||||
##PROM
|
||||
query = r"""ALERTS{severity="critical"}"""
|
||||
end_time = datetime.datetime.now()
|
||||
critical_alerts = prometheus.process_prom_query_in_range(
|
||||
query,
|
||||
start_time = datetime.datetime.fromtimestamp(start_time),
|
||||
end_time = end_time
|
||||
chaos_output.critical_alerts = summary
|
||||
post_critical_alerts = len(summary.post_chaos_alerts)
|
||||
if post_critical_alerts > 0:
|
||||
logging.error("Post chaos critical alerts firing please check, exiting")
|
||||
break
|
||||
|
||||
)
|
||||
critical_alerts_count = len(critical_alerts)
|
||||
if critical_alerts_count > 0:
|
||||
logging.error("Critical alerts are firing: %s", critical_alerts)
|
||||
logging.error("Please check, exiting")
|
||||
sys.exit(1)
|
||||
else:
|
||||
logging.info("No critical alerts are firing!!")
|
||||
|
||||
iteration += 1
|
||||
logging.info("")
|
||||
@@ -366,28 +494,57 @@ def main(cfg):
|
||||
# if platform is openshift will be collected
|
||||
# Cloud platform and network plugins metadata
|
||||
# through OCP specific APIs
|
||||
if config["kraken"]["distribution"] == "openshift":
|
||||
if distribution == "openshift":
|
||||
telemetry_ocp.collect_cluster_metadata(chaos_telemetry)
|
||||
else:
|
||||
telemetry_k8s.collect_cluster_metadata(chaos_telemetry)
|
||||
|
||||
decoded_chaos_run_telemetry = ChaosRunTelemetry(json.loads(chaos_telemetry.to_json()))
|
||||
logging.info(f"Telemetry data:\n{decoded_chaos_run_telemetry.to_json()}")
|
||||
telemetry_json = chaos_telemetry.to_json()
|
||||
decoded_chaos_run_telemetry = ChaosRunTelemetry(json.loads(telemetry_json))
|
||||
chaos_output.telemetry = decoded_chaos_run_telemetry
|
||||
logging.info(f"Chaos data:\n{chaos_output.to_json()}")
|
||||
if enable_elastic:
|
||||
elastic_telemetry = ElasticChaosRunTelemetry(chaos_run_telemetry=decoded_chaos_run_telemetry)
|
||||
result = elastic_search.push_telemetry(elastic_telemetry, elastic_telemetry_index)
|
||||
if result == -1:
|
||||
safe_logger.error(f"failed to save telemetry on elastic search: {chaos_output.to_json()}")
|
||||
|
||||
if config["telemetry"]["enabled"]:
|
||||
logging.info(f"telemetry data will be stored on s3 bucket folder: {telemetry_api_url}/download/{telemetry_request_id}")
|
||||
logging.info(f'telemetry data will be stored on s3 bucket folder: {telemetry_api_url}/files/'
|
||||
f'{(config["telemetry"]["telemetry_group"] if config["telemetry"]["telemetry_group"] else "default")}/'
|
||||
f'{telemetry_request_id}')
|
||||
logging.info(f"telemetry upload log: {safe_logger.log_file_name}")
|
||||
try:
|
||||
telemetry_k8s.send_telemetry(config["telemetry"], telemetry_request_id, chaos_telemetry)
|
||||
telemetry_k8s.put_cluster_events(telemetry_request_id, config["telemetry"], start_time, end_time)
|
||||
telemetry_k8s.put_critical_alerts(telemetry_request_id, config["telemetry"], summary)
|
||||
# prometheus data collection is available only on Openshift
|
||||
if config["telemetry"]["prometheus_backup"] and config["kraken"]["distribution"] == "openshift":
|
||||
safe_logger.info("archives download started:")
|
||||
prometheus_archive_files = telemetry_ocp.get_ocp_prometheus_data(config["telemetry"], telemetry_request_id)
|
||||
safe_logger.info("archives upload started:")
|
||||
telemetry_k8s.put_prometheus_data(config["telemetry"], prometheus_archive_files, telemetry_request_id)
|
||||
if config["telemetry"]["logs_backup"]:
|
||||
telemetry_ocp.put_ocp_logs(telemetry_request_id, config["telemetry"], start_time, end_time)
|
||||
if config["telemetry"]["prometheus_backup"]:
|
||||
prometheus_archive_files = ''
|
||||
if distribution == "openshift" :
|
||||
prometheus_archive_files = telemetry_ocp.get_ocp_prometheus_data(config["telemetry"], telemetry_request_id)
|
||||
else:
|
||||
if (config["telemetry"]["prometheus_namespace"] and
|
||||
config["telemetry"]["prometheus_pod_name"] and
|
||||
config["telemetry"]["prometheus_container_name"]):
|
||||
try:
|
||||
prometheus_archive_files = telemetry_k8s.get_prometheus_pod_data(
|
||||
config["telemetry"],
|
||||
telemetry_request_id,
|
||||
config["telemetry"]["prometheus_pod_name"],
|
||||
config["telemetry"]["prometheus_container_name"],
|
||||
config["telemetry"]["prometheus_namespace"]
|
||||
)
|
||||
except Exception as e:
|
||||
logging.error(f"failed to get prometheus backup with exception {str(e)}")
|
||||
else:
|
||||
logging.warning("impossible to backup prometheus,"
|
||||
"check if config contains telemetry.prometheus_namespace, "
|
||||
"telemetry.prometheus_pod_name and "
|
||||
"telemetry.prometheus_container_name")
|
||||
if prometheus_archive_files:
|
||||
safe_logger.info("starting prometheus archive upload:")
|
||||
telemetry_k8s.put_prometheus_data(config["telemetry"], prometheus_archive_files, telemetry_request_id)
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"failed to send telemetry data: {str(e)}")
|
||||
else:
|
||||
@@ -400,28 +557,52 @@ def main(cfg):
|
||||
if alert_profile:
|
||||
prometheus_plugin.alerts(
|
||||
prometheus,
|
||||
elastic_search,
|
||||
run_uuid,
|
||||
start_time,
|
||||
end_time,
|
||||
alert_profile,
|
||||
elastic_colllect_alerts,
|
||||
elastic_alerts_index
|
||||
)
|
||||
|
||||
else:
|
||||
logging.error("Alert profile is not defined")
|
||||
sys.exit(1)
|
||||
return 1
|
||||
#sys.exit(1)
|
||||
if enable_metrics:
|
||||
prometheus_plugin.metrics(prometheus,
|
||||
elastic_search,
|
||||
start_time,
|
||||
run_uuid,
|
||||
end_time,
|
||||
metrics_profile,
|
||||
elastic_collect_metrics,
|
||||
elastic_metrics_index)
|
||||
|
||||
if post_critical_alerts > 0:
|
||||
logging.error("Critical alerts are firing, please check; exiting")
|
||||
#sys.exit(2)
|
||||
return 2
|
||||
|
||||
if failed_post_scenarios:
|
||||
logging.error(
|
||||
"Post scenarios are still failing at the end of all iterations"
|
||||
)
|
||||
sys.exit(1)
|
||||
#sys.exit(2)
|
||||
return 2
|
||||
|
||||
run_dir = os.getcwd() + "/kraken.report"
|
||||
logging.info(
|
||||
"Successfully finished running Kraken. UUID for the run: "
|
||||
"%s. Report generated at %s. Exiting" % (run_uuid, run_dir)
|
||||
"%s. Report generated at %s. Exiting" % (run_uuid, report_file)
|
||||
)
|
||||
else:
|
||||
logging.error("Cannot find a config at %s, please check" % (cfg))
|
||||
sys.exit(1)
|
||||
#sys.exit(1)
|
||||
return 2
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
@@ -434,17 +615,109 @@ if __name__ == "__main__":
help="config location",
default="config/config.yaml",
)
parser.add_option(
"-o",
"--output",
dest="output",
help="output report location",
default="kraken.report",
)

parser.add_option(
"--junit-testcase",
dest="junit_testcase",
help="junit test case description",
default=None,
)

parser.add_option(
"--junit-testcase-path",
dest="junit_testcase_path",
help="junit test case path",
default=None,
)

parser.add_option(
"--junit-testcase-version",
dest="junit_testcase_version",
help="junit test case version",
default=None,
)

(options, args) = parser.parse_args()
report_file = options.output
tee_handler = TeeLogHandler()
handlers = [logging.FileHandler(report_file, mode="w"), logging.StreamHandler(), tee_handler]

logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[
logging.FileHandler("kraken.report", mode="w"),
logging.StreamHandler(),
],
handlers=handlers,
)
option_error = False

# used to check if there is any missing or wrong parameter that prevents
# the creation of the junit file
junit_error = False
junit_normalized_path = None
retval = 0
junit_start_time = time.time()
# checks if both mandatory options for junit are set
if options.junit_testcase_path and not options.junit_testcase:
logging.error("please set junit test case description with --junit-testcase [description] option")
option_error = True
junit_error = True

if options.junit_testcase and not options.junit_testcase_path:
logging.error("please set junit test case path with --junit-testcase-path [path] option")
option_error = True
junit_error = True

# normalized path
if options.junit_testcase:
junit_normalized_path = os.path.normpath(options.junit_testcase_path)

if not os.path.exists(junit_normalized_path):
logging.error(f"{junit_normalized_path} does not exist, please select a valid path")
option_error = True
junit_error = True

if not os.path.isdir(junit_normalized_path):
logging.error(f"{junit_normalized_path} is a file, please select a valid folder path")
option_error = True
junit_error = True

if not os.access(junit_normalized_path, os.W_OK):
logging.error(f"{junit_normalized_path} is not writable, please select a valid path")
option_error = True
junit_error = True

if options.cfg is None:
logging.error("Please check if you have passed the config")
sys.exit(1)
option_error = True

if option_error:
retval = 1
else:
main(options.cfg)
retval = main(options.cfg)

junit_endtime = time.time()

# checks the minimum required parameters to write the junit file
if junit_normalized_path and not junit_error:
junit_testcase_xml = get_junit_test_case(
success=True if retval == 0 else False,
time=int(junit_endtime - junit_start_time),
test_suite_name="krkn-test-suite",
test_case_description=options.junit_testcase,
test_stdout=tee_handler.get_output(),
test_version=options.junit_testcase_version
)
junit_testcase_file_path = f"{junit_normalized_path}/junit_krkn_{int(time.time())}.xml"
logging.info(f"writing junit XML testcase in {junit_testcase_file_path}")
with open(junit_testcase_file_path, "w") as stream:
stream.write(junit_testcase_xml)

sys.exit(retval)
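Taken together, the new options mean that a run which should also produce a JUnit report needs both --junit-testcase and --junit-testcase-path (and optionally --junit-testcase-version). A typical invocation, with placeholder values, would look like:

python run_kraken.py --config config/config.yaml --output kraken.report --junit-testcase "cluster chaos run" --junit-testcase-path /tmp/junit-results --junit-testcase-version 1.0.0

The XML is written as junit_krkn_<timestamp>.xml inside the given directory, and the test case's stdout is the full run log captured by TeeLogHandler.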
@@ -4,7 +4,7 @@ deployers:
connection: {}
deployer_name: kubernetes
log:
level: debug
level: error
logged_outputs:
error:
level: error

@@ -1,8 +1,13 @@
input_list:
- cpu_count: 1
cpu_load_percentage: 80
cpu_method: all
duration: 1s
kubeconfig: ''
namespace: default
node_selector: {}
- cpu_count: 1
cpu_load_percentage: 80
cpu_method: all
duration: 30
kubeconfig: ''
namespace: default
# set the node selector as a key-value pair eg.
# node_selector:
#   kubernetes.io/hostname: kind-worker2
node_selector: {}
Some files were not shown because too many files have changed in this diff.