Mirror of https://github.com/krkn-chaos/krkn.git (synced 2026-02-17 03:19:54 +00:00)

Compare commits (181 commits)
.github/PULL_REQUEST_TEMPLATE.md (new file, +10)
@@ -0,0 +1,10 @@
## Description
<!-- Provide a brief description of the changes made in this PR. -->

## Documentation
- [ ] **Is documentation needed for this update?**

If checked, a documentation PR must be created and merged in the [website repository](https://github.com/krkn-chaos/website/).

## Related Documentation PR (if applicable)
<!-- Add the link to the corresponding documentation PR in the website repository -->
.github/workflows/build.yml (deleted, -51)
@@ -1,51 +0,0 @@
name: Build Krkn
on:
  pull_request:

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Check out code
        uses: actions/checkout@v3
      - name: Create multi-node KinD cluster
        uses: redhat-chaos/actions/kind@main
      - name: Install Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.9'
          architecture: 'x64'
      - name: Install environment
        run: |
          sudo apt-get install build-essential python3-dev
          pip install --upgrade pip
          pip install -r requirements.txt
      - name: Run unit tests
        run: python -m coverage run -a -m unittest discover -s tests -v
      - name: Run CI
        run: |
          ./CI/run.sh
          cat ./CI/results.markdown >> $GITHUB_STEP_SUMMARY
          echo >> $GITHUB_STEP_SUMMARY
      - name: Upload CI logs
        uses: actions/upload-artifact@v3
        with:
          name: ci-logs
          path: CI/out
          if-no-files-found: error
      - name: Collect coverage report
        run: |
          python -m coverage html
      - name: Publish coverage report to job summary
        run: |
          pip install html2text
          html2text --ignore-images --ignore-links -b 0 htmlcov/index.html >> $GITHUB_STEP_SUMMARY
      - name: Upload coverage data
        uses: actions/upload-artifact@v3
        with:
          name: coverage
          path: htmlcov
          if-no-files-found: error
      - name: Check CI results
        run: grep Fail CI/results.markdown && false || true
.github/workflows/docker-image.yml (modified, 45 lines changed)
@@ -1,8 +1,7 @@
name: Docker Image CI
on:
  push:
    branches:
      - main
    tags: ['v[0-9].[0-9]+.[0-9]+']
  pull_request:

jobs:
@@ -12,19 +11,45 @@ jobs:
      - name: Check out code
        uses: actions/checkout@v3
      - name: Build the Docker images
        run: docker build --no-cache -t quay.io/redhat-chaos/krkn containers/
        if: startsWith(github.ref, 'refs/tags')
        run: |
          ./containers/compile_dockerfile.sh
          docker build --no-cache -t quay.io/krkn-chaos/krkn containers/ --build-arg TAG=${GITHUB_REF#refs/tags/}
          docker tag quay.io/krkn-chaos/krkn quay.io/redhat-chaos/krkn
          docker tag quay.io/krkn-chaos/krkn quay.io/krkn-chaos/krkn:${GITHUB_REF#refs/tags/}
          docker tag quay.io/krkn-chaos/krkn quay.io/redhat-chaos/krkn:${GITHUB_REF#refs/tags/}

      - name: Test Build the Docker images
        if: ${{ github.event_name == 'pull_request' }}
        run: |
          ./containers/compile_dockerfile.sh
          docker build --no-cache -t quay.io/krkn-chaos/krkn containers/ --build-arg PR_NUMBER=${{ github.event.pull_request.number }}
      - name: Login in quay
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        if: startsWith(github.ref, 'refs/tags')
        run: docker login quay.io -u ${QUAY_USER} -p ${QUAY_TOKEN}
        env:
          QUAY_USER: ${{ secrets.QUAY_USERNAME }}
          QUAY_TOKEN: ${{ secrets.QUAY_PASSWORD }}
      - name: Push the KrknChaos Docker images
        if: startsWith(github.ref, 'refs/tags')
        run: |
          docker push quay.io/krkn-chaos/krkn
          docker push quay.io/krkn-chaos/krkn:${GITHUB_REF#refs/tags/}
      - name: Login in to redhat-chaos quay
        if: startsWith(github.ref, 'refs/tags/v')
        run: docker login quay.io -u ${QUAY_USER} -p ${QUAY_TOKEN}
        env:
          QUAY_USER: ${{ secrets.QUAY_USER_1 }}
          QUAY_TOKEN: ${{ secrets.QUAY_TOKEN_1 }}
      - name: Push the Docker images
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        run: docker push quay.io/redhat-chaos/krkn
      - name: Push the RedHat Chaos Docker images
        if: startsWith(github.ref, 'refs/tags')
        run: |
          docker push quay.io/redhat-chaos/krkn
          docker push quay.io/redhat-chaos/krkn:${GITHUB_REF#refs/tags/}
      - name: Rebuild krkn-hub
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        if: startsWith(github.ref, 'refs/tags')
        uses: redhat-chaos/actions/krkn-hub@main
        with:
          QUAY_USER: ${{ secrets.QUAY_USER_1 }}
          QUAY_TOKEN: ${{ secrets.QUAY_TOKEN_1 }}
          QUAY_USER: ${{ secrets.QUAY_USERNAME }}
          QUAY_TOKEN: ${{ secrets.QUAY_PASSWORD }}
          AUTOPUSH: ${{ secrets.AUTOPUSH }}
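The tag-derived image names above rely on plain bash parameter expansion rather than any GitHub-specific helper. A minimal sketch, assuming a hypothetical tag `v1.2.3`:

```bash
# ${GITHUB_REF#refs/tags/} strips the shortest leading match of "refs/tags/",
# leaving just the tag that becomes the image tag (hypothetical tag shown).
GITHUB_REF="refs/tags/v1.2.3"
TAG="${GITHUB_REF#refs/tags/}"
echo "$TAG"                            # -> v1.2.3
echo "quay.io/krkn-chaos/krkn:$TAG"    # -> quay.io/krkn-chaos/krkn:v1.2.3
```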
.github/workflows/functional_tests.yaml (deleted, -129)
@@ -1,129 +0,0 @@
on: issue_comment

jobs:
  check_user:
    # This job only runs for pull request comments
    name: Check User Authorization
    env:
      USERS: ${{vars.USERS}}
    if: contains(github.event.comment.body, '/funtest') && contains(github.event.comment.html_url, '/pull/')
    runs-on: ubuntu-latest
    steps:
      - name: Check User
        run: |
          for name in `echo $USERS`
          do
            name="${name//$'\r'/}"
            name="${name//$'\n'/}"
            if [ $name == "${{github.event.sender.login}}" ]
            then
              echo "user ${{github.event.sender.login}} authorized, action started..."
              exit 0
            fi
          done
          echo "user ${{github.event.sender.login}} is not allowed to run functional tests Action"
          exit 1
  pr_commented:
    # This job only runs for pull request comments containing /functional
    name: Functional Tests
    if: contains(github.event.comment.body, '/funtest') && contains(github.event.comment.html_url, '/pull/')
    runs-on: ubuntu-latest
    needs:
      - check_user
    steps:
      - name: Check out Kraken
        uses: actions/checkout@v3
      - name: Checkout Pull Request
        run: gh pr checkout ${{ github.event.issue.number }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Install OC CLI
        uses: redhat-actions/oc-installer@v1
        with:
          oc_version: latest
      - name: Install python 3.9
        uses: actions/setup-python@v4
        with:
          python-version: '3.9'
      - name: Setup kraken dependencies
        run: pip install -r requirements.txt
      - name: Create Workdir & export the path
        run: |
          mkdir workdir
          echo "WORKDIR_PATH=`pwd`/workdir" >> $GITHUB_ENV
      - name: Generate run id
        run: |
          echo "RUN_ID=`date +%s`" > $GITHUB_ENV
          echo "Run Id: ${RUN_ID}"
      - name: Write Pull Secret
        env:
          PULLSECRET_BASE64: ${{ secrets.PS_64 }}
        run: |
          echo "$PULLSECRET_BASE64" | base64 --decode > pullsecret.txt
      - name: Write Boot Private Key
        env:
          BOOT_KEY: ${{ secrets.CRC_KEY_FILE }}
        run: |
          echo -n "$BOOT_KEY" > key.txt
      - name: Teardown CRC (Post Action)
        uses: webiny/action-post-run@3.0.0
        id: post-run-command
        with:
          run: podman run --rm -v "${{ github.workspace }}:/workspace:z" -e AWS_ACCESS_KEY_ID="${{ secrets.AWS_ACCESS_KEY_ID }}" -e AWS_SECRET_ACCESS_KEY="${{ secrets.AWS_SECRET_ACCESS_KEY }}" -e AWS_DEFAULT_REGION=us-west-2 quay.io/crcont/crc-cloud:v0.0.2 destroy --project-name "chaos-funtest-${{ env.RUN_ID }}" --backed-url "s3://krkn-crc-state/${{ env.RUN_ID }}" --provider "aws"
      - name: Create cluster
        run: |
          podman run --name crc-cloud-create --rm \
            -v ${PWD}:/workspace:z \
            -e AWS_ACCESS_KEY_ID="${{ secrets.AWS_ACCESS_KEY_ID }}" \
            -e AWS_SECRET_ACCESS_KEY="${{ secrets.AWS_SECRET_ACCESS_KEY }}" \
            -e AWS_DEFAULT_REGION="us-west-2" \
            quay.io/crcont/crc-cloud:v0.0.2 \
            create aws \
            --project-name "chaos-funtest-${RUN_ID}" \
            --backed-url "s3://krkn-crc-state/${RUN_ID}" \
            --output "/workspace" \
            --aws-ami-id "ami-00f5eaf98cf42ef9f" \
            --pullsecret-filepath /workspace/pullsecret.txt \
            --key-filepath /workspace/key.txt

      - name: Setup kubeconfig
        continue-on-error: true
        run: |
          ssh -o StrictHostKeyChecking=no -i id_rsa core@$(cat host) "cat /opt/kubeconfig" > kubeconfig
          sed -i "s/https:\/\/api.crc.testing:6443/https:\/\/`cat host`.nip.io:6443/g" kubeconfig
          echo "KUBECONFIG=${PWD}/kubeconfig" > $GITHUB_ENV

      - name: Example deployment, GitHub Action env init
        env:
          NAMESPACE: test-namespace
          DEPLOYMENT_NAME: test-nginx
        run: ./CI/CRC/init_github_action.sh
      - name: Setup test suite
        run: |
          yq -i '.kraken.port="8081"' CI/config/common_test_config.yaml
          yq -i '.kraken.signal_address="0.0.0.0"' CI/config/common_test_config.yaml
          yq -i '.kraken.kubeconfig_path="'${KUBECONFIG}'"' CI/config/common_test_config.yaml
          echo "test_app_outages_gh" > ./CI/tests/my_tests
          echo "test_container" >> ./CI/tests/my_tests
          echo "test_namespace" >> ./CI/tests/my_tests
          echo "test_net_chaos" >> ./CI/tests/my_tests
          echo "test_time" >> ./CI/tests/my_tests

      - name: Print affected config files
        run: |
          echo -e "## CI/config/common_test_config.yaml\n\n"
          cat CI/config/common_test_config.yaml

      - name: Running test suite
        run: |
          ./CI/run.sh
      - name: Print test output
        run: cat CI/out/*
      - name: Create coverage report
        run: |
          echo "# Test results" > $GITHUB_STEP_SUMMARY
          cat CI/results.markdown >> $GITHUB_STEP_SUMMARY
          echo "# Test coverage" >> $GITHUB_STEP_SUMMARY
          python -m coverage report --format=markdown >> $GITHUB_STEP_SUMMARY
.github/workflows/require-docs.yml (new file, +45)
@@ -0,0 +1,45 @@
name: Require Documentation Update
on:
  pull_request:
    types: [opened, edited, synchronize]
    branches:
      - main
jobs:
  check-docs:
    name: Check Documentation Update
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Check if Documentation is Required
        id: check_docs
        run: |
          echo "Checking PR body for documentation checkbox..."
          # Read the PR body from the GitHub event payload
          if echo "${{ github.event.pull_request.body }}" | grep -qi '\[x\].*documentation needed'; then
            echo "Documentation required detected."
            echo "docs_required=true" >> $GITHUB_OUTPUT
          else
            echo "Documentation not required."
            echo "docs_required=false" >> $GITHUB_OUTPUT
          fi

      - name: Enforce Documentation Update (if required)
        if: steps.check_docs.outputs.docs_required == 'true'
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          # Retrieve feature branch and repository owner from the GitHub context
          FEATURE_BRANCH="${{ github.head_ref }}"
          REPO_OWNER="${{ github.repository_owner }}"
          WEBSITE_REPO="website"
          echo "Searching for a merged documentation PR for feature branch: $FEATURE_BRANCH in $REPO_OWNER/$WEBSITE_REPO..."
          MERGED_PR=$(gh pr list --repo "$REPO_OWNER/$WEBSITE_REPO" --state merged --json headRefName,title,url | jq -r \
            --arg FEATURE_BRANCH "$FEATURE_BRANCH" '.[] | select(.title | contains($FEATURE_BRANCH)) | .url')
          if [[ -z "$MERGED_PR" ]]; then
            echo ":x: Documentation PR for branch '$FEATURE_BRANCH' is required and has not been merged."
            exit 1
          else
            echo ":white_check_mark: Found merged documentation PR: $MERGED_PR"
          fi
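Both gates in this workflow are plain text checks, so they can be exercised locally. A sketch with made-up inputs (the PR body and the JSON standing in for `gh pr list` output are hypothetical):

```bash
# 1) Checkbox detection: case-insensitive grep for a ticked "documentation needed" box.
PR_BODY='- [x] **Is documentation needed for this update?**'
echo "$PR_BODY" | grep -qi '\[x\].*documentation needed' && echo "docs_required=true"

# 2) Merged-PR lookup: the jq filter keeps PRs whose title mentions the feature branch.
FEATURE_BRANCH="my-feature"
echo '[{"headRefName":"docs-my-feature","title":"docs for my-feature","url":"https://example.com/pr/1"}]' |
  jq -r --arg FEATURE_BRANCH "$FEATURE_BRANCH" \
    '.[] | select(.title | contains($FEATURE_BRANCH)) | .url'
# -> https://example.com/pr/1
```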
.github/workflows/tests.yml (new file, +198)
@@ -0,0 +1,198 @@
name: Functional & Unit Tests
on:
  pull_request:
  push:
    branches:
      - main
jobs:
  tests:
    # Common steps
    name: Functional & Unit Tests
    runs-on: ubuntu-latest
    steps:
      - name: Check out code
        uses: actions/checkout@v3
      - name: Create multi-node KinD cluster
        uses: redhat-chaos/actions/kind@main
      - name: Install Helm & add repos
        run: |
          curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
          helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
          helm repo add stable https://charts.helm.sh/stable
          helm repo update
      - name: Deploy prometheus & Port Forwarding
        run: |
          kubectl create namespace prometheus-k8s
          helm install \
            --wait --timeout 360s \
            kind-prometheus \
            prometheus-community/kube-prometheus-stack \
            --namespace prometheus-k8s \
            --set prometheus.service.nodePort=30000 \
            --set prometheus.service.type=NodePort \
            --set grafana.service.nodePort=31000 \
            --set grafana.service.type=NodePort \
            --set alertmanager.service.nodePort=32000 \
            --set alertmanager.service.type=NodePort \
            --set prometheus-node-exporter.service.nodePort=32001 \
            --set prometheus-node-exporter.service.type=NodePort

          SELECTOR=`kubectl -n prometheus-k8s get service kind-prometheus-kube-prome-prometheus -o wide --no-headers=true | awk '{ print $7 }'`
          POD_NAME=`kubectl -n prometheus-k8s get pods --selector="$SELECTOR" --no-headers=true | awk '{ print $1 }'`
          kubectl -n prometheus-k8s port-forward $POD_NAME 9090:9090 &
          sleep 5
      - name: Install Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.9'
          architecture: 'x64'
      - name: Install environment
        run: |
          sudo apt-get install build-essential python3-dev
          pip install --upgrade pip
          pip install -r requirements.txt

      - name: Deploy test workloads
        run: |
          kubectl apply -f CI/templates/outage_pod.yaml
          kubectl wait --for=condition=ready pod -l scenario=outage --timeout=300s
          kubectl apply -f CI/templates/container_scenario_pod.yaml
          kubectl wait --for=condition=ready pod -l scenario=container --timeout=300s
          kubectl create namespace namespace-scenario
          kubectl apply -f CI/templates/time_pod.yaml
          kubectl wait --for=condition=ready pod -l scenario=time-skew --timeout=300s
          kubectl apply -f CI/templates/service_hijacking.yaml
          kubectl wait --for=condition=ready pod -l "app.kubernetes.io/name=proxy" --timeout=300s
      - name: Get Kind nodes
        run: |
          kubectl get nodes --show-labels=true
      # Pull request only steps
      - name: Run unit tests
        if: github.event_name == 'pull_request'
        run: python -m coverage run -a -m unittest discover -s tests -v

      - name: Setup Pull Request Functional Tests
        if: |
          github.event_name == 'pull_request'
        run: |
          yq -i '.kraken.port="8081"' CI/config/common_test_config.yaml
          yq -i '.kraken.signal_address="0.0.0.0"' CI/config/common_test_config.yaml
          yq -i '.kraken.performance_monitoring="localhost:9090"' CI/config/common_test_config.yaml
          echo "test_service_hijacking" > ./CI/tests/functional_tests
          echo "test_app_outages" >> ./CI/tests/functional_tests
          echo "test_container" >> ./CI/tests/functional_tests
          echo "test_namespace" >> ./CI/tests/functional_tests
          echo "test_net_chaos" >> ./CI/tests/functional_tests
          echo "test_time" >> ./CI/tests/functional_tests
          echo "test_cpu_hog" >> ./CI/tests/functional_tests
          echo "test_memory_hog" >> ./CI/tests/functional_tests
          echo "test_io_hog" >> ./CI/tests/functional_tests

      # Push on main only steps + all other functional to collect coverage
      # for the badge
      - name: Configure AWS Credentials
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        uses: aws-actions/configure-aws-credentials@v4
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region : ${{ secrets.AWS_REGION }}
      - name: Setup Post Merge Request Functional Tests
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        run: |
          yq -i '.kraken.port="8081"' CI/config/common_test_config.yaml
          yq -i '.kraken.signal_address="0.0.0.0"' CI/config/common_test_config.yaml
          yq -i '.kraken.performance_monitoring="localhost:9090"' CI/config/common_test_config.yaml
          yq -i '.telemetry.username="${{secrets.TELEMETRY_USERNAME}}"' CI/config/common_test_config.yaml
          yq -i '.telemetry.password="${{secrets.TELEMETRY_PASSWORD}}"' CI/config/common_test_config.yaml
          echo "test_telemetry" > ./CI/tests/functional_tests
          echo "test_service_hijacking" >> ./CI/tests/functional_tests
          echo "test_app_outages" >> ./CI/tests/functional_tests
          echo "test_container" >> ./CI/tests/functional_tests
          echo "test_namespace" >> ./CI/tests/functional_tests
          echo "test_net_chaos" >> ./CI/tests/functional_tests
          echo "test_time" >> ./CI/tests/functional_tests
          echo "test_cpu_hog" >> ./CI/tests/functional_tests
          echo "test_memory_hog" >> ./CI/tests/functional_tests
          echo "test_io_hog" >> ./CI/tests/functional_tests

      # Final common steps
      - name: Run Functional tests
        env:
          AWS_BUCKET: ${{ secrets.AWS_BUCKET }}
        run: |
          ./CI/run.sh
          cat ./CI/results.markdown >> $GITHUB_STEP_SUMMARY
          echo >> $GITHUB_STEP_SUMMARY
      - name: Upload CI logs
        uses: actions/upload-artifact@v4
        with:
          name: ci-logs
          path: CI/out
          if-no-files-found: error
      - name: Collect coverage report
        run: |
          python -m coverage html
          python -m coverage json
      - name: Publish coverage report to job summary
        run: |
          pip install html2text
          html2text --ignore-images --ignore-links -b 0 htmlcov/index.html >> $GITHUB_STEP_SUMMARY
      - name: Upload coverage data
        uses: actions/upload-artifact@v4
        with:
          name: coverage
          path: htmlcov
          if-no-files-found: error
      - name: Upload json coverage
        uses: actions/upload-artifact@v4
        with:
          name: coverage.json
          path: coverage.json
          if-no-files-found: error
      - name: Check CI results
        run: grep Fail CI/results.markdown && false || true
  badge:
    permissions:
      contents: write
    name: Generate Coverage Badge
    runs-on: ubuntu-latest
    needs:
      - tests
    if: github.ref == 'refs/heads/main' && github.event_name == 'push'
    steps:
      - name: Check out doc repo
        uses: actions/checkout@master
        with:
          repository: krkn-chaos/krkn-lib-docs
          path: krkn-lib-docs
          ssh-key: ${{ secrets.KRKN_LIB_DOCS_PRIV_KEY }}
      - name: Download json coverage
        uses: actions/download-artifact@v4
        with:
          name: coverage.json
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: 3.9
      - name: Copy badge on GitHub Page Repo
        env:
          COLOR: yellow
        run: |
          # generate coverage badge on previously calculated total coverage
          # and copy in the docs page
          export TOTAL=$(python -c "import json;print(json.load(open('coverage.json'))['totals']['percent_covered_display'])")
          [[ $TOTAL > 40 ]] && COLOR=green
          echo "TOTAL: $TOTAL"
          echo "COLOR: $COLOR"
          curl "https://img.shields.io/badge/coverage-$TOTAL%25-$COLOR" > ./krkn-lib-docs/coverage_badge_krkn.svg
      - name: Push updated Coverage Badge
        run: |
          cd krkn-lib-docs
          git add .
          git config user.name "krkn-chaos"
          git config user.email "<>"
          git commit -m "[KRKN] Coverage Badge ${GITHUB_REF##*/}" || echo "no changes to commit"
          git push
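The suite assumes the backgrounded `kubectl port-forward` is serving Prometheus on localhost:9090 after the five-second sleep; if the forward dies, everything downstream fails. A cheap sanity probe, assuming the standard Prometheus lifecycle endpoints:

```bash
# Prometheus exposes plain-text readiness/liveness probes; a 200 means the
# port-forward is up and the server is ready to serve queries (-f makes curl
# fail on HTTP errors).
curl -sf http://localhost:9090/-/ready   && echo "prometheus reachable"
curl -sf http://localhost:9090/-/healthy && echo "prometheus healthy"
```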
.gitignore (modified, 3 lines changed)
@@ -16,6 +16,7 @@ __pycache__/*
*.out
kube-burner*
kube_burner*
recommender_*.json

# Project files
.ropeproject
@@ -61,7 +62,7 @@ inspect.local.*
!CI/config/common_test_config.yaml
CI/out/*
CI/ci_results
CI/scenarios/*node.yaml
CI/legacy/*node.yaml
CI/results.markdown

#env
ADOPTERS.md (new file, +7)
@@ -0,0 +1,7 @@
# Krkn Adopters

This is a list of organizations that have publicly acknowledged usage of Krkn and shared details of how they are leveraging it in their environment for chaos engineering use cases. Do you want to add yourself to this list? Please fork the repository and open a PR with the required change.

| Organization | Since | Website | Use-Case |
|:-|:-|:-|:-|
| MarketAxess | 2024 | https://www.marketaxess.com/ | Kraken enables us to achieve our goal of increasing the reliability of our cloud products on Kubernetes. The tool allows us to automatically run various chaos scenarios, identify resilience and performance bottlenecks, and seamlessly restore the system to its original state once scenarios finish. These chaos scenarios include pod disruptions, node (EC2) outages, simulating availability zone (AZ) outages, and filling up storage spaces like EBS and EFS. The community is highly responsive to requests and works on expanding the tool's capabilities. MarketAxess actively contributes to the project, adding features such as the ability to leverage existing network ACLs and proposing several feature improvements to enhance test coverage. |
CI/CRC/deployment.yaml (deleted, -44)
@@ -1,44 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
  name: $NAMESPACE

---

apiVersion: v1
kind: Service
metadata:
  name: $DEPLOYMENT_NAME-service
  namespace: $NAMESPACE
spec:
  selector:
    app: $DEPLOYMENT_NAME
  ports:
    - name: http
      port: 80
      targetPort: 8080
  type: ClusterIP

---

apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: $NAMESPACE
  name: $DEPLOYMENT_NAME-deployment
spec:
  replicas: 3
  selector:
    matchLabels:
      app: $DEPLOYMENT_NAME
  template:
    metadata:
      labels:
        app: $DEPLOYMENT_NAME
    spec:
      containers:
        - name: $DEPLOYMENT_NAME
          image: nginxinc/nginx-unprivileged:stable-alpine
          ports:
            - name: http
              containerPort: 8080
CI/CRC/init_github_action.sh (deleted, -58)
@@ -1,58 +0,0 @@
#!/bin/bash
SCRIPT_PATH=./CI/CRC
DEPLOYMENT_PATH=$SCRIPT_PATH/deployment.yaml

[[ ! -f $DEPLOYMENT_PATH ]] && echo "[ERROR] please run $0 from GitHub action root directory" && exit 1
[[ -z $DEPLOYMENT_NAME ]] && echo "[ERROR] please set \$DEPLOYMENT_NAME environment variable" && exit 1
[[ -z $NAMESPACE ]] && echo "[ERROR] please set \$NAMESPACE environment variable" && exit 1


OPENSSL=`which openssl 2>/dev/null`
[[ $? != 0 ]] && echo "[ERROR]: openssl missing, please install it and try again" && exit 1
OC=`which oc 2>/dev/null`
[[ $? != 0 ]] && echo "[ERROR]: oc missing, please install it and try again" && exit 1
SED=`which sed 2>/dev/null`
[[ $? != 0 ]] && echo "[ERROR]: sed missing, please install it and try again" && exit 1
JQ=`which jq 2>/dev/null`
[[ $? != 0 ]] && echo "[ERROR]: jq missing, please install it and try again" && exit 1
ENVSUBST=`which envsubst 2>/dev/null`
[[ $? != 0 ]] && echo "[ERROR]: envsubst missing, please install it and try again" && exit 1


API_PORT="6443"
API_ADDRESS="https://api.`cat host`.nip.io:${API_PORT}"
FQN=$DEPLOYMENT_NAME.apps.$API_ADDRESS


echo "[INF] deploying example deployment: $DEPLOYMENT_NAME in namespace: $NAMESPACE"
$ENVSUBST < $DEPLOYMENT_PATH | $OC apply -f - > /dev/null 2>&1

echo "[INF] creating SSL self-signed certificates for route https://$FQN"
$OPENSSL genrsa -out servercakey.pem > /dev/null 2>&1
$OPENSSL req -new -x509 -key servercakey.pem -out serverca.crt -subj "/CN=$FQN/O=Red Hat Inc./C=US" > /dev/null 2>&1
$OPENSSL genrsa -out server.key > /dev/null 2>&1
$OPENSSL req -new -key server.key -out server_reqout.txt -subj "/CN=$FQN/O=Red Hat Inc./C=US" > /dev/null 2>&1
$OPENSSL x509 -req -in server_reqout.txt -days 3650 -sha256 -CAcreateserial -CA serverca.crt -CAkey servercakey.pem -out server.crt > /dev/null 2>&1
echo "[INF] creating deployment: $DEPLOYMENT_NAME public route: https://$FQN"
$OC create route --namespace $NAMESPACE edge --service=$DEPLOYMENT_NAME-service --cert=server.crt --key=server.key --ca-cert=serverca.crt --hostname="$FQN" > /dev/null 2>&1


echo "[INF] setting github action environment variables"

NODE_NAME="`$OC get nodes -o json | $JQ -r '.items[0].metadata.name'`"
COVERAGE_FILE="`pwd`/coverage.md"
echo "DEPLOYMENT_NAME=$DEPLOYMENT_NAME" >> $GITHUB_ENV
echo "DEPLOYMENT_FQN=$FQN" >> $GITHUB_ENV
echo "API_ADDRESS=$API_ADDRESS" >> $GITHUB_ENV
echo "API_PORT=$API_PORT" >> $GITHUB_ENV
echo "NODE_NAME=$NODE_NAME" >> $GITHUB_ENV
echo "NAMESPACE=$NAMESPACE" >> $GITHUB_ENV
echo "COVERAGE_FILE=$COVERAGE_FILE" >> $GITHUB_ENV

echo "[INF] deployment fully qualified name will be available in \${{ env.DEPLOYMENT_NAME }} with value $DEPLOYMENT_NAME"
echo "[INF] deployment name will be available in \${{ env.DEPLOYMENT_FQN }} with value $FQN"
echo "[INF] OCP API address will be available in \${{ env.API_ADDRESS }} with value $API_ADDRESS"
echo "[INF] OCP API port will be available in \${{ env.API_PORT }} with value $API_PORT"
echo "[INF] OCP node name will be available in \${{ env.NODE_NAME }} with value $NODE_NAME"
echo "[INF] coverage file will ve available in \${{ env.COVERAGE_FILE }} with value $COVERAGE_FILE"
CI/README.md (modified)
@@ -1,7 +1,7 @@
## CI Tests

### First steps
Edit [my_tests](tests/my_tests) with tests you want to run
Edit [functional_tests](tests/functional_tests) with tests you want to run

### How to run
```./CI/run.sh```
@@ -11,7 +11,7 @@ This will run kraken using python, make sure python3 is set up and configured pr

### Adding a test case

1. Add in simple scenario yaml file to execute under [../CI/scenarios/](scenarios)
1. Add in simple scenario yaml file to execute under [../CI/scenarios/](legacy)

2. Copy [test_application_outages.sh](tests/test_app_outages.sh) for example on how to get started

@@ -27,7 +27,7 @@ This will run kraken using python, make sure python3 is set up and configured pr

e. 15: Make sure name of config in line 14 matches what you pass on this line

4. Add test name to [my_tests](../CI/tests/my_tests) file
4. Add test name to [functional_tests](../CI/tests/functional_tests) file

a. This will be the name of the file without ".sh"
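Putting those steps together, a new test is mostly boilerplate around a scenario file. A minimal skeleton following the pattern of the test scripts in this repository (`test_example`, `example_scenarios`, and `scenarios/kube/example.yml` are placeholders):

```bash
# Hypothetical CI/tests/test_example.sh, modeled on the existing test scripts.
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT

function functional_test_example {
  # Point krkn at the scenario under test (placeholder type and path).
  export scenario_type="example_scenarios"
  export scenario_file="scenarios/kube/example.yml"
  export post_config=""
  # Materialize the config, then run krkn under coverage like the other tests.
  envsubst < CI/config/common_test_config.yaml > CI/config/example.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/example.yaml
  echo "Example scenario test: Success"
}

functional_test_example
```

Finish by adding `test_example` (the file name without `.sh`) to [functional_tests](../CI/tests/functional_tests).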
CI/config/common_test_config.yaml (modified)
@@ -1,5 +1,5 @@
kraken:
    distribution: openshift # Distribution can be kubernetes or openshift.
    distribution: kubernetes # Distribution can be kubernetes or openshift.
    kubeconfig_path: ~/.kube/config # Path to kubeconfig.
    exit_on_failure: False # Exit when a post action scenario fails.
    litmus_version: v1.13.6 # Litmus version to install.
@@ -29,9 +29,12 @@ tunings:
    daemon_mode: False # Iterations are set to infinity which means that the kraken will cause chaos forever.
telemetry:
    enabled: False # enable/disables the telemetry collection feature
    api_url: https://ulnmf9xv7j.execute-api.us-west-2.amazonaws.com/production #telemetry service endpoint
    username: username # telemetry service username
    password: password # telemetry service password
    api_url: https://yvnn4rfoi7.execute-api.us-west-2.amazonaws.com/test #telemetry service endpoint
    username: $TELEMETRY_USERNAME # telemetry service username
    password: $TELEMETRY_PASSWORD # telemetry service password
    prometheus_namespace: 'prometheus-k8s' # prometheus namespace
    prometheus_pod_name: 'prometheus-kind-prometheus-kube-prome-prometheus-0' # prometheus pod_name
    prometheus_container_name: 'prometheus'
    prometheus_backup: True # enables/disables prometheus data collection
    full_prometheus_backup: False # if is set to False only the /prometheus/wal folder will be downloaded.
    backup_threads: 5 # number of telemetry download/upload threads
@@ -39,3 +42,31 @@ telemetry:
    max_retries: 0 # maximum number of upload retries (if 0 will retry forever)
    run_tag: '' # if set, this will be appended to the run folder in the bucket (useful to group the runs)
    archive_size: 10000 # the size of the prometheus data archive size in KB. The lower the size of archive is
    logs_backup: True
    logs_filter_patterns:
        - "(\\w{3}\\s\\d{1,2}\\s\\d{2}:\\d{2}:\\d{2}\\.\\d+).+" # Sep 9 11:20:36.123425532
        - "kinit (\\d+/\\d+/\\d+\\s\\d{2}:\\d{2}:\\d{2})\\s+" # kinit 2023/09/15 11:20:36 log
        - "(\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d+Z).+" # 2023-09-15T11:20:36.123425532Z log
    oc_cli_path: /usr/bin/oc # optional, if not specified will be search in $PATH
    events_backup: True # enables/disables cluster events collection
    telemetry_group: "funtests"
elastic:
    enable_elastic: False
    collect_metrics: False
    collect_alerts: False
    verify_certs: False
    elastic_url: "https://192.168.39.196" # To track results in elasticsearch, give url to server here; will post telemetry details when url and index not blank
    elastic_port: 32766
    username: "elastic"
    password: "test"
    metrics_index: "krkn-metrics"
    alerts_index: "krkn-alerts"
    telemetry_index: "krkn-telemetry"

health_checks: # Utilizing health check endpoints to observe application behavior during chaos injection.
    interval: # Interval in seconds to perform health checks, default value is 2 seconds
    config: # Provide list of health check configurations for applications
        - url: # Provide application endpoint
          bearer_token: # Bearer token for authentication if any
          auth: # Provide authentication credentials (username , password) in tuple format if any, ex:("admin","secretpassword")
          exit_on_failure: # If value is True exits when health check failed for application, values can be True/False
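The `$TELEMETRY_USERNAME` / `$TELEMETRY_PASSWORD` placeholders above are resolved at test time: every test script pipes this file through `envsubst` before handing it to krkn. A quick sketch with hypothetical credentials:

```bash
# envsubst replaces $VAR references in the template with values from the
# environment; unset variables become empty strings (credentials are made up).
export TELEMETRY_USERNAME="ci-user"
export TELEMETRY_PASSWORD="ci-secret"
envsubst < CI/config/common_test_config.yaml > CI/config/resolved.yaml
grep 'username\|password' CI/config/resolved.yaml   # placeholders now inlined
```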
CI/run.sh (modified, 42 lines changed)
@@ -1,15 +1,14 @@
#!/bin/bash
set -x
MAX_RETRIES=60

OC=`which oc 2>/dev/null`
[[ $? != 0 ]] && echo "[ERROR]: oc missing, please install it and try again" && exit 1
KUBECTL=`which kubectl 2>/dev/null`
[[ $? != 0 ]] && echo "[ERROR]: kubectl missing, please install it and try again" && exit 1

wait_cluster_become_ready() {
  COUNT=1
  until `$OC get namespace > /dev/null 2>&1`
  until `$KUBECTL get namespace > /dev/null 2>&1`
  do
    echo "[INF] waiting OpenShift to become ready, after $COUNT check"
    echo "[INF] waiting Kubernetes to become ready, after $COUNT check"
    sleep 3
    [[ $COUNT == $MAX_RETRIES ]] && echo "[ERR] max retries exceeded, failing" && exit 1
    ((COUNT++))
@@ -18,9 +17,9 @@ wait_cluster_become_ready() {

ci_tests_loc="CI/tests/my_tests"
ci_tests_loc="CI/tests/functional_tests"

echo "running test suit consisting of ${ci_tests}"
echo -e "********* Running Functional Tests Suite *********\n\n"

rm -rf CI/out

@@ -37,9 +36,32 @@ echo 'Test | Result | Duration' >> $results
echo '-----------------------|--------|---------' >> $results

# Run each test
for test_name in `cat CI/tests/my_tests`
failed_tests=()
for test_name in `cat CI/tests/functional_tests`
do
  wait_cluster_become_ready
  ./CI/run_test.sh $test_name $results
  #wait_cluster_become_ready
  return_value=`./CI/run_test.sh $test_name $results`
  if [[ $return_value == 1 ]]
  then
    echo "Failed"
    failed_tests+=("$test_name")
  fi
  wait_cluster_become_ready
done

if (( ${#failed_tests[@]}>0 ))
then
  echo -e "\n\n======================================================================"
  echo -e "\n FUNCTIONAL TESTS FAILED ${failed_tests[*]} ABORTING"
  echo -e "\n======================================================================\n\n"

  for test in "${failed_tests[@]}"
  do
    echo -e "\n********** $test KRKN RUN OUTPUT **********\n"
    cat "CI/out/$test.out"
    echo -e "\n********************************************\n\n\n\n"
  done

  exit 1
fi
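The loop above treats `run_test.sh`'s standard output as a machine-readable verdict: the child script prints its human-readable progress to stderr (`>&2`) and echoes only `0` or `1` to stdout, which the backticks capture. A self-contained sketch of the same protocol (the `report` function is a stand-in for `CI/run_test.sh`):

```bash
# Logs go to stderr, the verdict goes to stdout; command substitution
# captures only stdout, so the log lines don't pollute the verdict.
report() { echo "running $1..." >&2; echo 1; }    # stand-in that always fails
verdict=$(report test_example)                    # hypothetical test name
[[ $verdict == 1 ]] && echo "test_example failed"
```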
CI/run_test.sh (modified)
@@ -1,5 +1,4 @@
#!/bin/bash
set -x
readonly SECONDS_PER_HOUR=3600
readonly SECONDS_PER_MINUTE=60
function get_time_format() {
@@ -14,9 +13,7 @@ ci_test=`echo $1`

results_file=$2

echo -e "\n======================================================================"
echo -e "              CI test for ${ci_test} "
echo -e "======================================================================\n"
echo -e "test: ${ci_test}" >&2

ci_results="CI/out/$ci_test.out"
# Test ci
@@ -28,13 +25,16 @@ then
  # if the test passes update the results and complete
  duration=$SECONDS
  duration=$(get_time_format $duration)
  echo "$ci_test: Successful"
  echo -e "> $ci_test: Successful\n" >&2
  echo "$ci_test | Pass | $duration" >> $results_file
  count=$retries
  # return value for run.sh
  echo 0
else
  duration=$SECONDS
  duration=$(get_time_format $duration)
  echo "$ci_test: Failed"
  echo -e "> $ci_test: Failed\n" >&2
  echo "$ci_test | Fail | $duration" >> $results_file
  echo "Logs for "$ci_test
  # return value for run.sh
  echo 1
fi
CI/scenarios/app_outage.yaml (deleted, -5)
@@ -1,5 +0,0 @@
application_outage: # Scenario to create an outage of an application by blocking traffic
  duration: 10 # Duration in seconds after which the routes will be accessible
  namespace: openshift-monitoring # Namespace to target - all application routes will go inaccessible if pod selector is empty
  pod_selector: {} # Pods to target
  block: [Ingress, Egress] # It can be Ingress or Egress or Ingress, Egress

CI/scenarios/container_scenario.yml (deleted, -8)
@@ -1,8 +0,0 @@
scenarios:
  - name: "kill machine config container"
    namespace: "openshift-machine-config-operator"
    label_selector: "k8s-app=machine-config-server"
    container_name: "hello-openshift"
    action: "kill 1"
    count: 1
    retry_wait: 60

CI/scenarios/network_chaos.yaml (deleted, -6)
@@ -1,6 +0,0 @@
network_chaos: # Scenario to create an outage by simulating random variations in the network.
  duration: 10 # seconds
  instance_count: 1
  execution: serial
  egress:
    bandwidth: 100mbit

CI/scenarios/network_diagnostics_namespace.yaml (deleted, -7)
@@ -1,7 +0,0 @@
scenarios:
  - action: delete
    namespace: "^$openshift-network-diagnostics$"
    label_selector:
    runs: 1
    sleep: 15
    wait_time: 30

CI/scenarios/time_scenarios.yml (deleted, -5)
@@ -1,5 +0,0 @@
time_scenarios:
  - action: skew_time
    object_type: pod
    label_selector: k8s-app=etcd
    container_name: ""
CI/templates/container_scenario_pod.yaml (new file, +16)
@@ -0,0 +1,16 @@
apiVersion: v1
kind: Pod
metadata:
  name: container
  labels:
    scenario: container
spec:
  hostNetwork: true
  containers:
    - name: fedtools
      image: docker.io/fedora/tools
      command:
        - /bin/sh
        - -c
        - |
          sleep infinity
CI/templates/outage_pod.yaml (new file, +16)
@@ -0,0 +1,16 @@
apiVersion: v1
kind: Pod
metadata:
  name: outage
  labels:
    scenario: outage
spec:
  hostNetwork: true
  containers:
    - name: fedtools
      image: docker.io/fedora/tools
      command:
        - /bin/sh
        - -c
        - |
          sleep infinity
CI/templates/service_hijacking.yaml (new file, +29)
@@ -0,0 +1,29 @@
apiVersion: v1
kind: Pod
metadata:
  name: nginx
  labels:
    app.kubernetes.io/name: proxy
spec:
  containers:
    - name: nginx
      image: nginx:stable
      ports:
        - containerPort: 80
          name: http-web-svc

---
apiVersion: v1
kind: Service
metadata:
  name: nginx-service
spec:
  selector:
    app.kubernetes.io/name: proxy
  type: NodePort
  ports:
    - name: name-of-service-port
      protocol: TCP
      port: 80
      targetPort: http-web-svc
      nodePort: 30036
CI/templates/time_pod.yaml (new file, +16)
@@ -0,0 +1,16 @@
apiVersion: v1
kind: Pod
metadata:
  name: time-skew
  labels:
    scenario: time-skew
spec:
  hostNetwork: true
  containers:
    - name: fedtools
      image: docker.io/fedora/tools
      command:
        - /bin/sh
        - -c
        - |
          sleep infinity
CI/tests/common.sh (modified)
@@ -1,18 +1,26 @@
ERRORED=false

function finish {
  if [ $? -eq 1 ] && [ $ERRORED != "true" ]
  if [ $? != 0 ] && [ $ERRORED != "true" ]
  then
    error
  fi
}

function error {
  echo "Error caught."
  ERRORED=true
  exit_code=$?
  if [ $exit_code == 1 ]
  then
    echo "Error caught."
    ERRORED=true
  elif [ $exit_code == 2 ]
  then
    echo "Run with exit code 2 detected, it is expected, wrapping the exit code with 0 to avoid pipeline failure"
    exit 0
  fi
}

function get_node {
  worker_node=$(oc get nodes --no-headers | grep worker | head -n 1)
  worker_node=$(kubectl get nodes --no-headers | grep worker | head -n 1)
  export WORKER_NODE=$worker_node
}
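The reworked `error` handler keys off `$?`, which inside a trap still holds the status of the command that tripped it; an exit code of 2 is deliberately rewrapped as 0 so that runs which are expected to fail don't fail the pipeline. A runnable sketch of just that mechanism:

```bash
# Minimal sketch of the exit-code-2 rewrap (hypothetical failing command below).
function error {
  exit_code=$?                     # status of the command that triggered ERR
  if [ $exit_code == 2 ]; then
    echo "exit code 2 detected, wrapping with 0"
    exit 0                         # the script exits green
  fi
  echo "Error caught."
}
set -E                             # let the ERR trap fire inside functions too
trap error ERR
bash -c 'exit 2'                   # fails with 2 -> handler exits 0
```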
CI/tests/functional_tests (new file, +1)
@@ -0,0 +1 @@

CI/tests/my_tests (deleted, -1)
@@ -1 +0,0 @@
test_net_chaos
CI/tests/test_app_outages.sh (modified)
@@ -7,9 +7,11 @@ trap finish EXIT

function functional_test_app_outage {

  export scenario_type="application_outages"
  export scenario_file="CI/scenarios/app_outage.yaml"
  yq -i '.application_outage.duration=10' scenarios/openshift/app_outage.yaml
  yq -i '.application_outage.pod_selector={"scenario":"outage"}' scenarios/openshift/app_outage.yaml
  yq -i '.application_outage.namespace="default"' scenarios/openshift/app_outage.yaml
  export scenario_type="application_outages_scenarios"
  export scenario_file="scenarios/openshift/app_outage.yaml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/app_outage.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/app_outage.yaml
CI/tests/test_app_outages_gh.sh (deleted, -21)
@@ -1,21 +0,0 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT


function functional_test_app_outage {
  [ -z $DEPLOYMENT_NAME ] && echo "[ERR] DEPLOYMENT_NAME variable not set, failing." && exit 1
  yq -i '.application_outage.pod_selector={"app":"'$DEPLOYMENT_NAME'"}' CI/scenarios/app_outage.yaml
  yq -i '.application_outage.namespace="'$NAMESPACE'"' CI/scenarios/app_outage.yaml
  export scenario_type="application_outages"
  export scenario_file="CI/scenarios/app_outage.yaml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/app_outage.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/app_outage.yaml
  echo "App outage scenario test: Success"
}

functional_test_app_outage
CI/tests/test_container.sh (modified)
@@ -8,9 +8,11 @@ trap finish EXIT
pod_file="CI/scenarios/hello_pod.yaml"

function functional_test_container_crash {

  yq -i '.scenarios[0].namespace="default"' scenarios/openshift/container_etcd.yml
  yq -i '.scenarios[0].label_selector="scenario=container"' scenarios/openshift/container_etcd.yml
  yq -i '.scenarios[0].container_name="fedtools"' scenarios/openshift/container_etcd.yml
  export scenario_type="container_scenarios"
  export scenario_file="- CI/scenarios/container_scenario.yml"
  export scenario_file="scenarios/openshift/container_etcd.yml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/container_config.yaml
CI/tests/test_cpu_hog.sh (new file, +20)
@@ -0,0 +1,20 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT


function functional_test_cpu_hog {
  yq -i '.node_selector="kubernetes.io/hostname=kind-worker2"' scenarios/kube/cpu-hog.yml

  export scenario_type="hog_scenarios"
  export scenario_file="scenarios/kube/cpu-hog.yml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/cpu_hog.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/cpu_hog.yaml
  echo "CPU Hog: Success"
}

functional_test_cpu_hog
CI/tests/test_io_hog.sh (new file, +19)
@@ -0,0 +1,19 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT


function functional_test_io_hog {
  yq -i '.node_selector="kubernetes.io/hostname=kind-worker2"' scenarios/kube/io-hog.yml
  export scenario_type="hog_scenarios"
  export scenario_file="scenarios/kube/io-hog.yml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/io_hog.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/io_hog.yaml
  echo "IO Hog: Success"
}

functional_test_io_hog
CI/tests/test_memory_hog.sh (new file, +19)
@@ -0,0 +1,19 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT


function functional_test_memory_hog {
  yq -i '.node_selector="kubernetes.io/hostname=kind-worker2"' scenarios/kube/memory-hog.yml
  export scenario_type="hog_scenarios"
  export scenario_file="scenarios/kube/memory-hog.yml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/memory_hog.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/memory_hog.yaml
  echo "Memory Hog: Success"
}

functional_test_memory_hog
CI/tests/test_namespace.sh (modified)
@@ -6,13 +6,14 @@ trap error ERR
trap finish EXIT

function funtional_test_namespace_deletion {
  export scenario_type="namespace_scenarios"
  export scenario_file="- CI/scenarios/network_diagnostics_namespace.yaml"
  export scenario_type="service_disruption_scenarios"
  export scenario_file="scenarios/openshift/ingress_namespace.yaml"
  export post_config=""
  yq '.scenarios.[0].namespace="^openshift-network-diagnostics$"' -i CI/scenarios/network_diagnostics_namespace.yaml
  yq '.scenarios[0].namespace="^namespace-scenario$"' -i scenarios/openshift/ingress_namespace.yaml
  yq '.scenarios[0].wait_time=30' -i scenarios/openshift/ingress_namespace.yaml
  yq '.scenarios[0].action="delete"' -i scenarios/openshift/ingress_namespace.yaml
  envsubst < CI/config/common_test_config.yaml > CI/config/namespace_config.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/namespace_config.yaml
  echo $?
  echo "Namespace scenario test: Success"
}
CI/tests/test_net_chaos.sh (modified)
@@ -7,9 +7,16 @@ trap finish EXIT

function functional_test_network_chaos {
  yq -i '.network_chaos.duration=10' scenarios/openshift/network_chaos.yaml
  yq -i '.network_chaos.node_name="kind-worker2"' scenarios/openshift/network_chaos.yaml
  yq -i '.network_chaos.egress.bandwidth="100mbit"' scenarios/openshift/network_chaos.yaml
  yq -i 'del(.network_chaos.interfaces)' scenarios/openshift/network_chaos.yaml
  yq -i 'del(.network_chaos.label_selector)' scenarios/openshift/network_chaos.yaml
  yq -i 'del(.network_chaos.egress.latency)' scenarios/openshift/network_chaos.yaml
  yq -i 'del(.network_chaos.egress.loss)' scenarios/openshift/network_chaos.yaml

  export scenario_type="network_chaos"
  export scenario_file="CI/scenarios/network_chaos.yaml"
  export scenario_type="network_chaos_scenarios"
  export scenario_file="scenarios/openshift/network_chaos.yaml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/network_chaos.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/network_chaos.yaml
114
CI/tests/test_service_hijacking.sh
Normal file
114
CI/tests/test_service_hijacking.sh
Normal file
@@ -0,0 +1,114 @@
|
||||
set -xeEo pipefail
|
||||
|
||||
source CI/tests/common.sh
|
||||
|
||||
trap error ERR
|
||||
trap finish EXIT
|
||||
# port mapping has been configured in kind-config.yml
|
||||
SERVICE_URL=http://localhost:8888
|
||||
PAYLOAD_GET_1="{ \
|
||||
\"status\":\"internal server error\" \
|
||||
}"
|
||||
STATUS_CODE_GET_1=500
|
||||
|
||||
PAYLOAD_PATCH_1="resource patched"
|
||||
STATUS_CODE_PATCH_1=201
|
||||
|
||||
PAYLOAD_POST_1="{ \
|
||||
\"status\": \"unauthorized\" \
|
||||
}"
|
||||
STATUS_CODE_POST_1=401
|
||||
|
||||
PAYLOAD_GET_2="{ \
|
||||
\"status\":\"resource created\" \
|
||||
}"
|
||||
STATUS_CODE_GET_2=201
|
||||
|
||||
PAYLOAD_PATCH_2="bad request"
|
||||
STATUS_CODE_PATCH_2=400
|
||||
|
||||
PAYLOAD_POST_2="not found"
|
||||
STATUS_CODE_POST_2=404
|
||||
|
||||
JSON_MIME="application/json"
|
||||
TEXT_MIME="text/plain; charset=utf-8"
|
||||
|
||||
function functional_test_service_hijacking {
|
||||
|
||||
export scenario_type="service_hijacking_scenarios"
|
||||
export scenario_file="scenarios/kube/service_hijacking.yaml"
|
||||
export post_config=""
|
||||
envsubst < CI/config/common_test_config.yaml > CI/config/service_hijacking.yaml
|
||||
python3 -m coverage run -a run_kraken.py -c CI/config/service_hijacking.yaml > /dev/null 2>&1 &
|
||||
PID=$!
|
||||
#Waiting the hijacking to have effect
|
||||
COUNTER=0
|
||||
while [ `curl -X GET -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/list/index.php` == 404 ]
|
||||
do
|
||||
echo "waiting scenario to kick in."
|
||||
sleep 1
|
||||
COUNTER=$((COUNTER+1))
|
||||
[ $COUNTER -eq "100" ] && echo "maximum number of retry reached, test failed" && exit 1
|
||||
done
|
||||
|
||||
#Checking Step 1 GET on /list/index.php
|
||||
OUT_GET="`curl -X GET -s $SERVICE_URL/list/index.php`"
|
||||
OUT_CONTENT=`curl -X GET -s -o /dev/null -I -w "%{content_type}" $SERVICE_URL/list/index.php`
|
||||
OUT_STATUS_CODE=`curl -X GET -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/list/index.php`
|
||||
[ "${PAYLOAD_GET_1//[$'\t\r\n ']}" == "${OUT_GET//[$'\t\r\n ']}" ] && echo "Step 1 GET Payload OK" || (echo "Payload did not match. Test failed." && exit 1)
|
||||
[ "$OUT_STATUS_CODE" == "$STATUS_CODE_GET_1" ] && echo "Step 1 GET Status Code OK" || (echo " Step 1 GET status code did not match. Test failed." && exit 1)
|
||||
[ "$OUT_CONTENT" == "$JSON_MIME" ] && echo "Step 1 GET MIME OK" || (echo " Step 1 GET MIME did not match. Test failed." && exit 1)
|
||||
|
||||
#Checking Step 1 POST on /list/index.php
|
||||
OUT_POST="`curl -s -X POST $SERVICE_URL/list/index.php`"
|
||||
OUT_STATUS_CODE=`curl -X POST -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/list/index.php`
|
||||
OUT_CONTENT=`curl -X POST -s -o /dev/null -I -w "%{content_type}" $SERVICE_URL/list/index.php`
|
||||
[ "${PAYLOAD_POST_1//[$'\t\r\n ']}" == "${OUT_POST//[$'\t\r\n ']}" ] && echo "Step 1 POST Payload OK" || (echo "Payload did not match. Test failed." && exit 1)
|
||||
[ "$OUT_STATUS_CODE" == "$STATUS_CODE_POST_1" ] && echo "Step 1 POST Status Code OK" || (echo "Step 1 POST status code did not match. Test failed." && exit 1)
|
||||
[ "$OUT_CONTENT" == "$JSON_MIME" ] && echo "Step 1 POST MIME OK" || (echo " Step 1 POST MIME did not match. Test failed." && exit 1)
|
||||
|
||||
#Checking Step 1 PATCH on /patch
|
||||
OUT_PATCH="`curl -s -X PATCH $SERVICE_URL/patch`"
|
||||
OUT_STATUS_CODE=`curl -X PATCH -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/patch`
|
||||
OUT_CONTENT=`curl -X PATCH -s -o /dev/null -I -w "%{content_type}" $SERVICE_URL/patch`
|
||||
[ "${PAYLOAD_PATCH_1//[$'\t\r\n ']}" == "${OUT_PATCH//[$'\t\r\n ']}" ] && echo "Step 1 PATCH Payload OK" || (echo "Payload did not match. Test failed." && exit 1)
|
||||
[ "$OUT_STATUS_CODE" == "$STATUS_CODE_PATCH_1" ] && echo "Step 1 PATCH Status Code OK" || (echo "Step 1 PATCH status code did not match. Test failed." && exit 1)
|
||||
[ "$OUT_CONTENT" == "$TEXT_MIME" ] && echo "Step 1 PATCH MIME OK" || (echo " Step 1 PATCH MIME did not match. Test failed." && exit 1)
|
||||
# wait for the next step
|
||||
sleep 16
|
||||
|
||||
#Checking Step 2 GET on /list/index.php
|
||||
OUT_GET="`curl -X GET -s $SERVICE_URL/list/index.php`"
|
||||
OUT_CONTENT=`curl -X GET -s -o /dev/null -I -w "%{content_type}" $SERVICE_URL/list/index.php`
|
||||
OUT_STATUS_CODE=`curl -X GET -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/list/index.php`
|
||||
[ "${PAYLOAD_GET_2//[$'\t\r\n ']}" == "${OUT_GET//[$'\t\r\n ']}" ] && echo "Step 2 GET Payload OK" || (echo "Step 2 GET Payload did not match. Test failed." && exit 1)
|
||||
[ "$OUT_STATUS_CODE" == "$STATUS_CODE_GET_2" ] && echo "Step 2 GET Status Code OK" || (echo "Step 2 GET status code did not match. Test failed." && exit 1)
|
||||
[ "$OUT_CONTENT" == "$JSON_MIME" ] && echo "Step 2 GET MIME OK" || (echo " Step 2 GET MIME did not match. Test failed." && exit 1)
|
||||
|
||||
#Checking Step 2 POST on /list/index.php
|
||||
OUT_POST="`curl -s -X POST $SERVICE_URL/list/index.php`"
|
||||
OUT_CONTENT=`curl -X POST -s -o /dev/null -I -w "%{content_type}" $SERVICE_URL/list/index.php`
|
||||
OUT_STATUS_CODE=`curl -X POST -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/list/index.php`
|
||||
[ "${PAYLOAD_POST_2//[$'\t\r\n ']}" == "${OUT_POST//[$'\t\r\n ']}" ] && echo "Step 2 POST Payload OK" || (echo "Step 2 POST Payload did not match. Test failed." && exit 1)
|
||||
[ "$OUT_STATUS_CODE" == "$STATUS_CODE_POST_2" ] && echo "Step 2 POST Status Code OK" || (echo "Step 2 POST status code did not match. Test failed." && exit 1)
|
||||
[ "$OUT_CONTENT" == "$TEXT_MIME" ] && echo "Step 2 POST MIME OK" || (echo " Step 2 POST MIME did not match. Test failed." && exit 1)
|
||||
|
||||
#Checking Step 2 PATCH on /patch
|
||||
OUT_PATCH="`curl -s -X PATCH $SERVICE_URL/patch`"
|
||||
OUT_CONTENT=`curl -X PATCH -s -o /dev/null -I -w "%{content_type}" $SERVICE_URL/patch`
|
||||
OUT_STATUS_CODE=`curl -X PATCH -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/patch`
|
||||
[ "${PAYLOAD_PATCH_2//[$'\t\r\n ']}" == "${OUT_PATCH//[$'\t\r\n ']}" ] && echo "Step 2 PATCH Payload OK" || (echo "Step 2 PATCH Payload did not match. Test failed." && exit 1)
|
||||
[ "$OUT_STATUS_CODE" == "$STATUS_CODE_PATCH_2" ] && echo "Step 2 PATCH Status Code OK" || (echo "Step 2 PATCH status code did not match. Test failed." && exit 1)
|
||||
[ "$OUT_CONTENT" == "$TEXT_MIME" ] && echo "Step 2 PATCH MIME OK" || (echo " Step 2 PATCH MIME did not match. Test failed." && exit 1)
|
||||
wait $PID
|
||||
|
||||
# now checking if service has been restore correctly and nginx responds correctly
|
||||
curl -s $SERVICE_URL | grep nginx! && echo "BODY: Service restored!" || (echo "BODY: failed to restore service" && exit 1)
|
||||
OUT_STATUS_CODE=`curl -X GET -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL`
|
||||
[ "$OUT_STATUS_CODE" == "200" ] && echo "STATUS_CODE: Service restored!" || (echo "STATUS_CODE: failed to restore service" && exit 1)
|
||||
|
||||
echo "Service Hijacking Chaos test: Success"
|
||||
}
|
||||
|
||||
|
||||
functional_test_service_hijacking
|
||||
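The payload assertions in this test are deliberately whitespace-insensitive: the Bash expansion `${VAR//[$'\t\r\n ']}` deletes every tab, carriage return, newline and space before the comparison, so pretty-printed and compact payloads compare equal. A minimal standalone sketch of the same pattern (the variable names are illustrative, not from the test suite):

```bash
#!/usr/bin/env bash
# Expected payload, pretty-printed; actual response, compact.
EXPECTED='{ "status": "ok" }'
ACTUAL='{"status":"ok"}'

# Strip all whitespace from both sides before comparing,
# exactly as the CI checks above do.
[ "${EXPECTED//[$'\t\r\n ']}" == "${ACTUAL//[$'\t\r\n ']}" ] \
  && echo "Payload OK" \
  || (echo "Payload did not match." && exit 1)
```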
CI/tests/test_telemetry.sh — new file (36 lines)
@@ -0,0 +1,36 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT


function functional_test_telemetry {
AWS_CLI=`which aws`
[ -z "$AWS_CLI" ] && echo "AWS CLI not found in path" && exit 1
[ -z "$AWS_BUCKET" ] && echo "AWS bucket not set in environment" && exit 1

export RUN_TAG="funtest-telemetry"
yq -i '.telemetry.enabled=True' CI/config/common_test_config.yaml
yq -i '.telemetry.full_prometheus_backup=True' CI/config/common_test_config.yaml
yq -i '.performance_monitoring.check_critical_alerts=True' CI/config/common_test_config.yaml
yq -i '.performance_monitoring.prometheus_url="http://localhost:9090"' CI/config/common_test_config.yaml
yq -i '.telemetry.run_tag=env(RUN_TAG)' CI/config/common_test_config.yaml

export scenario_type="hog_scenarios"
export scenario_file="scenarios/kube/cpu-hog.yml"
export post_config=""
envsubst < CI/config/common_test_config.yaml > CI/config/telemetry.yaml
retval=$(python3 -m coverage run -a run_kraken.py -c CI/config/telemetry.yaml)
RUN_FOLDER=`cat CI/out/test_telemetry.out | grep amazonaws.com | sed -rn "s#.*https:\/\/.*\/files/(.*)#\1#p"`
$AWS_CLI s3 ls "s3://$AWS_BUCKET/$RUN_FOLDER/" | awk '{ print $4 }' > s3_remote_files
echo "checking if telemetry files are uploaded to S3"
cat s3_remote_files | grep critical-alerts-00.log || ( echo "FAILED: critical-alerts-00.log not uploaded" && exit 1 )
cat s3_remote_files | grep prometheus-00.tar || ( echo "FAILED: prometheus backup not uploaded" && exit 1 )
cat s3_remote_files | grep telemetry.json || ( echo "FAILED: telemetry.json not uploaded" && exit 1 )
echo "all files uploaded!"
echo "Telemetry Collection: Success"
}

functional_test_telemetry
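The telemetry test assumes an `aws` binary on the `$PATH` and a writable S3 bucket exported as `AWS_BUCKET`. A sketch of a local invocation under those assumptions (the credential variables are the standard AWS CLI ones, and the bucket name is a placeholder):

```bash
# Placeholder credentials and bucket; supply real values before running.
export AWS_ACCESS_KEY_ID="<access-key>"
export AWS_SECRET_ACCESS_KEY="<secret-key>"
export AWS_BUCKET="my-krkn-telemetry-bucket"

bash CI/tests/test_telemetry.sh
```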
@@ -7,8 +7,12 @@ trap finish EXIT


function functional_test_time_scenario {
yq -i '.time_scenarios[0].label_selector="scenario=time-skew"' scenarios/openshift/time_scenarios_example.yml
yq -i '.time_scenarios[0].container_name=""' scenarios/openshift/time_scenarios_example.yml
yq -i '.time_scenarios[0].namespace="default"' scenarios/openshift/time_scenarios_example.yml
yq -i '.time_scenarios[1].label_selector="kubernetes.io/hostname=kind-worker2"' scenarios/openshift/time_scenarios_example.yml
export scenario_type="time_scenarios"
export scenario_file="CI/scenarios/time_scenarios.yml"
export scenario_file="scenarios/openshift/time_scenarios_example.yml"
export post_config=""
envsubst < CI/config/common_test_config.yaml > CI/config/time_config.yaml

README.md (113 lines changed)
@@ -1,86 +1,81 @@
# KrknChaos aka Kraken
[](https://quay.io/repository/redhat-chaos/krkn?tab=tags&tag=latest)

# Krkn aka Kraken




Chaos and resiliency testing tool for Kubernetes and OpenShift.
Kraken injects deliberate failures into Kubernetes/OpenShift clusters to check if it is resilient to turbulent conditions.
Chaos and resiliency testing tool for Kubernetes.
Kraken injects deliberate failures into Kubernetes clusters to check whether they are resilient to turbulent conditions.

### Website
[Kraken Website](https://krkn-chaos.dev) is the one-stop shop for all things Kraken.
The website contains comprehensive information about the workflow, supported scenarios, and detailed descriptions of each scenario. It also provides the necessary configurations needed to run Kraken, along with insights into performance monitoring and signaling features.

### Workflow


### Demo
[](https://youtu.be/LN-fZywp_mo "Kraken Demo - Click to Watch!")
<!-- ### Demo
[](https://youtu.be/LN-fZywp_mo "Kraken Demo - Click to Watch!") -->

### Chaos Testing Guide
<!-- ### Chaos Testing Guide
[Guide](docs/index.md) encapsulates:
- Test methodology that needs to be embraced.
- Best practices that an OpenShift cluster, platform and applications running on top of it should take into account for best user experience, performance, resilience and reliability.
- Best practices that a Kubernetes cluster, platform and applications running on top of it should take into account for best user experience, performance, resilience and reliability.
- Tooling.
- Scenarios supported.
- Test environment recommendations as to how and where to run chaos tests.
- Chaos testing in practice.

The guide is hosted at https://redhat-chaos.github.io/krknChoas.
The guide is hosted at https://krkn-chaos.github.io/krkn. -->

### How to Get Started
Instructions on how to set up, configure and run Kraken can be found at [Installation](docs/installation.md).
Instructions on how to set up, configure and run Kraken can be found at [Installation](https://krkn-chaos.dev/docs/installation/).

You may consider utilizing the chaos recommendation tool prior to initiating the chaos runs to profile the application service(s) under test. This tool discovers a list of Krkn scenarios with a high probability of causing failures or disruptions to your application service(s). The tool can be accessed at [Chaos-Recommender](utils/chaos_recommender/README.md).
You may consider utilizing the chaos recommendation tool prior to initiating the chaos runs to profile the application service(s) under test. This tool discovers a list of Krkn scenarios with a high probability of causing failures or disruptions to your application service(s). The tool can be accessed at [Chaos-Recommender](https://krkn-chaos.dev/docs/chaos-recommender/).

See the [getting started doc](docs/getting_started.md) for support on how to get started with your own custom scenario or editing current scenarios for your specific usage.
See the [getting started doc](https://krkn-chaos.dev/docs/getting-started/) for support on how to get started with your own custom scenario or editing current scenarios for your specific usage.

After installation, refer back to the sections below for supported scenarios and how to tweak the Kraken config to load them on your cluster.

#### Running Kraken with minimal configuration tweaks
For cases where you want to run Kraken with minimal configuration changes, refer to [Kraken-hub](https://github.com/redhat-chaos/krknChaos-hub). One use case is CI integration where you do not want to carry around different configuration files for the scenarios.

### Setting up infrastructure dependencies
Kraken indexes the metrics specified in the profile into Elasticsearch, in addition to leveraging Cerberus for understanding the health of the Kubernetes/OpenShift cluster under test. More information on the features is documented below. The infrastructure pieces can be easily installed and uninstalled by running:

```
$ cd kraken
$ podman-compose up or $ docker-compose up # Spins up the containers specified in the docker-compose.yml file present in the run directory.
$ podman-compose down or $ docker-compose down # Delete the containers installed.
```
This will manage the Cerberus and Elasticsearch containers on the host on which you are running Kraken.

**NOTE**: Make sure you have enough resources (memory and disk) on the machine on top of which the containers are running, as Elasticsearch is resource intensive. Cerberus monitors the system components by default; the [config](config/cerberus.yaml) can be tweaked to add application namespaces, routes and other components to monitor as well. The command will keep running until killed, since detached mode is not supported as of now.
<!-- #### Running Kraken with minimal configuration tweaks
For cases where you want to run Kraken with minimal configuration changes, refer to [krkn-hub](https://github.com/krkn-chaos/krkn-hub). One use case is CI integration where you do not want to carry around different configuration files for the scenarios. -->

### Config
Instructions on how to set up the config and the options supported can be found at [Config](docs/config.md).

### Kubernetes/OpenShift chaos scenarios supported
<!-- ### Kubernetes chaos scenarios supported

Scenario type | Kubernetes | OpenShift
--------------------------- | ------------- |--------------------|
[Pod Scenarios](docs/pod_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: |
[Pod Network Scenarios](docs/pod_network_scenarios.md) | :x: | :heavy_check_mark: |
[Container Scenarios](docs/container_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: |
[Node Scenarios](docs/node_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: |
[Time Scenarios](docs/time_scenarios.md) | :x: | :heavy_check_mark: |
[Hog Scenarios: CPU, Memory](docs/arcaflow_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: |
[Cluster Shut Down Scenarios](docs/cluster_shut_down_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: |
[Service Disruption Scenarios](docs/service_disruption_scenarios.md.md) | :heavy_check_mark: | :heavy_check_mark: |
[Zone Outage Scenarios](docs/zone_outage.md) | :heavy_check_mark: | :heavy_check_mark: |
[Application_outages](docs/application_outages.md) | :heavy_check_mark: | :heavy_check_mark: |
[PVC scenario](docs/pvc_scenario.md) | :heavy_check_mark: | :heavy_check_mark: |
[Network_Chaos](docs/network_chaos.md) | :heavy_check_mark: | :heavy_check_mark: |
[ManagedCluster Scenarios](docs/managedcluster_scenarios.md) | :heavy_check_mark: | :question: |
Scenario type | Kubernetes
--------------------------- | ------------- |
[Pod Scenarios](docs/pod_scenarios.md) | :heavy_check_mark: |
[Pod Network Scenarios](docs/pod_network_scenarios.md) | :x: |
[Container Scenarios](docs/container_scenarios.md) | :heavy_check_mark: |
[Node Scenarios](docs/node_scenarios.md) | :heavy_check_mark: |
[Time Scenarios](docs/time_scenarios.md) | :heavy_check_mark: |
[Hog Scenarios: CPU, Memory](docs/hog_scenarios.md) | :heavy_check_mark: |
[Cluster Shut Down Scenarios](docs/cluster_shut_down_scenarios.md) | :heavy_check_mark: |
[Service Disruption Scenarios](docs/service_disruption_scenarios.md) | :heavy_check_mark: |
[Zone Outage Scenarios](docs/zone_outage.md) | :heavy_check_mark: |
[Application_outages](docs/application_outages.md) | :heavy_check_mark: |
[PVC scenario](docs/pvc_scenario.md) | :heavy_check_mark: |
[Network_Chaos](docs/network_chaos.md) | :heavy_check_mark: |
[ManagedCluster Scenarios](docs/managedcluster_scenarios.md) | :heavy_check_mark: |
[Service Hijacking Scenarios](docs/service_hijacking_scenarios.md) | :heavy_check_mark: |
[SYN Flood Scenarios](docs/syn_flood_scenarios.md) | :heavy_check_mark: |


### Kraken scenario pass/fail criteria and report
It is important to make sure to check if the targeted component recovered from the chaos injection and also if the Kubernetes/OpenShift cluster is healthy, as failures in one component can have an adverse impact on other components. Kraken does this by:
It is important to make sure to check if the targeted component recovered from the chaos injection and also if the Kubernetes cluster is healthy, as failures in one component can have an adverse impact on other components. Kraken does this by:
- Having built-in checks for pod and node based scenarios to ensure the expected number of replicas and nodes are up. It also supports running custom scripts with the checks.
- Leveraging [Cerberus](https://github.com/openshift-scale/cerberus) to monitor the cluster under test and consuming the aggregated go/no-go signal to determine pass/fail post chaos. It is highly recommended to turn on the Cerberus health check feature available in Kraken. Instructions on installing and setting up Cerberus can be found [here](https://github.com/openshift-scale/cerberus#installation) or can be installed from Kraken using the [instructions](https://github.com/redhat-chaos/krkn#setting-up-infrastructure-dependencies). Once Cerberus is up and running, set cerberus_enabled to True and cerberus_url to the url where Cerberus publishes go/no-go signal in the Kraken config file. Cerberus can monitor [application routes](https://github.com/redhat-chaos/cerberus/blob/main/docs/config.md#watch-routes) during the chaos and fails the run if it encounters downtime as it is a potential downtime in a customers, or users environment as well. It is especially important during the control plane chaos scenarios including the API server, Etcd, Ingress etc. It can be enabled by setting `check_applicaton_routes: True` in the [Kraken config](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml) provided application routes are being monitored in the [cerberus config](https://github.com/redhat-chaos/krkn/blob/main/config/cerberus.yaml).
- Leveraging [Cerberus](https://github.com/krkn-chaos/cerberus) to monitor the cluster under test and consuming the aggregated go/no-go signal to determine pass/fail post chaos. It is highly recommended to turn on the Cerberus health check feature available in Kraken. Instructions on installing and setting up Cerberus can be found [here](https://github.com/openshift-scale/cerberus#installation) or it can be installed from Kraken using the [instructions](https://github.com/krkn-chaos/krkn#setting-up-infrastructure-dependencies). Once Cerberus is up and running, set cerberus_enabled to True and cerberus_url to the url where Cerberus publishes the go/no-go signal in the Kraken config file. Cerberus can monitor [application routes](https://github.com/redhat-chaos/cerberus/blob/main/docs/config.md#watch-routes) during the chaos and fails the run if it encounters downtime, as it would be a potential downtime in a customer's or user's environment as well. It is especially important during control plane chaos scenarios, including the API server, Etcd, Ingress etc. It can be enabled by setting `check_applicaton_routes: True` in the [Kraken config](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml) provided application routes are being monitored in the [cerberus config](https://github.com/redhat-chaos/krkn/blob/main/config/cerberus.yaml).
- Leveraging the built-in alert collection feature to fail the runs in case of critical alerts.
- Utilizing health check endpoints to observe application behavior during chaos injection: [Health checks](docs/health_checks.md).

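For example, with Cerberus already running, the two keys named above can be flipped with `yq`, mirroring how the CI tests edit configs (the URL is illustrative; use wherever your Cerberus instance publishes its go/no-go signal):

```bash
# Enable the Cerberus health check and point Kraken at the signal URL.
yq -i '.cerberus.cerberus_enabled=True' config/config.yaml
yq -i '.cerberus.cerberus_url="http://0.0.0.0:8080"' config/config.yaml
```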
### Signaling
In CI runs or any external job, it is useful to stop Kraken once a certain test or state is reached. We created a way to signal Kraken to pause the chaos or stop it completely using a signal posted to a port of your choice.
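As a sketch, with the defaults shown in the config later in this diff (`signal_address: 0.0.0.0`, `port: 8081`, `publish_kraken_status: True`), the published status can be read with a plain GET; the POST below for changing state is hypothetical, so check docs/signal.md for the authoritative interface:

```bash
# Read the currently published run status (exposed when
# publish_kraken_status is True, per the config comments).
curl -s http://0.0.0.0:8081

# Hypothetical sketch of posting a signal (e.g. PAUSE or STOP);
# the exact path/payload is an assumption, see docs/signal.md.
curl -s -X POST -d 'STOP' http://0.0.0.0:8081
```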
@@ -94,10 +89,6 @@ More detailed information on enabling and leveraging this feature can be found [
Monitoring the Kubernetes/OpenShift cluster to observe the impact of Kraken chaos scenarios on various components is key to finding the bottlenecks, as it is important to make sure the cluster is healthy in terms of both recovery and performance during/after the failure has been injected. Instructions on enabling it can be found [here](docs/performance_dashboards.md).

### Scraping and storing metrics long term
Kraken supports capturing metrics for the duration of the scenarios defined in the config and indexes them into Elasticsearch so that the state of the runs can be stored and evaluated long term. The indexed metrics can be visualized with the help of Grafana. It uses [Kube-burner](https://github.com/kube-burner/kube-burner) under the hood. The metrics to capture need to be defined in a metrics profile, which Kraken consumes to query Prometheus (installed by default in OpenShift) with the start and end timestamp of the run. Information on enabling and leveraging this feature can be found [here](docs/metrics.md).

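In config terms this maps to a few keys under `performance_monitoring` shown later in this diff; a sketch using the same `yq` convention as the CI tests (the Prometheus URL is illustrative and, per the config comments, only needed on plain Kubernetes):

```bash
yq -i '.performance_monitoring.capture_metrics=True' config/config.yaml
yq -i '.performance_monitoring.metrics_profile="config/metrics-report.yaml"' config/config.yaml
# Auto-discovered on OpenShift; set explicitly on plain Kubernetes.
yq -i '.performance_monitoring.prometheus_url="http://localhost:9090"' config/config.yaml
```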
### SLOs validation during and post chaos
- In addition to checking the recovery and health of the cluster and components under test, Kraken takes in a profile with Prometheus expressions to validate, raises alerts, and exits with a non-zero return code depending on the severity set. This feature can be used to determine pass/fail or alert on abnormalities observed in the cluster based on the metrics.
- Kraken also provides the ability to check if any critical alerts are firing in the cluster post chaos and passes/fails the run accordingly.
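The alert profile uses the `expr`/`description`/`severity` entries shown later in this diff; a sketch of a minimal custom profile and the keys that enable it (file name and threshold are illustrative):

```bash
# Minimal custom alert profile in the same format as config/alerts.yaml.
cat > my-alerts.yaml <<'EOF'
- expr: histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[2m])) > 0.01
  description: 99th etcd fsync latency on {{$labels.pod}} higher than 10ms. {{$value}}s
  severity: warning
EOF

# Enable alert evaluation and point Kraken at the profile.
yq -i '.performance_monitoring.enable_alerts=True' config/config.yaml
yq -i '.performance_monitoring.alert_profile="my-alerts.yaml"' config/config.yaml
```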
@@ -107,7 +98,7 @@ Information on enabling and leveraging this feature can be found [here](docs/SLO

### OCM / ACM integration

Kraken supports injecting faults into [Open Cluster Management (OCM)](https://open-cluster-management.io/) and [Red Hat Advanced Cluster Management for Kubernetes (ACM)](https://www.redhat.com/en/technologies/management/advanced-cluster-management) managed clusters through [ManagedCluster Scenarios](docs/managedcluster_scenarios.md).
Kraken supports injecting faults into [Open Cluster Management (OCM)](https://open-cluster-management.io/) and [Red Hat Advanced Cluster Management for Kubernetes (ACM)](https://www.krkn.com/en/technologies/management/advanced-cluster-management) managed clusters through [ManagedCluster Scenarios](docs/managedcluster_scenarios.md). -->

### Blogs and other useful resources
@@ -116,7 +107,8 @@ Kraken supports injecting faults into [Open Cluster Management (OCM)](https://op
- Blog post emphasizing the importance of making Chaos part of Performance and Scale runs to mimic the production environments: https://www.openshift.com/blog/making-chaos-part-of-kubernetes/openshift-performance-and-scalability-tests
- Blog post on findings from Chaos test runs: https://cloud.redhat.com/blog/openshift/kubernetes-chaos-stories
- Discussion with CNCF TAG App Delivery on Krkn workflow, features and addition to CNCF sandbox: [Github](https://github.com/cncf/sandbox/issues/44), [Tracker](https://github.com/cncf/tag-app-delivery/issues/465), [recording](https://www.youtube.com/watch?v=nXQkBFK_MWc&t=722s)

- Blog post on supercharging chaos testing using AI integration in Krkn: https://www.redhat.com/en/blog/supercharging-chaos-testing-using-ai
- Blog post announcing Krkn joining CNCF Sandbox: https://www.redhat.com/en/blog/krknchaos-joining-cncf-sandbox

### Roadmap
Enhancements being planned can be found in the [roadmap](ROADMAP.md).
@@ -125,13 +117,20 @@ Enhancements being planned can be found in the [roadmap](ROADMAP.md).
### Contributions
We are always looking for enhancements and fixes to make it better; any contributions are most welcome. Feel free to report or work on the issues filed on github.

[More information on how to Contribute](docs/contribute.md)
[More information on how to Contribute](https://krkn-chaos.dev/docs/contribution-guidelines/contribute/)

If adding a new scenario or tweaking the main config, be sure to update the CI accordingly so it stays up to date.
Please read [this file](CI/README.md#adding-a-test-case) for more information on updates.
Please read [this file](https://krkn-chaos.dev/docs/getting-started/#adding-new-scenarios) for more information on updates.


### Scenario Plugin Development

If you're gearing up to develop new scenarios, take a moment to review our
[Scenario Plugin API Documentation](docs/scenario_plugin_api.md).
It’s the perfect starting point to tap into your chaotic creativity!

### Community
Key Members (slack_usernames/full name): paigerube14/Paige Rubendall, mffiedler/Mike Fiedler, ravielluri/Naga Ravi Chaitanya Elluri.
* [**#krkn on Kubernetes Slack**](https://kubernetes.slack.com)
* [**#forum-chaos on CoreOS Slack internal to Red Hat**](https://coreos.slack.com)
Key Members (slack_usernames/full name): paigerube14/Paige Rubendall, mffiedler/Mike Fiedler, tsebasti/Tullio Sebastiani, yogi/Yogananth Subramanian, sahil/Sahil Shah, pradeep/Pradeep Surisetty and ravielluri/Naga Ravi Chaitanya Elluri.
* [**#krkn on Kubernetes Slack**](https://kubernetes.slack.com/messages/C05SFMHRWK1)

The Linux Foundation® (TLF) has registered trademarks and uses trademarks. For a list of TLF trademarks, see [Trademark Usage](https://www.linuxfoundation.org/legal/trademark-usage).

ROADMAP.md (23 lines changed)
@@ -2,14 +2,15 @@

Following is a list of enhancements that we are planning to add support for in Krkn. Of course, any help/contributions are greatly appreciated.

- [ ] [Ability to run multiple chaos scenarios in parallel under load to mimic real world outages](https://github.com/redhat-chaos/krkn/issues/424)
- [x] [Centralized storage for chaos experiments artifacts](https://github.com/redhat-chaos/krkn/issues/423)
- [ ] [Support for causing DNS outages](https://github.com/redhat-chaos/krkn/issues/394)
- [x] [Chaos recommender](https://github.com/redhat-chaos/krkn/tree/main/utils/chaos-recommender) to suggest scenarios having probability of impacting the service under test using profiling results
- [ ] Chaos AI integration to improve and automate test coverage
- [x] [Support for pod level network traffic shaping](https://github.com/redhat-chaos/krkn/issues/393)
- [ ] [Ability to visualize the metrics that are being captured by Kraken and stored in Elasticsearch](https://github.com/redhat-chaos/krkn/issues/124)
- [ ] Support for running all the scenarios of Kraken on Kubernetes distribution - see https://github.com/redhat-chaos/krkn/issues/185, https://github.com/redhat-chaos/krkn/issues/186
- [ ] Continue to improve [Chaos Testing Guide](https://redhat-chaos.github.io/krkn) in terms of adding best practices, test environment recommendations and scenarios to make sure the OpenShift platform, as well as the applications running on top of it, are resilient and performant under chaotic conditions.
- [ ] [Switch documentation references to Kubernetes](https://github.com/redhat-chaos/krkn/issues/495)
- [ ] [OCP and Kubernetes functionalities segregation](https://github.com/redhat-chaos/krkn/issues/497)
- [ ] [Ability to run multiple chaos scenarios in parallel under load to mimic real world outages](https://github.com/krkn-chaos/krkn/issues/424)
- [x] [Centralized storage for chaos experiments artifacts](https://github.com/krkn-chaos/krkn/issues/423)
- [ ] [Support for causing DNS outages](https://github.com/krkn-chaos/krkn/issues/394)
- [x] [Chaos recommender](https://github.com/krkn-chaos/krkn/tree/main/utils/chaos-recommender) to suggest scenarios having probability of impacting the service under test using profiling results
- [ ] Chaos AI integration to improve test coverage while reducing fault space to save costs and execution time
- [x] [Support for pod level network traffic shaping](https://github.com/krkn-chaos/krkn/issues/393)
- [ ] [Ability to visualize the metrics that are being captured by Kraken and stored in Elasticsearch](https://github.com/krkn-chaos/krkn/issues/124)
- [x] Support for running all the scenarios of Kraken on Kubernetes distribution - see https://github.com/krkn-chaos/krkn/issues/185, https://github.com/redhat-chaos/krkn/issues/186
- [x] Continue to improve [Chaos Testing Guide](https://krkn-chaos.github.io/krkn) in terms of adding best practices, test environment recommendations and scenarios to make sure the OpenShift platform, as well as the applications running on top of it, are resilient and performant under chaotic conditions.
- [x] [Switch documentation references to Kubernetes](https://github.com/krkn-chaos/krkn/issues/495)
- [x] [OCP and Kubernetes functionalities segregation](https://github.com/krkn-chaos/krkn/issues/497)
- [x] [Krknctl - client for running Krkn scenarios with ease](https://github.com/krkn-chaos/krknctl)

SECURITY.md — new file (43 lines)
@@ -0,0 +1,43 @@
# Security Policy

We attach great importance to code security. We are very grateful to users, security researchers, and anyone else who reports security vulnerabilities to the Krkn community. All reported security vulnerabilities will be carefully assessed and addressed in a timely manner.


## Security Checks

Krkn leverages [Snyk](https://snyk.io/) to ensure that any security vulnerabilities found in the code base and dependencies are fixed and published in the latest release. Security vulnerability checks are enabled for each pull request to enable developers to get insights and proactively fix them.


## Reporting a Vulnerability

The Krkn project treats security vulnerabilities seriously, so we strive to take action quickly when required.

The project requests that security issues be disclosed in a responsible manner to allow adequate time to respond. If a security issue or vulnerability has been found, please disclose the details to our dedicated email address:

cncf-krkn-maintainers@lists.cncf.io

You can also use the [GitHub vulnerability report mechanism](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability) to report the security vulnerability.

Please include as much information as possible with the report. The following details assist with analysis efforts:
- Description of the vulnerability
- Affected component (version, commit, branch, etc.)
- Affected code (file path, line numbers)
- Exploit code


## Security Team

The security team currently consists of the [Maintainers of Krkn](https://github.com/krkn-chaos/krkn/blob/main/MAINTAINERS.md).


## Process and Supported Releases

The Krkn security team will investigate and provide a fix in a timely manner depending on the severity. The fix will be included in the new release of Krkn and details will be included in the release notes.
@@ -8,7 +8,7 @@
  description: 10 minutes avg. 99th etcd fsync latency on {{$labels.pod}} higher than 1s. {{$value}}s
  severity: error

- expr: avg_over_time(histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket[2m]))[10m:]) > 0.007
- expr: avg_over_time(histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket[2m]))[10m:]) > 0.03
  description: 10 minutes avg. 99th etcd commit latency on {{$labels.pod}} higher than 30ms. {{$value}}s
  severity: warning

@@ -88,3 +88,42 @@
- expr: ALERTS{severity="critical", alertstate="firing"} > 0
  description: Critical prometheus alert. {{$labels.alertname}}
  severity: warning

# etcd CPU and usage increase
- expr: sum(rate(container_cpu_usage_seconds_total{image!='', namespace='openshift-etcd', container='etcd'}[1m])) * 100 / sum(machine_cpu_cores) > 5
  description: Etcd CPU usage increased significantly by {{$value}}%
  severity: critical

# etcd memory usage increase
- expr: sum(deriv(container_memory_usage_bytes{image!='', namespace='openshift-etcd', container='etcd'}[5m])) * 100 / sum(node_memory_MemTotal_bytes) > 5
  description: Etcd memory usage increased significantly by {{$value}}%
  severity: critical

# Openshift API server CPU and memory usage increase
- expr: sum(rate(container_cpu_usage_seconds_total{image!='', namespace='openshift-apiserver', container='openshift-apiserver'}[1m])) * 100 / sum(machine_cpu_cores) > 5
  description: openshift apiserver cpu usage increased significantly by {{$value}}%
  severity: critical

- expr: (sum(deriv(container_memory_usage_bytes{namespace='openshift-apiserver', container='openshift-apiserver'}[5m]))) * 100 / sum(node_memory_MemTotal_bytes) > 5
  description: openshift apiserver memory usage increased significantly by {{$value}}%
  severity: critical

# Openshift kube API server CPU and memory usage increase
- expr: sum(rate(container_cpu_usage_seconds_total{image!='', namespace='openshift-kube-apiserver', container='kube-apiserver'}[1m])) * 100 / sum(machine_cpu_cores) > 5
  description: openshift apiserver cpu usage increased significantly by {{$value}}%
  severity: critical

- expr: (sum(deriv(container_memory_usage_bytes{namespace='openshift-kube-apiserver', container='kube-apiserver'}[5m]))) * 100 / sum(node_memory_MemTotal_bytes) > 5
  description: openshift apiserver memory usage increased significantly by {{$value}}%
  severity: critical

# Master node CPU usage increase
- expr: (sum((sum(deriv(pod:container_cpu_usage:sum{container="",pod!=""}[5m])) BY (namespace, pod) * on(pod, namespace) group_left(node) (node_namespace_pod:kube_pod_info:) ) * on(node) group_left(role) (max by (node) (kube_node_role{role="master"})))) * 100 / sum(machine_cpu_cores) > 5
  description: master nodes cpu usage increased significantly by {{$value}}%
  severity: critical

# Master nodes memory usage increase
- expr: (sum((sum(deriv(container_memory_usage_bytes{container="",pod!=""}[5m])) BY (namespace, pod) * on(pod, namespace) group_left(node) (node_namespace_pod:kube_pod_info:) ) * on(node) group_left(role) (max by (node) (kube_node_role{role="master"})))) * 100 / sum(node_memory_MemTotal_bytes) > 5
  description: master nodes memory usage increased significantly by {{$value}}%
  severity: critical

@@ -99,3 +99,41 @@
- expr: ALERTS{severity="critical", alertstate="firing"} > 0
  description: Critical prometheus alert. {{$labels.alertname}}
  severity: warning

# etcd CPU and usage increase
- expr: sum(rate(container_cpu_usage_seconds_total{image!='', namespace='openshift-etcd', container='etcd'}[1m])) * 100 / sum(machine_cpu_cores) > 5
  description: Etcd CPU usage increased significantly by {{$value}}%
  severity: critical

# etcd memory usage increase
- expr: sum(deriv(container_memory_usage_bytes{image!='', namespace='openshift-etcd', container='etcd'}[5m])) * 100 / sum(node_memory_MemTotal_bytes) > 5
  description: Etcd memory usage increased significantly by {{$value}}%
  severity: critical

# Openshift API server CPU and memory usage increase
- expr: sum(rate(container_cpu_usage_seconds_total{image!='', namespace='openshift-apiserver', container='openshift-apiserver'}[1m])) * 100 / sum(machine_cpu_cores) > 5
  description: openshift apiserver cpu usage increased significantly by {{$value}}%
  severity: critical

- expr: (sum(deriv(container_memory_usage_bytes{namespace='openshift-apiserver', container='openshift-apiserver'}[5m]))) * 100 / sum(node_memory_MemTotal_bytes) > 5
  description: openshift apiserver memory usage increased significantly by {{$value}}%
  severity: critical

# Openshift kube API server CPU and memory usage increase
- expr: sum(rate(container_cpu_usage_seconds_total{image!='', namespace='openshift-kube-apiserver', container='kube-apiserver'}[1m])) * 100 / sum(machine_cpu_cores) > 5
  description: openshift apiserver cpu usage increased significantly by {{$value}}%
  severity: critical

- expr: (sum(deriv(container_memory_usage_bytes{namespace='openshift-kube-apiserver', container='kube-apiserver'}[5m]))) * 100 / sum(node_memory_MemTotal_bytes) > 5
  description: openshift apiserver memory usage increased significantly by {{$value}}%
  severity: critical

# Master node CPU usage increase
- expr: (sum((sum(deriv(pod:container_cpu_usage:sum{container="",pod!=""}[5m])) BY (namespace, pod) * on(pod, namespace) group_left(node) (node_namespace_pod:kube_pod_info:) ) * on(node) group_left(role) (max by (node) (kube_node_role{role="master"})))) * 100 / sum(machine_cpu_cores) > 5
  description: master nodes cpu usage increased significantly by {{$value}}%
  severity: critical

# Master nodes memory usage increase
- expr: (sum((sum(deriv(container_memory_usage_bytes{container="",pod!=""}[5m])) BY (namespace, pod) * on(pod, namespace) group_left(node) (node_namespace_pod:kube_pod_info:) ) * on(node) group_left(role) (max by (node) (kube_node_role{role="master"})))) * 100 / sum(node_memory_MemTotal_bytes) > 5
  description: master nodes memory usage increased significantly by {{$value}}%
  severity: critical

@@ -1,6 +1,5 @@
kraken:
distribution: openshift # Distribution can be kubernetes or openshift
kubeconfig_path: ~/.kube/config # Path to kubeconfig
kubeconfig_path: ~/.kube/config # Path to kubeconfig
exit_on_failure: False # Exit when a post action scenario fails
publish_kraken_status: True # Can be accessed at http://0.0.0.0:8081
signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer docs/signal.md for more details
@@ -8,40 +7,44 @@ kraken:
port: 8081 # Signal port
chaos_scenarios:
# List of policies/chaos scenarios to load
- arcaflow_scenarios:
- scenarios/arcaflow/cpu-hog/input.yaml
- scenarios/arcaflow/memory-hog/input.yaml
- scenarios/arcaflow/io-hog/input.yaml
- application_outages:
- hog_scenarios:
- scenarios/kube/cpu-hog.yml
- scenarios/kube/memory-hog.yml
- scenarios/kube/io-hog.yml
- application_outages_scenarios:
- scenarios/openshift/app_outage.yaml
- container_scenarios: # List of chaos pod scenarios to load
- - scenarios/openshift/container_etcd.yml
- plugin_scenarios:
- scenarios/openshift/container_etcd.yml
- pod_network_scenarios:
- scenarios/openshift/network_chaos_ingress.yml
- scenarios/openshift/pod_network_outage.yml
- pod_disruption_scenarios:
- scenarios/openshift/etcd.yml
- scenarios/openshift/regex_openshift_pod_kill.yml
- scenarios/openshift/vmware_node_scenarios.yml
- scenarios/openshift/network_chaos_ingress.yml
- scenarios/openshift/prom_kill.yml
- node_scenarios: # List of chaos node scenarios to load
- scenarios/openshift/node_scenarios_example.yml
- plugin_scenarios:
- scenarios/openshift/openshift-apiserver.yml
- scenarios/openshift/openshift-kube-apiserver.yml
- node_scenarios: # List of chaos node scenarios to load
- scenarios/openshift/aws_node_scenarios.yml
- scenarios/openshift/vmware_node_scenarios.yml
- scenarios/openshift/ibmcloud_node_scenarios.yml
- time_scenarios: # List of chaos time scenarios to load
- scenarios/openshift/time_scenarios_example.yml
- cluster_shut_down_scenarios:
- - scenarios/openshift/cluster_shut_down_scenario.yml
- scenarios/openshift/post_action_shut_down.py
- scenarios/openshift/cluster_shut_down_scenario.yml
- service_disruption_scenarios:
- - scenarios/openshift/regex_namespace.yaml
- - scenarios/openshift/ingress_namespace.yaml
- scenarios/openshift/post_action_namespace.py
- zone_outages:
- scenarios/openshift/regex_namespace.yaml
- scenarios/openshift/ingress_namespace.yaml
- zone_outages_scenarios:
- scenarios/openshift/zone_outage.yaml
- pvc_scenarios:
- scenarios/openshift/pvc_scenario.yaml
- network_chaos:
- network_chaos_scenarios:
- scenarios/openshift/network_chaos.yaml
- service_hijacking_scenarios:
- scenarios/kube/service_hijacking.yaml
- syn_flood_scenarios:
- scenarios/kube/syn_flood.yaml

cerberus:
cerberus_enabled: False # Enable it when cerberus is previously installed
@@ -51,14 +54,25 @@ cerberus:
performance_monitoring:
deploy_dashboards: False # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift
repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
capture_metrics: False
metrics_profile_path: config/metrics-aggregated.yaml
prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
prometheus_url: '' # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
uuid: # uuid for the run is generated by default if not set
enable_alerts: False # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
enable_metrics: False
alert_profile: config/alerts.yaml # Path or URL to alert profile with the prometheus queries
metrics_profile: config/metrics-report.yaml
check_critical_alerts: False # When enabled will check prometheus for critical alerts firing post chaos
elastic:
enable_elastic: False
verify_certs: False
elastic_url: "" # To track results in elasticsearch, give url to server here; will post telemetry details when url and index not blank
elastic_port: 32766
username: "elastic"
password: "test"
metrics_index: "krkn-metrics"
alerts_index: "krkn-alerts"
telemetry_index: "krkn-telemetry"

tunings:
wait_duration: 60 # Duration to wait between each chaos scenario
iterations: 1 # Number of times to execute the scenarios
@@ -67,14 +81,19 @@ telemetry:
enabled: False # enable/disables the telemetry collection feature
api_url: https://ulnmf9xv7j.execute-api.us-west-2.amazonaws.com/production # telemetry service endpoint
username: username # telemetry service username
password: password # telemetry service password
password: password # telemetry service password
prometheus_backup: True # enables/disables prometheus data collection
prometheus_namespace: "" # namespace where prometheus is deployed (if distribution is kubernetes)
prometheus_container_name: "" # name of the prometheus container (if distribution is kubernetes)
prometheus_pod_name: "" # name of the prometheus pod (if distribution is kubernetes)
full_prometheus_backup: False # if set to False only the /prometheus/wal folder will be downloaded.
backup_threads: 5 # number of telemetry download/upload threads
archive_path: /tmp # local path where the archive files will be temporarily stored
max_retries: 0 # maximum number of upload retries (if 0 will retry forever)
run_tag: '' # if set, this will be appended to the run folder in the bucket (useful to group the runs)
archive_size: 500000 # the size of the prometheus data archive size in KB. The lower the size of archive is
archive_size: 500000
telemetry_group: '' # if set will archive the telemetry in the S3 bucket on a folder named after the value, otherwise will use "default"
# the size of the prometheus data archive size in KB. The lower the size of archive is
# the higher the number of archive files will be produced and uploaded (and processed by backup_threads
# simultaneously).
# For unstable/slow connections it is better to keep this value low
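A quick back-of-the-envelope reading of `archive_size`: it is expressed in KB, so the chunk count is roughly the backup size divided by it, and up to `backup_threads` chunks are uploaded in parallel. A throwaway sketch with purely illustrative numbers:

```bash
# ~2 GB of Prometheus data split into 500000 KB (~500 MB) chunks
# yields 4 archives, uploaded by up to 5 backup threads in parallel.
BACKUP_KB=2000000
ARCHIVE_SIZE_KB=500000
echo "$(( (BACKUP_KB + ARCHIVE_SIZE_KB - 1) / ARCHIVE_SIZE_KB )) archives"
```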
@@ -88,6 +107,10 @@ telemetry:
oc_cli_path: /usr/bin/oc # optional, if not specified will be searched in $PATH
events_backup: True # enables/disables cluster events collection




health_checks: # Utilizing health check endpoints to observe application behavior during chaos injection.
interval: # Interval in seconds to perform health checks, default value is 2 seconds
config: # Provide list of health check configurations for applications
- url: # Provide application endpoint
bearer_token: # Bearer token for authentication if any
auth: # Provide authentication credentials (username, password) in tuple format if any, ex: ("admin","secretpassword")
exit_on_failure: # If value is True exits when health check failed for application, values can be True/False

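A filled-in sketch of the `health_checks` block above, appended to a config with a heredoc; the endpoint, token and interval are placeholders, not defaults:

```bash
# Illustrative health_checks section; URL and token are placeholders.
cat >> CI/config/my_config.yaml <<'EOF'
health_checks:
  interval: 2
  config:
    - url: http://my-app.example.com/healthz
      bearer_token: my-secret-token
      auth:
      exit_on_failure: True
EOF
```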
@@ -6,7 +6,7 @@ kraken:
publish_kraken_status: True # Can be accessed at http://0.0.0.0:8081
signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer docs/signal.md for more details
signal_address: 0.0.0.0 # Signal listening address
chaos_scenarios: # List of policies/chaos scenarios to load
chaos_scenarios: # List of policies/chaos scenarios to load
- plugin_scenarios:
- scenarios/kind/scheduler.yml
- node_scenarios:
@@ -20,8 +20,6 @@ cerberus:
performance_monitoring:
deploy_dashboards: False # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift
repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
capture_metrics: False
metrics_profile_path: config/metrics-aggregated.yaml
prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
uuid: # uuid for the run is generated by default if not set

@@ -7,7 +7,7 @@ kraken:
signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer docs/signal.md for more details
chaos_scenarios: # List of policies/chaos scenarios to load
- container_scenarios: # List of chaos pod scenarios to load
- - scenarios/kube/container_dns.yml
- scenarios/kube/container_dns.yml
- plugin_scenarios:
- scenarios/kube/scheduler.yml

@@ -19,8 +19,6 @@ cerberus:
performance_monitoring:
deploy_dashboards: False # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift
repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
capture_metrics: False
metrics_profile_path: config/metrics-aggregated.yaml
prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
uuid: # uuid for the run is generated by default if not set

@@ -12,15 +12,14 @@ kraken:
- scenarios/openshift/regex_openshift_pod_kill.yml
- scenarios/openshift/prom_kill.yml
- node_scenarios: # List of chaos node scenarios to load
- scenarios/openshift/node_scenarios_example.yml
- scenarios/openshift/node_scenarios_example.yml
- plugin_scenarios:
- scenarios/openshift/openshift-apiserver.yml
- scenarios/openshift/openshift-kube-apiserver.yml
- time_scenarios: # List of chaos time scenarios to load
- scenarios/openshift/time_scenarios_example.yml
- cluster_shut_down_scenarios:
- - scenarios/openshift/cluster_shut_down_scenario.yml
- scenarios/openshift/post_action_shut_down.py
- scenarios/openshift/cluster_shut_down_scenario.yml
- service_disruption_scenarios:
- scenarios/openshift/regex_namespace.yaml
- scenarios/openshift/ingress_namespace.yaml
@@ -77,3 +76,8 @@ telemetry:
- "kinit (\\d+/\\d+/\\d+\\s\\d{2}:\\d{2}:\\d{2})\\s+" # kinit 2023/09/15 11:20:36 log
- "(\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d+Z).+" # 2023-09-15T11:20:36.123425532Z log
oc_cli_path: /usr/bin/oc # optional, if not specified will be searched in $PATH
elastic:
elastic_url: "" # To track results in elasticsearch, give url to server here; will post telemetry details when url and index not blank
elastic_index: "" # Elastic search index pattern to post results to

@@ -1,133 +1,126 @@
metrics:
# API server
- query: histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{apiserver="kube-apiserver", verb!~"WATCH", subresource!="log"}[2m])) by (verb,resource,subresource,instance,le)) > 0
  metricName: API99thLatency

- query: sum(irate(apiserver_request_total{apiserver="kube-apiserver",verb!="WATCH",subresource!="log"}[2m])) by (verb,instance,resource,code) > 0
  metricName: APIRequestRate
  instant: True

- query: sum(apiserver_current_inflight_requests{}) by (request_kind) > 0
  metricName: APIInflightRequests
  instant: True

- query: histogram_quantile(0.99, rate(apiserver_current_inflight_requests[5m]))
  metricName: APIInflightRequests
  instant: True

# Container & pod metrics
- query: (sum(container_memory_rss{name!="",container!="POD",namespace=~"openshift-(etcd|oauth-apiserver|.*apiserver|ovn-kubernetes|sdn|ingress|authentication|.*controller-manager|.*scheduler)"}) by (container, pod, namespace, node) and on (node) kube_node_role{role="master"}) > 0
  metricName: containerMemory-Masters
  instant: true

- query: (sum(irate(container_cpu_usage_seconds_total{name!="",container!="POD",namespace=~"openshift-(etcd|oauth-apiserver|sdn|ovn-kubernetes|.*apiserver|authentication|.*controller-manager|.*scheduler)"}[2m]) * 100) by (container, pod, namespace, node) and on (node) kube_node_role{role="master"}) > 0
  metricName: containerCPU-Masters
  instant: true

- query: (sum(irate(container_cpu_usage_seconds_total{pod!="",container="prometheus",namespace="openshift-monitoring"}[2m]) * 100) by (container, pod, namespace, node) and on (node) kube_node_role{role="infra"}) > 0
  metricName: containerCPU-Prometheus
  instant: true

- query: (avg(irate(container_cpu_usage_seconds_total{name!="",container!="POD",namespace=~"openshift-(sdn|ovn-kubernetes|ingress)"}[2m]) * 100 and on (node) kube_node_role{role="worker"}) by (namespace, container)) > 0
  metricName: containerCPU-AggregatedWorkers
  instant: true

- query: (avg(irate(container_cpu_usage_seconds_total{name!="",container!="POD",namespace=~"openshift-(sdn|ovn-kubernetes|ingress|monitoring|image-registry|logging)"}[2m]) * 100 and on (node) kube_node_role{role="infra"}) by (namespace, container)) > 0
  metricName: containerCPU-AggregatedInfra

- query: (sum(container_memory_rss{pod!="",namespace="openshift-monitoring",name!="",container="prometheus"}) by (container, pod, namespace, node) and on (node) kube_node_role{role="infra"}) > 0
  metricName: containerMemory-Prometheus
  instant: True

- query: avg(container_memory_rss{name!="",container!="POD",namespace=~"openshift-(sdn|ovn-kubernetes|ingress)"} and on (node) kube_node_role{role="worker"}) by (container, namespace)
  metricName: containerMemory-AggregatedWorkers
  instant: True

- query: avg(container_memory_rss{name!="",container!="POD",namespace=~"openshift-(sdn|ovn-kubernetes|ingress|monitoring|image-registry|logging)"} and on (node) kube_node_role{role="infra"}) by (container, namespace)
  metricName: containerMemory-AggregatedInfra
  instant: True

# Node metrics
- query: (sum(irate(node_cpu_seconds_total[2m])) by (mode,instance) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")) > 0
  metricName: nodeCPU-Masters
  instant: True

- query: max(max_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
  metricName: maxCPU-Masters
  instant: true

- query: avg(avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
  metricName: nodeMemory-Masters
  instant: true

- query: (avg((sum(irate(node_cpu_seconds_total[2m])) by (mode,instance) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))) by (mode)) > 0
  metricName: nodeCPU-AggregatedWorkers
  instant: True

- query: (avg((sum(irate(node_cpu_seconds_total[2m])) by (mode,instance) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))) by (mode)) > 0
  metricName: nodeCPU-AggregatedInfra
  instant: True

- query: avg(node_memory_MemAvailable_bytes) by (instance) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
  metricName: nodeMemoryAvailable-Masters
- query: avg(avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
  metricName: nodeMemory-Masters
  instant: true

- query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
  metricName: maxMemory-Masters
  instant: true

- query: avg(node_memory_MemAvailable_bytes and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
  metricName: nodeMemoryAvailable-AggregatedWorkers
  instant: True

- query: max(max_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
  metricName: maxCPU-Workers
  instant: true

- query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
  metricName: maxMemory-Workers
  instant: true

- query: avg(node_memory_MemAvailable_bytes and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))
  metricName: nodeMemoryAvailable-AggregatedInfra
  instant: True

- query: avg(node_memory_Active_bytes) by (instance) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
  metricName: nodeMemoryActive-Masters
  instant: True

- query: avg(node_memory_Active_bytes and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
  metricName: nodeMemoryActive-AggregatedWorkers
  instant: True

- query: avg(avg(node_memory_Active_bytes) by (instance) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))
  metricName: nodeMemoryActive-AggregatedInfra

- query: avg(node_memory_Cached_bytes) by (instance) + avg(node_memory_Buffers_bytes) by (instance) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
  metricName: nodeMemoryCached+nodeMemoryBuffers-Masters

- query: avg(node_memory_Cached_bytes + node_memory_Buffers_bytes and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
  metricName: nodeMemoryCached+nodeMemoryBuffers-AggregatedWorkers

- query: avg(node_memory_Cached_bytes + node_memory_Buffers_bytes and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))
  metricName: nodeMemoryCached+nodeMemoryBuffers-AggregatedInfra

- query: irate(node_network_receive_bytes_total{device=~"^(ens|eth|bond|team).*"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
  metricName: rxNetworkBytes-Masters

- query: avg(irate(node_network_receive_bytes_total{device=~"^(ens|eth|bond|team).*"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (device)
  metricName: rxNetworkBytes-AggregatedWorkers

- query: avg(irate(node_network_receive_bytes_total{device=~"^(ens|eth|bond|team).*"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (device)
  metricName: rxNetworkBytes-AggregatedInfra

- query: irate(node_network_transmit_bytes_total{device=~"^(ens|eth|bond|team).*"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
  metricName: txNetworkBytes-Masters

- query: avg(irate(node_network_transmit_bytes_total{device=~"^(ens|eth|bond|team).*"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (device)
  metricName: txNetworkBytes-AggregatedWorkers

- query: avg(irate(node_network_transmit_bytes_total{device=~"^(ens|eth|bond|team).*"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (device)
  metricName: txNetworkBytes-AggregatedInfra

- query: rate(node_disk_written_bytes_total{device!~"^(dm|rb).*"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
  metricName: nodeDiskWrittenBytes-Masters

- query: avg(rate(node_disk_written_bytes_total{device!~"^(dm|rb).*"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (device)
  metricName: nodeDiskWrittenBytes-AggregatedWorkers

- query: avg(rate(node_disk_written_bytes_total{device!~"^(dm|rb).*"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (device)
  metricName: nodeDiskWrittenBytes-AggregatedInfra

- query: rate(node_disk_read_bytes_total{device!~"^(dm|rb).*"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
  metricName: nodeDiskReadBytes-Masters

- query: avg(rate(node_disk_read_bytes_total{device!~"^(dm|rb).*"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (device)
  metricName: nodeDiskReadBytes-AggregatedWorkers

- query: avg(rate(node_disk_read_bytes_total{device!~"^(dm|rb).*"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (device)
  metricName: nodeDiskReadBytes-AggregatedInfra
  instant: True

# Etcd metrics
- query: sum(rate(etcd_server_leader_changes_seen_total[2m]))
  metricName: etcdLeaderChangesRate
  instant: True

- query: etcd_server_is_leader > 0
  metricName: etcdServerIsLeader
  instant: True

- query: histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket[2m]))
  metricName: 99thEtcdDiskBackendCommitDurationSeconds
  instant: True

- query: histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[2m]))
  metricName: 99thEtcdDiskWalFsyncDurationSeconds
  instant: True

- query: histogram_quantile(0.99, rate(etcd_network_peer_round_trip_time_seconds_bucket[5m]))
  metricName: 99thEtcdRoundTripTimeSeconds

- query: etcd_mvcc_db_total_size_in_bytes
  metricName: etcdDBPhysicalSizeBytes

- query: etcd_mvcc_db_total_size_in_use_in_bytes
  metricName: etcdDBLogicalSizeBytes
  instant: True

- query: sum by (cluster_version)(etcd_cluster_version)
  metricName: etcdVersion
@@ -135,83 +128,16 @@ metrics:

- query: sum(rate(etcd_object_counts{}[5m])) by (resource) > 0
  metricName: etcdObjectCount
  instant: True

- query: histogram_quantile(0.99,sum(rate(etcd_request_duration_seconds_bucket[2m])) by (le,operation,apiserver)) > 0
  metricName: P99APIEtcdRequestLatency

- query: sum(grpc_server_started_total{namespace="openshift-etcd",grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{namespace="openshift-etcd",grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"})
  metricName: ActiveWatchStreams

- query: sum(grpc_server_started_total{namespace="openshift-etcd",grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{namespace="openshift-etcd",grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"})
  metricName: ActiveLeaseStreams

- query: sum(rate(etcd_debugging_snap_save_total_duration_seconds_sum{namespace="openshift-etcd"}[2m]))
  metricName: snapshotSaveLatency

- query: sum(rate(etcd_server_heartbeat_send_failures_total{namespace="openshift-etcd"}[2m]))
  metricName: HeartBeatFailures

- query: sum(rate(etcd_server_health_failures{namespace="openshift-etcd"}[2m]))
  metricName: HealthFailures

- query: sum(rate(etcd_server_slow_apply_total{namespace="openshift-etcd"}[2m]))
  metricName: SlowApplies

- query: sum(rate(etcd_server_slow_read_indexes_total{namespace="openshift-etcd"}[2m]))
  metricName: SlowIndexRead

- query: sum(etcd_server_proposals_pending)
  metricName: PendingProposals

- query: histogram_quantile(1.0, sum(rate(etcd_debugging_mvcc_db_compaction_pause_duration_milliseconds_bucket[1m])) by (le, instance))
|
||||
metricName: CompactionMaxPause
|
||||
instant: True
|
||||
|
||||
- query: sum by (instance) (apiserver_storage_objects)
|
||||
metricName: etcdTotalObjectCount
|
||||
instant: True
|
||||
|
||||
- query: topk(500, max by(resource) (apiserver_storage_objects))
|
||||
metricName: etcdTopObectCount
|
||||
|
||||
# Cluster metrics
|
||||
- query: count(kube_namespace_created)
|
||||
metricName: namespaceCount
|
||||
|
||||
- query: sum(kube_pod_status_phase{}) by (phase)
|
||||
metricName: podStatusCount
|
||||
|
||||
- query: count(kube_secret_info{})
|
||||
metricName: secretCount
|
||||
|
||||
- query: count(kube_deployment_labels{})
|
||||
metricName: deploymentCount
|
||||
|
||||
- query: count(kube_configmap_info{})
|
||||
metricName: configmapCount
|
||||
|
||||
- query: count(kube_service_info{})
|
||||
metricName: serviceCount
|
||||
|
||||
- query: kube_node_role
|
||||
metricName: nodeRoles
|
||||
instant: true
|
||||
|
||||
- query: sum(kube_node_status_condition{status="true"}) by (condition)
|
||||
metricName: nodeStatus
|
||||
|
||||
- query: (sum(rate(container_fs_writes_bytes_total{container!="",device!~".+dm.+"}[5m])) by (device, container, node) and on (node) kube_node_role{role="master"}) > 0
|
||||
metricName: containerDiskUsage
|
||||
|
||||
- query: cluster_version{type="completed"}
|
||||
metricName: clusterVersion
|
||||
instant: true
|
||||
|
||||
# Golang metrics
|
||||
|
||||
- query: go_memstats_heap_alloc_bytes{job=~"apiserver|api|etcd"}
|
||||
metricName: goHeapAllocBytes
|
||||
|
||||
- query: go_memstats_heap_inuse_bytes{job=~"apiserver|api|etcd"}
|
||||
metricName: goHeapInuseBytes
|
||||
|
||||
- query: go_gc_duration_seconds{job=~"apiserver|api|etcd",quantile="1"}
|
||||
metricName: goGCDurationSeconds
|
||||
instant: True
|
||||
|
||||
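All of the role-scoped queries above rely on the same PromQL idiom: `kube_node_role` (from kube-state-metrics) carries the node name in a `node` label, while the node_exporter series carry an `instance` label, so `label_replace` copies `node` into `instance` before the `and on (instance)` vector match filters the series down to one role. A minimal sketch of the pattern, taken from the worker memory query above:

```
node_memory_Active_bytes
  and on (instance)
  label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")
```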
248  config/metrics-report.yaml  Normal file
@@ -0,0 +1,248 @@
metrics:

  # API server
  - query: sum(apiserver_current_inflight_requests{}) by (request_kind) > 0
    metricName: APIInflightRequests
    instant: true

  # Kubelet & CRI-O

  # Average and max of the CPU usage from all worker's kubelet
  - query: avg(avg_over_time(irate(process_cpu_seconds_total{service="kubelet",job="kubelet"}[2m])[.elapsed:]) and on (node) kube_node_role{role="worker"})
    metricName: cpu-kubelet
    instant: true

  - query: max(max_over_time(irate(process_cpu_seconds_total{service="kubelet",job="kubelet"}[2m])[.elapsed:]) and on (node) kube_node_role{role="worker"})
    metricName: max-cpu-kubelet
    instant: true

  # Average of the memory usage from all worker's kubelet
  - query: avg(avg_over_time(process_resident_memory_bytes{service="kubelet",job="kubelet"}[.elapsed:]) and on (node) kube_node_role{role="worker"})
    metricName: memory-kubelet
    instant: true

  # Max of the memory usage from all worker's kubelet
  - query: max(max_over_time(process_resident_memory_bytes{service="kubelet",job="kubelet"}[.elapsed:]) and on (node) kube_node_role{role="worker"})
    metricName: max-memory-kubelet
    instant: true

  - query: max_over_time(sum(process_resident_memory_bytes{service="kubelet",job="kubelet"} and on (node) kube_node_role{role="worker"})[.elapsed:])
    metricName: max-memory-sum-kubelet
    instant: true

  # Average and max of the CPU usage from all worker's CRI-O
  - query: avg(avg_over_time(irate(process_cpu_seconds_total{service="kubelet",job="crio"}[2m])[.elapsed:]) and on (node) kube_node_role{role="worker"})
    metricName: cpu-crio
    instant: true

  - query: max(max_over_time(irate(process_cpu_seconds_total{service="kubelet",job="crio"}[2m])[.elapsed:]) and on (node) kube_node_role{role="worker"})
    metricName: max-cpu-crio
    instant: true

  # Average of the memory usage from all worker's CRI-O
  - query: avg(avg_over_time(process_resident_memory_bytes{service="kubelet",job="crio"}[.elapsed:]) and on (node) kube_node_role{role="worker"})
    metricName: memory-crio
    instant: true

  # Max of the memory usage from all worker's CRI-O
  - query: max(max_over_time(process_resident_memory_bytes{service="kubelet",job="crio"}[.elapsed:]) and on (node) kube_node_role{role="worker"})
    metricName: max-memory-crio
    instant: true

  # Etcd

  - query: avg(avg_over_time(histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket[2m]))[.elapsed:]))
    metricName: 99thEtcdDiskBackendCommit
    instant: true

  - query: avg(avg_over_time(histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[2m]))[.elapsed:]))
    metricName: 99thEtcdDiskWalFsync
    instant: true

  - query: avg(avg_over_time(histogram_quantile(0.99, irate(etcd_network_peer_round_trip_time_seconds_bucket[2m]))[.elapsed:]))
    metricName: 99thEtcdRoundTripTime
    instant: true

  # Control-plane

  - query: avg(avg_over_time(topk(1, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-kube-controller-manager"}[2m])) by (pod))[.elapsed:]))
    metricName: cpu-kube-controller-manager
    instant: true

  - query: max(max_over_time(topk(1, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-kube-controller-manager"}[2m])) by (pod))[.elapsed:]))
    metricName: max-cpu-kube-controller-manager
    instant: true

  - query: avg(avg_over_time(topk(1, sum(container_memory_rss{name!="", namespace="openshift-kube-controller-manager"}) by (pod))[.elapsed:]))
    metricName: memory-kube-controller-manager
    instant: true

  - query: max(max_over_time(topk(1, sum(container_memory_rss{name!="", namespace="openshift-kube-controller-manager"}) by (pod))[.elapsed:]))
    metricName: max-memory-kube-controller-manager
    instant: true

  - query: avg(avg_over_time(topk(3, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-kube-apiserver"}[2m])) by (pod))[.elapsed:]))
    metricName: cpu-kube-apiserver
    instant: true

  - query: avg(avg_over_time(topk(3, sum(container_memory_rss{name!="", namespace="openshift-kube-apiserver"}) by (pod))[.elapsed:]))
    metricName: memory-kube-apiserver
    instant: true

  - query: avg(avg_over_time(topk(3, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-apiserver"}[2m])) by (pod))[.elapsed:]))
    metricName: cpu-openshift-apiserver
    instant: true

  - query: avg(avg_over_time(topk(3, sum(container_memory_rss{name!="", namespace="openshift-apiserver"}) by (pod))[.elapsed:]))
    metricName: memory-openshift-apiserver
    instant: true

  - query: avg(avg_over_time(topk(3, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-etcd"}[2m])) by (pod))[.elapsed:]))
    metricName: cpu-etcd
    instant: true

  - query: avg(avg_over_time(topk(3,sum(container_memory_rss{name!="", namespace="openshift-etcd"}) by (pod))[.elapsed:]))
    metricName: memory-etcd
    instant: true

  - query: avg(avg_over_time(topk(1, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-controller-manager"}[2m])) by (pod))[.elapsed:]))
    metricName: cpu-openshift-controller-manager
    instant: true

  - query: avg(avg_over_time(topk(1, sum(container_memory_rss{name!="", namespace="openshift-controller-manager"}) by (pod))[.elapsed:]))
    metricName: memory-openshift-controller-manager
    instant: true

  # multus

  - query: avg(avg_over_time(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-multus", pod=~"(multus).+", container!="POD"}[2m])[.elapsed:])) by (container)
    metricName: cpu-multus
    instant: true

  - query: avg(avg_over_time(container_memory_rss{name!="", namespace="openshift-multus", pod=~"(multus).+", container!="POD"}[.elapsed:])) by (container)
    metricName: memory-multus
    instant: true

  # OVNKubernetes - standard & IC

  - query: avg(avg_over_time(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-ovn-kubernetes", pod=~"(ovnkube-master|ovnkube-control-plane).+", container!="POD"}[2m])[.elapsed:])) by (container)
    metricName: cpu-ovn-control-plane
    instant: true

  - query: avg(avg_over_time(container_memory_rss{name!="", namespace="openshift-ovn-kubernetes", pod=~"(ovnkube-master|ovnkube-control-plane).+", container!="POD"}[.elapsed:])) by (container)
    metricName: memory-ovn-control-plane
    instant: true

  - query: avg(avg_over_time(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-ovn-kubernetes", pod=~"ovnkube-node.+", container!="POD"}[2m])[.elapsed:])) by (container)
    metricName: cpu-ovnkube-node
    instant: true

  - query: avg(avg_over_time(container_memory_rss{name!="", namespace="openshift-ovn-kubernetes", pod=~"ovnkube-node.+", container!="POD"}[.elapsed:])) by (container)
    metricName: memory-ovnkube-node
    instant: true

  # Nodes

  - query: avg(avg_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
    metricName: cpu-masters
    instant: true

  - query: avg(avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
    metricName: memory-masters
    instant: true

  - query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
    metricName: max-memory-masters
    instant: true

  - query: avg(avg_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
    metricName: cpu-workers
    instant: true

  - query: max(max_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
    metricName: max-cpu-workers
    instant: true

  - query: avg(avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
    metricName: memory-workers
    instant: true

  - query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
    metricName: max-memory-workers
    instant: true

  - query: sum( (node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)") )
    metricName: memory-sum-workers
    instant: true

  - query: avg(avg_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
    metricName: cpu-infra
    instant: true

  - query: max(max_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
    metricName: max-cpu-infra
    instant: true

  - query: avg(avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))
    metricName: memory-infra
    instant: true

  - query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))
    metricName: max-memory-infra
    instant: true

  - query: max_over_time(sum((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))[.elapsed:])
    metricName: max-memory-sum-infra
    instant: true

  # Monitoring and ingress

  - query: avg(avg_over_time(sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-monitoring", pod=~"prometheus-k8s.+"}[2m])) by (pod)[.elapsed:]))
    metricName: cpu-prometheus
    instant: true

  - query: max(max_over_time(sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-monitoring", pod=~"prometheus-k8s.+"}[2m])) by (pod)[.elapsed:]))
    metricName: max-cpu-prometheus
    instant: true

  - query: avg(avg_over_time(sum(container_memory_rss{name!="", namespace="openshift-monitoring", pod=~"prometheus-k8s.+"}) by (pod)[.elapsed:]))
    metricName: memory-prometheus
    instant: true

  - query: max(max_over_time(sum(container_memory_rss{name!="", namespace="openshift-monitoring", pod=~"prometheus-k8s.+"}) by (pod)[.elapsed:]))
    metricName: max-memory-prometheus
    instant: true

  - query: avg(avg_over_time(sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-ingress", pod=~"router-default.+"}[2m])) by (pod)[.elapsed:]))
    metricName: cpu-router
    instant: true

  - query: avg(avg_over_time(sum(container_memory_rss{name!="", namespace="openshift-ingress", pod=~"router-default.+"}) by (pod)[.elapsed:]))
    metricName: memory-router
    instant: true

  # Cluster

  - query: avg_over_time(cluster:memory_usage:ratio[.elapsed:])
    metricName: memory-cluster-usage-ratio
    instant: true

  - query: avg_over_time(cluster:node_cpu:ratio[.elapsed:])
    metricName: cpu-cluster-usage-ratio
    instant: true

  # Retain the raw CPU seconds totals for comparison
  - query: sum(node_cpu_seconds_total and on (instance) label_replace(kube_node_role{role="worker",role!="infra"}, "instance", "$1", "node", "(.+)")) by (mode)
    metricName: nodeCPUSeconds-Workers
    instant: true

  - query: sum(node_cpu_seconds_total and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")) by (mode)
    metricName: nodeCPUSeconds-Masters
    instant: true

  - query: sum(node_cpu_seconds_total and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (mode)
    metricName: nodeCPUSeconds-Infra
    instant: true
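The `[.elapsed:]` token in the report queries above is a placeholder that, as far as this diff shows, krkn substitutes with the elapsed run duration before executing the query (the exact substitution mechanism is an assumption here). Assuming a 10m run, the first kubelet query would expand to an ordinary PromQL subquery:

```
avg(avg_over_time(irate(process_cpu_seconds_total{service="kubelet",job="kubelet"}[2m])[10m:]) and on (node) kube_node_role{role="worker"})
```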
@@ -1,13 +1,7 @@
metrics:
  # API server
  - query: histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{apiserver="kube-apiserver", verb!~"WATCH", subresource!="log"}[2m])) by (verb,resource,subresource,instance,le)) > 0
    metricName: API99thLatency

  - query: sum(irate(apiserver_request_total{apiserver="kube-apiserver",verb!="WATCH",subresource!="log"}[2m])) by (verb,instance,resource,code) > 0
    metricName: APIRequestRate

  - query: sum(apiserver_current_inflight_requests{}) by (request_kind) > 0
    metricName: APIInflightRequests
  - query: irate(apiserver_request_total{verb="POST", resource="pods", subresource="binding",code="201"}[2m]) > 0
    metricName: schedulingThroughput

  # Containers & pod metrics
  - query: sum(irate(container_cpu_usage_seconds_total{name!="",namespace=~"openshift-(etcd|oauth-apiserver|.*apiserver|ovn-kubernetes|sdn|ingress|authentication|.*controller-manager|.*scheduler|monitoring|logging|image-registry)"}[2m]) * 100) by (pod, namespace, node)
@@ -33,8 +27,17 @@ metrics:
    metricName: crioMemory

  # Node metrics
  - query: sum(irate(node_cpu_seconds_total[2m])) by (mode,instance) > 0
    metricName: nodeCPU
  - query: (sum(irate(node_cpu_seconds_total[2m])) by (mode,instance) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")) > 0
    metricName: nodeCPU-Masters

  - query: (avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
    metricName: nodeMemory-Masters

  - query: (sum(irate(node_cpu_seconds_total[2m])) by (mode,instance) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) > 0
    metricName: nodeCPU-Workers

  - query: (avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[2m:]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
    metricName: nodeMemory-Workers

  - query: avg(node_memory_MemAvailable_bytes) by (instance)
    metricName: nodeMemoryAvailable
@@ -42,6 +45,9 @@ metrics:
  - query: avg(node_memory_Active_bytes) by (instance)
    metricName: nodeMemoryActive

  - query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
    metricName: maxMemory-Masters

  - query: avg(node_memory_Cached_bytes) by (instance) + avg(node_memory_Buffers_bytes) by (instance)
    metricName: nodeMemoryCached+nodeMemoryBuffers

@@ -84,34 +90,4 @@ metrics:

  - query: sum by (cluster_version)(etcd_cluster_version)
    metricName: etcdVersion
    instant: true

  # Cluster metrics
  - query: count(kube_namespace_created)
    metricName: namespaceCount

  - query: sum(kube_pod_status_phase{}) by (phase)
    metricName: podStatusCount

  - query: count(kube_secret_info{})
    metricName: secretCount

  - query: count(kube_deployment_labels{})
    metricName: deploymentCount

  - query: count(kube_configmap_info{})
    metricName: configmapCount

  - query: count(kube_service_info{})
    metricName: serviceCount

  - query: kube_node_role
    metricName: nodeRoles
    instant: true

  - query: sum(kube_node_status_condition{status="true"}) by (condition)
    metricName: nodeStatus

  - query: cluster_version{type="completed"}
    metricName: clusterVersion
    instant: true
@@ -1,5 +1,5 @@
application: openshift-etcd
namespace: openshift-etcd
namespaces: openshift-etcd
labels: app=openshift-etcd
kubeconfig: ~/.kube/config.yaml
prometheus_endpoint: <Prometheus_Endpoint>
@@ -7,6 +7,8 @@ auth_token: <Auth_Token>
scrape_duration: 10m
chaos_library: "kraken"
log_level: INFO
json_output_file: False
json_output_folder_path:

# for output purposes only, do not change unless needed
chaos_tests:
@@ -26,4 +28,8 @@ chaos_tests:
    - pod_network_chaos
  MEM:
    - node_memory_hog
    - pvc_disk_fill

threshold: .7
cpu_threshold: .5
mem_threshold: .5
@@ -1,30 +0,0 @@
# Dockerfile for kraken

FROM mcr.microsoft.com/azure-cli:latest as azure-cli

FROM registry.access.redhat.com/ubi8/ubi:latest

LABEL org.opencontainers.image.authors="Red Hat OpenShift Chaos Engineering"

ENV KUBECONFIG /root/.kube/config

# Copy azure client binary from azure-cli image
COPY --from=azure-cli /usr/local/bin/az /usr/bin/az

# Install dependencies
RUN yum install -y git python39 python3-pip jq gettext wget && \
    python3.9 -m pip install -U pip && \
    git clone https://github.com/redhat-chaos/krkn.git --branch v1.5.3 /root/kraken && \
    mkdir -p /root/.kube && cd /root/kraken && \
    pip3.9 install -r requirements.txt && \
    pip3.9 install virtualenv && \
    wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O /usr/bin/yq && chmod +x /usr/bin/yq

# Get Kubernetes and OpenShift clients from stable releases
WORKDIR /tmp
RUN wget https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable/openshift-client-linux.tar.gz && tar -xvf openshift-client-linux.tar.gz && cp oc /usr/local/bin/oc && cp kubectl /usr/local/bin/kubectl

WORKDIR /root/kraken

ENTRYPOINT ["python3.9", "run_kraken.py"]
CMD ["--config=config/config.yaml"]
@@ -1,29 +0,0 @@
# Dockerfile for kraken

FROM ppc64le/centos:8

FROM mcr.microsoft.com/azure-cli:latest as azure-cli

LABEL org.opencontainers.image.authors="Red Hat OpenShift Chaos Engineering"

ENV KUBECONFIG /root/.kube/config

# Copy azure client binary from azure-cli image
COPY --from=azure-cli /usr/local/bin/az /usr/bin/az

# Install dependencies
RUN yum install -y git python39 python3-pip jq gettext wget && \
    python3.9 -m pip install -U pip && \
    git clone https://github.com/redhat-chaos/krkn.git --branch v1.5.3 /root/kraken && \
    mkdir -p /root/.kube && cd /root/kraken && \
    pip3.9 install -r requirements.txt && \
    pip3.9 install virtualenv && \
    wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O /usr/bin/yq && chmod +x /usr/bin/yq

# Get Kubernetes and OpenShift clients from stable releases
WORKDIR /tmp
RUN wget https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable/openshift-client-linux.tar.gz && tar -xvf openshift-client-linux.tar.gz && cp oc /usr/local/bin/oc && cp kubectl /usr/local/bin/kubectl

WORKDIR /root/kraken

ENTRYPOINT python3.9 run_kraken.py --config=config/config.yaml
66  containers/Dockerfile.template  Normal file
@@ -0,0 +1,66 @@
# oc build
FROM golang:1.23.1 AS oc-build
RUN apt-get update && apt-get install -y --no-install-recommends libkrb5-dev
WORKDIR /tmp
RUN git clone --branch release-4.18 https://github.com/openshift/oc.git
WORKDIR /tmp/oc
RUN go mod edit -go 1.23.1 && \
    go get github.com/moby/buildkit@v0.12.5 && \
    go get github.com/containerd/containerd@v1.7.11 && \
    go get github.com/docker/docker@v25.0.6 && \
    go get github.com/opencontainers/runc@v1.1.14 && \
    go get github.com/go-git/go-git/v5@v5.13.0 && \
    go get golang.org/x/net@v0.36.0 && \
    go get github.com/containerd/containerd@v1.7.27 && \
    go get golang.org/x/oauth2@v0.27.0 && \
    go get golang.org/x/crypto@v0.35.0 && \
    go mod tidy && go mod vendor
RUN make GO_REQUIRED_MIN_VERSION:= oc

FROM fedora:40
ARG PR_NUMBER
ARG TAG
RUN groupadd -g 1001 krkn && useradd -m -u 1001 -g krkn krkn
RUN dnf update -y

ENV KUBECONFIG /home/krkn/.kube/config

# install kubectl
RUN curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" && \
    cp kubectl /usr/local/bin/kubectl && chmod +x /usr/local/bin/kubectl && \
    cp kubectl /usr/bin/kubectl && chmod +x /usr/bin/kubectl

# This overwrites any existing configuration in /etc/yum.repos.d/kubernetes.repo
RUN dnf update && dnf install -y --setopt=install_weak_deps=False \
    git python39 jq yq gettext wget which && \
    dnf clean all

# copy oc client binary from oc-build image
COPY --from=oc-build /tmp/oc/oc /usr/bin/oc

# krkn build
RUN git clone https://github.com/krkn-chaos/krkn.git /home/krkn/kraken && \
    mkdir -p /home/krkn/.kube

WORKDIR /home/krkn/kraken

# default behaviour will be to build main
# if it is a PR trigger the PR itself will be checked out
RUN if [ -n "$PR_NUMBER" ]; then git fetch origin pull/${PR_NUMBER}/head:pr-${PR_NUMBER} && git checkout pr-${PR_NUMBER}; fi
# if it is a TAG trigger checkout the tag
RUN if [ -n "$TAG" ]; then git checkout "$TAG"; fi

RUN python3.9 -m ensurepip --upgrade --default-pip
RUN python3.9 -m pip install --upgrade pip setuptools==70.0.0
RUN pip3.9 install -r requirements.txt
RUN pip3.9 install jsonschema

LABEL krknctl.title.global="Krkn Base Image"
LABEL krknctl.description.global="This is the krkn base image."
LABEL krknctl.input_fields.global='$KRKNCTL_INPUT'

RUN chown -R krkn:krkn /home/krkn && chmod 755 /home/krkn
USER krkn
ENTRYPOINT ["python3.9", "run_kraken.py"]
CMD ["--config=config/config.yaml"]
@@ -12,35 +12,3 @@ Refer [instructions](https://github.com/redhat-chaos/krkn/blob/main/docs/install
### Run Custom Kraken Image

Refer to [instructions](https://github.com/redhat-chaos/krkn/blob/main/containers/build_own_image-README.md) for information on how to run a custom containerized version of kraken using podman.


### Kraken as a KubeApp (unsupported and not recommended)

#### GENERAL NOTES:

- It is generally not recommended to run Kraken inside the target cluster, since the pod running Kraken might itself get disrupted. The suggested use case for running Kraken from inside Kubernetes/OpenShift is to target **another** cluster (e.g. to bypass network restrictions or to leverage the cluster's computational resources).

- Your kubeconfig might contain several cluster contexts and credentials, so before creating the ConfigMap make sure it keeps **only** the credentials related to the destination cluster. Please refer to the [Kubernetes documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) for more details.
- To add privileges to the service account you must be logged in to the cluster with a highly privileged account (ideally kubeadmin).


To run containerized Kraken as a Kubernetes/OpenShift Deployment, follow these steps (a consolidated sketch of the commands follows this list):

1. Configure the [config.yaml](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml) file according to your requirements.

   **NOTE**: both the scenarios ConfigMaps are needed regardless of whether you're running kraken in Kubernetes or OpenShift.

2. Create a namespace under which you want to run the kraken pod using `kubectl create ns <namespace>`.
3. Switch to the `<namespace>` namespace:
   - In Kubernetes, use `kubectl config set-context --current --namespace=<namespace>`
   - In OpenShift, use `oc project <namespace>`
4. Create a ConfigMap named kube-config using `kubectl create configmap kube-config --from-file=<path_to_kubeconfig>` *(e.g. ~/.kube/config)*
5. Create a ConfigMap named kraken-config using `kubectl create configmap kraken-config --from-file=<path_to_kraken>/config`
6. Create a ConfigMap named scenarios-config using `kubectl create configmap scenarios-config --from-file=<path_to_kraken>/scenarios`
7. Create a ConfigMap named scenarios-openshift-config using `kubectl create configmap scenarios-openshift-config --from-file=<path_to_kraken>/scenarios/openshift`
8. Create a ConfigMap named scenarios-kube-config using `kubectl create configmap scenarios-kube-config --from-file=<path_to_kraken>/scenarios/kube`
9. Create a service account to run the kraken pod: `kubectl create serviceaccount useroot`.
10. In OpenShift, add privileges to the service account by executing `oc adm policy add-scc-to-user privileged -z useroot`.
11. Create a Job using `kubectl apply -f <path_to_kraken>/containers/kraken.yml` and monitor its status using `oc get jobs` and `oc get pods`.
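For convenience, steps 2-11 can be scripted roughly as follows (a sketch assuming the repository root as the working directory and a hypothetical namespace name `krkn-demo`):

```
kubectl create ns krkn-demo
kubectl config set-context --current --namespace=krkn-demo
kubectl create configmap kube-config --from-file=$HOME/.kube/config
kubectl create configmap kraken-config --from-file=./config
kubectl create configmap scenarios-config --from-file=./scenarios
kubectl create configmap scenarios-openshift-config --from-file=./scenarios/openshift
kubectl create configmap scenarios-kube-config --from-file=./scenarios/kube
kubectl create serviceaccount useroot
oc adm policy add-scc-to-user privileged -z useroot   # OpenShift only
kubectl apply -f ./containers/kraken.yml
kubectl get jobs && kubectl get pods
```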
5  containers/compile_dockerfile.sh  Executable file
@@ -0,0 +1,5 @@
# resolve the directory containing this script so it can be invoked from anywhere
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"
# flatten the krknctl input schema to a single line so it can be substituted into the Dockerfile
export KRKNCTL_INPUT=$(cat krknctl-input.json | tr -d "\n")

envsubst '${KRKNCTL_INPUT}' < Dockerfile.template > Dockerfile
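A hypothetical end-to-end invocation from the repository root (image name and tag chosen for illustration only):

```
./containers/compile_dockerfile.sh
podman build -t quay.io/krkn-chaos/krkn:dev -f containers/Dockerfile containers/
```

Rendering through `envsubst` rather than templating at build time keeps the JSON schema in a single file while still embedding it verbatim into the `krknctl.input_fields.global` image label.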
@@ -1,49 +0,0 @@
---
apiVersion: batch/v1
kind: Job
metadata:
  name: kraken
spec:
  parallelism: 1
  completions: 1
  template:
    metadata:
      labels:
        tool: Kraken
    spec:
      serviceAccountName: useroot
      containers:
        - name: kraken
          securityContext:
            privileged: true
          image: quay.io/redhat-chaos/krkn
          command: ["/bin/sh", "-c"]
          args: ["python3.9 run_kraken.py -c config/config.yaml"]
          volumeMounts:
            - mountPath: "/root/.kube"
              name: config
            - mountPath: "/root/kraken/config"
              name: kraken-config
            - mountPath: "/root/kraken/scenarios"
              name: scenarios-config
            - mountPath: "/root/kraken/scenarios/openshift"
              name: scenarios-openshift-config
            - mountPath: "/root/kraken/scenarios/kube"
              name: scenarios-kube-config
      restartPolicy: Never
      volumes:
        - name: config
          configMap:
            name: kube-config
        - name: kraken-config
          configMap:
            name: kraken-config
        - name: scenarios-config
          configMap:
            name: scenarios-config
        - name: scenarios-openshift-config
          configMap:
            name: scenarios-openshift-config
        - name: scenarios-kube-config
          configMap:
            name: scenarios-kube-config
381  containers/krknctl-input.json  Normal file
@@ -0,0 +1,381 @@
[
  {"name": "cerberus-enabled", "short_description": "Enable Cerberus", "description": "Enables Cerberus Support", "variable": "CERBERUS_ENABLED", "type": "enum", "default": "False", "allowed_values": "True,False", "separator": ",", "required": "false"},
  {"name": "cerberus-url", "short_description": "Cerberus URL", "description": "Cerberus http url", "variable": "CERBERUS_URL", "type": "string", "default": "http://0.0.0.0:8080", "validator": "^(http|https):\/\/.*", "required": "false"},
  {"name": "distribution", "short_description": "Orchestrator distribution", "description": "Selects the orchestrator distribution", "variable": "DISTRIBUTION", "type": "enum", "default": "openshift", "allowed_values": "openshift,kubernetes", "separator": ",", "required": "false"},
  {"name": "krkn-kubeconfig", "short_description": "Krkn kubeconfig path", "description": "Sets the path where krkn will search for kubeconfig (in container)", "variable": "KRKN_KUBE_CONFIG", "type": "string", "default": "/home/krkn/.kube/config", "required": "false"},
  {"name": "wait-duration", "short_description": "Post chaos wait duration", "description": "waits for a certain amount of time after the scenario", "variable": "WAIT_DURATION", "type": "number", "default": "1"},
  {"name": "iterations", "short_description": "Chaos scenario iterations", "description": "number of times the same chaos scenario will be executed", "variable": "ITERATIONS", "type": "number", "default": "1"},
  {"name": "daemon-mode", "short_description": "Sets krkn daemon mode", "description": "if set the scenario will execute forever", "variable": "DAEMON_MODE", "type": "enum", "allowed_values": "True,False", "separator": ",", "default": "False", "required": "false"},
  {"name": "uuid", "short_description": "Sets krkn run uuid", "description": "sets krkn run uuid instead of generating it", "variable": "UUID", "type": "string", "default": "", "required": "false"},
  {"name": "capture-metrics", "short_description": "Enables metrics capture", "description": "Enables metrics capture", "variable": "CAPTURE_METRICS", "type": "enum", "allowed_values": "True,False", "separator": ",", "default": "False", "required": "false"},
  {"name": "enable-alerts", "short_description": "Enables cluster alerts check", "description": "Enables cluster alerts check", "variable": "ENABLE_ALERTS", "type": "enum", "allowed_values": "True,False", "separator": ",", "default": "False", "required": "false"},
  {"name": "alerts-path", "short_description": "Cluster alerts path file (in container)", "description": "Allows to specify a different alert file path", "variable": "ALERTS_PATH", "type": "string", "default": "config/alerts.yaml", "required": "false"},
  {"name": "metrics-path", "short_description": "Cluster metrics path file (in container)", "description": "Allows to specify a different metrics file path", "variable": "METRICS_PATH", "type": "string", "default": "config/metrics-aggregated.yaml", "required": "false"},
  {"name": "enable-es", "short_description": "Enables elastic search data collection", "description": "Enables elastic search data collection", "variable": "ENABLE_ES", "type": "enum", "allowed_values": "True,False", "separator": ",", "default": "False", "required": "false"},
  {"name": "es-server", "short_description": "Elasticsearch instance URL", "description": "Elasticsearch instance URL", "variable": "ES_SERVER", "type": "string", "default": "http://0.0.0.0", "required": "false"},
  {"name": "es-port", "short_description": "Elasticsearch instance port", "description": "Elasticsearch instance port", "variable": "ES_PORT", "type": "number", "default": "443", "required": "false"},
  {"name": "es-username", "short_description": "Elasticsearch instance username", "description": "Elasticsearch instance username", "variable": "ES_USERNAME", "type": "string", "default": "elastic", "required": "false"},
  {"name": "es-password", "short_description": "Elasticsearch instance password", "description": "Elasticsearch instance password", "variable": "ES_PASSWORD", "type": "string", "default": "", "required": "false"},
  {"name": "es-verify-certs", "short_description": "Enables elasticsearch TLS certificate verification", "description": "Enables elasticsearch TLS certificate verification", "variable": "ES_VERIFY_CERTS", "type": "enum", "allowed_values": "True,False", "separator": ",", "default": "False", "required": "false"},
  {"name": "es-metrics-index", "short_description": "Elasticsearch metrics index", "description": "Index name for metrics in Elasticsearch", "variable": "ES_METRICS_INDEX", "type": "string", "default": "krkn-metrics", "required": "false"},
  {"name": "es-alerts-index", "short_description": "Elasticsearch alerts index", "description": "Index name for alerts in Elasticsearch", "variable": "ES_ALERTS_INDEX", "type": "string", "default": "krkn-alerts", "required": "false"},
  {"name": "es-telemetry-index", "short_description": "Elasticsearch telemetry index", "description": "Index name for telemetry in Elasticsearch", "variable": "ES_TELEMETRY_INDEX", "type": "string", "default": "krkn-telemetry", "required": "false"},
  {"name": "check-critical-alerts", "short_description": "Check critical alerts", "description": "Enables checking for critical alerts", "variable": "CHECK_CRITICAL_ALERTS", "type": "enum", "allowed_values": "True,False", "separator": ",", "default": "False", "required": "false"},
  {"name": "telemetry-enabled", "short_description": "Enable telemetry", "description": "Enables telemetry support", "variable": "TELEMETRY_ENABLED", "type": "enum", "allowed_values": "True,False", "separator": ",", "default": "False", "required": "false"},
  {"name": "telemetry-api-url", "short_description": "Telemetry API URL", "description": "API endpoint for telemetry data", "variable": "TELEMETRY_API_URL", "type": "string", "default": "https://ulnmf9xv7j.execute-api.us-west-2.amazonaws.com/production", "validator": "^(http|https):\/\/.*", "required": "false"},
  {"name": "telemetry-username", "short_description": "Telemetry username", "description": "Username for telemetry authentication", "variable": "TELEMETRY_USERNAME", "type": "string", "default": "redhat-chaos", "required": "false"},
  {"name": "telemetry-password", "short_description": "Telemetry password", "description": "Password for telemetry authentication", "variable": "TELEMETRY_PASSWORD", "type": "string", "default": "", "required": "false"},
  {"name": "telemetry-prometheus-backup", "short_description": "Prometheus backup for telemetry", "description": "Enables Prometheus backup for telemetry", "variable": "TELEMETRY_PROMETHEUS_BACKUP", "type": "enum", "allowed_values": "True,False", "separator": ",", "default": "True", "required": "false"},
  {"name": "telemetry-full-prometheus-backup", "short_description": "Full Prometheus backup", "description": "Enables full Prometheus backup for telemetry", "variable": "TELEMETRY_FULL_PROMETHEUS_BACKUP", "type": "enum", "allowed_values": "True,False", "separator": ",", "default": "False", "required": "false"},
  {"name": "telemetry-backup-threads", "short_description": "Telemetry backup threads", "description": "Number of threads for telemetry backup", "variable": "TELEMETRY_BACKUP_THREADS", "type": "number", "default": "5", "required": "false"},
  {"name": "telemetry-archive-path", "short_description": "Telemetry archive path", "description": "Path to save telemetry archive", "variable": "TELEMETRY_ARCHIVE_PATH", "type": "string", "default": "/tmp", "required": "false"},
  {"name": "telemetry-max-retries", "short_description": "Telemetry max retries", "description": "Maximum retries for telemetry operations", "variable": "TELEMETRY_MAX_RETRIES", "type": "number", "default": "0", "required": "false"},
  {"name": "telemetry-run-tag", "short_description": "Telemetry run tag", "description": "Tag for telemetry run", "variable": "TELEMETRY_RUN_TAG", "type": "string", "default": "chaos", "required": "false"},
  {"name": "telemetry-group", "short_description": "Telemetry group", "description": "Group name for telemetry data", "variable": "TELEMETRY_GROUP", "type": "string", "default": "default", "required": "false"},
  {"name": "telemetry-archive-size", "short_description": "Telemetry archive size", "description": "Maximum size for telemetry archives", "variable": "TELEMETRY_ARCHIVE_SIZE", "type": "number", "default": "1000", "required": "false"},
  {"name": "telemetry-logs-backup", "short_description": "Telemetry logs backup", "description": "Enables logs backup for telemetry", "variable": "TELEMETRY_LOGS_BACKUP", "type": "enum", "allowed_values": "True,False", "separator": ",", "default": "False", "required": "false"},
  {"name": "telemetry-filter-pattern", "short_description": "Telemetry filter pattern", "description": "Filter pattern for telemetry logs", "variable": "TELEMETRY_FILTER_PATTERN", "type": "string", "default": "[\"(\\\\w{3}\\\\s\\\\d{1,2}\\\\s\\\\d{2}:\\\\d{2}:\\\\d{2}\\\\.\\\\d+).+\",\"kinit (\\\\d+/\\\\d+/\\\\d+\\\\s\\\\d{2}:\\\\d{2}:\\\\d{2})\\\\s+\",\"(\\\\d{4}-\\\\d{2}-\\\\d{2}T\\\\d{2}:\\\\d{2}:\\\\d{2}\\\\.\\\\d+Z).+\"]", "required": "false"},
  {"name": "telemetry-cli-path", "short_description": "Telemetry CLI path (oc)", "description": "Path to telemetry CLI tool (oc)", "variable": "TELEMETRY_CLI_PATH", "type": "string", "default": "", "required": "false"},
  {"name": "telemetry-events-backup", "short_description": "Telemetry events backup", "description": "Enables events backup for telemetry", "variable": "TELEMETRY_EVENTS_BACKUP", "type": "enum", "allowed_values": "True,False", "separator": ",", "default": "True", "required": "false"},
  {"name": "krkn-debug", "short_description": "Krkn debug mode", "description": "Enables debug mode for Krkn", "variable": "KRKN_DEBUG", "type": "enum", "allowed_values": "True,False", "separator": ",", "default": "False", "required": "false"}
]
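Each entry maps a krknctl input flag to an environment variable; krknctl reads this schema from the `krknctl.input_fields.global` image label and passes the chosen values to the container as environment variables. A hypothetical manual equivalent, bypassing krknctl, would be to export the variables directly (values are illustrative, and whether a plain `podman run` honors every variable end-to-end depends on the image entrypoint, which is an assumption here):

```
podman run \
  -e ITERATIONS=3 \
  -e WAIT_DURATION=60 \
  -e TELEMETRY_ENABLED=False \
  -v $HOME/.kube/config:/home/krkn/.kube/config:Z \
  quay.io/krkn-chaos/krkn:latest
```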
@@ -1,31 +0,0 @@
version: "3"
services:
  elastic:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.13.2
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
    network_mode: host
    environment:
      discovery.type: single-node
  kibana:
    image: docker.elastic.co/kibana/kibana:7.13.2
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
    network_mode: host
    environment:
      ELASTICSEARCH_HOSTS: "http://0.0.0.0:9200"
  cerberus:
    image: quay.io/openshift-scale/cerberus:latest
    privileged: true
    deploy:
      replicas: 1
      restart_policy:
        condition: on-failure
    network_mode: host
    volumes:
      - ./config/cerberus.yaml:/root/cerberus/config/config.yaml:Z # Modify the config in case of the need to monitor additional components
      - ${HOME}/.kube/config:/root/.kube/config:Z
@@ -38,11 +38,11 @@ A couple of [alert profiles](https://github.com/redhat-chaos/krkn/tree/main/conf
  severity: critical
```

Kube-burner supports setting the severity for the alerts, with each one having a different effect:
Krkn supports setting the severity for the alerts, with each one having a different effect:

```
info: Prints an info message with the alarm description to stdout. By default all expressions have this severity.
warning: Prints a warning message with the alarm description to stdout.
error: Prints an error message with the alarm description to stdout and makes kube-burner rc = 1
error: Prints an error message with the alarm description to stdout and sets Krkn rc = 1
critical: Prints a fatal message with the alarm description to stdout and exits execution immediately with rc != 0
```
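For illustration, a complete profile entry pairing an expression with a severity might look like the following sketch (the expression, threshold, and description are assumptions chosen for the example, not entries copied from the shipped profiles):

```
- expr: histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[2m])) > 0.01
  description: 99th percentile etcd WAL fsync latency higher than 10ms
  severity: error
```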
@@ -1,70 +0,0 @@
## Arcaflow Scenarios
Arcaflow is a workflow engine in development which provides the ability to execute workflow steps in sequence, in parallel, repeatedly, etc. The main difference from competitors such as Netflix Conductor is the ability to run ad-hoc workflows without requiring any infrastructure setup.

The engine uses containers to execute plugins and runs them either locally in Docker/Podman or remotely on a Kubernetes cluster. The workflow system is strongly typed and allows for generating JSON schema and OpenAPI documents for all data formats involved.

### Available Scenarios
#### Hog scenarios:
- [CPU Hog](arcaflow_scenarios/cpu_hog.md)
- [Memory Hog](arcaflow_scenarios/memory_hog.md)
- [I/O Hog](arcaflow_scenarios/io_hog.md)


### Prerequisites
Arcaflow supports three deployment technologies:
- Docker
- Podman
- Kubernetes

#### Docker
In order to run Arcaflow scenarios with the Docker deployer, be sure that:
- Docker is correctly installed in your operating system (for instructions on how to install Docker, please refer to the [Docker Documentation](https://www.docker.com/))
- The Docker daemon is running

#### Podman
The Podman deployer is built around the podman CLI and does not necessarily need to run alongside the podman daemon.
To run Arcaflow scenarios in your operating system, be sure that:
- podman is correctly installed in your operating system (for instructions on how to install podman, refer to the [Podman Documentation](https://podman.io/))
- the podman CLI is in your shell PATH

#### Kubernetes
The Kubernetes deployer integrates the Kubernetes API client directly and needs only a valid kubeconfig file and a reachable Kubernetes/OpenShift cluster.

### Usage

To enable arcaflow scenarios, edit the kraken config file: go to the section `kraken -> chaos_scenarios` of the yaml structure, add a new element to the list named `arcaflow_scenarios`, then add the desired scenario pointing to its `input.yaml` file.
```
kraken:
    ...
    chaos_scenarios:
        - arcaflow_scenarios:
            - scenarios/arcaflow/cpu-hog/input.yaml
```

#### input.yaml
The implemented scenarios can be found in the *scenarios/arcaflow/<scenario_name>* folder.
The entrypoint of each scenario is the *input.yaml* file.
This file contains all the options needed to set up the scenario according to the desired target.
### config.yaml
The arcaflow config file. Here you can set the arcaflow deployer and the arcaflow log level (a minimal sketch follows the lists below).
The supported deployers are:
- Docker
- Podman (podman daemon not needed, suggested option)
- Kubernetes

The supported log levels are:
- debug
- info
- warning
- error
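A minimal sketch of such a config file, assuming `deployer.type` and `log.level` are the field names the engine expects (an assumption; check the Arcaflow documentation for the authoritative schema):

```
deployer:
  type: podman
log:
  level: info
```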
### workflow.yaml
This file contains the steps that will be executed to perform the scenario against the target.
Each step is represented by a container that will be executed by the deployer, along with its options.
Note that we provide the scenarios as templates, but they can be modified to define more complex workflows.
For more details on the arcaflow workflow architecture and syntax, it is suggested to refer to the [Arcaflow Documentation](https://arcalot.io/arcaflow/).

This edit is no longer in the quay image.
Working on a fix in ticket: https://issues.redhat.com/browse/CHAOS-494
This will affect all versions 4.12 and higher of OpenShift.
@@ -1,19 +0,0 @@
# CPU Hog
This scenario is based on the arcaflow [arcaflow-plugin-stressng](https://github.com/arcalot/arcaflow-plugin-stressng) plugin.
The purpose of this scenario is to create CPU pressure on a particular node of the Kubernetes/OpenShift cluster for a given time span.
To enable this plugin, add the pointer to the scenario input file `scenarios/arcaflow/cpu-hog/input.yaml` as described in the Usage section.
This scenario takes a list of objects named `input_list` with the following properties:

- **kubeconfig :** *string* the kubeconfig needed by the deployer to deploy the stressng plugin in the target cluster
- **namespace :** *string* the namespace where the scenario container will be deployed
  **Note:** this parameter will be automatically filled by kraken if the `kubeconfig_path` property is correctly set
- **node_selector :** *key-value map* the node label that will be used as `nodeSelector` by the pod to target a specific cluster node
- **duration :** *string* stop the stress test after N seconds. One can also specify the units of time in seconds, minutes, hours, days or years with the suffix s, m, h, d or y.
- **cpu_count :** *int* the number of CPU cores to be used (0 means all)
- **cpu_method :** *string* fine-grained control of which CPU stressors to use (ackermann, cfloat, etc.; see the [manpage](https://manpages.org/sysbench) for all the cpu_method options)
- **cpu_load_percentage :** *int* the CPU load by percentage

To perform several load tests simultaneously in the same run (e.g. stress two or more nodes in the same run), add another item to the `input_list` with the same properties (and possibly different values, e.g. different node_selectors to schedule the pod on different nodes). To reduce (or increase) the parallelism, change the `parallelism` value in the `workload.yaml` file.
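Putting the properties above together, a single-node `input_list` entry might look like the following sketch (all values are illustrative, not shipped defaults; the same shape applies to the Memory Hog and I/O Hog scenarios with their own properties):

```
input_list:
  - kubeconfig: ""            # filled in automatically when kubeconfig_path is set
    namespace: default
    node_selector:
      kubernetes.io/hostname: worker-0
    duration: 90s
    cpu_count: 0              # 0 means use all cores
    cpu_method: all
    cpu_load_percentage: 80
```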
@@ -1,21 +0,0 @@

# I/O Hog
This scenario is based on the arcaflow [arcaflow-plugin-stressng](https://github.com/arcalot/arcaflow-plugin-stressng) plugin.
The purpose of this scenario is to create disk pressure on a particular node of the Kubernetes/OpenShift cluster for a given time span.
The scenario allows attaching a node path to the pod as a `hostPath` volume.
To enable this plugin add the pointer to the scenario input file `scenarios/arcaflow/io-hog/input.yaml` as described in the
Usage section.
This scenario takes a list of objects named `input_list` with the following properties:

- **kubeconfig :** *string* the kubeconfig needed by the deployer to deploy the stress-ng plugin in the target cluster
- **namespace :** *string* the namespace where the scenario container will be deployed
**Note:** this parameter will be automatically filled by kraken if the `kubeconfig_path` property is correctly set
- **node_selector :** *key-value map* the node label that will be used as `nodeSelector` by the pod to target a specific cluster node
- **duration :** *string* stop the stress test after N seconds. Units of time can also be specified as seconds, minutes, hours, days or years with the suffix s, m, h, d or y.
- **target_pod_folder :** *string* the path in the pod where the volume is mounted
- **target_pod_volume :** *object* the `hostPath` volume definition in the [Kubernetes/OpenShift](https://docs.openshift.com/container-platform/3.11/install_config/persistent_storage/using_hostpath.html) format, that will be attached to the pod as a volume
- **io_write_bytes :** *string* writes N bytes for each hdd process. The size can be expressed as % of free space on the file system or in units of Bytes, KBytes, MBytes and GBytes using the suffix b, k, m or g
- **io_block_size :** *string* the size of each write in bytes. The size can range from 1 byte to 4m.

To perform several load tests simultaneously in the same run (e.g. stress two or more nodes in the same run), add another item
to the `input_list` with the same properties (and possibly different values, e.g. different node_selectors
to schedule the pod on different nodes). To reduce (or increase) the parallelism, change the `parallelism` value in the `workload.yaml` file.
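As a hedged sketch, a single `input_list` entry for this scenario could look like the following; the hostPath definition and all values are examples only:

```yaml
# Hypothetical entry for scenarios/arcaflow/io-hog/input.yaml
input_list:
  - kubeconfig: ""                 # auto-filled by kraken when kubeconfig_path is set
    namespace: default
    node_selector:
      kubernetes.io/hostname: worker-0
    duration: 60s
    target_pod_folder: /hog-data   # where the volume is mounted in the pod
    target_pod_volume:             # hostPath volume attached to the pod (example shape)
      name: node-volume
      hostPath:
        path: /tmp
    io_write_bytes: 10%            # 10% of the free space on the file system
    io_block_size: 1m
```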
@@ -1,18 +0,0 @@

# Memory Hog
This scenario is based on the arcaflow [arcaflow-plugin-stressng](https://github.com/arcalot/arcaflow-plugin-stressng) plugin.
The purpose of this scenario is to create Virtual Memory pressure on a particular node of the Kubernetes/OpenShift cluster for a given time span.
To enable this plugin add the pointer to the scenario input file `scenarios/arcaflow/memory-hog/input.yaml` as described in the
Usage section.
This scenario takes a list of objects named `input_list` with the following properties:

- **kubeconfig :** *string* the kubeconfig needed by the deployer to deploy the stress-ng plugin in the target cluster
- **namespace :** *string* the namespace where the scenario container will be deployed
**Note:** this parameter will be automatically filled by kraken if the `kubeconfig_path` property is correctly set
- **node_selector :** *key-value map* the node label that will be used as `nodeSelector` by the pod to target a specific cluster node
- **duration :** *string* stop the stress test after N seconds. Units of time can also be specified as seconds, minutes, hours, days or years with the suffix s, m, h, d or y.
- **vm_bytes :** *string* N bytes per vm process or percentage of memory used (using the % symbol). The size can be expressed in units of Bytes, KBytes, MBytes and GBytes using the suffix b, k, m or g.
- **vm_workers :** *int* number of VM stressors to be run (0 means 1 stressor per CPU)

To perform several load tests simultaneously in the same run (e.g. stress two or more nodes in the same run), add another item
to the `input_list` with the same properties (and possibly different values, e.g. different node_selectors
to schedule the pod on different nodes). To reduce (or increase) the parallelism, change the `parallelism` value in the `workload.yaml` file.
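Again as a hedged sketch, a single `input_list` entry for this scenario could look like this (illustrative values only):

```yaml
# Hypothetical entry for scenarios/arcaflow/memory-hog/input.yaml
input_list:
  - kubeconfig: ""            # auto-filled by kraken when kubeconfig_path is set
    namespace: default
    node_selector:
      kubernetes.io/hostname: worker-0
    duration: 60s
    vm_bytes: 90%             # hog 90% of the available memory
    vm_workers: 2
```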
@@ -13,13 +13,26 @@ Supported Cloud Providers:

**NOTE**: For clusters with AWS make sure [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) is installed and properly [configured](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html) using an AWS account

## GCP
**NOTE**: For clusters with GCP make sure [GCP CLI](https://cloud.google.com/sdk/docs/install#linux) is installed.

A Google service account is required to give proper authentication to GCP for node actions. See [here](https://cloud.google.com/docs/authentication/getting-started) for how to create a service account.
In order to set up Application Default Credentials (ADC) for use by Cloud Client Libraries, you can provide either service account credentials or the credentials associated with your user account:

**NOTE**: A user with 'resourcemanager.projects.setIamPolicy' permission is required to grant project-level permissions to the service account.

- Using service account credentials:

  After creating the service account you will need to enable the account using the following: ```export GOOGLE_APPLICATION_CREDENTIALS="<serviceaccount.json>"```

- Using the credentials associated with your user account:

  1. Make sure that the [GCP CLI](https://cloud.google.com/sdk/docs/install#linux) is installed and [initialized](https://cloud.google.com/sdk/docs/initializing) by running:

     ```gcloud init```

  2. Create local authentication credentials for your user account:

     ```gcloud auth application-default login```

## Openstack

@@ -27,14 +40,13 @@ After creating the service account you will need to enable the account using the

## Azure

**NOTE**: For Azure node killing scenarios, make sure [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) is installed.

**NOTE**: You will need to create a service principal and give it the correct access, see [here](https://docs.openshift.com/container-platform/4.5/installing/installing_azure/installing-azure-account.html) for creating the service principal and setting the proper permissions.

To run properly, the service principal requires the "Azure Active Directory Graph/Application.ReadWrite.OwnedBy" API permission and the "User Access Administrator" role.

Before running you will need to set the following:
1. Login using ```az login```

2. ```export AZURE_SUBSCRIPTION_ID=<subscription_id>```

3. ```export AZURE_TENANT_ID=<tenant_id>```
@@ -68,9 +80,10 @@ Set the following environment variables

These are the credentials that you would normally use to access the vSphere client.

## IBMCloud
If no API key is set up with proper VPC resource permissions, use the following to create it:

* Access group
* Service ID with the following access
  * Policy **VPC Infrastructure Services**

@@ -1,5 +1,5 @@
#### Kubernetes cluster shut down scenario
Scenario to shut down all the nodes, including the masters, and restart them after the specified duration. A cluster shut down scenario can be injected by placing the shut_down config file under the cluster_shut_down_scenario option in the kraken config. Refer to the [cluster_shut_down_scenario](https://github.com/krkn-chaos/krkn/blob/main/scenarios/cluster_shut_down_scenario.yml) config file.

Refer to [cloud setup](cloud_setup.md) to configure your cli properly for the cloud provider of the cluster you want to shut down.

@@ -8,6 +8,7 @@ Current accepted cloud types:
* [GCP](cloud_setup.md#gcp)
* [AWS](cloud_setup.md#aws)
* [Openstack](cloud_setup.md#openstack)
* [IBMCloud](cloud_setup.md#ibmcloud)
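For orientation, a minimal shut_down config sketch; the field names are assumed from the linked cluster_shut_down_scenario.yml and the values are illustrative:

```yaml
cluster_shut_down_scenario:
  runs: 1                  # how many times to run the shut down scenario
  shut_down_duration: 1200 # seconds to wait before restarting all the nodes
  cloud_type: aws          # one of the accepted cloud types listed above
```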
@@ -12,10 +12,6 @@ Config components:
# Kraken
This section defines the scenarios and the data specific to the chaos run

## Distribution
Either **openshift** or **kubernetes** depending on the type of cluster you want to run chaos on.
The prometheus url/route and bearer token are automatically obtained in the case of OpenShift; please set them when the distribution is Kubernetes.

## Exit on failure
**exit_on_failure**: Exit when a post action check or cerberus run fails
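A minimal sketch of the corresponding part of the config, using the keys described above (values are examples; see config/config.yaml in the repository for the authoritative layout):

```yaml
kraken:
  distribution: kubernetes   # openshift or kubernetes
  exit_on_failure: False     # exit when a post action check or cerberus run fails
```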
@@ -4,7 +4,7 @@ This can be based on the pods namespace or labels. If you know the exact object
These scenarios are in a simple yaml format that you can manipulate to run your specific tests or use the pre-existing scenarios to see how it works.

#### Example Config
The following are the components of Kubernetes for which a basic chaos scenario config exists today.

```
scenarios:
@@ -25,7 +25,7 @@ In all scenarios we do a post chaos check to wait and verify the specific compon
Here there are two options:
1. Pass a custom script in the main config scenario list that will run before the chaos and verify the output matches the post chaos scenario.

   See [scenarios/post_action_etcd_container.py](https://github.com/krkn-chaos/krkn/blob/main/scenarios/post_action_etcd_container.py) for an example.
```
- container_scenarios: # List of chaos pod scenarios to load.
    - -    scenarios/container_etcd.yml
@@ -62,7 +62,7 @@ If changes go into the main repository while you're working on your code it is b

If not already configured, set the upstream url for kraken.
```
git remote add upstream https://github.com/krkn-chaos/krkn.git
```

Rebase to the upstream master branch.

@@ -14,11 +14,7 @@ For example, for adding a pod level scenario for a new application, refer to the
    namespace_pattern: ^<namespace>$
    label_selector: <pod label>
    kill: <number of pods to kill>
- id: wait-for-pods
  config:
    namespace_pattern: ^<namespace>$
    label_selector: <pod label>
    count: <expected number of pods that match namespace and label>
    krkn_pod_recovery_time: <expected time for the pod to become ready>
```

#### Node Scenario Yaml Template

docs/health_checks.md (new file, 59 lines)
@@ -0,0 +1,59 @@
### Health Checks

Health checks provide real-time visibility into the impact of chaos scenarios on application availability and performance. The health check configuration supports application endpoints accessible via http/https, along with authentication mechanisms such as bearer tokens and basic credentials.
Health checks are configured in the ```config.yaml```

The system periodically checks the provided URLs based on the defined interval and records the results in Telemetry. The telemetry data includes:

- A success response (```200```) when the application is running normally.
- A failure response (any status other than ```200```) if the application experiences downtime or errors.

This helps users quickly identify application health issues and take the necessary actions.

#### Sample health check config
```
health_checks:
  interval: <time_in_seconds> # Defines the frequency of health checks, default value is 2 seconds
  config: # List of application endpoints to check
    - url: "https://example.com/health"
      bearer_token: "hfjauljl..." # Bearer token for authentication, if any
      auth:
      exit_on_failure: True # If True, exit when the health check fails for this application; values can be True/False
    - url: "https://another-service.com/status"
      bearer_token:
      auth: ("admin","secretpassword") # Authentication credentials (username, password) in tuple format, if any, ex: ("admin","secretpassword")
      exit_on_failure: False
    - url: http://general-service.com
      bearer_token:
      auth:
      exit_on_failure:
```
#### Sample health check telemetry
```
"health_checks": [
    {
        "url": "https://example.com/health",
        "status": False,
        "status_code": "503",
        "start_timestamp": "2025-02-25 11:51:33",
        "end_timestamp": "2025-02-25 11:51:40",
        "duration": "0:00:07"
    },
    {
        "url": "https://another-service.com/status",
        "status": True,
        "status_code": 200,
        "start_timestamp": "2025-02-25 22:18:19",
        "end_timestamp": "2025-02-25 22:22:46",
        "duration": "0:04:27"
    },
    {
        "url": "http://general-service.com",
        "status": True,
        "status_code": 200,
        "start_timestamp": "2025-02-25 22:18:19",
        "end_timestamp": "2025-02-25 22:22:46",
        "duration": "0:04:27"
    }
],
```
docs/hog_scenarios.md (new file, 49 lines)
@@ -0,0 +1,49 @@
### Hog Scenarios

Hog Scenarios are designed to push the limits of memory, CPU, or I/O on one or more nodes in your cluster.
They also serve to evaluate whether your cluster can withstand rogue pods that excessively consume resources
without any limits.

These scenarios involve deploying one or more workloads in the cluster. Based on the specific configuration,
these workloads will use a predetermined amount of resources for a specified duration.

#### Common options

| Option | Type | Description |
|---|---|---|
|`duration`| number | the duration of the stress test in seconds |
|`workers`| number (Optional) | the number of threads instantiated by stress-ng; if left empty, the number of workers will match the number of available cores in the node |
|`hog-type`| string (Enum) | can be cpu, memory or io |
|`image`| string | the container image of the stress workload |
|`namespace`| string | the namespace where the stress workload will be deployed |
|`node-selector`| string (Optional) | defines the node selector for choosing target nodes. If not specified, one schedulable node in the cluster will be chosen at random. If multiple nodes match the selector, all of them will be subjected to stress. If number-of-nodes is specified, that many nodes will be randomly selected from those identified by the selector |
|`number-of-nodes`| number (Optional) | restricts the number of nodes selected by the selector |

#### `cpu-hog` options

| Option | Type | Description |
|---|---|---|
|`cpu-load-percentage`| number | the amount of CPU that will be consumed by the hog |
|`cpu-method`| string | reflects the CPU load strategy adopted by stress-ng; please refer to the stress-ng documentation for all the available options |

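Putting the common and cpu-specific options together, a hedged cpu-hog scenario sketch might look like this (the image name is an assumption and all values are examples, not defaults):

```yaml
duration: 60
hog-type: cpu
image: quay.io/krkn-chaos/krkn-hog   # assumed image name; check the krkn docs
namespace: default
node-selector: "node-role.kubernetes.io/worker="
number-of-nodes: 1
cpu-load-percentage: 80
cpu-method: all
# workers omitted: defaults to one stressor per available core
```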
#### `io-hog` options

| Option | Type | Description |
|---|---|---|
| `io-block-size` | string | the block size written by the stressor |
| `io-write-bytes` | string | the total amount of data that will be written by the stressor. The size can be specified as % of free space on the file system or in units of Bytes, KBytes, MBytes and GBytes using the suffix b, k, m or g |
| `io-target-pod-folder` | string | the folder where the volume will be mounted in the pod |
| `io-target-pod-volume` | dictionary | the pod volume definition that will be stressed by the scenario |

> [!CAUTION]
> Modifying the structure of `io-target-pod-volume` might alter how the hog operates, potentially rendering it ineffective.

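A hedged io-hog sketch follows; the `io-target-pod-volume` shape shown here (a hostPath definition) is an assumption, and per the caution above its structure should be kept intact:

```yaml
duration: 30
hog-type: io
image: quay.io/krkn-chaos/krkn-hog   # assumed image name; check the krkn docs
namespace: default
io-block-size: 1m
io-write-bytes: 10%
io-target-pod-folder: /hog-data
io-target-pod-volume:                # assumed volume shape; keep the structure intact
  name: node-volume
  hostPath:
    path: /tmp
```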
#### `memory-hog` options

| Option | Type | Description |
|---|---|---|
|`memory-vm-bytes`| string | the amount of memory that the scenario will try to hog. The size can be specified as % of free space on the file system or in units of Bytes, KBytes, MBytes and GBytes using the suffix b, k, m or g |
|
||||
* [Scenarios](#scenarios)
|
||||
* [Test Environment Recommendations - how and where to run chaos tests](#test-environment-recommendations---how-and-where-to-run-chaos-tests)
|
||||
* [Chaos testing in Practice](#chaos-testing-in-practice)
|
||||
* [OpenShift oraganization](#openshift-organization)
|
||||
* [OpenShift organization](#openshift-organization)
|
||||
* [startx-lab](#startx-lab)
|
||||
|
||||
|
||||
@@ -88,7 +88,8 @@ We want to look at this in terms of CPU, Memory, Disk, Throughput, Network etc.
- Appropriate caching and a Content Delivery Network should be enabled to stay performant and usable when there is latency on the client side.
- Not every user or machine has access to unlimited bandwidth; there might be a delay on the user side ( client ) in accessing the APIs due to limited bandwidth, throttling or latency depending on the geographic location. It is important to inject latency between the client and API calls to understand the behavior and optimize things, including caching wherever possible, using CDNs or opting for different protocols like HTTP/2 or HTTP/3 vs HTTP.

- Ensure Disruption Budgets are enabled for your critical applications
  - Protect your application during disruptions by setting a [pod disruption budget](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) to avoid downtime. For instance, etcd, zookeeper or similar applications need at least 2 replicas to maintain quorum. This can be ensured by setting PDB maxUnavailable to 1.

### Tooling

@@ -3,13 +3,13 @@
The following ways are supported to run Kraken:

- Standalone python program through Git.
- Containerized version using either Podman or Docker as the runtime via [Krkn-hub](https://github.com/krkn-chaos/krkn-hub)
- Kubernetes or OpenShift deployment ( unsupported )

**NOTE**: It is recommended to run Kraken external to the cluster ( Standalone or Containerized ) hitting the Kubernetes/OpenShift API, as running it internal to the cluster might be disruptive to itself and also might not report back the results if the chaos leads to instability of the cluster's API server.

**NOTE**: To run Kraken on Power (ppc64le) architecture, build and run a containerized version by following the
instructions given [here](https://github.com/krkn-chaos/krkn/blob/main/containers/build_own_image-README.md).

**NOTE**: Helper functions for interactions in Krkn are part of [krkn-lib](https://github.com/redhat-chaos/krkn-lib).
Please feel free to reuse and expand them as you see fit when adding a new scenario or expanding
@@ -19,10 +19,10 @@ the capabilities of the current supported scenarios.
### Git

#### Clone the repository
Pick the latest stable release to install [here](https://github.com/krkn-chaos/krkn/releases).
```
$ git clone https://github.com/krkn-chaos/krkn.git --branch <release version>
$ cd krkn
```

#### Install the dependencies
@@ -40,15 +40,6 @@ $ python3.9 run_kraken.py --config <config_file_location>
```

### Run containerized version
[Krkn-hub](https://github.com/krkn-chaos/krkn-hub) is a wrapper that allows running Krkn chaos scenarios via the podman or docker runtime, with scenario parameters/configuration defined as environment variables.

Refer to the [instructions](https://github.com/krkn-chaos/krkn-hub#supported-chaos-scenarios) to get started.

### Run Kraken as a Kubernetes deployment ( unsupported option - standalone or containerized deployers are recommended )
Refer to the [instructions](https://github.com/redhat-chaos/krkn/blob/main/containers/README.md) on how to deploy and run Kraken as a Kubernetes/OpenShift deployment.

Refer to the [chaos-kraken chart manpage](https://artifacthub.io/packages/helm/startx/chaos-kraken)
and especially the [kraken configuration values](https://artifacthub.io/packages/helm/startx/chaos-kraken#chaos-kraken-values-dictionary)
for details on how to configure this chart.
@@ -1,31 +0,0 @@
## Scraping and storing metrics for the run

There are cases where the state of the cluster and metrics on the cluster during the chaos test run need to be stored long term, to be reviewed after the cluster is terminated, for example in CI and automation test runs. To help with this, Kraken supports capturing metrics for the duration of the scenarios defined in the config.

The metrics to capture need to be defined in a metrics profile which Kraken consumes to query prometheus with the start and end timestamp of the run. Each run has a unique identifier ( uuid ), which is generated automatically if not set in the config. This feature can be enabled in the [config](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml) by setting the following:

```
performance_monitoring:
    capture_metrics: True
    metrics_profile_path: config/metrics-aggregated.yaml
    prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
    prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
    uuid: # uuid for the run is generated by default if not set.
```

### Metrics profile
A couple of [metric profiles](https://github.com/redhat-chaos/krkn/tree/main/config), [metrics.yaml](https://github.com/redhat-chaos/krkn/blob/main/config/metrics.yaml) and [metrics-aggregated.yaml](https://github.com/redhat-chaos/krkn/blob/main/config/metrics-aggregated.yaml), are shipped by default and can be tweaked to add more metrics to capture during the run. The following are the API server metrics, for example:

```
metrics:
  # API server
  - query: histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{apiserver="kube-apiserver", verb!~"WATCH", subresource!="log"}[2m])) by (verb,resource,subresource,instance,le)) > 0
    metricName: API99thLatency

  - query: sum(irate(apiserver_request_total{apiserver="kube-apiserver",verb!="WATCH",subresource!="log"}[2m])) by (verb,instance,resource,code) > 0
    metricName: APIRequestRate

  - query: sum(apiserver_current_inflight_requests{}) by (request_kind) > 0
    metricName: APIInflightRequests
```

@@ -18,7 +18,7 @@ network_chaos: # Scenario to create an outage
```

##### Sample scenario config for ingress traffic shaping (using a plugin)
```
- id: network_chaos
  config:
    node_interface_name: # Dictionary with key as node name(s) and value as a list of its interfaces to test
@@ -35,7 +35,7 @@ network_chaos: # Scenario to create an outage
    bandwidth: 10mbit
    wait_duration: 120
    test_duration: 60
```

Note: For ingress traffic shaping, ensure that your node doesn't have any [IFB](https://wiki.linuxfoundation.org/networking/ifb) interfaces already present. The scenario relies on creating IFBs to do the shaping, and they are deleted at the end of the scenario.

@@ -2,31 +2,38 @@

The following node chaos scenarios are supported:

1. **node_start_scenario**: Scenario to start the node instance.
2. **node_stop_scenario**: Scenario to stop the node instance.
3. **node_stop_start_scenario**: Scenario to stop the node instance for the specified duration and then start it again. Not supported on VMware.
4. **node_termination_scenario**: Scenario to terminate the node instance.
5. **node_reboot_scenario**: Scenario to reboot the node instance.
6. **stop_kubelet_scenario**: Scenario to stop the kubelet of the node instance.
7. **stop_start_kubelet_scenario**: Scenario to stop and start the kubelet of the node instance.
8. **restart_kubelet_scenario**: Scenario to restart the kubelet of the node instance.
9. **node_crash_scenario**: Scenario to crash the node instance.
10. **stop_start_helper_node_scenario**: Scenario to stop and start the helper node and check service status.
11. **node_disk_detach_attach_scenario**: Scenario to detach the node disk for the specified duration.

**NOTE**: If the node does not recover from the node_crash_scenario injection, reboot the node to get it back to the Ready state.

**NOTE**: node_start_scenario, node_stop_scenario, node_stop_start_scenario, node_termination_scenario, node_reboot_scenario and stop_start_kubelet_scenario are supported on AWS, Azure, OpenStack, BareMetal, GCP, VMware and Alibaba.

**NOTE**: node_disk_detach_attach_scenario is supported only on AWS and cannot detach the root disk.

#### AWS

Cloud setup instructions can be found [here](cloud_setup.md#aws). A sample scenario config can be found [here](https://github.com/krkn-chaos/krkn/blob/main/scenarios/openshift/aws_node_scenarios.yml).

#### Baremetal

Sample scenario config can be found [here](https://github.com/krkn-chaos/krkn/blob/main/scenarios/openshift/baremetal_node_scenarios.yml).

**NOTE**: Baremetal requires setting the IPMI user and password to power on, off, and reboot nodes, using the config options `bm_user` and `bm_password`. They can either be set in the root of the entry in the scenarios config, or set per machine.

If no per-machine addresses are specified, kraken attempts to use the BMC value in the BareMetalHost object. To list them, you can do 'oc get bmh -o wide --all-namespaces'. If the BMC values are blank, you must specify them per-machine using the config option 'bmc_addr' as specified below.
@@ -38,6 +45,8 @@ See the example node scenario or the example below.

**NOTE**: Baremetal machines are fragile. Some node actions can occasionally corrupt the filesystem if the machine does not shut down properly, and sometimes the kubelet does not start properly.

#### Docker

The Docker provider can be used to run node scenarios against kind clusters.
@@ -46,8 +55,13 @@ The Docker provider can be used to run node scenarios against kind clusters.

kind was primarily designed for testing Kubernetes itself, but may be used for local development or CI.

#### GCP
Cloud setup instructions can be found [here](cloud_setup.md#gcp). A sample scenario config can be found [here](https://github.com/krkn-chaos/krkn/blob/main/scenarios/openshift/gcp_node_scenarios.yml).

NOTE: The parallel option is not available for GCP; the API does not perform operations concurrently.

#### Openstack

@@ -60,9 +74,11 @@ The supported node level chaos scenarios on an OPENSTACK cloud are `node_stop_st
To execute the scenario, ensure the value for `ssh_private_key` in the node scenarios config file is set with the correct private key file path for the ssh connection to the helper node. Ensure passwordless ssh is configured on the host running Kraken and the helper node to avoid connection errors.

#### Azure

Cloud setup instructions can be found [here](cloud_setup.md#azure). A sample scenario config can be found [here](https://github.com/krkn-chaos/krkn/blob/main/scenarios/openshift/azure_node_scenarios.yml).

#### Alibaba
@@ -73,43 +89,23 @@ How to set up Alibaba cli to run node scenarios is defined [here](cloud_setup.md
. Releasing a node is 2 steps: stopping the node and then releasing it.

#### VMware
How to set up VMware vSphere to run node scenarios is defined [here](cloud_setup.md#vmware)

See the [example config file](../scenarios/openshift/vmware_node_scenarios.yml)

#### IBMCloud
How to set up IBMCloud to run node scenarios is defined [here](cloud_setup.md#ibmcloud)

#### IBMCloud and Vmware example

```
- id: ibmcloud-node-stop
  config:
    name: "<node_name>"
    label_selector: "node-role.kubernetes.io/worker" # When node_name is not specified, a node with matching label_selector is selected for node chaos scenario injection
    runs: 1 # Number of times to inject each scenario under actions (will perform on same node each time)
    instance_count: 1 # Number of nodes to perform action/select that match the label selector
    timeout: 30 # Duration to wait for completion of node scenario injection
    skip_openshift_checks: False # Set to True if you don't want to wait for the status of the nodes to change on OpenShift before passing the scenario
- id: ibmcloud-node-start
  config:
    name: "<node_name>" # Same name as before
    label_selector: "node-role.kubernetes.io/worker" # When node_name is not specified, a node with matching label_selector is selected for node chaos scenario injection
    runs: 1 # Number of times to inject each scenario under actions (will perform on same node each time)
    instance_count: 1 # Number of nodes to perform action/select that match the label selector
    timeout: 30 # Duration to wait for completion of node scenario injection
    skip_openshift_checks: False # Set to True if you don't want to wait for the status of the nodes to change on OpenShift before passing the scenario
```

Supported actions:
- ibmcloud-node-terminate
- ibmcloud-node-reboot
- ibmcloud-node-stop
- ibmcloud-node-start

@@ -118,60 +114,3 @@ This cloud type uses a different configuration style, see actions below and [exa
**NOTE**: The `node_crash_scenario` and `stop_kubelet_scenario` scenarios are supported independent of the cloud platform.

Use 'generic' or do not add the 'cloud_type' key to your scenario if your cluster is not set up using one of the currently supported cloud types.

Node scenarios can be injected by placing the node scenarios config files under the node_scenarios option in the kraken config. Refer to the [node_scenarios_example](https://github.com/redhat-chaos/krkn/blob/main/scenarios/node_scenarios_example.yml) config file.

```
node_scenarios:
  - actions: # Node chaos scenarios to be injected.
      - node_stop_start_scenario
      - stop_start_kubelet_scenario
      - node_crash_scenario
    node_name: # Node on which scenario has to be injected.
    label_selector: node-role.kubernetes.io/worker # When node_name is not specified, a node with matching label_selector is selected for node chaos scenario injection.
    instance_count: 1 # Number of nodes to perform action/select that match the label selector.
    runs: 1 # Number of times to inject each scenario under actions (will perform on same node each time).
    timeout: 120 # Duration to wait for completion of node scenario injection.
    cloud_type: aws # Cloud type on which Kubernetes/OpenShift runs.
  - actions:
      - node_reboot_scenario
    node_name:
    label_selector: node-role.kubernetes.io/infra
    instance_count: 1
    timeout: 120
    cloud_type: azure
  - actions:
      - node_crash_scenario
    node_name:
    label_selector: node-role.kubernetes.io/infra
    instance_count: 1
    timeout: 120
  - actions:
      - stop_start_helper_node_scenario # Node chaos scenario for helper node.
    instance_count: 1
    timeout: 120
    helper_node_ip: # ip address of the helper node.
    service: # Check status of the services on the helper node.
      - haproxy
      - dhcpd
      - named
    ssh_private_key: /root/.ssh/id_rsa # ssh key to access the helper node.
    cloud_type: openstack
  - actions:
      - node_stop_start_scenario
    node_name:
    label_selector: node-role.kubernetes.io/worker
    instance_count: 1
    timeout: 120
    cloud_type: bm
    bmc_user: defaultuser # For baremetal (bm) cloud type. The default IPMI username. Optional if specified for all machines.
    bmc_password: defaultpass # For baremetal (bm) cloud type. The default IPMI password. Optional if specified for all machines.
    bmc_info: # This section is here to specify baremetal per-machine info, so it is optional if there is no per-machine info.
      node-1: # The node name for the baremetal machine
        bmc_addr: mgmt-machine1.example.com # Optional. For baremetal nodes with the IPMI BMC address missing from 'oc get bmh'.
      node-2:
        bmc_addr: mgmt-machine2.example.com
        bmc_user: user # The baremetal IPMI user. Overrides the default IPMI user specified above. Optional if the default is set.
        bmc_password: pass # The baremetal IPMI password. Overrides the default IPMI password specified above. Optional if the default is set.
```

@@ -17,11 +17,8 @@ You can then create the scenario file with the following contents:
  config:
    namespace_pattern: ^kube-system$
    label_selector: k8s-app=kube-scheduler
- id: wait-for-pods
  config:
    namespace_pattern: ^kube-system$
    label_selector: k8s-app=kube-scheduler
    count: 3
    krkn_pod_recovery_time: 120

```

Please adjust the schema reference to point to the [schema file](../scenarios/plugin.schema.json). This file will give you code completion and documentation for the available options in your IDE.

docs/scenario_plugin_api.md (new file, 136 lines)
@@ -0,0 +1,136 @@
# Scenario Plugin API:

This API enables seamless integration of Scenario Plugins for Krkn. Plugins are automatically
detected and loaded by the plugin loader, provided they extend the `AbstractPluginScenario`
abstract class, implement the required methods, and adhere to the specified [naming conventions](#naming-conventions).

## Plugin folder:

The plugin loader automatically loads plugins found in the `krkn/scenario_plugins` directory,
relative to the Krkn root folder. Each plugin must reside in its own directory and can consist
of one or more Python files. The entry point for each plugin is a Python class that extends the
[AbstractPluginScenario](../krkn/scenario_plugins/abstract_scenario_plugin.py) abstract class and implements its required methods.

## `AbstractPluginScenario` abstract class:

This [abstract class](../krkn/scenario_plugins/abstract_scenario_plugin.py) defines the contract between the plugin and krkn.
It consists of two methods:
- `run(...)`
- `get_scenario_types()`

Most IDEs can automatically suggest and implement the abstract methods defined in `AbstractPluginScenario`:

_(IntelliJ PyCharm)_

### `run(...)`

```python
def run(
    self,
    run_uuid: str,
    scenario: str,
    krkn_config: dict[str, any],
    lib_telemetry: KrknTelemetryOpenshift,
    scenario_telemetry: ScenarioTelemetry,
) -> int:
```

This method represents the entry point of the plugin and the first method
that will be executed.
#### Parameters:

- `run_uuid`:
  - the uuid of the chaos run, generated by krkn for every single run.
- `scenario`:
  - the config file of the scenario that is currently executed
- `krkn_config`:
  - the full dictionary representation of the `config.yaml`
- `lib_telemetry`
  - a composite object of all the [krkn-lib](https://krkn-chaos.github.io/krkn-lib-docs/modules.html) objects and methods needed by a krkn plugin to run.
- `scenario_telemetry`
  - the `ScenarioTelemetry` object of the scenario that is currently executed

### Return value:
Returns 0 if the scenario succeeds and 1 if it fails.
> [!WARNING]
> All exceptions must be handled __inside__ the run method and not propagated.

### `get_scenario_types()`:

```python def get_scenario_types(self) -> list[str]:```

Indicates the scenario types specified in the `config.yaml`. For the plugin to be properly
loaded, recognized and executed, it must be implemented and must return one or more
strings matching the `scenario_type` strings set in the config.
> [!WARNING]
> Multiple strings can map to a *single* `ScenarioPlugin`, but the same string cannot map
> to different plugins; an exception will be thrown for scenario_type redefinition.

> [!Note]
> The `scenario_type` strings must be unique across all plugins; otherwise, an exception will be thrown.

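To make the mapping concrete, a hypothetical config.yaml fragment wiring a scenario type to a plugin could look like this (the scenario type name and file path are made up for illustration; the nesting under `chaos_scenarios` is assumed from the krkn config layout):

```yaml
kraken:
  chaos_scenarios:
    - example_scenarios:                  # must match a string returned by get_scenario_types()
        - scenarios/example_scenario.yaml # scenario config file passed to run(...)
```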
## Naming conventions:
A key requirement for developing a plugin that will be properly loaded
by the plugin loader is following the established naming conventions.
These conventions are enforced to maintain a uniform and readable codebase,
making it easier to onboard new developers from the community.

### plugin folder:
- the plugin folder must be placed in the `krkn/scenario_plugins` folder starting from the krkn root folder
- the plugin folder __cannot__ contain the words
  - `plugin`
  - `scenario`
### plugin file name and class name:
- the plugin file containing the main plugin class must be named in _snake case_ and must have the suffix `_scenario_plugin`:
  - `example_scenario_plugin.py`
- the main plugin class must be named in _capital camel case_ and must have the suffix `ScenarioPlugin`:
  - `ExampleScenarioPlugin`
- the file name must match the class name in the respective syntax:
  - `example_scenario_plugin.py` -> `ExampleScenarioPlugin`

### scenario type:
- the scenario type __must__ be unique across all the scenarios.

### logging:
If your new scenario does not adhere to the naming conventions, an error log will be generated in the Krkn standard output,
providing details about the issue:

```commandline
2024-10-03 18:06:31,136 [INFO] 📣 `ScenarioPluginFactory`: types from config.yaml mapped to respective classes for execution:
2024-10-03 18:06:31,136 [INFO] ✅ type: application_outages_scenarios ➡️ `ApplicationOutageScenarioPlugin`
2024-10-03 18:06:31,136 [INFO] ✅ types: [hog_scenarios, arcaflow_scenario] ➡️ `ArcaflowScenarioPlugin`
2024-10-03 18:06:31,136 [INFO] ✅ type: container_scenarios ➡️ `ContainerScenarioPlugin`
2024-10-03 18:06:31,136 [INFO] ✅ type: managedcluster_scenarios ➡️ `ManagedClusterScenarioPlugin`
2024-10-03 18:06:31,137 [INFO] ✅ types: [pod_disruption_scenarios, pod_network_scenario, vmware_node_scenarios, ibmcloud_node_scenarios] ➡️ `NativeScenarioPlugin`
2024-10-03 18:06:31,137 [INFO] ✅ type: network_chaos_scenarios ➡️ `NetworkChaosScenarioPlugin`
2024-10-03 18:06:31,137 [INFO] ✅ type: node_scenarios ➡️ `NodeActionsScenarioPlugin`
2024-10-03 18:06:31,137 [INFO] ✅ type: pvc_scenarios ➡️ `PvcScenarioPlugin`
2024-10-03 18:06:31,137 [INFO] ✅ type: service_disruption_scenarios ➡️ `ServiceDisruptionScenarioPlugin`
2024-10-03 18:06:31,137 [INFO] ✅ type: service_hijacking_scenarios ➡️ `ServiceHijackingScenarioPlugin`
2024-10-03 18:06:31,137 [INFO] ✅ type: cluster_shut_down_scenarios ➡️ `ShutDownScenarioPlugin`
2024-10-03 18:06:31,137 [INFO] ✅ type: syn_flood_scenarios ➡️ `SynFloodScenarioPlugin`
2024-10-03 18:06:31,137 [INFO] ✅ type: time_scenarios ➡️ `TimeActionsScenarioPlugin`
2024-10-03 18:06:31,137 [INFO] ✅ type: zone_outages_scenarios ➡️ `ZoneOutageScenarioPlugin`

2024-09-18 14:48:41,735 [INFO] Failed to load Scenario Plugins:

2024-09-18 14:48:41,735 [ERROR] ⛔ Class: ExamplePluginScenario Module: krkn.scenario_plugins.example.example_scenario_plugin
2024-09-18 14:48:41,735 [ERROR] ⚠️ scenario plugin class name must start with a capital letter, end with `ScenarioPlugin`, and cannot be just `ScenarioPlugin`.
```

>[!NOTE]
>If you're trying to understand how the scenario types in the config.yaml are mapped to
> their corresponding plugins, this log will guide you!
> Each scenario plugin class mentioned can be found in the `krkn/scenario_plugins` folder:
> simply convert the camel case notation to snake case and remove the ScenarioPlugin suffix from the class name,
> e.g. the `ShutDownScenarioPlugin` class can be found in the `krkn/scenario_plugins/shut_down` folder.

## ExampleScenarioPlugin
The [ExampleScenarioPlugin](../krkn/tests/test_classes/example_scenario_plugin.py) class included in the tests folder can be used as scaffolding for new plugins, and it is considered
part of the documentation.

For any questions or further guidance, feel free to reach out to us on the
[Kubernetes workspace](https://kubernetes.slack.com/) in the `#krkn` channel.
We're happy to assist. Now, __release the Krkn!__

docs/scenario_plugin_pycharm.gif (new binary file, 340 KiB)
@@ -1,6 +1,9 @@
### Service Disruption Scenarios (Previously Delete Namespace Scenario)

Using this type of scenario configuration one is able to delete crucial objects in a specific namespace, or in a namespace matching a certain regex string. The goal of this scenario is to verify that Pod Disruption Budgets with appropriate configurations are set, so that a minimum number of replicas is running at any given time and downtime is avoided.

**NOTE**: Protect your application during disruptions by setting a [pod disruption budget](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) to avoid downtime. For instance, etcd, zookeeper or similar applications need at least 2 replicas to maintain quorum. This can be ensured by setting PDB maxUnavailable to 1.

Configuration Options:

@@ -16,7 +19,7 @@ Set to '^.*$' and label_selector to "" to randomly select any namespace in your

**sleep:** Number of seconds to wait between each iteration/count of killing namespaces. Defaults to 10 seconds if not set

Refer to the [namespace_scenarios_example](https://github.com/krkn-chaos/krkn/blob/main/scenarios/regex_namespace.yaml) config file.

```
scenarios:
docs/service_hijacking_scenarios.md (new file, 80 lines)
@@ -0,0 +1,80 @@
### Service Hijacking Scenarios

Service Hijacking Scenarios aim to simulate fake HTTP responses from a workload targeted by a
`Service` already deployed in the cluster.
This scenario is executed by deploying a custom-made web service and modifying the target `Service`
selector to direct traffic to this web service for a specified duration.

The web service's source code is available [here](https://github.com/krkn-chaos/krkn-service-hijacking).
It employs a time-based test plan from the scenario configuration file, which specifies the behavior of resources during the chaos scenario as follows:

```yaml
service_target_port: http-web-svc # The port of the service to be hijacked (can be named or numeric, based on the workload and service configuration).
service_name: nginx-service # The name of the service that will be hijacked.
service_namespace: default # The namespace where the target service is located.
image: quay.io/krkn-chaos/krkn-service-hijacking:v0.1.3 # Image of the krkn web service to be deployed to receive traffic.
chaos_duration: 30 # Total duration of the chaos scenario in seconds.
plan:
  - resource: "/list/index.php" # Specifies the resource or path to respond to in the scenario. For paths, both the path and query parameters are captured but ignored. For resources, only query parameters are captured.

    steps: # A time-based plan consisting of steps can be defined for each resource.
      GET: # One or more HTTP methods can be specified for each step. Note: Non-standard methods are supported for fully custom web services (e.g., using NONEXISTENT instead of POST).

        - duration: 15 # Duration in seconds for this step before moving to the next one, if defined. Otherwise, this step will continue until the chaos scenario ends.

          status: 500 # HTTP status code to be returned in this step.
          mime_type: "application/json" # MIME type of the response for this step.
          payload: | # The response payload for this step.
            {
              "status":"internal server error"
            }
        - duration: 15
          status: 201
          mime_type: "application/json"
          payload: |
            {
              "status":"resource created"
            }
      POST:
        - duration: 15
          status: 401
          mime_type: "application/json"
          payload: |
            {
              "status": "unauthorized"
            }
        - duration: 15
          status: 404
          mime_type: "text/plain"
          payload: "not found"
```
The scenario will focus on the `service_name` within the `service_namespace`,
substituting the selector with a randomly generated one, which is added as a label in the mock service manifest.
This allows multiple scenarios to be executed in the same namespace, each targeting different services without
causing conflicts.

The newly deployed mock web service will expose a `service_target_port`,
which can be either a named or numeric port based on the service configuration.
This ensures that the Service correctly routes HTTP traffic to the mock web service during the chaos run.

Each step will last for `duration` seconds from the deployment of the mock web service in the cluster.
For each HTTP resource, defined as a top-level YAML property of the plan
(it could be a specific resource, e.g., /list/index.php, or a path-based resource typical in MVC frameworks),
one or more HTTP request methods can be specified. Both standard and custom request methods are supported.

During this time frame, the web service will respond with:

- `status`: The [HTTP status code](https://datatracker.ietf.org/doc/html/rfc7231#section-6) (can be standard or custom).
- `mime_type`: The [MIME type](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types) (can be standard or custom).
- `payload`: The response body to be returned to the client.

At the end of the step `duration`, the web service will proceed to the next step (if available) until
the global `chaos_duration` concludes. At this point, the original service will be restored,
and the custom web service and its resources will be undeployed.

__NOTE__: Some clients (e.g., cURL, jQuery) may optimize queries using lightweight methods (like HEAD or OPTIONS)
to probe API behavior. If these methods are not defined in the test plan, the web service may respond with
a `405` or `404` status code. If you encounter unexpected behavior, consider this use case.

docs/syn_flood_scenarios.md (new file, 33 lines)
@@ -0,0 +1,33 @@
### SYN Flood Scenarios

This scenario generates a substantial amount of TCP traffic directed at one or more Kubernetes services within
the cluster to test the server's resiliency under extreme traffic conditions.
It can also target hosts outside the cluster by specifying a reachable IP address or hostname.
This scenario leverages the distributed nature of Kubernetes clusters to instantiate multiple instances
of the same pod against a single host, significantly increasing the effectiveness of the attack.
The configuration also allows for the specification of multiple node selectors, enabling Kubernetes to schedule
the attacker pods on a user-defined subset of nodes to make the test more realistic.

```yaml
packet-size: 120 # hping3 packet size
window-size: 64 # hping3 TCP window size
duration: 10 # chaos scenario duration
namespace: default # namespace where the target service(s) are deployed
target-service: target-svc # target service name (if set, target-service-label must be empty)
target-port: 80 # target service TCP port
target-service-label: "" # target service label, can be used to target multiple targets at the same time
                         # if they have the same label set (if set, target-service must be empty)
number-of-pods: 2 # number of attacker pods instantiated per target
image: quay.io/krkn-chaos/krkn-syn-flood # syn flood attacker container image
attacker-nodes: # sets the node affinity to schedule the attacker pods. Per each node label selector,
                # multiple values can be specified; this way the kube scheduler will schedule the attacker pods
                # in the best way possible based on the provided labels. Multiple labels can be specified
  kubernetes.io/hostname:
    - host_1
    - host_2
  kubernetes.io/os:
    - linux
```

The attacker container source code is available [here](https://github.com/krkn-chaos/krkn-syn-flood).
@@ -16,7 +16,7 @@ Configuration Options:

**object_name:** List of the names of pods or nodes you want to skew.

Refer to the [time_scenarios_example](https://github.com/krkn-chaos/krkn/blob/main/scenarios/time_scenarios_example.yml) config file.

```
time_scenarios:
@@ -13,10 +13,12 @@ zone_outage: # Scenario to create an out
    duration: 600 # Duration in seconds after which the zone will be back online.
    vpc_id: # Cluster virtual private network to target.
    subnet_id: [subnet1, subnet2] # List of subnet-ids to deny both ingress and egress traffic.
    default_acl_id: acl-xxxxxxxx # (Optional) ID of an existing network ACL to use instead of creating a new one. If provided, this ACL will not be deleted after the scenario.
```

**NOTE**: vpc_id and subnet_id can be obtained from the cloud web console by selecting one of the instances in the targeted zone ( us-west-2a for example ).
**NOTE**: Multiple zones will experience downtime when targeting multiple subnets, which might have an impact on the cluster health, especially if the zones have control plane components deployed.
**NOTE**: default_acl_id can be obtained from the AWS VPC Console by selecting "Network ACLs" from the left sidebar ( the ID will be in the format 'acl-xxxxxxxx' ). Make sure the selected ACL has the desired ingress/egress rules for your outage scenario ( i.e., deny all ).

##### Debugging steps in case of failures
If the steps that revert the network ACL to allow traffic and bring back the cluster nodes in the zone fail, the nodes in that zone will be left in the `NotReady` condition. Here is how to fix it:

@@ -2,6 +2,9 @@ kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
  extraPortMappings:
  - containerPort: 30036
    hostPort: 8888
- role: control-plane
- role: control-plane
- role: worker

@@ -1,86 +0,0 @@
|
||||
import yaml
|
||||
import logging
|
||||
import time
|
||||
import kraken.cerberus.setup as cerberus
|
||||
from jinja2 import Template
|
||||
import kraken.invoke.command as runcommand
|
||||
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
|
||||
from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from krkn_lib.utils.functions import get_yaml_item_value, log_exception
|
||||
|
||||
|
||||
# Reads the scenario config, applies and deletes a network policy to
|
||||
# block the traffic for the specified duration
|
||||
def run(scenarios_list, config, wait_duration, telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]):
|
||||
failed_post_scenarios = ""
|
||||
scenario_telemetries: list[ScenarioTelemetry] = []
|
||||
failed_scenarios = []
|
||||
for app_outage_config in scenarios_list:
|
||||
scenario_telemetry = ScenarioTelemetry()
|
||||
scenario_telemetry.scenario = app_outage_config
|
||||
scenario_telemetry.startTimeStamp = time.time()
|
||||
telemetry.set_parameters_base64(scenario_telemetry, app_outage_config)
|
||||
if len(app_outage_config) > 1:
|
||||
try:
|
||||
with open(app_outage_config, "r") as f:
|
||||
app_outage_config_yaml = yaml.full_load(f)
|
||||
scenario_config = app_outage_config_yaml["application_outage"]
|
||||
pod_selector = get_yaml_item_value(
|
||||
scenario_config, "pod_selector", "{}"
|
||||
)
|
||||
traffic_type = get_yaml_item_value(
|
||||
scenario_config, "block", "[Ingress, Egress]"
|
||||
)
|
||||
namespace = get_yaml_item_value(
|
||||
scenario_config, "namespace", ""
|
||||
)
|
||||
duration = get_yaml_item_value(
|
||||
scenario_config, "duration", 60
|
||||
)
|
||||
|
||||
start_time = int(time.time())
|
||||
|
||||
network_policy_template = """---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
name: kraken-deny
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels: {{ pod_selector }}
|
||||
policyTypes: {{ traffic_type }}
|
||||
"""
|
||||
t = Template(network_policy_template)
|
||||
rendered_spec = t.render(pod_selector=pod_selector, traffic_type=traffic_type)
|
||||
# Write the rendered template to a file
|
||||
with open("kraken_network_policy.yaml", "w") as f:
|
||||
f.write(rendered_spec)
|
||||
# Block the traffic by creating network policy
|
||||
logging.info("Creating the network policy")
|
||||
runcommand.invoke(
|
||||
"kubectl create -f %s -n %s --validate=false" % ("kraken_network_policy.yaml", namespace)
|
||||
)
|
||||
|
||||
# wait for the specified duration
|
||||
logging.info("Waiting for the specified duration in the config: %s" % (duration))
|
||||
time.sleep(duration)
|
||||
|
||||
# unblock the traffic by deleting the network policy
|
||||
logging.info("Deleting the network policy")
|
||||
runcommand.invoke("kubectl delete -f %s -n %s" % ("kraken_network_policy.yaml", namespace))
|
||||
|
||||
logging.info("End of scenario. Waiting for the specified duration: %s" % (wait_duration))
|
||||
time.sleep(wait_duration)
|
||||
|
||||
end_time = int(time.time())
|
||||
cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
|
||||
except Exception as e :
|
||||
scenario_telemetry.exitStatus = 1
|
||||
failed_scenarios.append(app_outage_config)
|
||||
log_exception(app_outage_config)
|
||||
else:
|
||||
scenario_telemetry.exitStatus = 0
|
||||
scenario_telemetry.endTimeStamp = time.time()
|
||||
scenario_telemetries.append(scenario_telemetry)
|
||||
return failed_scenarios, scenario_telemetries
|
||||
|
||||
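As a quick, self-contained illustration of what the Jinja2 template above produces (the `{run: nginx}` selector is an arbitrary example value, not a krkn default):

```python
# Standalone check of the NetworkPolicy template rendering (requires jinja2).
from jinja2 import Template

template = Template(
    "podSelector:\n"
    "  matchLabels: {{ pod_selector }}\n"
    "policyTypes: {{ traffic_type }}\n"
)
print(template.render(pod_selector="{run: nginx}", traffic_type="[Ingress, Egress]"))
# podSelector:
#   matchLabels: {run: nginx}
# policyTypes: [Ingress, Egress]
```

Because the selector and policy types are spliced in as raw strings, they must already be valid YAML fragments, which is why the scenario config supplies them in flow style (e.g. `{run: nginx}` and `[Ingress, Egress]`).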
@@ -1,2 +0,0 @@
from .arcaflow_plugin import *
from .context_auth import ContextAuth
@@ -1,178 +0,0 @@
import time
import arcaflow
import os
import yaml
import logging
from pathlib import Path
from typing import List
from .context_auth import ContextAuth
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
from krkn_lib.models.telemetry import ScenarioTelemetry


def run(scenarios_list: List[str], kubeconfig_path: str, telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]):
    scenario_telemetries: list[ScenarioTelemetry] = []
    failed_post_scenarios = []
    for scenario in scenarios_list:
        scenario_telemetry = ScenarioTelemetry()
        scenario_telemetry.scenario = scenario
        scenario_telemetry.startTimeStamp = time.time()
        telemetry.set_parameters_base64(scenario_telemetry, scenario)
        engine_args = build_args(scenario)
        status_code = run_workflow(engine_args, kubeconfig_path)
        scenario_telemetry.endTimeStamp = time.time()
        scenario_telemetry.exitStatus = status_code
        scenario_telemetries.append(scenario_telemetry)
        if status_code != 0:
            failed_post_scenarios.append(scenario)
    return failed_post_scenarios, scenario_telemetries


def run_workflow(engine_args: arcaflow.EngineArgs, kubeconfig_path: str) -> int:
    set_arca_kubeconfig(engine_args, kubeconfig_path)
    exit_status = arcaflow.run(engine_args)
    return exit_status


def build_args(input_file: str) -> arcaflow.EngineArgs:
    """resolves the workflow context folder and builds the arcaflow engine arguments"""
    context = Path(input_file).parent
    workflow = "{}/workflow.yaml".format(context)
    config = "{}/config.yaml".format(context)
    if not os.path.exists(context):
        raise Exception("context folder for arcaflow workflow not found: {}".format(context))
    if not os.path.exists(input_file):
        raise Exception("input file for arcaflow workflow not found: {}".format(input_file))
    if not os.path.exists(workflow):
        raise Exception("workflow file for arcaflow workflow not found: {}".format(workflow))
    if not os.path.exists(config):
        raise Exception("configuration file for arcaflow workflow not found: {}".format(config))

    engine_args = arcaflow.EngineArgs()
    engine_args.context = context
    engine_args.config = config
    engine_args.input = input_file
    return engine_args
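`build_args` assumes the workflow definition and engine configuration sit next to the input file; for example (the `cpu-hog` path below is purely illustrative):

```python
# Illustrative layout assumed by build_args:
#   scenarios/arcaflow/cpu-hog/input.yaml     <- passed in as input_file
#   scenarios/arcaflow/cpu-hog/workflow.yaml  <- resolved from the same folder
#   scenarios/arcaflow/cpu-hog/config.yaml    <- resolved from the same folder
engine_args = build_args("scenarios/arcaflow/cpu-hog/input.yaml")
```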

def set_arca_kubeconfig(engine_args: arcaflow.EngineArgs, kubeconfig_path: str):
    context_auth = ContextAuth()
    if not os.path.exists(kubeconfig_path):
        raise Exception("kubeconfig not found in {}".format(kubeconfig_path))

    with open(kubeconfig_path, "r") as stream:
        try:
            kubeconfig = yaml.safe_load(stream)
            context_auth.fetch_auth_data(kubeconfig)
        except Exception as e:
            logging.error("impossible to read kubeconfig file in: {}".format(kubeconfig_path))
            raise e

    kubeconfig_str = set_kubeconfig_auth(kubeconfig, context_auth)

    with open(engine_args.input, "r") as stream:
        input_file = yaml.safe_load(stream)
        if "input_list" in input_file and isinstance(input_file["input_list"], list):
            for index, _ in enumerate(input_file["input_list"]):
                if isinstance(input_file["input_list"][index], dict):
                    input_file["input_list"][index]["kubeconfig"] = kubeconfig_str
        else:
            input_file["kubeconfig"] = kubeconfig_str
    with open(engine_args.input, "w") as stream:
        yaml.safe_dump(input_file, stream)

    with open(engine_args.config, "r") as stream:
        config_file = yaml.safe_load(stream)
        if config_file["deployers"]["image"]["deployer_name"] == "kubernetes":
            kube_connection = set_kubernetes_deployer_auth(config_file["deployers"]["image"]["connection"], context_auth)
            config_file["deployers"]["image"]["connection"] = kube_connection
    with open(engine_args.config, "w") as stream:
        yaml.safe_dump(config_file, stream, explicit_start=True, width=4096)


def set_kubernetes_deployer_auth(deployer: any, context_auth: ContextAuth) -> any:
    if context_auth.clusterHost is not None:
        deployer["host"] = context_auth.clusterHost
    if context_auth.clientCertificateData is not None:
        deployer["cert"] = context_auth.clientCertificateData
    if context_auth.clientKeyData is not None:
        deployer["key"] = context_auth.clientKeyData
    if context_auth.clusterCertificateData is not None:
        deployer["cacert"] = context_auth.clusterCertificateData
    if context_auth.username is not None:
        deployer["username"] = context_auth.username
    if context_auth.password is not None:
        deployer["password"] = context_auth.password
    if context_auth.bearerToken is not None:
        deployer["bearerToken"] = context_auth.bearerToken
    return deployer


def set_kubeconfig_auth(kubeconfig: any, context_auth: ContextAuth) -> str:
    """
    Builds an arcaflow-compatible kubeconfig representation and returns it as a string.
    In order to run arcaflow plugins in kubernetes/openshift, the kubeconfig must contain the client
    certificate/key and the server certificate base64-encoded in *-data fields within the kubeconfig
    file itself. That is not always the case; in fact, a kubeconfig may contain filesystem paths to
    those files instead. This function builds an arcaflow-compatible kubeconfig and returns it as a
    string that can be safely included in input.yaml.
    """
    if "current-context" not in kubeconfig.keys():
        raise Exception("invalid kubeconfig file, impossible to determine current-context")
    user_id = None
    cluster_id = None
    user_name = None
    cluster_name = None
    current_context = kubeconfig["current-context"]
    for context in kubeconfig["contexts"]:
        if context["name"] == current_context:
            user_name = context["context"]["user"]
            cluster_name = context["context"]["cluster"]
    if user_name is None:
        raise Exception("user not set for context {} in kubeconfig file".format(current_context))
    if cluster_name is None:
        raise Exception("cluster not set for context {} in kubeconfig file".format(current_context))

    for index, user in enumerate(kubeconfig["users"]):
        if user["name"] == user_name:
            user_id = index
    for index, cluster in enumerate(kubeconfig["clusters"]):
        if cluster["name"] == cluster_name:
            cluster_id = index

    if cluster_id is None:
        raise Exception("no cluster {} found in kubeconfig clusters".format(cluster_name))
    if "client-certificate" in kubeconfig["users"][user_id]["user"]:
        kubeconfig["users"][user_id]["user"]["client-certificate-data"] = context_auth.clientCertificateDataBase64
        del kubeconfig["users"][user_id]["user"]["client-certificate"]

    if "client-key" in kubeconfig["users"][user_id]["user"]:
        kubeconfig["users"][user_id]["user"]["client-key-data"] = context_auth.clientKeyDataBase64
        del kubeconfig["users"][user_id]["user"]["client-key"]

    if "certificate-authority" in kubeconfig["clusters"][cluster_id]["cluster"]:
        kubeconfig["clusters"][cluster_id]["cluster"]["certificate-authority-data"] = context_auth.clusterCertificateDataBase64
        del kubeconfig["clusters"][cluster_id]["cluster"]["certificate-authority"]
    kubeconfig_str = yaml.dump(kubeconfig)
    return kubeconfig_str
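To make the docstring's point concrete, here is a standalone toy sketch (a hypothetical helper, not part of this module) of turning a file-path credential into its inline `*-data` equivalent:

```python
# Toy illustration of the "*-data" inlining performed by set_kubeconfig_auth.
# The certificate path below is a placeholder.
import base64


def inline_data(path: str) -> str:
    with open(path, "rb") as f:
        return base64.b64encode(f.read()).decode("ascii")


user = {"client-certificate": "/tmp/client.crt"}
user["client-certificate-data"] = inline_data(user.pop("client-certificate"))
```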
@@ -1,142 +0,0 @@
import os
import base64


class ContextAuth:
    clusterCertificate: str = None
    clusterCertificateData: str = None
    clusterHost: str = None
    clientCertificate: str = None
    clientCertificateData: str = None
    clientKey: str = None
    clientKeyData: str = None
    clusterName: str = None
    username: str = None
    password: str = None
    bearerToken: str = None
    # TODO: integrate in krkn-lib-kubernetes in the next iteration

    @property
    def clusterCertificateDataBase64(self):
        if self.clusterCertificateData is not None:
            return base64.b64encode(bytes(self.clusterCertificateData, "utf-8")).decode("ascii")
        return None

    @property
    def clientCertificateDataBase64(self):
        if self.clientCertificateData is not None:
            return base64.b64encode(bytes(self.clientCertificateData, "utf-8")).decode("ascii")
        return None

    @property
    def clientKeyDataBase64(self):
        if self.clientKeyData is not None:
            return base64.b64encode(bytes(self.clientKeyData, "utf-8")).decode("ascii")
        return None

    def fetch_auth_data(self, kubeconfig: any):
        context_username = None
        current_context = kubeconfig["current-context"]
        if current_context is None:
            raise Exception("no current-context found in kubeconfig")

        for context in kubeconfig["contexts"]:
            if context["name"] == current_context:
                context_username = context["context"]["user"]
                self.clusterName = context["context"]["cluster"]
        if context_username is None:
            raise Exception("user not found for context {0}".format(current_context))
        if self.clusterName is None:
            raise Exception("cluster not found for context {0}".format(current_context))
        cluster_id = None
        user_id = None
        for index, user in enumerate(kubeconfig["users"]):
            if user["name"] == context_username:
                user_id = index
        if user_id is None:
            raise Exception("user {0} not found in kubeconfig users".format(context_username))

        for index, cluster in enumerate(kubeconfig["clusters"]):
            if cluster["name"] == self.clusterName:
                cluster_id = index

        if cluster_id is None:
            raise Exception("no cluster {} found in kubeconfig clusters".format(self.clusterName))

        user = kubeconfig["users"][user_id]["user"]
        cluster = kubeconfig["clusters"][cluster_id]["cluster"]
        # sets the cluster api URL
        self.clusterHost = cluster["server"]

        # client certificates
        if "client-key" in user:
            self.clientKey = user["client-key"]
            self.clientKeyData = self.read_file(user["client-key"])

        if "client-key-data" in user:
            try:
                self.clientKeyData = base64.b64decode(user["client-key-data"]).decode("utf-8")
            except Exception:
                raise Exception("impossible to decode client-key-data")

        if "client-certificate" in user:
            self.clientCertificate = user["client-certificate"]
            self.clientCertificateData = self.read_file(user["client-certificate"])

        if "client-certificate-data" in user:
            try:
                self.clientCertificateData = base64.b64decode(user["client-certificate-data"]).decode("utf-8")
            except Exception:
                raise Exception("impossible to decode client-certificate-data")

        # cluster certificate authority
        if "certificate-authority" in cluster:
            self.clusterCertificate = cluster["certificate-authority"]
            self.clusterCertificateData = self.read_file(cluster["certificate-authority"])

        if "certificate-authority-data" in cluster:
            try:
                self.clusterCertificateData = base64.b64decode(cluster["certificate-authority-data"]).decode("utf-8")
            except Exception:
                raise Exception("impossible to decode certificate-authority-data")

        if "username" in user:
            self.username = user["username"]

        if "password" in user:
            self.password = user["password"]

        if "token" in user:
            self.bearerToken = user["token"]

    def read_file(self, filename: str) -> str:
        if not os.path.exists(filename):
            raise Exception("file not found {0}".format(filename))
        with open(filename, "rb") as file_stream:
            return file_stream.read().decode("utf-8")
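A minimal usage sketch of the class above (the kubeconfig path is a placeholder):

```python
# Hypothetical usage: parse a kubeconfig and extract its auth material.
import yaml

auth = ContextAuth()
with open("/path/to/kubeconfig") as stream:
    auth.fetch_auth_data(yaml.safe_load(stream))
print(auth.clusterHost, auth.clusterName)
```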
@@ -1,19 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p
a3ViZUNBMB4XDTIzMDMxMzE1NDAxM1oXDTMzMDMxMTE1NDAxM1owFTETMBEGA1UE
AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMnz
U/gIbJBRGOgNYVKX2fV03ANOwnM4VjquR28QMAdxURqgOFZ6IxYNysHEyxxE9I+I
DAm9hi4vQPbOX7FlxUezuzw+ExEfa6RRJ+n+AGJOV1lezCVph6OaJxB1+L1UqaDZ
eM3B4cUf/iCc5Y4bs927+CBG3MJL/jmCVPCO+MiSn/l73PXSFNJAYMvRj42zkXqD
CVG9CwY2vWgZnnzl01l7jNGtie871AmV2uqKakJrQ2ILhD+8fZk4jE5JBDTCZnqQ
pXIc+vERNKLUS8cvjO6Ux8dMv/Z7+xonpXOU59LlpUdHWP9jgCvMTwiOriwqGjJ+
pQJWpX9Dm+oxJiVOJzsCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW
MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
BBQU9pDMtbayJdNM6bp0IG8dcs15qTANBgkqhkiG9w0BAQsFAAOCAQEAtl9TVKPA
hTnPODqv0AGTqreS9kLg4WUUjZRaPUkPWmtCoTh2Yf55nRWdHOHeZnCWDSg24x42
lpt+13IdqKew1RKTpKCTkicMFi090A01bYu/w39Cm6nOAA5h8zkgSkV5czvQotuV
SoN2vB+nbuY28ah5PkdqjMHEZbNwa59cgEke8wB1R1DWFQ/pqflrH2v9ACAuY+5Q
i673tA6CXrb1YfaCQnVBzcfvjGS1MqShPKpOLMF+/GccPczNimaBxMnKvYLvf3pN
qEUrJC00mAcein8HmxR2Xz8wredbMUUyrQxW29pZJwfGE5GU0olnlsA0lZLbTwio
xoolo5y+fsK/dA==
-----END CERTIFICATE-----
@@ -1,19 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDITCCAgmgAwIBAgIBAjANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p
a3ViZUNBMB4XDTIzMDUwMTA4NTc0N1oXDTI2MDUwMTA4NTc0N1owMTEXMBUGA1UE
ChMOc3lzdGVtOm1hc3RlcnMxFjAUBgNVBAMTDW1pbmlrdWJlLXVzZXIwggEiMA0G
CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC0b7uy9nQYrh7uC5NODve7dFNLAgo5
pWRS6Kx13ULA55gOpieZiI5/1jwUBjOz0Hhl5QAdHC1HDNu5wf4MmwIEheuq3kMA
mfuvNxW2BnWSDuXyUMlBfqlwg5o6W8ndEWaK33D7wd2WQsSsAnhQPJSjnzWKvWKq
+Kbcygc4hdss/ZWN+SXLTahNpHBw0sw8AcJqddNeXs2WI5GdZmbXL4QZI36EaNUm
m4xKmKRKYIP9wYkmXOV/D2h1meM44y4lul5v2qvo6I+umJ84q4W1/W1vVmAzyVfL
v1TQCUx8cpKMHzw3ma6CTBCtU3Oq9HKHBnf8GyHZicmV7ESzf/phJu4ZAgMBAAGj
YDBeMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUH
AwIwDAYDVR0TAQH/BAIwADAfBgNVHSMEGDAWgBQU9pDMtbayJdNM6bp0IG8dcs15
qTANBgkqhkiG9w0BAQsFAAOCAQEABNzEQQMYUcLsBASHladEjr46avKn7gREfaDl
Y5PBvgCPP42q/sW/9iCNY3UpT9TJZWM6s01+0p6I96jYbRQER1NX7O4OgQYHmFw2
PF6UOG2vMo54w11OvL7sbr4d+nkE6ItdM9fLDIJ3fEOYJZkSoxhOL/U3jSjIl7Wu
KCIlpM/M/gcZ4w2IvcLrWtvswbFNUd+dwQfBGcQTmSQDOLE7MqSvzYAkeNv73GLB
ieba7gs/PmoTFsf9nW60iXymDDF4MtODn15kqT/y1uD6coujmiEiIomBfxqAkUCU
0ciP/KF5oOEMmMedm7/peQxaRTMdRSk4yu7vbj/BxnTcj039Qg==
-----END CERTIFICATE-----
@@ -1,27 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAtG+7svZ0GK4e7guTTg73u3RTSwIKOaVkUuisdd1CwOeYDqYn
mYiOf9Y8FAYzs9B4ZeUAHRwtRwzbucH+DJsCBIXrqt5DAJn7rzcVtgZ1kg7l8lDJ
QX6pcIOaOlvJ3RFmit9w+8HdlkLErAJ4UDyUo581ir1iqvim3MoHOIXbLP2Vjfkl
y02oTaRwcNLMPAHCanXTXl7NliORnWZm1y+EGSN+hGjVJpuMSpikSmCD/cGJJlzl
fw9odZnjOOMuJbpeb9qr6OiPrpifOKuFtf1tb1ZgM8lXy79U0AlMfHKSjB88N5mu
gkwQrVNzqvRyhwZ3/Bsh2YnJlexEs3/6YSbuGQIDAQABAoIBAQCdJxPb8zt6o2zc
98f8nJy378D7+3LccmjGrVBH98ZELXIKkDy9RGqYfQcmiaBOZKv4U1OeBwSIdXKK
f6O9ZuSC/AEeeSbyRysmmFuYhlewNrmgKyyelqsNDBIv8fIHUTh2i9Xj8B4G2XBi
QGR5vcnYGLqRdBGTx63Nb0iKuksDCwPAuPA/e0ySz9HdWL1j4bqpVSYsOIXsqTDr
CVnxUeSIL0fFQnRm3IASXQD7zdq9eEFX7vESeleZoz8qNcKb4Na/C3N6crScjgH7
qyNZ2zNLfy1LT84k8uc1TMX2KcEVEmfdDv5cCnUH2ic12CwXMZ0vgId5LJTaHx4x
ytIQIe5hAoGBANB+TsRXP4KzcjZlUUfiAp/pWUM4kVktbsfZa1R2NEuIGJUxPk3P
7WS0WX5W75QKRg+UWTubg5kfd0f9fklLgofmliBnY/HrpgdyugJmUZBgzIxmy0k+
aCe0biD1gULfyyrKtfe8k5wRFstzhfGszlOf2ebR87sSVNBuF2lEwPTvAoGBAN2M
0/XrsodGU4B9Mj86Go2gb2k2WU2izI0cO+tm2S5U5DvKmVEnmjXfPRaOFj2UUQjo
cljnDAinbN+O0+Inc35qsEeYdAIepNAPglzcpfTHagja9mhx2idLYTXGhbZLL+Ei
TRzMyP27NF+GVVfYU/cA86ns6NboG6spohmnqh13AoGAKPc4aNGv0/GIVnHP56zb
0SnbdR7PSFNp+fCZay4Slmi2U9IqKMXbIjdhgjZ4uoDORU9jvReQYuzQ1h9TyfkB
O8yt4M4P0D/6DmqXa9NI4XJznn6wIMMXWf3UybsTW913IQBVgsjVxAuDjBQ11Eec
/sdg3D6SgkZWzeFjzjZJJ5cCgYBSYVg7fE3hERxhjawOaJuRCBQFSklAngVzfwkk
yhR9ruFC/l2uGIy19XFwnprUgP700gIa3qbR3PeV1TUiRcsjOaacqKqSUzSzjODL
iNxIvZHHAyxWv+b/b38REOWNWD3QeAG2cMtX1bFux7OaO31VPkxcZhRaPOp05cE5
yudtlwKBgDBbR7RLYn03OPm3NDBLLjTybhD8Iu8Oj7UeNCiEWAdZpqIKYnwSxMzQ
kdo4aTENA/seEwq+XDV7TwbUIFFJg5gDXIhkcK2c9kiO2bObCAmKpBlQCcrp0a5X
NSBk1N/ZG/Qhqns7z8k01KN4LNcdpRoNiYYPgY+p3xbY8+nWhv+q
-----END RSA PRIVATE KEY-----
Some files were not shown because too many files have changed in this diff.