Compare commits


1 Commit

Author | SHA1 | Message | Date
Janos Bonic | d8c4c27a7d | Fixes #265: Replace powerfulseal | 2022-06-07 17:11:53 +02:00
396 changed files with 6816 additions and 35736 deletions


@@ -1,4 +0,0 @@
[run]
omit =
tests/*
krkn/tests/**

1
.github/CODEOWNERS vendored

@@ -1 +0,0 @@
* @paigerube14 @tsebastiani @chaitanyaenr


@@ -1,43 +0,0 @@
---
name: Bug report
about: Create a report of an issue
title: "[BUG]"
labels: bug
---
# Bug Description
## **Describe the bug**
A clear and concise description of what the bug is.
## **To Reproduce**
Any specific steps used to reproduce the behavior
### Scenario File
Scenario file(s) specified in your config file (confidential information can be masked with asterisks (*))
```yaml
<config>
```
### Config File
Config file you used when the error was seen (the default is config/config.yaml)
```yaml
<config>
```
## **Expected behavior**
A clear and concise description of what you expected to happen.
## **Krkn Output**
Krkn output to help show your problem
## **Additional context**
Add any other context about the problem


@@ -1,16 +0,0 @@
---
name: New Feature Request
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to see added/changed. Ex. new parameter in [xxx] scenario, new scenario that does [xxx]
**Additional context**
Add any other context about the feature request here.


@@ -1,47 +0,0 @@
# Type of change
- [ ] Refactor
- [ ] New feature
- [ ] Bug fix
- [ ] Optimization
# Description
<!-- Provide a brief description of the changes made in this PR. -->
## Related Tickets & Documents
If there is no related issue, please create one and start the conversation about the desired change there.
- Related Issue #:
- Closes #:
# Documentation
- [ ] **Is documentation needed for this update?**
If checked, a documentation PR must be created and merged in the [website repository](https://github.com/krkn-chaos/website/).
## Related Documentation PR (if applicable)
<!-- Add the link to the corresponding documentation PR in the website repository -->
# Checklist before requesting a review
- [ ] Ensure the changes and proposed solution have been discussed in the relevant issue and have received acknowledgment from the community or maintainers. See the [contributing guidelines](https://krkn-chaos.dev/docs/contribution-guidelines/)
See [testing your changes](https://krkn-chaos.dev/docs/developers-guide/testing-changes/) and run your changes on any Kubernetes or OpenShift cluster to validate them
- [ ] I have performed a self-review of my code by running krkn and specific scenario
- [ ] If it is a core feature, I have added thorough unit tests with above 80% coverage
*REQUIRED*:
Description of the combination of tests performed and the output of the run
```bash
python run_kraken.py
...
<---insert test results output--->
```
OR
```bash
python -m coverage run -a -m unittest discover -s tests -v
...
<---insert test results output--->
```


@@ -1,7 +0,0 @@
## Release {VERSION}
### Download Artifacts
- 📦 Krkn sources (noarch): [krkn-{VERSION}-src.tar.gz](https://krkn-chaos.gateway.scarf.sh/krkn-src-{VERSION}.tar.gz)
### Changes
{CHANGES}

0
.github/release.yml vendored

40
.github/workflows/build.yml vendored Normal file

@@ -0,0 +1,40 @@
name: Build Krkn
on:
push:
branches:
- main
pull_request:
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v3
- name: Create multi-node KinD cluster
uses: chaos-kubox/actions/kind@main
- name: Install environment
run: |
sudo apt-get install build-essential python3-dev
pip install -r requirements.txt
- name: Run unit tests
run: python -m unittest discover
- name: Run e2e tests
run: ./CI/run.sh
- name: Build the Docker images
run: docker build --no-cache -t quay.io/chaos-kubox/krkn containers/
- name: Login in quay
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
run: docker login quay.io -u ${QUAY_USER} -p ${QUAY_TOKEN}
env:
QUAY_USER: ${{ secrets.QUAY_USER_1 }}
QUAY_TOKEN: ${{ secrets.QUAY_TOKEN_1 }}
- name: Push the Docker images
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
run: docker push quay.io/chaos-kubox/krkn
- name: Rebuild krkn-hub
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
uses: chaos-kubox/actions/krkn-hub@main
with:
QUAY_USER: ${{ secrets.QUAY_USER_1 }}
QUAY_TOKEN: ${{ secrets.QUAY_TOKEN_1 }}


@@ -1,55 +0,0 @@
name: Docker Image CI
on:
push:
tags: ['v[0-9].[0-9]+.[0-9]+']
pull_request:
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v3
- name: Build the Docker images
if: startsWith(github.ref, 'refs/tags')
run: |
./containers/compile_dockerfile.sh
docker build --no-cache -t quay.io/krkn-chaos/krkn containers/ --build-arg TAG=${GITHUB_REF#refs/tags/}
docker tag quay.io/krkn-chaos/krkn quay.io/redhat-chaos/krkn
docker tag quay.io/krkn-chaos/krkn quay.io/krkn-chaos/krkn:${GITHUB_REF#refs/tags/}
docker tag quay.io/krkn-chaos/krkn quay.io/redhat-chaos/krkn:${GITHUB_REF#refs/tags/}
- name: Test Build the Docker images
if: ${{ github.event_name == 'pull_request' }}
run: |
./containers/compile_dockerfile.sh
docker build --no-cache -t quay.io/krkn-chaos/krkn containers/ --build-arg PR_NUMBER=${{ github.event.pull_request.number }}
- name: Login in quay
if: startsWith(github.ref, 'refs/tags')
run: docker login quay.io -u ${QUAY_USER} -p ${QUAY_TOKEN}
env:
QUAY_USER: ${{ secrets.QUAY_USERNAME }}
QUAY_TOKEN: ${{ secrets.QUAY_PASSWORD }}
- name: Push the KrknChaos Docker images
if: startsWith(github.ref, 'refs/tags')
run: |
docker push quay.io/krkn-chaos/krkn
docker push quay.io/krkn-chaos/krkn:${GITHUB_REF#refs/tags/}
- name: Login in to redhat-chaos quay
if: startsWith(github.ref, 'refs/tags/v')
run: docker login quay.io -u ${QUAY_USER} -p ${QUAY_TOKEN}
env:
QUAY_USER: ${{ secrets.QUAY_USER_1 }}
QUAY_TOKEN: ${{ secrets.QUAY_TOKEN_1 }}
- name: Push the RedHat Chaos Docker images
if: startsWith(github.ref, 'refs/tags')
run: |
docker push quay.io/redhat-chaos/krkn
docker push quay.io/redhat-chaos/krkn:${GITHUB_REF#refs/tags/}
- name: Rebuild krkn-hub
if: startsWith(github.ref, 'refs/tags')
uses: redhat-chaos/actions/krkn-hub@main
with:
QUAY_USER: ${{ secrets.QUAY_USERNAME }}
QUAY_TOKEN: ${{ secrets.QUAY_PASSWORD }}
AUTOPUSH: ${{ secrets.AUTOPUSH }}


@@ -1,60 +0,0 @@
name: Create Release
on:
push:
tags:
- 'v*'
jobs:
release:
permissions:
contents: write
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: calculate previous tag
run: |
git fetch --tags origin
PREVIOUS_TAG=$(git tag --sort=-creatordate | sed -n '2 p')
echo $PREVIOUS_TAG
echo "PREVIOUS_TAG=$PREVIOUS_TAG" >> "$GITHUB_ENV"
- name: generate release notes from template
id: release-notes
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
NOTES=$(gh api \
--method POST \
-H "Accept: application/vnd.github+json" \
-H "X-GitHub-Api-Version: 2022-11-28" \
/repos/krkn-chaos/krkn/releases/generate-notes \
-f "tag_name=${{ github.ref_name }}" -f "target_commitish=main" -f "previous_tag_name=${{ env.PREVIOUS_TAG }}" | jq -r .body)
echo "NOTES<<EOF" >> $GITHUB_ENV
echo "$NOTES" >> $GITHUB_ENV
echo "EOF" >> $GITHUB_ENV
- name: replace placeholders in template
run: |
echo "${{ env.NOTES }}"
TEMPLATE=$(cat .github/release-template.md)
VERSION=${{ github.ref_name }}
NOTES="${{ env.NOTES }}"
OUTPUT=${TEMPLATE//\{VERSION\}/$VERSION}
OUTPUT=${OUTPUT//\{CHANGES\}/$NOTES}
echo "$OUTPUT" > release-notes.md
- name: create release
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
gh release create ${{ github.ref_name }} --title "${{ github.ref_name }}" -F release-notes.md
- name: Install Syft
run: |
curl -sSfL https://raw.githubusercontent.com/anchore/syft/main/install.sh | sudo sh -s -- -b /usr/local/bin
- name: Generate SBOM
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
syft . --scope all-layers --output cyclonedx-json > sbom.json
echo "SBOM generated successfully!"
gh release upload ${{ github.ref_name }} sbom.json


@@ -1,45 +0,0 @@
name: Require Documentation Update
on:
pull_request:
types: [opened, edited, synchronize]
branches:
- main
jobs:
check-docs:
name: Check Documentation Update
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Check if Documentation is Required
id: check_docs
run: |
echo "Checking PR body for documentation checkbox..."
# Read the PR body from the GitHub event payload
if echo "${{ github.event.pull_request.body }}" | grep -qi '\[x\].*documentation needed'; then
echo "Documentation required detected."
echo "docs_required=true" >> $GITHUB_OUTPUT
else
echo "Documentation not required."
echo "docs_required=false" >> $GITHUB_OUTPUT
fi
- name: Enforce Documentation Update (if required)
if: steps.check_docs.outputs.docs_required == 'true'
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
# Retrieve feature branch and repository owner from the GitHub context
FEATURE_BRANCH="${{ github.head_ref }}"
REPO_OWNER="${{ github.repository_owner }}"
WEBSITE_REPO="website"
echo "Searching for a merged documentation PR for feature branch: $FEATURE_BRANCH in $REPO_OWNER/$WEBSITE_REPO..."
MERGED_PR=$(gh pr list --repo "$REPO_OWNER/$WEBSITE_REPO" --state merged --json headRefName,title,url | jq -r \
--arg FEATURE_BRANCH "$FEATURE_BRANCH" '.[] | select(.title | contains($FEATURE_BRANCH)) | .url')
if [[ -z "$MERGED_PR" ]]; then
echo ":x: Documentation PR for branch '$FEATURE_BRANCH' is required and has not been merged."
exit 1
else
echo ":white_check_mark: Found merged documentation PR: $MERGED_PR"
fi


@@ -1,52 +0,0 @@
name: Manage Stale Issues and Pull Requests
on:
schedule:
# Run daily at 1:00 AM UTC
- cron: '0 1 * * *'
workflow_dispatch:
permissions:
issues: write
pull-requests: write
jobs:
stale:
name: Mark and Close Stale Issues and PRs
runs-on: ubuntu-latest
steps:
- name: Mark and close stale issues and PRs
uses: actions/stale@v9
with:
days-before-issue-stale: 60
days-before-issue-close: 14
stale-issue-label: 'stale'
stale-issue-message: |
This issue has been automatically marked as stale because it has not had any activity in the last 60 days.
It will be closed in 14 days if no further activity occurs.
If this issue is still relevant, please leave a comment or remove the stale label.
Thank you for your contributions to krkn!
close-issue-message: |
This issue has been automatically closed due to inactivity.
If you believe this issue is still relevant, please feel free to reopen it or create a new issue with updated information.
Thank you for your understanding!
close-issue-reason: 'not_planned'
days-before-pr-stale: 90
days-before-pr-close: 14
stale-pr-label: 'stale'
stale-pr-message: |
This pull request has been automatically marked as stale because it has not had any activity in the last 90 days.
It will be closed in 14 days if no further activity occurs.
If this PR is still relevant, please rebase it, address any pending reviews, or leave a comment.
Thank you for your contributions to krkn!
close-pr-message: |
This pull request has been automatically closed due to inactivity.
If you believe this PR is still relevant, please feel free to reopen it or create a new pull request with updated changes.
Thank you for your understanding!
# Exempt labels
exempt-issue-labels: 'bug,enhancement,good first issue'
exempt-pr-labels: 'pending discussions,hold'
remove-stale-when-updated: true


@@ -1,206 +0,0 @@
name: Functional & Unit Tests
on:
pull_request:
push:
branches:
- main
jobs:
tests:
# Common steps
name: Functional & Unit Tests
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v3
- name: Create multi-node KinD cluster
uses: redhat-chaos/actions/kind@main
- name: Deploy prometheus & Port Forwarding
uses: redhat-chaos/actions/prometheus@main
- name: Deploy Elasticsearch
with:
ELASTIC_PORT: ${{ env.ELASTIC_PORT }}
RUN_ID: ${{ github.run_id }}
uses: redhat-chaos/actions/elastic@main
- name: Download elastic password
uses: actions/download-artifact@v4
with:
name: elastic_password_${{ github.run_id }}
- name: Set elastic password on env
run: |
ELASTIC_PASSWORD=$(cat elastic_password.txt)
echo "ELASTIC_PASSWORD=$ELASTIC_PASSWORD" >> "$GITHUB_ENV"
- name: Install Python
uses: actions/setup-python@v4
with:
python-version: '3.11'
architecture: 'x64'
- name: Install environment
run: |
sudo apt-get install build-essential python3-dev
pip install --upgrade pip
pip install -r requirements.txt
pip install coverage
- name: Deploy test workloads
run: |
es_pod_name=$(kubectl get pods -l "app=elasticsearch-master" -o name)
echo "POD_NAME: $es_pod_name"
kubectl --namespace default port-forward $es_pod_name 9200 &
prom_name=$(kubectl get pods -n monitoring -l "app.kubernetes.io/name=prometheus" -o name)
kubectl --namespace monitoring port-forward $prom_name 9090 &
# Wait for Elasticsearch to be ready
echo "Waiting for Elasticsearch to be ready..."
for i in {1..30}; do
if curl -k -s -u elastic:$ELASTIC_PASSWORD https://localhost:9200/_cluster/health > /dev/null 2>&1; then
echo "Elasticsearch is ready!"
break
fi
echo "Attempt $i: Elasticsearch not ready yet, waiting..."
sleep 2
done
kubectl apply -f CI/templates/outage_pod.yaml
kubectl wait --for=condition=ready pod -l scenario=outage --timeout=300s
kubectl apply -f CI/templates/container_scenario_pod.yaml
kubectl wait --for=condition=ready pod -l scenario=container --timeout=300s
kubectl create namespace namespace-scenario
kubectl apply -f CI/templates/time_pod.yaml
kubectl wait --for=condition=ready pod -l scenario=time-skew --timeout=300s
kubectl apply -f CI/templates/service_hijacking.yaml
kubectl wait --for=condition=ready pod -l "app.kubernetes.io/name=proxy" --timeout=300s
kubectl apply -f CI/legacy/scenarios/volume_scenario.yaml
kubectl wait --for=condition=ready pod kraken-test-pod -n kraken --timeout=300s
- name: Get Kind nodes
run: |
kubectl get nodes --show-labels=true
# Pull request only steps
- name: Run unit tests
run: python -m coverage run -a -m unittest discover -s tests -v
- name: Setup Functional Tests
run: |
yq -i '.kraken.performance_monitoring="localhost:9090"' CI/config/common_test_config.yaml
yq -i '.elastic.elastic_port=9200' CI/config/common_test_config.yaml
yq -i '.elastic.elastic_url="https://localhost"' CI/config/common_test_config.yaml
yq -i '.elastic.enable_elastic=False' CI/config/common_test_config.yaml
yq -i '.elastic.password="${{env.ELASTIC_PASSWORD}}"' CI/config/common_test_config.yaml
yq -i '.performance_monitoring.prometheus_url="http://localhost:9090"' CI/config/common_test_config.yaml
echo "test_app_outages" >> ./CI/tests/functional_tests
echo "test_container" >> ./CI/tests/functional_tests
echo "test_cpu_hog" >> ./CI/tests/functional_tests
echo "test_customapp_pod" >> ./CI/tests/functional_tests
echo "test_io_hog" >> ./CI/tests/functional_tests
echo "test_memory_hog" >> ./CI/tests/functional_tests
echo "test_namespace" >> ./CI/tests/functional_tests
echo "test_net_chaos" >> ./CI/tests/functional_tests
echo "test_node" >> ./CI/tests/functional_tests
echo "test_pod" >> ./CI/tests/functional_tests
echo "test_pod_error" >> ./CI/tests/functional_tests
echo "test_service_hijacking" >> ./CI/tests/functional_tests
echo "test_pod_network_filter" >> ./CI/tests/functional_tests
echo "test_pod_server" >> ./CI/tests/functional_tests
echo "test_time" >> ./CI/tests/functional_tests
# echo "test_pvc" >> ./CI/tests/functional_tests
# Push on main only steps + all other functional to collect coverage
# for the badge
- name: Configure AWS Credentials
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
uses: aws-actions/configure-aws-credentials@v4
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region : ${{ secrets.AWS_REGION }}
- name: Setup Post Merge Request Functional Tests
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
run: |
yq -i '.telemetry.username="${{secrets.TELEMETRY_USERNAME}}"' CI/config/common_test_config.yaml
yq -i '.telemetry.password="${{secrets.TELEMETRY_PASSWORD}}"' CI/config/common_test_config.yaml
echo "test_telemetry" >> ./CI/tests/functional_tests
# Final common steps
- name: Run Functional tests
env:
AWS_BUCKET: ${{ secrets.AWS_BUCKET }}
run: |
./CI/run.sh
cat ./CI/results.markdown >> $GITHUB_STEP_SUMMARY
echo >> $GITHUB_STEP_SUMMARY
- name: Upload CI logs
if: ${{ always() }}
uses: actions/upload-artifact@v4
with:
name: ci-logs
path: CI/out
if-no-files-found: error
- name: Collect coverage report
if: ${{ always() }}
run: |
python -m coverage html
python -m coverage json
- name: Publish coverage report to job summary
if: ${{ always() }}
run: |
pip install html2text
html2text --ignore-images --ignore-links -b 0 htmlcov/index.html >> $GITHUB_STEP_SUMMARY
- name: Upload coverage data
if: ${{ always() }}
uses: actions/upload-artifact@v4
with:
name: coverage
path: htmlcov
if-no-files-found: error
- name: Upload json coverage
if: ${{ always() }}
uses: actions/upload-artifact@v4
with:
name: coverage.json
path: coverage.json
if-no-files-found: error
- name: Check CI results
if: ${{ always() }}
run: "! grep Fail CI/results.markdown"
badge:
permissions:
contents: write
name: Generate Coverage Badge
runs-on: ubuntu-latest
needs:
- tests
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
steps:
- name: Check out doc repo
uses: actions/checkout@master
with:
repository: krkn-chaos/krkn-lib-docs
path: krkn-lib-docs
ssh-key: ${{ secrets.KRKN_LIB_DOCS_PRIV_KEY }}
- name: Download json coverage
uses: actions/download-artifact@v4
with:
name: coverage.json
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: '3.11'
- name: Copy badge on GitHub Page Repo
env:
COLOR: yellow
run: |
# generate coverage badge on previously calculated total coverage
# and copy in the docs page
export TOTAL=$(python -c "import json;print(json.load(open('coverage.json'))['totals']['percent_covered_display'])")
[[ $TOTAL > 40 ]] && COLOR=green
echo "TOTAL: $TOTAL"
echo "COLOR: $COLOR"
curl "https://img.shields.io/badge/coverage-$TOTAL%25-$COLOR" > ./krkn-lib-docs/coverage_badge_krkn.svg
- name: Push updated Coverage Badge
run: |
cd krkn-lib-docs
git add .
git config user.name "krkn-chaos"
git config user.email "krkn-actions@users.noreply.github.com"
git commit -m "[KRKN] Coverage Badge ${GITHUB_REF##*/}" || echo "no changes to commit"
git push

9
.gitignore vendored

@@ -16,7 +16,6 @@ __pycache__/*
*.out
kube-burner*
kube_burner*
recommender_*.json
# Project files
.ropeproject
@@ -24,8 +23,6 @@ recommender_*.json
.pydevproject
.settings
.idea
.vscode
config/debug.yaml
tags
# Package files
@@ -62,9 +59,5 @@ inspect.local.*
!CI/config/common_test_config.yaml
CI/out/*
CI/ci_results
CI/legacy/*node.yaml
CI/scenarios/*node.yaml
CI/results.markdown
#env
chaos/*


@@ -1,6 +0,0 @@
[allowlist]
description = "Global Allowlist"
paths = [
'''kraken/arcaflow_plugin/fixtures/*'''
]


@@ -1,9 +0,0 @@
# Krkn Adopters
This is a list of organizations that have publicly acknowledged usage of Krkn and shared details of how they are leveraging it in their environment for chaos engineering use cases. Do you want to add yourself to this list? Please fork the repository and open a PR with the required change.
| Organization | Since | Website | Use-Case |
|:-|:-|:-|:-|
| MarketAxess | 2024 | https://www.marketaxess.com/ | Kraken enables us to achieve our goal of increasing the reliability of our cloud products on Kubernetes. The tool allows us to automatically run various chaos scenarios, identify resilience and performance bottlenecks, and seamlessly restore the system to its original state once scenarios finish. These chaos scenarios include pod disruptions, node (EC2) outages, simulating availability zone (AZ) outages, and filling up storage spaces like EBS and EFS. The community is highly responsive to requests and works on expanding the tool's capabilities. MarketAxess actively contributes to the project, adding features such as the ability to leverage existing network ACLs and proposing several feature improvements to enhance test coverage. |
| Red Hat Openshift | 2020 | https://www.redhat.com/ | Kraken is a highly reliable chaos testing tool used to ensure the quality and resiliency of Red Hat Openshift. The engineering team runs all the test scenarios under Kraken on different cloud platforms on both self-managed and cloud services environments prior to the release of a new version of the product. The team also contributes to the Kraken project consistently which helps the test scenarios to keep up with the new features introduced to the product. Inclusion of this test coverage has contributed to gaining the trust of new and existing customers of the product. |
| IBM | 2023 | https://www.ibm.com/ | While working on AI for Chaos Testing at IBM Research, we closely collaborated with the Kraken (Krkn) team to advance intelligent chaos engineering. Our contributions included developing AI-enabled chaos injection strategies and integrating reinforcement learning (RL)-based fault search techniques into the Krkn tool, enabling it to identify and explore system vulnerabilities more efficiently. Kraken stands out as one of the most user-friendly and effective tools for chaos engineering, and the Kraken team's deep technical involvement played a crucial role in the success of this collaboration, helping bridge cutting-edge AI research with practical, real-world system reliability testing. |


@@ -1,7 +1,7 @@
## CI Tests
### First steps
Edit [functional_tests](tests/functional_tests) with tests you want to run
Edit [my_tests](tests/my_tests) with tests you want to run
### How to run
```./CI/run.sh```
@@ -11,7 +11,7 @@ This will run kraken using python, make sure python3 is set up and configured pr
### Adding a test case
1. Add in simple scenario yaml file to execute under [../CI/scenarios/](legacy)
1. Add in simple scenario yaml file to execute under [../CI/scenarios/](scenarios)
2. Copy [test_application_outages.sh](tests/test_app_outages.sh) for example on how to get started
@@ -27,7 +27,7 @@ This will run kraken using python, make sure python3 is set up and configured pr
e. 15: Make sure name of config in line 14 matches what you pass on this line
4. Add test name to [functional_tests](../CI/tests/functional_tests) file
4. Add test name to [my_tests](../CI/tests/my_tests) file
a. This will be the name of the file without ".sh"
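As a rough illustration of the steps above, a new test script could follow the same pattern as the existing ones shown elsewhere in this diff (for example test_pods.sh and test_app_outages.sh); the file and scenario names below (test_my_scenario.sh, my_scenario.yaml) are hypothetical placeholders, not files in the repository:
```bash
#!/bin/bash
# Hypothetical CI/tests/test_my_scenario.sh, sketched from the existing test scripts
set -xeEo pipefail
source CI/tests/common.sh   # provides the error/finish traps used by every test
trap error ERR
trap finish EXIT

function functional_test_my_scenario {
  # Point krkn at the scenario yaml added under CI/scenarios/
  export scenario_type="pod_scenarios"
  export scenario_file="- CI/scenarios/my_scenario.yaml"
  export post_config=""
  # Render the shared config template with the exported variables
  envsubst < CI/config/common_test_config.yaml > CI/config/my_scenario_config.yaml
  python3 run_kraken.py -c CI/config/my_scenario_config.yaml
  echo "My scenario test: Success"
}

functional_test_my_scenario
```
The entry added to the my_tests file would then be the script name without ".sh", i.e. test_my_scenario.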


@@ -1,74 +1,31 @@
kraken:
distribution: kubernetes # Distribution can be kubernetes or openshift.
kubeconfig_path: ~/.kube/config # Path to kubeconfig.
distribution: openshift # Distribution can be kubernetes or openshift.
kubeconfig_path: /root/.kube/config # Path to kubeconfig.
exit_on_failure: False # Exit when a post action scenario fails.
publish_kraken_status: True # Can be accessed at http://0.0.0.0:8081
signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer to docs/signal.md for more details
signal_address: 0.0.0.0 # Signal listening address
port: 8081 # Signal port
auto_rollback: True # Enable auto rollback for scenarios.
rollback_versions_directory: /tmp/kraken-rollback # Directory to store rollback version files.
litmus_version: v1.13.6 # Litmus version to install.
litmus_uninstall: False # If you want to uninstall litmus if failure.
chaos_scenarios: # List of policies/chaos scenarios to load.
- $scenario_type: # List of chaos pod scenarios to load.
- $scenario_file
$post_config
cerberus:
cerberus_enabled: False # Enable it when cerberus is previously installed.
cerberus_url: # When cerberus_enabled is set to True, provide the url where cerberus publishes go/no-go signal.
performance_monitoring:
deploy_dashboards: False # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift.
repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
kube_burner_binary_url: "https://github.com/cloud-bulldozer/kube-burner/releases/download/v0.9.1/kube-burner-0.9.1-Linux-x86_64.tar.gz"
capture_metrics: False
config_path: config/kube_burner.yaml # Define the Elasticsearch url and index name in this config.
metrics_profile_path: config/metrics-aggregated.yaml
prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
uuid: # uuid for the run is generated by default if not set.
enable_alerts: True # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
enable_metrics: True
alert_profile: config/alerts.yaml # Path or URL to alert profile with the prometheus queries
metrics_profile: config/metrics-report.yaml
check_critical_alerts: True # When enabled, checks Prometheus for critical alerts firing after the chaos run.
enable_alerts: False # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error.
alert_profile: config/alerts # Path to alert profile with the prometheus queries.
tunings:
wait_duration: 6 # Duration to wait between each chaos scenario.
iterations: 1 # Number of times to execute the scenarios.
daemon_mode: False # Iterations are set to infinity which means that the kraken will cause chaos forever.
telemetry:
enabled: False # enable/disables the telemetry collection feature
api_url: https://yvnn4rfoi7.execute-api.us-west-2.amazonaws.com/test #telemetry service endpoint
username: $TELEMETRY_USERNAME # telemetry service username
password: $TELEMETRY_PASSWORD # telemetry service password
prometheus_namespace: 'monitoring' # prometheus namespace
prometheus_pod_name: 'prometheus-kind-prometheus-kube-prome-prometheus-0' # prometheus pod_name
prometheus_container_name: 'prometheus'
prometheus_backup: True # enables/disables prometheus data collection
full_prometheus_backup: False # if is set to False only the /prometheus/wal folder will be downloaded.
backup_threads: 5 # number of telemetry download/upload threads
archive_path: /tmp # local path where the archive files will be temporarily stored
max_retries: 0 # maximum number of upload retries (if 0 will retry forever)
run_tag: '' # if set, this will be appended to the run folder in the bucket (useful to group the runs)
archive_size: 10000 # the size of the prometheus data archive in KB; the lower the size, the more archive files are produced and uploaded
logs_backup: True
logs_filter_patterns:
- "(\\w{3}\\s\\d{1,2}\\s\\d{2}:\\d{2}:\\d{2}\\.\\d+).+" # Sep 9 11:20:36.123425532
- "kinit (\\d+/\\d+/\\d+\\s\\d{2}:\\d{2}:\\d{2})\\s+" # kinit 2023/09/15 11:20:36 log
- "(\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d+Z).+" # 2023-09-15T11:20:36.123425532Z log
oc_cli_path: /usr/bin/oc # optional, if not specified it will be searched for in $PATH
events_backup: True # enables/disables cluster events collection
telemetry_group: "funtests"
elastic:
enable_elastic: False
verify_certs: False
elastic_url: "https://192.168.39.196" # To track results in elasticsearch, give url to server here; will post telemetry details when url and index not blank
elastic_port: 32766
username: "elastic"
password: "test"
metrics_index: "krkn-metrics"
alerts_index: "krkn-alerts"
telemetry_index: "krkn-telemetry"
health_checks: # Utilizing health check endpoints to observe application behavior during chaos injection.
interval: # Interval in seconds to perform health checks, default value is 2 seconds
config: # Provide list of health check configurations for applications
- url: # Provide application endpoint
bearer_token: # Bearer token for authentication if any
auth: # Provide authentication credentials (username , password) in tuple format if any, ex:("admin","secretpassword")
exit_on_failure: # If value is True exits when health check failed for application, values can be True/False
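For illustration only, a filled-in health_checks block might look like the following; the endpoint, credentials, and interval are made-up values, not defaults shipped with krkn:
```yaml
health_checks:                     # hypothetical example of the fields documented above
  interval: 2                      # check every 2 seconds (the documented default)
  config:
    - url: http://my-app.example.com/healthz    # made-up application endpoint
      bearer_token:                              # leave empty when not using token auth
      auth: ("admin","secretpassword")           # (username, password) tuple, as in the comment above
      exit_on_failure: True                      # abort the run if this health check fails
```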


@@ -1,89 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
kubernetes.io/metadata.name: kraken
pod-security.kubernetes.io/audit: privileged
pod-security.kubernetes.io/enforce: privileged
pod-security.kubernetes.io/enforce-version: v1.24
pod-security.kubernetes.io/warn: privileged
security.openshift.io/scc.podSecurityLabelSync: "false"
name: kraken
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: kraken-test-pv
namespace: kraken
labels:
type: local
spec:
storageClassName: manual
capacity:
storage: 2Gi
accessModes:
- ReadWriteOnce
hostPath:
path: "/mnt/data"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: kraken-test-pvc
namespace: kraken
spec:
storageClassName: manual
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: Pod
metadata:
name: kraken-test-pod
namespace: kraken
spec:
securityContext:
fsGroup: 1001
# initContainer to fix permissions on the mounted volume
initContainers:
- name: fix-permissions
image: 'quay.io/centos7/httpd-24-centos7:centos7'
command:
- sh
- -c
- |
echo "Setting up permissions for /home/kraken..."
# Create the directory if it doesn't exist
mkdir -p /home/kraken
# Set ownership to user 1001 and group 1001
chown -R 1001:1001 /home/kraken
# Set permissions to allow read/write
chmod -R 755 /home/kraken
rm -rf /home/kraken/*
echo "Permissions fixed. Current state:"
ls -la /home/kraken
volumeMounts:
- mountPath: "/home/kraken"
name: kraken-test-pv
securityContext:
runAsUser: 0 # Run as root to fix permissions
volumes:
- name: kraken-test-pv
persistentVolumeClaim:
claimName: kraken-test-pvc
containers:
- name: kraken-test-container
image: 'quay.io/centos7/httpd-24-centos7:centos7'
securityContext:
runAsUser: 1001
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
volumeMounts:
- mountPath: "/home/kraken"
name: kraken-test-pv


@@ -1,25 +1,9 @@
#!/bin/bash
MAX_RETRIES=60
set -x
KUBECTL=`which kubectl 2>/dev/null`
[[ $? != 0 ]] && echo "[ERROR]: kubectl missing, please install it and try again" && exit 1
ci_tests_loc="CI/tests/my_tests"
wait_cluster_become_ready() {
COUNT=1
until `$KUBECTL get namespace > /dev/null 2>&1`
do
echo "[INF] waiting Kubernetes to become ready, after $COUNT check"
sleep 3
[[ $COUNT == $MAX_RETRIES ]] && echo "[ERR] max retries exceeded, failing" && exit 1
((COUNT++))
done
}
ci_tests_loc="CI/tests/functional_tests"
echo -e "********* Running Functional Tests Suite *********\n\n"
echo "running test suit consisting of ${ci_tests}"
rm -rf CI/out
@@ -36,32 +20,7 @@ echo 'Test | Result | Duration' >> $results
echo '-----------------------|--------|---------' >> $results
# Run each test
failed_tests=()
for test_name in `cat CI/tests/functional_tests`
for test_name in `cat CI/tests/my_tests`
do
#wait_cluster_become_ready
return_value=`./CI/run_test.sh $test_name $results`
if [[ $return_value == 1 ]]
then
echo "Failed"
failed_tests+=("$test_name")
fi
wait_cluster_become_ready
./CI/run_test.sh $test_name $results
done
if (( ${#failed_tests[@]}>0 ))
then
echo -e "\n\n======================================================================"
echo -e "\n FUNCTIONAL TESTS FAILED ${failed_tests[*]} ABORTING"
echo -e "\n======================================================================\n\n"
for test in "${failed_tests[@]}"
do
echo -e "\n********** $test KRKN RUN OUTPUT **********\n"
cat "CI/out/$test.out"
echo -e "\n********************************************\n\n\n\n"
done
exit 1
fi


@@ -1,4 +1,5 @@
#!/bin/bash
set -x
readonly SECONDS_PER_HOUR=3600
readonly SECONDS_PER_MINUTE=60
function get_time_format() {
@@ -13,7 +14,9 @@ ci_test=`echo $1`
results_file=$2
echo -e "test: ${ci_test}" >&2
echo -e "\n======================================================================"
echo -e " CI test for ${ci_test} "
echo -e "======================================================================\n"
ci_results="CI/out/$ci_test.out"
# Test ci
@@ -25,16 +28,13 @@ then
# if the test passes update the results and complete
duration=$SECONDS
duration=$(get_time_format $duration)
echo -e "> $ci_test: Successful\n" >&2
echo "$ci_test: Successful"
echo "$ci_test | Pass | $duration" >> $results_file
count=$retries
# return value for run.sh
echo 0
else
duration=$SECONDS
duration=$(get_time_format $duration)
echo -e "> $ci_test: Failed\n" >&2
echo "$ci_test: Failed"
echo "$ci_test | Fail | $duration" >> $results_file
# return value for run.sh
echo 1
echo "Logs for "$ci_test
fi


@@ -0,0 +1,5 @@
application_outage: # Scenario to create an outage of an application by blocking traffic
duration: 10 # Duration in seconds after which the routes will be accessible
namespace: openshift-monitoring # Namespace to target - all application routes will go inaccessible if pod selector is empty
pod_selector: {} # Pods to target
block: [Ingress, Egress] # It can be Ingress or Egress or Ingress, Egress


@@ -0,0 +1,8 @@
scenarios:
- name: "kill machine config container"
namespace: "openshift-machine-config-operator"
label_selector: "k8s-app=machine-config-server"
container_name: "hello-openshift"
action: "kill 1"
count: 1
retry_wait: 60


@@ -0,0 +1,31 @@
---
kind: Pod
apiVersion: v1
metadata:
name: hello-pod
creationTimestamp:
labels:
name: hello-openshift
spec:
containers:
- name: hello-openshift
image: openshift/hello-openshift
ports:
- containerPort: 5050
protocol: TCP
resources: {}
volumeMounts:
- name: tmp
mountPath: "/tmp"
terminationMessagePath: "/dev/termination-log"
imagePullPolicy: IfNotPresent
securityContext:
capabilities: {}
privileged: false
volumes:
- name: tmp
emptyDir: {}
restartPolicy: Always
dnsPolicy: ClusterFirst
serviceAccount: ''
status: {}


@@ -0,0 +1,31 @@
config:
runStrategy:
runs: 1
maxSecondsBetweenRuns: 30
minSecondsBetweenRuns: 1
scenarios:
- name: "delete hello pods"
steps:
- podAction:
matches:
- labels:
namespace: "default"
selector: "hello-openshift"
filters:
- randomSample:
size: 1
actions:
- kill:
probability: 1
force: true
- podAction:
matches:
- labels:
namespace: "default"
selector: "hello-openshift"
retries:
retriesTimeout:
timeout: 180
actions:
- checkPodCount:
count: 1


@@ -0,0 +1,7 @@
scenarios:
- action: delete
namespace: "^.*ingress.*$"
label_selector:
runs: 1
sleep: 15
wait_time: 30


@@ -0,0 +1,6 @@
network_chaos: # Scenario to create an outage by simulating random variations in the network.
duration: 10 # seconds
instance_count: 1
execution: serial
egress:
bandwidth: 100mbit


@@ -0,0 +1,34 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos
namespace: litmus
spec:
# It can be true/false
annotationCheck: 'false'
# It can be active/stop
engineState: 'active'
chaosServiceAccount: litmus-sa
monitoring: false
# It can be delete/retain
jobCleanUpPolicy: 'delete'
experiments:
- name: node-cpu-hog
spec:
components:
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '10'
# Number of cores of node CPU to be consumed
- name: NODE_CPU_CORE
value: '1'
# percentage of total nodes to target
- name: NODES_AFFECTED_PERC
value: '30'
# ENTER THE COMMA SEPARATED TARGET NODES NAME
- name: TARGET_NODES
value: $WORKER_NODE


@@ -0,0 +1,34 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos
namespace: litmus
spec:
# It can be true/false
annotationCheck: 'false'
# It can be active/stop
engineState: 'active'
chaosServiceAccount: litmus-sa
monitoring: false
# It can be delete/retain
jobCleanUpPolicy: 'delete'
experiments:
- name: node-cpu-hog
spec:
components:
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '10'
# Number of cores of node CPU to be consumed
- name: NODE_CPU_CORE
value: '1'
# percentage of total nodes to target
- name: NODES_AFFECTED_PERC
value: '30'
# ENTER THE COMMA SEPARATED TARGET NODES NAME
- name: TARGET_NODES
value:


@@ -0,0 +1,35 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos
namespace: litmus
spec:
# It can be delete/retain
jobCleanUpPolicy: 'retain'
# It can be active/stop
engineState: 'active'
chaosServiceAccount: litmus-sa
experiments:
- name: node-io-stress
spec:
components:
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '10'
## specify the size as percentage of free space on the file system
- name: FILESYSTEM_UTILIZATION_PERCENTAGE
value: '100'
## Number of core of CPU
- name: CPU
value: '1'
## Total number of workers default value is 4
- name: NUMBER_OF_WORKERS
value: '3'
## enter the comma separated target nodes name
- name: TARGET_NODES
value: $WORKER_NODE


@@ -0,0 +1,35 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos
namespace: litmus
spec:
# It can be delete/retain
jobCleanUpPolicy: 'retain'
# It can be active/stop
engineState: 'active'
chaosServiceAccount: litmus-sa
experiments:
- name: node-io-stress
spec:
components:
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '10'
## specify the size as percentage of free space on the file system
- name: FILESYSTEM_UTILIZATION_PERCENTAGE
value: '100'
## Number of core of CPU
- name: CPU
value: '1'
## Total number of workers default value is 4
- name: NUMBER_OF_WORKERS
value: '3'
## enter the comma separated target nodes name
- name: TARGET_NODES
value:


@@ -0,0 +1,28 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos
namespace: litmus
spec:
# It can be delete/retain
jobCleanUpPolicy: 'retain'
# It can be active/stop
engineState: 'active'
chaosServiceAccount: litmus-sa
experiments:
- name: node-memory-hog
spec:
components:
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '10'
## Specify the size as percent of total node capacity Ex: '30'
## Note: For consuming memory in mebibytes change the variable to MEMORY_CONSUMPTION_MEBIBYTES
- name: MEMORY_CONSUMPTION_PERCENTAGE
value: '30'
# ENTER THE COMMA SEPARATED TARGET NODES NAME
- name: TARGET_NODES
value: $WORKER_NODE


@@ -0,0 +1,28 @@
apiVersion: litmuschaos.io/v1alpha1
kind: ChaosEngine
metadata:
name: nginx-chaos
namespace: litmus
spec:
# It can be delete/retain
jobCleanUpPolicy: 'retain'
# It can be active/stop
engineState: 'active'
chaosServiceAccount: litmus-sa
experiments:
- name: node-memory-hog
spec:
components:
env:
# set chaos duration (in sec) as desired
- name: TOTAL_CHAOS_DURATION
value: '10'
## Specify the size as percent of total node capacity Ex: '30'
## Note: For consuming memory in mebibytes change the variable to MEMORY_CONSUMPTION_MEBIBYTES
- name: MEMORY_CONSUMPTION_PERCENTAGE
value: '30'
# ENTER THE COMMA SEPARATED TARGET NODES NAME
- name: TARGET_NODES
value:


@@ -0,0 +1,5 @@
time_scenarios:
- action: skew_time
object_type: pod
label_selector: k8s-app=etcd
container_name: ""


@@ -0,0 +1,44 @@
apiVersion: v1
kind: PersistentVolume
metadata:
name: kraken-test-pv
labels:
type: local
spec:
storageClassName: manual
capacity:
storage: 2Gi
accessModes:
- ReadWriteOnce
hostPath:
path: "/mnt/data"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: kraken-test-pvc
spec:
storageClassName: manual
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: Pod
metadata:
name: kraken-test-pod
spec:
volumes:
- name: kraken-test-pv
persistentVolumeClaim:
claimName: kraken-test-pvc
containers:
- name: kraken-test-container
image: 'image-registry.openshift-image-registry.svc:5000/openshift/httpd:latest'
volumeMounts:
- mountPath: "/home/krake-dir/"
name: kraken-test-pv
securityContext:
privileged: true


@@ -1,16 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: container
labels:
scenario: container
spec:
hostNetwork: true
containers:
- name: fedtools
image: docker.io/fedora/tools
command:
- /bin/sh
- -c
- |
sleep infinity


@@ -1,16 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: outage
labels:
scenario: outage
spec:
hostNetwork: true
containers:
- name: fedtools
image: quay.io/krkn-chaos/krkn:tools
command:
- /bin/sh
- -c
- |
sleep infinity


@@ -1,29 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: pod-network-filter-test
labels:
app.kubernetes.io/name: pod-network-filter
spec:
containers:
- name: nginx
image: quay.io/krkn-chaos/krkn-funtests:pod-network-filter
ports:
- containerPort: 5000
name: pod-network-prt
---
apiVersion: v1
kind: Service
metadata:
name: pod-network-filter-service
spec:
selector:
app.kubernetes.io/name: pod-network-filter
type: NodePort
ports:
- name: pod-network-filter-svc
protocol: TCP
port: 80
targetPort: pod-network-prt
nodePort: 30037


@@ -1,29 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: nginx
labels:
app.kubernetes.io/name: proxy
spec:
containers:
- name: nginx
image: nginx:stable
ports:
- containerPort: 80
name: http-web-svc
---
apiVersion: v1
kind: Service
metadata:
name: nginx-service
spec:
selector:
app.kubernetes.io/name: proxy
type: NodePort
ports:
- name: name-of-service-port
protocol: TCP
port: 80
targetPort: http-web-svc
nodePort: 30036


@@ -1,16 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: time-skew
labels:
scenario: time-skew
spec:
hostNetwork: true
containers:
- name: fedtools
image: quay.io/krkn-chaos/krkn:tools
command:
- /bin/sh
- -c
- |
sleep infinity


@@ -1,26 +1,18 @@
ERRORED=false
function finish {
if [ $? != 0 ] && [ $ERRORED != "true" ]
if [ $? -eq 1 ] && [ $ERRORED != "true" ]
then
error
fi
}
function error {
exit_code=$?
if [ $exit_code == 1 ]
then
echo "Error caught."
ERRORED=true
elif [ $exit_code == 2 ]
then
echo "Run with exit code 2 detected, it is expected, wrapping the exit code with 0 to avoid pipeline failure"
exit 0
fi
echo "Error caught."
ERRORED=true
}
function get_node {
worker_node=$(kubectl get nodes --no-headers | grep worker | head -n 1)
worker_node=$(oc get nodes --no-headers | grep worker | head -n 1)
export WORKER_NODE=$worker_node
}


@@ -1 +0,0 @@

12
CI/tests/my_tests Normal file

@@ -0,0 +1,12 @@
test_pods
test_nodes
test_time
test_app_outages
test_container
test_zone
test_io_hog
test_mem_hog
test_cpu_hog
test_shut_down
test_net_chaos
test_namespace


@@ -7,20 +7,12 @@ trap finish EXIT
function functional_test_app_outage {
yq -i '.application_outage.duration=10' scenarios/openshift/app_outage.yaml
yq -i '.application_outage.pod_selector={"scenario":"outage"}' scenarios/openshift/app_outage.yaml
yq -i '.application_outage.namespace="default"' scenarios/openshift/app_outage.yaml
export scenario_type="application_outages_scenarios"
export scenario_file="scenarios/openshift/app_outage.yaml"
export scenario_type="application_outages"
export scenario_file="CI/scenarios/app_outage.yaml"
export post_config=""
kubectl get services -A
kubectl get pods
envsubst < CI/config/common_test_config.yaml > CI/config/app_outage.yaml
cat $scenario_file
cat CI/config/app_outage.yaml
python3 -m coverage run -a run_kraken.py -c CI/config/app_outage.yaml
python3 run_kraken.py -c CI/config/app_outage.yaml
echo "App outage scenario test: Success"
}


@@ -8,18 +8,14 @@ trap finish EXIT
pod_file="CI/scenarios/hello_pod.yaml"
function functional_test_container_crash {
yq -i '.scenarios[0].namespace="default"' scenarios/openshift/container_etcd.yml
yq -i '.scenarios[0].label_selector="scenario=container"' scenarios/openshift/container_etcd.yml
yq -i '.scenarios[0].container_name="fedtools"' scenarios/openshift/container_etcd.yml
export scenario_type="container_scenarios"
export scenario_file="scenarios/openshift/container_etcd.yml"
export scenario_file="- CI/scenarios/container_scenario.yml"
export post_config=""
envsubst < CI/config/common_test_config.yaml > CI/config/container_config.yaml
python3 -m coverage run -a run_kraken.py -c CI/config/container_config.yaml -d True
python3 run_kraken.py -c CI/config/container_config.yaml
echo "Container scenario test: Success"
kubectl get pods -n kube-system -l component=etcd
}
functional_test_container_crash

18
CI/tests/test_cpu_hog.sh Normal file → Executable file

@@ -6,15 +6,15 @@ trap error ERR
trap finish EXIT
function functional_test_cpu_hog {
yq -i '."node-selector"="kubernetes.io/hostname=kind-worker2"' scenarios/kube/cpu-hog.yml
function functional_test_litmus_cpu {
export scenario_type="hog_scenarios"
export scenario_file="scenarios/kube/cpu-hog.yml"
export post_config=""
envsubst < CI/config/common_test_config.yaml > CI/config/cpu_hog.yaml
python3 -m coverage run -a run_kraken.py -c CI/config/cpu_hog.yaml
echo "CPU Hog: Success"
export scenario_type="litmus_scenarios"
export scenario_file="- scenarios/templates/litmus-rbac.yaml"
export post_config="- CI/scenarios/node_cpu_hog_engine_node.yaml"
envsubst < CI/config/common_test_config.yaml > CI/config/litmus_config.yaml
envsubst < CI/scenarios/node_cpu_hog_engine.yaml > CI/scenarios/node_cpu_hog_engine_node.yaml
python3 run_kraken.py -c CI/config/litmus_config.yaml
echo "Litmus scenario $1 test: Success"
}
functional_test_cpu_hog
functional_test_litmus_cpu


@@ -1,18 +0,0 @@
set -xeEo pipefail
source CI/tests/common.sh
trap error ERR
trap finish EXIT
function functional_test_customapp_pod_node_selector {
export scenario_type="pod_disruption_scenarios"
export scenario_file="scenarios/openshift/customapp_pod.yaml"
export post_config=""
envsubst < CI/config/common_test_config.yaml > CI/config/customapp_pod_config.yaml
python3 -m coverage run -a run_kraken.py -c CI/config/customapp_pod_config.yaml -d True
echo "Pod disruption with node_label_selector test: Success"
}
functional_test_customapp_pod_node_selector

20
CI/tests/test_io_hog.sh Normal file → Executable file

@@ -5,16 +5,16 @@ source CI/tests/common.sh
trap error ERR
trap finish EXIT
function functional_test_io_hog {
yq -i '."node-selector"="kubernetes.io/hostname=kind-worker2"' scenarios/kube/io-hog.yml
export scenario_type="hog_scenarios"
export scenario_file="scenarios/kube/io-hog.yml"
export post_config=""
cat $scenario_file
envsubst < CI/config/common_test_config.yaml > CI/config/io_hog.yaml
python3 -m coverage run -a run_kraken.py -c CI/config/io_hog.yaml
echo "IO Hog: Success"
function functional_test_litmus_io {
export scenario_type="litmus_scenarios"
export scenario_file="- scenarios/templates/litmus-rbac.yaml"
export post_config="- CI/scenarios/node_io_engine_node.yaml"
envsubst < CI/config/common_test_config.yaml > CI/config/litmus_config.yaml
envsubst < CI/scenarios/node_io_engine.yaml > CI/scenarios/node_io_engine_node.yaml
python3 run_kraken.py -c CI/config/litmus_config.yaml
echo "Litmus scenario $1 test: Success"
}
functional_test_io_hog
functional_test_litmus_io

20
CI/tests/test_mem_hog.sh Executable file

@@ -0,0 +1,20 @@
set -xeEo pipefail
source CI/tests/common.sh
trap error ERR
trap finish EXIT
function functional_test_litmus_mem {
export scenario_type="litmus_scenarios"
export scenario_file="- scenarios/templates/litmus-rbac.yaml"
export post_config="- CI/scenarios/node_mem_engine_node.yaml"
envsubst < CI/config/common_test_config.yaml > CI/config/litmus_config.yaml
envsubst < CI/scenarios/node_mem_engine.yaml > CI/scenarios/node_mem_engine_node.yaml
python3 run_kraken.py -c CI/config/litmus_config.yaml
echo "Litmus scenario $1 test: Success"
}
functional_test_litmus_mem "- CI/scenarios/node_mem_engine.yaml"


@@ -1,19 +0,0 @@
set -xeEo pipefail
source CI/tests/common.sh
trap error ERR
trap finish EXIT
function functional_test_memory_hog {
yq -i '."node-selector"="kubernetes.io/hostname=kind-worker2"' scenarios/kube/memory-hog.yml
export scenario_type="hog_scenarios"
export scenario_file="scenarios/kube/memory-hog.yml"
export post_config=""
envsubst < CI/config/common_test_config.yaml > CI/config/memory_hog.yaml
python3 -m coverage run -a run_kraken.py -c CI/config/memory_hog.yaml
echo "Memory Hog: Success"
}
functional_test_memory_hog


@@ -6,14 +6,13 @@ trap error ERR
trap finish EXIT
function funtional_test_namespace_deletion {
export scenario_type="service_disruption_scenarios"
export scenario_file="scenarios/openshift/ingress_namespace.yaml"
export scenario_type="namespace_scenarios"
export scenario_file="- CI/scenarios/ingress_namespace.yaml"
export post_config=""
yq '.scenarios[0].namespace="^namespace-scenario$"' -i scenarios/openshift/ingress_namespace.yaml
yq '.scenarios[0].wait_time=30' -i scenarios/openshift/ingress_namespace.yaml
yq '.scenarios[0].action="delete"' -i scenarios/openshift/ingress_namespace.yaml
envsubst < CI/config/common_test_config.yaml > CI/config/namespace_config.yaml
python3 -m coverage run -a run_kraken.py -c CI/config/namespace_config.yaml
python3 run_kraken.py -c CI/config/namespace_config.yaml
echo $?
echo "Namespace scenario test: Success"
}


@@ -7,19 +7,12 @@ trap finish EXIT
function functional_test_network_chaos {
yq -i '.network_chaos.duration=10' scenarios/openshift/network_chaos.yaml
yq -i '.network_chaos.node_name="kind-worker2"' scenarios/openshift/network_chaos.yaml
yq -i '.network_chaos.egress.bandwidth="100mbit"' scenarios/openshift/network_chaos.yaml
yq -i 'del(.network_chaos.interfaces)' scenarios/openshift/network_chaos.yaml
yq -i 'del(.network_chaos.label_selector)' scenarios/openshift/network_chaos.yaml
yq -i 'del(.network_chaos.egress.latency)' scenarios/openshift/network_chaos.yaml
yq -i 'del(.network_chaos.egress.loss)' scenarios/openshift/network_chaos.yaml
export scenario_type="network_chaos_scenarios"
export scenario_file="scenarios/openshift/network_chaos.yaml"
export scenario_type="network_chaos"
export scenario_file="CI/scenarios/network_chaos.yaml"
export post_config=""
envsubst < CI/config/common_test_config.yaml > CI/config/network_chaos.yaml
python3 -m coverage run -a run_kraken.py -c CI/config/network_chaos.yaml
python3 run_kraken.py -c CI/config/network_chaos.yaml
echo "Network Chaos test: Success"
}


@@ -1,18 +0,0 @@
set -xeEo pipefail
source CI/tests/common.sh
trap error ERR
trap finish EXIT
function functional_test_node_stop_start {
export scenario_type="node_scenarios"
export scenario_file="scenarios/kind/node_scenarios_example.yml"
export post_config=""
envsubst < CI/config/common_test_config.yaml > CI/config/node_config.yaml
cat CI/config/node_config.yaml
python3 -m coverage run -a run_kraken.py -c CI/config/node_config.yaml
echo "Node Stop/Start scenario test: Success"
}
functional_test_node_stop_start


@@ -13,7 +13,7 @@ function funtional_test_node_crash {
export post_config=""
envsubst < CI/config/common_test_config.yaml > CI/config/node_config.yaml
python3 -m coverage run -a run_kraken.py -c CI/config/node_config.yaml
python3 run_kraken.py -c CI/config/node_config.yaml
echo "Node scenario test: Success"
}


@@ -1,20 +0,0 @@
set -xeEo pipefail
source CI/tests/common.sh
trap error ERR
trap finish EXIT
function functional_test_pod_crash {
export scenario_type="pod_disruption_scenarios"
export scenario_file="scenarios/kind/pod_etcd.yml"
export post_config=""
envsubst < CI/config/common_test_config.yaml > CI/config/pod_config.yaml
python3 -m coverage run -a run_kraken.py -c CI/config/pod_config.yaml
echo "Pod disruption scenario test: Success"
date
kubectl get pods -n kube-system -l component=etcd -o yaml
}
functional_test_pod_crash


@@ -1,28 +0,0 @@
source CI/tests/common.sh
trap error ERR
trap finish EXIT
function functional_test_pod_error {
export scenario_type="pod_disruption_scenarios"
export scenario_file="scenarios/kind/pod_etcd.yml"
export post_config=""
yq -i '.[0].config.kill=5' scenarios/kind/pod_etcd.yml
envsubst < CI/config/common_test_config.yaml > CI/config/pod_config.yaml
cat CI/config/pod_config.yaml
cat scenarios/kind/pod_etcd.yml
python3 -m coverage run -a run_kraken.py -c CI/config/pod_config.yaml
ret=$?
echo "\n\nret $ret"
if [[ $ret -ge 1 ]]; then
echo "Pod disruption error scenario test: Success"
else
echo "Pod disruption error scenario test: Failure"
exit 1
fi
}
functional_test_pod_error


@@ -1,62 +0,0 @@
function functional_pod_network_filter {
export SERVICE_URL="http://localhost:8889"
export scenario_type="network_chaos_ng_scenarios"
export scenario_file="scenarios/kube/pod-network-filter.yml"
export post_config=""
envsubst < CI/config/common_test_config.yaml > CI/config/pod_network_filter.yaml
yq -i '.[0].test_duration=10' scenarios/kube/pod-network-filter.yml
yq -i '.[0].label_selector=""' scenarios/kube/pod-network-filter.yml
yq -i '.[0].ingress=false' scenarios/kube/pod-network-filter.yml
yq -i '.[0].egress=true' scenarios/kube/pod-network-filter.yml
yq -i '.[0].target="pod-network-filter-test"' scenarios/kube/pod-network-filter.yml
yq -i '.[0].protocols=["tcp"]' scenarios/kube/pod-network-filter.yml
yq -i '.[0].ports=[443]' scenarios/kube/pod-network-filter.yml
yq -i '.performance_monitoring.check_critical_alerts=False' CI/config/pod_network_filter.yaml
## Test webservice deployment
kubectl apply -f ./CI/templates/pod_network_filter.yaml
COUNTER=0
while true
do
curl $SERVICE_URL
EXITSTATUS=$?
if [ "$EXITSTATUS" -eq "0" ]
then
break
fi
sleep 1
COUNTER=$((COUNTER+1))
[ $COUNTER -eq "100" ] && echo "maximum number of retry reached, test failed" && exit 1
done
cat scenarios/kube/pod-network-filter.yml
python3 -m coverage run -a run_kraken.py -c CI/config/pod_network_filter.yaml > krkn_pod_network.out 2>&1 &
PID=$!
# wait until the dns resolution starts failing and the service returns 400
DNS_FAILURE_STATUS=0
while true
do
OUT_STATUS_CODE=$(curl -X GET -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL)
if [ "$OUT_STATUS_CODE" -eq "404" ]
then
DNS_FAILURE_STATUS=404
fi
if [ "$DNS_FAILURE_STATUS" -eq "404" ] && [ "$OUT_STATUS_CODE" -eq "200" ]
then
echo "service restored"
break
fi
COUNTER=$((COUNTER+1))
[ $COUNTER -eq "100" ] && echo "maximum number of retry reached, test failed" && exit 1
sleep 2
done
wait $PID
}
functional_pod_network_filter


@@ -1,35 +0,0 @@
set -xeEo pipefail
source CI/tests/common.sh
trap error ERR
trap finish EXIT
function functional_test_pod_server {
export scenario_type="pod_disruption_scenarios"
export scenario_file="scenarios/kind/pod_etcd.yml"
export post_config=""
envsubst < CI/config/common_test_config.yaml > CI/config/pod_config.yaml
yq -i '.[0].config.kill=1' scenarios/kind/pod_etcd.yml
yq -i '.tunings.daemon_mode=True' CI/config/pod_config.yaml
cat CI/config/pod_config.yaml
python3 -m coverage run -a run_kraken.py -c CI/config/pod_config.yaml &
sleep 15
curl -X POST http:/0.0.0.0:8081/STOP
wait
yq -i '.kraken.signal_state="PAUSE"' CI/config/pod_config.yaml
yq -i '.tunings.daemon_mode=False' CI/config/pod_config.yaml
cat CI/config/pod_config.yaml
python3 -m coverage run -a run_kraken.py -c CI/config/pod_config.yaml &
sleep 5
curl -X POST http:/0.0.0.0:8081/RUN
wait
echo "Pod disruption with server scenario test: Success"
}
functional_test_pod_server

19
CI/tests/test_pods.sh Executable file

@@ -0,0 +1,19 @@
set -xeEo pipefail
source CI/tests/common.sh
trap error ERR
trap finish EXIT
function funtional_test_pod_deletion {
export scenario_type="pod_scenarios"
export scenario_file="- CI/scenarios/hello_pod_killing.yml"
export post_config=""
envsubst < CI/config/common_test_config.yaml > CI/config/pod_config.yaml
python3 run_kraken.py -c CI/config/pod_config.yaml
echo $?
echo "Pod scenario test: Success"
}
funtional_test_pod_deletion


@@ -1,18 +0,0 @@
set -xeEo pipefail
source CI/tests/common.sh
trap error ERR
trap finish EXIT
function functional_test_pvc_fill {
export scenario_type="pvc_scenarios"
export scenario_file="scenarios/kind/pvc_scenario.yaml"
export post_config=""
envsubst < CI/config/common_test_config.yaml > CI/config/pvc_config.yaml
cat CI/config/pvc_config.yaml
python3 -m coverage run -a run_kraken.py -c CI/config/pvc_config.yaml --debug True
echo "PVC Fill scenario test: Success"
}
functional_test_pvc_fill


@@ -1,119 +0,0 @@
set -xeEo pipefail
source CI/tests/common.sh
trap error ERR
trap finish EXIT
# port mapping has been configured in kind-config.yml
SERVICE_URL=http://localhost:8888
PAYLOAD_GET_1="{ \
\"status\":\"internal server error\" \
}"
STATUS_CODE_GET_1=500
PAYLOAD_PATCH_1="resource patched"
STATUS_CODE_PATCH_1=201
PAYLOAD_POST_1="{ \
\"status\": \"unauthorized\" \
}"
STATUS_CODE_POST_1=401
PAYLOAD_GET_2="{ \
\"status\":\"resource created\" \
}"
STATUS_CODE_GET_2=201
PAYLOAD_PATCH_2="bad request"
STATUS_CODE_PATCH_2=400
PAYLOAD_POST_2="not found"
STATUS_CODE_POST_2=404
JSON_MIME="application/json"
TEXT_MIME="text/plain; charset=utf-8"
function functional_test_service_hijacking {
export scenario_type="service_hijacking_scenarios"
export scenario_file="scenarios/kube/service_hijacking.yaml"
export post_config=""
envsubst < CI/config/common_test_config.yaml > CI/config/service_hijacking.yaml
python3 -m coverage run -a run_kraken.py -c CI/config/service_hijacking.yaml > /tmp/krkn.log 2>&1 &
PID=$!
# Wait for the hijacking to take effect
COUNTER=0
while [ `curl -X GET -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/list/index.php` == 404 ]
do
echo "waiting scenario to kick in."
sleep 1
COUNTER=$((COUNTER+1))
[ $COUNTER -eq "100" ] && echo "maximum number of retry reached, test failed" && exit 1
done
#Checking Step 1 GET on /list/index.php
OUT_GET="`curl -X GET -s $SERVICE_URL/list/index.php`"
OUT_CONTENT=`curl -X GET -s -o /dev/null -I -w "%{content_type}" $SERVICE_URL/list/index.php`
OUT_STATUS_CODE=`curl -X GET -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/list/index.php`
[ "${PAYLOAD_GET_1//[$'\t\r\n ']}" == "${OUT_GET//[$'\t\r\n ']}" ] && echo "Step 1 GET Payload OK" || (echo "Payload did not match. Test failed." && exit 1)
[ "$OUT_STATUS_CODE" == "$STATUS_CODE_GET_1" ] && echo "Step 1 GET Status Code OK" || (echo " Step 1 GET status code did not match. Test failed." && exit 1)
[ "$OUT_CONTENT" == "$JSON_MIME" ] && echo "Step 1 GET MIME OK" || (echo " Step 1 GET MIME did not match. Test failed." && exit 1)
#Checking Step 1 POST on /list/index.php
OUT_POST="`curl -s -X POST $SERVICE_URL/list/index.php`"
OUT_STATUS_CODE=`curl -X POST -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/list/index.php`
OUT_CONTENT=`curl -X POST -s -o /dev/null -I -w "%{content_type}" $SERVICE_URL/list/index.php`
[ "${PAYLOAD_POST_1//[$'\t\r\n ']}" == "${OUT_POST//[$'\t\r\n ']}" ] && echo "Step 1 POST Payload OK" || (echo "Payload did not match. Test failed." && exit 1)
[ "$OUT_STATUS_CODE" == "$STATUS_CODE_POST_1" ] && echo "Step 1 POST Status Code OK" || (echo "Step 1 POST status code did not match. Test failed." && exit 1)
[ "$OUT_CONTENT" == "$JSON_MIME" ] && echo "Step 1 POST MIME OK" || (echo " Step 1 POST MIME did not match. Test failed." && exit 1)
#Checking Step 1 PATCH on /patch
OUT_PATCH="`curl -s -X PATCH $SERVICE_URL/patch`"
OUT_STATUS_CODE=`curl -X PATCH -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/patch`
OUT_CONTENT=`curl -X PATCH -s -o /dev/null -I -w "%{content_type}" $SERVICE_URL/patch`
[ "${PAYLOAD_PATCH_1//[$'\t\r\n ']}" == "${OUT_PATCH//[$'\t\r\n ']}" ] && echo "Step 1 PATCH Payload OK" || (echo "Payload did not match. Test failed." && exit 1)
[ "$OUT_STATUS_CODE" == "$STATUS_CODE_PATCH_1" ] && echo "Step 1 PATCH Status Code OK" || (echo "Step 1 PATCH status code did not match. Test failed." && exit 1)
[ "$OUT_CONTENT" == "$TEXT_MIME" ] && echo "Step 1 PATCH MIME OK" || (echo " Step 1 PATCH MIME did not match. Test failed." && exit 1)
# wait for the next step
sleep 16
#Checking Step 2 GET on /list/index.php
OUT_GET="`curl -X GET -s $SERVICE_URL/list/index.php`"
OUT_CONTENT=`curl -X GET -s -o /dev/null -I -w "%{content_type}" $SERVICE_URL/list/index.php`
OUT_STATUS_CODE=`curl -X GET -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/list/index.php`
[ "${PAYLOAD_GET_2//[$'\t\r\n ']}" == "${OUT_GET//[$'\t\r\n ']}" ] && echo "Step 2 GET Payload OK" || (echo "Step 2 GET Payload did not match. Test failed." && exit 1)
[ "$OUT_STATUS_CODE" == "$STATUS_CODE_GET_2" ] && echo "Step 2 GET Status Code OK" || (echo "Step 2 GET status code did not match. Test failed." && exit 1)
[ "$OUT_CONTENT" == "$JSON_MIME" ] && echo "Step 2 GET MIME OK" || (echo " Step 2 GET MIME did not match. Test failed." && exit 1)
#Checking Step 2 POST on /list/index.php
OUT_POST="`curl -s -X POST $SERVICE_URL/list/index.php`"
OUT_CONTENT=`curl -X POST -s -o /dev/null -I -w "%{content_type}" $SERVICE_URL/list/index.php`
OUT_STATUS_CODE=`curl -X POST -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/list/index.php`
[ "${PAYLOAD_POST_2//[$'\t\r\n ']}" == "${OUT_POST//[$'\t\r\n ']}" ] && echo "Step 2 POST Payload OK" || (echo "Step 2 POST Payload did not match. Test failed." && exit 1)
[ "$OUT_STATUS_CODE" == "$STATUS_CODE_POST_2" ] && echo "Step 2 POST Status Code OK" || (echo "Step 2 POST status code did not match. Test failed." && exit 1)
[ "$OUT_CONTENT" == "$TEXT_MIME" ] && echo "Step 2 POST MIME OK" || (echo " Step 2 POST MIME did not match. Test failed." && exit 1)
#Checking Step 2 PATCH on /patch
OUT_PATCH="`curl -s -X PATCH $SERVICE_URL/patch`"
OUT_CONTENT=`curl -X PATCH -s -o /dev/null -I -w "%{content_type}" $SERVICE_URL/patch`
OUT_STATUS_CODE=`curl -X PATCH -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/patch`
[ "${PAYLOAD_PATCH_2//[$'\t\r\n ']}" == "${OUT_PATCH//[$'\t\r\n ']}" ] && echo "Step 2 PATCH Payload OK" || (echo "Step 2 PATCH Payload did not match. Test failed." && exit 1)
[ "$OUT_STATUS_CODE" == "$STATUS_CODE_PATCH_2" ] && echo "Step 2 PATCH Status Code OK" || (echo "Step 2 PATCH status code did not match. Test failed." && exit 1)
[ "$OUT_CONTENT" == "$TEXT_MIME" ] && echo "Step 2 PATCH MIME OK" || (echo " Step 2 PATCH MIME did not match. Test failed." && exit 1)
wait $PID
cat /tmp/krkn.log
# now check that the service has been restored correctly and nginx responds as expected
curl -s $SERVICE_URL | grep nginx! && echo "BODY: Service restored!" || (echo "BODY: failed to restore service" && exit 1)
OUT_STATUS_CODE=`curl -X GET -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL`
[ "$OUT_STATUS_CODE" == "200" ] && echo "STATUS_CODE: Service restored!" || (echo "STATUS_CODE: failed to restore service" && exit 1)
echo "Service Hijacking Chaos test: Success"
}
functional_test_service_hijacking

View File

@@ -12,7 +12,7 @@ function functional_test_shut_down {
export scenario_file="- CI/scenarios/cluster_shut_down_scenario.yml"
export post_config=""
envsubst < CI/config/common_test_config.yaml > CI/config/shut_down.yaml
python3 -m coverage run -a run_kraken.py -c CI/config/shut_down.yaml
python3 run_kraken.py -c CI/config/shut_down.yaml
echo "Cluster shut down scenario test: Success"
}

View File

@@ -1,37 +0,0 @@
set -xeEo pipefail
source CI/tests/common.sh
trap error ERR
trap finish EXIT
function functional_test_telemetry {
AWS_CLI=`which aws`
[ -z "$AWS_CLI" ]&& echo "AWS cli not found in path" && exit 1
[ -z "$AWS_BUCKET" ] && echo "AWS bucket not set in environment" && exit 1
export RUN_TAG="funtest-telemetry"
yq -i '.telemetry.enabled=True' CI/config/common_test_config.yaml
yq -i '.telemetry.full_prometheus_backup=True' CI/config/common_test_config.yaml
yq -i '.performance_monitoring.check_critical_alerts=True' CI/config/common_test_config.yaml
yq -i '.performance_monitoring.prometheus_url="http://localhost:9090"' CI/config/common_test_config.yaml
yq -i '.telemetry.run_tag=env(RUN_TAG)' CI/config/common_test_config.yaml
export scenario_type="pod_disruption_scenarios"
export scenario_file="scenarios/kind/pod_etcd.yml"
export post_config=""
envsubst < CI/config/common_test_config.yaml > CI/config/telemetry.yaml
retval=$(python3 -m coverage run -a run_kraken.py -c CI/config/telemetry.yaml)
RUN_FOLDER=`cat CI/out/test_telemetry.out | grep amazonaws.com | sed -rn "s#.*https:\/\/.*\/files/(.*)#\1#p"`
$AWS_CLI s3 ls "s3://$AWS_BUCKET/$RUN_FOLDER/" | awk '{ print $4 }' > s3_remote_files
echo "checking if telemetry files are uploaded on s3"
cat s3_remote_files | grep critical-alerts-00.log || ( echo "FAILED: critical-alerts-00.log not uploaded" && exit 1 )
cat s3_remote_files | grep prometheus-00.tar || ( echo "FAILED: prometheus backup not uploaded" && exit 1 )
cat s3_remote_files | grep telemetry.json || ( echo "FAILED: telemetry.json not uploaded" && exit 1 )
echo "all files uploaded!"
echo "Telemetry Collection: Success"
}
functional_test_telemetry

View File

@@ -7,16 +7,12 @@ trap finish EXIT
function functional_test_time_scenario {
yq -i '.time_scenarios[0].label_selector="scenario=time-skew"' scenarios/openshift/time_scenarios_example.yml
yq -i '.time_scenarios[0].container_name=""' scenarios/openshift/time_scenarios_example.yml
yq -i '.time_scenarios[0].namespace="default"' scenarios/openshift/time_scenarios_example.yml
yq -i '.time_scenarios[1].label_selector="kubernetes.io/hostname=kind-worker2"' scenarios/openshift/time_scenarios_example.yml
export scenario_type="time_scenarios"
export scenario_file="scenarios/openshift/time_scenarios_example.yml"
export scenario_file="CI/scenarios/time_scenarios.yml"
export post_config=""
envsubst < CI/config/common_test_config.yaml > CI/config/time_config.yaml
python3 -m coverage run -a run_kraken.py -c CI/config/time_config.yaml
python3 run_kraken.py -c CI/config/time_config.yaml
echo "Time scenario test: Success"
}

View File

@@ -13,7 +13,7 @@ function functional_test_zone_crash {
export post_config=""
envsubst < CI/config/common_test_config.yaml > CI/config/zone3_config.yaml
envsubst < CI/scenarios/zone_outage.yaml > CI/scenarios/zone_outage_env.yaml
python3 -m coverage run -a run_kraken.py -c CI/config/zone3_config.yaml
python3 run_kraken.py -c CI/config/zone3_config.yaml
echo "zone3 scenario test: Success"
}

273
CLAUDE.md
View File

@@ -1,273 +0,0 @@
# CLAUDE.md - Krkn Chaos Engineering Framework
## Project Overview
Krkn (Kraken) is a chaos engineering tool for Kubernetes/OpenShift clusters. It injects deliberate failures to validate cluster resilience. Plugin-based architecture with multi-cloud support (AWS, Azure, GCP, IBM Cloud, VMware, Alibaba, OpenStack).
## Repository Structure
```
krkn/
├── krkn/
│ ├── scenario_plugins/ # Chaos scenario plugins (pod, node, network, hogs, etc.)
│ ├── utils/ # Utility functions
│ ├── rollback/ # Rollback management
│ ├── prometheus/ # Prometheus integration
│ └── cerberus/ # Health monitoring
├── tests/ # Unit tests (unittest framework)
├── scenarios/ # Example scenario configs (openshift/, kube/, kind/)
├── config/ # Configuration files
└── CI/ # CI/CD test scripts
```
## Quick Start
```bash
# Setup (ALWAYS use virtual environment)
python3 -m venv venv
source venv/bin/activate
pip install -r requirements.txt
# Run Krkn
python run_kraken.py --config config/config.yaml
# Note: Scenarios are specified in config.yaml under kraken.chaos_scenarios
# There is no --scenario flag; edit config/config.yaml to select scenarios
# Run tests
python -m unittest discover -s tests -v
python -m coverage run -a -m unittest discover -s tests -v
```
## Critical Requirements
### Python Environment
- **Python 3.9+** required
- **NEVER install packages globally** - always use virtual environment
- **CRITICAL**: `docker` must be <7.0 and `requests` must be <2.32 (Unix socket compatibility)
### Key Dependencies
- **krkn-lib** (5.1.13): Core library for Kubernetes/OpenShift operations
- **kubernetes** (34.1.0): Kubernetes Python client
- **docker** (<7.0), **requests** (<2.32): DO NOT upgrade without verifying compatibility
- Cloud SDKs: boto3 (AWS), azure-mgmt-* (Azure), google-cloud-compute (GCP), ibm_vpc (IBM), pyVmomi (VMware)
## Plugin Architecture (CRITICAL)
**Strictly enforced naming conventions:**
### Naming Rules
- **Module files**: Must end with `_scenario_plugin.py` and use snake_case
- Example: `pod_disruption_scenario_plugin.py`
- **Class names**: Must be CamelCase and end with `ScenarioPlugin`
- Example: `PodDisruptionScenarioPlugin`
- Must match module filename (snake_case ↔ CamelCase)
- **Directory structure**: Plugin dirs CANNOT contain "scenario" or "plugin"
- Location: `krkn/scenario_plugins/<plugin_name>/`
### Plugin Implementation
Every plugin MUST:
1. Extend `AbstractScenarioPlugin`
2. Implement `run()` method
3. Implement `get_scenario_types()` method
```python
from krkn.scenario_plugins import AbstractScenarioPlugin
class PodDisruptionScenarioPlugin(AbstractScenarioPlugin):
def run(self, config, scenarios_list, kubeconfig_path, wait_duration):
pass
def get_scenario_types(self):
return ["pod_scenarios", "pod_outage"]
```
### Creating a New Plugin
1. Create directory: `krkn/scenario_plugins/<plugin_name>/`
2. Create module: `<plugin_name>_scenario_plugin.py`
3. Create class: `<PluginName>ScenarioPlugin` extending `AbstractScenarioPlugin`
4. Implement `run()` and `get_scenario_types()`
5. Create unit test: `tests/test_<plugin_name>_scenario_plugin.py`
6. Add example scenario: `scenarios/<platform>/<scenario>.yaml`
**DO NOT**: Violate naming conventions (factory will reject), include "scenario"/"plugin" in directory names, create plugins without tests.
## Testing
### Unit Tests
```bash
# Run all tests
python -m unittest discover -s tests -v
# Specific test
python -m unittest tests.test_pod_disruption_scenario_plugin
# With coverage
python -m coverage run -a -m unittest discover -s tests -v
python -m coverage html
```
**Test requirements:**
- Naming: `test_<module>_scenario_plugin.py`
- Mock external dependencies (Kubernetes API, cloud providers)
- Test success, failure, and edge cases
- Keep tests isolated and independent
### Functional Tests
Located in `CI/tests/`. Can be run locally on a kind cluster with Prometheus and Elasticsearch set up.
**Setup for local testing:**
1. Deploy Prometheus and Elasticsearch on your kind cluster:
- Prometheus setup: https://krkn-chaos.dev/docs/developers-guide/testing-changes/#prometheus
- Elasticsearch setup: https://krkn-chaos.dev/docs/developers-guide/testing-changes/#elasticsearch
2. Or disable monitoring features in `config/config.yaml`:
```yaml
performance_monitoring:
enable_alerts: False
enable_metrics: False
check_critical_alerts: False
```
**Note:** Functional tests run automatically in CI with full monitoring enabled.
## Cloud Provider Implementations
Node chaos scenarios are cloud-specific. Each in `krkn/scenario_plugins/node_actions/<provider>_node_scenarios.py`:
- AWS, Azure, GCP, IBM Cloud, VMware, Alibaba, OpenStack, Bare Metal
Implement: stop, start, reboot, terminate instances.
**When modifying**: Maintain consistency with other providers, handle API errors, add logging, update tests.
### Adding Cloud Provider Support
1. Create: `krkn/scenario_plugins/node_actions/<provider>_node_scenarios.py`
2. Extend: `abstract_node_scenarios.AbstractNodeScenarios`
3. Implement: `stop_instances`, `start_instances`, `reboot_instances`, `terminate_instances` (a minimal sketch follows this list)
4. Add SDK to `requirements.txt`
5. Create unit test with mocked SDK
6. Add example scenario: `scenarios/openshift/<provider>_node_scenarios.yml`
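The sketch below illustrates steps 2 and 3 for a hypothetical `example_cloud` provider. The class name, constructor, and SDK calls are assumptions for illustration only; the import path simply follows the directory layout described above, so check an existing provider such as the AWS implementation for the exact interface.
```python
# Hypothetical sketch: krkn/scenario_plugins/node_actions/example_cloud_node_scenarios.py
import logging

from krkn.scenario_plugins.node_actions.abstract_node_scenarios import (
    AbstractNodeScenarios,  # assumed import path, per the layout described above
)


class ExampleCloudNodeScenarios(AbstractNodeScenarios):
    """Node chaos actions for a hypothetical 'example_cloud' provider."""

    def __init__(self, sdk_client):
        # Real implementations should match the abstract class constructor;
        # the SDK client is injected so unit tests can mock it.
        self.client = sdk_client

    def stop_instances(self, instance_ids, timeout=120):
        for instance_id in instance_ids:
            logging.info("Stopping instance %s", instance_id)
            self.client.stop(instance_id)  # assumed SDK call

    def start_instances(self, instance_ids, timeout=120):
        for instance_id in instance_ids:
            logging.info("Starting instance %s", instance_id)
            self.client.start(instance_id)  # assumed SDK call

    def reboot_instances(self, instance_ids, timeout=120):
        for instance_id in instance_ids:
            logging.info("Rebooting instance %s", instance_id)
            self.client.reboot(instance_id)  # assumed SDK call

    def terminate_instances(self, instance_ids, timeout=120):
        for instance_id in instance_ids:
            logging.info("Terminating instance %s", instance_id)
            self.client.terminate(instance_id)  # assumed SDK call
```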
## Configuration
**Main config**: `config/config.yaml`
- `kraken`: Core settings
- `cerberus`: Health monitoring
- `performance_monitoring`: Prometheus
- `elastic`: Elasticsearch telemetry
**Scenario configs**: `scenarios/` directory
```yaml
- config:
scenario_type: <type> # Must match plugin's get_scenario_types()
```
## Code Style
- **Import order**: Standard library, third-party, local imports
- **Naming**: snake_case (functions/variables), CamelCase (classes)
- **Logging**: Use Python's `logging` module
- **Error handling**: Return appropriate exit codes
- **Docstrings**: Required for public functions/classes
## Exit Codes
Krkn uses specific exit codes to communicate execution status:
- `0`: Success - all scenarios passed, no critical alerts
- `1`: Scenario failure - one or more scenarios failed
- `2`: Critical alerts fired during execution
- `3+`: Health check failure (Cerberus monitoring detected issues)
**When implementing scenarios:**
- Return `0` on success
- Return `1` on scenario-specific failures
- Propagate health check failures appropriately
- Log exit code reasons clearly (a minimal sketch follows this list)
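As a rough illustration of this convention, here is a minimal sketch of a plugin `run()` mapping outcomes to exit codes. It reuses the signature and import from the plugin example earlier in this file; the `inject_failure` and `recovered` helpers are hypothetical.
```python
import logging

from krkn.scenario_plugins import AbstractScenarioPlugin  # as in the example above


class ExampleScenarioPlugin(AbstractScenarioPlugin):
    """Hypothetical plugin showing the exit-code convention."""

    def get_scenario_types(self):
        return ["example_scenarios"]

    def run(self, config, scenarios_list, kubeconfig_path, wait_duration):
        try:
            for scenario in scenarios_list:
                self.inject_failure(scenario)  # hypothetical helper
                if not self.recovered(scenario, wait_duration):  # hypothetical helper
                    logging.error("Scenario %s failed to recover", scenario)
                    return 1  # scenario-specific failure
        except Exception:
            logging.exception("Unexpected error during chaos injection")
            return 1
        logging.info("All scenarios completed successfully")
        return 0  # success
```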
## Container Support
Krkn can run inside a container. See `containers/` directory.
**Building custom image:**
```bash
cd containers
./compile_dockerfile.sh # Generates Dockerfile from template
docker build -t krkn:latest .
```
**Running containerized:**
```bash
docker run -v ~/.kube:/root/.kube:Z \
-v $(pwd)/config:/config:Z \
-v $(pwd)/scenarios:/scenarios:Z \
krkn:latest
```
## Git Workflow
- **NEVER commit directly to main**
- **NEVER use `--force` without approval**
- **ALWAYS create feature branches**: `git checkout -b feature/description`
- **ALWAYS run tests before pushing**
**Conventional commits**: `feat:`, `fix:`, `test:`, `docs:`, `refactor:`
```bash
git checkout main && git pull origin main
git checkout -b feature/your-feature-name
# Make changes, write tests
python -m unittest discover -s tests -v
git add <specific-files>
git commit -m "feat: description"
git push -u origin feature/your-feature-name
```
## Environment Variables
- `KUBECONFIG`: Path to kubeconfig
- `AWS_*`, `AZURE_*`, `GOOGLE_APPLICATION_CREDENTIALS`: Cloud credentials
- `PROMETHEUS_URL`, `ELASTIC_URL`, `ELASTIC_PASSWORD`: Monitoring config (see the sketch below)
**NEVER commit credentials or API keys.**
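For illustration, a scenario or helper script might read these variables with fallbacks along these lines; the defaults shown are assumptions (the Prometheus URL matches the one used in the CI tests).
```python
import os

# Read common environment variables with fallbacks; the defaults are assumptions.
kubeconfig_path = os.environ.get("KUBECONFIG", os.path.expanduser("~/.kube/config"))
prometheus_url = os.environ.get("PROMETHEUS_URL", "http://localhost:9090")
elastic_url = os.environ.get("ELASTIC_URL", "")
elastic_password = os.environ.get("ELASTIC_PASSWORD", "")  # never hard-code credentials
```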
## Common Pitfalls
1. Missing virtual environment - always activate venv
2. Running functional tests without cluster setup
3. Ignoring exit codes
4. Modifying krkn-lib directly (it's a separate package)
5. Upgrading docker/requests beyond version constraints
## Before Writing Code
1. Check for existing implementations
2. Review existing plugins as examples
3. Maintain consistency with cloud provider patterns
4. Plan rollback logic
5. Write tests alongside code
6. Update documentation
## When Adding Dependencies
1. Check if functionality exists in krkn-lib or current dependencies
2. Verify compatibility with existing versions
3. Pin specific versions in `requirements.txt`
4. Check for security vulnerabilities
5. Test thoroughly for conflicts
## Common Development Tasks
### Modifying Existing Plugin
1. Read plugin code and corresponding test
2. Make changes
3. Update/add unit tests
4. Run: `python -m unittest tests.test_<plugin>_scenario_plugin`
### Writing Unit Tests
1. Create: `tests/test_<module>_scenario_plugin.py`
2. Import `unittest` and plugin class
3. Mock external dependencies
4. Test success, failure, and edge cases
5. Run: `python -m unittest tests.test_<module>_scenario_plugin` (a minimal test sketch follows)
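A minimal test sketch following these steps, written against the hypothetical `ExampleScenarioPlugin` from the exit-code section above; the import path assumes the naming conventions described earlier.
```python
# tests/test_example_scenario_plugin.py -- hypothetical sketch
import unittest
from unittest.mock import MagicMock

# Assumed module path, following the plugin naming conventions above.
from krkn.scenario_plugins.example.example_scenario_plugin import ExampleScenarioPlugin


class TestExampleScenarioPlugin(unittest.TestCase):
    def setUp(self):
        self.plugin = ExampleScenarioPlugin()

    def test_scenario_types(self):
        self.assertIn("example_scenarios", self.plugin.get_scenario_types())

    def test_run_success(self):
        # Mock the hypothetical helpers so no cluster access is needed.
        self.plugin.inject_failure = MagicMock()
        self.plugin.recovered = MagicMock(return_value=True)
        self.assertEqual(self.plugin.run({}, ["scenario.yaml"], "/tmp/kubeconfig", 30), 0)

    def test_run_failure(self):
        self.plugin.inject_failure = MagicMock()
        self.plugin.recovered = MagicMock(return_value=False)
        self.assertEqual(self.plugin.run({}, ["scenario.yaml"], "/tmp/kubeconfig", 30), 1)


if __name__ == "__main__":
    unittest.main()
```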

View File

@@ -1,104 +0,0 @@
## CNCF Community Code of Conduct v1.3
Other languages available:
- [Arabic/العربية](code-of-conduct-languages/ar.md)
- [Bulgarian/Български](code-of-conduct-languages/bg.md)
- [Chinese/中文](code-of-conduct-languages/zh.md)
- [Czech/Česky](code-of-conduct-languages/cs.md)
- [Farsi/فارسی](code-of-conduct-languages/fa.md)
- [French/Français](code-of-conduct-languages/fr.md)
- [German/Deutsch](code-of-conduct-languages/de.md)
- [Hindi/हिन्दी](code-of-conduct-languages/hi.md)
- [Indonesian/Bahasa Indonesia](code-of-conduct-languages/id.md)
- [Italian/Italiano](code-of-conduct-languages/it.md)
- [Japanese/日本語](code-of-conduct-languages/jp.md)
- [Korean/한국어](code-of-conduct-languages/ko.md)
- [Polish/Polski](code-of-conduct-languages/pl.md)
- [Portuguese/Português](code-of-conduct-languages/pt.md)
- [Russian/Русский](code-of-conduct-languages/ru.md)
- [Spanish/Español](code-of-conduct-languages/es.md)
- [Turkish/Türkçe](code-of-conduct-languages/tr.md)
- [Ukrainian/Українська](code-of-conduct-languages/uk.md)
- [Vietnamese/Tiếng Việt](code-of-conduct-languages/vi.md)
### Community Code of Conduct
As contributors, maintainers, and participants in the CNCF community, and in the interest of fostering
an open and welcoming community, we pledge to respect all people who participate or contribute
through reporting issues, posting feature requests, updating documentation,
submitting pull requests or patches, attending conferences or events, or engaging in other community or project activities.
We are committed to making participation in the CNCF community a harassment-free experience for everyone, regardless of age, body size, caste, disability, ethnicity, level of experience, family status, gender, gender identity and expression, marital status, military or veteran status, nationality, personal appearance, race, religion, sexual orientation, socioeconomic status, tribe, or any other dimension of diversity.
## Scope
This code of conduct applies:
* within project and community spaces,
* in other spaces when an individual CNCF community participant's words or actions are directed at or are about a CNCF project, the CNCF community, or another CNCF community participant.
### CNCF Events
CNCF events that are produced by the Linux Foundation with professional events staff are governed by the Linux Foundation [Events Code of Conduct](https://events.linuxfoundation.org/code-of-conduct/) available on the event page. This is designed to be used in conjunction with the CNCF Code of Conduct.
## Our Standards
The CNCF Community is open, inclusive and respectful. Every member of our community has the right to have their identity respected.
Examples of behavior that contributes to a positive environment include but are not limited to:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
* Using welcoming and inclusive language
Examples of unacceptable behavior include but are not limited to:
* The use of sexualized language or imagery
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment in any form
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Violence, threatening violence, or encouraging others to engage in violent behavior
* Stalking or following someone without their consent
* Unwelcome physical contact
* Unwelcome sexual or romantic attention or advances
* Other conduct which could reasonably be considered inappropriate in a
professional setting
The following behaviors are also prohibited:
* Providing knowingly false or misleading information in connection with a Code of Conduct investigation or otherwise intentionally tampering with an investigation.
* Retaliating against a person because they reported an incident or provided information about an incident as a witness.
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct.
By adopting this Code of Conduct, project maintainers commit themselves to fairly and consistently applying these principles to every aspect
of managing a CNCF project.
Project maintainers who do not follow or enforce the Code of Conduct may be temporarily or permanently removed from the project team.
## Reporting
For incidents occurring in the Kubernetes community, contact the [Kubernetes Code of Conduct Committee](https://git.k8s.io/community/committee-code-of-conduct) via <conduct@kubernetes.io>. You can expect a response within three business days.
For other projects, or for incidents that are project-agnostic or impact multiple CNCF projects, please contact the [CNCF Code of Conduct Committee](https://www.cncf.io/conduct/committee/) via <conduct@cncf.io>. Alternatively, you can contact any of the individual members of the [CNCF Code of Conduct Committee](https://www.cncf.io/conduct/committee/) to submit your report. For more detailed instructions on how to submit a report, including how to submit a report anonymously, please see our [Incident Resolution Procedures](https://github.com/cncf/foundation/blob/main/code-of-conduct/coc-incident-resolution-procedures.md). You can expect a response within three business days.
For incidents occurring at a CNCF event that is produced by the Linux Foundation, please contact <eventconduct@cncf.io>.
## Enforcement
Upon review and investigation of a reported incident, the CoC response team that has jurisdiction will determine what action is appropriate based on this Code of Conduct and its related documentation.
For information about which Code of Conduct incidents are handled by project leadership, which incidents are handled by the CNCF Code of Conduct Committee, and which incidents are handled by the Linux Foundation (including its events team), see our [Jurisdiction Policy](https://github.com/cncf/foundation/blob/main/code-of-conduct/coc-committee-jurisdiction-policy.md).
## Amendments
Consistent with the CNCF Charter, any substantive changes to this Code of Conduct must be approved by the Technical Oversight Committee.
## Acknowledgements
This Code of Conduct is adapted from the Contributor Covenant
(http://contributor-covenant.org), version 2.0 available at
http://contributor-covenant.org/version/2/0/code_of_conduct/

View File

@@ -1,83 +0,0 @@
The governance model adopted here is heavily influenced by a set of CNCF projects and draws in
particular on [Kubernetes governance](https://github.com/kubernetes/community/blob/master/governance.md).
*For similar structures, some of the wording from the Kubernetes governance document is borrowed to
preserve its originally intended meaning.*
## Principles
- **Open**: Krkn is an open source community.
- **Welcoming and respectful**: See [Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
- **Transparent and accessible**: Work and collaboration should be done in public.
Changes to the Krkn organization, Krkn code repositories, and CNCF-related activities (e.g.,
level of involvement) are made in public.
- **Merit**: Ideas and contributions are accepted according to their technical merit
and alignment with project objectives, scope and design principles.
## Code of Conduct
Krkn follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
Here is an excerpt:
> As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities.
## Maintainer Levels
### Contributor
Contributors contribute to the community. Anyone can become a contributor by participating in discussions, reporting bugs, or contributing code or documentation.
#### Responsibilities:
Be active in the community and adhere to the Code of Conduct.
Report bugs and suggest new features.
Contribute high-quality code and documentation.
### Member
Members are active contributors to the community. Members have demonstrated a strong understanding of the project's codebase and conventions.
#### Responsibilities:
Review pull requests for correctness, quality, and adherence to project standards.
Provide constructive and timely feedback to contributors.
Ensure that all contributions are well-tested and documented.
Work with maintainers to ensure a smooth and efficient release process.
### Maintainer
Maintainers are responsible for the overall health and direction of the project. They are long-standing contributors who have shown a deep commitment to the project's success.
#### Responsibilities:
Set the technical direction and vision for the project.
Manage releases and ensure the stability of the main branch.
Make decisions on feature inclusion and project priorities.
Mentor other contributors and help grow the community.
Resolve disputes and make final decisions when consensus cannot be reached.
### Owner
Owners have administrative access to the project and are the final decision-makers.
#### Responsibilities:
Manage the core team of maintainers and approvers.
Set the overall vision and strategy for the project.
Handle administrative tasks, such as managing the project's repository and other resources.
Represent the project in the broader open-source community.
## Credits
Sections of this document have been borrowed from [Kubernetes governance](https://github.com/kubernetes/community/blob/master/governance.md)

View File

@@ -1,34 +0,0 @@
## Overview
This document lists the maintainers and committers of the Krkn project.
In short, maintainers are the people in charge of the maintenance of the Krkn project, while committers are active community members who have shown that they are committed to its continuous development through ongoing engagement with the community.
For detailed description of the roles, see [Governance](./GOVERNANCE.md) page.
## Current Maintainers
| Maintainer | GitHub ID | Email | Contribution Level |
|---------------------| --------------------------------------------------------- | ----------------------- | ---------------------- |
| Ravi Elluri | [chaitanyaenr](https://github.com/chaitanyaenr) | nelluri@redhat.com | Owner |
| Pradeep Surisetty | [psuriset](https://github.com/psuriset) | psuriset@redhat.com | Owner |
| Paige Patton | [paigerube14](https://github.com/paigerube14) | prubenda@redhat.com | Maintainer |
| Tullio Sebastiani | [tsebastiani](https://github.com/tsebastiani) | tsebasti@redhat.com | Maintainer |
| Yogananth Subramanian | [yogananth-subramanian](https://github.com/yogananth-subramanian) | ysubrama@redhat.com |Maintainer |
| Sahil Shah | [shahsahil264](https://github.com/shahsahil264) | sahshah@redhat.com | Member |
Note: It is mandatory for all Krkn community members to follow our [Code of Conduct](./CODE_OF_CONDUCT.md).
## Contributor Ladder
This project follows a contributor ladder model, where contributors can take on more responsibilities as they gain experience and demonstrate their commitment to the project.
The roles are:
* Contributor: A contributor to the community whether it be with code, docs or issues
* Member: A contributor who is active in the community and reviews pull requests.
* Maintainer: A contributor who is responsible for the overall health and direction of the project.
* Owner: A contributor who has administrative ownership of the project.

120
README.md
View File

@@ -1,43 +1,127 @@
# Krkn aka Kraken
![Workflow-Status](https://github.com/krkn-chaos/krkn/actions/workflows/docker-image.yml/badge.svg)
![coverage](https://krkn-chaos.github.io/krkn-lib-docs/coverage_badge_krkn.svg)
![action](https://github.com/krkn-chaos/krkn/actions/workflows/tests.yml/badge.svg)
[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/10548/badge)](https://www.bestpractices.dev/projects/10548)
[![Docker Repository on Quay](https://quay.io/repository/chaos-kubox/krkn?tab=tags&tag=latest "Docker Repository on Quay")](https://quay.io/chaos-kubox/krkn)
![Krkn logo](media/logo.png)
Chaos and resiliency testing tool for Kubernetes.
Kraken injects deliberate failures into Kubernetes clusters to check if it is resilient to turbulent conditions.
Chaos and resiliency testing tool for Kubernetes and OpenShift.
Kraken injects deliberate failures into Kubernetes/OpenShift clusters to check if it is resilient to turbulent conditions.
### Workflow
![Kraken workflow](media/kraken-workflow.png)
![Kraken workflow](media/kraken-workflow.png)
### Demo
[![Kraken demo](media/KrakenStarting.png)](https://youtu.be/LN-fZywp_mo "Kraken Demo - Click to Watch!")
<!-- ### Demo
[![Kraken demo](media/KrakenStarting.png)](https://youtu.be/LN-fZywp_mo "Kraken Demo - Click to Watch!") -->
### Chaos Testing Guide
[Guide](docs/index.md) encapsulates:
- Test methodology that needs to be embraced.
- Best practices that an OpenShift cluster, platform and applications running on top of it should take into account for best user experience, performance, resilience and reliability.
- Tooling.
- Scenarios supported.
- Test environment recommendations as to how and where to run chaos tests.
- Chaos testing in practice.
The guide is hosted at [https://chaos-kubox.github.io/krkn/](https://chaos-kubox.github.io/krkn/).
### How to Get Started
Instructions on how to setup, configure and run Kraken can be found in the [documentation](https://krkn-chaos.dev/docs/).
Instructions on how to setup, configure and run Kraken can be found at [Installation](docs/installation.md).
See the [getting started doc](docs/getting_started.md) for support on how to get started with your own custom scenario or on editing current scenarios for your specific usage.
After installation, refer back to the below sections for supported scenarios and how to tweak the kraken config to load them on your cluster.
### Blogs, podcasts and interviews
Additional resources, including blog posts, podcasts, and community interviews, can be found on the [website](https://krkn-chaos.dev/blog)
#### Running Kraken with minimal configuration tweaks
For cases where you want to run Kraken with minimal configuration changes, refer to [Kraken-hub](https://github.com/chaos-kubox/krkn-hub). One use case is CI integration where you do not want to carry around different configuration files for the scenarios.
### Setting up infrastructure dependencies
Kraken indexes the metrics specified in the profile into Elasticsearch in addition to leveraging Cerberus for understanding the health of the Kubernetes/OpenShift cluster under test. More information on the features is documented below. The infrastructure pieces can be easily installed and uninstalled by running:
```
$ cd kraken
$ podman-compose up or $ docker-compose up # Spins up the containers specified in the docker-compose.yml file present in the run directory.
$ podman-compose down or $ docker-compose down # Delete the containers installed.
```
This will manage the Cerberus and Elasticsearch containers on the host on which you are running Kraken.
**NOTE**: Make sure you have enough resources (memory and disk) on the machine the containers are running on, as Elasticsearch is resource intensive. Cerberus monitors the system components by default; the [config](config/cerberus.yaml) can be tweaked to add application namespaces, routes and other components to monitor as well. The command will keep running until killed since detached mode is not supported as of now.
### Config
Instructions on how to setup the config and the options supported can be found at [Config](docs/config.md).
### Kubernetes/OpenShift chaos scenarios supported
Scenario type | Kubernetes | OpenShift
--------------------------- | ------------- | -------------------- |
[Pod Scenarios](docs/pod_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: |
[Container Scenarios](docs/container_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: |
[Node Scenarios](docs/node_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: |
[Time Scenarios](docs/time_scenarios.md) | :x: | :heavy_check_mark: |
[Litmus Scenarios](docs/litmus_scenarios.md) | :x: | :heavy_check_mark: |
[Cluster Shut Down Scenarios](docs/cluster_shut_down_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: |
[Namespace Scenarios](docs/namespace_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: |
[Zone Outage Scenarios](docs/zone_outage.md) | :heavy_check_mark: | :heavy_check_mark: |
[Application_outages](docs/application_outages.md) | :heavy_check_mark: | :heavy_check_mark: |
[PVC scenario](docs/pvc_scenario.md) | :heavy_check_mark: | :heavy_check_mark: |
[Network_Chaos](docs/network_chaos.md) | :heavy_check_mark: | :heavy_check_mark: |
### Kraken scenario pass/fail criteria and report
It is important to check whether the targeted component recovered from the chaos injection and whether the Kubernetes/OpenShift cluster is healthy, as failures in one component can have an adverse impact on other components. Kraken does this by:
- Having built in checks for pod and node based scenarios to ensure the expected number of replicas and nodes are up. It also supports running custom scripts with the checks.
- Leveraging [Cerberus](https://github.com/openshift-scale/cerberus) to monitor the cluster under test and consuming the aggregated go/no-go signal to determine pass/fail post chaos. It is highly recommended to turn on the Cerberus health check feature available in Kraken. Instructions on installing and setting up Cerberus can be found [here](https://github.com/openshift-scale/cerberus#installation) or it can be installed from Kraken using the [instructions](https://github.com/chaos-kubox/krkn#setting-up-infrastructure-dependencies). Once Cerberus is up and running, set cerberus_enabled to True and cerberus_url to the URL where Cerberus publishes the go/no-go signal in the Kraken config file. Cerberus can monitor [application routes](https://github.com/chaos-kubox/cerberus/blob/main/docs/config.md#watch-routes) during the chaos and fails the run if it encounters downtime, as that represents potential downtime in a customer's or user's environment as well. This is especially important during control plane chaos scenarios, including the API server, etcd, Ingress, etc. It can be enabled by setting `check_applicaton_routes: True` in the [Kraken config](https://github.com/chaos-kubox/krkn/blob/main/config/config.yaml) provided application routes are being monitored in the [cerberus config](https://github.com/chaos-kubox/krkn/blob/main/config/cerberus.yaml).
- Leveraging [kube-burner](docs/alerts.md) alerting feature to fail the runs in case of critical alerts.
### Signaling
In CI runs or any external job it is useful to stop Kraken once a certain test or state is reached. We created a way to signal Kraken to pause the chaos or stop it completely using a signal posted to a port of your choice.
For example, if a test run is loading the cluster while Kraken runs separately, we want to be able to start/stop the Kraken run based on when the test run completes or reaches a certain loaded state.
More detailed information on enabling and leveraging this feature can be found [here](docs/signal.md).
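For illustration, a minimal sketch of driving the signal endpoint from an external job is shown below. The port and the `RUN`/`STOP` endpoints mirror what the CI tests in this repository use and are assumptions here; see the signal documentation linked above for the authoritative interface.
```python
# Minimal sketch: control a running Kraken instance from an external test job.
import requests

SIGNAL_URL = "http://0.0.0.0:8081"  # default port used by the CI tests; an assumption here


def send_signal(state: str) -> None:
    """POST a signal such as RUN or STOP to the Kraken signal server."""
    response = requests.post(f"{SIGNAL_URL}/{state}", timeout=5)
    response.raise_for_status()


# Example: start Kraken paused (kraken.signal_state: PAUSE in the config),
# kick off the chaos once the workload is ready, and stop it when the test ends.
send_signal("RUN")
# ... run the load test ...
send_signal("STOP")
```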
### Performance monitoring
Monitoring the Kubernetes/OpenShift cluster to observe the impact of Kraken chaos scenarios on various components is key to finding the bottlenecks, as it is important to make sure the cluster is healthy in terms of both recovery and performance during/after the failure has been injected. Instructions on enabling it can be found [here](docs/performance_dashboards.md).
### Scraping and storing metrics long term
Kraken supports capturing metrics for the duration of the scenarios defined in the config and indexes them into Elasticsearch to store and evaluate the state of the runs long term. The indexed metrics can be visualized with the help of Grafana. It uses [Kube-burner](https://github.com/cloud-bulldozer/kube-burner) under the hood. The metrics to capture need to be defined in a metrics profile which Kraken consumes to query Prometheus (installed by default in OpenShift) with the start and end timestamp of the run. Information on enabling and leveraging this feature can be found [here](docs/metrics.md).
### Alerts
In addition to checking the recovery and health of the cluster and components under test, Kraken takes in a profile with Prometheus expressions to validate and alert on, and exits with a non-zero return code depending on the severity set. This feature can be used to determine pass/fail or to alert on abnormalities observed in the cluster based on the metrics. Information on enabling and leveraging this feature can be found [here](docs/alerts.md).
### Blogs and other useful resources
- Blog post on introduction to Kraken: https://www.openshift.com/blog/introduction-to-kraken-a-chaos-tool-for-openshift/kubernetes
- Discussion and demo on how Kraken can be leveraged to ensure OpenShift is reliable, performant and scalable: https://www.youtube.com/watch?v=s1PvupI5sD0&ab_channel=OpenShift
- Blog post emphasizing the importance of making Chaos part of Performance and Scale runs to mimic the production environments: https://www.openshift.com/blog/making-chaos-part-of-kubernetes/openshift-performance-and-scalability-tests
### Roadmap
Enhancements being planned can be found in the [roadmap](ROADMAP.md).
Following is a list of enhancements that we are planning to add support for in Kraken. Of course, any help/contributions are greatly appreciated.
- [Ability to visualize the metrics that are being captured by Kraken and stored in Elasticsearch](https://github.com/chaos-kubox/krkn/issues/124)
- Ability to shape the ingress network similar to how Kraken supports [egress traffic shaping](https://github.com/chaos-kubox/krkn/blob/main/docs/network_chaos.md) today.
- Continue to improve [Chaos Testing Guide](https://cloud-bulldozer.github.io/kraken/) in terms of adding best practices, test environment recommendations and scenarios to make sure the OpenShift platform, as well as the applications running on top of it, are resilient and performant under chaotic conditions.
- Support for running Kraken on Kubernetes distribution - see https://github.com/chaos-kubox/krkn/issues/185, https://github.com/chaos-kubox/krkn/issues/186
- Sweet logo for Kraken - see https://github.com/chaos-kubox/krkn/issues/195
### Contributions
We are always looking for more enhancements and fixes to make it better; any contributions are most welcome. Feel free to report or work on the issues filed on GitHub.
[More information on how to Contribute](https://krkn-chaos.dev/docs/contribution-guidelines/)
[More information on how to Contribute](docs/contribute.md)
If adding a new scenario or tweaking the main config, be sure to add in updates into the CI to be sure the CI is up to date.
Please read [this file](CI/README.md#adding-a-test-case) for more information on updates.
### Community
Key Members(slack_usernames/full name): paigerube14/Paige Rubendall, mffiedler/Mike Fiedler, tsebasti/Tullio Sebastiani, yogi/Yogananth Subramanian, sahil/Sahil Shah, pradeep/Pradeep Surisetty and ravielluri/Naga Ravi Chaitanya Elluri.
* [**#krkn on Kubernetes Slack**](https://kubernetes.slack.com/messages/C05SFMHRWK1)
The Linux Foundation® (TLF) has registered trademarks and uses trademarks. For a list of TLF trademarks, see [Trademark Usage](https://www.linuxfoundation.org/legal/trademark-usage).
Key Members(slack_usernames/full name): paigerube14/Paige Rubendall, mffiedler/Mike Fiedler, ravielluri/Naga Ravi Chaitanya Elluri.
* [**#sig-scalability on Kubernetes Slack**](https://kubernetes.slack.com)
* [**#forum-chaos on CoreOS Slack internal to Red Hat**](https://coreos.slack.com)

View File

@@ -1,55 +0,0 @@
### Release Protocol: The Community-First Cycle
This document outlines the project's release protocol, a methodology designed to ensure a responsive and transparent development process that is closely aligned with the needs of our users and contributors. This protocol is tailored for projects in their early stages, prioritizing agility and community feedback over a rigid, time-boxed schedule.
#### 1. Key Principles
* **Community as the Compass:** The primary driver for all development is feedback from our user and contributor community.
* **Prioritization by Impact:** Tasks are prioritized based on their impact on user experience, the urgency of bug fixes, and the value of community-contributed features.
* **Event-Driven Releases:** Releases are not bound by a fixed calendar. New versions are published when a significant body of work is complete, a critical issue is resolved, or a new feature is ready for adoption.
* **Transparency and Communication:** All development decisions, progress, and plans are communicated openly through our issue tracker, pull requests, and community channels.
#### 2. The Release Lifecycle
The release cycle is a continuous flow of activities rather than a series of sequential phases.
**2.1. Discovery & Prioritization**
* New features and bug fixes are identified through user feedback on our issue tracker, community discussions, and direct contributions.
* The core maintainers, in collaboration with the community, continuously evaluate and tag issues to create an open and dynamic backlog.
**2.2. Development & Code Review**
* Work is initiated based on the highest-priority items in the backlog.
* All code contributions are made via pull requests (PRs).
* PRs are reviewed by maintainers and other contributors to ensure code quality, adherence to project standards, and overall stability.
**2.3. Release Readiness**
A new release is considered ready when one of the following conditions is met:
* A major new feature has been completed and thoroughly tested.
* A critical security vulnerability or bug has been addressed.
* A sufficient number of smaller improvements and fixes have been merged, providing meaningful value to users.
**2.4. Versioning**
We adhere to [**Semantic Versioning 2.0.0**](https://semver.org/).
* **Major version (`X.y.z`)**: Reserved for releases that introduce breaking changes.
* **Minor version (`x.Y.z`)**: Used for new features or significant non-breaking changes.
* **Patch version (`x.y.Z`)**: Used for bug fixes and small, non-functional improvements.
#### 3. Roles and Responsibilities
* **Members:** Active community contributors who help review changes (see [MAINTAINERS.md](https://github.com/krkn-chaos/krkn/blob/main/MAINTAINERS.md)). Their duties include:
* Reviewing pull requests.
* Contributing code and documentation via pull requests.
* Engaging in discussions and providing feedback.
* **Maintainers and Owners:** The [core team](https://github.com/krkn-chaos/krkn/blob/main/MAINTAINERS.md) responsible for the project's health. Their duties include:
* Facilitating community discussions and prioritization.
* Reviewing and merging pull requests.
* Cutting and announcing official releases.
* **Contributors:** The community. Their duties include:
* Reporting bugs and suggesting new features.
* Contributing code and documentation via pull requests.
* Engaging in discussions and providing feedback.
#### 4. Adoption and Future Evolution
This protocol is designed for the current stage of the project. As the project matures and the contributor base grows, the maintainers will evaluate the need for a more structured methodology to ensure continued scalability and stability.

View File

@@ -1,20 +0,0 @@
## Krkn Roadmap
Following is a list of enhancements that we are planning to add support for in Krkn. Of course, any help/contributions are greatly appreciated.
- [x] [Ability to run multiple chaos scenarios in parallel under load to mimic real world outages](https://github.com/krkn-chaos/krkn/issues/424)
- [x] [Centralized storage for chaos experiments artifacts](https://github.com/krkn-chaos/krkn/issues/423)
- [x] [Support for causing DNS outages](https://github.com/krkn-chaos/krkn/issues/394)
- [x] [Chaos recommender](https://github.com/krkn-chaos/krkn/tree/main/utils/chaos-recommender) to suggest scenarios having probability of impacting the service under test using profiling results
- [x] Chaos AI integration to improve test coverage while reducing fault space to save costs and execution time [krkn-chaos-ai](https://github.com/krkn-chaos/krkn-chaos-ai)
- [x] [Support for pod level network traffic shaping](https://github.com/krkn-chaos/krkn/issues/393)
- [ ] [Ability to visualize the metrics that are being captured by Kraken and stored in Elasticsearch](https://github.com/krkn-chaos/krkn/issues/124)
- [x] Support for running all the scenarios of Kraken on Kubernetes distribution - see https://github.com/krkn-chaos/krkn/issues/185, https://github.com/redhat-chaos/krkn/issues/186
- [x] Continue to improve [Chaos Testing Guide](https://krkn-chaos.github.io/krkn) in terms of adding best practices, test environment recommendations and scenarios to make sure the OpenShift platform, as well as the applications running on top of it, are resilient and performant under chaotic conditions.
- [x] [Switch documentation references to Kubernetes](https://github.com/krkn-chaos/krkn/issues/495)
- [x] [OCP and Kubernetes functionalities segregation](https://github.com/krkn-chaos/krkn/issues/497)
- [x] [Krknctl - client for running Krkn scenarios with ease](https://github.com/krkn-chaos/krknctl)
- [x] [AI Chat bot to help get started with Krkn and commands](https://github.com/krkn-chaos/krkn-lightspeed)
- [ ] [Ability to roll back cluster to original state if chaos fails](https://github.com/krkn-chaos/krkn/issues/804)
- [ ] Add recovery time metrics to each scenario for better regression analysis
- [ ] [Add resiliency scoring to chaos scenarios ran on cluster](https://github.com/krkn-chaos/krkn/issues/125)

View File

@@ -1,43 +0,0 @@
# Security Policy
We attach great importance to code security. We are very grateful to users, security researchers, and anyone else who reports security vulnerabilities to the Krkn community. All reported security vulnerabilities will be carefully assessed and addressed in a timely manner.
## Security Checks
Krkn leverages [Snyk](https://snyk.io/) to ensure that any security vulnerabilities found
in the code base and dependencies are fixed and published in the latest release. Security
vulnerability checks are enabled for each pull request to enable developers to get insights
and proactively fix them.
## Reporting a Vulnerability
The Krkn project treats security vulnerabilities seriously, so we
strive to take action quickly when required.
The project requests that security issues be disclosed in a responsible
manner to allow adequate time to respond. If a security issue or
vulnerability has been found, please disclose the details to our
dedicated email address:
cncf-krkn-maintainers@lists.cncf.io
You can also use the [GitHub vulnerability report mechanism](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability) to report the security vulnerability.
Please include as much information as possible with the report. The
following details assist with analysis efforts:
- Description of the vulnerability
- Affected component (version, commit, branch etc)
- Affected code (file path, line numbers)
- Exploit code
## Security Team
The security team currently consists of the [Maintainers of Krkn](https://github.com/krkn-chaos/krkn/blob/main/MAINTAINERS.md)
## Process and Supported Releases
The Krkn security team will investigate and provide a fix in a timely manner depending on the severity. The fix will be included in a new release of Krkn and details will be included in the release notes.

View File

@@ -8,13 +8,13 @@ orchestration_user: "{{ lookup('env', 'ORCHESTRATION_USER')|default('root', true
###############################################################################
# kube config location
kubeconfig_path: "{{ lookup('env', 'KUBECONFIG_PATH')|default('~/.kube/config', true) }}"
kubeconfig_path: "{{ lookup('env', 'KUBECONFIG_PATH')|default('/root/.kube/config', true) }}"
# kraken dir location on jump host
kraken_dir: "{{ lookup('env', 'KRAKEN_DIR')|default('~/kraken', true) }}"
kraken_dir: "{{ lookup('env', 'KRAKEN_DIR')|default('/root/kraken', true) }}"
# kraken config path location
kraken_config: "{{ lookup('env', 'KRAKEN_CONFIG')|default('~/kraken/config/config.yaml', true) }}"
kraken_config: "{{ lookup('env', 'KRAKEN_CONFIG')|default('/root/kraken/config/config.yaml', true) }}"
# kraken repository location
kraken_repository: "{{ lookup('env', 'KRAKEN_REPOSITORY')|default('https://github.com/openshift-scale/kraken.git', true) }}"

11
config/alerts Normal file
View File

@@ -0,0 +1,11 @@
- expr: avg_over_time(histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[2m]))[5m:]) > 0.01
description: 5 minutes avg. etcd fsync latency on {{$labels.pod}} higher than 10ms {{$value}}
severity: error
- expr: avg_over_time(histogram_quantile(0.99, rate(etcd_network_peer_round_trip_time_seconds_bucket[5m]))[5m:]) > 0.1
description: 5 minutes avg. etcd network peer round trip on {{$labels.pod}} higher than 100ms {{$value}}
severity: info
- expr: increase(etcd_server_leader_changes_seen_total[2m]) > 0
description: etcd leader changes observed
severity: critical

View File

@@ -1,129 +0,0 @@
# etcd
- expr: avg_over_time(histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[2m]))[10m:]) > 0.01
description: 10 minutes avg. 99th etcd fsync latency on {{$labels.pod}} higher than 10ms. {{$value}}s
severity: warning
- expr: avg_over_time(histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[2m]))[10m:]) > 1
description: 10 minutes avg. 99th etcd fsync latency on {{$labels.pod}} higher than 1s. {{$value}}s
severity: error
- expr: avg_over_time(histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket[2m]))[10m:]) > 0.03
description: 10 minutes avg. 99th etcd commit latency on {{$labels.pod}} higher than 30ms. {{$value}}s
severity: warning
- expr: rate(etcd_server_leader_changes_seen_total[2m]) > 0
description: etcd leader changes observed
severity: warning
- expr: (last_over_time(etcd_mvcc_db_total_size_in_bytes[5m]) / last_over_time(etcd_server_quota_backend_bytes[5m]))*100 > 95
description: etcd cluster database is running full.
severity: critical
- expr: (last_over_time(etcd_mvcc_db_total_size_in_use_in_bytes[5m]) / last_over_time(etcd_mvcc_db_total_size_in_bytes[5m])) < 0.5
description: etcd database size in use is less than 50% of the actual allocated storage.
severity: warning
- expr: rate(etcd_server_proposals_failed_total{job=~".*etcd.*"}[15m]) > 5
description: etcd cluster has high number of proposal failures.
severity: warning
- expr: histogram_quantile(0.99, rate(etcd_network_peer_round_trip_time_seconds_bucket{job=~".*etcd.*"}[5m])) > 0.15
description: etcd cluster member communication is slow.
severity: warning
- expr: histogram_quantile(0.99, sum(rate(grpc_server_handling_seconds_bucket{job=~".*etcd.*", grpc_method!="Defragment", grpc_type="unary"}[5m])) without(grpc_type)) > 0.15
description: etcd grpc requests are slow.
severity: critical
- expr: 100 * sum(rate(grpc_server_handled_total{job=~".*etcd.*", grpc_code=~"Unknown|FailedPrecondition|ResourceExhausted|Internal|Unavailable|DataLoss|DeadlineExceeded"}[5m])) without (grpc_type, grpc_code) / sum(rate(grpc_server_handled_total{job=~".*etcd.*"}[5m])) without (grpc_type, grpc_code) > 5
description: etcd cluster has high number of failed grpc requests.
severity: critical
- expr: etcd_server_has_leader{job=~".*etcd.*"} == 0
description: etcd cluster has no leader.
severity: warning
- expr: sum(up{job=~".*etcd.*"} == bool 1) without (instance) < ((count(up{job=~".*etcd.*"}) without (instance) + 1) / 2)
description: etcd cluster has insufficient number of members.
severity: warning
- expr: max without (endpoint) ( sum without (instance) (up{job=~".*etcd.*"} == bool 0) or count without (To) ( sum without (instance) (rate(etcd_network_peer_sent_failures_total{job=~".*etcd.*"}[120s])) > 0.01 )) > 0
description: etcd cluster members are down.
severity: warning
# API server
- expr: avg_over_time(histogram_quantile(0.99, sum(irate(apiserver_request_duration_seconds_bucket{apiserver="kube-apiserver", verb=~"POST|PUT|DELETE|PATCH", subresource!~"log|exec|portforward|attach|proxy"}[2m])) by (le, resource, verb))[10m:]) > 1
description: 10 minutes avg. 99th mutating API call latency for {{$labels.verb}}/{{$labels.resource}} higher than 1 second. {{$value}}s
severity: error
- expr: avg_over_time(histogram_quantile(0.99, sum(irate(apiserver_request_duration_seconds_bucket{apiserver="kube-apiserver", verb=~"LIST|GET", subresource!~"log|exec|portforward|attach|proxy", scope="resource"}[2m])) by (le, resource, verb, scope))[5m:]) > 1
description: 5 minutes avg. 99th read-only API call latency for {{$labels.verb}}/{{$labels.resource}} in scope {{$labels.scope}} higher than 1 second. {{$value}}s
severity: error
- expr: avg_over_time(histogram_quantile(0.99, sum(irate(apiserver_request_duration_seconds_bucket{apiserver="kube-apiserver", verb=~"LIST|GET", subresource!~"log|exec|portforward|attach|proxy", scope="namespace"}[2m])) by (le, resource, verb, scope))[5m:]) > 5
description: 5 minutes avg. 99th read-only API call latency for {{$labels.verb}}/{{$labels.resource}} in scope {{$labels.scope}} higher than 5 seconds. {{$value}}s
severity: error
- expr: avg_over_time(histogram_quantile(0.99, sum(irate(apiserver_request_duration_seconds_bucket{apiserver="kube-apiserver", verb=~"LIST|GET", subresource!~"log|exec|portforward|attach|proxy", scope="cluster"}[2m])) by (le, resource, verb, scope))[5m:]) > 30
description: 5 minutes avg. 99th read-only API call latency for {{$labels.verb}}/{{$labels.resource}} in scope {{$labels.scope}} higher than 30 seconds. {{$value}}s
severity: error
# Control plane pods
- expr: up{job=~"crio|kubelet"} == 0
description: "{{$labels.node}}/{{$labels.job}} down"
severity: warning
- expr: up{job="ovnkube-node"} == 0
description: "{{$labels.instance}}/{{$labels.pod}} {{$labels.job}} down"
severity: warning
# Service sync latency
- expr: histogram_quantile(0.99, sum(rate(kubeproxy_network_programming_duration_seconds_bucket[2m])) by (le)) > 10
description: 99th Kubeproxy network programming latency higher than 10 seconds. {{$value}}s
severity: warning
# Prometheus alerts
- expr: ALERTS{severity="critical", alertstate="firing"} > 0
description: Critical prometheus alert. {{$labels.alertname}}
severity: warning
# etcd CPU and usage increase
- expr: sum(rate(container_cpu_usage_seconds_total{image!='', namespace='openshift-etcd', container='etcd'}[1m])) * 100 / sum(machine_cpu_cores) > 5
description: Etcd CPU usage increased significantly by {{$value}}%
severity: critical
# etcd memory usage increase
- expr: sum(deriv(container_memory_usage_bytes{image!='', namespace='openshift-etcd', container='etcd'}[5m])) * 100 / sum(node_memory_MemTotal_bytes) > 5
description: Etcd memory usage increased significantly by {{$value}}%
severity: critical
# Openshift API server CPU and memory usage increase
- expr: sum(rate(container_cpu_usage_seconds_total{image!='', namespace='openshift-apiserver', container='openshift-apiserver'}[1m])) * 100 / sum(machine_cpu_cores) > 5
description: openshift apiserver cpu usage increased significantly by {{$value}}%
severity: critical
- expr: (sum(deriv(container_memory_usage_bytes{namespace='openshift-apiserver', container='openshift-apiserver'}[5m]))) * 100 / sum(node_memory_MemTotal_bytes) > 5
description: openshift apiserver memory usage increased significantly by {{$value}}%
severity: critical
# Openshift kube API server CPU and memory usage increase
- expr: sum(rate(container_cpu_usage_seconds_total{image!='', namespace='openshift-kube-apiserver', container='kube-apiserver'}[1m])) * 100 / sum(machine_cpu_cores) > 5
description: openshift apiserver cpu usage increased significantly by {{$value}}%
severity: critical
- expr: (sum(deriv(container_memory_usage_bytes{namespace='openshift-kube-apiserver', container='kube-apiserver'}[5m]))) * 100 / sum(node_memory_MemTotal_bytes) > 5
description: openshift apiserver memory usage increased significantly by {{$value}}%
severity: critical
# Master node CPU usage increase
- expr: (sum((sum(deriv(pod:container_cpu_usage:sum{container="",pod!=""}[5m])) BY (namespace, pod) * on(pod, namespace) group_left(node) (node_namespace_pod:kube_pod_info:) ) * on(node) group_left(role) (max by (node) (kube_node_role{role="master"})))) * 100 / sum(machine_cpu_cores) > 5
description: master nodes cpu usage increased significantly by {{$value}}%
severity: critical
# Master nodes memory usage increase
- expr: (sum((sum(deriv(container_memory_usage_bytes{container="",pod!=""}[5m])) BY (namespace, pod) * on(pod, namespace) group_left(node) (node_namespace_pod:kube_pod_info:) ) * on(node) group_left(role) (max by (node) (kube_node_role{role="master"})))) * 100 / sum(node_memory_MemTotal_bytes) > 5
description: master nodes memory usage increased significantly by {{$value}}%
severity: critical
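Each entry in the alert profile above pairs a PromQL expression (expr) with a human-readable description and a severity; per the comments in the configs further down, severity=error is what makes a run exit non-zero. A minimal, hypothetical sketch of evaluating such a profile against the Prometheus HTTP API follows; the Prometheus URL, bearer token, and profile path are placeholders, and the exit-code handling only illustrates the documented behaviour, not the tool's actual implementation.

```python
# Hypothetical sketch: evaluate an alert profile (expr/description/severity entries)
# against the Prometheus HTTP API. URL, token and profile path are placeholders.
import sys
import yaml
import requests

PROMETHEUS_URL = "https://prometheus.example.com"   # placeholder
BEARER_TOKEN = "REPLACE_ME"                          # placeholder

def evaluate_profile(profile_path: str) -> int:
    with open(profile_path) as f:
        alerts = yaml.safe_load(f)                   # top-level list of alert entries
    headers = {"Authorization": f"Bearer {BEARER_TOKEN}"}
    exit_code = 0
    for alert in alerts:
        resp = requests.get(
            f"{PROMETHEUS_URL}/api/v1/query",
            params={"query": alert["expr"]},
            headers=headers,
            verify=False,
        )
        resp.raise_for_status()
        results = resp.json()["data"]["result"]
        if results:  # the expression returned samples, i.e. the condition fired
            print(f"[{alert['severity']}] {alert['description']}")
            if alert["severity"] == "error":
                exit_code = 1
    return exit_code

if __name__ == "__main__":
    sys.exit(evaluate_profile("config/alerts.yaml"))
```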

View File

@@ -1,139 +0,0 @@
# etcd
- expr: avg_over_time(histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[2m]))[10m:]) > 0.01
description: 10 minutes avg. 99th etcd fsync latency on {{$labels.pod}} higher than 10ms. {{$value}}s
severity: warning
- expr: avg_over_time(histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[2m]))[10m:]) > 1
description: 10 minutes avg. 99th etcd fsync latency on {{$labels.pod}} higher than 1s. {{$value}}s
severity: error
- expr: avg_over_time(histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket[2m]))[10m:]) > 0.03
description: 10 minutes avg. 99th etcd commit latency on {{$labels.pod}} higher than 30ms. {{$value}}s
severity: warning
- expr: rate(etcd_server_leader_changes_seen_total[2m]) > 0
description: etcd leader changes observed
severity: warning
- expr: (last_over_time(etcd_mvcc_db_total_size_in_bytes[5m]) / last_over_time(etcd_server_quota_backend_bytes[5m]))*100 > 95
description: etcd cluster database is running full.
severity: critical
- expr: (last_over_time(etcd_mvcc_db_total_size_in_use_in_bytes[5m]) / last_over_time(etcd_mvcc_db_total_size_in_bytes[5m])) < 0.5
description: etcd database size in use is less than 50% of the actual allocated storage.
severity: warning
- expr: rate(etcd_server_proposals_failed_total{job=~".*etcd.*"}[15m]) > 5
description: etcd cluster has high number of proposal failures.
severity: warning
- expr: histogram_quantile(0.99, rate(etcd_network_peer_round_trip_time_seconds_bucket{job=~".*etcd.*"}[5m])) > 0.15
description: etcd cluster member communication is slow.
severity: warning
- expr: histogram_quantile(0.99, sum(rate(grpc_server_handling_seconds_bucket{job=~".*etcd.*", grpc_method!="Defragment", grpc_type="unary"}[5m])) without(grpc_type)) > 0.15
description: etcd grpc requests are slow.
severity: critical
- expr: 100 * sum(rate(grpc_server_handled_total{job=~".*etcd.*", grpc_code=~"Unknown|FailedPrecondition|ResourceExhausted|Internal|Unavailable|DataLoss|DeadlineExceeded"}[5m])) without (grpc_type, grpc_code) / sum(rate(grpc_server_handled_total{job=~".*etcd.*"}[5m])) without (grpc_type, grpc_code) > 5
description: etcd cluster has high number of failed grpc requests.
severity: critical
- expr: etcd_server_has_leader{job=~".*etcd.*"} == 0
description: etcd cluster has no leader.
severity: warning
- expr: sum(up{job=~".*etcd.*"} == bool 1) without (instance) < ((count(up{job=~".*etcd.*"}) without (instance) + 1) / 2)
description: etcd cluster has insufficient number of members.
severity: warning
- expr: max without (endpoint) ( sum without (instance) (up{job=~".*etcd.*"} == bool 0) or count without (To) ( sum without (instance) (rate(etcd_network_peer_sent_failures_total{job=~".*etcd.*"}[120s])) > 0.01 )) > 0
description: etcd cluster members are down.
severity: warning
# API server
- expr: avg_over_time(histogram_quantile(0.99, sum(irate(apiserver_request_duration_seconds_bucket{apiserver="kube-apiserver", verb=~"POST|PUT|DELETE|PATCH", subresource!~"log|exec|portforward|attach|proxy"}[2m])) by (le, resource, verb))[10m:]) > 1
description: 10 minutes avg. 99th mutating API call latency for {{$labels.verb}}/{{$labels.resource}} higher than 1 second. {{$value}}s
severity: error
- expr: avg_over_time(histogram_quantile(0.99, sum(irate(apiserver_request_duration_seconds_bucket{apiserver="kube-apiserver", verb=~"LIST|GET", subresource!~"log|exec|portforward|attach|proxy", scope="resource"}[2m])) by (le, resource, verb, scope))[5m:]) > 1
description: 5 minutes avg. 99th read-only API call latency for {{$labels.verb}}/{{$labels.resource}} in scope {{$labels.scope}} higher than 1 second. {{$value}}s
severity: error
- expr: avg_over_time(histogram_quantile(0.99, sum(irate(apiserver_request_duration_seconds_bucket{apiserver="kube-apiserver", verb=~"LIST|GET", subresource!~"log|exec|portforward|attach|proxy", scope="namespace"}[2m])) by (le, resource, verb, scope))[5m:]) > 5
description: 5 minutes avg. 99th read-only API call latency for {{$labels.verb}}/{{$labels.resource}} in scope {{$labels.scope}} higher than 5 seconds. {{$value}}s
severity: error
- expr: avg_over_time(histogram_quantile(0.99, sum(irate(apiserver_request_duration_seconds_bucket{apiserver="kube-apiserver", verb=~"LIST|GET", subresource!~"log|exec|portforward|attach|proxy", scope="cluster"}[2m])) by (le, resource, verb, scope))[5m:]) > 30
description: 5 minutes avg. 99th read-only API call latency for {{$labels.verb}}/{{$labels.resource}} in scope {{$labels.scope}} higher than 30 seconds. {{$value}}s
severity: error
# Control plane pods
- expr: up{apiserver=~"kube-apiserver|openshift-apiserver"} == 0
description: "{{$labels.apiserver}} {{$labels.instance}} down"
severity: warning
- expr: up{namespace=~"openshift-etcd"} == 0
description: "{{$labels.namespace}}/{{$labels.pod}} down"
severity: warning
- expr: up{namespace=~"openshift-.*(kube-controller-manager|scheduler|controller-manager|sdn|ovn-kubernetes|dns)"} == 0
description: "{{$labels.namespace}}/{{$labels.pod}} down"
severity: warning
- expr: up{job=~"crio|kubelet"} == 0
description: "{{$labels.node}}/{{$labels.job}} down"
severity: warning
- expr: up{job="ovnkube-node"} == 0
description: "{{$labels.instance}}/{{$labels.pod}} {{$labels.job}} down"
severity: warning
# Service sync latency
- expr: histogram_quantile(0.99, sum(rate(kubeproxy_network_programming_duration_seconds_bucket[2m])) by (le)) > 10
description: 99th Kubeproxy network programming latency higher than 10 seconds. {{$value}}s
severity: warning
# Prometheus alerts
- expr: ALERTS{severity="critical", alertstate="firing"} > 0
description: Critical prometheus alert. {{$labels.alertname}}
severity: warning
# etcd CPU and usage increase
- expr: sum(rate(container_cpu_usage_seconds_total{image!='', namespace='openshift-etcd', container='etcd'}[1m])) * 100 / sum(machine_cpu_cores) > 5
description: Etcd CPU usage increased significantly by {{$value}}%
severity: critical
# etcd memory usage increase
- expr: sum(deriv(container_memory_usage_bytes{image!='', namespace='openshift-etcd', container='etcd'}[5m])) * 100 / sum(node_memory_MemTotal_bytes) > 5
description: Etcd memory usage increased significantly by {{$value}}%
severity: critical
# Openshift API server CPU and memory usage increase
- expr: sum(rate(container_cpu_usage_seconds_total{image!='', namespace='openshift-apiserver', container='openshift-apiserver'}[1m])) * 100 / sum(machine_cpu_cores) > 5
description: openshift apiserver cpu usage increased significantly by {{$value}}%
severity: critical
- expr: (sum(deriv(container_memory_usage_bytes{namespace='openshift-apiserver', container='openshift-apiserver'}[5m]))) * 100 / sum(node_memory_MemTotal_bytes) > 5
description: openshift apiserver memory usage increased significantly by {{$value}}%
severity: critical
# Openshift kube API server CPU and memory usage increase
- expr: sum(rate(container_cpu_usage_seconds_total{image!='', namespace='openshift-kube-apiserver', container='kube-apiserver'}[1m])) * 100 / sum(machine_cpu_cores) > 5
description: openshift apiserver cpu usage increased significantly by {{$value}}%
severity: critical
- expr: (sum(deriv(container_memory_usage_bytes{namespace='openshift-kube-apiserver', container='kube-apiserver'}[5m]))) * 100 / sum(node_memory_MemTotal_bytes) > 5
description: openshift apiserver memory usage increased significantly by {{$value}}%
severity: critical
# Master node CPU usage increase
- expr: (sum((sum(deriv(pod:container_cpu_usage:sum{container="",pod!=""}[5m])) BY (namespace, pod) * on(pod, namespace) group_left(node) (node_namespace_pod:kube_pod_info:) ) * on(node) group_left(role) (max by (node) (kube_node_role{role="master"})))) * 100 / sum(machine_cpu_cores) > 5
description: master nodes cpu usage increased significantly by {{$value}}%
severity: critical
# Master nodes memory usage increase
- expr: (sum((sum(deriv(container_memory_usage_bytes{container="",pod!=""}[5m])) BY (namespace, pod) * on(pod, namespace) group_left(node) (node_namespace_pod:kube_pod_info:) ) * on(node) group_left(role) (max by (node) (kube_node_role{role="master"})))) * 100 / sum(node_memory_MemTotal_bytes) > 5
description: master nodes memory usage increased significantly by {{$value}}%
severity: critical

View File

@@ -1,6 +1,6 @@
cerberus:
distribution: openshift # Distribution can be kubernetes or openshift
kubeconfig_path: ~/.kube/config # Path to kubeconfig
kubeconfig_path: /root/.kube/config # Path to kubeconfig
port: 8080 # http server port where cerberus status is published
watch_nodes: True # Set to True for the cerberus to monitor the cluster nodes
watch_cluster_operators: True # Set to True for cerberus to monitor cluster operators
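For context, the cerberus block above publishes a go/no-go signal over HTTP on the configured port. A tiny, hypothetical consumer is sketched below; it assumes the signal is served as a plain "True"/"False" body at cerberus_url, which should be verified against the cerberus documentation.

```python
# Hypothetical sketch: poll the cerberus go/no-go signal before starting a scenario.
# Assumes cerberus returns "True" / "False" as the response body at cerberus_url.
import requests

def cluster_is_healthy(cerberus_url: str = "http://0.0.0.0:8080") -> bool:
    resp = requests.get(cerberus_url, timeout=5)
    resp.raise_for_status()
    return resp.text.strip() == "True"

if __name__ == "__main__":
    print("go" if cluster_is_healthy() else "no-go")
```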

View File

@@ -1,131 +1,69 @@
kraken:
kubeconfig_path: ~/.kube/config # Path to kubeconfig
distribution: openshift # Distribution can be kubernetes or openshift
kubeconfig_path: /root/.kube/config # Path to kubeconfig
exit_on_failure: False # Exit when a post action scenario fails
auto_rollback: True # Enable auto rollback for scenarios.
rollback_versions_directory: /tmp/kraken-rollback # Directory to store rollback version files.
port: 8081
publish_kraken_status: True # Can be accessed at http://0.0.0.0:8081
signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer docs/signal.md for more details
signal_address: 0.0.0.0 # Signal listening address
port: 8081 # Signal port
chaos_scenarios:
# List of policies/chaos scenarios to load
- hog_scenarios:
- scenarios/kube/cpu-hog.yml
- scenarios/kube/memory-hog.yml
- scenarios/kube/io-hog.yml
- application_outages_scenarios:
- scenarios/openshift/app_outage.yaml
- container_scenarios: # List of chaos pod scenarios to load
- scenarios/openshift/container_etcd.yml
- pod_network_scenarios:
- scenarios/openshift/network_chaos_ingress.yml
- scenarios/openshift/pod_network_outage.yml
- pod_disruption_scenarios:
- scenarios/openshift/etcd.yml
- scenarios/openshift/regex_openshift_pod_kill.yml
- scenarios/openshift/prom_kill.yml
- scenarios/openshift/openshift-apiserver.yml
- scenarios/openshift/openshift-kube-apiserver.yml
- node_scenarios: # List of chaos node scenarios to load
- scenarios/openshift/aws_node_scenarios.yml
- scenarios/openshift/vmware_node_scenarios.yml
- scenarios/openshift/ibmcloud_node_scenarios.yml
- time_scenarios: # List of chaos time scenarios to load
- scenarios/openshift/time_scenarios_example.yml
- cluster_shut_down_scenarios:
- scenarios/openshift/cluster_shut_down_scenario.yml
- service_disruption_scenarios:
- scenarios/openshift/regex_namespace.yaml
- scenarios/openshift/ingress_namespace.yaml
- zone_outages_scenarios:
- scenarios/openshift/zone_outage.yaml
- pvc_scenarios:
- scenarios/openshift/pvc_scenario.yaml
- network_chaos_scenarios:
- scenarios/openshift/network_chaos.yaml
- service_hijacking_scenarios:
- scenarios/kube/service_hijacking.yaml
- syn_flood_scenarios:
- scenarios/kube/syn_flood.yaml
- network_chaos_ng_scenarios:
- scenarios/kube/pod-network-filter.yml
- scenarios/kube/node-network-filter.yml
- kubevirt_vm_outage:
- scenarios/kubevirt/kubevirt-vm-outage.yaml
litmus_version: v1.13.6 # Litmus version to install
litmus_uninstall: False # If you want to uninstall litmus if failure
litmus_uninstall_before_run: True # If you want to uninstall litmus before a new run starts
chaos_scenarios: # List of policies/chaos scenarios to load
- container_scenarios: # List of chaos pod scenarios to load
- - scenarios/openshift/container_etcd.yml
- pod_scenarios:
- - scenarios/openshift/etcd.yml
- - scenarios/openshift/regex_openshift_pod_kill.yml
- scenarios/openshift/post_action_regex.py
- node_scenarios: # List of chaos node scenarios to load
- scenarios/openshift/node_scenarios_example.yml
- pod_scenarios:
- - scenarios/openshift/openshift-apiserver.yml
- - scenarios/openshift/openshift-kube-apiserver.yml
- time_scenarios: # List of chaos time scenarios to load
- scenarios/openshift/time_scenarios_example.yml
- litmus_scenarios: # List of litmus scenarios to load
- - scenarios/openshift/templates/litmus-rbac.yaml
- scenarios/openshift/node_cpu_hog_engine.yaml
- - scenarios/openshift/templates/litmus-rbac.yaml
- scenarios/openshift/node_mem_engine.yaml
- - scenarios/openshift/templates/litmus-rbac.yaml
- scenarios/openshift/node_io_engine.yaml
- cluster_shut_down_scenarios:
- - scenarios/openshift/cluster_shut_down_scenario.yml
- scenarios/openshift/post_action_shut_down.py
- namespace_scenarios:
- - scenarios/openshift/regex_namespace.yaml
- - scenarios/openshift/ingress_namespace.yaml
- scenarios/openshift/post_action_namespace.py
- zone_outages:
- scenarios/openshift/zone_outage.yaml
- application_outages:
- scenarios/openshift/app_outage.yaml
- pvc_scenarios:
- scenarios/openshift/pvc_scenario.yaml
- network_chaos:
- scenarios/openshift/network_chaos.yaml
cerberus:
cerberus_enabled: False # Enable it when cerberus is previously installed
cerberus_url: # When cerberus_enabled is set to True, provide the url where cerberus publishes go/no-go signal
check_application_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
check_applicaton_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
performance_monitoring:
prometheus_url: '' # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
deploy_dashboards: False # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift
repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
kube_burner_binary_url: "https://github.com/cloud-bulldozer/kube-burner/releases/download/v0.9.1/kube-burner-0.9.1-Linux-x86_64.tar.gz"
capture_metrics: False
config_path: config/kube_burner.yaml # Define the Elasticsearch url and index name in this config
metrics_profile_path: config/metrics-aggregated.yaml
prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
uuid: # uuid for the run is generated by default if not set
enable_alerts: False # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
enable_metrics: False
alert_profile: config/alerts.yaml # Path or URL to alert profile with the prometheus queries
metrics_profile: config/metrics-report.yaml
check_critical_alerts: False # When enabled will check prometheus for critical alerts firing post chaos
elastic:
enable_elastic: False
verify_certs: False
elastic_url: "" # To track results in elasticsearch, give url to server here; will post telemetry details when url and index not blank
elastic_port: 32766
username: "elastic"
password: "test"
metrics_index: "krkn-metrics"
alerts_index: "krkn-alerts"
telemetry_index: "krkn-telemetry"
alert_profile: config/alerts # Path to alert profile with the prometheus queries
tunings:
wait_duration: 1 # Duration to wait between each chaos scenario
wait_duration: 60 # Duration to wait between each chaos scenario
iterations: 1 # Number of times to execute the scenarios
daemon_mode: False # Iterations are set to infinity which means that the kraken will cause chaos forever
telemetry:
enabled: False # enable/disables the telemetry collection feature
api_url: https://ulnmf9xv7j.execute-api.us-west-2.amazonaws.com/production #telemetry service endpoint
username: username # telemetry service username
password: password # telemetry service password
prometheus_backup: True # enables/disables prometheus data collection
prometheus_namespace: "" # namespace where prometheus is deployed (if distribution is kubernetes)
prometheus_container_name: "" # name of the prometheus container name (if distribution is kubernetes)
prometheus_pod_name: "" # name of the prometheus pod (if distribution is kubernetes)
full_prometheus_backup: False # if is set to False only the /prometheus/wal folder will be downloaded.
backup_threads: 5 # number of telemetry download/upload threads
archive_path: /tmp # local path where the archive files will be temporarily stored
max_retries: 0 # maximum number of upload retries (if 0 will retry forever)
run_tag: '' # if set, this will be appended to the run folder in the bucket (useful to group the runs)
archive_size: 500000
telemetry_group: '' # if set will archive the telemetry in the S3 bucket on a folder named after the value, otherwise will use "default"
# the size of the prometheus data archive size in KB. The lower the size of archive is
# the higher the number of archive files will be produced and uploaded (and processed by backup_threads
# simultaneously).
# For unstable/slow connection is better to keep this value low
# increasing the number of backup_threads, in this way, on upload failure, the retry will happen only on the
# failed chunk without affecting the whole upload.
logs_backup: True
logs_filter_patterns:
- "(\\w{3}\\s\\d{1,2}\\s\\d{2}:\\d{2}:\\d{2}\\.\\d+).+" # Sep 9 11:20:36.123425532
- "kinit (\\d+/\\d+/\\d+\\s\\d{2}:\\d{2}:\\d{2})\\s+" # kinit 2023/09/15 11:20:36 log
- "(\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d+Z).+" # 2023-09-15T11:20:36.123425532Z log
oc_cli_path: /usr/bin/oc # optional, if not specified it will be searched for in $PATH
events_backup: True # enables/disables cluster events collection
health_checks: # Utilizing health check endpoints to observe application behavior during chaos injection.
interval: # Interval in seconds to perform health checks, default value is 2 seconds
config: # Provide list of health check configurations for applications
- url: # Provide application endpoint
bearer_token: # Bearer token for authentication if any
auth: # Provide authentication credentials (username , password) in tuple format if any, ex:("admin","secretpassword")
exit_on_failure: # If value is True exits when health check failed for application, values can be True/False
kubevirt_checks: # Utilizing virt check endpoints to observe ssh reachability of VMIs during chaos injection.
interval: 2 # Interval in seconds to perform virt checks, default value is 2 seconds
namespace: # Namespace where to find VMI's
name: # Regex Name style of VMI's to watch, optional, will watch all VMI names in the namespace if left blank
only_failures: False # Boolean of whether to show both VMI failures and successful ssh connections (False), or only failure statuses (True)
disconnected: False # Boolean of how to connect to the VMIs; if True, uses the ip_address to try ssh from within a node; if False, uses the name and virtctl to try to connect; default is False
ssh_node: "" # If set, used as a backup way to ssh to a node. Set this to a node that isn't targeted by chaos
node_names: ""
exit_on_failure: # If value is True and VMI's are failing post chaos returns failure, values can be True/False
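The health_checks block above describes a simple loop: probe each configured URL every `interval` seconds while chaos is injected, and optionally fail the run when a probe fails. A hypothetical sketch of that loop is shown below; the function name, defaults, and failure handling are illustrative and not the tool's actual code.

```python
# Hypothetical sketch of the health_checks loop described above: probe each
# configured URL every `interval` seconds and record failures.
import time
import requests

def run_health_checks(configs, interval=2, duration=60):
    failures = []
    deadline = time.time() + duration
    while time.time() < deadline:
        for cfg in configs:
            headers = {}
            if cfg.get("bearer_token"):
                headers["Authorization"] = f"Bearer {cfg['bearer_token']}"
            try:
                resp = requests.get(cfg["url"], headers=headers,
                                    auth=cfg.get("auth"), timeout=5)
                ok = resp.status_code < 400
            except requests.RequestException:
                ok = False
            if not ok:
                failures.append((time.time(), cfg["url"]))
                if cfg.get("exit_on_failure"):
                    raise RuntimeError(f"health check failed for {cfg['url']}")
        time.sleep(interval)
    return failures

# Example usage with the structure from the config above (values are placeholders):
# run_health_checks([{"url": "https://app.example.com/healthz",
#                     "bearer_token": None, "auth": ("admin", "secretpassword"),
#                     "exit_on_failure": True}])
```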

View File

@@ -1,39 +0,0 @@
kraken:
distribution: kubernetes # Distribution can be kubernetes or openshift
kubeconfig_path: ~/.kube/config # Path to kubeconfig
exit_on_failure: False # Exit when a post action scenario fails
port: 8081
publish_kraken_status: True # Can be accessed at http://0.0.0.0:8081
signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer docs/signal.md for more details
signal_address: 0.0.0.0 # Signal listening address
chaos_scenarios: # List of policies/chaos scenarios to load
- pod_disruption_scenarios:
- scenarios/kube/pod.yml
cerberus:
cerberus_enabled: False # Enable it when cerberus is previously installed
cerberus_url: # When cerberus_enabled is set to True, provide the url where cerberus publishes go/no-go signal
check_application_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
performance_monitoring:
prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
uuid: # uuid for the run is generated by default if not set
enable_alerts: False # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
alert_profile: config/alerts.yaml # Path to alert profile with the prometheus queries
elastic:
enable_elastic: False
tunings:
wait_duration: 60 # Duration to wait between each chaos scenario
iterations: 1 # Number of times to execute the scenarios
daemon_mode: False # Iterations are set to infinity which means that the kraken will cause chaos forever
telemetry:
enabled: False # enable/disables the telemetry collection feature
archive_path: /tmp # local path where the archive files will be temporarily stored
events_backup: False # enables/disables cluster events collection
logs_backup: False
health_checks: # Utilizing health check endpoints to observe application behavior during chaos injection.
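Since the config is plain YAML, the scenario list is easy to inspect programmatically. A small sketch (using PyYAML; the file path is an assumption) that prints the scenario files referenced by each chaos_scenarios entry:

```python
# Sketch: load a krkn-style config and list the scenario files per scenario type.
# The config path is an assumption; the structure follows the YAML shown above.
import yaml

with open("config/config_kubernetes.yaml") as f:   # hypothetical path
    config = yaml.safe_load(f)

for entry in config["kraken"]["chaos_scenarios"]:
    for scenario_type, files in entry.items():
        print(scenario_type)
        for path in files or []:
            print(f"  - {path}")
```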

View File

@@ -1,28 +1,37 @@
kraken:
distribution: kubernetes # Distribution can be kubernetes or openshift
kubeconfig_path: ~/.kube/config # Path to kubeconfig
distribution: kubernetes # Distribution can be kubernetes or openshift
kubeconfig_path: /root/.kube/config # Path to kubeconfig
exit_on_failure: False # Exit when a post action scenario fails
port: 8081
publish_kraken_status: True # Can be accessed at http://0.0.0.0:8081
signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer docs/signal.md for more details
litmus_version: v1.13.6 # Litmus version to install
litmus_uninstall: False # If you want to uninstall litmus if failure
litmus_uninstall_before_run: True # If you want to uninstall litmus before a new run starts
chaos_scenarios: # List of policies/chaos scenarios to load
- container_scenarios: # List of chaos pod scenarios to load
- scenarios/kube/container_dns.yml
- plugin_scenarios:
- scenarios/kube/scheduler.yml
- - scenarios/kube/container_dns.yml
- pod_scenarios:
- - scenarios/kube/scheduler.yml
cerberus:
cerberus_enabled: False # Enable it when cerberus is previously installed
cerberus_url: # When cerberus_enabled is set to True, provide the url where cerberus publishes go/no-go signal
check_application_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
check_applicaton_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
performance_monitoring:
deploy_dashboards: False # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift
repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
kube_burner_binary_url: "https://github.com/cloud-bulldozer/kube-burner/releases/download/v0.9.1/kube-burner-0.9.1-Linux-x86_64.tar.gz"
capture_metrics: False
config_path: config/kube_burner.yaml # Define the Elasticsearch url and index name in this config
metrics_profile_path: config/metrics-aggregated.yaml
prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
uuid: # uuid for the run is generated by default if not set
enable_alerts: False # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
alert_profile: config/alerts.yaml # Path to alert profile with the prometheus queries
check_critical_alerts: False # When enabled will check prometheus for critical alerts firing post chaos after soak time for the cluster to settle down
alert_profile: config/alerts # Path to alert profile with the prometheus queries
tunings:
wait_duration: 60 # Duration to wait between each chaos scenario
iterations: 1 # Number of times to execute the scenarios
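The tunings block controls pacing: wait_duration seconds between scenarios, iterations passes over the scenario list, and (where present) daemon_mode to loop forever. A minimal sketch of that outer loop, with the actual scenario execution stubbed out, is shown below; it is an illustration of the documented knobs, not the tool's real runner.

```python
# Sketch of the pacing described by the tunings block: run every scenario,
# sleep wait_duration between scenarios, repeat for `iterations` passes.
# run_scenario() is a stand-in for the real scenario runners.
import time

def run_scenario(path: str) -> None:
    print(f"injecting chaos from {path}")

def chaos_loop(scenario_files, wait_duration=60, iterations=1, daemon_mode=False):
    iteration = 0
    while daemon_mode or iteration < iterations:
        for path in scenario_files:
            run_scenario(path)
            time.sleep(wait_duration)
        iteration += 1

chaos_loop(["scenarios/kube/scheduler.yml"], wait_duration=1, iterations=1)
```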

View File

@@ -1,26 +1,32 @@
kraken:
distribution: openshift # Distribution can be kubernetes or openshift
kubeconfig_path: ~/.kube/config # Path to kubeconfig
kubeconfig_path: /root/.kube/config # Path to kubeconfig
exit_on_failure: False # Exit when a post action scenario fails
port: 8081
publish_kraken_status: True # Can be accessed at http://0.0.0.0:8081
signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer docs/signal.md for more details
signal_address: 0.0.0.0 # Signal listening address
port: 8081 # Signal port
litmus_version: v1.13.6 # Litmus version to install
litmus_uninstall: False # If you want to uninstall litmus if failure
litmus_uninstall_before_run: True # If you want to uninstall litmus before a new run starts
chaos_scenarios: # List of policies/chaos scenarios to load
- plugin_scenarios: # List of chaos pod scenarios to load
- scenarios/openshift/etcd.yml
- scenarios/openshift/regex_openshift_pod_kill.yml
- scenarios/openshift/prom_kill.yml
- pod_scenarios: # List of chaos pod scenarios to load
- - scenarios/openshift/etcd.yml
- - scenarios/openshift/regex_openshift_pod_kill.yml
- scenarios/openshift/post_action_regex.py
- node_scenarios: # List of chaos node scenarios to load
- scenarios/openshift/node_scenarios_example.yml
- plugin_scenarios:
- scenarios/openshift/openshift-apiserver.yml
- scenarios/openshift/openshift-kube-apiserver.yml
- scenarios/openshift/node_scenarios_example.yml
- pod_scenarios:
- - scenarios/openshift/openshift-apiserver.yml
- - scenarios/openshift/openshift-kube-apiserver.yml
- time_scenarios: # List of chaos time scenarios to load
- scenarios/openshift/time_scenarios_example.yml
- litmus_scenarios: # List of litmus scenarios to load
- - https://hub.litmuschaos.io/api/chaos/1.10.0?file=charts/generic/node-cpu-hog/rbac.yaml
- scenarios/openshift/node_cpu_hog_engine.yaml
- cluster_shut_down_scenarios:
- scenarios/openshift/cluster_shut_down_scenario.yml
- service_disruption_scenarios:
- - scenarios/openshift/cluster_shut_down_scenario.yml
- scenarios/openshift/post_action_shut_down.py
- namespace_scenarios:
- scenarios/openshift/regex_namespace.yaml
- scenarios/openshift/ingress_namespace.yaml
- zone_outages:
@@ -35,49 +41,22 @@ kraken:
cerberus:
cerberus_enabled: True # Enable it when cerberus is previously installed
cerberus_url: http://0.0.0.0:8080 # When cerberus_enabled is set to True, provide the url where cerberus publishes go/no-go signal
check_application_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
check_applicaton_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
performance_monitoring:
deploy_dashboards: True # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift
repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
kube_burner_binary_url: "https://github.com/cloud-bulldozer/kube-burner/releases/download/v0.9.1/kube-burner-0.9.1-Linux-x86_64.tar.gz"
capture_metrics: True
config_path: config/kube_burner.yaml # Define the Elasticsearch url and index name in this config
metrics_profile_path: config/metrics-aggregated.yaml
prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
uuid: # uuid for the run is generated by default if not set
enable_alerts: True # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
alert_profile: config/alerts.yaml # Path to alert profile with the prometheus queries
alert_profile: config/alerts # Path to alert profile with the prometheus queries
tunings:
wait_duration: 60 # Duration to wait between each chaos scenario
iterations: 1 # Number of times to execute the scenarios
daemon_mode: False # Iterations are set to infinity which means that the kraken will cause chaos forever
telemetry:
enabled: False # enable/disables the telemetry collection feature
api_url: https://ulnmf9xv7j.execute-api.us-west-2.amazonaws.com/production #telemetry service endpoint
username: username # telemetry service username
password: password # telemetry service password
prometheus_backup: True # enables/disables prometheus data collection
full_prometheus_backup: False # if is set to False only the /prometheus/wal folder will be downloaded.
backup_threads: 5 # number of telemetry download/upload threads
archive_path: /tmp # local path where the archive files will be temporarly stored
max_retries: 0 # maximum number of upload retries (if 0 will retry forever)
run_tag: '' # if set, this will be appended to the run folder in the bucket (useful to group the runs)
archive_size: 500000 # the size of the prometheus data archive size in KB. The lower the size of archive is
# the higher the number of archive files will be produced and uploaded (and processed by backup_threads
# simultaneously).
# For unstable/slow connection is better to keep this value low
# increasing the number of backup_threads, in this way, on upload failure, the retry will happen only on the
# failed chunk without affecting the whole upload.
logs_backup: True
logs_filter_patterns:
- "(\\w{3}\\s\\d{1,2}\\s\\d{2}:\\d{2}:\\d{2}\\.\\d+).+" # Sep 9 11:20:36.123425532
- "kinit (\\d+/\\d+/\\d+\\s\\d{2}:\\d{2}:\\d{2})\\s+" # kinit 2023/09/15 11:20:36 log
- "(\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d+Z).+" # 2023-09-15T11:20:36.123425532Z log
oc_cli_path: /usr/bin/oc # optional, if not specified it will be searched for in $PATH
elastic:
elastic_url: "" # To track results in elasticsearch, give url to server here; will post telemetry details when url and index not blank
elastic_index: "" # Elastic search index pattern to post results to

15
config/kube_burner.yaml Normal file
View File

@@ -0,0 +1,15 @@
---
global:
writeToFile: true
metricsDirectory: collected-metrics
measurements:
- name: podLatency
esIndex: kraken
indexerConfig:
enabled: true
esServers: [http://0.0.0.0:9200] # Please change this to the respective Elasticsearch in use if you haven't run the podman-compose command to setup the infrastructure containers
insecureSkipVerify: true
defaultIndex: kraken
type: elastic
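The indexerConfig above posts results to the Elasticsearch node listed under esServers. One hedged way to confirm that endpoint is reachable before a run is a plain GET against the node's root URL, which returns basic cluster information:

```python
# Sketch: verify the Elasticsearch endpoint configured under esServers is reachable.
# A GET on the root of an Elasticsearch node returns basic cluster information.
import requests

ES_URL = "http://0.0.0.0:9200"   # value from the kube_burner config above

resp = requests.get(ES_URL, verify=False, timeout=5)
resp.raise_for_status()
info = resp.json()
print(info.get("cluster_name"), info.get("version", {}).get("number"))
```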

View File

@@ -1,126 +1,133 @@
metrics:
# API server
- query: histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{apiserver="kube-apiserver", verb!~"WATCH", subresource!="log"}[2m])) by (verb,resource,subresource,instance,le)) > 0
metricName: API99thLatency
- query: sum(irate(apiserver_request_total{apiserver="kube-apiserver",verb!="WATCH",subresource!="log"}[2m])) by (verb,instance,resource,code) > 0
metricName: APIRequestRate
instant: True
- query: sum(apiserver_current_inflight_requests{}) by (request_kind) > 0
metricName: APIInflightRequests
instant: True
- query: histogram_quantile(0.99, rate(apiserver_current_inflight_requests[5m]))
metricName: APIInflightRequests
instant: True
# Container & pod metrics
- query: (sum(container_memory_rss{name!="",container!="POD",namespace=~"openshift-(etcd|oauth-apiserver|.*apiserver|ovn-kubernetes|sdn|ingress|authentication|.*controller-manager|.*scheduler)"}) by (container, pod, namespace, node) and on (node) kube_node_role{role="master"}) > 0
metricName: containerMemory-Masters
instant: true
- query: (sum(irate(container_cpu_usage_seconds_total{name!="",container!="POD",namespace=~"openshift-(etcd|oauth-apiserver|sdn|ovn-kubernetes|.*apiserver|authentication|.*controller-manager|.*scheduler)"}[2m]) * 100) by (container, pod, namespace, node) and on (node) kube_node_role{role="master"}) > 0
metricName: containerCPU-Masters
instant: true
- query: (sum(irate(container_cpu_usage_seconds_total{pod!="",container="prometheus",namespace="openshift-monitoring"}[2m]) * 100) by (container, pod, namespace, node) and on (node) kube_node_role{role="infra"}) > 0
metricName: containerCPU-Prometheus
instant: true
- query: (avg(irate(container_cpu_usage_seconds_total{name!="",container!="POD",namespace=~"openshift-(sdn|ovn-kubernetes|ingress)"}[2m]) * 100 and on (node) kube_node_role{role="worker"}) by (namespace, container)) > 0
metricName: containerCPU-AggregatedWorkers
instant: true
- query: (avg(irate(container_cpu_usage_seconds_total{name!="",container!="POD",namespace=~"openshift-(sdn|ovn-kubernetes|ingress|monitoring|image-registry|logging)"}[2m]) * 100 and on (node) kube_node_role{role="infra"}) by (namespace, container)) > 0
metricName: containerCPU-AggregatedInfra
- query: (sum(container_memory_rss{pod!="",namespace="openshift-monitoring",name!="",container="prometheus"}) by (container, pod, namespace, node) and on (node) kube_node_role{role="infra"}) > 0
metricName: containerMemory-Prometheus
instant: True
- query: avg(container_memory_rss{name!="",container!="POD",namespace=~"openshift-(sdn|ovn-kubernetes|ingress)"} and on (node) kube_node_role{role="worker"}) by (container, namespace)
metricName: containerMemory-AggregatedWorkers
instant: True
- query: avg(container_memory_rss{name!="",container!="POD",namespace=~"openshift-(sdn|ovn-kubernetes|ingress|monitoring|image-registry|logging)"} and on (node) kube_node_role{role="infra"}) by (container, namespace)
metricName: containerMemory-AggregatedInfra
instant: True
# Node metrics
- query: (sum(irate(node_cpu_seconds_total[2m])) by (mode,instance) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")) > 0
metricName: nodeCPU-Masters
instant: True
- query: max(max_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
metricName: maxCPU-Masters
instant: true
- query: avg(avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
metricName: nodeMemory-Masters
instant: true
- query: (avg((sum(irate(node_cpu_seconds_total[2m])) by (mode,instance) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))) by (mode)) > 0
metricName: nodeCPU-AggregatedWorkers
instant: True
- query: (avg((sum(irate(node_cpu_seconds_total[2m])) by (mode,instance) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))) by (mode)) > 0
metricName: nodeCPU-AggregatedInfra
instant: True
- query: avg(avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
metricName: nodeMemory-Masters
instant: true
- query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
metricName: maxMemory-Masters
instant: true
- query: avg(node_memory_MemAvailable_bytes) by (instance) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
metricName: nodeMemoryAvailable-Masters
- query: avg(node_memory_MemAvailable_bytes and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
metricName: nodeMemoryAvailable-AggregatedWorkers
instant: True
- query: max(max_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
metricName: maxCPU-Workers
instant: true
- query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
metricName: maxMemory-Workers
instant: true
- query: avg(node_memory_MemAvailable_bytes and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))
metricName: nodeMemoryAvailable-AggregatedInfra
instant: True
- query: avg(node_memory_Active_bytes) by (instance) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
metricName: nodeMemoryActive-Masters
instant: True
- query: avg(node_memory_Active_bytes and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
metricName: nodeMemoryActive-AggregatedWorkers
instant: True
- query: avg(avg(node_memory_Active_bytes) by (instance) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))
metricName: nodeMemoryActive-AggregatedInfra
instant: True
- query: avg(node_memory_Cached_bytes) by (instance) + avg(node_memory_Buffers_bytes) by (instance) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
metricName: nodeMemoryCached+nodeMemoryBuffers-Masters
- query: avg(node_memory_Cached_bytes + node_memory_Buffers_bytes and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
metricName: nodeMemoryCached+nodeMemoryBuffers-AggregatedWorkers
- query: avg(node_memory_Cached_bytes + node_memory_Buffers_bytes and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))
metricName: nodeMemoryCached+nodeMemoryBuffers-AggregatedInfra
- query: irate(node_network_receive_bytes_total{device=~"^(ens|eth|bond|team).*"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
metricName: rxNetworkBytes-Masters
- query: avg(irate(node_network_receive_bytes_total{device=~"^(ens|eth|bond|team).*"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (device)
metricName: rxNetworkBytes-AggregatedWorkers
- query: avg(irate(node_network_receive_bytes_total{device=~"^(ens|eth|bond|team).*"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (device)
metricName: rxNetworkBytes-AggregatedInfra
- query: irate(node_network_transmit_bytes_total{device=~"^(ens|eth|bond|team).*"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
metricName: txNetworkBytes-Masters
- query: avg(irate(node_network_transmit_bytes_total{device=~"^(ens|eth|bond|team).*"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (device)
metricName: txNetworkBytes-AggregatedWorkers
- query: avg(irate(node_network_transmit_bytes_total{device=~"^(ens|eth|bond|team).*"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (device)
metricName: txNetworkBytes-AggregatedInfra
- query: rate(node_disk_written_bytes_total{device!~"^(dm|rb).*"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
metricName: nodeDiskWrittenBytes-Masters
- query: avg(rate(node_disk_written_bytes_total{device!~"^(dm|rb).*"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (device)
metricName: nodeDiskWrittenBytes-AggregatedWorkers
- query: avg(rate(node_disk_written_bytes_total{device!~"^(dm|rb).*"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (device)
metricName: nodeDiskWrittenBytes-AggregatedInfra
- query: rate(node_disk_read_bytes_total{device!~"^(dm|rb).*"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
metricName: nodeDiskReadBytes-Masters
- query: avg(rate(node_disk_read_bytes_total{device!~"^(dm|rb).*"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (device)
metricName: nodeDiskReadBytes-AggregatedWorkers
- query: avg(rate(node_disk_read_bytes_total{device!~"^(dm|rb).*"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (device)
metricName: nodeDiskReadBytes-AggregatedInfra
# Etcd metrics
- query: sum(rate(etcd_server_leader_changes_seen_total[2m]))
metricName: etcdLeaderChangesRate
instant: True
- query: etcd_server_is_leader > 0
metricName: etcdServerIsLeader
instant: True
- query: histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket[2m]))
metricName: 99thEtcdDiskBackendCommitDurationSeconds
instant: True
- query: histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[2m]))
metricName: 99thEtcdDiskWalFsyncDurationSeconds
instant: True
- query: histogram_quantile(0.99, rate(etcd_network_peer_round_trip_time_seconds_bucket[5m]))
metricName: 99thEtcdRoundTripTimeSeconds
instant: True
- query: etcd_mvcc_db_total_size_in_bytes
metricName: etcdDBPhysicalSizeBytes
- query: etcd_mvcc_db_total_size_in_use_in_bytes
metricName: etcdDBLogicalSizeBytes
- query: sum by (cluster_version)(etcd_cluster_version)
metricName: etcdVersion
@@ -128,16 +135,50 @@ metrics:
- query: sum(rate(etcd_object_counts{}[5m])) by (resource) > 0
metricName: etcdObjectCount
instant: True
- query: histogram_quantile(0.99,sum(rate(etcd_request_duration_seconds_bucket[2m])) by (le,operation,apiserver)) > 0
metricName: P99APIEtcdRequestLatency
instant: True
- query: sum by (instance) (apiserver_storage_objects)
metricName: etcdTotalObjectCount
instant: True
# Cluster metrics
- query: count(kube_namespace_created)
metricName: namespaceCount
- query: topk(500, max by(resource) (apiserver_storage_objects))
metricName: etcdTopObectCount
instant: True
- query: sum(kube_pod_status_phase{}) by (phase)
metricName: podStatusCount
- query: count(kube_secret_info{})
metricName: secretCount
- query: count(kube_deployment_labels{})
metricName: deploymentCount
- query: count(kube_configmap_info{})
metricName: configmapCount
- query: count(kube_service_info{})
metricName: serviceCount
- query: kube_node_role
metricName: nodeRoles
instant: true
- query: sum(kube_node_status_condition{status="true"}) by (condition)
metricName: nodeStatus
- query: (sum(rate(container_fs_writes_bytes_total{container!="",device!~".+dm.+"}[5m])) by (device, container, node) and on (node) kube_node_role{role="master"}) > 0
metricName: containerDiskUsage
- query: cluster_version{type="completed"}
metricName: clusterVersion
instant: true
# Golang metrics
- query: go_memstats_heap_alloc_bytes{job=~"apiserver|api|etcd"}
metricName: goHeapAllocBytes
- query: go_memstats_heap_inuse_bytes{job=~"apiserver|api|etcd"}
metricName: goHeapInuseBytes
- query: go_gc_duration_seconds{job=~"apiserver|api|etcd",quantile="1"}
metricName: goGCDurationSeconds
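Several queries in this metrics profile embed an `.elapsed:` token inside subquery ranges; the tooling appears to substitute it with the duration of the run before the query is sent to Prometheus. Under that assumption, a hypothetical sketch of executing one of the profile entries as an instant query:

```python
# Sketch: substitute the ".elapsed" placeholder used by the metrics profiles with
# the run duration and execute the query as an instant Prometheus query.
# The substitution rule and endpoint details are assumptions for illustration.
import requests

PROMETHEUS_URL = "https://prometheus.example.com"    # placeholder
ELAPSED = "10m"                                       # e.g. the chaos run duration

query = ("max(max_over_time(sum(irate(node_cpu_seconds_total{mode!='idle', mode!='steal'}[2m]) "
         "and on (instance) label_replace(kube_node_role{role='worker'}, 'instance', '$1', 'node', '(.+)')) "
         "by (instance)[.elapsed:]))")
query = query.replace(".elapsed", ELAPSED)

resp = requests.get(f"{PROMETHEUS_URL}/api/v1/query", params={"query": query}, timeout=30)
resp.raise_for_status()
for sample in resp.json()["data"]["result"]:
    print(sample["metric"], sample["value"])
```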

View File

@@ -1,248 +0,0 @@
metrics:
# API server
- query: sum(apiserver_current_inflight_requests{}) by (request_kind) > 0
metricName: APIInflightRequests
instant: true
# Kubelet & CRI-O
# Average and max of the CPU usage from all worker's kubelet
- query: avg(avg_over_time(irate(process_cpu_seconds_total{service="kubelet",job="kubelet"}[2m])[.elapsed:]) and on (node) kube_node_role{role="worker"})
metricName: cpu-kubelet
instant: true
- query: max(max_over_time(irate(process_cpu_seconds_total{service="kubelet",job="kubelet"}[2m])[.elapsed:]) and on (node) kube_node_role{role="worker"})
metricName: max-cpu-kubelet
instant: true
# Average of the memory usage from all worker's kubelet
- query: avg(avg_over_time(process_resident_memory_bytes{service="kubelet",job="kubelet"}[.elapsed:]) and on (node) kube_node_role{role="worker"})
metricName: memory-kubelet
instant: true
# Max of the memory usage from all worker's kubelet
- query: max(max_over_time(process_resident_memory_bytes{service="kubelet",job="kubelet"}[.elapsed:]) and on (node) kube_node_role{role="worker"})
metricName: max-memory-kubelet
instant: true
- query: max_over_time(sum(process_resident_memory_bytes{service="kubelet",job="kubelet"} and on (node) kube_node_role{role="worker"})[.elapsed:])
metricName: max-memory-sum-kubelet
instant: true
# Average and max of the CPU usage from all worker's CRI-O
- query: avg(avg_over_time(irate(process_cpu_seconds_total{service="kubelet",job="crio"}[2m])[.elapsed:]) and on (node) kube_node_role{role="worker"})
metricName: cpu-crio
instant: true
- query: max(max_over_time(irate(process_cpu_seconds_total{service="kubelet",job="crio"}[2m])[.elapsed:]) and on (node) kube_node_role{role="worker"})
metricName: max-cpu-crio
instant: true
# Average of the memory usage from all worker's CRI-O
- query: avg(avg_over_time(process_resident_memory_bytes{service="kubelet",job="crio"}[.elapsed:]) and on (node) kube_node_role{role="worker"})
metricName: memory-crio
instant: true
# Max of the memory usage from all worker's CRI-O
- query: max(max_over_time(process_resident_memory_bytes{service="kubelet",job="crio"}[.elapsed:]) and on (node) kube_node_role{role="worker"})
metricName: max-memory-crio
instant: true
# Etcd
- query: avg(avg_over_time(histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket[2m]))[.elapsed:]))
metricName: 99thEtcdDiskBackendCommit
instant: true
- query: avg(avg_over_time(histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[2m]))[.elapsed:]))
metricName: 99thEtcdDiskWalFsync
instant: true
- query: avg(avg_over_time(histogram_quantile(0.99, irate(etcd_network_peer_round_trip_time_seconds_bucket[2m]))[.elapsed:]))
metricName: 99thEtcdRoundTripTime
instant: true
# Control-plane
- query: avg(avg_over_time(topk(1, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-kube-controller-manager"}[2m])) by (pod))[.elapsed:]))
metricName: cpu-kube-controller-manager
instant: true
- query: max(max_over_time(topk(1, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-kube-controller-manager"}[2m])) by (pod))[.elapsed:]))
metricName: max-cpu-kube-controller-manager
instant: true
- query: avg(avg_over_time(topk(1, sum(container_memory_rss{name!="", namespace="openshift-kube-controller-manager"}) by (pod))[.elapsed:]))
metricName: memory-kube-controller-manager
instant: true
- query: max(max_over_time(topk(1, sum(container_memory_rss{name!="", namespace="openshift-kube-controller-manager"}) by (pod))[.elapsed:]))
metricName: max-memory-kube-controller-manager
instant: true
- query: avg(avg_over_time(topk(3, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-kube-apiserver"}[2m])) by (pod))[.elapsed:]))
metricName: cpu-kube-apiserver
instant: true
- query: avg(avg_over_time(topk(3, sum(container_memory_rss{name!="", namespace="openshift-kube-apiserver"}) by (pod))[.elapsed:]))
metricName: memory-kube-apiserver
instant: true
- query: avg(avg_over_time(topk(3, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-apiserver"}[2m])) by (pod))[.elapsed:]))
metricName: cpu-openshift-apiserver
instant: true
- query: avg(avg_over_time(topk(3, sum(container_memory_rss{name!="", namespace="openshift-apiserver"}) by (pod))[.elapsed:]))
metricName: memory-openshift-apiserver
instant: true
- query: avg(avg_over_time(topk(3, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-etcd"}[2m])) by (pod))[.elapsed:]))
metricName: cpu-etcd
instant: true
- query: avg(avg_over_time(topk(3,sum(container_memory_rss{name!="", namespace="openshift-etcd"}) by (pod))[.elapsed:]))
metricName: memory-etcd
instant: true
- query: avg(avg_over_time(topk(1, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-controller-manager"}[2m])) by (pod))[.elapsed:]))
metricName: cpu-openshift-controller-manager
instant: true
- query: avg(avg_over_time(topk(1, sum(container_memory_rss{name!="", namespace="openshift-controller-manager"}) by (pod))[.elapsed:]))
metricName: memory-openshift-controller-manager
instant: true
# multus
- query: avg(avg_over_time(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-multus", pod=~"(multus).+", container!="POD"}[2m])[.elapsed:])) by (container)
metricName: cpu-multus
instant: true
- query: avg(avg_over_time(container_memory_rss{name!="", namespace="openshift-multus", pod=~"(multus).+", container!="POD"}[.elapsed:])) by (container)
metricName: memory-multus
instant: true
# OVNKubernetes - standard & IC
- query: avg(avg_over_time(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-ovn-kubernetes", pod=~"(ovnkube-master|ovnkube-control-plane).+", container!="POD"}[2m])[.elapsed:])) by (container)
metricName: cpu-ovn-control-plane
instant: true
- query: avg(avg_over_time(container_memory_rss{name!="", namespace="openshift-ovn-kubernetes", pod=~"(ovnkube-master|ovnkube-control-plane).+", container!="POD"}[.elapsed:])) by (container)
metricName: memory-ovn-control-plane
instant: true
- query: avg(avg_over_time(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-ovn-kubernetes", pod=~"ovnkube-node.+", container!="POD"}[2m])[.elapsed:])) by (container)
metricName: cpu-ovnkube-node
instant: true
- query: avg(avg_over_time(container_memory_rss{name!="", namespace="openshift-ovn-kubernetes", pod=~"ovnkube-node.+", container!="POD"}[.elapsed:])) by (container)
metricName: memory-ovnkube-node
instant: true
# Nodes
- query: avg(avg_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
metricName: cpu-masters
instant: true
- query: avg(avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
metricName: memory-masters
instant: true
- query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
metricName: max-memory-masters
instant: true
- query: avg(avg_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
metricName: cpu-workers
instant: true
- query: max(max_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
metricName: max-cpu-workers
instant: true
- query: avg(avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
metricName: memory-workers
instant: true
- query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
metricName: max-memory-workers
instant: true
- query: sum( (node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)") )
metricName: memory-sum-workers
instant: true
- query: avg(avg_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
metricName: cpu-infra
instant: true
- query: max(max_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
metricName: max-cpu-infra
instant: true
- query: avg(avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))
metricName: memory-infra
instant: true
- query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))
metricName: max-memory-infra
instant: true
- query: max_over_time(sum((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))[.elapsed:])
metricName: max-memory-sum-infra
instant: true
# Monitoring and ingress
- query: avg(avg_over_time(sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-monitoring", pod=~"prometheus-k8s.+"}[2m])) by (pod)[.elapsed:]))
metricName: cpu-prometheus
instant: true
- query: max(max_over_time(sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-monitoring", pod=~"prometheus-k8s.+"}[2m])) by (pod)[.elapsed:]))
metricName: max-cpu-prometheus
instant: true
- query: avg(avg_over_time(sum(container_memory_rss{name!="", namespace="openshift-monitoring", pod=~"prometheus-k8s.+"}) by (pod)[.elapsed:]))
metricName: memory-prometheus
instant: true
- query: max(max_over_time(sum(container_memory_rss{name!="", namespace="openshift-monitoring", pod=~"prometheus-k8s.+"}) by (pod)[.elapsed:]))
metricName: max-memory-prometheus
instant: true
- query: avg(avg_over_time(sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-ingress", pod=~"router-default.+"}[2m])) by (pod)[.elapsed:]))
metricName: cpu-router
instant: true
- query: avg(avg_over_time(sum(container_memory_rss{name!="", namespace="openshift-ingress", pod=~"router-default.+"}) by (pod)[.elapsed:]))
metricName: memory-router
instant: true
# Cluster
- query: avg_over_time(cluster:memory_usage:ratio[.elapsed:])
metricName: memory-cluster-usage-ratio
instant: true
- query: avg_over_time(cluster:node_cpu:ratio[.elapsed:])
metricName: cpu-cluster-usage-ratio
instant: true
# Retain the raw CPU seconds totals for comparison
- query: sum(node_cpu_seconds_total and on (instance) label_replace(kube_node_role{role="worker",role!="infra"}, "instance", "$1", "node", "(.+)")) by (mode)
metricName: nodeCPUSeconds-Workers
instant: true
- query: sum(node_cpu_seconds_total and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")) by (mode)
metricName: nodeCPUSeconds-Masters
instant: true
- query: sum(node_cpu_seconds_total and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (mode)
metricName: nodeCPUSeconds-Infra
instant: true
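
The `.elapsed` token in these queries is a placeholder for the scenario's elapsed duration. As a minimal sketch of evaluating one rendered query by hand, assuming `.elapsed` is substituted with a duration such as `5m` and that `PROMETHEUS_URL` and `PROMETHEUS_TOKEN` point at a reachable Prometheus instance (all illustrative values, not the tool's exact mechanism):

```bash
# Evaluate one profile query against the Prometheus HTTP API (illustrative values).
ELAPSED="5m"   # assumed substitution for the .elapsed placeholder
QUERY=$(printf 'max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[%s:]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))' "$ELAPSED")

curl -sk -G "${PROMETHEUS_URL}/api/v1/query" \
  -H "Authorization: Bearer ${PROMETHEUS_TOKEN}" \
  --data-urlencode "query=${QUERY}" | jq '.data.result'
```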

View File

@@ -1,7 +1,13 @@
metrics:
# API server
- query: irate(apiserver_request_total{verb="POST", resource="pods", subresource="binding",code="201"}[2m]) > 0
metricName: schedulingThroughput
- query: histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{apiserver="kube-apiserver", verb!~"WATCH", subresource!="log"}[2m])) by (verb,resource,subresource,instance,le)) > 0
metricName: API99thLatency
- query: sum(irate(apiserver_request_total{apiserver="kube-apiserver",verb!="WATCH",subresource!="log"}[2m])) by (verb,instance,resource,code) > 0
metricName: APIRequestRate
- query: sum(apiserver_current_inflight_requests{}) by (request_kind) > 0
metricName: APIInflightRequests
# Containers & pod metrics
- query: sum(irate(container_cpu_usage_seconds_total{name!="",namespace=~"openshift-(etcd|oauth-apiserver|.*apiserver|ovn-kubernetes|sdn|ingress|authentication|.*controller-manager|.*scheduler|monitoring|logging|image-registry)"}[2m]) * 100) by (pod, namespace, node)
@@ -27,17 +33,8 @@ metrics:
metricName: crioMemory
# Node metrics
- query: (sum(irate(node_cpu_seconds_total[2m])) by (mode,instance) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")) > 0
metricName: nodeCPU-Masters
- query: (avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
metricName: nodeMemory-Masters
- query: (sum(irate(node_cpu_seconds_total[2m])) by (mode,instance) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) > 0
metricName: nodeCPU-Workers
- query: (avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[2m:]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
metricName: nodeMemory-Workers
- query: sum(irate(node_cpu_seconds_total[2m])) by (mode,instance) > 0
metricName: nodeCPU
- query: avg(node_memory_MemAvailable_bytes) by (instance)
metricName: nodeMemoryAvailable
@@ -45,9 +42,6 @@ metrics:
- query: avg(node_memory_Active_bytes) by (instance)
metricName: nodeMemoryActive
- query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
metricName: maxMemory-Masters
- query: avg(node_memory_Cached_bytes) by (instance) + avg(node_memory_Buffers_bytes) by (instance)
metricName: nodeMemoryCached+nodeMemoryBuffers
@@ -90,4 +84,34 @@ metrics:
- query: sum by (cluster_version)(etcd_cluster_version)
metricName: etcdVersion
instant: true
instant: true
# Cluster metrics
- query: count(kube_namespace_created)
metricName: namespaceCount
- query: sum(kube_pod_status_phase{}) by (phase)
metricName: podStatusCount
- query: count(kube_secret_info{})
metricName: secretCount
- query: count(kube_deployment_labels{})
metricName: deploymentCount
- query: count(kube_configmap_info{})
metricName: configmapCount
- query: count(kube_service_info{})
metricName: serviceCount
- query: kube_node_role
metricName: nodeRoles
instant: true
- query: sum(kube_node_status_condition{status="true"}) by (condition)
metricName: nodeStatus
- query: cluster_version{type="completed"}
metricName: clusterVersion
instant: true

View File

@@ -1,35 +0,0 @@
application: openshift-etcd
namespaces: openshift-etcd
labels: app=openshift-etcd
kubeconfig: ~/.kube/config.yaml
prometheus_endpoint: <Prometheus_Endpoint>
auth_token: <Auth_Token>
scrape_duration: 10m
chaos_library: "kraken"
log_level: INFO
json_output_file: False
json_output_folder_path:
# for output purpose only do not change if not needed
chaos_tests:
GENERIC:
- pod_failure
- container_failure
- node_failure
- zone_outage
- time_skew
- namespace_failure
- power_outage
CPU:
- node_cpu_hog
NETWORK:
- application_outage
- node_network_chaos
- pod_network_chaos
MEM:
- node_memory_hog
- pvc_disk_fill
threshold: .7
cpu_threshold: .5
mem_threshold: .5

28
containers/Dockerfile Normal file
View File

@@ -0,0 +1,28 @@
# Dockerfile for kraken
FROM quay.io/openshift/origin-tests:latest as origintests
FROM quay.io/centos/centos:stream9
LABEL org.opencontainers.image.authors="Red Hat OpenShift Chaos Engineering"
ENV KUBECONFIG /root/.kube/config
# Copy OpenShift CLI, Kubernetes CLI from origin-tests image
COPY --from=origintests /usr/bin/oc /usr/bin/oc
COPY --from=origintests /usr/bin/kubectl /usr/bin/kubectl
# Install dependencies
RUN yum install epel-release -y && \
yum install -y git python python3-pip jq gettext && \
python3 -m pip install -U pip && \
rpm --import https://packages.microsoft.com/keys/microsoft.asc && \
echo -e "[azure-cli]\nname=Azure CLI\nbaseurl=https://packages.microsoft.com/yumrepos/azure-cli\nenabled=1\ngpgcheck=1\ngpgkey=https://packages.microsoft.com/keys/microsoft.asc" > /etc/yum.repos.d/azure-cli.repo && yum install -y azure-cli && \
git clone https://github.com/openshift-scale/kraken /root/kraken && \
mkdir -p /root/.kube && cd /root/kraken && \
pip3 install -r requirements.txt
WORKDIR /root/kraken
ENTRYPOINT ["python3", "run_kraken.py"]
CMD ["--config=config/config.yaml"]
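
A quick way to exercise this image locally, assuming the build is run from the `containers` directory and a kubeconfig is mounted at the path set by `KUBECONFIG` (image and container names are illustrative; the build needs network access because the Dockerfile clones the kraken repository):

```bash
# Build and run the image against a local kubeconfig (illustrative names).
cd containers
podman build -t krkn:latest .
podman run --name krkn \
  -v ~/.kube/config:/root/.kube/config:Z \
  krkn:latest --config=config/config.yaml
```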

View File

@@ -0,0 +1,25 @@
# Dockerfile for kraken
FROM ppc64le/centos:8
MAINTAINER Red Hat OpenShift Performance and Scale
ENV KUBECONFIG /root/.kube/config
RUN curl -L -o kubernetes-client-linux-ppc64le.tar.gz https://dl.k8s.io/v1.19.0/kubernetes-client-linux-ppc64le.tar.gz \
&& tar xf kubernetes-client-linux-ppc64le.tar.gz && mv kubernetes/client/bin/kubectl /usr/bin/ && rm -rf kubernetes-client-linux-ppc64le.tar.gz
RUN curl -L -o openshift-client-linux.tar.gz https://mirror.openshift.com/pub/openshift-v4/ppc64le/clients/ocp/stable/openshift-client-linux.tar.gz \
&& tar xf openshift-client-linux.tar.gz -C /usr/bin && rm -rf openshift-client-linux.tar.gz
# Install dependencies
RUN yum install epel-release -y && \
yum install -y git python36 python3-pip gcc libffi-devel python36-devel openssl-devel gcc-c++ make jq gettext && \
git clone https://github.com/cloud-bulldozer/kraken /root/kraken && \
mkdir -p /root/.kube && cd /root/kraken && \
pip3 install cryptography==3.3.2 && \
pip3 install -r requirements.txt setuptools==40.3.0 urllib3==1.25.4
WORKDIR /root/kraken
ENTRYPOINT python3 run_kraken.py --config=config/config.yaml

View File

@@ -1,90 +0,0 @@
# oc build
FROM golang:1.24.9 AS oc-build
RUN apt-get update && apt-get install -y --no-install-recommends libkrb5-dev
WORKDIR /tmp
# oc build
RUN git clone --branch release-4.18 https://github.com/openshift/oc.git
WORKDIR /tmp/oc
RUN go mod edit -go 1.24.9 &&\
go mod edit -require github.com/moby/buildkit@v0.12.5 &&\
go mod edit -require github.com/containerd/containerd@v1.7.29&&\
go mod edit -require github.com/docker/docker@v27.5.1+incompatible&&\
go mod edit -require github.com/opencontainers/runc@v1.2.8&&\
go mod edit -require github.com/go-git/go-git/v5@v5.13.0&&\
go mod edit -require github.com/opencontainers/selinux@v1.13.0&&\
go mod edit -require github.com/ulikunitz/xz@v0.5.15&&\
go mod edit -require golang.org/x/net@v0.38.0&&\
go mod edit -require github.com/containerd/containerd@v1.7.27&&\
go mod edit -require golang.org/x/oauth2@v0.27.0&&\
go mod edit -require golang.org/x/crypto@v0.35.0&&\
go mod edit -replace github.com/containerd/containerd@v1.7.27=github.com/containerd/containerd@v1.7.29&&\
go mod tidy && go mod vendor
RUN make GO_REQUIRED_MIN_VERSION:= oc
# virtctl build
WORKDIR /tmp
RUN git clone https://github.com/kubevirt/kubevirt.git
WORKDIR /tmp/kubevirt
RUN go mod edit -go 1.24.9 &&\
go work use &&\
go build -o virtctl ./cmd/virtctl/
FROM fedora:40
ARG PR_NUMBER
ARG TAG
RUN groupadd -g 1001 krkn && useradd -m -u 1001 -g krkn krkn
RUN dnf update -y
ENV KUBECONFIG /home/krkn/.kube/config
# This overwrites any existing configuration in /etc/yum.repos.d/kubernetes.repo
RUN dnf update && dnf install -y --setopt=install_weak_deps=False \
git python3.11 jq yq gettext wget which ipmitool openssh-server &&\
dnf clean all
# copy oc client binary from oc-build image
COPY --from=oc-build /tmp/oc/oc /usr/bin/oc
COPY --from=oc-build /tmp/kubevirt/virtctl /usr/bin/virtctl
# krkn build
RUN git clone https://github.com/krkn-chaos/krkn.git /home/krkn/kraken && \
mkdir -p /home/krkn/.kube
RUN mkdir -p /home/krkn/.ssh && \
chmod 700 /home/krkn/.ssh
WORKDIR /home/krkn/kraken
# default behaviour will be to build main
# if it is a PR trigger the PR itself will be checked out
RUN if [ -n "$PR_NUMBER" ]; then git fetch origin pull/${PR_NUMBER}/head:pr-${PR_NUMBER} && git checkout pr-${PR_NUMBER};fi
# if it is a TAG trigger checkout the tag
RUN if [ -n "$TAG" ]; then git checkout "$TAG";fi
RUN python3.11 -m ensurepip --upgrade --default-pip
RUN python3.11 -m pip install --upgrade pip setuptools==78.1.1
# removes the vulnerable versions of setuptools and pip
RUN rm -rf "$(pip cache dir)"
RUN rm -rf /tmp/*
RUN rm -rf /usr/local/lib/python3.11/ensurepip/_bundled
RUN pip3.11 install -r requirements.txt
RUN pip3.11 install jsonschema
LABEL krknctl.title.global="Krkn Base Image"
LABEL krknctl.description.global="This is the krkn base image."
LABEL krknctl.input_fields.global='$KRKNCTL_INPUT'
# SSH setup script
RUN chmod +x /home/krkn/kraken/containers/setup-ssh.sh
# Main entrypoint script
RUN chmod +x /home/krkn/kraken/containers/entrypoint.sh
RUN chown -R krkn:krkn /home/krkn && chmod 755 /home/krkn
USER krkn
ENTRYPOINT ["/bin/bash", "/home/krkn/kraken/containers/entrypoint.sh"]
CMD ["--config=config/config.yaml"]
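
For reference, the `PR_NUMBER` and `TAG` build args in this (removed) Dockerfile select what gets checked out at build time. A hedged sketch of how they might be passed, with purely illustrative PR number, tag, and image names:

```bash
# Build from an open pull request or from a release tag instead of main (values illustrative).
podman build --build-arg PR_NUMBER=999 -t krkn:pr-999 .
podman build --build-arg TAG=v1.5.0 -t krkn:v1.5.0 .
```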

View File

@@ -1,14 +1,28 @@
### Kraken image
Container image gets automatically built by quay.io at [Kraken image](https://quay.io/redhat-chaos/krkn).
Container image gets automatically built by quay.io at [Kraken image](https://quay.io/chaos-kubox/krkn).
### Run containerized version
Refer [instructions](https://krkn-chaos.dev/docs/installation/) for information on how to run the containerized version of kraken.
Refer [instructions](https://github.com/chaos-kubox/krkn/blob/main/docs/installation.md#run-containerized-version) for information on how to run the containerized version of kraken.
### Run Custom Kraken Image
Refer to [instructions](https://github.com/chaos-kubox/krkn/blob/main/containers/build_own_image-README.md) for information on how to run a custom containerized version of kraken using podman.
Refer to [instructions](https://github.com/redhat-chaos/krkn/blob/main/containers/build_own_image-README.md) for information on how to run a custom containerized version of kraken using podman.
### Kraken as a KubeApp
To run containerized Kraken as a Kubernetes/OpenShift Deployment, follow these steps (a combined command sequence is sketched after the note below):
1. Configure the [config.yaml](https://github.com/chaos-kubox/krkn/blob/main/config/config.yaml) file according to your requirements.
2. Create a namespace under which you want to run the kraken pod using `kubectl create ns <namespace>`.
3. Switch to `<namespace>` namespace:
- In Kubernetes, use `kubectl config set-context --current --namespace=<namespace>`
- In OpenShift, use `oc project <namespace>`
4. Create a ConfigMap named kube-config using `kubectl create configmap kube-config --from-file=<path_to_kubeconfig>`
5. Create a ConfigMap named kraken-config using `kubectl create configmap kraken-config --from-file=<path_to_kraken_config>`
6. Create a ConfigMap named scenarios-config using `kubectl create configmap scenarios-config --from-file=<path_to_scenarios_folder>`
7. Create a service account to run the kraken pod `kubectl create serviceaccount useroot`.
8. In OpenShift, add privileges to the service account by executing `oc adm policy add-scc-to-user privileged -z useroot`.
9. Create a Job using `kubectl apply -f kraken.yml` and monitor the status using `oc get jobs` and `oc get pods`.
NOTE: It is not recommended to run Kraken inside the cluster it targets, as the pod running Kraken might itself get disrupted.
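
Putting the steps above together, a minimal sketch of the full sequence; the namespace name and the local paths for the kubeconfig, config, and scenarios are placeholders:

```bash
kubectl create ns kraken-chaos
kubectl config set-context --current --namespace=kraken-chaos   # OpenShift: oc project kraken-chaos
kubectl create configmap kube-config --from-file="$HOME/.kube/config"
kubectl create configmap kraken-config --from-file=config/
kubectl create configmap scenarios-config --from-file=scenarios/
kubectl create serviceaccount useroot
oc adm policy add-scc-to-user privileged -z useroot              # OpenShift only
kubectl apply -f kraken.yml
kubectl get jobs && kubectl get pods
```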

View File

@@ -1,13 +1,13 @@
# Building your own Kraken image
1. Git clone the Kraken repository using `git clone https://github.com/redhat-chaos/krkn.git`.
1. Git clone the Kraken repository using `git clone https://github.com/openshift-scale/kraken.git`.
2. Modify the python code and yaml files to address your needs.
3. Execute `podman build -t <new_image_name>:latest .` in the containers directory within kraken to build an image from a Dockerfile.
4. Execute `podman run --detach --name <container_name> <new_image_name>:latest` to start a container based on your new image.
# Building the Kraken image on IBM Power (ppc64le)
1. Git clone the Kraken repository using `git clone https://github.com/redhat-chaos/krkn.git` on an IBM Power Systems server.
1. Git clone the Kraken repository using `git clone https://github.com/cloud-bulldozer/kraken.git` on an IBM Power Systems server.
2. Modify the python code and yaml files to address your needs.
3. Execute `podman build -t <new_image_name>:latest -f Dockerfile-ppc64le` in the containers directory within kraken to build an image from the Dockerfile for Power.
4. Execute `podman run --detach --name <container_name> <new_image_name>:latest` to start a container based on your new image (see the example commands below).
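
For example, with illustrative image and container names and run from the `containers` directory, the x86_64 and ppc64le builds look like this:

```bash
# x86_64
podman build -t my-krkn:latest .
podman run --detach --name my-krkn my-krkn:latest

# IBM Power (ppc64le)
podman build -t my-krkn-ppc64le:latest -f Dockerfile-ppc64le .
podman run --detach --name my-krkn-ppc64le my-krkn-ppc64le:latest
```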

View File

@@ -1,5 +0,0 @@
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"
export KRKNCTL_INPUT=$(cat krknctl-input.json|tr -d "\n")
envsubst '${KRKNCTL_INPUT}' < Dockerfile.template > Dockerfile

View File

@@ -1,7 +0,0 @@
#!/bin/bash
# Run SSH setup
./containers/setup-ssh.sh
# Change to kraken directory
# Execute the main command
exec python3.9 run_kraken.py "$@"

39
containers/kraken.yml Normal file
View File

@@ -0,0 +1,39 @@
---
apiVersion: batch/v1
kind: Job
metadata:
name: kraken
spec:
parallelism: 1
completions: 1
template:
metadata:
labels:
tool: Kraken
spec:
serviceAccountName: useroot
containers:
- name: kraken
securityContext:
privileged: true
image: quay.io/chaos-kubox/krkn
command: ["/bin/sh", "-c"]
args: ["python3 run_kraken.py -c config/config.yaml"]
volumeMounts:
- mountPath: "/root/.kube"
name: config
- mountPath: "/root/kraken/config"
name: kraken-config
- mountPath: "/root/kraken/scenarios"
name: scenarios-config
restartPolicy: Never
volumes:
- name: config
configMap:
name: kube-config
- name: kraken-config
configMap:
name: kraken-config
- name: scenarios-config
configMap:
name: scenarios-config

View File

@@ -1,553 +0,0 @@
[
{
"name": "cerberus-enabled",
"short_description": "Enable Cerberus",
"description": "Enables Cerberus Support",
"variable": "CERBERUS_ENABLED",
"type": "enum",
"default": "False",
"allowed_values": "True,False",
"separator": ",",
"required": "false"
},
{
"name": "cerberus-url",
"short_description": "Cerberus URL",
"description": "Cerberus http url",
"variable": "CERBERUS_URL",
"type": "string",
"default": "http://0.0.0.0:8080",
"validator": "^(http|https):\/\/.*",
"required": "false"
},
{
"name": "distribution",
"short_description": "Orchestrator distribution",
"description": "Selects the orchestrator distribution",
"variable": "DISTRIBUTION",
"type": "enum",
"default": "openshift",
"allowed_values": "openshift,kubernetes",
"separator": ",",
"required": "false"
},
{
"name": "ssh-public-key",
"short_description": "Krkn ssh public key path",
"description": "Sets the path where krkn will search for ssh public key (in container)",
"variable": "KRKN_SSH_PUBLIC",
"type": "string",
"default": "",
"required": "false"
},
{
"name": "ssh-private-key",
"short_description": "Krkn ssh private key path",
"description": "Sets the path where krkn will search for ssh private key (in container)",
"variable": "KRKN_SSH_PRIVATE",
"type": "string",
"default": "",
"required": "false"
},
{
"name": "krkn-kubeconfig",
"short_description": "Krkn kubeconfig path",
"description": "Sets the path where krkn will search for kubeconfig (in container)",
"variable": "KRKN_KUBE_CONFIG",
"type": "string",
"default": "/home/krkn/.kube/config",
"required": "false"
},
{
"name": "wait-duration",
"short_description": "Post chaos wait duration",
"description": "waits for a certain amount of time after the scenario",
"variable": "WAIT_DURATION",
"type": "number",
"default": "1"
},
{
"name": "iterations",
"short_description": "Chaos scenario iterations",
"description": "number of times the same chaos scenario will be executed",
"variable": "ITERATIONS",
"type": "number",
"default": "1"
},
{
"name": "daemon-mode",
"short_description": "Sets krkn daemon mode",
"description": "if set the scenario will execute forever",
"variable": "DAEMON_MODE",
"type": "enum",
"allowed_values": "True,False",
"separator": ",",
"default": "False",
"required": "false"
},
{
"name": "prometheus-url",
"short_description": "Prometheus url",
"description": "Prometheus url for when running on kuberenetes",
"variable": "PROMETHEUS_URL",
"type": "string",
"default": "",
"required": "false"
},
{
"name": "prometheus-token",
"short_description": "Prometheus bearer token",
"description": "Prometheus bearer token for prometheus url authentication",
"variable": "PROMETHEUS_TOKEN",
"type": "string",
"default": "",
"required": "false"
},
{
"name": "uuid",
"short_description": "Sets krkn run uuid",
"description": "sets krkn run uuid instead of generating it",
"variable": "UUID",
"type": "string",
"default": "",
"required": "false"
},
{
"name": "capture-metrics",
"short_description": "Enables metrics capture",
"description": "Enables metrics capture",
"variable": "CAPTURE_METRICS",
"type": "enum",
"allowed_values": "True,False",
"separator": ",",
"default": "False",
"required": "false"
},
{
"name": "enable-alerts",
"short_description": "Enables cluster alerts check",
"description": "Enables cluster alerts check",
"variable": "ENABLE_ALERTS",
"type": "enum",
"allowed_values": "True,False",
"separator": ",",
"default": "False",
"required": "false"
},
{
"name": "alerts-path",
"short_description": "Cluster alerts path file (in container)",
"description": "Allows to specify a different alert file path",
"variable": "ALERTS_PATH",
"type": "string",
"default": "config/alerts.yaml",
"required": "false"
},
{
"name": "metrics-path",
"short_description": "Cluster metrics path file (in container)",
"description": "Allows to specify a different metrics file path",
"variable": "METRICS_PATH",
"type": "string",
"default": "config/metrics-aggregated.yaml",
"required": "false"
},
{
"name": "enable-es",
"short_description": "Enables elastic search data collection",
"description": "Enables elastic search data collection",
"variable": "ENABLE_ES",
"type": "enum",
"allowed_values": "True,False",
"separator": ",",
"default": "False",
"required": "false"
},
{
"name": "es-server",
"short_description": "Elasticsearch instance URL",
"description": "Elasticsearch instance URL",
"variable": "ES_SERVER",
"type": "string",
"default": "http://0.0.0.0",
"required": "false"
},
{
"name": "es-port",
"short_description": "Elasticsearch instance port",
"description": "Elasticsearch instance port",
"variable": "ES_PORT",
"type": "number",
"default": "443",
"required": "false"
},
{
"name": "es-username",
"short_description": "Elasticsearch instance username",
"description": "Elasticsearch instance username",
"variable": "ES_USERNAME",
"type": "string",
"default": "elastic",
"required": "false"
},
{
"name": "es-password",
"short_description": "Elasticsearch instance password",
"description": "Elasticsearch instance password",
"variable": "ES_PASSWORD",
"type": "string",
"default": "",
"required": "false"
},
{
"name": "es-verify-certs",
"short_description": "Enables elasticsearch TLS certificate verification",
"description": "Enables elasticsearch TLS certificate verification",
"variable": "ES_VERIFY_CERTS",
"type": "enum",
"allowed_values": "True,False",
"separator": ",",
"default": "False",
"required": "false"
},
{
"name": "es-metrics-index",
"short_description": "Elasticsearch metrics index",
"description": "Index name for metrics in Elasticsearch",
"variable": "ES_METRICS_INDEX",
"type": "string",
"default": "krkn-metrics",
"required": "false"
},
{
"name": "es-alerts-index",
"short_description": "Elasticsearch alerts index",
"description": "Index name for alerts in Elasticsearch",
"variable": "ES_ALERTS_INDEX",
"type": "string",
"default": "krkn-alerts",
"required": "false"
},
{
"name": "es-telemetry-index",
"short_description": "Elasticsearch telemetry index",
"description": "Index name for telemetry in Elasticsearch",
"variable": "ES_TELEMETRY_INDEX",
"type": "string",
"default": "krkn-telemetry",
"required": "false"
},
{
"name": "check-critical-alerts",
"short_description": "Check critical alerts",
"description": "Enables checking for critical alerts",
"variable": "CHECK_CRITICAL_ALERTS",
"type": "enum",
"allowed_values": "True,False",
"separator": ",",
"default": "False",
"required": "false"
},
{
"name": "telemetry-enabled",
"short_description": "Enable telemetry",
"description": "Enables telemetry support",
"variable": "TELEMETRY_ENABLED",
"type": "enum",
"allowed_values": "True,False",
"separator": ",",
"default": "False",
"required": "false"
},
{
"name": "telemetry-api-url",
"short_description": "Telemetry API URL",
"description": "API endpoint for telemetry data",
"variable": "TELEMETRY_API_URL",
"type": "string",
"default": "https://ulnmf9xv7j.execute-api.us-west-2.amazonaws.com/production",
"validator": "^(http|https):\/\/.*",
"required": "false"
},
{
"name": "telemetry-username",
"short_description": "Telemetry username",
"description": "Username for telemetry authentication",
"variable": "TELEMETRY_USERNAME",
"type": "string",
"default": "redhat-chaos",
"required": "false"
},
{
"name": "telemetry-password",
"short_description": "Telemetry password",
"description": "Password for telemetry authentication",
"variable": "TELEMETRY_PASSWORD",
"type": "string",
"default": "",
"required": "false"
},
{
"name": "telemetry-prometheus-backup",
"short_description": "Prometheus backup for telemetry",
"description": "Enables Prometheus backup for telemetry",
"variable": "TELEMETRY_PROMETHEUS_BACKUP",
"type": "enum",
"allowed_values": "True,False",
"separator": ",",
"default": "True",
"required": "false"
},
{
"name": "telemetry-full-prometheus-backup",
"short_description": "Full Prometheus backup",
"description": "Enables full Prometheus backup for telemetry",
"variable": "TELEMETRY_FULL_PROMETHEUS_BACKUP",
"type": "enum",
"allowed_values": "True,False",
"separator": ",",
"default": "False",
"required": "false"
},
{
"name": "telemetry-backup-threads",
"short_description": "Telemetry backup threads",
"description": "Number of threads for telemetry backup",
"variable": "TELEMETRY_BACKUP_THREADS",
"type": "number",
"default": "5",
"required": "false"
},
{
"name": "telemetry-archive-path",
"short_description": "Telemetry archive path",
"description": "Path to save telemetry archive",
"variable": "TELEMETRY_ARCHIVE_PATH",
"type": "string",
"default": "/tmp",
"required": "false"
},
{
"name": "telemetry-max-retries",
"short_description": "Telemetry max retries",
"description": "Maximum retries for telemetry operations",
"variable": "TELEMETRY_MAX_RETRIES",
"type": "number",
"default": "0",
"required": "false"
},
{
"name": "telemetry-run-tag",
"short_description": "Telemetry run tag",
"description": "Tag for telemetry run",
"variable": "TELEMETRY_RUN_TAG",
"type": "string",
"default": "chaos",
"required": "false"
},
{
"name": "telemetry-group",
"short_description": "Telemetry group",
"description": "Group name for telemetry data",
"variable": "TELEMETRY_GROUP",
"type": "string",
"default": "default",
"required": "false"
},
{
"name": "telemetry-archive-size",
"short_description": "Telemetry archive size",
"description": "Maximum size for telemetry archives",
"variable": "TELEMETRY_ARCHIVE_SIZE",
"type": "number",
"default": "1000",
"required": "false"
},
{
"name": "telemetry-logs-backup",
"short_description": "Telemetry logs backup",
"description": "Enables logs backup for telemetry",
"variable": "TELEMETRY_LOGS_BACKUP",
"type": "enum",
"allowed_values": "True,False",
"separator": ",",
"default": "False",
"required": "false"
},
{
"name": "telemetry-filter-pattern",
"short_description": "Telemetry filter pattern",
"description": "Filter pattern for telemetry logs",
"variable": "TELEMETRY_FILTER_PATTERN",
"type": "string",
"default": "[\"(\\\\w{3}\\\\s\\\\d{1,2}\\\\s\\\\d{2}:\\\\d{2}:\\\\d{2}\\\\.\\\\d+).+\",\"kinit (\\\\d+/\\\\d+/\\\\d+\\\\s\\\\d{2}:\\\\d{2}:\\\\d{2})\\\\s+\",\"(\\\\d{4}-\\\\d{2}-\\\\d{2}T\\\\d{2}:\\\\d{2}:\\\\d{2}\\\\.\\\\d+Z).+\"]",
"required": "false"
},
{
"name": "telemetry-cli-path",
"short_description": "Telemetry CLI path (oc)",
"description": "Path to telemetry CLI tool (oc)",
"variable": "TELEMETRY_CLI_PATH",
"type": "string",
"default": "",
"required": "false"
},
{
"name": "telemetry-events-backup",
"short_description": "Telemetry events backup",
"description": "Enables events backup for telemetry",
"variable": "TELEMETRY_EVENTS_BACKUP",
"type": "enum",
"allowed_values": "True,False",
"separator": ",",
"default": "True",
"required": "false"
},
{
"name": "health-check-interval",
"short_description": "Heath check interval",
"description": "How often to check the health check urls",
"variable": "HEALTH_CHECK_INTERVAL",
"type": "number",
"default": "2",
"required": "false"
},
{
"name": "health-check-url",
"short_description": "Health check url",
"description": "Url to check the health of",
"variable": "HEALTH_CHECK_URL",
"type": "string",
"default": "",
"required": "false"
},
{
"name": "health-check-auth",
"short_description": "Health check authentication tuple",
"description": "Authentication tuple to authenticate into health check URL",
"variable": "HEALTH_CHECK_AUTH",
"type": "string",
"default": "",
"required": "false"
},
{
"name": "health-check-bearer-token",
"short_description": "Health check bearer token",
"description": "Bearer token to authenticate into health check URL",
"variable": "HEALTH_CHECK_BEARER_TOKEN",
"type": "string",
"default": "",
"required": "false"
},
{
"name": "health-check-exit",
"short_description": "Health check exit on failure",
"description": "Exit on failure when health check URL is not able to connect",
"variable": "HEALTH_CHECK_EXIT_ON_FAILURE",
"type": "enum",
"allowed_values": "True,False",
"separator": ",",
"default": "False",
"required": "false"
},
{
"name": "health-check-verify",
"short_description": "SSL Verification of health check url",
"description": "SSL Verification to authenticate into health check URL",
"variable": "HEALTH_CHECK_VERIFY",
"type": "enum",
"allowed_values": "True,False",
"separator": ",",
"default": "False",
"required": "false"
},
{
"name": "kubevirt-check-interval",
"short_description": "Kube Virt check interval",
"description": "How often to check the kube virt check Vms ssh status",
"variable": "KUBE_VIRT_CHECK_INTERVAL",
"type": "number",
"default": "2",
"required": "false"
},
{
"name": "kubevirt-namespace",
"short_description": "KubeVirt namespace to check",
"description": "KubeVirt namespace to check the health of",
"variable": "KUBE_VIRT_NAMESPACE",
"type": "string",
"default": "",
"required": "false"
},
{
"name": "kubevirt-name",
"short_description": "KubeVirt regex names to watch",
"description": "KubeVirt regex names to check VMs",
"variable": "KUBE_VIRT_NAME",
"type": "string",
"default": "",
"required": "false"
},
{
"name": "kubevirt-only-failures",
"short_description": "KubeVirt checks only report if failure occurs",
"description": "KubeVirt checks only report if failure occurs",
"variable": "KUBE_VIRT_FAILURES",
"type": "enum",
"allowed_values": "True,False,true,false",
"separator": ",",
"default": "False",
"required": "false"
},
{
"name": "kubevirt-disconnected",
"short_description": "KubeVirt checks in disconnected mode",
"description": "KubeVirt checks in disconnected mode, bypassing the clusters Api",
"variable": "KUBE_VIRT_DISCONNECTED",
"type": "enum",
"allowed_values": "True,False,true,false",
"separator": ",",
"default": "False",
"required": "false"
},
{
"name": "kubevirt-ssh-node",
"short_description": "KubeVirt node to ssh from",
"description": "KubeVirt node to ssh from, should be available whole chaos run",
"variable": "KUBE_VIRT_SSH_NODE",
"type": "string",
"default": "",
"required": "false"
},
{
"name": "kubevirt-exit-on-failure",
"short_description": "KubeVirt fail if failed vms at end of run",
"description": "KubeVirt fails run if vms still have false status",
"variable": "KUBE_VIRT_EXIT_ON_FAIL",
"type": "enum",
"allowed_values": "True,False,true,false",
"separator": ",",
"default": "False",
"required": "false"
},
{
"name": "kubevirt-node-node",
"short_description": "KubeVirt node to filter vms on",
"description": "Only track VMs in KubeVirt on given node name",
"variable": "KUBE_VIRT_NODE_NAME",
"type": "string",
"default": "",
"required": "false"
},
{
"name": "krkn-debug",
"short_description": "Krkn debug mode",
"description": "Enables debug mode for Krkn",
"variable": "KRKN_DEBUG",
"type": "enum",
"allowed_values": "True,False",
"separator": ",",
"default": "False",
"required": "false"
}
]
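
Each entry above binds a flag name to a container environment variable (`variable`) with a default and an optional validator. As a rough sketch only, and assuming the tooling ultimately injects these as environment variables when it starts the container, a direct `podman` invocation with a few of them overridden might look like this (values, mount path, and image tag are illustrative):

```bash
podman run --rm \
  -e WAIT_DURATION=60 \
  -e ITERATIONS=3 \
  -e DAEMON_MODE=False \
  -e CHECK_CRITICAL_ALERTS=True \
  -v ~/.kube/config:/home/krkn/.kube/config:Z \
  quay.io/chaos-kubox/krkn:latest
```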

Some files were not shown because too many files have changed in this diff.