Mirror of https://github.com/krkn-chaos/krkn.git (synced 2026-03-14 15:41:11 +00:00)

Compare commits: v2.0.10 ... custom_wei (234 commits)
| Author | SHA1 | Date |
|---|---|---|
| | b9d7c8ba12 | |
| | e8075743ab | |
| | ec5511b2db | |
| | 4e7dca9474 | |
| | edf0f3d1c9 | |
| | 8c9bce6987 | |
| | 5608482f1b | |
| | a14d3955a6 | |
| | f655ec1a73 | |
| | dfc350ac03 | |
| | c474b810b2 | |
| | 072e8d0e87 | |
| | aee61061ac | |
| | 544cac8bbb | |
| | 49b1affdb8 | |
| | c1dd43fe87 | |
| | 8dad2a3996 | |
| | cebc60f5a8 | |
| | 2065443622 | |
| | b6ef7fa052 | |
| | 4f305e78aa | |
| | b17e933134 | |
| | beea484597 | |
| | 0222b0f161 | |
| | f7e674d5ad | |
| | 7aea12ce6c | |
| | 625e1e90cf | |
| | a9f1ce8f1b | |
| | 66e364e293 | |
| | 898ce76648 | |
| | 4a0f4e7cab | |
| | 819191866d | |
| | 37ca4bbce7 | |
| | b9dd4e40d3 | |
| | 3fd249bb88 | |
| | 773107245c | |
| | 05bc201528 | |
| | 9a316550e1 | |
| | 9c261e2599 | |
| | 0cc82dc65d | |
| | 269e21e9eb | |
| | d0dbe3354a | |
| | 4a0686daf3 | |
| | 822bebac0c | |
| | a13150b0f5 | |
| | 0443637fe1 | |
| | 36585630f2 | |
| | 1401724312 | |
| | fa204a515c | |
| | b3a5fc2d53 | |
| | 05600b62b3 | |
| | 126599e02c | |
| | b3d6a19d24 | |
| | 65100f26a7 | |
| | 10b6e4663e | |
| | ce52183a26 | |
| | e9ab3b47b3 | |
| | 3e14fe07b7 | |
| | d9271a4bcc | |
| | 850930631e | |
| | 15eee80c55 | |
| | ff3c4f5313 | |
| | 4c74df301f | |
| | b60b66de43 | |
| | 2458022248 | |
| | 18385cba2b | |
| | e7fa6bdebc | |
| | c3f6b1a7ff | |
| | f2ba8b85af | |
| | ba3fdea403 | |
| | 42d18a8e04 | |
| | 4b3617bd8a | |
| | eb7a1e243c | |
| | 197ce43f9a | |
| | eecdeed73c | |
| | ef606d0f17 | |
| | 9981c26304 | |
| | 4ebfc5dde5 | |
| | 4527d073c6 | |
| | 93d6967331 | |
| | b462c46b28 | |
| | ab4ae85896 | |
| | 6acd6f9bd3 | |
| | 787759a591 | |
| | 957cb355be | |
| | 35609484d4 | |
| | 959337eb63 | |
| | f4bdbff9dc | |
| | 954202cab7 | |
| | a373dcf453 | |
| | d0c604a516 | |
| | 82582f5bc3 | |
| | 37f0f1eb8b | |
| | d2eab21f95 | |
| | d84910299a | |
| | 48f19c0a0e | |
| | eb86885bcd | |
| | 967fd14bd7 | |
| | 5cefe80286 | |
| | 9ee76ce337 | |
| | fd3e7ee2c8 | |
| | c85c435b5d | |
| | d5284ace25 | |
| | c3098ec80b | |
| | 6629c7ec33 | |
| | fb6af04b09 | |
| | dc1215a61b | |
| | f74aef18f8 | |
| | 166204e3c5 | |
| | fc7667aef1 | |
| | 3eea42770f | |
| | 77a46e3869 | |
| | b801308d4a | |
| | 97f4c1fd9c | |
| | c54390d8b1 | |
| | 543729b18a | |
| | a0ea4dc749 | |
| | a5459792ef | |
| | d434bb26fa | |
| | fee41d404e | |
| | 8663ee8893 | |
| | a072f0306a | |
| | 8221392356 | |
| | 671fc581dd | |
| | 11508ce017 | |
| | 0d78139fb6 | |
| | a3baffe8ee | |
| | 438b08fcd5 | |
| | 9b930a02a5 | |
| | 194e3b87ee | |
| | 8c05e44c23 | |
| | 88f8cf49f1 | |
| | 015ba4d90d | |
| | 26fdbef144 | |
| | d77e6dc79c | |
| | 2885645e77 | |
| | 84169e2d4e | |
| | 05bc404d32 | |
| | e8fd432fc5 | |
| | ec05675e3a | |
| | c91648d35c | |
| | 24aa9036b0 | |
| | 816363d151 | |
| | 90c52f907f | |
| | 4f250c9601 | |
| | 6480adc00a | |
| | 5002f210ae | |
| | 62c5afa9a2 | |
| | c109fc0b17 | |
| | fff675f3dd | |
| | c125e5acf7 | |
| | ca6995a1a1 | |
| | 50cf91ac9e | |
| | 11069c6982 | |
| | 106d9bf1ae | |
| | 17f832637c | |
| | 0e5c8c55a4 | |
| | 9d9a6f9b80 | |
| | f8fe2ae5b7 | |
| | 77b1dd32c7 | |
| | 9df727ccf5 | |
| | 70c8fec705 | |
| | 0731144a6b | |
| | 9337052e7b | |
| | dc8d7ad75b | |
| | 1cc44e1f18 | |
| | c8190fd1c1 | |
| | 9078b35e46 | |
| | e6b1665aa1 | |
| | c56819365c | |
| | 6a657576cb | |
| | f04f1f1101 | |
| | bddbd42f8c | |
| | 630dbd805b | |
| | 10d26ba50e | |
| | d47286ae21 | |
| | 890e3012dd | |
| | d0dafa872d | |
| | 149eb8fcd3 | |
| | 4c462a8971 | |
| | 5bdbf622c3 | |
| | 0dcb901da1 | |
| | 6e94df9cfc | |
| | 87c2b3c8fd | |
| | 7e4b2aff65 | |
| | 27f0845182 | |
| | 4c9cd5bced | |
| | 075dbd10c7 | |
| | e080ad2ee2 | |
| | 693520f306 | |
| | bf909a7c18 | |
| | abbcfe09ec | |
| | 32fb6eec07 | |
| | 608b7c847f | |
| | edd0159251 | |
| | cf9f7702ed | |
| | cfe624f153 | |
| | 62f50db195 | |
| | aee838d3ac | |
| | 3b4d8a13f9 | |
| | a86bb6ab95 | |
| | 7f0110972b | |
| | 126f4ebb35 | |
| | 83d99bbb02 | |
| | 2624102d65 | |
| | 02587bcbe6 | |
| | c51bf04f9e | |
| | 41195b1a60 | |
| | ab80acbee7 | |
| | 3573d13ea9 | |
| | 9c5251d52f | |
| | a0bba27edc | |
| | 0d0143d1e0 | |
| | 0004c05f81 | |
| | 57a747a34a | |
| | 22108ae4e7 | |
| | cecaa1eda3 | |
| | 5450ecb914 | |
| | cad6b68f43 | |
| | 0eba329305 | |
| | ce8593f2f0 | |
| | 9061ddbb5b | |
| | dd4d0d0389 | |
| | 0cabe5e91d | |
| | 32fe0223ff | |
| | a25736ad08 | |
| | 440890d252 | |
| | 69bf20fc76 | |
| | 2a42a2dc31 | |
| | 21ab8d475d | |
| | b024cfde19 | |
| | c7e068a562 | |
| | 64cfd2ca4d | |
| | 9cb701a616 | |
.coveragerc (Normal file, 5 lines)
@@ -0,0 +1,5 @@
[run]
omit =
    tests/*
    krkn/tests/**
    CI/tests_v2/*
.github/CODEOWNERS (vendored, Normal file, 1 line)
@@ -0,0 +1 @@
* @paigerube14 @tsebastiani @chaitanyaenr
.github/ISSUE_TEMPLATE/bug_report.md (vendored, Normal file, 43 lines)
@@ -0,0 +1,43 @@
---
name: Bug report
about: Create a report of an issue
title: "[BUG]"
labels: bug
---

# Bug Description

## **Describe the bug**

A clear and concise description of what the bug is.

## **To Reproduce**

Any specific steps used to reproduce the behavior

### Scenario File
Scenario file(s) that were specified in your config file (can be starred (*) with confidential information)
```yaml
<config>

```

### Config File
Config file you used when error was seen (the default used is config/config.yaml)

```yaml
<config>

```

## **Expected behavior**

A clear and concise description of what you expected to happen.

## **Krkn Output**

Krkn output to help show your problem

## **Additional context**

Add any other context about the problem
.github/ISSUE_TEMPLATE/feature.md (vendored, Normal file, 16 lines)
@@ -0,0 +1,16 @@
---
name: New Feature Request
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: ''
---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to see added/changed. Ex. new parameter in [xxx] scenario, new scenario that does [xxx]

**Additional context**
Add any other context about the feature request here.
.github/PULL_REQUEST_TEMPLATE.md (vendored, Normal file, 47 lines)
@@ -0,0 +1,47 @@
# Type of change

- [ ] Refactor
- [ ] New feature
- [ ] Bug fix
- [ ] Optimization

# Description
<!-- Provide a brief description of the changes made in this PR. -->

## Related Tickets & Documents
If no related issue exists, please create one and start the conversation on the wants of the change.

- Related Issue #:
- Closes #:

# Documentation
- [ ] **Is documentation needed for this update?**

If checked, a documentation PR must be created and merged in the [website repository](https://github.com/krkn-chaos/website/).

## Related Documentation PR (if applicable)
<!-- Add the link to the corresponding documentation PR in the website repository -->

# Checklist before requesting a review
- [ ] Ensure the changes and proposed solution have been discussed in the relevant issue and have received acknowledgment from the community or maintainers. See [contributing guidelines](https://krkn-chaos.dev/docs/contribution-guidelines/)
See [testing your changes](https://krkn-chaos.dev/docs/developers-guide/testing-changes/) and run on any Kubernetes or OpenShift cluster to validate your changes
- [ ] I have performed a self-review of my code by running krkn and specific scenario
- [ ] If it is a core feature, I have added thorough unit tests with above 80% coverage

*REQUIRED*:
Description of combination of tests performed and output of run

```bash
python run_kraken.py
...
<---insert test results output--->
```

OR


```bash
python -m coverage run -a -m unittest discover -s tests -v
...
<---insert test results output--->
```
.github/release-template.md (vendored, Normal file, 7 lines)
@@ -0,0 +1,7 @@
## Release {VERSION}

### Download Artifacts
- 📦 Krkn sources (noarch): [krkn-{VERSION}-src.tar.gz](https://krkn-chaos.gateway.scarf.sh/krkn-src-{VERSION}.tar.gz)

### Changes
{CHANGES}
.github/workflows/release.yml (vendored, Normal file, 60 lines)
@@ -0,0 +1,60 @@
name: Create Release
on:
  push:
    tags:
      - 'v*'
jobs:
  release:
    permissions:
      contents: write
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: calculate previous tag
        run: |
          git fetch --tags origin
          PREVIOUS_TAG=$(git tag --sort=-creatordate | sed -n '2 p')
          echo $PREVIOUS_TAG
          echo "PREVIOUS_TAG=$PREVIOUS_TAG" >> "$GITHUB_ENV"

      - name: generate release notes from template
        id: release-notes
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          NOTES=$(gh api \
            --method POST \
            -H "Accept: application/vnd.github+json" \
            -H "X-GitHub-Api-Version: 2022-11-28" \
            /repos/krkn-chaos/krkn/releases/generate-notes \
            -f "tag_name=${{ github.ref_name }}" -f "target_commitish=main" -f "previous_tag_name=${{ env.PREVIOUS_TAG }}" | jq -r .body)
          echo "NOTES<<EOF" >> $GITHUB_ENV
          echo "$NOTES" >> $GITHUB_ENV
          echo "EOF" >> $GITHUB_ENV

      - name: replace placeholders in template
        run: |
          echo "${{ env.NOTES }}"
          TEMPLATE=$(cat .github/release-template.md)
          VERSION=${{ github.ref_name }}
          NOTES="${{ env.NOTES }}"
          OUTPUT=${TEMPLATE//\{VERSION\}/$VERSION}
          OUTPUT=${OUTPUT//\{CHANGES\}/$NOTES}
          echo "$OUTPUT" > release-notes.md
      - name: create release
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          gh release create ${{ github.ref_name }} --title "${{ github.ref_name }}" -F release-notes.md

      - name: Install Syft
        run: |
          curl -sSfL https://raw.githubusercontent.com/anchore/syft/main/install.sh | sudo sh -s -- -b /usr/local/bin

      - name: Generate SBOM
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          syft . --scope all-layers --output cyclonedx-json > sbom.json
          echo "SBOM generated successfully!"
          gh release upload ${{ github.ref_name }} sbom.json
.github/workflows/require-docs.yml (vendored, Normal file, 45 lines)
@@ -0,0 +1,45 @@
name: Require Documentation Update
on:
  pull_request:
    types: [opened, edited, synchronize]
    branches:
      - main
jobs:
  check-docs:
    name: Check Documentation Update
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Check if Documentation is Required
        id: check_docs
        run: |
          echo "Checking PR body for documentation checkbox..."
          # Read the PR body from the GitHub event payload
          if echo "${{ github.event.pull_request.body }}" | grep -qi '\[x\].*documentation needed'; then
            echo "Documentation required detected."
            echo "docs_required=true" >> $GITHUB_OUTPUT
          else
            echo "Documentation not required."
            echo "docs_required=false" >> $GITHUB_OUTPUT
          fi

      - name: Enforce Documentation Update (if required)
        if: steps.check_docs.outputs.docs_required == 'true'
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          # Retrieve feature branch and repository owner from the GitHub context
          FEATURE_BRANCH="${{ github.head_ref }}"
          REPO_OWNER="${{ github.repository_owner }}"
          WEBSITE_REPO="website"
          echo "Searching for a merged documentation PR for feature branch: $FEATURE_BRANCH in $REPO_OWNER/$WEBSITE_REPO..."
          MERGED_PR=$(gh pr list --repo "$REPO_OWNER/$WEBSITE_REPO" --state merged --json headRefName,title,url | jq -r \
            --arg FEATURE_BRANCH "$FEATURE_BRANCH" '.[] | select(.title | contains($FEATURE_BRANCH)) | .url')
          if [[ -z "$MERGED_PR" ]]; then
            echo ":x: Documentation PR for branch '$FEATURE_BRANCH' is required and has not been merged."
            exit 1
          else
            echo ":white_check_mark: Found merged documentation PR: $MERGED_PR"
          fi
.github/workflows/stale.yml (vendored, Normal file, 52 lines)
@@ -0,0 +1,52 @@
name: Manage Stale Issues and Pull Requests

on:
  schedule:
    # Run daily at 1:00 AM UTC
    - cron: '0 1 * * *'
  workflow_dispatch:

permissions:
  issues: write
  pull-requests: write

jobs:
  stale:
    name: Mark and Close Stale Issues and PRs
    runs-on: ubuntu-latest
    steps:
      - name: Mark and close stale issues and PRs
        uses: actions/stale@v9
        with:
          days-before-issue-stale: 60
          days-before-issue-close: 14
          stale-issue-label: 'stale'
          stale-issue-message: |
            This issue has been automatically marked as stale because it has not had any activity in the last 60 days.
            It will be closed in 14 days if no further activity occurs.
            If this issue is still relevant, please leave a comment or remove the stale label.
            Thank you for your contributions to krkn!
          close-issue-message: |
            This issue has been automatically closed due to inactivity.
            If you believe this issue is still relevant, please feel free to reopen it or create a new issue with updated information.
            Thank you for your understanding!
          close-issue-reason: 'not_planned'

          days-before-pr-stale: 90
          days-before-pr-close: 14
          stale-pr-label: 'stale'
          stale-pr-message: |
            This pull request has been automatically marked as stale because it has not had any activity in the last 90 days.
            It will be closed in 14 days if no further activity occurs.
            If this PR is still relevant, please rebase it, address any pending reviews, or leave a comment.
            Thank you for your contributions to krkn!
          close-pr-message: |
            This pull request has been automatically closed due to inactivity.
            If you believe this PR is still relevant, please feel free to reopen it or create a new pull request with updated changes.
            Thank you for your understanding!

          # Exempt labels
          exempt-issue-labels: 'bug,enhancement,good first issue'
          exempt-pr-labels: 'pending discussions,hold'

          remove-stale-when-updated: true
.github/workflows/tests.yml (vendored, 130 lines)
@@ -14,46 +14,51 @@ jobs:
        uses: actions/checkout@v3
      - name: Create multi-node KinD cluster
        uses: redhat-chaos/actions/kind@main
      - name: Install Helm & add repos
        run: |
          curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
          helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
          helm repo add stable https://charts.helm.sh/stable
          helm repo update
      - name: Deploy prometheus & Port Forwarding
        uses: redhat-chaos/actions/prometheus@main
      - name: Deploy Elasticsearch
        with:
          ELASTIC_PORT: ${{ env.ELASTIC_PORT }}
          RUN_ID: ${{ github.run_id }}
        uses: redhat-chaos/actions/elastic@main
      - name: Download elastic password
        uses: actions/download-artifact@v4
        with:
          name: elastic_password_${{ github.run_id }}
      - name: Set elastic password on env
        run: |
          kubectl create namespace prometheus-k8s
          helm install \
            --wait --timeout 360s \
            kind-prometheus \
            prometheus-community/kube-prometheus-stack \
            --namespace prometheus-k8s \
            --set prometheus.service.nodePort=30000 \
            --set prometheus.service.type=NodePort \
            --set grafana.service.nodePort=31000 \
            --set grafana.service.type=NodePort \
            --set alertmanager.service.nodePort=32000 \
            --set alertmanager.service.type=NodePort \
            --set prometheus-node-exporter.service.nodePort=32001 \
            --set prometheus-node-exporter.service.type=NodePort

          SELECTOR=`kubectl -n prometheus-k8s get service kind-prometheus-kube-prome-prometheus -o wide --no-headers=true | awk '{ print $7 }'`
          POD_NAME=`kubectl -n prometheus-k8s get pods --selector="$SELECTOR" --no-headers=true | awk '{ print $1 }'`
          kubectl -n prometheus-k8s port-forward $POD_NAME 9090:9090 &
          sleep 5
          ELASTIC_PASSWORD=$(cat elastic_password.txt)
          echo "ELASTIC_PASSWORD=$ELASTIC_PASSWORD" >> "$GITHUB_ENV"
      - name: Install Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.9'
          python-version: '3.11'
          architecture: 'x64'
      - name: Install environment
        run: |
          sudo apt-get install build-essential python3-dev
          pip install --upgrade pip
          pip install -r requirements.txt
          pip install coverage

      - name: Deploy test workloads
        run: |
          # es_pod_name=$(kubectl get pods -l "app=elasticsearch-master" -o name)
          # echo "POD_NAME: $es_pod_name"
          # kubectl --namespace default port-forward $es_pod_name 9200 &
          # prom_name=$(kubectl get pods -n monitoring -l "app.kubernetes.io/name=prometheus" -o name)
          # kubectl --namespace monitoring port-forward $prom_name 9090 &

          # Wait for Elasticsearch to be ready
          echo "Waiting for Elasticsearch to be ready..."
          for i in {1..30}; do
            if curl -k -s -u elastic:$ELASTIC_PASSWORD https://localhost:9200/_cluster/health > /dev/null 2>&1; then
              echo "Elasticsearch is ready!"
              break
            fi
            echo "Attempt $i: Elasticsearch not ready yet, waiting..."
            sleep 2
          done
          kubectl apply -f CI/templates/outage_pod.yaml
          kubectl wait --for=condition=ready pod -l scenario=outage --timeout=300s
          kubectl apply -f CI/templates/container_scenario_pod.yaml
@@ -63,31 +68,43 @@ jobs:
          kubectl wait --for=condition=ready pod -l scenario=time-skew --timeout=300s
          kubectl apply -f CI/templates/service_hijacking.yaml
          kubectl wait --for=condition=ready pod -l "app.kubernetes.io/name=proxy" --timeout=300s
          kubectl apply -f CI/legacy/scenarios/volume_scenario.yaml
          kubectl wait --for=condition=ready pod kraken-test-pod -n kraken --timeout=300s
      - name: Get Kind nodes
        run: |
          kubectl get nodes --show-labels=true
      # Pull request only steps
      - name: Run unit tests
        if: github.event_name == 'pull_request'
        run: python -m coverage run -a -m unittest discover -s tests -v

      - name: Setup Pull Request Functional Tests
        if: |
          github.event_name == 'pull_request'
      - name: Setup Functional Tests
        run: |
          yq -i '.kraken.port="8081"' CI/config/common_test_config.yaml
          yq -i '.kraken.signal_address="0.0.0.0"' CI/config/common_test_config.yaml
          yq -i '.kraken.performance_monitoring="localhost:9090"' CI/config/common_test_config.yaml
          echo "test_service_hijacking" > ./CI/tests/functional_tests
          echo "test_app_outages" >> ./CI/tests/functional_tests
          echo "test_container" >> ./CI/tests/functional_tests
          echo "test_namespace" >> ./CI/tests/functional_tests
          echo "test_net_chaos" >> ./CI/tests/functional_tests
          echo "test_time" >> ./CI/tests/functional_tests
          echo "test_arca_cpu_hog" >> ./CI/tests/functional_tests
          echo "test_arca_memory_hog" >> ./CI/tests/functional_tests
          echo "test_arca_io_hog" >> ./CI/tests/functional_tests

          yq -i '.elastic.elastic_port=9200' CI/config/common_test_config.yaml
          yq -i '.elastic.elastic_url="https://localhost"' CI/config/common_test_config.yaml
          yq -i '.elastic.enable_elastic=False' CI/config/common_test_config.yaml
          yq -i '.elastic.password="${{env.ELASTIC_PASSWORD}}"' CI/config/common_test_config.yaml
          yq -i '.performance_monitoring.prometheus_url="http://localhost:9090"' CI/config/common_test_config.yaml
          echo "test_app_outages" > ./CI/tests/functional_tests
          echo "test_container" >> ./CI/tests/functional_tests
          echo "test_cpu_hog" >> ./CI/tests/functional_tests
          echo "test_customapp_pod" >> ./CI/tests/functional_tests
          echo "test_io_hog" >> ./CI/tests/functional_tests
          echo "test_memory_hog" >> ./CI/tests/functional_tests
          echo "test_namespace" >> ./CI/tests/functional_tests
          echo "test_net_chaos" >> ./CI/tests/functional_tests
          echo "test_node" >> ./CI/tests/functional_tests
          echo "test_service_hijacking" >> ./CI/tests/functional_tests
          echo "test_pod_network_filter" >> ./CI/tests/functional_tests
          echo "test_pod_server" >> ./CI/tests/functional_tests
          echo "test_time" >> ./CI/tests/functional_tests
          echo "test_node_network_chaos" >> ./CI/tests/functional_tests
          echo "test_pod_network_chaos" >> ./CI/tests/functional_tests
          echo "test_cerberus_unhealthy" >> ./CI/tests/functional_tests
          echo "test_pod_error" >> ./CI/tests/functional_tests
          echo "test_pod" >> ./CI/tests/functional_tests
          # echo "test_pvc" >> ./CI/tests/functional_tests


      # Push on main only steps + all other functional to collect coverage
      # for the badge
@@ -101,22 +118,9 @@
      - name: Setup Post Merge Request Functional Tests
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        run: |
          yq -i '.kraken.port="8081"' CI/config/common_test_config.yaml
          yq -i '.kraken.signal_address="0.0.0.0"' CI/config/common_test_config.yaml
          yq -i '.kraken.performance_monitoring="localhost:9090"' CI/config/common_test_config.yaml
          yq -i '.telemetry.username="${{secrets.TELEMETRY_USERNAME}}"' CI/config/common_test_config.yaml
          yq -i '.telemetry.password="${{secrets.TELEMETRY_PASSWORD}}"' CI/config/common_test_config.yaml
          echo "test_telemetry" > ./CI/tests/functional_tests
          echo "test_service_hijacking" >> ./CI/tests/functional_tests
          echo "test_app_outages" >> ./CI/tests/functional_tests
          echo "test_container" >> ./CI/tests/functional_tests
          echo "test_namespace" >> ./CI/tests/functional_tests
          echo "test_net_chaos" >> ./CI/tests/functional_tests
          echo "test_time" >> ./CI/tests/functional_tests
          echo "test_arca_cpu_hog" >> ./CI/tests/functional_tests
          echo "test_arca_memory_hog" >> ./CI/tests/functional_tests
          echo "test_arca_io_hog" >> ./CI/tests/functional_tests

          echo "test_telemetry" >> ./CI/tests/functional_tests
      # Final common steps
      - name: Run Functional tests
        env:
@@ -126,33 +130,40 @@
          cat ./CI/results.markdown >> $GITHUB_STEP_SUMMARY
          echo >> $GITHUB_STEP_SUMMARY
      - name: Upload CI logs
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: ci-logs
          path: CI/out
          if-no-files-found: error
      - name: Collect coverage report
        if: ${{ always() }}
        run: |
          python -m coverage html
          python -m coverage json
      - name: Publish coverage report to job summary
        if: ${{ always() }}
        run: |
          pip install html2text
          html2text --ignore-images --ignore-links -b 0 htmlcov/index.html >> $GITHUB_STEP_SUMMARY
      - name: Upload coverage data
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: coverage
          path: htmlcov
          if-no-files-found: error
      - name: Upload json coverage
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: coverage.json
          path: coverage.json
          if-no-files-found: error
      - name: Check CI results
        run: grep Fail CI/results.markdown && false || true
        if: ${{ always() }}
        run: "! grep Fail CI/results.markdown"

  badge:
    permissions:
      contents: write
@@ -175,7 +186,7 @@
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: 3.9
          python-version: '3.11'
      - name: Copy badge on GitHub Page Repo
        env:
          COLOR: yellow
@@ -192,7 +203,8 @@
          cd krkn-lib-docs
          git add .
          git config user.name "krkn-chaos"
          git config user.email "<>"
          git config user.email "krkn-actions@users.noreply.github.com"
          git commit -m "[KRKN] Coverage Badge ${GITHUB_REF##*/}" || echo "no changes to commit"
          git push
.github/workflows/tests_v2.yml (vendored, Normal file, 53 lines)
@@ -0,0 +1,53 @@
name: Tests v2 (pytest functional)
on:
  pull_request:
  push:
    branches:
      - main
jobs:
  tests-v2:
    name: Tests v2 (pytest functional)
    runs-on: ubuntu-latest
    steps:
      - name: Check out code
        uses: actions/checkout@v3

      - name: Create KinD cluster
        uses: redhat-chaos/actions/kind@main

      - name: Pre-load test images into KinD
        run: |
          docker pull nginx:alpine
          kind load docker-image nginx:alpine
          docker pull quay.io/krkn-chaos/krkn:tools
          kind load docker-image quay.io/krkn-chaos/krkn:tools

      - name: Install Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.11'
          architecture: 'x64'
          cache: 'pip'

      - name: Install dependencies
        run: |
          sudo apt-get install -y build-essential python3-dev
          pip install --upgrade pip
          pip install -r requirements.txt
          pip install -r CI/tests_v2/requirements.txt

      - name: Run tests_v2
        run: |
          KRKN_TEST_COVERAGE=1 python -m pytest CI/tests_v2/ -v --timeout=300 --reruns=1 --reruns-delay=5 \
            --html=CI/tests_v2/report.html -n auto --junitxml=CI/tests_v2/results.xml

      - name: Upload tests_v2 artifacts
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: tests-v2-results
          path: |
            CI/tests_v2/report.html
            CI/tests_v2/results.xml
            CI/tests_v2/assets/
          if-no-files-found: ignore
.gitignore (vendored, 5 lines)
@@ -17,6 +17,7 @@ __pycache__/*
kube-burner*
kube_burner*
recommender_*.json
resiliency*.json

# Project files
.ropeproject
@@ -64,6 +65,10 @@ CI/out/*
CI/ci_results
CI/legacy/*node.yaml
CI/results.markdown
# CI tests_v2 (pytest-html / pytest outputs)
CI/tests_v2/results.xml
CI/tests_v2/report.html
CI/tests_v2/assets/

#env
chaos/*
ADOPTERS.md (Normal file, 9 lines)
@@ -0,0 +1,9 @@
# Krkn Adopters

This is a list of organizations that have publicly acknowledged usage of Krkn and shared details of how they are leveraging it in their environment for chaos engineering use cases. Do you want to add yourself to this list? Please fork the repository and open a PR with the required change.

| Organization | Since | Website | Use-Case |
|:-|:-|:-|:-|
| MarketAxess | 2024 | https://www.marketaxess.com/ | Kraken enables us to achieve our goal of increasing the reliability of our cloud products on Kubernetes. The tool allows us to automatically run various chaos scenarios, identify resilience and performance bottlenecks, and seamlessly restore the system to its original state once scenarios finish. These chaos scenarios include pod disruptions, node (EC2) outages, simulating availability zone (AZ) outages, and filling up storage spaces like EBS and EFS. The community is highly responsive to requests and works on expanding the tool's capabilities. MarketAxess actively contributes to the project, adding features such as the ability to leverage existing network ACLs and proposing several feature improvements to enhance test coverage. |
| Red Hat Openshift | 2020 | https://www.redhat.com/ | Kraken is a highly reliable chaos testing tool used to ensure the quality and resiliency of Red Hat Openshift. The engineering team runs all the test scenarios under Kraken on different cloud platforms on both self-managed and cloud services environments prior to the release of a new version of the product. The team also contributes to the Kraken project consistently which helps the test scenarios to keep up with the new features introduced to the product. Inclusion of this test coverage has contributed to gaining the trust of new and existing customers of the product. |
| IBM | 2023 | https://www.ibm.com/ | While working on AI for Chaos Testing at IBM Research, we closely collaborated with the Kraken (Krkn) team to advance intelligent chaos engineering. Our contributions included developing AI-enabled chaos injection strategies and integrating reinforcement learning (RL)-based fault search techniques into the Krkn tool, enabling it to identify and explore system vulnerabilities more efficiently. Kraken stands out as one of the most user-friendly and effective tools for chaos engineering, and the Kraken team’s deep technical involvement played a crucial role in the success of this collaboration—helping bridge cutting-edge AI research with practical, real-world system reliability testing. |
@@ -2,26 +2,30 @@ kraken:
  distribution: kubernetes # Distribution can be kubernetes or openshift.
  kubeconfig_path: ~/.kube/config # Path to kubeconfig.
  exit_on_failure: False # Exit when a post action scenario fails.
  litmus_version: v1.13.6 # Litmus version to install.
  litmus_uninstall: False # If you want to uninstall litmus if failure.
  publish_kraken_status: True # Can be accessed at http://0.0.0.0:8081
  signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer docs/signal.md for more details
  signal_address: 0.0.0.0 # Signal listening address
  port: 8081 # Signal port
  auto_rollback: True # Enable auto rollback for scenarios.
  rollback_versions_directory: /tmp/kraken-rollback # Directory to store rollback version files.
  chaos_scenarios: # List of policies/chaos scenarios to load.
    - $scenario_type: # List of chaos pod scenarios to load.
        - $scenario_file
          $post_config
cerberus:
  cerberus_enabled: False # Enable it when cerberus is previously installed.
  cerberus_url: # When cerberus_enabled is set to True, provide the url where cerberus publishes go/no-go signal.

performance_monitoring:
  deploy_dashboards: False # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift.
  repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
  capture_metrics: False
  metrics_profile_path: config/metrics-aggregated.yaml
  prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
  prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
  uuid: # uuid for the run is generated by default if not set.
  enable_alerts: False # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error.
  alert_profile: config/alerts.yaml # Path to alert profile with the prometheus queries.
  enable_alerts: True # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
  enable_metrics: True
  alert_profile: config/alerts.yaml # Path or URL to alert profile with the prometheus queries
  metrics_profile: config/metrics-report.yaml
  check_critical_alerts: True # Path to alert profile with the prometheus queries.

tunings:
  wait_duration: 6 # Duration to wait between each chaos scenario.
@@ -32,13 +36,13 @@ telemetry:
  api_url: https://yvnn4rfoi7.execute-api.us-west-2.amazonaws.com/test # telemetry service endpoint
  username: $TELEMETRY_USERNAME # telemetry service username
  password: $TELEMETRY_PASSWORD # telemetry service password
  prometheus_namespace: 'prometheus-k8s' # prometheus namespace
  prometheus_namespace: 'monitoring' # prometheus namespace
  prometheus_pod_name: 'prometheus-kind-prometheus-kube-prome-prometheus-0' # prometheus pod_name
  prometheus_container_name: 'prometheus'
  prometheus_backup: True # enables/disables prometheus data collection
  full_prometheus_backup: False # if is set to False only the /prometheus/wal folder will be downloaded.
  backup_threads: 5 # number of telemetry download/upload threads
  archive_path: /tmp # local path where the archive files will be temporarly stored
  archive_path: /tmp # local path where the archive files will be temporarily stored
  max_retries: 0 # maximum number of upload retries (if 0 will retry forever)
  run_tag: '' # if set, this will be appended to the run folder in the bucket (useful to group the runs)
  archive_size: 10000 # the size of the prometheus data archive size in KB. The lower the size of archive is
@@ -52,8 +56,6 @@ telemetry:
  telemetry_group: "funtests"
elastic:
  enable_elastic: False
  collect_metrics: False
  collect_alerts: False
  verify_certs: False
  elastic_url: "https://192.168.39.196" # To track results in elasticsearch, give url to server here; will post telemetry details when url and index not blank
  elastic_port: 32766
@@ -62,3 +64,11 @@
  metrics_index: "krkn-metrics"
  alerts_index: "krkn-alerts"
  telemetry_index: "krkn-telemetry"

health_checks: # Utilizing health check endpoints to observe application behavior during chaos injection.
  interval: # Interval in seconds to perform health checks, default value is 2 seconds
  config: # Provide list of health check configurations for applications
    - url: # Provide application endpoint
      bearer_token: # Bearer token for authentication if any
      auth: # Provide authentication credentials (username, password) in tuple format if any, ex: ("admin","secretpassword")
      exit_on_failure: # If value is True exits when health check failed for application, values can be True/False
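The chaos_scenarios and health_checks entries in this config are templated placeholders ($scenario_type, $scenario_file, empty health-check fields). A minimal filled-in sketch might look like the following; the scenario path is the one used by CI/tests/test_cerberus_unhealthy.sh further down, while the endpoint URL and interval value are illustrative assumptions, not values from this diff:

```yaml
# Hypothetical filled-in fragment; endpoint URL and credentials are illustrative only.
kraken:
  chaos_scenarios:
    - pod_disruption_scenarios:        # $scenario_type
        - scenarios/kind/pod_etcd.yml  # $scenario_file (path used elsewhere in this CI)

health_checks:
  interval: 2                          # the documented default of 2 seconds
  config:
    - url: http://my-app.example.com/healthz  # assumed application endpoint
      bearer_token:                    # left empty: endpoint is unauthenticated
      auth:                            # or a tuple such as ("admin","secretpassword")
      exit_on_failure: False           # keep the run going even if the check fails
```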
@@ -45,15 +45,45 @@ metadata:
  name: kraken-test-pod
  namespace: kraken
spec:
  securityContext:
    fsGroup: 1001
  # initContainer to fix permissions on the mounted volume
  initContainers:
    - name: fix-permissions
      image: 'quay.io/centos7/httpd-24-centos7:centos7'
      command:
        - sh
        - -c
        - |
          echo "Setting up permissions for /home/kraken..."
          # Create the directory if it doesn't exist
          mkdir -p /home/kraken
          # Set ownership to user 1001 and group 1001
          chown -R 1001:1001 /home/kraken
          # Set permissions to allow read/write
          chmod -R 755 /home/kraken
          rm -rf /home/kraken/*
          echo "Permissions fixed. Current state:"
          ls -la /home/kraken
      volumeMounts:
        - mountPath: "/home/kraken"
          name: kraken-test-pv
      securityContext:
        runAsUser: 0 # Run as root to fix permissions
  volumes:
    - name: kraken-test-pv
      persistentVolumeClaim:
        claimName: kraken-test-pvc
  containers:
    - name: kraken-test-container
      image: 'quay.io/centos7/httpd-24-centos7:latest'
      volumeMounts:
        - mountPath: "/home/krake-dir/"
          name: kraken-test-pv
      image: 'quay.io/centos7/httpd-24-centos7:centos7'
      securityContext:
        privileged: true
        runAsUser: 1001
        runAsNonRoot: true
        allowPrivilegeEscalation: false
        capabilities:
          drop:
            - ALL
      volumeMounts:
        - mountPath: "/home/kraken"
          name: kraken-test-pv
CI/templates/mock_cerberus.yaml (Normal file, 79 lines)
@@ -0,0 +1,79 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: mock-cerberus-server
  namespace: default
data:
  server.py: |
    #!/usr/bin/env python3
    from http.server import HTTPServer, BaseHTTPRequestHandler
    import json

    class MockCerberusHandler(BaseHTTPRequestHandler):
        def do_GET(self):
            if self.path == '/':
                # Return True to indicate cluster is healthy
                self.send_response(200)
                self.send_header('Content-type', 'text/plain')
                self.end_headers()
                self.wfile.write(b'True')
            elif self.path.startswith('/history'):
                # Return empty history (no failures)
                self.send_response(200)
                self.send_header('Content-type', 'application/json')
                self.end_headers()
                response = {
                    "history": {
                        "failures": []
                    }
                }
                self.wfile.write(json.dumps(response).encode())
            else:
                self.send_response(404)
                self.end_headers()

        def log_message(self, format, *args):
            print(f"[MockCerberus] {format % args}")

    if __name__ == '__main__':
        server = HTTPServer(('0.0.0.0', 8080), MockCerberusHandler)
        print("[MockCerberus] Starting mock cerberus server on port 8080...")
        server.serve_forever()
---
apiVersion: v1
kind: Pod
metadata:
  name: mock-cerberus
  namespace: default
  labels:
    app: mock-cerberus
spec:
  containers:
    - name: mock-cerberus
      image: python:3.9-slim
      command: ["python3", "/app/server.py"]
      ports:
        - containerPort: 8080
          name: http
      volumeMounts:
        - name: server-script
          mountPath: /app
  volumes:
    - name: server-script
      configMap:
        name: mock-cerberus-server
        defaultMode: 0755
---
apiVersion: v1
kind: Service
metadata:
  name: mock-cerberus
  namespace: default
spec:
  selector:
    app: mock-cerberus
  ports:
    - protocol: TCP
      port: 8080
      targetPort: 8080
  type: ClusterIP
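As a usage sketch, a krkn config pointing at this mock would enable cerberus and set the URL to the Service above, mirroring the yq edits performed for the unhealthy variant in CI/tests/test_cerberus_unhealthy.sh below; the in-cluster DNS name is an assumption derived from the Service name and namespace:

```yaml
# Sketch only: cerberus section wired to the mock server defined above.
cerberus:
  cerberus_enabled: True
  cerberus_url: http://mock-cerberus.default.svc.cluster.local:8080
```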
CI/templates/mock_cerberus_unhealthy.yaml (Normal file, 85 lines)
@@ -0,0 +1,85 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: mock-cerberus-unhealthy-server
  namespace: default
data:
  server.py: |
    #!/usr/bin/env python3
    from http.server import HTTPServer, BaseHTTPRequestHandler
    import json

    class MockCerberusUnhealthyHandler(BaseHTTPRequestHandler):
        def do_GET(self):
            if self.path == '/':
                # Return False to indicate cluster is unhealthy
                self.send_response(200)
                self.send_header('Content-type', 'text/plain')
                self.end_headers()
                self.wfile.write(b'False')
            elif self.path.startswith('/history'):
                # Return history with failures
                self.send_response(200)
                self.send_header('Content-type', 'application/json')
                self.end_headers()
                response = {
                    "history": {
                        "failures": [
                            {
                                "component": "node",
                                "name": "test-node",
                                "timestamp": "2024-01-01T00:00:00Z"
                            }
                        ]
                    }
                }
                self.wfile.write(json.dumps(response).encode())
            else:
                self.send_response(404)
                self.end_headers()

        def log_message(self, format, *args):
            print(f"[MockCerberusUnhealthy] {format % args}")

    if __name__ == '__main__':
        server = HTTPServer(('0.0.0.0', 8080), MockCerberusUnhealthyHandler)
        print("[MockCerberusUnhealthy] Starting mock cerberus unhealthy server on port 8080...")
        server.serve_forever()
---
apiVersion: v1
kind: Pod
metadata:
  name: mock-cerberus-unhealthy
  namespace: default
  labels:
    app: mock-cerberus-unhealthy
spec:
  containers:
    - name: mock-cerberus-unhealthy
      image: python:3.9-slim
      command: ["python3", "/app/server.py"]
      ports:
        - containerPort: 8080
          name: http
      volumeMounts:
        - name: server-script
          mountPath: /app
  volumes:
    - name: server-script
      configMap:
        name: mock-cerberus-unhealthy-server
        defaultMode: 0755
---
apiVersion: v1
kind: Service
metadata:
  name: mock-cerberus-unhealthy
  namespace: default
spec:
  selector:
    app: mock-cerberus-unhealthy
  ports:
    - protocol: TCP
      port: 8080
      targetPort: 8080
  type: ClusterIP
@@ -8,9 +8,9 @@ spec:
  hostNetwork: true
  containers:
    - name: fedtools
      image: docker.io/fedora/tools
      image: quay.io/krkn-chaos/krkn:tools
      command:
        - /bin/sh
        - -c
        - |
          sleep infinity
          sleep infinity
CI/templates/pod_network_filter.yaml (Normal file, 29 lines)
@@ -0,0 +1,29 @@
apiVersion: v1
kind: Pod
metadata:
  name: pod-network-filter-test
  labels:
    app.kubernetes.io/name: pod-network-filter
spec:
  containers:
    - name: nginx
      image: quay.io/krkn-chaos/krkn-funtests:pod-network-filter
      ports:
        - containerPort: 5000
          name: pod-network-prt

---
apiVersion: v1
kind: Service
metadata:
  name: pod-network-filter-service
spec:
  selector:
    app.kubernetes.io/name: pod-network-filter
  type: NodePort
  ports:
    - name: pod-network-filter-svc
      protocol: TCP
      port: 80
      targetPort: pod-network-prt
      nodePort: 30037
@@ -8,9 +8,9 @@ spec:
  hostNetwork: true
  containers:
    - name: fedtools
      image: docker.io/fedora/tools
      image: quay.io/krkn-chaos/krkn:tools
      command:
        - /bin/sh
        - -c
        - |
          sleep infinity
          sleep infinity
@@ -13,7 +13,13 @@ function functional_test_app_outage {
  export scenario_type="application_outages_scenarios"
  export scenario_file="scenarios/openshift/app_outage.yaml"
  export post_config=""

  kubectl get services -A

  kubectl get pods
  envsubst < CI/config/common_test_config.yaml > CI/config/app_outage.yaml
  cat $scenario_file
  cat CI/config/app_outage.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/app_outage.yaml
  echo "App outage scenario test: Success"
}
@@ -1,19 +0,0 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT


function functional_test_arca_cpu_hog {
  yq -i '.input_list[0].node_selector={"kubernetes.io/hostname":"kind-worker2"}' scenarios/kube/cpu-hog/input.yaml
  export scenario_type="hog_scenarios"
  export scenario_file="scenarios/kube/cpu-hog/input.yaml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/arca_cpu_hog.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/arca_cpu_hog.yaml
  echo "Arcaflow CPU Hog: Success"
}

functional_test_arca_cpu_hog
@@ -1,19 +0,0 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT


function functional_test_arca_io_hog {
  yq -i '.input_list[0].node_selector={"kubernetes.io/hostname":"kind-worker2"}' scenarios/kube/io-hog/input.yaml
  export scenario_type="hog_scenarios"
  export scenario_file="scenarios/kube/io-hog/input.yaml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/arca_io_hog.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/arca_io_hog.yaml
  echo "Arcaflow IO Hog: Success"
}

functional_test_arca_io_hog
@@ -1,19 +0,0 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT


function functional_test_arca_memory_hog {
  yq -i '.input_list[0].node_selector={"kubernetes.io/hostname":"kind-worker2"}' scenarios/kube/memory-hog/input.yaml
  export scenario_type="hog_scenarios"
  export scenario_file="scenarios/kube/memory-hog/input.yaml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/arca_memory_hog.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/arca_memory_hog.yaml
  echo "Arcaflow Memory Hog: Success"
}

functional_test_arca_memory_hog
CI/tests/test_cerberus_unhealthy.sh (Executable file, 79 lines)
@@ -0,0 +1,79 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT

function functional_test_cerberus_unhealthy {
  echo "========================================"
  echo "Starting Cerberus Unhealthy Test"
  echo "========================================"

  # Deploy mock cerberus unhealthy server
  echo "Deploying mock cerberus unhealthy server..."
  kubectl apply -f CI/templates/mock_cerberus_unhealthy.yaml

  # Wait for mock cerberus unhealthy pod to be ready
  echo "Waiting for mock cerberus unhealthy to be ready..."
  kubectl wait --for=condition=ready pod -l app=mock-cerberus-unhealthy --timeout=300s

  # Verify mock cerberus service is accessible
  echo "Verifying mock cerberus unhealthy service..."
  mock_cerberus_ip=$(kubectl get service mock-cerberus-unhealthy -o jsonpath='{.spec.clusterIP}')
  echo "Mock Cerberus Unhealthy IP: $mock_cerberus_ip"

  # Test cerberus endpoint from within the cluster (should return False)
  kubectl run cerberus-unhealthy-test --image=curlimages/curl:latest --rm -i --restart=Never -- \
    curl -s http://mock-cerberus-unhealthy.default.svc.cluster.local:8080/ || echo "Cerberus unhealthy test curl completed"

  # Configure scenario for pod disruption with cerberus enabled
  export scenario_type="pod_disruption_scenarios"
  export scenario_file="scenarios/kind/pod_etcd.yml"
  export post_config=""

  # Generate config with cerberus enabled
  envsubst < CI/config/common_test_config.yaml > CI/config/cerberus_unhealthy_test_config.yaml

  # Enable cerberus in the config but DON'T exit_on_failure (so the test can verify the behavior)
  # Using yq jq-wrapper syntax with -i -y
  yq -i '.cerberus.cerberus_enabled = true' CI/config/cerberus_unhealthy_test_config.yaml
  yq -i ".cerberus.cerberus_url = \"http://${mock_cerberus_ip}:8080\"" CI/config/cerberus_unhealthy_test_config.yaml
  yq -i '.kraken.exit_on_failure = false' CI/config/cerberus_unhealthy_test_config.yaml

  echo "========================================"
  echo "Cerberus Unhealthy Configuration:"
  yq '.cerberus' CI/config/cerberus_unhealthy_test_config.yaml
  echo "exit_on_failure:"
  yq '.kraken.exit_on_failure' CI/config/cerberus_unhealthy_test_config.yaml
  echo "========================================"

  # Run kraken with cerberus unhealthy (should detect unhealthy but not exit due to exit_on_failure=false)
  echo "Running kraken with cerberus unhealthy integration..."

  # We expect this to complete (not exit 1) because exit_on_failure is false
  # But cerberus should log that the cluster is unhealthy
  python3 -m coverage run -a run_kraken.py -c CI/config/cerberus_unhealthy_test_config.yaml || {
    exit_code=$?
    echo "Kraken exited with code: $exit_code"
    # If exit_code is 1, that's expected when cerberus reports unhealthy and exit_on_failure would be true
    # But since we set exit_on_failure=false, it should not exit
    if [ $exit_code -eq 1 ]; then
      echo "WARNING: Kraken exited with 1, which may indicate cerberus detected unhealthy cluster"
    fi
  }

  # Verify cerberus was called by checking mock cerberus logs
  echo "Checking mock cerberus unhealthy logs..."
  kubectl logs -l app=mock-cerberus-unhealthy --tail=50

  # Cleanup
  echo "Cleaning up mock cerberus unhealthy..."
  kubectl delete -f CI/templates/mock_cerberus_unhealthy.yaml || true

  echo "========================================"
  echo "Cerberus unhealthy functional test: Success"
  echo "========================================"
}

functional_test_cerberus_unhealthy
@@ -16,8 +16,10 @@ function functional_test_container_crash {
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/container_config.yaml

  python3 -m coverage run -a run_kraken.py -c CI/config/container_config.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/container_config.yaml -d True
  echo "Container scenario test: Success"

  kubectl get pods -n kube-system -l component=etcd
}

functional_test_container_crash
CI/tests/test_cpu_hog.sh (Normal file, 20 lines)
@@ -0,0 +1,20 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT


function functional_test_cpu_hog {
  yq -i '."node-selector"="kubernetes.io/hostname=kind-worker2"' scenarios/kube/cpu-hog.yml

  export scenario_type="hog_scenarios"
  export scenario_file="scenarios/kube/cpu-hog.yml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/cpu_hog.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/cpu_hog.yaml
  echo "CPU Hog: Success"
}

functional_test_cpu_hog
CI/tests/test_customapp_pod.sh (Executable file, 18 lines)
@@ -0,0 +1,18 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT

function functional_test_customapp_pod_node_selector {
  export scenario_type="pod_disruption_scenarios"
  export scenario_file="scenarios/openshift/customapp_pod.yaml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/customapp_pod_config.yaml

  python3 -m coverage run -a run_kraken.py -c CI/config/customapp_pod_config.yaml -d True
  echo "Pod disruption with node_label_selector test: Success"
}

functional_test_customapp_pod_node_selector
CI/tests/test_io_hog.sh (Normal file, 20 lines)
@@ -0,0 +1,20 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT

function functional_test_io_hog {
  yq -i '."node-selector"="kubernetes.io/hostname=kind-worker2"' scenarios/kube/io-hog.yml
  export scenario_type="hog_scenarios"
  export scenario_file="scenarios/kube/io-hog.yml"
  export post_config=""

  cat $scenario_file
  envsubst < CI/config/common_test_config.yaml > CI/config/io_hog.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/io_hog.yaml
  echo "IO Hog: Success"
}

functional_test_io_hog
CI/tests/test_memory_hog.sh (Normal file, 19 lines)
@@ -0,0 +1,19 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT


function functional_test_memory_hog {
  yq -i '."node-selector"="kubernetes.io/hostname=kind-worker2"' scenarios/kube/memory-hog.yml
  export scenario_type="hog_scenarios"
  export scenario_file="scenarios/kube/memory-hog.yml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/memory_hog.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/memory_hog.yaml
  echo "Memory Hog: Success"
}

functional_test_memory_hog
CI/tests/test_node.sh (Executable file, 18 lines)
@@ -0,0 +1,18 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT

function functional_test_node_stop_start {
  export scenario_type="node_scenarios"
  export scenario_file="scenarios/kind/node_scenarios_example.yml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/node_config.yaml
  cat CI/config/node_config.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/node_config.yaml
  echo "Node Stop/Start scenario test: Success"
}

functional_test_node_stop_start
CI/tests/test_node_network_chaos.sh (Executable file, 165 lines)
@@ -0,0 +1,165 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT

function functional_test_node_network_chaos {
  echo "Starting node network chaos functional test"

  # Get a worker node
  get_node
  export TARGET_NODE=$(echo $WORKER_NODE | awk '{print $1}')
  echo "Target node: $TARGET_NODE"

  # Deploy nginx workload on the target node
  echo "Deploying nginx workload on $TARGET_NODE..."
  kubectl create deployment nginx-node-net-chaos --image=nginx:latest

  # Add node selector to ensure pod runs on target node
  kubectl patch deployment nginx-node-net-chaos -p '{"spec":{"template":{"spec":{"nodeSelector":{"kubernetes.io/hostname":"'$TARGET_NODE'"}}}}}'

  # Expose service
  kubectl expose deployment nginx-node-net-chaos --port=80 --target-port=80 --name=nginx-node-net-chaos-svc

  # Wait for nginx to be ready
  echo "Waiting for nginx pod to be ready on $TARGET_NODE..."
  kubectl wait --for=condition=ready pod -l app=nginx-node-net-chaos --timeout=120s

  # Verify pod is on correct node
  export POD_NAME=$(kubectl get pods -l app=nginx-node-net-chaos -o jsonpath='{.items[0].metadata.name}')
  export POD_NODE=$(kubectl get pod $POD_NAME -o jsonpath='{.spec.nodeName}')
  echo "Pod $POD_NAME is running on node $POD_NODE"

  if [ "$POD_NODE" != "$TARGET_NODE" ]; then
    echo "ERROR: Pod is not on target node (expected $TARGET_NODE, got $POD_NODE)"
    kubectl get pods -l app=nginx-node-net-chaos -o wide
    exit 1
  fi

  # Setup port-forward to access nginx
  echo "Setting up port-forward to nginx service..."
  kubectl port-forward service/nginx-node-net-chaos-svc 8091:80 &
  PORT_FORWARD_PID=$!
  sleep 3  # Give port-forward time to start

  # Test baseline connectivity
  echo "Testing baseline connectivity..."
  response=$(curl -s -o /dev/null -w "%{http_code}" --max-time 5 http://localhost:8091 || echo "000")
  if [ "$response" != "200" ]; then
    echo "ERROR: Nginx not responding correctly (got $response, expected 200)"
    kubectl get pods -l app=nginx-node-net-chaos
    kubectl describe pod $POD_NAME
    exit 1
  fi
  echo "Baseline test passed: nginx responding with 200"

  # Measure baseline latency
  echo "Measuring baseline latency..."
  baseline_start=$(date +%s%3N)
  curl -s http://localhost:8091 > /dev/null || true
  baseline_end=$(date +%s%3N)
  baseline_latency=$((baseline_end - baseline_start))
  echo "Baseline latency: ${baseline_latency}ms"

  # Configure node network chaos scenario
  echo "Configuring node network chaos scenario..."
  yq -i '.[0].config.target="'$TARGET_NODE'"' scenarios/kube/node-network-chaos.yml
  yq -i '.[0].config.namespace="default"' scenarios/kube/node-network-chaos.yml
  yq -i '.[0].config.test_duration=20' scenarios/kube/node-network-chaos.yml
  yq -i '.[0].config.latency="200ms"' scenarios/kube/node-network-chaos.yml
  yq -i '.[0].config.loss=15' scenarios/kube/node-network-chaos.yml
  yq -i '.[0].config.bandwidth="10mbit"' scenarios/kube/node-network-chaos.yml
  yq -i '.[0].config.ingress=true' scenarios/kube/node-network-chaos.yml
  yq -i '.[0].config.egress=true' scenarios/kube/node-network-chaos.yml
  yq -i '.[0].config.force=false' scenarios/kube/node-network-chaos.yml
  yq -i 'del(.[0].config.interfaces)' scenarios/kube/node-network-chaos.yml

  # Prepare krkn config
  export scenario_type="network_chaos_ng_scenarios"
  export scenario_file="scenarios/kube/node-network-chaos.yml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/node_network_chaos_config.yaml

  # Run krkn in background
  echo "Starting krkn with node network chaos scenario..."
  python3 -m coverage run -a run_kraken.py -c CI/config/node_network_chaos_config.yaml &
  KRKN_PID=$!
  echo "Krkn started with PID: $KRKN_PID"

  # Wait for chaos to start (give it time to inject chaos)
  echo "Waiting for chaos injection to begin..."
  sleep 10

  # Test during chaos - check for increased latency or packet loss effects
  echo "Testing network behavior during chaos..."
  chaos_test_count=0
  chaos_success=0

  for i in {1..5}; do
    chaos_test_count=$((chaos_test_count + 1))
    chaos_start=$(date +%s%3N)
    response=$(curl -s -o /dev/null -w "%{http_code}" --max-time 10 http://localhost:8091 || echo "000")
    chaos_end=$(date +%s%3N)
    chaos_latency=$((chaos_end - chaos_start))

    echo "Attempt $i: HTTP $response, latency: ${chaos_latency}ms"

    # We expect either increased latency or some failures due to packet loss
    if [ "$response" == "200" ] || [ "$response" == "000" ]; then
      chaos_success=$((chaos_success + 1))
    fi

    sleep 2
  done

  echo "Chaos test results: $chaos_success/$chaos_test_count requests processed"

  # Verify node-level chaos affects pod
  echo "Verifying node-level chaos affects pod on $TARGET_NODE..."
  # The node chaos should affect all pods on the node

  # Wait for krkn to complete
  echo "Waiting for krkn to complete..."
  wait $KRKN_PID || true
  echo "Krkn completed"

  # Wait a bit for cleanup
  sleep 5

  # Verify recovery - nginx should respond normally again
  echo "Verifying service recovery..."
  recovery_attempts=0
  max_recovery_attempts=10

  while [ $recovery_attempts -lt $max_recovery_attempts ]; do
    recovery_attempts=$((recovery_attempts + 1))
    response=$(curl -s -o /dev/null -w "%{http_code}" --max-time 5 http://localhost:8091 || echo "000")

    if [ "$response" == "200" ]; then
      echo "Recovery verified: nginx responding normally (attempt $recovery_attempts)"
      break
    fi

    echo "Recovery attempt $recovery_attempts/$max_recovery_attempts: got $response, retrying..."
    sleep 3
  done

  if [ "$response" != "200" ]; then
    echo "ERROR: Service did not recover after chaos (got $response)"
    kubectl get pods -l app=nginx-node-net-chaos
    kubectl describe pod $POD_NAME
    exit 1
  fi

  # Cleanup
  echo "Cleaning up test resources..."
  kill $PORT_FORWARD_PID 2>/dev/null || true
  kubectl delete deployment nginx-node-net-chaos --ignore-not-found=true
  kubectl delete service nginx-node-net-chaos-svc --ignore-not-found=true

  echo "Node network chaos test: Success"
}

functional_test_node_network_chaos
CI/tests/test_pod.sh (Executable file, 21 lines)
@@ -0,0 +1,21 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT

function functional_test_pod_crash {
  export scenario_type="pod_disruption_scenarios"
  export scenario_file="scenarios/kind/pod_path_provisioner.yml"

  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/pod_config.yaml

  python3 -m coverage run -a run_kraken.py -c CI/config/pod_config.yaml
  echo "Pod disruption scenario test: Success"
  date
  kubectl get pods -n local-path-storage -l app=local-path-provisioner -o yaml
}

functional_test_pod_crash
CI/tests/test_pod_error.sh (Executable file, 31 lines)
@@ -0,0 +1,31 @@


source CI/tests/common.sh

trap error ERR
trap finish EXIT

function functional_test_pod_error {
  export scenario_type="pod_disruption_scenarios"
  export scenario_file="scenarios/kind/pod_etcd.yml"
  export post_config=""
  # this test will check if krkn exits with an error when too many pods are targeted
  yq -i '.[0].config.kill=5' scenarios/kind/pod_etcd.yml
  yq -i '.[0].config.krkn_pod_recovery_time=1' scenarios/kind/pod_etcd.yml
  envsubst < CI/config/common_test_config.yaml > CI/config/pod_config.yaml
  cat CI/config/pod_config.yaml

  cat scenarios/kind/pod_etcd.yml
  python3 -m coverage run -a run_kraken.py -c CI/config/pod_config.yaml

  ret=$?
  echo -e "\n\nret $ret"
  if [[ $ret -ge 1 ]]; then
    echo "Pod disruption error scenario test: Success"
  else
    echo "Pod disruption error scenario test: Failure"
    exit 1
  fi
}

functional_test_pod_error
CI/tests/test_pod_network_chaos.sh (Executable file, 143 lines)
@@ -0,0 +1,143 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT

function functional_test_pod_network_chaos {
  echo "Starting pod network chaos functional test"

  # Deploy nginx workload
  echo "Deploying nginx workload..."
  kubectl create deployment nginx-pod-net-chaos --image=nginx:latest
  kubectl expose deployment nginx-pod-net-chaos --port=80 --target-port=80 --name=nginx-pod-net-chaos-svc

  # Wait for nginx to be ready
  echo "Waiting for nginx pod to be ready..."
  kubectl wait --for=condition=ready pod -l app=nginx-pod-net-chaos --timeout=120s

  # Get pod name
  export POD_NAME=$(kubectl get pods -l app=nginx-pod-net-chaos -o jsonpath='{.items[0].metadata.name}')
  echo "Target pod: $POD_NAME"

  # Setup port-forward to access nginx
  echo "Setting up port-forward to nginx service..."
  kubectl port-forward service/nginx-pod-net-chaos-svc 8090:80 &
  PORT_FORWARD_PID=$!
  sleep 3  # Give port-forward time to start

  # Test baseline connectivity
  echo "Testing baseline connectivity..."
  response=$(curl -s -o /dev/null -w "%{http_code}" --max-time 5 http://localhost:8090 || echo "000")
  if [ "$response" != "200" ]; then
    echo "ERROR: Nginx not responding correctly (got $response, expected 200)"
    kubectl get pods -l app=nginx-pod-net-chaos
    kubectl describe pod $POD_NAME
    exit 1
  fi
  echo "Baseline test passed: nginx responding with 200"

  # Measure baseline latency
  echo "Measuring baseline latency..."
  baseline_start=$(date +%s%3N)
  curl -s http://localhost:8090 > /dev/null || true
  baseline_end=$(date +%s%3N)
  baseline_latency=$((baseline_end - baseline_start))
  echo "Baseline latency: ${baseline_latency}ms"

  # Configure pod network chaos scenario
  echo "Configuring pod network chaos scenario..."
  yq -i '.[0].config.target="'$POD_NAME'"' scenarios/kube/pod-network-chaos.yml
  yq -i '.[0].config.namespace="default"' scenarios/kube/pod-network-chaos.yml
  yq -i '.[0].config.test_duration=20' scenarios/kube/pod-network-chaos.yml
  yq -i '.[0].config.latency="200ms"' scenarios/kube/pod-network-chaos.yml
  yq -i '.[0].config.loss=15' scenarios/kube/pod-network-chaos.yml
  yq -i '.[0].config.bandwidth="10mbit"' scenarios/kube/pod-network-chaos.yml
  yq -i '.[0].config.ingress=true' scenarios/kube/pod-network-chaos.yml
  yq -i '.[0].config.egress=true' scenarios/kube/pod-network-chaos.yml
  yq -i 'del(.[0].config.interfaces)' scenarios/kube/pod-network-chaos.yml

  # Prepare krkn config
  export scenario_type="network_chaos_ng_scenarios"
  export scenario_file="scenarios/kube/pod-network-chaos.yml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/pod_network_chaos_config.yaml

  # Run krkn in background
  echo "Starting krkn with pod network chaos scenario..."
  python3 -m coverage run -a run_kraken.py -c CI/config/pod_network_chaos_config.yaml &
  KRKN_PID=$!
  echo "Krkn started with PID: $KRKN_PID"

  # Wait for chaos to start (give it time to inject chaos)
  echo "Waiting for chaos injection to begin..."
  sleep 10

  # Test during chaos - check for increased latency or packet loss effects
  echo "Testing network behavior during chaos..."
  chaos_test_count=0
  chaos_success=0

  for i in {1..5}; do
    chaos_test_count=$((chaos_test_count + 1))
    chaos_start=$(date +%s%3N)
    response=$(curl -s -o /dev/null -w "%{http_code}" --max-time 10 http://localhost:8090 || echo "000")
    chaos_end=$(date +%s%3N)
    chaos_latency=$((chaos_end - chaos_start))

    echo "Attempt $i: HTTP $response, latency: ${chaos_latency}ms"

    # We expect either increased latency or some failures due to packet loss
    if [ "$response" == "200" ] || [ "$response" == "000" ]; then
      chaos_success=$((chaos_success + 1))
    fi

    sleep 2
  done

  echo "Chaos test results: $chaos_success/$chaos_test_count requests processed"

  # Wait for krkn to complete
  echo "Waiting for krkn to complete..."
  wait $KRKN_PID || true
  echo "Krkn completed"

  # Wait a bit for cleanup
  sleep 5

  # Verify recovery - nginx should respond normally again
  echo "Verifying service recovery..."
  recovery_attempts=0
  max_recovery_attempts=10

  while [ $recovery_attempts -lt $max_recovery_attempts ]; do
    recovery_attempts=$((recovery_attempts + 1))
    response=$(curl -s -o /dev/null -w "%{http_code}" --max-time 5 http://localhost:8090 || echo "000")

    if [ "$response" == "200" ]; then
      echo "Recovery verified: nginx responding normally (attempt $recovery_attempts)"
      break
    fi

    echo "Recovery attempt $recovery_attempts/$max_recovery_attempts: got $response, retrying..."
    sleep 3
  done

  if [ "$response" != "200" ]; then
    echo "ERROR: Service did not recover after chaos (got $response)"
    kubectl get pods -l app=nginx-pod-net-chaos
    kubectl describe pod $POD_NAME
    exit 1
  fi

  # Cleanup
  echo "Cleaning up test resources..."
  kill $PORT_FORWARD_PID 2>/dev/null || true
  kubectl delete deployment nginx-pod-net-chaos --ignore-not-found=true
  kubectl delete service nginx-pod-net-chaos-svc --ignore-not-found=true

  echo "Pod network chaos test: Success"
}

functional_test_pod_network_chaos
CI/tests/test_pod_network_filter.sh (Executable file, 62 lines)
@@ -0,0 +1,62 @@
function functional_pod_network_filter {
  export SERVICE_URL="http://localhost:8889"
  export scenario_type="network_chaos_ng_scenarios"
  export scenario_file="scenarios/kube/pod-network-filter.yml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/pod_network_filter.yaml
  yq -i '.[0].test_duration=10' scenarios/kube/pod-network-filter.yml
  yq -i '.[0].label_selector=""' scenarios/kube/pod-network-filter.yml
  yq -i '.[0].ingress=false' scenarios/kube/pod-network-filter.yml
  yq -i '.[0].egress=true' scenarios/kube/pod-network-filter.yml
  yq -i '.[0].target="pod-network-filter-test"' scenarios/kube/pod-network-filter.yml
  yq -i '.[0].protocols=["tcp"]' scenarios/kube/pod-network-filter.yml
  yq -i '.[0].ports=[443]' scenarios/kube/pod-network-filter.yml
  yq -i '.performance_monitoring.check_critical_alerts=False' CI/config/pod_network_filter.yaml

  ## Test webservice deployment
  kubectl apply -f ./CI/templates/pod_network_filter.yaml
  COUNTER=0
  while true
  do
    curl $SERVICE_URL
    EXITSTATUS=$?
    if [ "$EXITSTATUS" -eq "0" ]
    then
      break
    fi
    sleep 1
    COUNTER=$((COUNTER+1))
    [ $COUNTER -eq "100" ] && echo "maximum number of retries reached, test failed" && exit 1
  done

  cat scenarios/kube/pod-network-filter.yml

  python3 -m coverage run -a run_kraken.py -c CI/config/pod_network_filter.yaml > krkn_pod_network.out 2>&1 &
  PID=$!

  # wait until the DNS resolution starts failing and the service returns 404
  DNS_FAILURE_STATUS=0
  while true
  do
    OUT_STATUS_CODE=$(curl -X GET -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL)
    if [ "$OUT_STATUS_CODE" -eq "404" ]
    then
      DNS_FAILURE_STATUS=404
    fi

    if [ "$DNS_FAILURE_STATUS" -eq "404" ] && [ "$OUT_STATUS_CODE" -eq "200" ]
    then
      echo "service restored"
      break
    fi
    COUNTER=$((COUNTER+1))
    [ $COUNTER -eq "100" ] && echo "maximum number of retries reached, test failed" && exit 1
    sleep 2
  done

  wait $PID

}

functional_pod_network_filter
CI/tests/test_pod_server.sh (Executable file, 35 lines)
@@ -0,0 +1,35 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT

function functional_test_pod_server {
  export scenario_type="pod_disruption_scenarios"
  export scenario_file="scenarios/kind/pod_etcd.yml"
  export post_config=""

  envsubst < CI/config/common_test_config.yaml > CI/config/pod_config.yaml
  yq -i '.[0].config.kill=1' scenarios/kind/pod_etcd.yml

  yq -i '.tunings.daemon_mode=True' CI/config/pod_config.yaml
  cat CI/config/pod_config.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/pod_config.yaml &
  sleep 15
  curl -X POST http://0.0.0.0:8081/STOP

  wait

  yq -i '.kraken.signal_state="PAUSE"' CI/config/pod_config.yaml
  yq -i '.tunings.daemon_mode=False' CI/config/pod_config.yaml
  cat CI/config/pod_config.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/pod_config.yaml &
  sleep 5
  curl -X POST http://0.0.0.0:8081/RUN
  wait

  echo "Pod disruption with server scenario test: Success"
}

functional_test_pod_server
CI/tests/test_pvc.sh (Executable file, 18 lines)
@@ -0,0 +1,18 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT

function functional_test_pvc_fill {
  export scenario_type="pvc_scenarios"
  export scenario_file="scenarios/kind/pvc_scenario.yaml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/pvc_config.yaml
  cat CI/config/pvc_config.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/pvc_config.yaml --debug True
  echo "PVC Fill scenario test: Success"
}

functional_test_pvc_fill
@@ -39,7 +39,7 @@ function functional_test_service_hijacking {
   export scenario_file="scenarios/kube/service_hijacking.yaml"
   export post_config=""
   envsubst < CI/config/common_test_config.yaml > CI/config/service_hijacking.yaml
-  python3 -m coverage run -a run_kraken.py -c CI/config/service_hijacking.yaml > /dev/null 2>&1 &
+  python3 -m coverage run -a run_kraken.py -c CI/config/service_hijacking.yaml > /tmp/krkn.log 2>&1 &
   PID=$!
   # Waiting for the hijacking to take effect
   COUNTER=0
@@ -100,8 +100,13 @@ function functional_test_service_hijacking {
   [ "${PAYLOAD_PATCH_2//[$'\t\r\n ']}" == "${OUT_PATCH//[$'\t\r\n ']}" ] && echo "Step 2 PATCH Payload OK" || (echo "Step 2 PATCH Payload did not match. Test failed." && exit 1)
   [ "$OUT_STATUS_CODE" == "$STATUS_CODE_PATCH_2" ] && echo "Step 2 PATCH Status Code OK" || (echo "Step 2 PATCH status code did not match. Test failed." && exit 1)
   [ "$OUT_CONTENT" == "$TEXT_MIME" ] && echo "Step 2 PATCH MIME OK" || (echo "Step 2 PATCH MIME did not match. Test failed." && exit 1)

   wait $PID

   cat /tmp/krkn.log

   # now checking if the service has been restored correctly and nginx responds correctly
   curl -s $SERVICE_URL | grep nginx! && echo "BODY: Service restored!" || (echo "BODY: failed to restore service" && exit 1)
   OUT_STATUS_CODE=`curl -X GET -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL`
@@ -18,12 +18,13 @@ function functional_test_telemetry {
   yq -i '.performance_monitoring.prometheus_url="http://localhost:9090"' CI/config/common_test_config.yaml
   yq -i '.telemetry.run_tag=env(RUN_TAG)' CI/config/common_test_config.yaml

-  export scenario_type="hog_scenarios"
-  export scenario_file="scenarios/kube/cpu-hog/input.yaml"
+  export scenario_type="pod_disruption_scenarios"
+  export scenario_file="scenarios/kind/pod_path_provisioner.yml"

   export post_config=""
   envsubst < CI/config/common_test_config.yaml > CI/config/telemetry.yaml
   retval=$(python3 -m coverage run -a run_kraken.py -c CI/config/telemetry.yaml)
-  RUN_FOLDER=`cat CI/out/test_telemetry.out | grep amazonaws.com | sed -rn "s#.*https:\/\/.*\/files/(.*)#\1#p"`
+  RUN_FOLDER=`cat CI/out/test_telemetry.out | grep amazonaws.com | sed -rn "s#.*https:\/\/.*\/files/(.*)#\1#p" | sed 's/\x1b\[[0-9;]*m//g'`
   $AWS_CLI s3 ls "s3://$AWS_BUCKET/$RUN_FOLDER/" | awk '{ print $4 }' > s3_remote_files
   echo "checking if telemetry files are uploaded on s3"
   cat s3_remote_files | grep critical-alerts-00.log || ( echo "FAILED: critical-alerts-00.log not uploaded" && exit 1 )
CI/tests_v2/CONTRIBUTING_TESTS.md (Normal file, 175 lines)
@@ -0,0 +1,175 @@
# Adding a New Scenario Test (CI/tests_v2)

This guide explains how to add a new chaos scenario test to the v2 pytest framework. The layout is **folder-per-scenario**: each scenario has its own directory under `scenarios/<scenario_name>/` containing the test file, Kubernetes resources, and the Krkn scenario base YAML.

## Option 1: Scaffold script (recommended)

From the **repository root**:

```bash
python CI/tests_v2/scaffold.py --scenario service_hijacking
```

This creates:

- `CI/tests_v2/scenarios/service_hijacking/test_service_hijacking.py` — A test class extending `BaseScenarioTest` with a stub `test_happy_path` and `WORKLOAD_MANIFEST` pointing to the folder’s `resource.yaml`.
- `CI/tests_v2/scenarios/service_hijacking/resource.yaml` — A placeholder Deployment (namespace is patched at deploy time).
- `CI/tests_v2/scenarios/service_hijacking/scenario_base.yaml` — A placeholder Krkn scenario; edit this with the structure expected by your scenario type.

The script automatically registers the marker in `CI/tests_v2/pytest.ini`. For example, it adds:

```
service_hijacking: marks a test as a service_hijacking scenario test
```

**Next steps after scaffolding:**

1. Verify the marker was added to `pytest.ini` (the scaffold does this automatically).
2. Edit `scenario_base.yaml` with the structure your Krkn scenario type expects (see `scenarios/application_outage/scenario_base.yaml` and `scenarios/pod_disruption/scenario_base.yaml` for examples). The top-level key should match `SCENARIO_NAME`.
3. If your scenario uses a **list** structure (like pod_disruption) instead of a **dict** with a top-level key, set `NAMESPACE_KEY_PATH` (e.g. `[0, "config", "namespace_pattern"]`) and `NAMESPACE_IS_REGEX = True` if the namespace is a regex pattern (see the sketch after this list).
4. The generated `test_happy_path` already uses `self.run_scenario(self.tmp_path, ns)` and assertions. Add more test methods (e.g. negative tests with `@pytest.mark.no_workload`) as needed.
5. Adjust `resource.yaml` if your scenario needs a different workload (e.g. specific image or labels).
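To make the two `NAMESPACE_KEY_PATH` shapes concrete, here is a small sketch of how such a path is walked into the loaded scenario document. It is illustrative only; the framework's real patching lives in `load_and_patch_scenario` in `lib/base.py`:

```python
# Sketch: walking NAMESPACE_KEY_PATH into a loaded scenario document.
# Each element of key_path is either a dict key or a list index.
def patch_namespace(doc, key_path, value):
    node = doc
    for key in key_path[:-1]:
        node = node[key]
    node[key_path[-1]] = value
    return doc

# Dict-based scenario (top-level key matches SCENARIO_NAME):
doc = {"application_outage": {"namespace": None, "duration": 30}}
patch_namespace(doc, ["application_outage", "namespace"], "krkn-test-a1b2c3d4")

# List-based scenario (pod_disruption style; regex pattern, NAMESPACE_IS_REGEX = True):
doc = [{"config": {"namespace_pattern": None, "kill": 1}}]
patch_namespace(doc, [0, "config", "namespace_pattern"], "^krkn-test-a1b2c3d4$")
```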
If your Kraken scenario type string is not `<scenario>_scenarios`, pass it explicitly:

```bash
python CI/tests_v2/scaffold.py --scenario node_disruption --scenario-type node_scenarios
```

## Option 2: Manual setup

1. **Create the scenario folder**
   `CI/tests_v2/scenarios/<scenario_name>/`.

2. **Add resource.yaml**
   Kubernetes manifest(s) for the workload (Deployment or Pod). Use a distinct label (e.g. `app: <scenario>-target`). Omit or leave `metadata.namespace`; the framework patches it at deploy time.

3. **Add scenario_base.yaml**
   The canonical Krkn scenario structure. Tests will load this, patch the namespace (and any overrides), write it to `tmp_path`, and pass it to `build_config`. See existing scenarios for the format your scenario type expects.

4. **Add test_<scenario>.py**
   - Import `BaseScenarioTest` from `lib.base` and helpers from `lib.utils` (e.g. `assert_kraken_success`, `get_pods_list`, `scenario_dir` if needed).
   - Define a class extending `BaseScenarioTest` with:
     - `WORKLOAD_MANIFEST = "CI/tests_v2/scenarios/<scenario_name>/resource.yaml"`
     - `WORKLOAD_IS_PATH = True`
     - `LABEL_SELECTOR = "app=<label>"`
     - `SCENARIO_NAME = "<scenario_name>"`
     - `SCENARIO_TYPE = "<scenario_type>"` (e.g. `application_outages_scenarios`)
     - `NAMESPACE_KEY_PATH`: path to the namespace field (e.g. `["application_outage", "namespace"]` for dict-based, or `[0, "config", "namespace_pattern"]` for list-based)
     - `NAMESPACE_IS_REGEX = False` (or `True` for regex patterns like pod_disruption)
     - `OVERRIDES_KEY_PATH = ["<top-level key>"]` if the scenario supports overrides (e.g. duration, block).
   - Add `@pytest.mark.functional` and `@pytest.mark.<scenario>` on the class.
   - In at least one test, call `self.run_scenario(self.tmp_path, self.ns)` and assert with `assert_kraken_success`, `assert_pod_count_unchanged`, and `assert_all_pods_running_and_ready`. Use `self.k8s_core`, `self.tmp_path`, etc. (injected by the base class).

5. **Register the marker**
   In `CI/tests_v2/pytest.ini`, under `markers`:
   ```
   <scenario>: marks a test as a <scenario> scenario test
   ```

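Putting steps 4 and 5 together, a minimal test module might look like the sketch below. It uses only the attributes and helpers named in this guide; the `demo_outage` scenario name, its label, and its key path are illustrative placeholders, not part of the framework:

```python
# CI/tests_v2/scenarios/demo_outage/test_demo_outage.py (illustrative sketch)
import pytest

from lib.base import BaseScenarioTest
from lib.utils import assert_kraken_success


@pytest.mark.functional
@pytest.mark.demo_outage
class TestDemoOutage(BaseScenarioTest):
    WORKLOAD_MANIFEST = "CI/tests_v2/scenarios/demo_outage/resource.yaml"
    WORKLOAD_IS_PATH = True
    LABEL_SELECTOR = "app=demo-outage-target"
    SCENARIO_NAME = "demo_outage"
    SCENARIO_TYPE = "application_outages_scenarios"
    NAMESPACE_KEY_PATH = ["demo_outage", "namespace"]
    NAMESPACE_IS_REGEX = False

    def test_happy_path(self):
        # self.ns, self.tmp_path, and the K8s client fixtures are injected by the base class.
        result = self.run_scenario(self.tmp_path, self.ns)
        assert_kraken_success(result, context=f"namespace={self.ns}", tmp_path=self.tmp_path)
```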
## Conventions

- **Folder-per-scenario**: One directory per scenario under `scenarios/`. All assets (test, resource.yaml, scenario_base.yaml, and any extra YAMLs) live there for easy tracking and onboarding.
- **Ephemeral namespace**: Every test gets a unique `krkn-test-<uuid>` namespace. The base class deploys the workload into it before the test; no manual deploy is required.
- **Negative tests**: For tests that don’t need a workload (e.g. invalid scenario, bad namespace), use `@pytest.mark.no_workload`. The test will still get a namespace but no workload will be deployed.
- **Scenario type**: `SCENARIO_TYPE` must match the key in Kraken’s config (e.g. `application_outages_scenarios`, `pod_disruption_scenarios`). See `CI/tests_v2/config/common_test_config.yaml` and the scenario plugin’s `get_scenario_types()`.
- **Assertions**: Use `assert_kraken_success(result, context=f"namespace={ns}", tmp_path=self.tmp_path)` so failures include stdout/stderr and optional log files.
- **Timeouts**: Use constants from `lib.base` (`READINESS_TIMEOUT`, `POLICY_WAIT_TIMEOUT`, etc.) instead of magic numbers.

## Exit Code Handling

Kraken uses the following exit codes: **0** = success; **1** = scenario failure (e.g. post scenarios still failing); **2** = critical alerts fired; **3+** = health check / KubeVirt check failures; **-1** = infrastructure error (bad config, no kubeconfig).

- **Happy-path tests**: Use `assert_kraken_success(result, ...)`. By default only exit code 0 is accepted.
- **Alert-aware tests**: If you enable `check_critical_alerts` and expect alerts, use `assert_kraken_success(result, allowed_codes=(0, 2), ...)` so exit code 2 is treated as acceptable.
- **Expected-failure tests**: Use `assert_kraken_failure(result, context=..., tmp_path=self.tmp_path)` for negative tests (invalid scenario, bad namespace, etc.). This gives the same diagnostic quality (log dump, tmp_path hint) as success assertions. Prefer this over a bare `assert result.returncode != 0`. The three patterns are summarized in the sketch after this list.

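A compact way to see the three patterns together (a hedged sketch: `result` is whatever `self.run_scenario` returns, and the assertion helpers and `allowed_codes` keyword are the ones named above):

```python
from lib.utils import assert_kraken_failure, assert_kraken_success

def check_outcome(result, ns, tmp_path, expect_failure=False, allow_alerts=False):
    """Route a Kraken result to the assertion matching the test's expectation."""
    if expect_failure:
        # Negative test: any non-zero exit code passes, with full diagnostics on mismatch.
        assert_kraken_failure(result, context=f"namespace={ns}", tmp_path=tmp_path)
    elif allow_alerts:
        # Exit code 2 (critical alerts fired) is also acceptable.
        assert_kraken_success(result, allowed_codes=(0, 2), context=f"namespace={ns}", tmp_path=tmp_path)
    else:
        # Happy path: only exit code 0 passes.
        assert_kraken_success(result, context=f"namespace={ns}", tmp_path=tmp_path)
```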
## Running your new tests

```bash
pytest CI/tests_v2/ -v -m <scenario>
```

For debugging with logs and keeping failed namespaces:

```bash
pytest CI/tests_v2/ -v -m <scenario> --log-cli-level=DEBUG --keep-ns-on-fail
```

---

## Naming Conventions

Follow these conventions so the framework stays consistent as new scenarios are added.

### Quick Reference

| Element | Pattern | Example |
|---|---|---|
| Scenario folder | `scenarios/<snake_case>/` | `scenarios/node_disruption/` |
| Test file | `test_<scenario>.py` | `test_node_disruption.py` |
| Test class | `Test<CamelCase>(BaseScenarioTest)` | `TestNodeDisruption` |
| Pytest marker | `@pytest.mark.<scenario>` (matches folder) | `@pytest.mark.node_disruption` |
| Scenario YAML | `scenario_base.yaml` | — |
| Workload YAML | `resource.yaml` | — |
| Extra YAMLs | `<descriptive_name>.yaml` | `nginx_http.yaml` |
| Lib modules | `lib/<concern>.py` | `lib/deploy.py` |
| Public fixtures | `<verb>_<noun>` or `<noun>` | `run_kraken`, `test_namespace` |
| Private/autouse fixtures | `_<descriptive>` | `_cleanup_stale_namespaces` |
| Assertion helpers | `assert_<condition>` | `assert_pod_count_unchanged` |
| Query helpers | `get_<resource>` or `find_<resource>_by_<criteria>` | `get_pods_list`, `find_network_policy_by_prefix` |
| Env var overrides | `KRKN_TEST_<NAME>` | `KRKN_TEST_READINESS_TIMEOUT` |

### Folders

- One folder per scenario under `scenarios/`. The folder name is `snake_case` and must match the `SCENARIO_NAME` class attribute in the test.
- Shared framework code lives in `lib/`. Each module covers a single concern (`k8s`, `namespace`, `deploy`, `kraken`, `utils`, `base`, `preflight`).
- Do **not** add scenario-specific code to `lib/`; keep it in the scenario folder as module-level helpers.

### Files

- Test files: `test_<scenario>.py`. This is required for pytest discovery (`test_*.py`).
- Workload manifests: always `resource.yaml`. If a scenario needs additional K8s resources (e.g. a Service for traffic testing), use a descriptive name like `nginx_http.yaml`.
- Scenario config: always `scenario_base.yaml`. This is the template that `load_and_patch_scenario` loads and patches.

### Classes

- One test class per file: `Test<CamelCase>` extending `BaseScenarioTest`.
- The CamelCase name must be the PascalCase equivalent of the folder name (e.g. `pod_disruption` -> `TestPodDisruption`).

### Test Methods

- Prefix: `test_` (pytest requirement).
- Use descriptive names that convey **what is being verified**, not implementation details.
- Good: `test_pod_crash_and_recovery`, `test_traffic_blocked_during_outage`, `test_invalid_scenario_fails`.
- Avoid: `test_run_1`, `test_scenario`, `test_it_works`.

### Fixtures

- **Public fixtures** (intended for use in tests): use `<verb>_<noun>` or plain `<noun>`. Examples: `run_kraken`, `deploy_workload`, `test_namespace`, `kubectl`.
- **Private/autouse fixtures** (framework internals): prefix with `_`. Examples: `_kube_config_loaded`, `_preflight_checks`, `_inject_common_fixtures`.
- K8s client fixtures use the `k8s_` prefix: `k8s_core`, `k8s_apps`, `k8s_networking`, `k8s_client`.

### Helpers and Utilities

- **Assertions**: `assert_<what_is_expected>`. Always raise `AssertionError` with a message that includes the namespace.
- **K8s queries**: `get_<resource>_list` for direct API calls, `find_<resource>_by_<criteria>` for filtered lookups.
- **Private helpers**: prefix with `_` for module-internal functions (e.g. `_pods`, `_policies`, `_get_nested`).

### Constants and Environment Variables

- Timeout constants: `UPPER_CASE` in `lib/base.py`. Each is overridable via an env var prefixed `KRKN_TEST_` (see the sketch below).
- Feature flags: `KRKN_TEST_DRY_RUN`, `KRKN_TEST_COVERAGE`. Always use the `KRKN_TEST_` prefix so all tunables are discoverable with `grep KRKN_TEST_`.

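As an illustration, a timeout constant in `lib/base.py` is expected to follow this shape (the default shown here is an assumed value, not necessarily the framework's actual one):

```python
import os

# Override at run time, e.g.: KRKN_TEST_READINESS_TIMEOUT=240 pytest CI/tests_v2/ -v
READINESS_TIMEOUT = int(os.getenv("KRKN_TEST_READINESS_TIMEOUT", "120"))
```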
### Markers

- Every test class gets `@pytest.mark.functional` (framework-wide) and `@pytest.mark.<scenario>` (scenario-specific).
- The scenario marker name matches the folder name exactly.
- Behavioral modifiers use plain descriptive names: `no_workload`, `order`.
- Register all custom markers in `pytest.ini` to avoid warnings.

## Adding Dependencies

- **Runtime (Kraken needs it)**: Add to the **root** `requirements.txt`. Pin a version (e.g. `package==1.2.3` or `package>=1.2,<2`).
- **Test-only (only CI/tests_v2 needs it)**: Add to **`CI/tests_v2/requirements.txt`**. Pin a version there as well.
- After changing either file, run `make setup` (or `make -f CI/tests_v2/Makefile setup`) from the repo root to verify both files install cleanly together.
CI/tests_v2/Makefile (Normal file, 97 lines)
@@ -0,0 +1,97 @@
# CI/tests_v2 functional tests - single entry point.
# Run from repo root: make -f CI/tests_v2/Makefile <target>
# Or from CI/tests_v2: make <target> (REPO_ROOT is resolved automatically).

# Resolve repo root: go to the Makefile dir, then up two levels (CI/tests_v2 -> repo root)
REPO_ROOT := $(shell cd "$(dir $(firstword $(MAKEFILE_LIST)))" && cd ../.. && pwd)
VENV := $(REPO_ROOT)/venv
PYTHON := $(VENV)/bin/python
PIP := $(VENV)/bin/pip
CLUSTER_NAME ?= ci-krkn
TESTS_DIR := $(REPO_ROOT)/CI/tests_v2

.PHONY: setup preflight test test-fast test-debug test-scenario test-dry-run clean help

help:
	@echo "CI/tests_v2 functional tests - usage: make [target]"
	@echo ""
	@echo "Targets:"
	@echo "  setup          Create venv (if missing), install Python deps, create KinD cluster (kind-config-dev.yml)."
	@echo "                 Run once before first test. Override cluster config: KIND_CONFIG=path make setup"
	@echo ""
	@echo "  preflight      Check Python 3.9+, kind, kubectl, Docker, cluster reachability, test deps."
	@echo "                 Invoked automatically by test targets; run standalone to validate environment."
	@echo ""
	@echo "  test           Full run: retries (2), timeout 300s, HTML report, JUnit XML, coverage."
	@echo "                 Use for CI or final verification. Output: report.html, results.xml"
	@echo ""
	@echo "  test-fast      Quick run: no retries, 120s timeout, no report. For fast local iteration."
	@echo ""
	@echo "  test-debug     Debug run: verbose (-s), keep failed namespaces (--keep-ns-on-fail), DEBUG logging."
	@echo "                 Use when investigating failures; inspect kept namespaces with kubectl."
	@echo ""
	@echo "  test-scenario  Run only one scenario. Requires SCENARIO=<marker>."
	@echo "                 Example: make test-scenario SCENARIO=pod_disruption"
	@echo ""
	@echo "  test-dry-run   Validate scenario plumbing only (no Kraken execution). Sets KRKN_TEST_DRY_RUN=1."
	@echo ""
	@echo "  clean          Delete KinD cluster $(CLUSTER_NAME) and remove report.html, results.xml."
	@echo ""
	@echo "  help           Show this help."
	@echo ""
	@echo "Run from repo root: make -f CI/tests_v2/Makefile <target>"
	@echo "Or from CI/tests_v2: make <target>"

setup: $(VENV)/.installed
	@echo "Running cluster setup..."
	$(MAKE) -f $(TESTS_DIR)/Makefile preflight
	cd $(REPO_ROOT) && ./CI/tests_v2/setup_env.sh
	@echo "Setup complete. Run 'make test' or 'make -f CI/tests_v2/Makefile test' from repo root."

$(VENV)/.installed: $(REPO_ROOT)/requirements.txt $(TESTS_DIR)/requirements.txt
	@if [ ! -d "$(VENV)" ]; then python3 -m venv $(VENV); echo "Created venv at $(VENV)"; fi
	$(PYTHON) -m pip install -q --upgrade pip
	# Root = Kraken runtime; tests_v2 = test-only plugins; both required for functional tests.
	$(PIP) install -q -r $(REPO_ROOT)/requirements.txt
	$(PIP) install -q -r $(TESTS_DIR)/requirements.txt
	@touch $(VENV)/.installed
	@echo "Python deps installed."

preflight:
	@echo "Preflight: checking Python, tools, and cluster..."
	@command -v python3 >/dev/null 2>&1 || { echo "Error: python3 not found."; exit 1; }
	@python3 -c "import sys; exit(0 if sys.version_info >= (3, 9) else 1)" || { echo "Error: Python 3.9+ required."; exit 1; }
	@command -v kind >/dev/null 2>&1 || { echo "Error: kind not installed."; exit 1; }
	@command -v kubectl >/dev/null 2>&1 || { echo "Error: kubectl not installed."; exit 1; }
	@docker info >/dev/null 2>&1 || { echo "Error: Docker not running (required for KinD)."; exit 1; }
	@if kind get clusters 2>/dev/null | grep -qx "$(CLUSTER_NAME)"; then \
		kubectl cluster-info >/dev/null 2>&1 || { echo "Error: Cluster $(CLUSTER_NAME) exists but cluster-info failed."; exit 1; }; \
	else \
		echo "Note: Cluster $(CLUSTER_NAME) not found. Run 'make setup' to create it."; \
	fi
	@$(PYTHON) -c "import pytest_rerunfailures, pytest_html, pytest_timeout, pytest_order" 2>/dev/null || \
		{ echo "Error: Install test deps with 'make setup' or pip install -r CI/tests_v2/requirements.txt"; exit 1; }
	@echo "Preflight OK."

test: preflight
	cd $(REPO_ROOT) && KRKN_TEST_COVERAGE=1 $(PYTHON) -m pytest $(TESTS_DIR)/ -v --timeout=300 --reruns=2 --reruns-delay=10 \
		--html=$(TESTS_DIR)/report.html -n auto --junitxml=$(TESTS_DIR)/results.xml

test-fast: preflight
	cd $(REPO_ROOT) && $(PYTHON) -m pytest $(TESTS_DIR)/ -v -p no:rerunfailures -n auto --timeout=120

test-debug: preflight
	cd $(REPO_ROOT) && $(PYTHON) -m pytest $(TESTS_DIR)/ -v -s -p no:rerunfailures --timeout=300 \
		--keep-ns-on-fail --log-cli-level=DEBUG

test-scenario: preflight
	@if [ -z "$(SCENARIO)" ]; then echo "Error: set SCENARIO=pod_disruption (or application_outage, etc.)"; exit 1; fi
	cd $(REPO_ROOT) && $(PYTHON) -m pytest $(TESTS_DIR)/ -v -m "$(SCENARIO)" --timeout=300 --reruns=2 --reruns-delay=10

test-dry-run: preflight
	cd $(REPO_ROOT) && KRKN_TEST_DRY_RUN=1 $(PYTHON) -m pytest $(TESTS_DIR)/ -v

clean:
	@kind delete cluster --name $(CLUSTER_NAME) 2>/dev/null || true
	@rm -f $(TESTS_DIR)/report.html $(TESTS_DIR)/results.xml
	@echo "Cleaned cluster and report artifacts."
CI/tests_v2/README.md (Normal file, 198 lines)
@@ -0,0 +1,198 @@
# Pytest Functional Tests (tests_v2)

This directory contains a pytest-based functional test framework that runs **alongside** the existing bash tests in `CI/tests/`. It covers the **pod disruption** and **application outage** scenarios with proper assertions, retries, and reporting.

Each test runs in its **own ephemeral Kubernetes namespace** (`krkn-test-<uuid>`). Before the test, the framework creates the namespace, deploys the target workload, and waits for pods to be ready. After the test, the namespace is deleted (cascading all resources). **You do not need to deploy any workloads manually.**

## Prerequisites

- **KinD cluster** (or any Kubernetes cluster) running with `kubectl` configured (e.g. `KUBECONFIG` or default `~/.kube/config`).
- **Python 3.9+** and main repo deps: `pip install -r requirements.txt`.

Without a cluster, tests that need one will **skip** with a clear message (e.g. *"Could not load kube config"*). No manual workload deployment is required; workloads are deployed automatically into ephemeral namespaces per test.

### Supported clusters

- **KinD** (recommended): Use `make -f CI/tests_v2/Makefile setup` from the repo root. Fastest for local dev; uses a 2-node dev config by default. Override with `KIND_CONFIG=/path/to/kind-config.yml` for a larger cluster.
- **Minikube**: Should work; ensure the `kubectl` context is set. Not tested in CI.
- **Remote/cloud cluster**: Tests create and delete namespaces; use with caution. Use `--require-kind` to avoid accidentally running against production (tests will skip unless the context is kind/minikube).

### Setting up the cluster

**Option A: Use the setup script (recommended)**

From the repository root, with `kind` and `kubectl` installed:

```bash
# Create KinD cluster (defaults to CI/tests_v2/kind-config-dev.yml; override with KIND_CONFIG=...)
./CI/tests_v2/setup_env.sh
```

Then in the same shell (or after `export KUBECONFIG=~/.kube/config` in another terminal), activate your venv and install Python deps:

```bash
python3 -m venv venv
source venv/bin/activate  # or: source venv/Scripts/activate on Windows
pip install -r requirements.txt
pip install -r CI/tests_v2/requirements.txt
```

**Option B: Manual setup**

1. Install [kind](https://kind.sigs.k8s.io/docs/user/quick-start/) and [kubectl](https://kubernetes.io/docs/tasks/tools/).
2. Create a cluster (from repo root):
   ```bash
   kind create cluster --name kind --config kind-config.yml
   ```
3. Wait for the cluster:
   ```bash
   kubectl wait --for=condition=Ready nodes --all --timeout=120s
   ```
4. Create a virtualenv, activate it, and install dependencies (as in Option A).
5. Run tests from repo root: `pytest CI/tests_v2/ -v ...`

## Install test dependencies

From the repository root:

```bash
pip install -r CI/tests_v2/requirements.txt
```

This adds `pytest-rerunfailures`, `pytest-html`, `pytest-timeout`, and `pytest-order` (pytest and coverage come from the main `requirements.txt`).

## Dependency Management

Dependencies are split into two files:

- **Root `requirements.txt`** — Kraken runtime (cloud SDKs, Kubernetes client, krkn-lib, pytest, coverage, etc.). Required to run Kraken.
- **`CI/tests_v2/requirements.txt`** — Test-only pytest plugins (rerunfailures, html, timeout, order, xdist). Not needed by Kraken itself.

**Rule of thumb:** If Kraken needs it at runtime, add it to the root file. If only the functional tests need it, add it to `CI/tests_v2/requirements.txt`.

Running `make -f CI/tests_v2/Makefile setup` (or `make setup` from `CI/tests_v2`) creates the venv and installs **both** files automatically; you do not need to install them separately. The Makefile re-installs when either file changes (via the `.installed` sentinel).

## Run tests

All commands below are from the **repository root**.

### Basic run (with retries and HTML report)

```bash
pytest CI/tests_v2/ -v --timeout=300 --reruns=2 --reruns-delay=10 --html=CI/tests_v2/report.html --junitxml=CI/tests_v2/results.xml
```

- Failed tests are **retried up to 2 times** with a 10s delay (configurable in `CI/tests_v2/pytest.ini`).
- Each test has a **5-minute timeout**.
- Open `CI/tests_v2/report.html` in a browser for a detailed report.

### Run in parallel (faster suite)

```bash
pytest CI/tests_v2/ -v -n 4 --timeout=300
```

Ephemeral namespaces make tests parallel-safe; use `-n` with the number of workers (e.g. 4).

### Run without retries (for debugging)

```bash
pytest CI/tests_v2/ -v -p no:rerunfailures
```

### Run with coverage

```bash
python -m coverage run -m pytest CI/tests_v2/ -v
python -m coverage report
```

To append to existing coverage from unit tests, ensure coverage was started with `coverage run -a` for earlier runs, or run the full test suite in one go.

### Run only pod disruption tests

```bash
pytest CI/tests_v2/ -v -m pod_disruption
```

### Run only application outage tests

```bash
pytest CI/tests_v2/ -v -m application_outage
```

### Run with verbose output and no capture

```bash
pytest CI/tests_v2/ -v -s
```

### Keep failed test namespaces for debugging

When a test fails, its ephemeral namespace is normally deleted. To **keep** the namespace so you can inspect pods, logs, and network policies:

```bash
pytest CI/tests_v2/ -v --keep-ns-on-fail
```

On failure, the namespace name is printed (e.g. `[keep-ns-on-fail] Keeping namespace krkn-test-a1b2c3d4 for debugging`). Use `kubectl get pods -n krkn-test-a1b2c3d4` (and similar) to debug, then delete the namespace manually when done.

### Logging and cluster options

- **Structured logging**: Use `--log-cli-level=DEBUG` to see namespace creation, workload deploy, and readiness in the console. Use `--log-file=test.log` to capture logs to a file.
- **Require dev cluster**: To avoid running against the wrong cluster, use `--require-kind`. Tests will skip unless the current kube context cluster name contains "kind" or "minikube".
- **Stale namespace cleanup**: At session start, namespaces matching `krkn-test-*` that are older than 30 minutes are deleted (e.g. from a previous crashed run).
- **Timeout overrides**: Set env vars to tune timeouts (e.g. in CI): `KRKN_TEST_READINESS_TIMEOUT`, `KRKN_TEST_DEPLOY_TIMEOUT`, `KRKN_TEST_NS_CLEANUP_TIMEOUT`, `KRKN_TEST_POLICY_WAIT_TIMEOUT`, `KRKN_TEST_KRAKEN_PROC_WAIT_TIMEOUT`, `KRKN_TEST_TIMEOUT_BUDGET`.

## Architecture

- **Folder-per-scenario**: Each scenario lives under `scenarios/<scenario_name>/` with:
  - **test_<scenario>.py** — Test class extending `BaseScenarioTest`; sets `WORKLOAD_MANIFEST`, `SCENARIO_NAME`, `SCENARIO_TYPE`, `NAMESPACE_KEY_PATH`, and optionally `OVERRIDES_KEY_PATH`.
  - **resource.yaml** — Kubernetes resources (Deployment/Pod) for the scenario; the namespace is patched at deploy time.
  - **scenario_base.yaml** — Canonical Krkn scenario; the base class loads it, patches the namespace (and overrides), and passes it to Kraken via `run_scenario()`. Optional extra YAMLs (e.g. `nginx_http.yaml` for application_outage) can live in the same folder.
- **lib/**: Shared framework — `lib/base.py` defines `BaseScenarioTest`, timeout constants (env-overridable), and scenario helpers (`load_and_patch_scenario`, `run_scenario`); `lib/utils.py` provides assertion and K8s helpers; `lib/k8s.py` provides K8s client fixtures; `lib/namespace.py` provides the namespace lifecycle; `lib/deploy.py` provides `deploy_workload`, `wait_for_pods_running`, `wait_for_deployment_replicas`; `lib/kraken.py` provides `run_kraken`, `build_config` (using `CI/tests_v2/config/common_test_config.yaml`).
- **conftest.py**: Re-exports fixtures from the lib modules and defines `pytest_addoption`, logging, and `repo_root`.
- **Adding a new scenario**: Use the scaffold script (see [CONTRIBUTING_TESTS.md](CONTRIBUTING_TESTS.md)) to create `scenarios/<name>/` with a test file, `resource.yaml`, and `scenario_base.yaml`, or copy an existing scenario folder and adapt it.

## What is tested

Each test runs in an isolated ephemeral namespace; workloads are deployed automatically before the test and the namespace is deleted after (unless `--keep-ns-on-fail` is set and the test failed).

- **scenarios/pod_disruption/**
  Pod disruption scenario. `resource.yaml` is a deployment with label `app=krkn-pod-disruption-target`; `scenario_base.yaml` is loaded and `namespace_pattern` is patched to the test namespace. The test:
  1. Records baseline pod UIDs and restart counts.
  2. Runs Kraken with the pod disruption scenario.
  3. Asserts that chaos had an effect (UIDs changed or restart count increased; see the sketch after this list).
  4. Waits for pods to be Running and all containers Ready.
  5. Asserts the pod count is unchanged and all pods are healthy.

- **scenarios/application_outage/**
  Application outage scenario (block Ingress/Egress to target pods, then restore). `resource.yaml` is the main workload (outage pod); `scenario_base.yaml` is loaded and patched with the namespace (and duration/block as needed). Optional `nginx_http.yaml` is used by the traffic test. Tests include:
  - **test_app_outage_block_restore_and_variants**: Happy path with default, exclude_label, and block variants (Ingress, Egress, both); Krkn exits 0, pods still Running/Ready.
  - **test_network_policy_created_then_deleted**: A policy with prefix `krkn-deny-` appears during the run and is gone after.
  - **test_traffic_blocked_during_outage** (disabled, planned): Deploys nginx with label `scenario=outage`, port-forwards; during the outage curl fails, after the run curl succeeds.
  - **test_invalid_scenario_fails**: An invalid scenario file (missing `application_outage` key) causes Kraken to exit non-zero.
  - **test_bad_namespace_fails**: A scenario targeting a non-existent namespace causes Kraken to exit non-zero.

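A hypothetical sketch of the "chaos had an effect" check from step 3 above (the helper name and data shape are illustrative, not the framework's actual API):

```python
def chaos_had_effect(before: dict, after: dict) -> bool:
    """before/after map pod name -> (uid, total container restart count)."""
    for name, (uid, restarts) in after.items():
        old = before.get(name)
        if old is None or old[0] != uid or restarts > old[1]:
            # Pod was recreated (new UID) or at least one container restarted.
            return True
    return False
```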
## Configuration

- **pytest.ini**: Markers (`functional`, `pod_disruption`, `application_outage`, `no_workload`). Use `--timeout=300`, `--reruns=2`, `--reruns-delay=10` on the command line for full runs.
- **conftest.py**: Re-exports fixtures from `lib/k8s.py`, `lib/namespace.py`, `lib/deploy.py`, `lib/kraken.py` (e.g. `test_namespace`, `deploy_workload`, `k8s_core`, `wait_for_pods_running`, `run_kraken`, `build_config`). Configs are built from `CI/tests_v2/config/common_test_config.yaml` with monitoring disabled for local runs. Timeout constants in `lib/base.py` can be overridden via env vars.
- **Cluster access**: Reads and applies use the Kubernetes Python client; `kubectl` is still used for `port-forward` and for running Kraken.
- **utils.py**: Pod/network policy helpers and assertion helpers (`assert_all_pods_running_and_ready`, `assert_pod_count_unchanged`, `assert_kraken_success`, `assert_kraken_failure`, `patch_namespace_in_docs`).

## Relationship to existing CI

- The **existing** bash tests in `CI/tests/` and `CI/run.sh` are **unchanged**. They continue to run as before in GitHub Actions.
- This framework is **additive**. To run it in CI later, add a separate job or step that runs `pytest CI/tests_v2/ ...` from the repo root.

## Troubleshooting

- **`pytest.skip: Could not load kube config`** — No cluster or a bad KUBECONFIG. Run `make -f CI/tests_v2/Makefile setup` (or `make setup` from `CI/tests_v2`) or check `kubectl cluster-info`.
- **KinD cluster creation hangs** — Docker is not running. Start Docker Desktop or run `systemctl start docker`.
- **`Bind for 0.0.0.0:9090 failed: port is already allocated`** — Another process (e.g. Prometheus) is using the port. The default dev config (`kind-config-dev.yml`) no longer maps host ports; if you use `KIND_CONFIG=kind-config.yml` or a custom config with `extraPortMappings`, free the port or switch to `kind-config-dev.yml`.
- **`TimeoutError: Pods did not become ready`** — Slow image pull or node resource limits. Increase `KRKN_TEST_READINESS_TIMEOUT` or check node resources.
- **`ModuleNotFoundError: pytest_rerunfailures`** — Missing test deps. Run `pip install -r CI/tests_v2/requirements.txt` (or `make setup`).
- **Stale `krkn-test-*` namespaces** — Left over from a previous crashed run. They are auto-cleaned at session start (older than 30 min). To remove the cluster and reports: `make -f CI/tests_v2/Makefile clean`.
- **Wrong cluster targeted** — Multiple kube contexts. Use `--require-kind` to skip unless the context is kind/minikube, or set the context explicitly: `kubectl config use-context kind-ci-krkn`.
- **`OSError: [Errno 48] Address already in use` when running tests in parallel** — Kraken normally starts an HTTP status server on port 8081. With `-n auto` (pytest-xdist), multiple Kraken processes would all try to bind to 8081. The test framework disables this server (`publish_kraken_status: False`) in the generated config, so parallel runs should not hit this. If you see it, ensure you're using the framework's `build_config` and not a config that has `publish_kraken_status: True`.
74
CI/tests_v2/config/common_test_config.yaml
Normal file
74
CI/tests_v2/config/common_test_config.yaml
Normal file
@@ -0,0 +1,74 @@
|
||||
kraken:
|
||||
distribution: kubernetes # Distribution can be kubernetes or openshift.
|
||||
kubeconfig_path: ~/.kube/config # Path to kubeconfig.
|
||||
exit_on_failure: False # Exit when a post action scenario fails.
|
||||
publish_kraken_status: True # Can be accessed at http://0.0.0.0:8081
|
||||
signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer docs/signal.md for more details
|
||||
signal_address: 0.0.0.0 # Signal listening address
|
||||
port: 8081 # Signal port
|
||||
auto_rollback: True # Enable auto rollback for scenarios.
|
||||
rollback_versions_directory: /tmp/kraken-rollback # Directory to store rollback version files.
|
||||
chaos_scenarios: # List of policies/chaos scenarios to load.
|
||||
- $scenario_type: # List of chaos pod scenarios to load.
|
||||
- $scenario_file
|
||||
cerberus:
|
||||
cerberus_enabled: False # Enable it when cerberus is previously installed.
|
||||
cerberus_url: # When cerberus_enabled is set to True, provide the url where cerberus publishes the go/no-go signal.

performance_monitoring:
  capture_metrics: False
  metrics_profile_path: config/metrics-aggregated.yaml
  prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift; please set it when the distribution is Kubernetes.
  prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift; please set it when the distribution is Kubernetes. It is needed to authenticate with prometheus.
  uuid: # uuid for the run; generated by default if not set.
  enable_alerts: True # Runs the queries specified in the alert profile and displays the info, or exits 1 when severity=error
  enable_metrics: True
  alert_profile: config/alerts.yaml # Path or URL to the alert profile with the prometheus queries
  metrics_profile: config/metrics-report.yaml
  check_critical_alerts: True # When enabled, checks whether critical alerts are firing during the chaos run and fails the run if so.

tunings:
  wait_duration: 6 # Duration to wait between each chaos scenario.
  iterations: 1 # Number of times to execute the scenarios.
  daemon_mode: False # If True, iterations are set to infinity, meaning Kraken will cause chaos forever.

telemetry:
  enabled: False # enables/disables the telemetry collection feature
  api_url: https://yvnn4rfoi7.execute-api.us-west-2.amazonaws.com/test # telemetry service endpoint
  username: $TELEMETRY_USERNAME # telemetry service username
  password: $TELEMETRY_PASSWORD # telemetry service password
  prometheus_namespace: 'monitoring' # prometheus namespace
  prometheus_pod_name: 'prometheus-kind-prometheus-kube-prome-prometheus-0' # prometheus pod name
  prometheus_container_name: 'prometheus'
  prometheus_backup: True # enables/disables prometheus data collection
  full_prometheus_backup: False # if set to False, only the /prometheus/wal folder will be downloaded.
  backup_threads: 5 # number of telemetry download/upload threads
  archive_path: /tmp # local path where the archive files will be temporarily stored
  max_retries: 0 # maximum number of upload retries (if 0, will retry forever)
  run_tag: '' # if set, this will be appended to the run folder in the bucket (useful to group runs)
  archive_size: 10000 # the size of each prometheus data archive file in KB. The lower the archive size, the higher the number of archive files produced and uploaded.
  logs_backup: True
  logs_filter_patterns:
    - "(\\w{3}\\s\\d{1,2}\\s\\d{2}:\\d{2}:\\d{2}\\.\\d+).+" # Sep 9 11:20:36.123425532
    - "kinit (\\d+/\\d+/\\d+\\s\\d{2}:\\d{2}:\\d{2})\\s+" # kinit 2023/09/15 11:20:36 log
    - "(\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d+Z).+" # 2023-09-15T11:20:36.123425532Z log
  oc_cli_path: /usr/bin/oc # optional; if not specified, it will be searched for in $PATH
  events_backup: True # enables/disables cluster events collection
  telemetry_group: "funtests"

elastic:
  enable_elastic: False
  verify_certs: False
  elastic_url: "https://192.168.39.196" # To track results in elasticsearch, set the server url here; telemetry details are posted when url and index are not blank
  elastic_port: 32766
  username: "elastic"
  password: "test"
  metrics_index: "krkn-metrics"
  alerts_index: "krkn-alerts"
  telemetry_index: "krkn-telemetry"

health_checks: # Utilizes health check endpoints to observe application behavior during chaos injection.
  interval: # Interval in seconds between health checks; defaults to 2 seconds
  config: # List of health check configurations for applications
    - url: # Application endpoint
      bearer_token: # Bearer token for authentication, if any
      auth: # Authentication credentials (username, password) in tuple format, if any, e.g. ("admin","secretpassword")
      exit_on_failure: # If True, exits when a health check fails for the application; values can be True/False
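The health_checks block above only declares endpoints and credentials; as a rough sketch of the behavior it configures (an illustration, not Krkn's actual implementation), a poller might look like:

import time
import requests  # illustrative only; not Krkn's actual health-check code

def poll_health(url, interval=2, bearer_token=None, auth=None, exit_on_failure=False):
    """Poll one configured endpoint on a fixed interval; mirrors the config keys above."""
    headers = {"Authorization": f"Bearer {bearer_token}"} if bearer_token else {}
    while True:
        try:
            ok = requests.get(url, headers=headers, auth=auth, timeout=5).ok
        except requests.RequestException:
            ok = False
        if not ok and exit_on_failure:
            raise SystemExit(1)  # stop the chaos run on a failed health check
        time.sleep(interval)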
CI/tests_v2/conftest.py (new file)
@@ -0,0 +1,67 @@
"""
Shared fixtures for pytest functional tests (CI/tests_v2).
Tests must be run from the repository root so run_kraken.py and config paths resolve.
"""

import logging
from pathlib import Path

import pytest


def pytest_addoption(parser):
    parser.addoption(
        "--keep-ns-on-fail",
        action="store_true",
        default=False,
        help="Don't delete test namespaces on failure (for debugging)",
    )
    parser.addoption(
        "--require-kind",
        action="store_true",
        default=False,
        help="Skip tests unless current context is a known dev cluster (kind, minikube)",
    )


@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    outcome = yield
    rep = outcome.get_result()
    setattr(item, f"rep_{rep.when}", rep)


def _repo_root() -> Path:
    """Repository root (directory containing run_kraken.py and CI/)."""
    return Path(__file__).resolve().parent.parent.parent


@pytest.fixture(scope="session")
def repo_root():
    return _repo_root()


@pytest.fixture(scope="session", autouse=True)
def _configure_logging():
    """Set log format with timestamps for test runs."""
    logging.basicConfig(
        format="%(asctime)s %(levelname)s [%(name)s] %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
        level=logging.INFO,
    )


# Re-export fixtures from lib modules so pytest discovers them
from lib.deploy import deploy_workload, wait_for_pods_running  # noqa: E402, F401
from lib.kraken import build_config, run_kraken, run_kraken_background  # noqa: E402, F401
from lib.k8s import (  # noqa: E402, F401
    _kube_config_loaded,
    _log_cluster_context,
    k8s_apps,
    k8s_client,
    k8s_core,
    k8s_networking,
    kubectl,
)
from lib.namespace import _cleanup_stale_namespaces, test_namespace  # noqa: E402, F401
from lib.preflight import _preflight_checks  # noqa: E402, F401
CI/tests_v2/kind-config-dev.yml (new file)
@@ -0,0 +1,8 @@
# Lean KinD config for local dev (faster than full 5-node). Use KIND_CONFIG to override.
# No extraPortMappings so setup works when 9090/30080 are in use (e.g. local Prometheus).
# For Prometheus/ES port mapping, use the repo root kind-config.yml.
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
- role: worker
CI/tests_v2/lib/__init__.py (new file)
@@ -0,0 +1,7 @@
# Shared framework for CI/tests_v2 functional tests.
# base: BaseScenarioTest, timeout constants
# utils: assertions, K8s helpers, patch_namespace_in_docs
# k8s: K8s client fixtures, cluster context checks
# namespace: test_namespace, stale namespace cleanup
# deploy: deploy_workload, wait_for_pods_running, wait_for_deployment_replicas
# kraken: run_kraken, run_kraken_background, build_config
CI/tests_v2/lib/base.py (new file)
@@ -0,0 +1,155 @@
"""
Base class for CI/tests_v2 scenario tests.
Encapsulates the shared lifecycle: ephemeral namespace, optional workload deploy, teardown.
"""

import copy
import logging
import os
import subprocess
from pathlib import Path

import pytest
import yaml

from lib.utils import load_scenario_base

logger = logging.getLogger(__name__)


def _get_nested(obj, path):
    """Walk path (list of keys/indices) and return the value. Supports list and dict."""
    for key in path:
        obj = obj[key]
    return obj


def _set_nested(obj, path, value):
    """Walk path to the parent and set the last key to value."""
    if not path:
        return
    parent_path, last_key = path[:-1], path[-1]
    parent = obj
    for key in parent_path:
        parent = parent[key]
    parent[last_key] = value


# Timeout constants (seconds). Override via env vars (e.g. KRKN_TEST_READINESS_TIMEOUT).
# Coordinate with pytest-timeout budget (e.g. 300s).
TIMEOUT_BUDGET = int(os.environ.get("KRKN_TEST_TIMEOUT_BUDGET", "300"))
DEPLOY_TIMEOUT = int(os.environ.get("KRKN_TEST_DEPLOY_TIMEOUT", "90"))
READINESS_TIMEOUT = int(os.environ.get("KRKN_TEST_READINESS_TIMEOUT", "90"))
NS_CLEANUP_TIMEOUT = int(os.environ.get("KRKN_TEST_NS_CLEANUP_TIMEOUT", "60"))
POLICY_WAIT_TIMEOUT = int(os.environ.get("KRKN_TEST_POLICY_WAIT_TIMEOUT", "30"))
KRAKEN_PROC_WAIT_TIMEOUT = int(os.environ.get("KRKN_TEST_KRAKEN_PROC_WAIT_TIMEOUT", "60"))


class BaseScenarioTest:
    """
    Base class for scenario tests. Subclasses set:
    - WORKLOAD_MANIFEST: path (str), or callable(namespace) -> YAML str for inline manifest
    - WORKLOAD_IS_PATH: True if WORKLOAD_MANIFEST is a file path, False if inline YAML
    - LABEL_SELECTOR: label selector for pods to wait on (e.g. "app=my-target")
    - SCENARIO_NAME: e.g. "pod_disruption", "application_outage"
    - SCENARIO_TYPE: e.g. "pod_disruption_scenarios", "application_outages_scenarios"
    - NAMESPACE_KEY_PATH: path to namespace field, e.g. [0, "config", "namespace_pattern"] or ["application_outage", "namespace"]
    - NAMESPACE_IS_REGEX: True to wrap namespace in ^...$
    - OVERRIDES_KEY_PATH: path to dict for **overrides (e.g. ["application_outage"]), or [] if none
    """

    WORKLOAD_MANIFEST = None
    WORKLOAD_IS_PATH = True
    LABEL_SELECTOR = None
    SCENARIO_NAME = ""
    SCENARIO_TYPE = ""
    NAMESPACE_KEY_PATH = []
    NAMESPACE_IS_REGEX = False
    OVERRIDES_KEY_PATH = []

    @pytest.fixture(autouse=True)
    def _inject_common_fixtures(
        self,
        repo_root,
        tmp_path,
        build_config,
        run_kraken,
        run_kraken_background,
        k8s_core,
        k8s_apps,
        k8s_networking,
        k8s_client,
    ):
        """Inject common fixtures onto self so test methods don't need to declare them."""
        self.repo_root = repo_root
        self.tmp_path = tmp_path
        self.build_config = build_config
        self.run_kraken = run_kraken
        self.run_kraken_background = run_kraken_background
        self.k8s_core = k8s_core
        self.k8s_apps = k8s_apps
        self.k8s_networking = k8s_networking
        self.k8s_client = k8s_client
        yield

    @pytest.fixture(autouse=True)
    def _setup_workload(self, request, repo_root):
        if "no_workload" in request.keywords:
            request.instance.ns = request.getfixturevalue("test_namespace")
            logger.debug("no_workload marker: skipping workload deploy, ns=%s", request.instance.ns)
            yield
            return
        deploy = request.getfixturevalue("deploy_workload")
        test_namespace = request.getfixturevalue("test_namespace")
        manifest = self.WORKLOAD_MANIFEST
        if callable(manifest):
            manifest = manifest(test_namespace)
            is_path = False
            logger.info("Deploying inline workload in ns=%s, label_selector=%s", test_namespace, self.LABEL_SELECTOR)
        else:
            is_path = self.WORKLOAD_IS_PATH
            if is_path and manifest and not Path(manifest).is_absolute():
                manifest = repo_root / manifest
            logger.info("Deploying workload from %s in ns=%s, label_selector=%s", manifest, test_namespace, self.LABEL_SELECTOR)
        ns = deploy(manifest, self.LABEL_SELECTOR, is_path=is_path, timeout=DEPLOY_TIMEOUT)
        request.instance.ns = ns
        yield

    def load_and_patch_scenario(self, repo_root, namespace, **overrides):
        """Load scenario_base.yaml and patch namespace (and overrides). Returns the scenario structure."""
        scenario = copy.deepcopy(load_scenario_base(repo_root, self.SCENARIO_NAME))
        ns_value = f"^{namespace}$" if self.NAMESPACE_IS_REGEX else namespace
        if self.NAMESPACE_KEY_PATH:
            _set_nested(scenario, self.NAMESPACE_KEY_PATH, ns_value)
        if overrides and self.OVERRIDES_KEY_PATH:
            target = _get_nested(scenario, self.OVERRIDES_KEY_PATH)
            for key, value in overrides.items():
                target[key] = value
        return scenario

    def write_scenario(self, tmp_path, scenario_data, suffix=""):
        """Write scenario data to a YAML file in tmp_path. Returns the path."""
        filename = f"{self.SCENARIO_NAME}_scenario{suffix}.yaml"
        path = tmp_path / filename
        path.write_text(yaml.dump(scenario_data, default_flow_style=False, sort_keys=False))
        return path

    def run_scenario(self, tmp_path, namespace, *, overrides=None, config_filename=None):
        """Load, patch, write scenario; build config; run Kraken. Returns CompletedProcess."""
        scenario = self.load_and_patch_scenario(self.repo_root, namespace, **(overrides or {}))
        scenario_path = self.write_scenario(tmp_path, scenario)
        config_path = self.build_config(
            self.SCENARIO_TYPE,
            str(scenario_path),
            filename=config_filename or "test_config.yaml",
        )
        if os.environ.get("KRKN_TEST_DRY_RUN", "0") == "1":
            logger.info(
                "[dry-run] Would run Kraken with config=%s, scenario=%s",
                config_path,
                scenario_path,
            )
            return subprocess.CompletedProcess(
                args=[], returncode=0, stdout="[dry-run] skipped", stderr=""
            )
        return self.run_kraken(config_path)
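As a quick, self-contained illustration of the two nested-path helpers above (the values are made up; the two shapes match the class docstring):

from lib.base import _get_nested, _set_nested

dict_scenario = {"application_outage": {"namespace": "default", "duration": 10}}
list_scenario = [{"id": "pod-disruption-default", "config": {"namespace_pattern": "^default$"}}]

# Dict-shaped scenario: patch by key path.
_set_nested(dict_scenario, ["application_outage", "namespace"], "krkn-test-1a2b3c4d")
# List-shaped scenario: the first path element is a list index.
_set_nested(list_scenario, [0, "config", "namespace_pattern"], "^krkn-test-1a2b3c4d$")

assert _get_nested(dict_scenario, ["application_outage", "namespace"]) == "krkn-test-1a2b3c4d"
assert _get_nested(list_scenario, [0, "config", "namespace_pattern"]) == "^krkn-test-1a2b3c4d$"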
CI/tests_v2/lib/deploy.py (new file)
@@ -0,0 +1,145 @@
"""
Workload deploy and pod/deployment readiness fixtures for CI/tests_v2.
"""

import logging
import time
from pathlib import Path

import pytest
import yaml
from kubernetes import utils as k8s_utils

from lib.base import READINESS_TIMEOUT
from lib.utils import patch_namespace_in_docs

logger = logging.getLogger(__name__)


def wait_for_deployment_replicas(k8s_apps, namespace: str, name: str, timeout: int = 120) -> None:
    """
    Poll until the deployment has ready_replicas >= spec.replicas.
    Raises TimeoutError with diagnostic details on failure.
    """
    deadline = time.monotonic() + timeout
    last_dep = None
    attempts = 0
    while time.monotonic() < deadline:
        try:
            dep = k8s_apps.read_namespaced_deployment(name=name, namespace=namespace)
        except Exception as e:
            logger.debug("Deployment %s/%s poll attempt %s failed: %s", namespace, name, attempts, e)
            time.sleep(2)
            attempts += 1
            continue
        last_dep = dep
        ready = dep.status.ready_replicas or 0
        desired = dep.spec.replicas or 1
        if ready >= desired:
            logger.debug("Deployment %s/%s ready (%s/%s)", namespace, name, ready, desired)
            return
        logger.debug("Deployment %s/%s not ready yet: %s/%s", namespace, name, ready, desired)
        time.sleep(2)
        attempts += 1
    diag = ""
    if last_dep is not None and last_dep.status:
        diag = f" ready_replicas={last_dep.status.ready_replicas}, desired={last_dep.spec.replicas}"
    raise TimeoutError(
        f"Deployment {namespace}/{name} did not become ready within {timeout}s.{diag}"
    )


@pytest.fixture
def wait_for_pods_running(k8s_core):
    """
    Poll until all matching pods are Running and all containers ready.
    Uses exponential backoff: 1s, 2s, 4s, ... capped at 10s.
    Raises TimeoutError with diagnostic details on failure.
    """

    def _wait(namespace: str, label_selector: str, timeout: int = READINESS_TIMEOUT):
        deadline = time.monotonic() + timeout
        interval = 1.0
        max_interval = 10.0
        last_list = None
        while time.monotonic() < deadline:
            try:
                pod_list = k8s_core.list_namespaced_pod(
                    namespace=namespace,
                    label_selector=label_selector,
                )
            except Exception:
                time.sleep(min(interval, max_interval))
                interval = min(interval * 2, max_interval)
                continue
            last_list = pod_list
            items = pod_list.items or []
            if not items:
                time.sleep(min(interval, max_interval))
                interval = min(interval * 2, max_interval)
                continue
            all_running = all(
                (p.status and p.status.phase == "Running") for p in items
            )
            if not all_running:
                time.sleep(min(interval, max_interval))
                interval = min(interval * 2, max_interval)
                continue
            all_ready = True
            for p in items:
                if not p.status or not p.status.container_statuses:
                    all_ready = False
                    break
                for cs in p.status.container_statuses:
                    if not getattr(cs, "ready", False):
                        all_ready = False
                        break
            if all_ready:
                return
            time.sleep(min(interval, max_interval))
            interval = min(interval * 2, max_interval)

        diag = ""
        if last_list and last_list.items:
            p = last_list.items[0]
            diag = f" e.g. pod {p.metadata.name}: phase={getattr(p.status, 'phase', None)}"
        raise TimeoutError(
            f"Pods in {namespace} with label {label_selector} did not become ready within {timeout}s.{diag}"
        )

    return _wait


@pytest.fixture(scope="function")
def deploy_workload(test_namespace, k8s_client, wait_for_pods_running, repo_root, tmp_path):
    """
    Helper that applies a manifest into the test namespace and waits for pods.
    Yields a callable: deploy(manifest_path_or_content, label_selector, *, is_path=True)
    which applies the manifest, waits for readiness, and returns the namespace name.
    """

    def _deploy(manifest_path_or_content, label_selector, *, is_path=True, timeout=READINESS_TIMEOUT):
        try:
            if is_path:
                path = Path(manifest_path_or_content)
                if not path.is_absolute():
                    path = repo_root / path
                with open(path) as f:
                    docs = list(yaml.safe_load_all(f))
            else:
                docs = list(yaml.safe_load_all(manifest_path_or_content))
            docs = patch_namespace_in_docs(docs, test_namespace)
            k8s_utils.create_from_yaml(
                k8s_client,
                yaml_objects=docs,
                namespace=test_namespace,
            )
        except k8s_utils.FailToCreateError as e:
            msgs = [str(exc) for exc in e.api_exceptions]
            raise RuntimeError(f"Failed to create resources: {'; '.join(msgs)}") from e
        logger.info("Workload applied in namespace=%s, waiting for pods with selector=%s", test_namespace, label_selector)
        wait_for_pods_running(test_namespace, label_selector, timeout=timeout)
        logger.info("Pods ready in namespace=%s", test_namespace)
        return test_namespace

    return _deploy
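For reference, a hypothetical test using the deploy_workload fixture above (the manifest path and selector match the pod_disruption resources added later in this diff):

def test_deploy_smoke(deploy_workload, test_namespace):
    # Applies the manifest into the ephemeral namespace and blocks until pods are Ready.
    ns = deploy_workload(
        "CI/tests_v2/scenarios/pod_disruption/resource.yaml",
        "app=krkn-pod-disruption-target",
        is_path=True,
    )
    assert ns == test_namespace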
CI/tests_v2/lib/k8s.py (new file)
@@ -0,0 +1,88 @@
"""
Kubernetes client fixtures and cluster context checks for CI/tests_v2.
"""

import logging
import subprocess
from pathlib import Path

import pytest
from kubernetes import client, config

logger = logging.getLogger(__name__)


@pytest.fixture(scope="session")
def _kube_config_loaded():
    """Load kubeconfig once per session. Skips if cluster unreachable."""
    try:
        config.load_kube_config()
        logger.info("Kube config loaded successfully")
    except config.ConfigException as e:
        logger.warning("Could not load kube config: %s", e)
        pytest.skip(f"Could not load kube config (is a cluster running?): {e}")


@pytest.fixture(scope="session")
def k8s_core(_kube_config_loaded):
    """Kubernetes CoreV1Api for pods, etc. Uses default kubeconfig."""
    return client.CoreV1Api()


@pytest.fixture(scope="session")
def k8s_networking(_kube_config_loaded):
    """Kubernetes NetworkingV1Api for network policies."""
    return client.NetworkingV1Api()


@pytest.fixture(scope="session")
def k8s_client(_kube_config_loaded):
    """Kubernetes ApiClient for create_from_yaml and other generic API calls."""
    return client.ApiClient()


@pytest.fixture(scope="session")
def k8s_apps(_kube_config_loaded):
    """Kubernetes AppsV1Api for deployment status polling."""
    return client.AppsV1Api()


@pytest.fixture(scope="session", autouse=True)
def _log_cluster_context(request):
    """Log current cluster context at session start; skip if --require-kind and not a dev cluster."""
    try:
        contexts, active = config.list_kube_config_contexts()
    except Exception as e:
        logger.warning("Could not list kube config contexts: %s", e)
        return
    if not active:
        return
    context_name = active.get("name", "?")
    cluster = (active.get("context") or {}).get("cluster", "?")
    logger.info("Running tests against cluster: context=%s cluster=%s", context_name, cluster)
    if not request.config.getoption("--require-kind", False):
        return
    cluster_lower = (cluster or "").lower()
    if "kind" in cluster_lower or "minikube" in cluster_lower:
        return
    pytest.skip(
        f"Cluster '{cluster}' does not look like kind/minikube. "
        "Use default kubeconfig or pass --require-kind only on dev clusters."
    )


@pytest.fixture
def kubectl(repo_root):
    """Run kubectl with given args from repo root. Returns CompletedProcess."""

    def run(args, timeout=120):
        cmd = ["kubectl"] + (args if isinstance(args, list) else list(args))
        return subprocess.run(
            cmd,
            cwd=repo_root,
            capture_output=True,
            text=True,
            timeout=timeout,
        )

    return run
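And a hypothetical use of the kubectl fixture for anything the Python client doesn't cover:

def test_kubectl_smoke(kubectl, test_namespace):
    # Shells out to kubectl from the repo root; returns a CompletedProcess.
    result = kubectl(["get", "pods", "-n", test_namespace, "-o", "name"])
    assert result.returncode == 0, result.stderr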
CI/tests_v2/lib/kraken.py (new file)
@@ -0,0 +1,94 @@
"""
Kraken execution and config building fixtures for CI/tests_v2.
"""

import os
import subprocess
import sys
from pathlib import Path

import pytest
import yaml


def _kraken_cmd(config_path: str, repo_root: Path):
    """Use the same Python as the test process so venv/.venv and coverage match."""
    python = sys.executable
    if os.environ.get("KRKN_TEST_COVERAGE", "0") == "1":
        return [
            python, "-m", "coverage", "run", "-a",
            "run_kraken.py", "-c", str(config_path),
        ]
    return [python, "run_kraken.py", "-c", str(config_path)]


@pytest.fixture
def run_kraken(repo_root):
    """Run Kraken with the given config path. Returns CompletedProcess. Default timeout 300s."""

    def run(config_path, timeout=300, extra_args=None):
        cmd = _kraken_cmd(config_path, repo_root)
        if extra_args:
            cmd.extend(extra_args)
        return subprocess.run(
            cmd,
            cwd=repo_root,
            capture_output=True,
            text=True,
            timeout=timeout,
        )

    return run


@pytest.fixture
def run_kraken_background(repo_root):
    """Start Kraken in background. Returns Popen. Call proc.terminate() or proc.wait() to stop."""

    def start(config_path):
        cmd = _kraken_cmd(config_path, repo_root)
        return subprocess.Popen(
            cmd,
            cwd=repo_root,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
        )

    return start


@pytest.fixture
def build_config(repo_root, tmp_path):
    """
    Build a Kraken config from tests_v2's common_test_config.yaml with scenario_type and scenario_file
    substituted. Disables Prometheus/Elastic checks for local runs.
    Returns the path to the written config file.
    """
    common_path = repo_root / "CI" / "tests_v2" / "config" / "common_test_config.yaml"

    def _build(scenario_type: str, scenario_file: str, filename: str = "test_config.yaml"):
        content = common_path.read_text()
        content = content.replace("$scenario_type", scenario_type)
        content = content.replace("$scenario_file", scenario_file)
        content = content.replace("$post_config", "")

        config = yaml.safe_load(content)
        if "kraken" in config:
            # Disable status server so parallel test workers don't all bind to port 8081
            config["kraken"]["publish_kraken_status"] = False
        if "performance_monitoring" in config:
            config["performance_monitoring"]["check_critical_alerts"] = False
            config["performance_monitoring"]["enable_alerts"] = False
            config["performance_monitoring"]["enable_metrics"] = False
        if "elastic" in config:
            config["elastic"]["enable_elastic"] = False
        if "tunings" in config:
            config["tunings"]["wait_duration"] = 1

        out_path = tmp_path / filename
        with open(out_path, "w") as f:
            yaml.dump(config, f, default_flow_style=False, sort_keys=False)
        return str(out_path)

    return _build
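Tying these fixtures together, a hypothetical smoke test (the inline scenario body is illustrative and assumes a reachable cluster):

def test_run_kraken_smoke(build_config, run_kraken, tmp_path):
    # Write a minimal scenario, substitute it into the common config, and run Kraken once.
    scenario_path = tmp_path / "scenario.yaml"
    scenario_path.write_text("application_outage:\n  namespace: default\n  duration: 5\n")
    config_path = build_config("application_outages_scenarios", str(scenario_path))
    result = run_kraken(config_path, timeout=120)
    assert result.returncode == 0, result.stderr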
CI/tests_v2/lib/namespace.py (new file)
@@ -0,0 +1,114 @@
"""
Namespace lifecycle fixtures for CI/tests_v2: create, delete, stale cleanup.
"""

import logging
import os
import time
import uuid
from datetime import datetime

import pytest
from kubernetes import client
from kubernetes.client.rest import ApiException

logger = logging.getLogger(__name__)

STALE_NS_AGE_MINUTES = 30


def _namespace_age_minutes(metadata) -> float:
    """Return age of namespace in minutes from its creation_timestamp."""
    if not metadata or not metadata.creation_timestamp:
        return 0.0
    created = metadata.creation_timestamp
    if hasattr(created, "timestamp"):
        created_ts = created.timestamp()
    else:
        try:
            dt = datetime.fromisoformat(created.replace("Z", "+00:00"))
            created_ts = dt.timestamp()
        except Exception:
            return 0.0
    return (time.time() - created_ts) / 60.0


def _wait_for_namespace_gone(k8s_core, name: str, timeout: int = 60):
    """Poll until the namespace no longer exists."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        try:
            k8s_core.read_namespace(name=name)
        except ApiException as e:
            if e.status == 404:
                return
            raise
        time.sleep(1)
    raise TimeoutError(f"Namespace {name} did not disappear within {timeout}s")


@pytest.fixture(scope="function")
def test_namespace(request, k8s_core):
    """
    Create an ephemeral namespace for the test. Deleted after the test unless
    --keep-ns-on-fail is set and the test failed.
    """
    name = f"krkn-test-{uuid.uuid4().hex[:8]}"
    ns = client.V1Namespace(
        metadata=client.V1ObjectMeta(
            name=name,
            labels={
                "pod-security.kubernetes.io/audit": "privileged",
                "pod-security.kubernetes.io/enforce": "privileged",
                "pod-security.kubernetes.io/enforce-version": "v1.24",
                "pod-security.kubernetes.io/warn": "privileged",
                "security.openshift.io/scc.podSecurityLabelSync": "false",
            },
        )
    )
    k8s_core.create_namespace(body=ns)
    logger.info("Created test namespace: %s", name)

    yield name

    keep_on_fail = request.config.getoption("--keep-ns-on-fail", False)
    rep_call = getattr(request.node, "rep_call", None)
    failed = rep_call is not None and rep_call.failed
    if keep_on_fail and failed:
        logger.info("[keep-ns-on-fail] Keeping namespace %s for debugging", name)
        return

    try:
        k8s_core.delete_namespace(
            name=name,
            body=client.V1DeleteOptions(propagation_policy="Background"),
        )
        logger.debug("Scheduled background deletion for namespace: %s", name)
    except Exception as e:
        logger.warning("Failed to delete namespace %s: %s", name, e)


@pytest.fixture(scope="session", autouse=True)
def _cleanup_stale_namespaces(k8s_core):
    """Delete krkn-test-* namespaces older than STALE_NS_AGE_MINUTES at session start."""
    if os.environ.get("PYTEST_XDIST_WORKER"):
        return
    try:
        namespaces = k8s_core.list_namespace()
    except Exception as e:
        logger.warning("Could not list namespaces for stale cleanup: %s", e)
        return
    for ns in namespaces.items or []:
        name = ns.metadata.name if ns.metadata else ""
        if not name.startswith("krkn-test-"):
            continue
        if _namespace_age_minutes(ns.metadata) <= STALE_NS_AGE_MINUTES:
            continue
        try:
            logger.warning("Deleting stale namespace: %s", name)
            k8s_core.delete_namespace(
                name=name,
                body=client.V1DeleteOptions(propagation_policy="Background"),
            )
        except Exception as e:
            logger.warning("Failed to delete stale namespace %s: %s", name, e)
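A quick illustration of the age helper with a stand-in metadata object (SimpleNamespace instead of a real V1ObjectMeta; purely illustrative):

from datetime import datetime, timezone
from types import SimpleNamespace

from lib.namespace import STALE_NS_AGE_MINUTES, _namespace_age_minutes

# A datetime has a .timestamp() method, so the first branch of the helper is used.
meta = SimpleNamespace(creation_timestamp=datetime.now(timezone.utc))
assert _namespace_age_minutes(meta) < STALE_NS_AGE_MINUTES  # freshly created, not stale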
CI/tests_v2/lib/preflight.py (new file)
@@ -0,0 +1,48 @@
"""
Preflight checks for CI/tests_v2: cluster reachability and test deps at session start.
"""

import logging
import subprocess

import pytest

logger = logging.getLogger(__name__)


@pytest.fixture(scope="session", autouse=True)
def _preflight_checks(repo_root):
    """
    Verify cluster is reachable and test deps are importable at session start.
    Skips the session if cluster-info fails or required plugins are missing.
    """
    # Check test deps (pytest plugins)
    try:
        import pytest_rerunfailures  # noqa: F401
        import pytest_html  # noqa: F401
        import pytest_timeout  # noqa: F401
        import pytest_order  # noqa: F401
        import xdist  # noqa: F401
    except ImportError as e:
        pytest.skip(
            f"Missing test dependency: {e}. "
            "Run: pip install -r CI/tests_v2/requirements.txt"
        )

    # Check cluster reachable and log server URL
    result = subprocess.run(
        ["kubectl", "cluster-info"],
        cwd=repo_root,
        capture_output=True,
        text=True,
        timeout=10,
    )
    if result.returncode != 0:
        pytest.skip(
            f"Cluster not reachable (kubectl cluster-info failed). "
            f"Start a cluster (e.g. make setup) or check KUBECONFIG. stderr: {result.stderr or '(none)'}"
        )
    # Log first line of cluster-info (server URL) for debugging
    if result.stdout:
        first_line = result.stdout.strip().split("\n")[0]
        logger.info("Preflight: %s", first_line)
CI/tests_v2/lib/utils.py (new file)
@@ -0,0 +1,212 @@
"""
Shared helpers for CI/tests_v2 functional tests.
"""

import logging
import time
from pathlib import Path
from typing import List, Optional, Union

import pytest
import yaml
from kubernetes.client import V1NetworkPolicy, V1NetworkPolicyList, V1Pod, V1PodList

logger = logging.getLogger(__name__)


def _pods(pod_list: Union[V1PodList, List[V1Pod]]) -> List[V1Pod]:
    """Normalize V1PodList or list of V1Pod to list of V1Pod."""
    return pod_list.items if hasattr(pod_list, "items") else pod_list


def _policies(
    policy_list: Union[V1NetworkPolicyList, List[V1NetworkPolicy]],
) -> List[V1NetworkPolicy]:
    """Normalize V1NetworkPolicyList or list to list of V1NetworkPolicy."""
    return policy_list.items if hasattr(policy_list, "items") else policy_list


def scenario_dir(repo_root: Path, scenario_name: str) -> Path:
    """Return the path to a scenario folder under CI/tests_v2/scenarios/."""
    return repo_root / "CI" / "tests_v2" / "scenarios" / scenario_name


def load_scenario_base(
    repo_root: Path,
    scenario_name: str,
    filename: str = "scenario_base.yaml",
) -> Union[dict, list]:
    """
    Load and parse the scenario base YAML for a scenario.
    Returns dict or list depending on the YAML structure.
    """
    path = scenario_dir(repo_root, scenario_name) / filename
    text = path.read_text()
    data = yaml.safe_load(text)
    if data is None:
        raise ValueError(f"Empty or invalid YAML in {path}")
    return data


def patch_namespace_in_docs(docs: list, namespace: str) -> list:
    """Override metadata.namespace in each doc so create_from_yaml respects target namespace."""
    for doc in docs:
        if isinstance(doc, dict) and doc.get("metadata") is not None:
            doc["metadata"]["namespace"] = namespace
    return docs


def get_pods_list(k8s_core, namespace: str, label_selector: str) -> V1PodList:
    """Return V1PodList from the Kubernetes API."""
    return k8s_core.list_namespaced_pod(
        namespace=namespace,
        label_selector=label_selector,
    )


def get_pods_or_skip(
    k8s_core,
    namespace: str,
    label_selector: str,
    no_pods_reason: Optional[str] = None,
) -> V1PodList:
    """
    Get pods via Kubernetes API or skip if cluster unreachable or no matching pods.
    Use at test start when prerequisites may be missing.
    no_pods_reason: message when no pods match; if None, a default message is used.
    """
    try:
        pod_list = k8s_core.list_namespaced_pod(
            namespace=namespace,
            label_selector=label_selector,
        )
    except Exception as e:
        pytest.skip(f"Cluster unreachable: {e}")
    if not pod_list.items or len(pod_list.items) == 0:
        reason = (
            no_pods_reason
            if no_pods_reason
            else f"No pods in {namespace} with label {label_selector}. "
            "Start a KinD cluster with default storage (local-path-provisioner)."
        )
        pytest.skip(reason)
    return pod_list


def pod_uids(pod_list: Union[V1PodList, List[V1Pod]]) -> list:
    """Return list of pod UIDs from V1PodList or list of V1Pod."""
    return [p.metadata.uid for p in _pods(pod_list)]


def restart_counts(pod_list: Union[V1PodList, List[V1Pod]]) -> int:
    """Return total restart count across all containers in V1PodList or list of V1Pod."""
    total = 0
    for p in _pods(pod_list):
        if not p.status or not p.status.container_statuses:
            continue
        for cs in p.status.container_statuses:
            total += getattr(cs, "restart_count", 0)
    return total


def get_network_policies_list(k8s_networking, namespace: str) -> V1NetworkPolicyList:
    """Return V1NetworkPolicyList from the Kubernetes API."""
    return k8s_networking.list_namespaced_network_policy(namespace=namespace)


def find_network_policy_by_prefix(
    policy_list: Union[V1NetworkPolicyList, List[V1NetworkPolicy]],
    name_prefix: str,
) -> Optional[V1NetworkPolicy]:
    """Return the first NetworkPolicy whose name starts with name_prefix, or None."""
    for policy in _policies(policy_list):
        if (
            policy.metadata
            and policy.metadata.name
            and policy.metadata.name.startswith(name_prefix)
        ):
            return policy
    return None


def assert_all_pods_running_and_ready(
    pod_list: Union[V1PodList, List[V1Pod]],
    namespace: str = "",
) -> None:
    """
    Assert all pods are Running and all containers Ready.
    Include namespace in assertion messages for debugging.
    """
    ns_suffix = f" (namespace={namespace})" if namespace else ""
    for pod in _pods(pod_list):
        assert pod.status and pod.status.phase == "Running", (
            f"Pod {pod.metadata.name} not Running after scenario: {pod.status}{ns_suffix}"
        )
        if pod.status.container_statuses:
            for cs in pod.status.container_statuses:
                assert getattr(cs, "ready", False) is True, (
                    f"Container {getattr(cs, 'name', '?')} not ready in pod {pod.metadata.name}{ns_suffix}"
                )


def assert_pod_count_unchanged(
    before: Union[V1PodList, List[V1Pod]],
    after: Union[V1PodList, List[V1Pod]],
    namespace: str = "",
) -> None:
    """Assert pod count is unchanged; include namespace in failure message."""
    before_items = _pods(before)
    after_items = _pods(after)
    ns_suffix = f" (namespace={namespace})" if namespace else ""
    assert len(after_items) == len(before_items), (
        f"Pod count changed after scenario: expected {len(before_items)}, got {len(after_items)}.{ns_suffix}"
    )


def assert_kraken_success(result, context: str = "", tmp_path=None, allowed_codes=(0,)) -> None:
    """
    Assert Kraken run succeeded (returncode in allowed_codes). On failure, include stdout and stderr
    in the assertion message and optionally write full output to tmp_path.
    Default allowed_codes=(0,). For alert-aware tests, use allowed_codes=(0, 2).
    """
    if result.returncode in allowed_codes:
        return
    if tmp_path is not None:
        try:
            (tmp_path / "kraken_stdout.log").write_text(result.stdout or "")
            (tmp_path / "kraken_stderr.log").write_text(result.stderr or "")
        except Exception as e:
            logger.warning("Could not write Kraken logs to tmp_path: %s", e)
    lines = (result.stdout or "").splitlines()
    tail_stdout = "\n".join(lines[-20:]) if lines else "(empty)"
    context_str = f" {context}" if context else ""
    path_hint = f"\nFull logs: {tmp_path}/kraken_stdout.log, {tmp_path}/kraken_stderr.log" if tmp_path else ""
    raise AssertionError(
        f"Krkn failed (rc={result.returncode}){context_str}.{path_hint}\n"
        f"--- stderr ---\n{result.stderr or '(empty)'}\n"
        f"--- stdout (last 20 lines) ---\n{tail_stdout}"
    )


def assert_kraken_failure(result, context: str = "", tmp_path=None) -> None:
    """
    Assert Kraken run failed (returncode != 0). On failure (Kraken unexpectedly succeeded),
    raise AssertionError with stdout/stderr and optional tmp_path log files for diagnostics.
    """
    if result.returncode != 0:
        return
    if tmp_path is not None:
        try:
            (tmp_path / "kraken_stdout.log").write_text(result.stdout or "")
            (tmp_path / "kraken_stderr.log").write_text(result.stderr or "")
        except Exception as e:
            logger.warning("Could not write Kraken logs to tmp_path: %s", e)
    lines = (result.stdout or "").splitlines()
    tail_stdout = "\n".join(lines[-20:]) if lines else "(empty)"
    context_str = f" {context}" if context else ""
    path_hint = f"\nFull logs: {tmp_path}/kraken_stdout.log, {tmp_path}/kraken_stderr.log" if tmp_path else ""
    raise AssertionError(
        f"Expected Krkn to fail but it succeeded (rc=0){context_str}.{path_hint}\n"
        f"--- stderr ---\n{result.stderr or '(empty)'}\n"
        f"--- stdout (last 20 lines) ---\n{tail_stdout}"
    )
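As the assert_kraken_success docstring notes, alert-aware tests can tolerate exit code 2; a hypothetical call (run_kraken, config_path and tmp_path assumed from the fixtures above):

# rc 0 (clean run) and rc 2 (critical alerts fired) both pass; anything else fails with log tails.
result = run_kraken(config_path)
assert_kraken_success(result, context="alert-aware run", tmp_path=tmp_path, allowed_codes=(0, 2))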
CI/tests_v2/pytest.ini (new file)
@@ -0,0 +1,14 @@
[pytest]
testpaths = .
python_files = test_*.py
python_functions = test_*
# Install CI/tests_v2/requirements.txt for --timeout, --reruns, --reruns-delay.
# Example full run: pytest CI/tests_v2/ -v --timeout=300 --reruns=2 --reruns-delay=10 --html=... --junitxml=...
addopts = -v
markers =
    functional: marks a test as a functional test (deselect with '-m "not functional"')
    pod_disruption: marks a test as a pod disruption scenario test
    application_outage: marks a test as an application outage scenario test
    no_workload: skip workload deployment for this test (e.g. negative tests)
    order: set test order (pytest-order)
junit_family = xunit2
CI/tests_v2/requirements.txt (new file)
@@ -0,0 +1,15 @@
# Pytest plugin deps for CI/tests_v2 functional tests.
#
# Kept separate from the root requirements.txt because:
# - Root deps are Kraken runtime (cloud SDKs, K8s client, etc.)
# - These are test-only plugins not needed by Kraken itself
# - Merging would bloat installs for users who don't run functional tests
# - Separate files reduce version-conflict risk between test and runtime deps
#
# pytest and coverage are already in root requirements.txt; do NOT duplicate here.
# The Makefile installs both files automatically via `make setup`.
pytest-rerunfailures>=14.0
pytest-html>=4.1.0
pytest-timeout>=2.2.0
pytest-order>=1.2.0
pytest-xdist>=3.5.0
CI/tests_v2/scaffold.py (new file)
@@ -0,0 +1,230 @@
#!/usr/bin/env python3
"""
Generate boilerplate for a new scenario test in CI/tests_v2.

Usage (from repository root):
    python CI/tests_v2/scaffold.py --scenario service_hijacking
    python CI/tests_v2/scaffold.py --scenario node_disruption --scenario-type node_scenarios

Creates (folder-per-scenario layout):
- CI/tests_v2/scenarios/<scenario>/test_<scenario>.py (BaseScenarioTest subclass + stub test)
- CI/tests_v2/scenarios/<scenario>/resource.yaml (placeholder workload)
- CI/tests_v2/scenarios/<scenario>/scenario_base.yaml (placeholder Krkn scenario; edit for your scenario_type)
- Adds the scenario marker to pytest.ini (if not already present)
"""

import argparse
import re
import sys
from pathlib import Path


def snake_to_camel(snake: str) -> str:
    """Convert snake_case to CamelCase."""
    return "".join(word.capitalize() for word in snake.split("_"))


def scenario_type_default(scenario: str) -> str:
    """Default scenario_type for build_config (e.g. service_hijacking -> service_hijacking_scenarios)."""
    return f"{scenario}_scenarios"


TEST_FILE_TEMPLATE = '''"""
Functional test for {scenario} scenario.
Each test runs in its own ephemeral namespace with workload deployed automatically.
"""

import pytest

from lib.base import BaseScenarioTest
from lib.utils import (
    assert_all_pods_running_and_ready,
    assert_kraken_failure,
    assert_kraken_success,
    assert_pod_count_unchanged,
    get_pods_list,
)


@pytest.mark.functional
@pytest.mark.{marker}
class Test{class_name}(BaseScenarioTest):
    """{scenario} scenario."""

    WORKLOAD_MANIFEST = "CI/tests_v2/scenarios/{scenario}/resource.yaml"
    WORKLOAD_IS_PATH = True
    LABEL_SELECTOR = "app={app_label}"
    SCENARIO_NAME = "{scenario}"
    SCENARIO_TYPE = "{scenario_type}"
    NAMESPACE_KEY_PATH = {namespace_key_path}
    NAMESPACE_IS_REGEX = {namespace_is_regex}
    OVERRIDES_KEY_PATH = {overrides_key_path}

    @pytest.mark.order(1)
    def test_happy_path(self):
        """Run {scenario} scenario and assert pods remain healthy."""
        ns = self.ns
        before = get_pods_list(self.k8s_core, ns, self.LABEL_SELECTOR)

        result = self.run_scenario(self.tmp_path, ns)
        assert_kraken_success(result, context=f"namespace={{ns}}", tmp_path=self.tmp_path)

        after = get_pods_list(self.k8s_core, ns, self.LABEL_SELECTOR)
        assert_pod_count_unchanged(before, after, namespace=ns)
        assert_all_pods_running_and_ready(after, namespace=ns)
'''


RESOURCE_YAML_TEMPLATE = '''# Target workload for {scenario} scenario tests.
# Namespace is patched at deploy time by the test framework.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {app_label}
spec:
  replicas: 1
  selector:
    matchLabels:
      app: {app_label}
  template:
    metadata:
      labels:
        app: {app_label}
    spec:
      containers:
      - name: app
        image: nginx:alpine
        ports:
        - containerPort: 80
'''


SCENARIO_BASE_DICT_TEMPLATE = '''# Base scenario for {scenario} (used by build_config with scenario_type: {scenario_type}).
# Edit this file with the structure expected by Krkn. Top-level key must match SCENARIO_NAME.
# See scenarios/application_outage/scenario_base.yaml and scenarios/pod_disruption/scenario_base.yaml for examples.
{scenario}:
  namespace: default
  # Add fields required by your scenario plugin.
'''


SCENARIO_BASE_LIST_TEMPLATE = '''# Base scenario for {scenario} (list format). Tests patch config.namespace_pattern with ^<ns>$.
# Edit with the structure expected by your scenario plugin. See scenarios/pod_disruption/scenario_base.yaml.
- id: {scenario}-default
  config:
    namespace_pattern: "^default$"
    # Add fields required by your scenario plugin.
'''


def main() -> int:
    parser = argparse.ArgumentParser(description="Scaffold a new scenario test in CI/tests_v2 (folder-per-scenario)")
    parser.add_argument(
        "--scenario",
        required=True,
        help="Scenario name in snake_case (e.g. service_hijacking)",
    )
    parser.add_argument(
        "--scenario-type",
        default=None,
        help="Kraken scenario_type for build_config (default: <scenario>_scenarios)",
    )
    parser.add_argument(
        "--list-based",
        action="store_true",
        help="Use list-based scenario (NAMESPACE_KEY_PATH [0, 'config', 'namespace_pattern'], OVERRIDES_KEY_PATH [0, 'config'])",
    )
    parser.add_argument(
        "--regex-namespace",
        action="store_true",
        help="Set NAMESPACE_IS_REGEX = True (namespace wrapped in ^...$)",
    )
    args = parser.parse_args()

    scenario = args.scenario.strip().lower()
    if not re.match(r"^[a-z][a-z0-9_]*$", scenario):
        print("Error: --scenario must be snake_case (e.g. service_hijacking)", file=sys.stderr)
        return 1

    scenario_type = args.scenario_type or scenario_type_default(scenario)
    class_name = snake_to_camel(scenario)
    marker = scenario
    app_label = scenario.replace("_", "-")

    if args.list_based:
        namespace_key_path = [0, "config", "namespace_pattern"]
        namespace_is_regex = True
        overrides_key_path = [0, "config"]
        scenario_base_template = SCENARIO_BASE_LIST_TEMPLATE
    else:
        namespace_key_path = [scenario, "namespace"]
        namespace_is_regex = args.regex_namespace
        overrides_key_path = [scenario]
        scenario_base_template = SCENARIO_BASE_DICT_TEMPLATE

    repo_root = Path(__file__).resolve().parent.parent.parent
    scenario_dir_path = repo_root / "CI" / "tests_v2" / "scenarios" / scenario
    test_path = scenario_dir_path / f"test_{scenario}.py"
    resource_path = scenario_dir_path / "resource.yaml"
    scenario_base_path = scenario_dir_path / "scenario_base.yaml"

    if scenario_dir_path.exists() and any(scenario_dir_path.iterdir()):
        print(f"Error: scenario directory already exists and is non-empty: {scenario_dir_path}", file=sys.stderr)
        return 1
    if test_path.exists():
        print(f"Error: {test_path} already exists", file=sys.stderr)
        return 1

    scenario_dir_path.mkdir(parents=True, exist_ok=True)

    test_content = TEST_FILE_TEMPLATE.format(
        scenario=scenario,
        marker=marker,
        class_name=class_name,
        app_label=app_label,
        scenario_type=scenario_type,
        namespace_key_path=repr(namespace_key_path),
        namespace_is_regex=namespace_is_regex,
        overrides_key_path=repr(overrides_key_path),
    )
    resource_content = RESOURCE_YAML_TEMPLATE.format(scenario=scenario, app_label=app_label)
    scenario_base_content = scenario_base_template.format(
        scenario=scenario,
        scenario_type=scenario_type,
    )

    test_path.write_text(test_content, encoding="utf-8")
    resource_path.write_text(resource_content, encoding="utf-8")
    scenario_base_path.write_text(scenario_base_content, encoding="utf-8")

    # Auto-add marker to pytest.ini if not already present
    pytest_ini_path = repo_root / "CI" / "tests_v2" / "pytest.ini"
    marker_line = f"    {marker}: marks a test as a {scenario} scenario test"
    if pytest_ini_path.exists():
        content = pytest_ini_path.read_text(encoding="utf-8")
        if f"    {marker}:" not in content and f"{marker}: marks" not in content:
            lines = content.splitlines(keepends=True)
            insert_at = None
            for i, line in enumerate(lines):
                if re.match(r"^    \w+:\s*.+", line):
                    insert_at = i + 1
            if insert_at is not None:
                lines.insert(insert_at, marker_line + "\n")
                pytest_ini_path.write_text("".join(lines), encoding="utf-8")
                print("Added marker to pytest.ini")
            else:
                print("Could not find markers block in pytest.ini; add manually:")
                print(marker_line)
        else:
            print("Marker already in pytest.ini")
    else:
        print("pytest.ini not found; add this marker under 'markers':")
        print(marker_line)

    print(f"Created: {test_path}")
    print(f"Created: {resource_path}")
    print(f"Created: {scenario_base_path}")
    print()
    print("Then edit scenario_base.yaml with your scenario structure (top-level key should match SCENARIO_NAME).")
    return 0


if __name__ == "__main__":
    sys.exit(main())
CI/tests_v2/scenarios/application_outage/nginx_http.yaml (new file)
@@ -0,0 +1,34 @@
# Nginx Deployment + Service for application outage traffic test.
# Namespace is patched at deploy time by the test framework.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-outage-http
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-outage-http
      scenario: outage
  template:
    metadata:
      labels:
        app: nginx-outage-http
        scenario: outage
    spec:
      containers:
      - name: nginx
        image: nginx:alpine
        ports:
        - containerPort: 80
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-outage-http
spec:
  selector:
    app: nginx-outage-http
  ports:
  - port: 80
    targetPort: 80
CI/tests_v2/scenarios/application_outage/resource.yaml (new file)
@@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
  name: outage
  labels:
    scenario: outage
spec:
  containers:
  - name: fedtools
    image: quay.io/krkn-chaos/krkn:tools
    command:
      - /bin/sh
      - -c
      - |
        sleep infinity
CI/tests_v2/scenarios/application_outage/scenario_base.yaml (new file)
@@ -0,0 +1,10 @@
# Base application_outage scenario. Tests load this and patch namespace (and optionally duration, block, exclude_label).
application_outage:
  duration: 10
  namespace: default
  pod_selector:
    scenario: outage
  block:
    - Ingress
    - Egress
  exclude_label: ""
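For intuition, this sketch shows how the test below turns scenario_base.yaml into a run-specific scenario using the lib helpers from this diff (the namespace value is made up):

from pathlib import Path
import yaml

from lib.base import _set_nested
from lib.utils import load_scenario_base

repo_root = Path(".").resolve()  # assumes we run from the repository root
scenario = load_scenario_base(repo_root, "application_outage")
_set_nested(scenario, ["application_outage", "namespace"], "krkn-test-1a2b3c4d")
scenario["application_outage"]["duration"] = 12  # a per-test override

print(yaml.dump(scenario, default_flow_style=False, sort_keys=False))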
CI/tests_v2/scenarios/application_outage/test_application_outage.py (new file)
@@ -0,0 +1,229 @@
"""
|
||||
Functional test for application outage scenario (block network to target pods, then restore).
|
||||
Equivalent to CI/tests/test_app_outages.sh with proper assertions.
|
||||
The main happy-path test reuses one namespace and workload for multiple scenario runs (default, exclude_label, block variants); other tests use their own ephemeral namespace as needed.
|
||||
"""
|
||||
|
||||
import time
|
||||
|
||||
import pytest
|
||||
|
||||
from lib.base import (
|
||||
BaseScenarioTest,
|
||||
KRAKEN_PROC_WAIT_TIMEOUT,
|
||||
POLICY_WAIT_TIMEOUT,
|
||||
)
|
||||
from lib.utils import (
|
||||
assert_all_pods_running_and_ready,
|
||||
assert_kraken_failure,
|
||||
assert_kraken_success,
|
||||
assert_pod_count_unchanged,
|
||||
find_network_policy_by_prefix,
|
||||
get_network_policies_list,
|
||||
get_pods_list,
|
||||
)
|
||||
|
||||
|
||||
def _wait_for_network_policy(k8s_networking, namespace: str, prefix: str, timeout: int = 30):
|
||||
"""Poll until a NetworkPolicy with name starting with prefix exists. Return its name."""
|
||||
deadline = time.monotonic() + timeout
|
||||
while time.monotonic() < deadline:
|
||||
policy_list = get_network_policies_list(k8s_networking, namespace)
|
||||
policy = find_network_policy_by_prefix(policy_list, prefix)
|
||||
if policy:
|
||||
return policy.metadata.name
|
||||
time.sleep(1)
|
||||
raise TimeoutError(f"No NetworkPolicy with prefix {prefix!r} in {namespace} within {timeout}s")
|
||||
|
||||
|
||||
def _assert_no_network_policy_with_prefix(k8s_networking, namespace: str, prefix: str):
|
||||
policy_list = get_network_policies_list(k8s_networking, namespace)
|
||||
policy = find_network_policy_by_prefix(policy_list, prefix)
|
||||
name = policy.metadata.name if policy and policy.metadata else "?"
|
||||
assert policy is None, (
|
||||
f"Expected no NetworkPolicy with prefix {prefix!r} in namespace={namespace}, found {name}"
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.functional
|
||||
@pytest.mark.application_outage
|
||||
class TestApplicationOutage(BaseScenarioTest):
|
||||
"""Application outage scenario: block network to target pods, then restore."""
|
||||
|
||||
WORKLOAD_MANIFEST = "CI/tests_v2/scenarios/application_outage/resource.yaml"
|
||||
WORKLOAD_IS_PATH = True
|
||||
LABEL_SELECTOR = "scenario=outage"
|
||||
POLICY_PREFIX = "krkn-deny-"
|
||||
SCENARIO_NAME = "application_outage"
|
||||
SCENARIO_TYPE = "application_outages_scenarios"
|
||||
NAMESPACE_KEY_PATH = ["application_outage", "namespace"]
|
||||
NAMESPACE_IS_REGEX = False
|
||||
OVERRIDES_KEY_PATH = ["application_outage"]
|
||||
|
||||
@pytest.mark.order(1)
|
||||
def test_app_outage_block_restore_and_variants(self):
|
||||
"""Default, exclude_label, and block-type variants (Ingress, Egress, both) run successfully in one namespace; each run restores and pods stay ready."""
|
||||
ns = self.ns
|
||||
before = get_pods_list(self.k8s_core, ns, self.LABEL_SELECTOR)
|
||||
|
||||
cases = [
|
||||
("default", {}, "app_outage_config.yaml"),
|
||||
("exclude_label", {"exclude_label": {"env": "prod"}}, "app_outage_exclude_config.yaml"),
|
||||
("block=Ingress", {"block": ["Ingress"]}, "app_outage_block_ingress_config.yaml"),
|
||||
("block=Egress", {"block": ["Egress"]}, "app_outage_block_egress_config.yaml"),
|
||||
("block=Ingress,Egress", {"block": ["Ingress", "Egress"]}, "app_outage_block_ingress_egress_config.yaml"),
|
||||
]
|
||||
for context_name, overrides, config_filename in cases:
|
||||
result = self.run_scenario(
|
||||
self.tmp_path, ns,
|
||||
overrides=overrides if overrides else None,
|
||||
config_filename=config_filename,
|
||||
)
|
||||
assert_kraken_success(
|
||||
result, context=f"{context_name} namespace={ns}", tmp_path=self.tmp_path
|
||||
)
|
||||
after = get_pods_list(self.k8s_core, ns, self.LABEL_SELECTOR)
|
||||
assert_pod_count_unchanged(before, after, namespace=ns)
|
||||
assert_all_pods_running_and_ready(after, namespace=ns)
|
||||
|
||||
def test_network_policy_created_then_deleted(self):
|
||||
"""NetworkPolicy with prefix krkn-deny- is created during run and deleted after."""
|
||||
ns = self.ns
|
||||
scenario = self.load_and_patch_scenario(self.repo_root, ns, duration=12)
|
||||
scenario_path = self.write_scenario(self.tmp_path, scenario, suffix="_np_lifecycle")
|
||||
config_path = self.build_config(
|
||||
self.SCENARIO_TYPE, str(scenario_path),
|
||||
filename="app_outage_np_lifecycle.yaml",
|
||||
)
|
||||
proc = self.run_kraken_background(config_path)
|
||||
try:
|
||||
policy_name = _wait_for_network_policy(
|
||||
self.k8s_networking, ns, self.POLICY_PREFIX, timeout=POLICY_WAIT_TIMEOUT
|
||||
)
|
||||
assert policy_name.startswith(self.POLICY_PREFIX), (
|
||||
f"Policy name {policy_name!r} should start with {self.POLICY_PREFIX!r} (namespace={ns})"
|
||||
)
|
||||
policy_list = get_network_policies_list(self.k8s_networking, ns)
|
||||
policy = find_network_policy_by_prefix(policy_list, self.POLICY_PREFIX)
|
||||
assert policy is not None and policy.spec is not None, (
|
||||
f"Expected NetworkPolicy with spec (namespace={ns})"
|
||||
)
|
||||
assert policy.spec.pod_selector is not None, f"Policy should have pod_selector (namespace={ns})"
|
||||
assert policy.spec.policy_types is not None, f"Policy should have policy_types (namespace={ns})"
|
||||
finally:
|
||||
proc.wait(timeout=KRAKEN_PROC_WAIT_TIMEOUT)
|
||||
_assert_no_network_policy_with_prefix(self.k8s_networking, ns, self.POLICY_PREFIX)
|
||||
|
||||
    # def test_traffic_blocked_during_outage(self, request):
    #     """During outage, ingress to target pods is blocked; after run, traffic is restored."""
    #     ns = self.ns
    #     nginx_path = scenario_dir(self.repo_root, "application_outage") / "nginx_http.yaml"
    #     docs = list(yaml.safe_load_all(nginx_path.read_text()))
    #     docs = patch_namespace_in_docs(docs, ns)
    #     try:
    #         k8s_utils.create_from_yaml(
    #             self.k8s_client,
    #             yaml_objects=docs,
    #             namespace=ns,
    #         )
    #     except k8s_utils.FailToCreateError as e:
    #         msgs = [str(exc) for exc in e.api_exceptions]
    #         raise AssertionError(
    #             f"Failed to create nginx resources (namespace={ns}): {'; '.join(msgs)}"
    #         ) from e
    #     wait_for_deployment_replicas(self.k8s_apps, ns, "nginx-outage-http", timeout=READINESS_TIMEOUT)
    #     port = _get_free_port()
    #     pf_ref = []
    #
    #     def _kill_port_forward():
    #         if pf_ref and pf_ref[0].poll() is None:
    #             pf_ref[0].terminate()
    #             try:
    #                 pf_ref[0].wait(timeout=5)
    #             except subprocess.TimeoutExpired:
    #                 pf_ref[0].kill()
    #
    #     request.addfinalizer(_kill_port_forward)
    #     pf = subprocess.Popen(
    #         ["kubectl", "port-forward", "-n", ns, "service/nginx-outage-http", f"{port}:80"],
    #         cwd=self.repo_root,
    #         stdout=subprocess.DEVNULL,
    #         stderr=subprocess.DEVNULL,
    #     )
    #     pf_ref.append(pf)
    #     url = f"http://127.0.0.1:{port}/"
    #     try:
    #         time.sleep(2)
    #         baseline_ok = False
    #         for _ in range(10):
    #             try:
    #                 resp = requests.get(url, timeout=3)
    #                 if resp.ok:
    #                     baseline_ok = True
    #                     break
    #             except (requests.ConnectionError, requests.Timeout):
    #                 pass
    #             time.sleep(1)
    #         assert baseline_ok, f"Baseline: HTTP request to nginx should succeed (namespace={ns})"
    #
    #         scenario = self.load_and_patch_scenario(self.repo_root, ns, duration=15)
    #         scenario_path = self.write_scenario(self.tmp_path, scenario, suffix="_traffic")
    #         config_path = self.build_config(
    #             self.SCENARIO_TYPE, str(scenario_path),
    #             filename="app_outage_traffic_config.yaml",
    #         )
    #         proc = self.run_kraken_background(config_path)
    #         policy_name = _wait_for_network_policy(
    #             self.k8s_networking, ns, self.POLICY_PREFIX, timeout=POLICY_WAIT_TIMEOUT
    #         )
    #         assert policy_name, f"Expected policy to exist (namespace={ns})"
    #         time.sleep(2)
    #         failed = False
    #         for _ in range(5):
    #             try:
    #                 resp = requests.get(url, timeout=2)
    #                 if not resp.ok:
    #                     failed = True
    #                     break
    #             except (requests.ConnectionError, requests.Timeout):
    #                 failed = True
    #                 break
    #             time.sleep(1)
    #         assert failed, f"During outage, HTTP request to nginx should fail (namespace={ns})"
    #         proc.wait(timeout=KRAKEN_PROC_WAIT_TIMEOUT)
    #         time.sleep(1)
    #         resp = requests.get(url, timeout=5)
    #         assert resp.ok, f"After scenario, HTTP request to nginx should succeed (namespace={ns})"
    #     finally:
    #         pf.terminate()
    #         pf.wait(timeout=5)
    @pytest.mark.no_workload
    def test_invalid_scenario_fails(self):
        """Invalid scenario file (missing application_outage) causes Kraken to exit non-zero."""
        invalid_scenario_path = self.tmp_path / "invalid_scenario.yaml"
        invalid_scenario_path.write_text("foo: bar\n")
        config_path = self.build_config(
            self.SCENARIO_TYPE, str(invalid_scenario_path),
            filename="invalid_config.yaml",
        )
        result = self.run_kraken(config_path)
        assert_kraken_failure(
            result, context=f"namespace={self.ns}", tmp_path=self.tmp_path
        )

    @pytest.mark.no_workload
    def test_bad_namespace_fails(self):
        """Scenario targeting non-existent namespace causes Kraken to exit non-zero."""
        scenario = self.load_and_patch_scenario(self.repo_root, "nonexistent-namespace-xyz-12345")
        scenario_path = self.write_scenario(self.tmp_path, scenario, suffix="_bad_ns")
        config_path = self.build_config(
            self.SCENARIO_TYPE, str(scenario_path),
            filename="app_outage_bad_ns_config.yaml",
        )
        result = self.run_kraken(config_path)
        assert_kraken_failure(
            result,
            context=f"test namespace={self.ns}",
            tmp_path=self.tmp_path,
        )
21  CI/tests_v2/scenarios/pod_disruption/resource.yaml  Normal file
@@ -0,0 +1,21 @@
# Single-pod deployment targeted by pod disruption scenario.
# Namespace is patched at deploy time by the test framework.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: krkn-pod-disruption-target
spec:
  replicas: 1
  selector:
    matchLabels:
      app: krkn-pod-disruption-target
  template:
    metadata:
      labels:
        app: krkn-pod-disruption-target
    spec:
      containers:
        - name: app
          image: nginx:alpine
          ports:
            - containerPort: 80
7  CI/tests_v2/scenarios/pod_disruption/scenario_base.yaml  Normal file
@@ -0,0 +1,7 @@
# Base pod_disruption scenario (list). Tests load this and patch namespace_pattern with ^<ns>$.
- id: kill-pods
  config:
    namespace_pattern: "^default$"
    label_selector: app=krkn-pod-disruption-target
    krkn_pod_recovery_time: 5
    kill: 1
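A minimal sketch of the patching step described in the comment above, mirroring the `NAMESPACE_KEY_PATH = [0, "config", "namespace_pattern"]` declared in the test class below (the helper name is illustrative; the real logic lives in the framework's `lib/` helpers):

```python
# Illustrative only: the test framework's own helpers perform this patch.
import re
import yaml


def patch_namespace_pattern(scenario_text: str, ns: str) -> str:
    """Anchor the first entry's namespace_pattern to ^<ns>$."""
    docs = yaml.safe_load(scenario_text)  # the base scenario is a list of entries
    docs[0]["config"]["namespace_pattern"] = f"^{re.escape(ns)}$"
    return yaml.safe_dump(docs)
```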
58  CI/tests_v2/scenarios/pod_disruption/test_pod_disruption.py  Normal file
@@ -0,0 +1,58 @@
"""
Functional test for pod disruption scenario (pod crash and recovery).
Equivalent to CI/tests/test_pod.sh with proper before/after assertions.
Each test runs in its own ephemeral namespace with workload deployed automatically.
"""

import pytest

from lib.base import BaseScenarioTest, READINESS_TIMEOUT
from lib.utils import (
    assert_all_pods_running_and_ready,
    assert_kraken_success,
    assert_pod_count_unchanged,
    get_pods_list,
    pod_uids,
    restart_counts,
)


@pytest.mark.functional
@pytest.mark.pod_disruption
class TestPodDisruption(BaseScenarioTest):
    """Pod disruption scenario: kill pods and verify recovery."""

    WORKLOAD_MANIFEST = "CI/tests_v2/scenarios/pod_disruption/resource.yaml"
    WORKLOAD_IS_PATH = True
    LABEL_SELECTOR = "app=krkn-pod-disruption-target"
    SCENARIO_NAME = "pod_disruption"
    SCENARIO_TYPE = "pod_disruption_scenarios"
    NAMESPACE_KEY_PATH = [0, "config", "namespace_pattern"]
    NAMESPACE_IS_REGEX = True

    @pytest.mark.order(1)
    def test_pod_crash_and_recovery(self, wait_for_pods_running):
        ns = self.ns
        before = get_pods_list(self.k8s_core, ns, self.LABEL_SELECTOR)
        before_uids = pod_uids(before)
        before_restarts = restart_counts(before)

        result = self.run_scenario(self.tmp_path, ns)
        assert_kraken_success(result, context=f"namespace={ns}", tmp_path=self.tmp_path)

        after = get_pods_list(self.k8s_core, ns, self.LABEL_SELECTOR)
        after_uids = pod_uids(after)
        after_restarts = restart_counts(after)
        uids_changed = set(after_uids) != set(before_uids)
        restarts_increased = after_restarts > before_restarts
        assert uids_changed or restarts_increased, (
            f"Chaos had no effect in namespace={ns}: pod UIDs unchanged and restart count did not increase. "
            f"Before UIDs: {before_uids}, restarts: {before_restarts}. "
            f"After UIDs: {after_uids}, restarts: {after_restarts}."
        )

        wait_for_pods_running(ns, self.LABEL_SELECTOR, timeout=READINESS_TIMEOUT)

        after_final = get_pods_list(self.k8s_core, ns, self.LABEL_SELECTOR)
        assert_pod_count_unchanged(before, after_final, namespace=ns)
        assert_all_pods_running_and_ready(after_final, namespace=ns)
74  CI/tests_v2/setup_env.sh  Executable file
@@ -0,0 +1,74 @@
#!/usr/bin/env bash
# Setup environment for CI/tests_v2 pytest functional tests.
# Run from the repository root: ./CI/tests_v2/setup_env.sh
#
# - Creates a KinD cluster using kind-config-dev.yml (override with KIND_CONFIG=...).
# - Waits for the cluster and for local-path-provisioner pods (required by pod disruption test).
# - Does not install Python deps; use a venv and pip install -r requirements.txt and CI/tests_v2/requirements.txt yourself.

set -e

REPO_ROOT="$(cd "$(dirname "$0")/../.." && pwd)"
KIND_CONFIG="${KIND_CONFIG:-${REPO_ROOT}/CI/tests_v2/kind-config-dev.yml}"
CLUSTER_NAME="${KIND_CLUSTER_NAME:-ci-krkn}"

echo "Repository root: $REPO_ROOT"
cd "$REPO_ROOT"

# Check required tools
command -v kind >/dev/null 2>&1 || { echo "Error: kind is not installed. Install from https://kind.sigs.k8s.io/docs/user/quick-start/"; exit 1; }
command -v kubectl >/dev/null 2>&1 || { echo "Error: kubectl is not installed."; exit 1; }

# Python 3.9+
python3 -c "import sys; exit(0 if sys.version_info >= (3, 9) else 1)" 2>/dev/null || { echo "Error: Python 3.9+ required. Check: python3 --version"; exit 1; }

# Docker running (required for KinD)
docker info >/dev/null 2>&1 || { echo "Error: Docker is not running. Start Docker Desktop or run: systemctl start docker"; exit 1; }

# Tool versions for reproducibility
echo "kind: $(kind --version 2>/dev/null || kind version 2>/dev/null)"
echo "kubectl: $(kubectl version --client --short 2>/dev/null || kubectl version --client 2>/dev/null)"

# Create cluster if it doesn't exist (use "kind get clusters" so we skip when nodes exist even if kubeconfig check would fail)
if kind get clusters 2>/dev/null | grep -qx "$CLUSTER_NAME"; then
    echo "KinD cluster '$CLUSTER_NAME' already exists, skipping creation."
else
    echo "Creating KinD cluster '$CLUSTER_NAME' from $KIND_CONFIG ..."
    kind create cluster --name "$CLUSTER_NAME" --config "$KIND_CONFIG"
fi

# echo "Pre-pulling test workload images into KinD cluster..."
# docker pull nginx:alpine
# kind load docker-image nginx:alpine --name "$CLUSTER_NAME"

# kind merges into default kubeconfig (~/.kube/config), so kubectl should work in this shell.
# If you need to use this cluster from another terminal: export KUBECONFIG=~/.kube/config
# and ensure context: kubectl config use-context kind-$CLUSTER_NAME

echo "Waiting for cluster nodes to be Ready..."
kubectl wait --for=condition=Ready nodes --all --timeout=120s 2>/dev/null || true

echo "Waiting for local-path-provisioner pods (namespace local-path-storage, label app=local-path-provisioner)..."
for i in {1..60}; do
    if kubectl get pods -n local-path-storage -l app=local-path-provisioner -o name 2>/dev/null | grep -q .; then
        echo "Found local-path-provisioner pod(s). Waiting for Ready..."
        kubectl wait --for=condition=ready pod -l app=local-path-provisioner -n local-path-storage --timeout=120s 2>/dev/null && break
    fi
    echo "Attempt $i: local-path-provisioner not ready yet..."
    sleep 3
done

if ! kubectl get pods -n local-path-storage -l app=local-path-provisioner -o name 2>/dev/null | grep -q .; then
    echo "Warning: No pods with label app=local-path-provisioner in local-path-storage."
    echo "KinD usually deploys this by default. Check: kubectl get pods -n local-path-storage"
    exit 1
fi

echo ""
echo "Cluster is ready for CI/tests_v2."
echo "  kubectl uses the default kubeconfig (kind merged it). For another terminal: export KUBECONFIG=~/.kube/config"
echo ""
echo "Next: activate your venv, install deps, and run tests from repo root:"
echo "  pip install -r requirements.txt"
echo "  pip install -r CI/tests_v2/requirements.txt"
echo "  pytest CI/tests_v2/ -v --timeout=300 --reruns=2 --reruns-delay=10"
273  CLAUDE.md  Normal file
@@ -0,0 +1,273 @@
# CLAUDE.md - Krkn Chaos Engineering Framework

## Project Overview

Krkn (Kraken) is a chaos engineering tool for Kubernetes/OpenShift clusters. It injects deliberate failures to validate cluster resilience. Plugin-based architecture with multi-cloud support (AWS, Azure, GCP, IBM Cloud, VMware, Alibaba, OpenStack).

## Repository Structure

```
krkn/
├── krkn/
│   ├── scenario_plugins/   # Chaos scenario plugins (pod, node, network, hogs, etc.)
│   ├── utils/              # Utility functions
│   ├── rollback/           # Rollback management
│   ├── prometheus/         # Prometheus integration
│   └── cerberus/           # Health monitoring
├── tests/                  # Unit tests (unittest framework)
├── scenarios/              # Example scenario configs (openshift/, kube/, kind/)
├── config/                 # Configuration files
└── CI/                     # CI/CD test scripts
```

## Quick Start

```bash
# Setup (ALWAYS use virtual environment)
python3 -m venv venv
source venv/bin/activate
pip install -r requirements.txt

# Run Krkn
python run_kraken.py --config config/config.yaml

# Note: Scenarios are specified in config.yaml under kraken.chaos_scenarios
# There is no --scenario flag; edit config/config.yaml to select scenarios

# Run tests
python -m unittest discover -s tests -v
python -m coverage run -a -m unittest discover -s tests -v
```

## Critical Requirements

### Python Environment
- **Python 3.9+** required
- **NEVER install packages globally** - always use virtual environment
- **CRITICAL**: `docker` must be <7.0 and `requests` must be <2.32 (Unix socket compatibility)

### Key Dependencies
- **krkn-lib** (5.1.13): Core library for Kubernetes/OpenShift operations
- **kubernetes** (34.1.0): Kubernetes Python client
- **docker** (<7.0), **requests** (<2.32): DO NOT upgrade without verifying compatibility
- Cloud SDKs: boto3 (AWS), azure-mgmt-* (Azure), google-cloud-compute (GCP), ibm_vpc (IBM), pyVmomi (VMware)

## Plugin Architecture (CRITICAL)

**Strictly enforced naming conventions:**

### Naming Rules
- **Module files**: Must end with `_scenario_plugin.py` and use snake_case
  - Example: `pod_disruption_scenario_plugin.py`
- **Class names**: Must be CamelCase and end with `ScenarioPlugin`
  - Example: `PodDisruptionScenarioPlugin`
  - Must match module filename (snake_case ↔ CamelCase)
- **Directory structure**: Plugin dirs CANNOT contain "scenario" or "plugin"
  - Location: `krkn/scenario_plugins/<plugin_name>/`

### Plugin Implementation
Every plugin MUST:
1. Extend `AbstractScenarioPlugin`
2. Implement `run()` method
3. Implement `get_scenario_types()` method

```python
from krkn.scenario_plugins import AbstractScenarioPlugin


class PodDisruptionScenarioPlugin(AbstractScenarioPlugin):
    def run(self, config, scenarios_list, kubeconfig_path, wait_duration):
        pass

    def get_scenario_types(self):
        return ["pod_scenarios", "pod_outage"]
```

### Creating a New Plugin
1. Create directory: `krkn/scenario_plugins/<plugin_name>/`
2. Create module: `<plugin_name>_scenario_plugin.py`
3. Create class: `<PluginName>ScenarioPlugin` extending `AbstractScenarioPlugin`
4. Implement `run()` and `get_scenario_types()`
5. Create unit test: `tests/test_<plugin_name>_scenario_plugin.py`
6. Add example scenario: `scenarios/<platform>/<scenario>.yaml`

**DO NOT**: Violate naming conventions (factory will reject), include "scenario"/"plugin" in directory names, create plugins without tests.

## Testing

### Unit Tests
```bash
# Run all tests
python -m unittest discover -s tests -v

# Specific test
python -m unittest tests.test_pod_disruption_scenario_plugin

# With coverage
python -m coverage run -a -m unittest discover -s tests -v
python -m coverage html
```

**Test requirements:**
- Naming: `test_<module>_scenario_plugin.py`
- Mock external dependencies (Kubernetes API, cloud providers)
- Test success, failure, and edge cases
- Keep tests isolated and independent

### Functional Tests
Located in `CI/tests/`. Can be run locally on a kind cluster with Prometheus and Elasticsearch set up.

**Setup for local testing:**
1. Deploy Prometheus and Elasticsearch on your kind cluster:
   - Prometheus setup: https://krkn-chaos.dev/docs/developers-guide/testing-changes/#prometheus
   - Elasticsearch setup: https://krkn-chaos.dev/docs/developers-guide/testing-changes/#elasticsearch

2. Or disable monitoring features in `config/config.yaml`:
   ```yaml
   performance_monitoring:
     enable_alerts: False
     enable_metrics: False
     check_critical_alerts: False
   ```

**Note:** Functional tests run automatically in CI with full monitoring enabled.

## Cloud Provider Implementations

Node chaos scenarios are cloud-specific. Each in `krkn/scenario_plugins/node_actions/<provider>_node_scenarios.py`:
- AWS, Azure, GCP, IBM Cloud, VMware, Alibaba, OpenStack, Bare Metal

Implement: stop, start, reboot, terminate instances.

**When modifying**: Maintain consistency with other providers, handle API errors, add logging, update tests.

### Adding Cloud Provider Support
1. Create: `krkn/scenario_plugins/node_actions/<provider>_node_scenarios.py`
2. Extend: `abstract_node_scenarios.AbstractNodeScenarios`
3. Implement: `stop_instances`, `start_instances`, `reboot_instances`, `terminate_instances`
4. Add SDK to `requirements.txt`
5. Create unit test with mocked SDK
6. Add example scenario: `scenarios/openshift/<provider>_node_scenarios.yml`

## Configuration

**Main config**: `config/config.yaml`
- `kraken`: Core settings
- `cerberus`: Health monitoring
- `performance_monitoring`: Prometheus
- `elastic`: Elasticsearch telemetry

**Scenario configs**: `scenarios/` directory
```yaml
- config:
    scenario_type: <type>  # Must match plugin's get_scenario_types()
```
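For illustration, a minimal sketch of how that match could work (the real factory lives in krkn and additionally enforces the naming rules above; this is not its actual code):

```python
# Illustrative sketch only - not the actual krkn factory implementation.
def resolve_plugin(scenario_type: str, plugins: list):
    """Return the plugin whose get_scenario_types() declares scenario_type."""
    for plugin in plugins:
        if scenario_type in plugin.get_scenario_types():
            return plugin
    raise ValueError(f"no plugin registered for scenario_type={scenario_type!r}")
```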
## Code Style

- **Import order**: Standard library, third-party, local imports
- **Naming**: snake_case (functions/variables), CamelCase (classes)
- **Logging**: Use Python's `logging` module
- **Error handling**: Return appropriate exit codes
- **Docstrings**: Required for public functions/classes

## Exit Codes

Krkn uses specific exit codes to communicate execution status:

- `0`: Success - all scenarios passed, no critical alerts
- `1`: Scenario failure - one or more scenarios failed
- `2`: Critical alerts fired during execution
- `3+`: Health check failure (Cerberus monitoring detected issues)

**When implementing scenarios:**
- Return `0` on success
- Return `1` on scenario-specific failures
- Propagate health check failures appropriately
- Log exit code reasons clearly (see the sketch below)
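A minimal sketch of consuming these codes from a CI wrapper, assuming only the exit codes documented above and the run command from Quick Start:

```python
# Minimal sketch: run Krkn and map its exit code to the documented statuses.
import subprocess

EXIT_STATUS = {0: "success", 1: "scenario failure", 2: "critical alerts fired"}

proc = subprocess.run(["python", "run_kraken.py", "--config", "config/config.yaml"])
status = EXIT_STATUS.get(proc.returncode, "health check failure (Cerberus)")
print(f"krkn exited {proc.returncode}: {status}")
```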
## Container Support

Krkn can run inside a container. See `containers/` directory.

**Building custom image:**
```bash
cd containers
./compile_dockerfile.sh  # Generates Dockerfile from template
docker build -t krkn:latest .
```

**Running containerized:**
```bash
docker run -v ~/.kube:/root/.kube:Z \
  -v $(pwd)/config:/config:Z \
  -v $(pwd)/scenarios:/scenarios:Z \
  krkn:latest
```

## Git Workflow

- **NEVER commit directly to main**
- **NEVER use `--force` without approval**
- **ALWAYS create feature branches**: `git checkout -b feature/description`
- **ALWAYS run tests before pushing**

**Conventional commits**: `feat:`, `fix:`, `test:`, `docs:`, `refactor:`

```bash
git checkout main && git pull origin main
git checkout -b feature/your-feature-name
# Make changes, write tests
python -m unittest discover -s tests -v
git add <specific-files>
git commit -m "feat: description"
git push -u origin feature/your-feature-name
```

## Environment Variables

- `KUBECONFIG`: Path to kubeconfig
- `AWS_*`, `AZURE_*`, `GOOGLE_APPLICATION_CREDENTIALS`: Cloud credentials
- `PROMETHEUS_URL`, `ELASTIC_URL`, `ELASTIC_PASSWORD`: Monitoring config

**NEVER commit credentials or API keys.**

## Common Pitfalls

1. Missing virtual environment - always activate venv
2. Running functional tests without cluster setup
3. Ignoring exit codes
4. Modifying krkn-lib directly (it's a separate package)
5. Upgrading docker/requests beyond version constraints

## Before Writing Code

1. Check for existing implementations
2. Review existing plugins as examples
3. Maintain consistency with cloud provider patterns
4. Plan rollback logic
5. Write tests alongside code
6. Update documentation

## When Adding Dependencies

1. Check if functionality exists in krkn-lib or current dependencies
2. Verify compatibility with existing versions
3. Pin specific versions in `requirements.txt`
4. Check for security vulnerabilities
5. Test thoroughly for conflicts

## Common Development Tasks

### Modifying Existing Plugin
1. Read plugin code and corresponding test
2. Make changes
3. Update/add unit tests
4. Run: `python -m unittest tests.test_<plugin>_scenario_plugin`

### Writing Unit Tests
1. Create: `tests/test_<module>_scenario_plugin.py`
2. Import `unittest` and plugin class
3. Mock external dependencies
4. Test success, failure, and edge cases
5. Run: `python -m unittest tests.test_<module>_scenario_plugin` (see the sketch below)
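A minimal sketch of such a test, using a stand-in plugin so the example stays self-contained (a real test would import the actual plugin class and mock its Kubernetes client):

```python
# Illustrative skeleton; real plugins extend AbstractScenarioPlugin and use a
# Kubernetes client from krkn-lib, which is mocked out here for isolation.
import unittest
from unittest.mock import MagicMock


class FakePodDisruptionScenarioPlugin:
    """Stand-in for a real plugin; takes an injected Kubernetes client."""

    def __init__(self, kube_client):
        self.kube_client = kube_client

    def get_scenario_types(self):
        return ["pod_scenarios", "pod_outage"]

    def run(self, config, scenarios_list, kubeconfig_path, wait_duration):
        # A real plugin would kill pods here; return 0 on success, 1 on failure.
        return 0 if self.kube_client.list_pods("default") else 1


class TestPodDisruptionScenarioPlugin(unittest.TestCase):
    def test_run_success(self):
        kube = MagicMock()
        kube.list_pods.return_value = ["pod-a"]
        plugin = FakePodDisruptionScenarioPlugin(kube)
        self.assertEqual(plugin.run({}, [], "~/.kube/config", 0), 0)

    def test_run_failure_when_no_pods(self):
        kube = MagicMock()
        kube.list_pods.return_value = []
        plugin = FakePodDisruptionScenarioPlugin(kube)
        self.assertEqual(plugin.run({}, [], "~/.kube/config", 0), 1)


if __name__ == "__main__":
    unittest.main()
```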
83  GOVERNANCE.md  Normal file
@@ -0,0 +1,83 @@
The governance model adopted here is heavily influenced by a set of CNCF projects, drawing
particular reference from [Kubernetes governance](https://github.com/kubernetes/community/blob/master/governance.md).
*For similar structures, some of the same wording from the Kubernetes governance document is borrowed to adhere
to its originally construed meaning.*

## Principles

- **Open**: Krkn is an open source community.
- **Welcoming and respectful**: See [Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
- **Transparent and accessible**: Work and collaboration should be done in public.
  Changes to the Krkn organization, Krkn code repositories, and CNCF-related activities (e.g.
  level, involvement, etc.) are done in public.
- **Merit**: Ideas and contributions are accepted according to their technical merit
  and alignment with project objectives, scope and design principles.

## Code of Conduct

Krkn follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
Here is an excerpt:

> As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities.

## Maintainer Levels

### Contributor
Contributors contribute to the community. Anyone can become a contributor by participating in discussions, reporting bugs, or contributing code or documentation.

#### Responsibilities:

Be active in the community and adhere to the Code of Conduct.

Report bugs and suggest new features.

Contribute high-quality code and documentation.

### Member
Members are active contributors to the community. Members have demonstrated a strong understanding of the project's codebase and conventions.

#### Responsibilities:

Review pull requests for correctness, quality, and adherence to project standards.

Provide constructive and timely feedback to contributors.

Ensure that all contributions are well-tested and documented.

Work with maintainers to ensure a smooth and efficient release process.

### Maintainer
Maintainers are responsible for the overall health and direction of the project. They are long-standing contributors who have shown a deep commitment to the project's success.

#### Responsibilities:

Set the technical direction and vision for the project.

Manage releases and ensure the stability of the main branch.

Make decisions on feature inclusion and project priorities.

Mentor other contributors and help grow the community.

Resolve disputes and make final decisions when consensus cannot be reached.

### Owner
Owners have administrative access to the project and are the final decision-makers.

#### Responsibilities:

Manage the core team of maintainers and approvers.

Set the overall vision and strategy for the project.

Handle administrative tasks, such as managing the project's repository and other resources.

Represent the project in the broader open-source community.

# Credits
Sections of this document have been borrowed from [Kubernetes governance](https://github.com/kubernetes/community/blob/master/governance.md)
@@ -1,12 +1,34 @@
## Overview

This document contains a list of maintainers in this repo.
This file lists the maintainers and committers of the Krkn project.

In short, maintainers are people who are in charge of the maintenance of the Krkn project. Committers are active community members who have shown that they are committed to the continuous development of the project through ongoing engagement with the community.

For a detailed description of the roles, see the [Governance](./GOVERNANCE.md) page.

## Current Maintainers

| Maintainer        | GitHub ID                                       | Email               |
|-------------------|-------------------------------------------------|---------------------|
| Ravi Elluri       | [chaitanyaenr](https://github.com/chaitanyaenr) | nelluri@redhat.com  |
| Pradeep Surisetty | [psuriset](https://github.com/psuriset)         | psuriset@redhat.com |
| Paige Rubendall   | [paigerube14](https://github.com/paigerube14)   | prubenda@redhat.com |
| Tullio Sebastiani | [tsebastiani](https://github.com/tsebastiani)   | tsebasti@redhat.com |
| Maintainer            | GitHub ID                                                         | Email               | Contribution Level |
|-----------------------|-------------------------------------------------------------------|---------------------|--------------------|
| Ravi Elluri           | [chaitanyaenr](https://github.com/chaitanyaenr)                    | nelluri@redhat.com  | Owner              |
| Pradeep Surisetty     | [psuriset](https://github.com/psuriset)                            | psuriset@redhat.com | Owner              |
| Paige Patton          | [paigerube14](https://github.com/paigerube14)                      | prubenda@redhat.com | Maintainer         |
| Tullio Sebastiani     | [tsebastiani](https://github.com/tsebastiani)                      | tsebasti@redhat.com | Maintainer         |
| Yogananth Subramanian | [yogananth-subramanian](https://github.com/yogananth-subramanian)  | ysubrama@redhat.com | Maintainer         |
| Sahil Shah            | [shahsahil264](https://github.com/shahsahil264)                    | sahshah@redhat.com  | Member             |

Note: It is mandatory for all Krkn community members to follow our [Code of Conduct](./CODE_OF_CONDUCT.md)

## Contributor Ladder
This project follows a contributor ladder model, where contributors can take on more responsibilities as they gain experience and demonstrate their commitment to the project.
The roles are:
* Contributor: A contributor to the community, whether it be with code, docs or issues

* Member: A contributor who is active in the community and reviews pull requests.

* Maintainer: A contributor who is responsible for the overall health and direction of the project.

* Owner: A contributor who has administrative ownership of the project.
105  README.md
@@ -2,6 +2,7 @@



[](https://www.bestpractices.dev/projects/10548)



@@ -10,102 +11,21 @@ Kraken injects deliberate failures into Kubernetes clusters to check if it is re

### Workflow


### Demo
[](https://youtu.be/LN-fZywp_mo "Kraken Demo - Click to Watch!")


### Chaos Testing Guide
[Guide](docs/index.md) encapsulates:
- Test methodology that needs to be embraced.
- Best practices that a Kubernetes cluster, platform and applications running on top of it should take into account for best user experience, performance, resilience and reliability.
- Tooling.
- Scenarios supported.
- Test environment recommendations as to how and where to run chaos tests.
- Chaos testing in practice.

The guide is hosted at https://krkn-chaos.github.io/krkn.
<!-- ### Demo
[](https://youtu.be/LN-fZywp_mo "Kraken Demo - Click to Watch!") -->

### How to Get Started
Instructions on how to set up, configure and run Kraken can be found at [Installation](docs/installation.md).

You may consider utilizing the chaos recommendation tool prior to initiating the chaos runs to profile the application service(s) under test. This tool discovers a list of Krkn scenarios with a high probability of causing failures or disruptions to your application service(s). The tool can be accessed at [Chaos-Recommender](utils/chaos_recommender/README.md).

See the [getting started doc](docs/getting_started.md) for support on how to get started with your own custom scenario or on editing current scenarios for your specific usage.

After installation, refer back to the below sections for supported scenarios and how to tweak the kraken config to load them on your cluster.
Instructions on how to set up, configure and run Kraken can be found in the [documentation](https://krkn-chaos.dev/docs/).

#### Running Kraken with minimal configuration tweaks
For cases where you want to run Kraken with minimal configuration changes, refer to [krkn-hub](https://github.com/krkn-chaos/krkn-hub). One use case is CI integration where you do not want to carry around different configuration files for the scenarios.
### Blogs, podcasts and interviews
Additional resources, including blog posts, podcasts, and community interviews, can be found on the [website](https://krkn-chaos.dev/blog)

### Config
Instructions on how to set up the config and the options supported can be found at [Config](docs/config.md).

### Kubernetes chaos scenarios supported

Scenario type | Kubernetes
--------------------------- | ------------- |
[Pod Scenarios](docs/pod_scenarios.md) | :heavy_check_mark: |
[Pod Network Scenarios](docs/pod_network_scenarios.md) | :x: |
[Container Scenarios](docs/container_scenarios.md) | :heavy_check_mark: |
[Node Scenarios](docs/node_scenarios.md) | :heavy_check_mark: |
[Time Scenarios](docs/time_scenarios.md) | :heavy_check_mark: |
[Hog Scenarios: CPU, Memory](docs/arcaflow_scenarios.md) | :heavy_check_mark: |
[Cluster Shut Down Scenarios](docs/cluster_shut_down_scenarios.md) | :heavy_check_mark: |
[Service Disruption Scenarios](docs/service_disruption_scenarios.md.md) | :heavy_check_mark: |
[Zone Outage Scenarios](docs/zone_outage.md) | :heavy_check_mark: |
[Application_outages](docs/application_outages.md) | :heavy_check_mark: |
[PVC scenario](docs/pvc_scenario.md) | :heavy_check_mark: |
[Network_Chaos](docs/network_chaos.md) | :heavy_check_mark: |
[ManagedCluster Scenarios](docs/managedcluster_scenarios.md) | :heavy_check_mark: |
[Service Hijacking Scenarios](docs/service_hijacking_scenarios.md) | :heavy_check_mark: |
[SYN Flood Scenarios](docs/syn_flood_scenarios.md) | :heavy_check_mark: |

### Kraken scenario pass/fail criteria and report
It is important to make sure to check if the targeted component recovered from the chaos injection and also if the Kubernetes cluster is healthy, as failures in one component can have an adverse impact on other components. Kraken does this by:
- Having built-in checks for pod and node based scenarios to ensure the expected number of replicas and nodes are up. It also supports running custom scripts with the checks.
- Leveraging [Cerberus](https://github.com/krkn-chaos/cerberus) to monitor the cluster under test and consuming the aggregated go/no-go signal to determine pass/fail post chaos. It is highly recommended to turn on the Cerberus health check feature available in Kraken. Instructions on installing and setting up Cerberus can be found [here](https://github.com/openshift-scale/cerberus#installation), or it can be installed from Kraken using the [instructions](https://github.com/krkn-chaos/krkn#setting-up-infrastructure-dependencies). Once Cerberus is up and running, set cerberus_enabled to True and cerberus_url to the url where Cerberus publishes the go/no-go signal in the Kraken config file. Cerberus can monitor [application routes](https://github.com/redhat-chaos/cerberus/blob/main/docs/config.md#watch-routes) during the chaos and fails the run if it encounters downtime, as that is a potential downtime in a customer's or user's environment as well. This is especially important during control plane chaos scenarios including the API server, Etcd, Ingress, etc. It can be enabled by setting `check_application_routes: True` in the [Kraken config](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml) provided application routes are being monitored in the [cerberus config](https://github.com/redhat-chaos/krkn/blob/main/config/cerberus.yaml).
- Leveraging the built-in alert collection feature to fail the runs in case of critical alerts.

### Signaling
In CI runs or any external job it is useful to stop Kraken once a certain test or state gets reached. We created a way to signal to Kraken to pause the chaos or stop it completely using a signal posted to a port of your choice.

For example, if we have a test run loading the cluster and Kraken running separately, we want to be able to know when to start/stop the Kraken run based on when the test run completes or gets to a certain loaded state.

More detailed information on enabling and leveraging this feature can be found [here](docs/signal.md).
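For illustration, a hypothetical client sketch; the exact endpoint and payload are defined in [docs/signal.md](docs/signal.md), so the path and body below are assumptions, not the documented API:

```python
# Hypothetical sketch - consult docs/signal.md for the real endpoint and payload.
import requests

SIGNAL_ADDRESS = "http://0.0.0.0:8081"  # matches signal_address/port in config.yaml


def set_kraken_state(state: str) -> None:
    """Ask a running Kraken to RUN, PAUSE, or STOP (illustrative payload)."""
    assert state in ("RUN", "PAUSE", "STOP")
    requests.post(SIGNAL_ADDRESS, data=state, timeout=5)


# e.g. pause chaos while a load test ramps up, then resume:
# set_kraken_state("PAUSE"); ...; set_kraken_state("RUN")
```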
### Performance monitoring
Monitoring the Kubernetes/OpenShift cluster to observe the impact of Kraken chaos scenarios on various components is key to finding the bottlenecks, as it is important to make sure the cluster is healthy in terms of both recovery and performance during/after the failure has been injected. Instructions on enabling it can be found [here](docs/performance_dashboards.md).

### SLOs validation during and post chaos
- In addition to checking the recovery and health of the cluster and components under test, Kraken takes in a profile with the Prometheus expressions to validate, and alerts and exits with a non-zero return code depending on the severity set. This feature can be used to determine pass/fail or alert on abnormalities observed in the cluster based on the metrics.
- Kraken also provides the ability to check if any critical alerts are firing in the cluster post chaos and passes/fails the run accordingly.

Information on enabling and leveraging this feature can be found [here](docs/SLOs_validation.md)

### OCM / ACM integration

Kraken supports injecting faults into [Open Cluster Management (OCM)](https://open-cluster-management.io/) and [Red Hat Advanced Cluster Management for Kubernetes (ACM)](https://www.krkn.com/en/technologies/management/advanced-cluster-management) managed clusters through [ManagedCluster Scenarios](docs/managedcluster_scenarios.md).

### Blogs and other useful resources
- Blog post on introduction to Kraken: https://www.openshift.com/blog/introduction-to-kraken-a-chaos-tool-for-openshift/kubernetes
- Discussion and demo on how Kraken can be leveraged to ensure OpenShift is reliable, performant and scalable: https://www.youtube.com/watch?v=s1PvupI5sD0&ab_channel=OpenShift
- Blog post emphasizing the importance of making Chaos part of Performance and Scale runs to mimic the production environments: https://www.openshift.com/blog/making-chaos-part-of-kubernetes/openshift-performance-and-scalability-tests
- Blog post on findings from Chaos test runs: https://cloud.redhat.com/blog/openshift/kubernetes-chaos-stories
- Discussion with CNCF TAG App Delivery on Krkn workflow, features and addition to CNCF sandbox: [Github](https://github.com/cncf/sandbox/issues/44), [Tracker](https://github.com/cncf/tag-app-delivery/issues/465), [recording](https://www.youtube.com/watch?v=nXQkBFK_MWc&t=722s)
- Blog post on supercharging chaos testing using AI integration in Krkn: https://www.redhat.com/en/blog/supercharging-chaos-testing-using-ai
- Blog post announcing Krkn joining CNCF Sandbox: https://www.redhat.com/en/blog/krknchaos-joining-cncf-sandbox

### Roadmap
Enhancements being planned can be found in the [roadmap](ROADMAP.md).

@@ -113,17 +33,8 @@ Enhancements being planned can be found in the [roadmap](ROADMAP.md).
### Contributions
We are always looking for more enhancements and fixes to make it better; any contributions are most welcome. Feel free to report or work on the issues filed on GitHub.

[More information on how to Contribute](docs/contribute.md)
[More information on how to Contribute](https://krkn-chaos.dev/docs/contribution-guidelines/)

If adding a new scenario or tweaking the main config, be sure to add updates into the CI to keep the CI up to date.
Please read [this file](CI/README.md#adding-a-test-case) for more information on updates.

### Scenario Plugin Development

If you're gearing up to develop new scenarios, take a moment to review our
[Scenario Plugin API Documentation](docs/scenario_plugin_api.md).
It’s the perfect starting point to tap into your chaotic creativity!

### Community
Key Members (slack_usernames/full name): paigerube14/Paige Rubendall, mffiedler/Mike Fiedler, tsebasti/Tullio Sebastiani, yogi/Yogananth Subramanian, sahil/Sahil Shah, pradeep/Pradeep Surisetty and ravielluri/Naga Ravi Chaitanya Elluri.
55  RELEASE.md  Normal file
@@ -0,0 +1,55 @@
### Release Protocol: The Community-First Cycle

This document outlines the project's release protocol, a methodology designed to ensure a responsive and transparent development process that is closely aligned with the needs of our users and contributors. This protocol is tailored for projects in their early stages, prioritizing agility and community feedback over a rigid, time-boxed schedule.

#### 1. Key Principles

* **Community as the Compass:** The primary driver for all development is feedback from our user and contributor community.
* **Prioritization by Impact:** Tasks are prioritized based on their impact on user experience, the urgency of bug fixes, and the value of community-contributed features.
* **Event-Driven Releases:** Releases are not bound by a fixed calendar. New versions are published when a significant body of work is complete, a critical issue is resolved, or a new feature is ready for adoption.
* **Transparency and Communication:** All development decisions, progress, and plans are communicated openly through our issue tracker, pull requests, and community channels.

#### 2. The Release Lifecycle

The release cycle is a continuous flow of activities rather than a series of sequential phases.

**2.1. Discovery & Prioritization**
* New features and bug fixes are identified through user feedback on our issue tracker, community discussions, and direct contributions.
* The core maintainers, in collaboration with the community, continuously evaluate and tag issues to create an open and dynamic backlog.

**2.2. Development & Code Review**
* Work is initiated based on the highest-priority items in the backlog.
* All code contributions are made via pull requests (PRs).
* PRs are reviewed by maintainers and other contributors to ensure code quality, adherence to project standards, and overall stability.

**2.3. Release Readiness**
A new release is considered ready when one of the following conditions is met:
* A major new feature has been completed and thoroughly tested.
* A critical security vulnerability or bug has been addressed.
* A sufficient number of smaller improvements and fixes have been merged, providing meaningful value to users.

**2.4. Versioning**
We adhere to [**Semantic Versioning 2.0.0**](https://semver.org/).
* **Major version (`X.y.z`)**: Reserved for releases that introduce breaking changes.
* **Minor version (`x.Y.z`)**: Used for new features or significant non-breaking changes.
* **Patch version (`x.y.Z`)**: Used for bug fixes and small, non-functional improvements.

#### 3. Roles and Responsibilities

* **Members:** Active community members listed in [MAINTAINERS.md](https://github.com/krkn-chaos/krkn/blob/main/MAINTAINERS.md) who support the project's day-to-day development. Their duties include:
    * Reviewing pull requests.
    * Contributing code and documentation via pull requests.
    * Engaging in discussions and providing feedback.
* **Maintainers and Owners:** The [core team](https://github.com/krkn-chaos/krkn/blob/main/MAINTAINERS.md) responsible for the project's health. Their duties include:
    * Facilitating community discussions and prioritization.
    * Reviewing and merging pull requests.
    * Cutting and announcing official releases.
* **Contributors:** The community. Their duties include:
    * Reporting bugs and suggesting new features.
    * Contributing code and documentation via pull requests.
    * Engaging in discussions and providing feedback.

#### 4. Adoption and Future Evolution

This protocol is designed for the current stage of the project. As the project matures and the contributor base grows, the maintainers will evaluate the need for a more structured methodology to ensure continued scalability and stability.
16  ROADMAP.md
@@ -2,11 +2,11 @@
Following is a list of enhancements that we are planning to add support for in Krkn. Of course, any help/contributions are greatly appreciated.

- [ ] [Ability to run multiple chaos scenarios in parallel under load to mimic real world outages](https://github.com/krkn-chaos/krkn/issues/424)
- [x] [Ability to run multiple chaos scenarios in parallel under load to mimic real world outages](https://github.com/krkn-chaos/krkn/issues/424)
- [x] [Centralized storage for chaos experiments artifacts](https://github.com/krkn-chaos/krkn/issues/423)
- [ ] [Support for causing DNS outages](https://github.com/krkn-chaos/krkn/issues/394)
- [x] [Support for causing DNS outages](https://github.com/krkn-chaos/krkn/issues/394)
- [x] [Chaos recommender](https://github.com/krkn-chaos/krkn/tree/main/utils/chaos-recommender) to suggest scenarios having probability of impacting the service under test using profiling results
- [ ] Chaos AI integration to improve test coverage while reducing fault space to save costs and execution time
- [x] Chaos AI integration to improve test coverage while reducing fault space to save costs and execution time [krkn-chaos-ai](https://github.com/krkn-chaos/krkn-chaos-ai)
- [x] [Support for pod level network traffic shaping](https://github.com/krkn-chaos/krkn/issues/393)
- [ ] [Ability to visualize the metrics that are being captured by Kraken and stored in Elasticsearch](https://github.com/krkn-chaos/krkn/issues/124)
- [x] Support for running all the scenarios of Kraken on Kubernetes distribution - see https://github.com/krkn-chaos/krkn/issues/185, https://github.com/redhat-chaos/krkn/issues/186
@@ -14,3 +14,13 @@ Following are a list of enhancements that we are planning to work on adding supp
- [x] [Switch documentation references to Kubernetes](https://github.com/krkn-chaos/krkn/issues/495)
- [x] [OCP and Kubernetes functionalities segregation](https://github.com/krkn-chaos/krkn/issues/497)
- [x] [Krknctl - client for running Krkn scenarios with ease](https://github.com/krkn-chaos/krknctl)
- [x] [AI Chat bot to help get started with Krkn and commands](https://github.com/krkn-chaos/krkn-lightspeed)
- [ ] [Ability to roll back cluster to original state if chaos fails](https://github.com/krkn-chaos/krkn/issues/804)
- [ ] Add recovery time metrics to each scenario for better regression analysis
- [ ] [Add resiliency scoring to chaos scenarios ran on cluster](https://github.com/krkn-chaos/krkn/issues/125)
- [ ] [Add AI-based Chaos Configuration Generator](https://github.com/krkn-chaos/krkn/issues/1166)
- [ ] [Introduce Security Chaos Engineering Scenarios](https://github.com/krkn-chaos/krkn/issues/1165)
- [ ] [Add AWS-native Chaos Scenarios (S3, Lambda, Networking)](https://github.com/krkn-chaos/krkn/issues/1164)
- [ ] [Unify Krkn Ecosystem under krknctl for Enhanced UX](https://github.com/krkn-chaos/krknctl/issues/113)
- [ ] [Build Web UI for Creating, Monitoring, and Reviewing Chaos Scenarios](https://github.com/krkn-chaos/krkn/issues/1167)
- [ ] [Add Predefined Chaos Scenario Templates (KRKN Chaos Library)](https://github.com/krkn-chaos/krkn/issues/1168)
43  SECURITY.md  Normal file
@@ -0,0 +1,43 @@
# Security Policy

We attach great importance to code security. We are very grateful to the users, security researchers, and others who report security vulnerabilities to the Krkn community. All reported security vulnerabilities will be carefully assessed and addressed in a timely manner.

## Security Checks

Krkn leverages [Snyk](https://snyk.io/) to ensure that any security vulnerabilities found
in the code base and dependencies are fixed and published in the latest release. Security
vulnerability checks are enabled for each pull request to give developers insight
and let them proactively fix issues.

## Reporting a Vulnerability

The Krkn project treats security vulnerabilities seriously, so we
strive to take action quickly when required.

The project requests that security issues be disclosed in a responsible
manner to allow adequate time to respond. If a security issue or
vulnerability has been found, please disclose the details to our
dedicated email address:

cncf-krkn-maintainers@lists.cncf.io

You can also use the [GitHub vulnerability report mechanism](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability) to report the security vulnerability.

Please include as much information as possible with the report. The
following details assist with analysis efforts:
- Description of the vulnerability
- Affected component (version, commit, branch, etc.)
- Affected code (file path, line numbers)
- Exploit code

## Security Team

The security team currently consists of the [Maintainers of Krkn](https://github.com/krkn-chaos/krkn/blob/main/MAINTAINERS.md)

## Process and Supported Releases

The Krkn security team will investigate and provide a fix in a timely manner depending on the severity. The fix will be included in the new release of Krkn and details will be included in the release notes.
@@ -39,7 +39,7 @@ cerberus:
        Sunday:
    slack_team_alias: # The slack team alias to be tagged while reporting failures in the slack channel when no watcher is assigned

    custom_checks: # Relative paths of files conataining additional user defined checks
    custom_checks: # Relative paths of files containing additional user defined checks

tunings:
    timeout: 3 # Number of seconds before requests fail
@@ -1,75 +1,76 @@
|
||||
kraken:
|
||||
distribution: kubernetes # Distribution can be kubernetes or openshift
|
||||
kubeconfig_path: ~/.kube/config # Path to kubeconfig
|
||||
exit_on_failure: False # Exit when a post action scenario fails
|
||||
auto_rollback: True # Enable auto rollback for scenarios.
|
||||
rollback_versions_directory: /tmp/kraken-rollback # Directory to store rollback version files.
|
||||
publish_kraken_status: True # Can be accessed at http://0.0.0.0:8081
|
||||
signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer docs/signal.md for more details
|
||||
signal_address: 0.0.0.0 # Signal listening address
|
||||
port: 8081 # Signal port
|
||||
chaos_scenarios:
|
||||
# List of policies/chaos scenarios to load
|
||||
- hog_scenarios:
|
||||
- scenarios/kube/cpu-hog/input.yaml
|
||||
- scenarios/kube/memory-hog/input.yaml
|
||||
- scenarios/kube/io-hog/input.yaml
|
||||
- scenarios/kube/io-hog/input.yaml
|
||||
- application_outages_scenarios:
|
||||
- scenarios/openshift/app_outage.yaml
|
||||
- container_scenarios: # List of chaos pod scenarios to load
|
||||
- scenarios/openshift/container_etcd.yml
|
||||
- pod_network_scenarios:
|
||||
- scenarios/openshift/network_chaos_ingress.yml
|
||||
- scenarios/openshift/pod_network_outage.yml
|
||||
- pod_disruption_scenarios:
|
||||
- scenarios/openshift/etcd.yml
|
||||
- scenarios/openshift/regex_openshift_pod_kill.yml
|
||||
- scenarios/openshift/prom_kill.yml
|
||||
- scenarios/openshift/openshift-apiserver.yml
|
||||
- scenarios/openshift/openshift-kube-apiserver.yml
|
||||
- vmware_node_scenarios:
|
||||
- scenarios/openshift/vmware_node_scenarios.yml
|
||||
- ibmcloud_node_scenarios:
|
||||
- scenarios/openshift/ibmcloud_node_scenarios.yml
|
||||
- node_scenarios: # List of chaos node scenarios to load
|
||||
- scenarios/openshift/aws_node_scenarios.yml
|
||||
- time_scenarios: # List of chaos time scenarios to load
|
||||
- scenarios/openshift/time_scenarios_example.yml
|
||||
- cluster_shut_down_scenarios:
|
||||
- scenarios/openshift/cluster_shut_down_scenario.yml
|
||||
- service_disruption_scenarios:
|
||||
- scenarios/openshift/regex_namespace.yaml
|
||||
- scenarios/openshift/ingress_namespace.yaml
|
||||
- zone_outages_scenarios:
|
||||
- scenarios/openshift/zone_outage.yaml
|
||||
- pvc_scenarios:
|
||||
- scenarios/openshift/pvc_scenario.yaml
|
||||
- network_chaos_scenarios:
|
||||
- scenarios/openshift/network_chaos.yaml
|
||||
- service_hijacking_scenarios:
|
||||
- scenarios/kube/service_hijacking.yaml
|
||||
- syn_flood_scenarios:
|
||||
- scenarios/kube/syn_flood.yaml
|
||||
# List of policies/chaos scenarios to load
|
||||
- hog_scenarios:
|
||||
- scenarios/kube/cpu-hog.yml
|
||||
- scenarios/kube/memory-hog.yml
|
||||
- scenarios/kube/io-hog.yml
|
||||
- application_outages_scenarios:
|
||||
- scenarios/openshift/app_outage.yaml
|
||||
- container_scenarios: # List of chaos pod scenarios to load
|
||||
- scenarios/openshift/container_etcd.yml
|
||||
- pod_network_scenarios:
|
||||
- scenarios/openshift/network_chaos_ingress.yml
|
||||
- scenarios/openshift/pod_network_outage.yml
|
||||
- pod_disruption_scenarios:
|
||||
- scenarios/openshift/etcd.yml
|
||||
- scenarios/openshift/regex_openshift_pod_kill.yml
|
||||
- scenarios/openshift/prom_kill.yml
|
||||
- scenarios/openshift/openshift-apiserver.yml
|
||||
- scenarios/openshift/openshift-kube-apiserver.yml
|
||||
- node_scenarios: # List of chaos node scenarios to load
|
||||
- scenarios/openshift/aws_node_scenarios.yml
|
||||
- scenarios/openshift/vmware_node_scenarios.yml
|
||||
- scenarios/openshift/ibmcloud_node_scenarios.yml
|
||||
- time_scenarios: # List of chaos time scenarios to load
|
||||
- scenarios/openshift/time_scenarios_example.yml
|
||||
- cluster_shut_down_scenarios:
|
||||
- scenarios/openshift/cluster_shut_down_scenario.yml
|
||||
- service_disruption_scenarios:
|
||||
- scenarios/openshift/regex_namespace.yaml
|
||||
- scenarios/openshift/ingress_namespace.yaml
|
||||
- zone_outages_scenarios:
|
||||
- scenarios/openshift/zone_outage.yaml
|
||||
- pvc_scenarios:
|
||||
- scenarios/openshift/pvc_scenario.yaml
|
||||
- network_chaos_scenarios:
|
||||
- scenarios/openshift/network_chaos.yaml
|
||||
- service_hijacking_scenarios:
|
||||
- scenarios/kube/service_hijacking.yaml
|
||||
- syn_flood_scenarios:
|
||||
- scenarios/kube/syn_flood.yaml
|
||||
- network_chaos_ng_scenarios:
|
||||
- scenarios/kube/pod-network-filter.yml
|
||||
- scenarios/kube/node-network-filter.yml
|
||||
- scenarios/kube/node-network-chaos.yml
|
||||
- scenarios/kube/pod-network-chaos.yml
|
||||
- kubevirt_vm_outage:
|
||||
- scenarios/kubevirt/kubevirt-vm-outage.yaml
|
||||
|
||||
cerberus:
|
||||
cerberus_enabled: False # Enable it when cerberus is previously installed
|
||||
cerberus_url: # When cerberus_enabled is set to True, provide the url where cerberus publishes go/no-go signal
|
||||
check_applicaton_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
|
||||
check_application_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
|
||||
|
||||
performance_monitoring:
|
||||
deploy_dashboards: False # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift
|
||||
repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
|
||||
prometheus_url: '' # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
|
||||
prometheus_url: '' # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
|
||||
prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
|
||||
uuid: # uuid for the run is generated by default if not set
|
||||
enable_alerts: False # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
|
||||
enable_metrics: False
|
||||
alert_profile: config/alerts.yaml # Path or URL to alert profile with the prometheus queries
|
||||
metrics_profile: config/metrics.yaml
|
||||
metrics_profile: config/metrics-report.yaml
|
||||
check_critical_alerts: False # When enabled will check prometheus for critical alerts firing post chaos
|
||||
elastic:
|
||||
enable_elastic: False
|
||||
collect_metrics: False
|
||||
collect_alerts: False
|
||||
verify_certs: False
|
||||
elastic_url: "" # To track results in elasticsearch, give url to server here; will post telemetry details when url and index not blank
|
||||
elastic_port: 32766
|
||||
@@ -78,9 +79,10 @@ elastic:
|
||||
metrics_index: "krkn-metrics"
|
||||
alerts_index: "krkn-alerts"
|
||||
telemetry_index: "krkn-telemetry"
|
||||
run_tag: ""
|
||||
|
||||
tunings:
-   wait_duration: 60 # Duration to wait between each chaos scenario
+   wait_duration: 1 # Duration to wait between each chaos scenario
    iterations: 1 # Number of times to execute the scenarios
    daemon_mode: False # Iterations are set to infinity which means that the kraken will cause chaos forever

telemetry:

@@ -94,7 +96,7 @@ telemetry:
    prometheus_pod_name: "" # name of the prometheus pod (if distribution is kubernetes)
    full_prometheus_backup: False # if set to False, only the /prometheus/wal folder will be downloaded
    backup_threads: 5 # number of telemetry download/upload threads
-   archive_path: /tmp # local path where the archive files will be temporarly stored
+   archive_path: /tmp # local path where the archive files will be temporarily stored
    max_retries: 0 # maximum number of upload retries (if 0 will retry forever)
    run_tag: '' # if set, this will be appended to the run folder in the bucket (useful to group the runs)
    archive_size: 500000

@@ -113,7 +115,21 @@ telemetry:
    oc_cli_path: /usr/bin/oc # optional, if not specified it will be searched in $PATH
    events_backup: True # enables/disables cluster events collection

health_checks: # Utilizing health check endpoints to observe application behavior during chaos injection.
    interval: # Interval in seconds to perform health checks, default value is 2 seconds
    config: # Provide a list of health check configurations for applications
        - url: # Provide the application endpoint
          bearer_token: # Bearer token for authentication, if any
          auth: # Provide authentication credentials (username, password) in tuple format if any, e.g. ("admin","secretpassword")
          exit_on_failure: # If True, exits when a health check fails for an application; values can be True/False
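To make the shape of this block concrete, here is a populated sketch; the endpoint and credentials are hypothetical placeholders, not shipped defaults:

```yaml
health_checks:
    interval: 2
    config:
        - url: "https://frontend.apps.example.com/healthz"   # hypothetical application endpoint
          bearer_token:                                      # unused here; basic auth is supplied instead
          auth: ("admin","secretpassword")                   # (username, password) tuple, per the comment above
          exit_on_failure: True                              # abort the run if this application goes down
```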
kubevirt_checks: # Utilizing virt checks to observe SSH reachability of VMIs during chaos injection.
    interval: 2 # Interval in seconds to perform virt checks, default value is 2 seconds
    namespace: # Namespace in which to find the VMIs
    name: # Regex of VMI names to watch; optional, watches all VMI names in the namespace if left blank
    only_failures: False # If False, reports both failed and successful SSH connections; if True, only failures
    disconnected: False # How to connect to the VMIs: if True, uses the ip_address to SSH from within a node; if False, uses the name and virtctl to connect. Default is False
    ssh_node: "" # If set, used as a fallback node to SSH from; pick a node that isn't targeted by the chaos
    node_names: ""
    exit_on_failure: # If True and VMIs are still failing post chaos, the run returns failure; values can be True/False
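A populated example, with a hypothetical namespace, VMI name pattern, and fallback node:

```yaml
kubevirt_checks:
    interval: 2
    namespace: "vm-workloads"      # hypothetical namespace holding the VMIs
    name: "db-vm-.*"               # regex of VMI names to watch
    only_failures: True            # report only failed SSH attempts
    disconnected: False            # use virtctl rather than in-node SSH
    ssh_node: "worker-3"           # hypothetical node not targeted by the chaos
    exit_on_failure: True
```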
@@ -7,26 +7,33 @@ kraken:
    signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer docs/signal.md for more details
    signal_address: 0.0.0.0 # Signal listening address
    chaos_scenarios: # List of policies/chaos scenarios to load
        - plugin_scenarios:
            - scenarios/kind/scheduler.yml
        - node_scenarios:
            - scenarios/kind/node_scenarios_example.yml
        - pod_disruption_scenarios:
            - scenarios/kube/pod.yml

cerberus:
    cerberus_enabled: False # Enable it when cerberus is previously installed
    cerberus_url: # When cerberus_enabled is set to True, provide the url where cerberus publishes go/no-go signal
-   check_applicaton_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
+   check_application_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run

performance_monitoring:
    deploy_dashboards: False # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift
    repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
    prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
    prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
    uuid: # uuid for the run is generated by default if not set
    enable_alerts: False # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
    alert_profile: config/alerts.yaml # Path to alert profile with the prometheus queries

elastic:
    enable_elastic: False

tunings:
    wait_duration: 60 # Duration to wait between each chaos scenario
    iterations: 1 # Number of times to execute the scenarios
    daemon_mode: False # Iterations are set to infinity which means that the kraken will cause chaos forever

telemetry:
    enabled: False # enables/disables the telemetry collection feature
    archive_path: /tmp # local path where the archive files will be temporarily stored
    events_backup: False # enables/disables cluster events collection
    logs_backup: False

health_checks: # Utilizing health check endpoints to observe application behavior during chaos injection.
@@ -14,11 +14,9 @@ kraken:

cerberus:
    cerberus_enabled: False # Enable it when cerberus is previously installed
    cerberus_url: # When cerberus_enabled is set to True, provide the url where cerberus publishes go/no-go signal
-   check_applicaton_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
+   check_application_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run

performance_monitoring:
    deploy_dashboards: False # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift
    repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
    prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
    prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
    uuid: # uuid for the run is generated by default if not set
@@ -35,7 +35,7 @@ kraken:

cerberus:
    cerberus_enabled: True # Enable it when cerberus is previously installed
    cerberus_url: http://0.0.0.0:8080 # When cerberus_enabled is set to True, provide the url where cerberus publishes go/no-go signal
-   check_applicaton_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
+   check_application_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run

performance_monitoring:
    deploy_dashboards: True # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift

@@ -61,7 +61,7 @@ telemetry:
    prometheus_backup: True # enables/disables prometheus data collection
    full_prometheus_backup: False # if set to False, only the /prometheus/wal folder will be downloaded
    backup_threads: 5 # number of telemetry download/upload threads
-   archive_path: /tmp # local path where the archive files will be temporarly stored
+   archive_path: /tmp # local path where the archive files will be temporarily stored
    max_retries: 0 # maximum number of upload retries (if 0 will retry forever)
    run_tag: '' # if set, this will be appended to the run folder in the bucket (useful to group the runs)
    archive_size: 500000 # the size of the prometheus data archive size in KB. The lower the size of archive is
@@ -1,133 +1,126 @@
metrics:
  # API server
  - query: histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{apiserver="kube-apiserver", verb!~"WATCH", subresource!="log"}[2m])) by (verb,resource,subresource,instance,le)) > 0
    metricName: API99thLatency

  - query: sum(irate(apiserver_request_total{apiserver="kube-apiserver",verb!="WATCH",subresource!="log"}[2m])) by (verb,instance,resource,code) > 0
    metricName: APIRequestRate
    instant: True

  - query: sum(apiserver_current_inflight_requests{}) by (request_kind) > 0
    metricName: APIInflightRequests
    instant: True

  - query: histogram_quantile(0.99, rate(apiserver_current_inflight_requests[5m]))
    metricName: APIInflightRequests
    instant: True

  # Container & pod metrics
  - query: (sum(container_memory_rss{name!="",container!="POD",namespace=~"openshift-(etcd|oauth-apiserver|.*apiserver|ovn-kubernetes|sdn|ingress|authentication|.*controller-manager|.*scheduler)"}) by (container, pod, namespace, node) and on (node) kube_node_role{role="master"}) > 0
    metricName: containerMemory-Masters
    instant: true

  - query: (sum(irate(container_cpu_usage_seconds_total{name!="",container!="POD",namespace=~"openshift-(etcd|oauth-apiserver|sdn|ovn-kubernetes|.*apiserver|authentication|.*controller-manager|.*scheduler)"}[2m]) * 100) by (container, pod, namespace, node) and on (node) kube_node_role{role="master"}) > 0
    metricName: containerCPU-Masters
    instant: true

  - query: (sum(irate(container_cpu_usage_seconds_total{pod!="",container="prometheus",namespace="openshift-monitoring"}[2m]) * 100) by (container, pod, namespace, node) and on (node) kube_node_role{role="infra"}) > 0
    metricName: containerCPU-Prometheus
    instant: true

  - query: (avg(irate(container_cpu_usage_seconds_total{name!="",container!="POD",namespace=~"openshift-(sdn|ovn-kubernetes|ingress)"}[2m]) * 100 and on (node) kube_node_role{role="worker"}) by (namespace, container)) > 0
    metricName: containerCPU-AggregatedWorkers
    instant: true

  - query: (avg(irate(container_cpu_usage_seconds_total{name!="",container!="POD",namespace=~"openshift-(sdn|ovn-kubernetes|ingress|monitoring|image-registry|logging)"}[2m]) * 100 and on (node) kube_node_role{role="infra"}) by (namespace, container)) > 0
    metricName: containerCPU-AggregatedInfra

  - query: (sum(container_memory_rss{pod!="",namespace="openshift-monitoring",name!="",container="prometheus"}) by (container, pod, namespace, node) and on (node) kube_node_role{role="infra"}) > 0
    metricName: containerMemory-Prometheus
    instant: True

  - query: avg(container_memory_rss{name!="",container!="POD",namespace=~"openshift-(sdn|ovn-kubernetes|ingress)"} and on (node) kube_node_role{role="worker"}) by (container, namespace)
    metricName: containerMemory-AggregatedWorkers
    instant: True

  - query: avg(container_memory_rss{name!="",container!="POD",namespace=~"openshift-(sdn|ovn-kubernetes|ingress|monitoring|image-registry|logging)"} and on (node) kube_node_role{role="infra"}) by (container, namespace)
    metricName: containerMemory-AggregatedInfra
    instant: True

  # Node metrics
  - query: (sum(irate(node_cpu_seconds_total[2m])) by (mode,instance) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")) > 0
    metricName: nodeCPU-Masters
    instant: True

  - query: max(max_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
    metricName: maxCPU-Masters
    instant: true

  - query: avg(avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
    metricName: nodeMemory-Masters
    instant: true

  - query: (avg((sum(irate(node_cpu_seconds_total[2m])) by (mode,instance) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))) by (mode)) > 0
    metricName: nodeCPU-AggregatedWorkers
    instant: True

  - query: (avg((sum(irate(node_cpu_seconds_total[2m])) by (mode,instance) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))) by (mode)) > 0
    metricName: nodeCPU-AggregatedInfra
    instant: True

  - query: avg(node_memory_MemAvailable_bytes) by (instance) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
    metricName: nodeMemoryAvailable-Masters
  - query: avg(avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
    metricName: nodeMemory-Masters
    instant: true

  - query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
    metricName: maxMemory-Masters
    instant: true

  - query: avg(node_memory_MemAvailable_bytes and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
    metricName: nodeMemoryAvailable-AggregatedWorkers
    instant: True

  - query: max(max_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
    metricName: maxCPU-Workers
    instant: true

  - query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
    metricName: maxMemory-Workers
    instant: true

  - query: avg(node_memory_MemAvailable_bytes and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))
    metricName: nodeMemoryAvailable-AggregatedInfra
    instant: True

  - query: avg(node_memory_Active_bytes) by (instance) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
    metricName: nodeMemoryActive-Masters
    instant: True

  - query: avg(node_memory_Active_bytes and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
    metricName: nodeMemoryActive-AggregatedWorkers
    instant: True

  - query: avg(avg(node_memory_Active_bytes) by (instance) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))
    metricName: nodeMemoryActive-AggregatedInfra

  - query: avg(node_memory_Cached_bytes) by (instance) + avg(node_memory_Buffers_bytes) by (instance) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
    metricName: nodeMemoryCached+nodeMemoryBuffers-Masters

  - query: avg(node_memory_Cached_bytes + node_memory_Buffers_bytes and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
    metricName: nodeMemoryCached+nodeMemoryBuffers-AggregatedWorkers

  - query: avg(node_memory_Cached_bytes + node_memory_Buffers_bytes and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))
    metricName: nodeMemoryCached+nodeMemoryBuffers-AggregatedInfra

  - query: irate(node_network_receive_bytes_total{device=~"^(ens|eth|bond|team).*"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
    metricName: rxNetworkBytes-Masters

  - query: avg(irate(node_network_receive_bytes_total{device=~"^(ens|eth|bond|team).*"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (device)
    metricName: rxNetworkBytes-AggregatedWorkers

  - query: avg(irate(node_network_receive_bytes_total{device=~"^(ens|eth|bond|team).*"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (device)
    metricName: rxNetworkBytes-AggregatedInfra

  - query: irate(node_network_transmit_bytes_total{device=~"^(ens|eth|bond|team).*"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
    metricName: txNetworkBytes-Masters

  - query: avg(irate(node_network_transmit_bytes_total{device=~"^(ens|eth|bond|team).*"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (device)
    metricName: txNetworkBytes-AggregatedWorkers

  - query: avg(irate(node_network_transmit_bytes_total{device=~"^(ens|eth|bond|team).*"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (device)
    metricName: txNetworkBytes-AggregatedInfra

  - query: rate(node_disk_written_bytes_total{device!~"^(dm|rb).*"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
    metricName: nodeDiskWrittenBytes-Masters

  - query: avg(rate(node_disk_written_bytes_total{device!~"^(dm|rb).*"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (device)
    metricName: nodeDiskWrittenBytes-AggregatedWorkers

  - query: avg(rate(node_disk_written_bytes_total{device!~"^(dm|rb).*"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (device)
    metricName: nodeDiskWrittenBytes-AggregatedInfra

  - query: rate(node_disk_read_bytes_total{device!~"^(dm|rb).*"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
    metricName: nodeDiskReadBytes-Masters

  - query: avg(rate(node_disk_read_bytes_total{device!~"^(dm|rb).*"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (device)
    metricName: nodeDiskReadBytes-AggregatedWorkers

  - query: avg(rate(node_disk_read_bytes_total{device!~"^(dm|rb).*"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (device)
    metricName: nodeDiskReadBytes-AggregatedInfra
    instant: True

  # Etcd metrics
  - query: sum(rate(etcd_server_leader_changes_seen_total[2m]))
    metricName: etcdLeaderChangesRate
    instant: True

  - query: etcd_server_is_leader > 0
    metricName: etcdServerIsLeader
    instant: True

  - query: histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket[2m]))
    metricName: 99thEtcdDiskBackendCommitDurationSeconds
    instant: True

  - query: histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[2m]))
    metricName: 99thEtcdDiskWalFsyncDurationSeconds
    instant: True

  - query: histogram_quantile(0.99, rate(etcd_network_peer_round_trip_time_seconds_bucket[5m]))
    metricName: 99thEtcdRoundTripTimeSeconds

  - query: etcd_mvcc_db_total_size_in_bytes
    metricName: etcdDBPhysicalSizeBytes

  - query: etcd_mvcc_db_total_size_in_use_in_bytes
    metricName: etcdDBLogicalSizeBytes
    instant: True

  - query: sum by (cluster_version)(etcd_cluster_version)
    metricName: etcdVersion

@@ -135,83 +128,16 @@ metrics:
  - query: sum(rate(etcd_object_counts{}[5m])) by (resource) > 0
    metricName: etcdObjectCount
    instant: True

  - query: histogram_quantile(0.99,sum(rate(etcd_request_duration_seconds_bucket[2m])) by (le,operation,apiserver)) > 0
    metricName: P99APIEtcdRequestLatency

  - query: sum(grpc_server_started_total{namespace="openshift-etcd",grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{namespace="openshift-etcd",grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"})
    metricName: ActiveWatchStreams

  - query: sum(grpc_server_started_total{namespace="openshift-etcd",grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{namespace="openshift-etcd",grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"})
    metricName: ActiveLeaseStreams

  - query: sum(rate(etcd_debugging_snap_save_total_duration_seconds_sum{namespace="openshift-etcd"}[2m]))
    metricName: snapshotSaveLatency

  - query: sum(rate(etcd_server_heartbeat_send_failures_total{namespace="openshift-etcd"}[2m]))
    metricName: HeartBeatFailures

  - query: sum(rate(etcd_server_health_failures{namespace="openshift-etcd"}[2m]))
    metricName: HealthFailures

  - query: sum(rate(etcd_server_slow_apply_total{namespace="openshift-etcd"}[2m]))
    metricName: SlowApplies

  - query: sum(rate(etcd_server_slow_read_indexes_total{namespace="openshift-etcd"}[2m]))
    metricName: SlowIndexRead

  - query: sum(etcd_server_proposals_pending)
    metricName: PendingProposals

  - query: histogram_quantile(1.0, sum(rate(etcd_debugging_mvcc_db_compaction_pause_duration_milliseconds_bucket[1m])) by (le, instance))
    metricName: CompactionMaxPause
    instant: True

  - query: sum by (instance) (apiserver_storage_objects)
    metricName: etcdTotalObjectCount
    instant: True

  - query: topk(500, max by(resource) (apiserver_storage_objects))
    metricName: etcdTopObectCount

  # Cluster metrics
  - query: count(kube_namespace_created)
    metricName: namespaceCount

  - query: sum(kube_pod_status_phase{}) by (phase)
    metricName: podStatusCount

  - query: count(kube_secret_info{})
    metricName: secretCount

  - query: count(kube_deployment_labels{})
    metricName: deploymentCount

  - query: count(kube_configmap_info{})
    metricName: configmapCount

  - query: count(kube_service_info{})
    metricName: serviceCount

  - query: kube_node_role
    metricName: nodeRoles
    instant: true

  - query: sum(kube_node_status_condition{status="true"}) by (condition)
    metricName: nodeStatus

  - query: (sum(rate(container_fs_writes_bytes_total{container!="",device!~".+dm.+"}[5m])) by (device, container, node) and on (node) kube_node_role{role="master"}) > 0
    metricName: containerDiskUsage

  - query: cluster_version{type="completed"}
    metricName: clusterVersion
    instant: true

  # Golang metrics
  - query: go_memstats_heap_alloc_bytes{job=~"apiserver|api|etcd"}
    metricName: goHeapAllocBytes

  - query: go_memstats_heap_inuse_bytes{job=~"apiserver|api|etcd"}
    metricName: goHeapInuseBytes

  - query: go_gc_duration_seconds{job=~"apiserver|api|etcd",quantile="1"}
    metricName: goGCDurationSeconds
    instant: True
config/metrics-report.yaml (new file, 248 lines)

@@ -0,0 +1,248 @@
metrics:

  # API server
  - query: sum(apiserver_current_inflight_requests{}) by (request_kind) > 0
    metricName: APIInflightRequests
    instant: true

  # Kubelet & CRI-O

  # Average and max of the CPU usage from all workers' kubelet
  - query: avg(avg_over_time(irate(process_cpu_seconds_total{service="kubelet",job="kubelet"}[2m])[.elapsed:]) and on (node) kube_node_role{role="worker"})
    metricName: cpu-kubelet
    instant: true

  - query: max(max_over_time(irate(process_cpu_seconds_total{service="kubelet",job="kubelet"}[2m])[.elapsed:]) and on (node) kube_node_role{role="worker"})
    metricName: max-cpu-kubelet
    instant: true

  # Average of the memory usage from all workers' kubelet
  - query: avg(avg_over_time(process_resident_memory_bytes{service="kubelet",job="kubelet"}[.elapsed:]) and on (node) kube_node_role{role="worker"})
    metricName: memory-kubelet
    instant: true

  # Max of the memory usage from all workers' kubelet
  - query: max(max_over_time(process_resident_memory_bytes{service="kubelet",job="kubelet"}[.elapsed:]) and on (node) kube_node_role{role="worker"})
    metricName: max-memory-kubelet
    instant: true

  - query: max_over_time(sum(process_resident_memory_bytes{service="kubelet",job="kubelet"} and on (node) kube_node_role{role="worker"})[.elapsed:])
    metricName: max-memory-sum-kubelet
    instant: true

  # Average and max of the CPU usage from all workers' CRI-O
  - query: avg(avg_over_time(irate(process_cpu_seconds_total{service="kubelet",job="crio"}[2m])[.elapsed:]) and on (node) kube_node_role{role="worker"})
    metricName: cpu-crio
    instant: true

  - query: max(max_over_time(irate(process_cpu_seconds_total{service="kubelet",job="crio"}[2m])[.elapsed:]) and on (node) kube_node_role{role="worker"})
    metricName: max-cpu-crio
    instant: true

  # Average of the memory usage from all workers' CRI-O
  - query: avg(avg_over_time(process_resident_memory_bytes{service="kubelet",job="crio"}[.elapsed:]) and on (node) kube_node_role{role="worker"})
    metricName: memory-crio
    instant: true

  # Max of the memory usage from all workers' CRI-O
  - query: max(max_over_time(process_resident_memory_bytes{service="kubelet",job="crio"}[.elapsed:]) and on (node) kube_node_role{role="worker"})
    metricName: max-memory-crio
    instant: true

  # Etcd

  - query: avg(avg_over_time(histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket[2m]))[.elapsed:]))
    metricName: 99thEtcdDiskBackendCommit
    instant: true

  - query: avg(avg_over_time(histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[2m]))[.elapsed:]))
    metricName: 99thEtcdDiskWalFsync
    instant: true

  - query: avg(avg_over_time(histogram_quantile(0.99, irate(etcd_network_peer_round_trip_time_seconds_bucket[2m]))[.elapsed:]))
    metricName: 99thEtcdRoundTripTime
    instant: true

  # Control-plane

  - query: avg(avg_over_time(topk(1, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-kube-controller-manager"}[2m])) by (pod))[.elapsed:]))
    metricName: cpu-kube-controller-manager
    instant: true

  - query: max(max_over_time(topk(1, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-kube-controller-manager"}[2m])) by (pod))[.elapsed:]))
    metricName: max-cpu-kube-controller-manager
    instant: true

  - query: avg(avg_over_time(topk(1, sum(container_memory_rss{name!="", namespace="openshift-kube-controller-manager"}) by (pod))[.elapsed:]))
    metricName: memory-kube-controller-manager
    instant: true

  - query: max(max_over_time(topk(1, sum(container_memory_rss{name!="", namespace="openshift-kube-controller-manager"}) by (pod))[.elapsed:]))
    metricName: max-memory-kube-controller-manager
    instant: true

  - query: avg(avg_over_time(topk(3, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-kube-apiserver"}[2m])) by (pod))[.elapsed:]))
    metricName: cpu-kube-apiserver
    instant: true

  - query: avg(avg_over_time(topk(3, sum(container_memory_rss{name!="", namespace="openshift-kube-apiserver"}) by (pod))[.elapsed:]))
    metricName: memory-kube-apiserver
    instant: true

  - query: avg(avg_over_time(topk(3, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-apiserver"}[2m])) by (pod))[.elapsed:]))
    metricName: cpu-openshift-apiserver
    instant: true

  - query: avg(avg_over_time(topk(3, sum(container_memory_rss{name!="", namespace="openshift-apiserver"}) by (pod))[.elapsed:]))
    metricName: memory-openshift-apiserver
    instant: true

  - query: avg(avg_over_time(topk(3, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-etcd"}[2m])) by (pod))[.elapsed:]))
    metricName: cpu-etcd
    instant: true

  - query: avg(avg_over_time(topk(3,sum(container_memory_rss{name!="", namespace="openshift-etcd"}) by (pod))[.elapsed:]))
    metricName: memory-etcd
    instant: true

  - query: avg(avg_over_time(topk(1, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-controller-manager"}[2m])) by (pod))[.elapsed:]))
    metricName: cpu-openshift-controller-manager
    instant: true

  - query: avg(avg_over_time(topk(1, sum(container_memory_rss{name!="", namespace="openshift-controller-manager"}) by (pod))[.elapsed:]))
    metricName: memory-openshift-controller-manager
    instant: true

  # multus

  - query: avg(avg_over_time(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-multus", pod=~"(multus).+", container!="POD"}[2m])[.elapsed:])) by (container)
    metricName: cpu-multus
    instant: true

  - query: avg(avg_over_time(container_memory_rss{name!="", namespace="openshift-multus", pod=~"(multus).+", container!="POD"}[.elapsed:])) by (container)
    metricName: memory-multus
    instant: true

  # OVNKubernetes - standard & IC

  - query: avg(avg_over_time(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-ovn-kubernetes", pod=~"(ovnkube-master|ovnkube-control-plane).+", container!="POD"}[2m])[.elapsed:])) by (container)
    metricName: cpu-ovn-control-plane
    instant: true

  - query: avg(avg_over_time(container_memory_rss{name!="", namespace="openshift-ovn-kubernetes", pod=~"(ovnkube-master|ovnkube-control-plane).+", container!="POD"}[.elapsed:])) by (container)
    metricName: memory-ovn-control-plane
    instant: true

  - query: avg(avg_over_time(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-ovn-kubernetes", pod=~"ovnkube-node.+", container!="POD"}[2m])[.elapsed:])) by (container)
    metricName: cpu-ovnkube-node
    instant: true

  - query: avg(avg_over_time(container_memory_rss{name!="", namespace="openshift-ovn-kubernetes", pod=~"ovnkube-node.+", container!="POD"}[.elapsed:])) by (container)
    metricName: memory-ovnkube-node
    instant: true

  # Nodes

  - query: avg(avg_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
    metricName: cpu-masters
    instant: true

  - query: avg(avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
    metricName: memory-masters
    instant: true

  - query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
    metricName: max-memory-masters
    instant: true

  - query: avg(avg_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
    metricName: cpu-workers
    instant: true

  - query: max(max_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
    metricName: max-cpu-workers
    instant: true

  - query: avg(avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
    metricName: memory-workers
    instant: true

  - query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
    metricName: max-memory-workers
    instant: true

  - query: sum( (node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)") )
    metricName: memory-sum-workers
    instant: true

  - query: avg(avg_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
    metricName: cpu-infra
    instant: true

  - query: max(max_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
    metricName: max-cpu-infra
    instant: true

  - query: avg(avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))
    metricName: memory-infra
    instant: true

  - query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))
    metricName: max-memory-infra
    instant: true

  - query: max_over_time(sum((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))[.elapsed:])
    metricName: max-memory-sum-infra
    instant: true

  # Monitoring and ingress

  - query: avg(avg_over_time(sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-monitoring", pod=~"prometheus-k8s.+"}[2m])) by (pod)[.elapsed:]))
    metricName: cpu-prometheus
    instant: true

  - query: max(max_over_time(sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-monitoring", pod=~"prometheus-k8s.+"}[2m])) by (pod)[.elapsed:]))
    metricName: max-cpu-prometheus
    instant: true

  - query: avg(avg_over_time(sum(container_memory_rss{name!="", namespace="openshift-monitoring", pod=~"prometheus-k8s.+"}) by (pod)[.elapsed:]))
    metricName: memory-prometheus
    instant: true

  - query: max(max_over_time(sum(container_memory_rss{name!="", namespace="openshift-monitoring", pod=~"prometheus-k8s.+"}) by (pod)[.elapsed:]))
    metricName: max-memory-prometheus
    instant: true

  - query: avg(avg_over_time(sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-ingress", pod=~"router-default.+"}[2m])) by (pod)[.elapsed:]))
    metricName: cpu-router
    instant: true

  - query: avg(avg_over_time(sum(container_memory_rss{name!="", namespace="openshift-ingress", pod=~"router-default.+"}) by (pod)[.elapsed:]))
    metricName: memory-router
    instant: true

  # Cluster

  - query: avg_over_time(cluster:memory_usage:ratio[.elapsed:])
    metricName: memory-cluster-usage-ratio
    instant: true

  - query: avg_over_time(cluster:node_cpu:ratio[.elapsed:])
    metricName: cpu-cluster-usage-ratio
    instant: true

  # Retain the raw CPU seconds totals for comparison
  - query: sum(node_cpu_seconds_total and on (instance) label_replace(kube_node_role{role="worker",role!="infra"}, "instance", "$1", "node", "(.+)")) by (mode)
    metricName: nodeCPUSeconds-Workers
    instant: true

  - query: sum(node_cpu_seconds_total and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")) by (mode)
    metricName: nodeCPUSeconds-Masters
    instant: true

  - query: sum(node_cpu_seconds_total and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (mode)
    metricName: nodeCPUSeconds-Infra
    instant: true
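Every entry in these profiles follows the same three-field shape: a PromQL expression, the metricName under which the result is stored, and an optional instant flag that records a single sample instead of a range. The `[.elapsed:]` token is not standard PromQL; it appears to be a placeholder that krkn substitutes with the elapsed duration of the chaos run before issuing the query, turning the expression into a subquery over the whole run. A minimal sketch of a custom entry, assuming that substitution:

```yaml
metrics:
  # hypothetical entry: average non-idle worker CPU over the whole run;
  # "[.elapsed:]" is assumed to expand to the run duration at query time
  - query: avg(avg_over_time(sum(irate(node_cpu_seconds_total{mode!="idle"}[2m])) by (instance)[.elapsed:]))
    metricName: example-cpu-average
    instant: true
```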
@@ -1,13 +1,7 @@
metrics:
  # API server
  - query: histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{apiserver="kube-apiserver", verb!~"WATCH", subresource!="log"}[2m])) by (verb,resource,subresource,instance,le)) > 0
    metricName: API99thLatency

  - query: sum(irate(apiserver_request_total{apiserver="kube-apiserver",verb!="WATCH",subresource!="log"}[2m])) by (verb,instance,resource,code) > 0
    metricName: APIRequestRate

  - query: sum(apiserver_current_inflight_requests{}) by (request_kind) > 0
    metricName: APIInflightRequests
  - query: irate(apiserver_request_total{verb="POST", resource="pods", subresource="binding",code="201"}[2m]) > 0
    metricName: schedulingThroughput

  # Containers & pod metrics
  - query: sum(irate(container_cpu_usage_seconds_total{name!="",namespace=~"openshift-(etcd|oauth-apiserver|.*apiserver|ovn-kubernetes|sdn|ingress|authentication|.*controller-manager|.*scheduler|monitoring|logging|image-registry)"}[2m]) * 100) by (pod, namespace, node)

@@ -33,8 +27,17 @@ metrics:
    metricName: crioMemory

  # Node metrics
  - query: sum(irate(node_cpu_seconds_total[2m])) by (mode,instance) > 0
    metricName: nodeCPU
  - query: (sum(irate(node_cpu_seconds_total[2m])) by (mode,instance) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")) > 0
    metricName: nodeCPU-Masters

  - query: (avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
    metricName: nodeMemory-Masters

  - query: (sum(irate(node_cpu_seconds_total[2m])) by (mode,instance) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) > 0
    metricName: nodeCPU-Workers

  - query: (avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[2m:]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
    metricName: nodeMemory-Workers

  - query: avg(node_memory_MemAvailable_bytes) by (instance)
    metricName: nodeMemoryAvailable

@@ -42,6 +45,9 @@ metrics:
  - query: avg(node_memory_Active_bytes) by (instance)
    metricName: nodeMemoryActive

  - query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
    metricName: maxMemory-Masters

  - query: avg(node_memory_Cached_bytes) by (instance) + avg(node_memory_Buffers_bytes) by (instance)
    metricName: nodeMemoryCached+nodeMemoryBuffers

@@ -84,34 +90,4 @@ metrics:
  - query: sum by (cluster_version)(etcd_cluster_version)
    metricName: etcdVersion
    instant: true

  # Cluster metrics
  - query: count(kube_namespace_created)
    metricName: namespaceCount

  - query: sum(kube_pod_status_phase{}) by (phase)
    metricName: podStatusCount

  - query: count(kube_secret_info{})
    metricName: secretCount

  - query: count(kube_deployment_labels{})
    metricName: deploymentCount

  - query: count(kube_configmap_info{})
    metricName: configmapCount

  - query: count(kube_service_info{})
    metricName: serviceCount

  - query: kube_node_role
    metricName: nodeRoles
    instant: true

  - query: sum(kube_node_status_condition{status="true"}) by (condition)
    metricName: nodeStatus

  - query: cluster_version{type="completed"}
    metricName: clusterVersion
    instant: true
@@ -1,17 +1,35 @@
# oc build
-FROM golang:1.22.5 AS oc-build
+FROM golang:1.24.9 AS oc-build
RUN apt-get update && apt-get install -y --no-install-recommends libkrb5-dev
WORKDIR /tmp
# oc build
RUN git clone --branch release-4.18 https://github.com/openshift/oc.git
WORKDIR /tmp/oc
-RUN go mod edit -go 1.22.5 &&\
-    go get github.com/moby/buildkit@v0.12.5 &&\
-    go get github.com/containerd/containerd@v1.7.11&&\
-    go get github.com/docker/docker@v25.0.6&&\
-    go get github.com/opencontainers/runc@v1.1.14&&\
+RUN go mod edit -go 1.24.9 &&\
+    go mod edit -require github.com/moby/buildkit@v0.12.5 &&\
+    go mod edit -require github.com/containerd/containerd@v1.7.29&&\
+    go mod edit -require github.com/docker/docker@v27.5.1+incompatible&&\
+    go mod edit -require github.com/opencontainers/runc@v1.2.8&&\
+    go mod edit -require github.com/go-git/go-git/v5@v5.13.0&&\
+    go mod edit -require github.com/opencontainers/selinux@v1.13.0&&\
+    go mod edit -require github.com/ulikunitz/xz@v0.5.15&&\
+    go mod edit -require golang.org/x/net@v0.38.0&&\
+    go mod edit -require github.com/containerd/containerd@v1.7.27&&\
+    go mod edit -require golang.org/x/oauth2@v0.27.0&&\
+    go mod edit -require golang.org/x/crypto@v0.35.0&&\
+    go mod edit -replace github.com/containerd/containerd@v1.7.27=github.com/containerd/containerd@v1.7.29&&\
    go mod tidy && go mod vendor

RUN make GO_REQUIRED_MIN_VERSION:= oc

+# virtctl build
+WORKDIR /tmp
+RUN git clone https://github.com/kubevirt/kubevirt.git
+WORKDIR /tmp/kubevirt
+RUN go mod edit -go 1.24.9 &&\
+    go work use &&\
+    go build -o virtctl ./cmd/virtctl/

FROM fedora:40
ARG PR_NUMBER
ARG TAG

@@ -20,23 +38,23 @@ RUN dnf update -y

ENV KUBECONFIG /home/krkn/.kube/config

# install kubectl
RUN curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" &&\
    cp kubectl /usr/local/bin/kubectl && chmod +x /usr/local/bin/kubectl &&\
    cp kubectl /usr/bin/kubectl && chmod +x /usr/bin/kubectl

# This overwrites any existing configuration in /etc/yum.repos.d/kubernetes.repo
RUN dnf update && dnf install -y --setopt=install_weak_deps=False \
-    git python39 jq yq gettext wget which &&\
+    git python3.11 jq yq gettext wget which ipmitool openssh-server &&\
    dnf clean all

# copy oc client binary from oc-build image
COPY --from=oc-build /tmp/oc/oc /usr/bin/oc
+COPY --from=oc-build /tmp/kubevirt/virtctl /usr/bin/virtctl

# krkn build
RUN git clone https://github.com/krkn-chaos/krkn.git /home/krkn/kraken && \
    mkdir -p /home/krkn/.kube

+RUN mkdir -p /home/krkn/.ssh && \
+    chmod 700 /home/krkn/.ssh

WORKDIR /home/krkn/kraken

# default behaviour will be to build main

@@ -45,16 +63,28 @@ RUN if [ -n "$PR_NUMBER" ]; then git fetch origin pull/${PR_NUMBER}/head:pr-${PR
# if it is a TAG trigger checkout the tag
RUN if [ -n "$TAG" ]; then git checkout "$TAG";fi

-RUN python3.9 -m ensurepip
-RUN pip3.9 install -r requirements.txt
-RUN pip3.9 install jsonschema
+RUN python3.11 -m ensurepip --upgrade --default-pip
+RUN python3.11 -m pip install --upgrade pip setuptools==78.1.1

-LABEL krknctl.title="Krkn Base Image"
-LABEL krknctl.description="This is the krkn base image."
-LABEL krknctl.input_fields='$KRKNCTL_INPUT'
+# removes the vulnerable versions of setuptools and pip
+RUN rm -rf "$(pip cache dir)"
+RUN rm -rf /tmp/*
+RUN rm -rf /usr/local/lib/python3.11/ensurepip/_bundled
+RUN pip3.11 install -r requirements.txt
+RUN pip3.11 install jsonschema

+LABEL krknctl.title.global="Krkn Base Image"
+LABEL krknctl.description.global="This is the krkn base image."
+LABEL krknctl.input_fields.global='$KRKNCTL_INPUT'

+# SSH setup script
+RUN chmod +x /home/krkn/kraken/containers/setup-ssh.sh

+# Main entrypoint script
+RUN chmod +x /home/krkn/kraken/containers/entrypoint.sh

RUN chown -R krkn:krkn /home/krkn && chmod 755 /home/krkn
USER krkn
-ENTRYPOINT ["python3.9", "run_kraken.py"]
+ENTRYPOINT ["/bin/bash", "/home/krkn/kraken/containers/entrypoint.sh"]
+CMD ["--config=config/config.yaml"]
@@ -6,7 +6,7 @@ Container image gets automatically built by quay.io at [Kraken image](https://qu

### Run containerized version

-Refer [instructions](https://github.com/redhat-chaos/krkn/blob/main/docs/installation.md#run-containerized-version) for information on how to run the containerized version of kraken.
+Refer [instructions](https://krkn-chaos.dev/docs/installation/) for information on how to run the containerized version of kraken.

### Run Custom Kraken Image
containers/entrypoint.sh (new file, 8 lines)

@@ -0,0 +1,8 @@
#!/bin/bash
set -e
# Run SSH setup
./containers/setup-ssh.sh
# Change to kraken directory

# Execute the main command
exec python3.9 run_kraken.py "$@"
@@ -31,6 +31,24 @@
    "separator": ",",
    "required": "false"
  },
  {
    "name": "ssh-public-key",
    "short_description": "Krkn ssh public key path",
    "description": "Sets the path where krkn will search for the ssh public key (in container)",
    "variable": "KRKN_SSH_PUBLIC",
    "type": "string",
    "default": "",
    "required": "false"
  },
  {
    "name": "ssh-private-key",
    "short_description": "Krkn ssh private key path",
    "description": "Sets the path where krkn will search for the ssh private key (in container)",
    "variable": "KRKN_SSH_PRIVATE",
    "type": "string",
    "default": "",
    "required": "false"
  },
  {
    "name": "krkn-kubeconfig",
    "short_description": "Krkn kubeconfig path",

@@ -67,6 +85,24 @@
    "default": "False",
    "required": "false"
  },
  {
    "name": "prometheus-url",
    "short_description": "Prometheus url",
    "description": "Prometheus url for when running on kubernetes",
    "variable": "PROMETHEUS_URL",
    "type": "string",
    "default": "",
    "required": "false"
  },
  {
    "name": "prometheus-token",
    "short_description": "Prometheus bearer token",
    "description": "Prometheus bearer token for prometheus url authentication",
    "variable": "PROMETHEUS_TOKEN",
    "type": "string",
    "default": "",
    "required": "false"
  },
  {
    "name": "uuid",
    "short_description": "Sets krkn run uuid",

@@ -101,12 +137,21 @@
  {
    "name": "alerts-path",
    "short_description": "Cluster alerts path file (in container)",
-    "description": "Enables cluster alerts check",
+    "description": "Allows to specify a different alert file path",
    "variable": "ALERTS_PATH",
    "type": "string",
    "default": "config/alerts.yaml",
    "required": "false"
  },
  {
    "name": "metrics-path",
    "short_description": "Cluster metrics path file (in container)",
    "description": "Allows to specify a different metrics file path",
    "variable": "METRICS_PATH",
    "type": "string",
    "default": "config/metrics-aggregated.yaml",
    "required": "false"
  },
  {
    "name": "enable-es",
    "short_description": "Enables elastic search data collection",

@@ -125,7 +170,6 @@
    "variable": "ES_SERVER",
    "type": "string",
    "default": "http://0.0.0.0",
-    "validator": "^(http|https):\/\/.*",
    "required": "false"
  },
  {

@@ -166,28 +210,6 @@
    "default": "False",
    "required": "false"
  },
-  {
-    "name": "es-collect-metrics",
-    "short_description": "Enables metrics collection on elastic search",
-    "description": "Enables metrics collection on elastic search",
-    "variable": "ES_COLLECT_METRICS",
-    "type": "enum",
-    "allowed_values": "True,False",
-    "separator": ",",
-    "default": "False",
-    "required": "false"
-  },
-  {
-    "name": "es-collect-alerts",
-    "short_description": "Enables alerts collection on elastic search",
-    "description": "Enables alerts collection on elastic search",
-    "variable": "ES_COLLECT_ALERTS",
-    "type": "enum",
-    "allowed_values": "True,False",
-    "separator": ",",
-    "default": "False",
-    "required": "false"
-  },
  {
    "name": "es-metrics-index",
    "short_description": "Elasticsearch metrics index",

@@ -381,6 +403,142 @@
    "default": "True",
    "required": "false"
  },
  {
    "name": "health-check-interval",
    "short_description": "Health check interval",
    "description": "How often to check the health check urls",
    "variable": "HEALTH_CHECK_INTERVAL",
    "type": "number",
    "default": "2",
    "required": "false"
  },
  {
    "name": "health-check-url",
    "short_description": "Health check url",
    "description": "Url to check the health of",
    "variable": "HEALTH_CHECK_URL",
    "type": "string",
    "default": "",
    "required": "false"
  },
  {
    "name": "health-check-auth",
    "short_description": "Health check authentication tuple",
    "description": "Authentication tuple to authenticate into the health check URL",
    "variable": "HEALTH_CHECK_AUTH",
    "type": "string",
    "default": "",
    "required": "false"
  },
  {
    "name": "health-check-bearer-token",
    "short_description": "Health check bearer token",
    "description": "Bearer token to authenticate into the health check URL",
    "variable": "HEALTH_CHECK_BEARER_TOKEN",
    "type": "string",
    "default": "",
    "required": "false"
  },
  {
    "name": "health-check-exit",
    "short_description": "Health check exit on failure",
    "description": "Exit on failure when the health check URL is not able to connect",
    "variable": "HEALTH_CHECK_EXIT_ON_FAILURE",
    "type": "enum",
    "allowed_values": "True,False",
    "separator": ",",
    "default": "False",
    "required": "false"
  },
  {
    "name": "health-check-verify",
    "short_description": "SSL verification of the health check url",
    "description": "SSL verification to authenticate into the health check URL",
    "variable": "HEALTH_CHECK_VERIFY",
    "type": "enum",
    "allowed_values": "True,False",
    "separator": ",",
    "default": "False",
    "required": "false"
  },
  {
    "name": "kubevirt-check-interval",
    "short_description": "KubeVirt check interval",
    "description": "How often to check the KubeVirt VMs ssh status",
    "variable": "KUBE_VIRT_CHECK_INTERVAL",
    "type": "number",
    "default": "2",
    "required": "false"
  },
  {
    "name": "kubevirt-namespace",
    "short_description": "KubeVirt namespace to check",
    "description": "KubeVirt namespace to check the health of",
    "variable": "KUBE_VIRT_NAMESPACE",
    "type": "string",
    "default": "",
    "required": "false"
  },
  {
    "name": "kubevirt-name",
    "short_description": "KubeVirt regex names to watch",
    "description": "KubeVirt regex names to check VMs",
    "variable": "KUBE_VIRT_NAME",
    "type": "string",
    "default": "",
    "required": "false"
  },
  {
    "name": "kubevirt-only-failures",
    "short_description": "KubeVirt checks only report if failure occurs",
    "description": "KubeVirt checks only report if a failure occurs",
    "variable": "KUBE_VIRT_FAILURES",
    "type": "enum",
    "allowed_values": "True,False,true,false",
    "separator": ",",
    "default": "False",
    "required": "false"
  },
  {
    "name": "kubevirt-disconnected",
    "short_description": "KubeVirt checks in disconnected mode",
    "description": "KubeVirt checks in disconnected mode, bypassing the cluster's API",
    "variable": "KUBE_VIRT_DISCONNECTED",
    "type": "enum",
    "allowed_values": "True,False,true,false",
    "separator": ",",
    "default": "False",
    "required": "false"
  },
  {
    "name": "kubevirt-ssh-node",
    "short_description": "KubeVirt node to ssh from",
    "description": "KubeVirt node to ssh from; should be available for the whole chaos run",
    "variable": "KUBE_VIRT_SSH_NODE",
    "type": "string",
    "default": "",
    "required": "false"
  },
  {
    "name": "kubevirt-exit-on-failure",
    "short_description": "KubeVirt fails if there are failed vms at the end of the run",
    "description": "KubeVirt fails the run if vms still have a failed status",
    "variable": "KUBE_VIRT_EXIT_ON_FAIL",
    "type": "enum",
    "allowed_values": "True,False,true,false",
    "separator": ",",
    "default": "False",
    "required": "false"
  },
  {
    "name": "kubevirt-node-node",
    "short_description": "KubeVirt node to filter vms on",
    "description": "Only track VMs in KubeVirt on the given node name",
    "variable": "KUBE_VIRT_NODE_NAME",
    "type": "string",
    "default": "",
    "required": "false"
  },
  {
    "name": "krkn-debug",
    "short_description": "Krkn debug mode",

@@ -392,5 +550,4 @@
    "default": "False",
    "required": "false"
  }
]

]
containers/setup-ssh.sh (new file, 73 lines)

@@ -0,0 +1,73 @@
#!/bin/bash
|
||||
# Setup SSH key if mounted
|
||||
# Support multiple mount locations
|
||||
MOUNTED_PRIVATE_KEY_ALT="/secrets/id_rsa"
|
||||
MOUNTED_PRIVATE_KEY="/home/krkn/.ssh/id_rsa"
|
||||
MOUNTED_PUBLIC_KEY="/home/krkn/.ssh/id_rsa.pub"
|
||||
WORKING_KEY="/home/krkn/.ssh/id_rsa.key"
|
||||
|
||||
# Determine which source to use
|
||||
SOURCE_KEY=""
|
||||
if [ -f "$MOUNTED_PRIVATE_KEY_ALT" ]; then
|
||||
SOURCE_KEY="$MOUNTED_PRIVATE_KEY_ALT"
|
||||
echo "Found SSH key at alternative location: $SOURCE_KEY"
|
||||
elif [ -f "$MOUNTED_PRIVATE_KEY" ]; then
|
||||
SOURCE_KEY="$MOUNTED_PRIVATE_KEY"
|
||||
echo "Found SSH key at default location: $SOURCE_KEY"
|
||||
fi
|
||||
|
||||
# Setup SSH private key and create config for outbound connections
|
||||
if [ -n "$SOURCE_KEY" ]; then
|
||||
echo "Setting up SSH private key from: $SOURCE_KEY"
|
||||
|
||||
# Check current permissions and ownership
|
||||
ls -la "$SOURCE_KEY"
|
||||
|
||||
# Since the mounted key might be owned by root and we run as krkn user,
|
||||
# we cannot modify it directly. Copy to a new location we can control.
|
||||
echo "Copying SSH key to working location: $WORKING_KEY"
|
||||
|
||||
# Try to copy - if readable by anyone, this will work
|
||||
if cp "$SOURCE_KEY" "$WORKING_KEY" 2>/dev/null || cat "$SOURCE_KEY" > "$WORKING_KEY" 2>/dev/null; then
|
||||
chmod 600 "$WORKING_KEY"
|
||||
echo "SSH key copied successfully"
|
||||
ls -la "$WORKING_KEY"
|
||||
|
||||
# Verify the key is readable
|
||||
if ssh-keygen -y -f "$WORKING_KEY" > /dev/null 2>&1; then
|
||||
echo "SSH private key verified successfully"
|
||||
else
|
||||
echo "Warning: SSH key verification failed, but continuing anyway"
|
||||
fi
|
||||
|
||||
# Create SSH config to use the working key
|
||||
cat > /home/krkn/.ssh/config <<EOF
|
||||
Host *
|
||||
IdentityFile $WORKING_KEY
|
||||
StrictHostKeyChecking no
|
||||
UserKnownHostsFile /dev/null
|
||||
EOF
|
||||
chmod 600 /home/krkn/.ssh/config
|
||||
echo "SSH config created with default identity: $WORKING_KEY"
|
||||
else
|
||||
echo "ERROR: Cannot read SSH key at $SOURCE_KEY"
|
||||
echo "Key is owned by: $(stat -c '%U:%G' "$SOURCE_KEY" 2>/dev/null || stat -f '%Su:%Sg' "$SOURCE_KEY" 2>/dev/null)"
|
||||
echo ""
|
||||
echo "Solutions:"
|
||||
echo "1. Mount with world-readable permissions (less secure): chmod 644 /path/to/key"
|
||||
echo "2. Mount to /secrets/id_rsa instead of /home/krkn/.ssh/id_rsa"
|
||||
echo "3. Change ownership on host: chown \$(id -u):\$(id -g) /path/to/key"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Setup SSH public key if mounted (for inbound server access)
|
||||
if [ -f "$MOUNTED_PUBLIC_KEY" ]; then
|
||||
echo "SSH public key already present at $MOUNTED_PUBLIC_KEY"
|
||||
# Try to fix permissions (will fail silently if file is mounted read-only or owned by another user)
|
||||
chmod 600 "$MOUNTED_PUBLIC_KEY" 2>/dev/null
|
||||
if [ ! -f "/home/krkn/.ssh/authorized_keys" ]; then
|
||||
cp "$MOUNTED_PUBLIC_KEY" /home/krkn/.ssh/authorized_keys
|
||||
chmod 600 /home/krkn/.ssh/authorized_keys
|
||||
fi
|
||||
fi
|
||||
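For reference, the simplest way to satisfy this script is to mount the private key read-only at the alternative location it probes first. A hedged sketch (the image name is a placeholder, not a published tag):

```bash
# Mount the SSH key at /secrets/id_rsa, the first location the script
# checks; :Z relabels the mount on SELinux-enforcing hosts.
podman run -it \
  -v "$HOME/.ssh/id_rsa:/secrets/id_rsa:ro,Z" \
  <krkn-image>
```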
@@ -1,48 +0,0 @@
## SLOs validation

Pass/fail based on metrics captured from the cluster is important in addition to checking the health status and recovery. Kraken supports:

### Checking for critical alerts post chaos
If enabled, the check runs at the end of each scenario ( post chaos ) and Kraken exits in case critical alerts are firing, to allow the user to debug. You can enable it in the config:

```
performance_monitoring:
    check_critical_alerts: False # When enabled will check prometheus for critical alerts firing post chaos
```

### Validation and alerting based on the queries defined by the user during chaos
Takes PromQL queries as input and modifies the return code of the run to determine pass/fail. It's especially useful for automated runs in CI where the user won't be able to monitor the system. This feature can be enabled in the [config](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml) by setting the following:

```
performance_monitoring:
    prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
    prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
    enable_alerts: True # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error.
    alert_profile: config/alerts.yaml # Path to alert profile with the prometheus queries.
```

#### Alert profile
A couple of [alert profiles](https://github.com/redhat-chaos/krkn/tree/main/config), such as [alerts.yaml](https://github.com/redhat-chaos/krkn/blob/main/config/alerts.yaml), are shipped by default and can be tweaked to add more queries to alert on. The user can provide a URL or path to the file in the [config](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml). The following are a few example alerts:

```
- expr: avg_over_time(histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[2m]))[5m:]) > 0.01
  description: 5 minutes avg. etcd fsync latency on {{$labels.pod}} higher than 10ms {{$value}}
  severity: error

- expr: avg_over_time(histogram_quantile(0.99, rate(etcd_network_peer_round_trip_time_seconds_bucket[5m]))[5m:]) > 0.1
  description: 5 minutes avg. etcd network peer round trip on {{$labels.pod}} higher than 100ms {{$value}}
  severity: info

- expr: increase(etcd_server_leader_changes_seen_total[2m]) > 0
  description: etcd leader changes observed
  severity: critical
```

Krkn supports setting the severity for the alerts, with each one having a different effect:

```
info: Prints an info message with the alert description to stdout. By default all expressions have this severity.
warning: Prints a warning message with the alert description to stdout.
error: Prints an error message with the alert description to stdout and sets Krkn rc = 1.
critical: Prints a fatal message with the alert description to stdout and exits execution immediately with rc != 0.
```
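Because `error` severity flips the return code and `critical` aborts the run, a CI job can gate purely on the exit status. A minimal sketch, assuming Krkn is launched via `run_kraken.py` as in the examples elsewhere in these docs:

```bash
# Run the chaos suite; a non-zero rc means an error/critical alert fired.
if python3 run_kraken.py --config config/config.yaml; then
    echo "SLOs held during chaos"
else
    echo "SLO validation failed; inspect the alert output above" >&2
    exit 1
fi
```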
@@ -1 +0,0 @@
theme: jekyll-theme-cayman
@@ -1,17 +0,0 @@
### Application outages
Scenario to block the traffic ( Ingress/Egress ) of an application matching the labels for the specified duration of time, to understand the behavior of the service/other services which depend on it during downtime. This helps with planning the requirements accordingly, be it improving the timeouts or tweaking the alerts etc.

##### Sample scenario config
```
application_outage: # Scenario to create an outage of an application by blocking traffic
    duration: 600 # Duration in seconds after which the routes will be accessible
    namespace: <namespace-with-application> # Namespace to target - all application routes will become inaccessible if the pod selector is empty
    pod_selector: {app: foo} # Pods to target
    block: [Ingress, Egress] # It can be Ingress, Egress, or both Ingress and Egress
```

##### Debugging steps in case of failures
Kraken creates a network policy blocking the ingress/egress traffic to create an outage. If Kraken fails before reverting the network policy, you can delete it manually by executing the following command to stop the outage:
```
$ oc delete networkpolicy/kraken-deny -n <targeted-namespace>
```
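To confirm the outage has actually been reverted (or that your manual cleanup worked), it can help to check that no kraken-created policy is left behind; a small sketch using the policy name from the command above:

```bash
# kraken-deny should be absent once the outage is over.
oc get networkpolicy -n <targeted-namespace>
```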
@@ -1,70 +0,0 @@
## Arcaflow Scenarios
Arcaflow is a workflow engine in development which provides the ability to execute workflow steps in sequence, in parallel, repeatedly, etc. The main difference to competitors such as Netflix Conductor is the ability to run ad-hoc workflows without requiring an infrastructure setup.

The engine uses containers to execute plugins and runs them either locally in Docker/Podman or remotely on a Kubernetes cluster. The workflow system is strongly typed and allows for generating JSON schema and OpenAPI documents for all data formats involved.

### Available Scenarios
#### Hog scenarios:
- [CPU Hog](arcaflow_scenarios/cpu_hog.md)
- [Memory Hog](arcaflow_scenarios/memory_hog.md)
- [I/O Hog](arcaflow_scenarios/io_hog.md)

### Prerequisites
Arcaflow supports three deployment technologies:
- Docker
- Podman
- Kubernetes

#### Docker
In order to run Arcaflow Scenarios with the Docker deployer, be sure that:
- Docker is correctly installed in your Operating System (for instructions on how to install Docker, refer to the [Docker Documentation](https://www.docker.com/))
- The Docker daemon is running

#### Podman
The Podman deployer is built around the podman CLI and does not necessarily need to run alongside the podman daemon.
To run Arcaflow Scenarios in your Operating System, be sure that:
- podman is correctly installed in your Operating System (for instructions on how to install podman, refer to the [Podman Documentation](https://podman.io/))
- the podman CLI is in your shell PATH

#### Kubernetes
The Kubernetes deployer integrates directly with the Kubernetes API client and needs only a valid kubeconfig file and a reachable Kubernetes/OpenShift cluster.

### Usage

To enable arcaflow scenarios, edit the kraken config file: go to the section `kraken -> chaos_scenarios` of the yaml structure,
add a new element to the list named `arcaflow_scenarios`, then add the desired scenario
pointing to the `input.yaml` file.
```
kraken:
    ...
    chaos_scenarios:
        - arcaflow_scenarios:
            - scenarios/arcaflow/cpu-hog/input.yaml
```

#### input.yaml
The implemented scenarios can be found in the *scenarios/arcaflow/<scenario_name>* folder.
The entrypoint of each scenario is the *input.yaml* file.
This file contains all the options to set up the scenario according to the desired target.
### config.yaml
The arcaflow config file. Here you can set the arcaflow deployer and the arcaflow log level.
The supported deployers are:
- Docker
- Podman (podman daemon not needed, suggested option)
- Kubernetes

The supported log levels are:
- debug
- info
- warning
- error
### workflow.yaml
This file contains the steps that will be executed to perform the scenario against the target.
Each step is represented by a container that will be executed by the deployer, along with its options.
Note that we provide the scenarios as templates, but they can be manipulated to define more complex workflows.
For more details on the Arcaflow workflow architecture and syntax, refer to the [Arcaflow Documentation](https://arcalot.io/arcaflow/).

This edit is no longer in the quay image.
Working on fix in ticket: https://issues.redhat.com/browse/CHAOS-494
This will affect all versions 4.12 and higher of OpenShift.
@@ -1,19 +0,0 @@
# CPU Hog
This scenario is based on the arcaflow [arcaflow-plugin-stressng](https://github.com/arcalot/arcaflow-plugin-stressng) plugin.
The purpose of this scenario is to create CPU pressure on a particular node of the Kubernetes/OpenShift cluster for a time span.
To enable this plugin, add the pointer to the scenario input file `scenarios/arcaflow/cpu-hog/input.yaml` as described in the
Usage section.
This scenario takes a list of objects named `input_list` with the following properties:

- **kubeconfig :** *string* the kubeconfig needed by the deployer to deploy the sysbench plugin in the target cluster
- **namespace :** *string* the namespace where the scenario container will be deployed
  **Note:** this parameter will be automatically filled by kraken if the `kubeconfig_path` property is correctly set
- **node_selector :** *key-value map* the node label that will be used as `nodeSelector` by the pod to target a specific cluster node
- **duration :** *string* stop the stress test after N seconds. One can also specify the units of time in seconds, minutes, hours, days or years with the suffix s, m, h, d or y.
- **cpu_count :** *int* the number of CPU cores to be used (0 means all)
- **cpu_method :** *string* fine-grained control of which CPU stressors to use (ackermann, cfloat etc.; see the [manpage](https://manpages.org/sysbench) for all the cpu_method options)
- **cpu_load_percentage :** *int* the CPU load by percentage

To perform several load tests simultaneously in the same run (e.g. stress two or more nodes in the same run), add another item
to the `input_list` with the same properties (and possibly different values, e.g. different node_selectors
to schedule the pod on different nodes). To reduce (or increase) the parallelism, change the `parallelism` value in the `workload.yaml` file.
@@ -1,21 +0,0 @@
# I/O Hog
This scenario is based on the arcaflow [arcaflow-plugin-stressng](https://github.com/arcalot/arcaflow-plugin-stressng) plugin.
The purpose of this scenario is to create disk pressure on a particular node of the Kubernetes/OpenShift cluster for a time span.
The scenario allows attaching a node path to the pod as a `hostPath` volume.
To enable this plugin, add the pointer to the scenario input file `scenarios/arcaflow/io-hog/input.yaml` as described in the
Usage section.
This scenario takes a list of objects named `input_list` with the following properties:

- **kubeconfig :** *string* the kubeconfig needed by the deployer to deploy the sysbench plugin in the target cluster
- **namespace :** *string* the namespace where the scenario container will be deployed
  **Note:** this parameter will be automatically filled by kraken if the `kubeconfig_path` property is correctly set
- **node_selector :** *key-value map* the node label that will be used as `nodeSelector` by the pod to target a specific cluster node
- **duration :** *string* stop the stress test after N seconds. One can also specify the units of time in seconds, minutes, hours, days or years with the suffix s, m, h, d or y.
- **target_pod_folder :** *string* the path in the pod where the volume is mounted
- **target_pod_volume :** *object* the `hostPath` volume definition in the [Kubernetes/OpenShift](https://docs.openshift.com/container-platform/3.11/install_config/persistent_storage/using_hostpath.html) format, that will be attached to the pod as a volume
- **io_write_bytes :** *string* writes N bytes for each hdd process. The size can be expressed as % of free space on the file system or in units of Bytes, KBytes, MBytes and GBytes using the suffix b, k, m or g
- **io_block_size :** *string* size of each write in bytes. Size can be from 1 byte to 4m.

To perform several load tests simultaneously in the same run (e.g. stress two or more nodes in the same run), add another item
to the `input_list` with the same properties (and possibly different values, e.g. different node_selectors
to schedule the pod on different nodes). To reduce (or increase) the parallelism, change the `parallelism` value in the `workload.yaml` file.
@@ -1,18 +0,0 @@
# Memory Hog
This scenario is based on the arcaflow [arcaflow-plugin-stressng](https://github.com/arcalot/arcaflow-plugin-stressng) plugin.
The purpose of this scenario is to create virtual memory pressure on a particular node of the Kubernetes/OpenShift cluster for a time span.
To enable this plugin, add the pointer to the scenario input file `scenarios/arcaflow/memory-hog/input.yaml` as described in the
Usage section.
This scenario takes a list of objects named `input_list` with the following properties:

- **kubeconfig :** *string* the kubeconfig needed by the deployer to deploy the sysbench plugin in the target cluster
- **namespace :** *string* the namespace where the scenario container will be deployed
  **Note:** this parameter will be automatically filled by kraken if the `kubeconfig_path` property is correctly set
- **node_selector :** *key-value map* the node label that will be used as `nodeSelector` by the pod to target a specific cluster node
- **duration :** *string* stop the stress test after N seconds. One can also specify the units of time in seconds, minutes, hours, days or years with the suffix s, m, h, d or y.
- **vm_bytes :** *string* N bytes per vm process or percentage of memory used (using the % symbol). The size can be expressed in units of Bytes, KBytes, MBytes and GBytes using the suffix b, k, m or g.
- **vm_workers :** *int* number of VM stressors to be run (0 means 1 stressor per CPU)

To perform several load tests simultaneously in the same run (e.g. stress two or more nodes in the same run), add another item
to the `input_list` with the same properties (and possibly different values, e.g. different node_selectors
to schedule the pod on different nodes). To reduce (or increase) the parallelism, change the `parallelism` value in the `workload.yaml` file.
@@ -1,102 +0,0 @@
Supported Cloud Providers:

- [AWS](#aws)
- [GCP](#gcp)
- [Openstack](#openstack)
- [Azure](#azure)
- [Alibaba](#alibaba)
- [VMware](#vmware)
- [IBMCloud](#ibmcloud)

## AWS

**NOTE**: For clusters with AWS, make sure the [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) is installed and properly [configured](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html) using an AWS account.
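One way to sanity-check the CLI setup before starting a run (both are standard AWS CLI commands, shown here only as a suggested smoke test):

```bash
# Confirm the credentials resolve and point at the expected account.
aws sts get-caller-identity
# Confirm the region the chaos run will operate in is configured.
aws configure get region
```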
## GCP

In order to set up Application Default Credentials (ADC) for use by Cloud Client Libraries, you can provide either service account credentials or the credentials associated with your user account:

- Using service account credentials:

  A Google service account is required to give proper authentication to GCP for node actions. See [here](https://cloud.google.com/docs/authentication/getting-started) for how to create a service account.

  **NOTE**: A user with the 'resourcemanager.projects.setIamPolicy' permission is required to grant project-level permissions to the service account.

  After creating the service account, you will need to enable the account using the following: ```export GOOGLE_APPLICATION_CREDENTIALS="<serviceaccount.json>"```

- Using the credentials associated with your user account:

  1. Make sure that the [GCP CLI](https://cloud.google.com/sdk/docs/install#linux) is installed and [initialized](https://cloud.google.com/sdk/docs/initializing) by running:

     ```gcloud init```

  2. Create local authentication credentials for your user account:

     ```gcloud auth application-default login```

## Openstack

**NOTE**: For clusters with Openstack Cloud, be sure to create and source the [OPENSTACK RC file](https://docs.openstack.org/newton/user-guide/common/cli-set-environment-variables-using-openstack-rc.html) to set the OPENSTACK environment variables on the server where Kraken runs.

## Azure

**NOTE**: You will need to create a service principal and give it the correct access; see [here](https://docs.openshift.com/container-platform/4.5/installing/installing_azure/installing-azure-account.html) for creating the service principal and setting the proper permissions.

To run properly, the service principal requires the “Azure Active Directory Graph/Application.ReadWrite.OwnedBy” API permission and the “User Access Administrator” role.

Before running, you will need to set the following:

1. ```export AZURE_SUBSCRIPTION_ID=<subscription_id>```

2. ```export AZURE_TENANT_ID=<tenant_id>```

3. ```export AZURE_CLIENT_SECRET=<client secret>```

4. ```export AZURE_CLIENT_ID=<client id>```

## Alibaba

See the [Installation guide](https://www.alibabacloud.com/help/en/alibaba-cloud-cli/latest/installation-guide) to install the alicloud CLI.

1. ```export ALIBABA_ID=<access_key_id>```

2. ```export ALIBABA_SECRET=<access key secret>```

3. ```export ALIBABA_REGION_ID=<region id>```

Refer to the [region and zone page](https://www.alibabacloud.com/help/en/elastic-compute-service/latest/regions-and-zones#concept-2459516) to get the region id for the region you are running on.

Set cloud_type to either alibaba or alicloud in your node scenario yaml file.

## VMware

Set the following environment variables:

1. ```export VSPHERE_IP=<vSphere_client_IP_address>```

2. ```export VSPHERE_USERNAME=<vSphere_client_username>```

3. ```export VSPHERE_PASSWORD=<vSphere_client_password>```

These are the credentials that you would normally use to access the vSphere client.

## IBMCloud

If no API key is set up with proper VPC resource permissions, use the following to create it:

* Access group
* Service id with the following access
    * With policy **VPC Infrastructure Services**
    * Resources = All
    * Roles:
        * Editor
        * Administrator
        * Operator
        * Viewer
* API Key

Set the following environment variables:

1. ```export IBMC_URL=https://<region>.iaas.cloud.ibm.com/v1```

2. ```export IBMC_APIKEY=<ibmcloud_api_key>```
@@ -1,19 +0,0 @@
#### Kubernetes cluster shut down scenario
Scenario to shut down all the nodes, including the masters, and restart them after the specified duration. The cluster shut down scenario can be injected by placing the shut_down config file under the cluster_shut_down_scenario option in the kraken config. Refer to the [cluster_shut_down_scenario](https://github.com/krkn-chaos/krkn/blob/main/scenarios/cluster_shut_down_scenario.yml) config file.

Refer to [cloud setup](cloud_setup.md) to configure your CLI properly for the cloud provider of the cluster you want to shut down.

Currently accepted cloud types:
* [Azure](cloud_setup.md#azure)
* [GCP](cloud_setup.md#gcp)
* [AWS](cloud_setup.md#aws)
* [Openstack](cloud_setup.md#openstack)
* [IBMCloud](cloud_setup.md#ibmcloud)

```
cluster_shut_down_scenario: # Scenario to stop all the nodes for specified duration and restart the nodes.
    runs: 1 # Number of times to execute the cluster_shut_down scenario.
    shut_down_duration: 120 # Duration in seconds to shut down the cluster.
    cloud_type: aws # Cloud type on which Kubernetes/OpenShift runs.
```
@@ -1,65 +0,0 @@
### Config
Set the scenarios to inject and the tunings, like the duration to wait between each scenario, in the config file located at [config/config.yaml](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml).

**NOTE**: this [config](https://github.com/redhat-chaos/krkn/blob/main/config/config_performance.yaml) can be used if leveraging the [automated way](https://github.com/redhat-chaos/krkn#setting-up-infrastructure-dependencies) to install the infrastructure pieces.

Config components:
* [Kraken](#kraken)
* [Cerberus](#cerberus)
* [Performance Monitoring](#performance-monitoring)
* [Tunings](#tunings)

# Kraken
This section defines the scenarios and data specific to the chaos run.

## Distribution
Either **openshift** or **kubernetes** depending on the type of cluster you want to run chaos on.
The prometheus url/route and bearer token are automatically obtained in case of OpenShift; please set them when the distribution is Kubernetes.

## Exit on failure
**exit_on_failure**: Exit when a post action check or cerberus run fails.

## Publish kraken status
**publish_kraken_status**: Can be accessed at http://0.0.0.0:8081 (or whatever signal_address and port you set in the signal address section).
**signal_state**: State you want kraken to start at; when set to PAUSE before running the scenarios, it will wait for the RUN signal to start running a chaos iteration. Refer to [signal.md](signal.md) for more details.

## Signal Address
**signal_address**: Address to listen/post the signal state to.
**port**: Port to listen/post the signal state to.
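When `publish_kraken_status` is enabled, the state can be read over plain HTTP. A minimal sketch, assuming the defaults above and that the endpoint returns the state as text (see [signal.md](signal.md) for the authoritative workflow):

```bash
# Read the current kraken signal state (e.g. RUN, PAUSE, STOP).
curl http://0.0.0.0:8081
```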
## Chaos Scenarios

**chaos_scenarios**: List of the different types of chaos scenarios you want to run, with paths to their specific yaml file configurations.

If a scenario has a post action check script, it will be run before and after each scenario to validate that the component under test starts and ends in the same state.

Currently the scenarios are run one after another (in sequence), and the run will exit if one of the scenarios fails, without moving on to the next one.

Chaos scenario types:
- container_scenarios
- plugin_scenarios
- node_scenarios
- time_scenarios
- cluster_shut_down_scenarios
- namespace_scenarios
- zone_outages
- application_outages
- pvc_scenarios
- network_chaos

# Cerberus
Parameters for enabling cerberus checks at the end of each executed scenario. The given url will be pinged after the scenario and the post action check have been completed, for each scenario and iteration.
**cerberus_enabled**: Enable it when cerberus is already installed.
**cerberus_url**: When cerberus_enabled is set to True, provide the url where cerberus publishes the go/no-go signal.
**check_applicaton_routes**: When enabled, looks for application unavailability using the routes specified in the cerberus config and fails the run.

# Performance Monitoring
There are 2 main sections defined in this part of the config, [metrics](metrics.md) and [alerts](alerts.md); read more about each of these configurations in their respective docs.

# Tunings
**wait_duration**: Duration to wait between each chaos scenario.
**iterations**: Number of times to execute the scenarios.
**daemon_mode**: True or False. If True, iterations are set to infinity, which means kraken will cause chaos forever and the number of iterations is ignored.
@@ -1,40 +0,0 @@
### Container Scenarios
Kraken uses the `oc exec` command to `kill` specific containers in a pod.
This can be based on the pod's namespace or labels. If you know the exact object you want to kill, you can also specify the specific container name or pod name in the scenario yaml file.
These scenarios are in a simple yaml format that you can manipulate to run your specific tests, or you can use the pre-existing scenarios to see how it works.
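Under the hood this is conceptually equivalent to sending the kill signal by hand; an illustrative sketch with placeholder names, not something Kraken requires you to run:

```bash
# Send SIGHUP (signal 1) to PID 1 of one container in a pod, which is
# roughly what the container scenario's default action does.
oc exec -n <namespace> <pod-name> -c <container-name> -- kill -1 1
```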
#### Example Config
The following are the components of Kubernetes for which a basic chaos scenario config exists today.

```
scenarios:
- name: "<name of scenario>"
  namespace: "<specific namespace>" # can specify "*" if you want to find in all namespaces
  label_selector: "<label of pod(s)>"
  container_name: "<specific container name>" # Optional; if omitted, all containers in all pods found under the namespace and label will be killed
  pod_names: # Optional; if omitted, all pods with the given namespace and label will be selected
  - <pod_name>
  count: <number of containers to disrupt, default=1>
  action: <kill signal to run. For example 1 ( hang up ) or 9. Default is set to 1>
  expected_recovery_time: <number of seconds to wait for container to be running again> (defaults to 120 seconds)
```

#### Post Action
In all scenarios we do a post chaos check to wait for and verify the specific component.

Here there are two options:
1. Pass a custom script in the main config scenario list; it runs before the chaos and verifies that the output matches after the chaos scenario.

   See [scenarios/post_action_etcd_container.py](https://github.com/krkn-chaos/krkn/blob/main/scenarios/post_action_etcd_container.py) for an example.
   ```
   - container_scenarios: # List of chaos pod scenarios to load.
       - - scenarios/container_etcd.yml
         - scenarios/post_action_etcd_container.py
   ```

2. Allow kraken to wait and check the killed containers until they become ready again. Kraken keeps a list of the specific
containers that were killed, as well as the namespaces and pods, to verify that all containers that were affected recover properly.

   ```
   expected_recovery_time: <seconds to wait for container to recover>
   ```
@@ -1,95 +0,0 @@
# How to contribute

Contributions are always appreciated.

How to:
* [Submit Pull Request](#pull-request)
* [Fix Formatting](#fix-formatting)
* [Squash Commits](#squash-commits)
* [Rebase Upstream](#rebase-with-upstream)

## Pull request

In order to submit a change or a PR, please fork the project and follow these instructions:
```bash
$ git clone http://github.com/<me>/krkn
$ cd krkn
$ git checkout -b <branch_name>
$ <make change>
$ git add <changes>
$ git commit -a
$ <insert good message>
$ git push
```

## Fix Formatting
Kraken uses the [pre-commit](https://pre-commit.com) framework to maintain code linting and python code styling.
The CI runs the pre-commit check on each pull request.
We encourage our contributors to follow the same pattern while contributing to the code.

The pre-commit configuration file is present in the repository `.pre-commit-config.yaml`.
It contains the different code styling and linting guides which we use for the application.

The following command can be used to run the pre-commit:
`pre-commit run --all-files`

If pre-commit is not installed in your system, it can be installed with: `pip install pre-commit`.

## Squash Commits
If there are multiple commits, please rebase/squash the commits
before creating the PR by following:

```bash
$ git checkout <my-working-branch>
$ git rebase -i HEAD~<num_of_commits_to_merge>
  -OR-
$ git rebase -i <commit_id_of_first_change_commit>
```

In the interactive rebase screen, set the first commit to `pick` and all others to `squash`, or whatever else you may need to do.

Push your rebased commits (you may need to force), then issue your PR.

```
$ git push origin <my-working-branch> --force
```

## Rebase with Upstream

If changes go into the main repository while you're working on your code, it is best to rebase your code with the
upstream so you stay up to date with all changes and fix any conflicting code.

If not already configured, set the upstream url for kraken.
```
git remote add upstream https://github.com/krkn-chaos/krkn.git
```

Rebase to the upstream master branch.
```
git fetch upstream
git rebase upstream/master
git push origin <branch_name> --force
```

If any errors occur, git will list the files that have merge issues.
Edit the files with the code you want to keep, following these steps:
1. vi <file(s)>
2. Resolve the merge conflict using the command line
3. git add <all files you edit>
4. git rebase --continue
5. You might need to repeat steps 2 through 4 until the rebase completes
6. git status (this will also tell you if you have other files to edit)
7. git push origin <branch_name> --force (push the changes to the github remote)

Merge Conflicts Example
```
1. git rebase upstream/kraken
2. vi run_kraken.py [edit at the indicated places, get rid of arrowed lines and dashes, and apply correct changes]
3. git add run_kraken.py
4. git rebase --continue
5. repeat 2-4 until done
6. git status <this will also tell you if you have other files to edit>
7. git push origin <branch_name> --force [push the changes to github remote]
```
@@ -1,51 +0,0 @@
## Getting Started Running Chaos Scenarios

#### Adding New Scenarios
Adding a new scenario is as simple as adding a new config file under the [scenarios directory](https://github.com/redhat-chaos/krkn/tree/main/scenarios) and defining it in the main kraken [config](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml#L8).
You can either copy an existing yaml file and make it your own, or fill in one of the templates below to suit your needs.

### Templates
#### Pod Scenario Yaml Template
For example, for adding a pod level scenario for a new application, refer to the sample scenario below to see which fields are necessary and what to add in each location:
```
# yaml-language-server: $schema=../plugin.schema.json
- id: kill-pods
  config:
    namespace_pattern: ^<namespace>$
    label_selector: <pod label>
    kill: <number of pods to kill>
    krkn_pod_recovery_time: <expected time for the pod to become ready>
```

#### Node Scenario Yaml Template

```
node_scenarios:
  - actions: # Node chaos scenarios to be injected.
    - <chaos scenario>
    - <chaos scenario>
    node_name: <node name> # Can be left blank.
    label_selector: <node label>
    instance_kill_count: <number of nodes on which to perform action>
    timeout: <duration to wait for completion>
    cloud_type: <cloud provider>
```

#### Time Chaos Scenario Template
```
time_scenarios:
  - action: 'skew_time' or 'skew_date'
    object_type: 'pod' or 'node'
    label_selector: <label of pod or node>
```

### Common Scenario Edits
If you just want to make small changes to pre-existing scenarios, feel free to edit the scenario file itself.

#### Example of Quick Pod Scenario Edit:
If you want to kill 2 pods instead of 1 in any of the pre-existing scenarios, you can either edit the number located at filters -> randomSample -> size or the runs under the config -> runStrategy section.

#### Example of Quick Nodes Scenario Edit:
If your cluster is built on GCP instead of AWS, just change the cloud type in the node_scenarios_example.yml file.
310
docs/index.md
@@ -1,310 +0,0 @@
## Chaos Testing Guide

### Table of Contents
* [Introduction](#introduction)
* [Test Strategies and Methodology](#test-strategies-and-methodology)
* [Best Practices](#best-practices)
* [Tooling](#tooling)
* [Workflow](#workflow)
* [Cluster recovery checks, metrics evaluation and pass/fail criteria](#cluster-recovery-checks-metrics-evaluation-and-passfail-criteria)
* [Scenarios](#scenarios)
* [Test Environment Recommendations - how and where to run chaos tests](#test-environment-recommendations---how-and-where-to-run-chaos-tests)
* [Chaos testing in Practice](#chaos-testing-in-practice)
* [OpenShift organization](#openshift-organization)
* [startx-lab](#startx-lab)

### Introduction
There are a couple of false assumptions that users might have when operating and running their applications in distributed systems:

- The network is reliable.
- There is zero latency.
- Bandwidth is infinite.
- The network is secure.
- Topology never changes.
- The network is homogeneous.
- Resource usage is consistent, with no spikes.
- All shared resources are available from all places.

These assumptions have led to a number of outages in production environments in the past. The services suffered from poor performance or were inaccessible to the customers, leading to missed Service Level Agreement uptime promises, revenue loss, and a degradation in the perceived reliability of said services.

How can we best avoid this from happening? This is where chaos testing can add value.
### Test Strategies and Methodology
Failures in production are costly. To help mitigate risk to service health, consider the following strategies and approaches to service testing:

- Be proactive vs reactive. We have different types of test suites in place - unit, integration and end-to-end - that help expose bugs in code in a controlled environment. Through implementation of a chaos engineering strategy, we can discover potential causes of service degradation. We need to understand the systems' behavior under unpredictable conditions in order to find the areas to harden, and use performance data points to size the clusters to handle failures in order to keep downtime to a minimum.

- Test the resiliency of a system under turbulent conditions by running tests that are designed to disrupt while monitoring the system's adaptability and performance:
  - Establish and define your steady state and metrics - understand the behavior and performance under stable conditions and define the metrics that will be used to evaluate the system's behavior. Then decide on acceptable outcomes before injecting chaos.
  - Analyze the statuses and metrics of all components during the chaos test runs.
  - Improve the areas that are not resilient and performant by comparing the key metrics and Service Level Objectives (SLOs) to the stable conditions before the chaos.
    For example: evaluating the API server latency or application uptime to see if the key performance indicators and service level indicators are still within acceptable limits.
### Best Practices
Now that we understand the test methodology, let us take a look at the best practices for a Kubernetes cluster. On that platform there are user applications and cluster workloads that need to be designed for stability and to provide the best user experience possible:

- Alerts with appropriate severity should get fired.
  - Alerts are key to identifying when a component starts degrading, and can help focus the investigation effort on the affected system components.
  - Alerts should have a proper severity, description, notification policy, escalation policy, and SOP in order to reduce MTTR for the responding SRE or Ops resources.
  - Detailed information on alert consistency can be found [here](https://github.com/openshift/enhancements/blob/master/enhancements/monitoring/alerting-consistency.md).

- Minimal performance impact - Network, CPU, Memory, Disk, Throughput etc.
  - The system, as well as the applications, should be designed to have minimal performance impact during disruptions, to ensure stability and also to avoid hogging resources that other applications could use. We want to look at this in terms of CPU, Memory, Disk, Throughput, Network etc.

- Appropriate CPU/Memory limits set to avoid performance throttling and OOM kills.
  - There might be rogue applications hogging resources ( CPU/Memory ) on the nodes, which might lead to applications underperforming or, worse, getting OOM killed. It is important to ensure that applications and system components have reserved resources for the kube-scheduler to take into consideration, in order to keep them performing at the expected levels.

- Services dependent on the system under test need to handle failure gracefully to avoid performance degradation and downtime - appropriate timeouts.
  - In a distributed system, the deployed services coordinate with each other and might have external dependencies. Each of the services deployed as a deployment, pod, or container needs to handle the downtime of other dependent services gracefully, instead of crashing due to not having appropriate timeouts, fallback logic etc.

- Proper node sizing to avoid cascading failures and ensure cluster stability, especially when the cluster is large and dense.
  - The platform needs to be sized taking into account the resource usage spikes that might occur during chaotic events. For example, if one of the main nodes goes down, the other two main nodes need to have enough resources to handle the load. The resource usage depends on the load or number of objects that are running and being managed by the control plane ( API server, etcd, controller and scheduler ). As such, it's critical to test such conditions, understand the behavior, and leverage the data to size the platform appropriately. This can help keep the applications stable during unplanned events without the control plane undergoing cascading failures, which can potentially bring down the entire cluster.

- Proper node sizing to avoid application failures and maintain stability.
  - An application pod might use more resources during reinitialization after a crash, so it is important to take that into account when sizing the nodes in the cluster to accommodate it. For example, monitoring solutions like Prometheus need high amounts of memory to replay the write ahead log ( WAL ) when they restart. As such, it's critical to test such conditions, understand the behavior, and leverage the data to size the platform appropriately. This can help keep the application stable during unplanned events without undergoing degradation in performance or, even worse, hogging the resources on the node, which can impact other applications and system pods.

- Minimal initialization time and fast recovery logic.
  - The controller watching the component should recognize a failure as soon as possible. The component needs to have minimal initialization time to avoid extended downtime or overloading the replicas if it is a highly available configuration. The cause of failure can be issues with the infrastructure on top of which it is running, application failures, or failures of services that it depends on.

- High Availability deployment strategy.
  - There should be multiple replicas ( of both the Kubernetes and application control planes ) running, preferably in different availability zones, to survive outages while still serving the user/system requests. Avoid single points of failure.
- Backed by persistent storage.
  - It is important to have the system/application backed by persistent storage. This is especially important in cases where the application is a database or a stateful application, given that a node, pod, or container failure will wipe out the data.

- There should be fallback routes to the backend when using a CDN, for example Akamai in the case of console.redhat.com - a managed service deployed on top of Kubernetes dedicated:
  - Content delivery networks (CDNs) are commonly used to host resources such as images, JavaScript files, and CSS. The average web page is nearly 2 MB in size, and offloading heavy resources to third parties is extremely effective for reducing backend server traffic and latency. However, this makes each CDN an additional point of failure for every site that relies on it. If the CDN fails, its customers could also fail.
  - To test how the application reacts to failures, drop all network traffic between the system and the CDN. The application should still serve the content to the user irrespective of the failure.

- Appropriate caching and a Content Delivery Network should be enabled to stay performant and usable when there is latency on the client side.
  - Not every user or machine has access to unlimited bandwidth; there might be a delay on the user side ( client ) in accessing the APIs due to limited bandwidth, throttling, or latency depending on the geographic location. It is important to inject latency between the client and API calls to understand the behavior and optimize things, including caching wherever possible, using CDNs, or opting for different protocols like HTTP/2 or HTTP/3 vs HTTP.
### Tooling
Now that we have looked at the best practices, in this section we will go through how [Kraken](https://github.com/redhat-chaos/krkn) - a chaos testing framework - can help test the resilience of Kubernetes and make sure the applications and services are following the best practices.

#### Workflow
Let us start by understanding the workflow of kraken: the user starts kraken by pointing it at a specific Kubernetes cluster using a kubeconfig, so it can talk to the platform on top of which the Kubernetes cluster is hosted. This can be done through either the oc/kubectl API or the cloud API. Based on its configuration, kraken injects specific chaos scenarios as shown below, talks to [Cerberus](https://github.com/redhat-chaos/cerberus) to get the go/no-go signal representing the overall health of the cluster ( optional - can be turned off ), scrapes metrics from the in-cluster prometheus given a metrics profile with the promql queries and stores them long term in the configured Elasticsearch ( optional - can be turned off ), evaluates the promql expressions specified in the alerts profile ( optional - can be turned off ), and aggregates everything to set the pass/fail, i.e. exits 0 or 1. More about the metrics collection, cerberus, and metrics evaluation can be found in the next section.

#### Cluster recovery checks, metrics evaluation and pass/fail criteria
- Most of the scenarios have built-in checks to verify if the targeted component recovered from the failure after the specified duration of time, but there might be cases where other components are impacted by a certain failure, and it's extremely important to make sure that the system/application is healthy as a whole post chaos. This is exactly where [Cerberus](https://github.com/redhat-chaos/cerberus) comes to the rescue.
  If the monitoring tool cerberus is enabled, kraken consumes its signal and decides whether or not to continue running chaos based on it.

- Apart from checking the recovery and cluster health status, it's equally important to evaluate performance metrics like latency, resource usage spikes, throughput, and etcd health indicators like disk fsync, leader elections etc. To help with this, Kraken has a way to evaluate promql expressions from the in-cluster prometheus and set the exit status to 0 or 1 based on the severity set for each of the queries. Details on how to use this feature can be found [here](https://github.com/redhat-chaos/krkn#alerts).

- The overall pass or fail of kraken is based on the recovery of the specific component (within a certain amount of time), the cerberus health signal which tracks the health of the entire cluster, and the metrics evaluation from the in-cluster prometheus.
### Scenarios

Let us take a look at how to run the chaos scenarios on your Kubernetes clusters using Kraken-hub - a lightweight wrapper around Kraken that eases the runs by letting you launch container images with podman, with parameters set as environment variables (a sample invocation follows the list below). This eliminates the need to carry around and edit configuration files, and makes it easy to integrate with any CI framework. Here are the scenarios supported:

- Pod Scenarios ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/pod-scenarios.md))
  - Disrupts Kubernetes/OpenShift components and applications deployed as pods:
    - Helps understand the availability of the application, the initialization timing and recovery status.
  - [Demo](https://asciinema.org/a/452351?speed=3&theme=solarized-dark)

- Container Scenarios ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/container-scenarios.md))
  - Disrupts Kubernetes/OpenShift components and applications deployed as containers running as part of a pod(s), using a specified kill signal to mimic failures:
    - Helps understand the impact and recovery timing when the program/process running in the containers is disrupted - hung, paused, killed etc. - using various kill signals, i.e. SIGHUP, SIGTERM, SIGKILL etc.
  - [Demo](https://asciinema.org/a/BXqs9JSGDSEKcydTIJ5LpPZBM?speed=3&theme=solarized-dark)

- Node Scenarios ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/node-scenarios.md))
  - Disrupts nodes as part of the cluster infrastructure by talking to the cloud API. AWS, Azure, GCP, OpenStack and Baremetal are the supported platforms as of now. Possible disruptions include:
    - Terminate nodes
    - Fork bomb inside the node
    - Stop the node
    - Crash the kubelet running on the node
    - etc.
  - [Demo](https://asciinema.org/a/ANZY7HhPdWTNaWt4xMFanF6Q5)

- Zone Outages ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/zone-outages.md))
  - Creates an outage of availability zone(s) in a targeted region in the public cloud where the Kubernetes cluster is running, by tweaking the network ACL of the zone to simulate the failure; this in turn stops both ingress and egress traffic from all nodes in that zone for the specified duration and then reverts back to the previous state.
  - Helps understand the impact on the Kubernetes/OpenShift control plane as well as on applications and services running on the worker nodes in that zone.
  - Currently only set up for the AWS cloud platform: 1 VPC and multiple subnets within the VPC can be specified.
  - [Demo](https://asciinema.org/a/452672?speed=3&theme=solarized-dark)

- Application Outages ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/application-outages.md))
  - Scenario to block the traffic ( Ingress/Egress ) of an application matching the labels for the specified duration of time, to understand the behavior of the service/other services which depend on it during the downtime.
  - Helps understand how the dependent services react to the unavailability.
  - [Demo](https://asciinema.org/a/452403?speed=3&theme=solarized-dark)

- Power Outages ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/power-outages.md))
  - This scenario imitates a power outage by shutting down the entire cluster for a specified duration of time, then restarts all the nodes after the specified time and checks the health of the cluster.
  - There are various use cases in customer environments; for example, some clusters are shut down when the applications are not needed to run at a particular time or season, in order to save costs.
  - The nodes are stopped in parallel to mimic a power outage, i.e. pulling off the plug.
  - [Demo](https://asciinema.org/a/r0zLbh70XK7gnc4s5v0ZzSXGo)

- Resource Hog
  - Hogs CPU, Memory and IO on the targeted nodes.
  - Helps understand whether the application/system components have reserved resources so they are not disrupted by rogue applications or performance throttled.
  - CPU Hog ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/node-cpu-hog.md), [Demo](https://asciinema.org/a/452762))
  - Memory Hog ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/node-memory-hog.md), [Demo](https://asciinema.org/a/452742?speed=3&theme=solarized-dark))

- Time Skewing ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/time-scenarios.md))
  - Manipulate the system time and/or date of specific pods/nodes.
  - Verify scheduling of objects so they continue to work.
  - Verify time gets reset properly.

- Namespace Failures ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/namespace-scenarios.md))
  - Delete namespaces for the specified duration.
  - Helps understand the impact on other components and tests/improves the recovery time of the components in the targeted namespace.

- Persistent Volume Fill ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/pvc-scenarios.md))
  - Fills up the persistent volumes used by the pod, up to a given percentage, for the specified duration.
  - Helps understand how an application copes when it is no longer able to write data to the disk. For example, kafka's behavior when it is not able to commit data to the disk.

- Network Chaos ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/network-chaos.md))
  - Scenarios supported include:
    - Network latency
    - Packet loss
    - Interface flapping
    - DNS errors
    - Packet corruption
    - Bandwidth limitation
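As noted above, each scenario is just a container image parameterized by environment variables. A hedged sketch of a pod-scenarios run (the image tag and variable names should be verified against the krkn-hub docs linked above before use):

```bash
# Run the pod-scenarios image against the cluster from ~/.kube/config,
# passing tunables as environment variables.
podman run --net=host \
  -v "$HOME/.kube/config:/root/.kube/config:Z" \
  -e NAMESPACE="openshift-etcd" \
  quay.io/redhat-chaos/krkn-hub:pod-scenarios
```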
### Test Environment Recommendations - how and where to run chaos tests

Let us take a look at a few recommendations on how and where to run the chaos tests:

- Run the chaos tests continuously in your test pipelines:
  - Software, systems, and infrastructure do change - and the condition/health of each can change pretty rapidly. A good place to run tests is in your CI/CD pipeline, running on a regular cadence.

- Run the chaos tests manually to learn from the system:
  - When running a chaos scenario or fault tests, it is more important to understand how the system responds and reacts than to mark the execution as pass or fail.
  - It is important to define the scope of the test before the execution, to avoid some issues from masking others.

- Run the chaos tests in production environments or mimic the load in staging environments:
  - As scary as the thought of testing in production is, production is the environment that users are in, and traffic spikes/load are real. To fully test the robustness/resilience of a production system, running Chaos Engineering experiments in a production environment will provide needed insights. A couple of things to keep in mind:
    - Minimize the blast radius and have a backup plan in place to make sure the users and customers do not undergo downtime.
    - Mimic the load in a staging environment in case Service Level Agreements are too tight to cover any downtime.

- Enable Observability:
  - Chaos Engineering Without Observability ... Is Just Chaos.
  - Make sure to have logging and monitoring installed on the cluster, to help with understanding the behavior and why it is happening. When running the tests in CI, where it is not humanly possible to monitor the cluster all the time, it is recommended to leverage Cerberus to capture the state during the runs, and the metrics collection in Kraken to store metrics long term, even after the cluster is gone.
  - Kraken ships with dashboards that help understand API, etcd and Kubernetes cluster level stats and performance metrics.
  - Pay attention to Prometheus alerts. Check if they are firing as expected.

- Run multiple chaos tests at once to mimic production outages:
  - For example, hog both IO and Network at the same time, instead of running them separately, to observe the impact.
  - You might have existing test cases, be they related to Performance, Scalability or QE. Run the chaos in the background during the test runs to observe the impact. The signaling feature in Kraken can help coordinate the chaos runs, i.e. start, stop, and pause the scenarios based on the state of the other test jobs.
#### Chaos testing in Practice

##### OpenShift organization
Within the OpenShift organization we use kraken to perform chaos testing throughout a release, before the code is available to customers.

1. We execute kraken during our regression test suite.

   i. We cover each of the chaos scenarios across different clouds.

      a. Our testing is predominantly done on AWS, Azure and GCP.

2. We run the chaos scenarios during a long running reliability test.

   i. During this test we perform different types of tasks by different users on the cluster.

   ii. We have added the execution of kraken at certain times throughout the long running test, and we monitor the health of the cluster.

   iii. This test can be seen here: https://github.com/openshift/svt/tree/master/reliability-v2

3. We are starting to add test cases that perform chaos testing during an upgrade (not many iterations of this have been completed).
##### startx-lab

**NOTE**: Requests for enhancements and any issues need to be filed at the links mentioned below, given that these integrations are not natively supported in Kraken.

The following content covers the implementation details around how Startx is leveraging Kraken:
* Using kraken as part of a tekton pipeline

You can find on [artifacthub.io](https://artifacthub.io/packages/search?kind=7&ts_query_web=kraken) the
[kraken-scenario](https://artifacthub.io/packages/tekton-task/startx-tekton-catalog/kraken-scenario) `tekton-task`
which can be used to start kraken chaos scenarios as part of a chaos pipeline.

To use this task, you must have:

- OpenShift Pipelines enabled (or the Tekton CRDs loaded for Kubernetes clusters)
- 1 Secret named `kraken-aws-creds` for scenarios using aws
- 1 ConfigMap named `kraken-kubeconfig` with credentials to the targeted cluster
- 1 ConfigMap named `kraken-config-example` with the kraken configuration file (config.yaml)
- 1 ConfigMap named `kraken-common-example` with all kraken related files
- The `pipeline` SA authorized to run with the privileged SCC

You can create these resources using the following sequence:
```bash
oc project default
oc adm policy add-scc-to-user privileged -z pipeline
oc apply -f https://github.com/startxfr/tekton-catalog/raw/stable/task/kraken-scenario/0.1/samples/common.yaml
```

Then you must change the content of the `kraken-aws-creds` secret and of the `kraken-kubeconfig` and `kraken-config-example` configMaps
to reflect your cluster configuration. Refer to the [kraken configuration](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml)
and the [configuration examples](https://github.com/startxfr/tekton-catalog/blob/stable/task/kraken-scenario/0.1/samples/)
for details on how to configure these resources.
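
For example, one way to point `kraken-kubeconfig` at your own cluster is to recreate the ConfigMap from a local kubeconfig file; the file path and the `kubeconfig` key name below are assumptions, so verify them against the sample manifests linked above:

```bash
# Recreate the kraken-kubeconfig ConfigMap from a local kubeconfig file.
# The path and the "kubeconfig" key name are assumptions; check the samples
# linked above for the exact layout the task expects.
oc delete configmap kraken-kubeconfig -n default --ignore-not-found
oc create configmap kraken-kubeconfig -n default \
  --from-file=kubeconfig="$HOME/.kube/target-cluster.kubeconfig"
```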

* Start as a single taskrun

```bash
oc apply -f https://github.com/startxfr/tekton-catalog/raw/stable/task/kraken-scenario/0.1/samples/taskrun.yaml
```
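
To watch the run, one quick option, assuming the Tekton CLI (`tkn`) is installed, is to follow the logs of the latest taskrun; plain `oc` works too:

```bash
# Follow the logs of the most recent taskrun (requires the tkn CLI).
tkn taskrun logs --last -f
# Or check the taskrun status with plain oc.
oc get taskrun
```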

* Start as a pipelinerun

```bash
oc apply -f https://github.com/startxfr/tekton-catalog/raw/stable/task/kraken-scenario/0.1/samples/pipelinerun.yaml
```

* Deploying kraken using a helm-chart

You can find on [artifacthub.io](https://artifacthub.io/packages/search?kind=0&ts_query_web=kraken) the
[chaos-kraken](https://artifacthub.io/packages/helm/startx/chaos-kraken) `helm-chart`
which can be used to deploy kraken chaos scenarios.

The default configuration creates the following resources:

- 1 project named **chaos-kraken**
- 1 scc with privileged context for the kraken deployment
- 1 configmap with the 21 generic kraken scenarios, various scripts and configuration
- 1 configmap with the kubeconfig of the targeted cluster
- 1 job named kraken-test-xxx
- 1 service to the kraken pods
- 1 route to the kraken service

```bash
# Install the startx helm repository
helm repo add startx https://startxfr.github.io/helm-repository/packages/
# Install the kraken project
helm install --set project.enabled=true chaos-kraken-project startx/chaos-kraken
# Deploy the kraken instance
helm install \
  --set kraken.enabled=true \
  --set kraken.aws.credentials.region="eu-west-3" \
  --set kraken.aws.credentials.key_id="AKIAXXXXXXXXXXXXXXXX" \
  --set kraken.aws.credentials.secret="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" \
  --set kraken.kubeconfig.token.server="https://api.mycluster:6443" \
  --set kraken.kubeconfig.token.token="sha256~XXXXXXXXXX_PUT_YOUR_TOKEN_HERE_XXXXXXXXXXXX" \
  -n chaos-kraken \
  chaos-kraken-instance startx/chaos-kraken
```
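
Once the release is installed, a quick way to confirm the chaos job has started, assuming the default **chaos-kraken** namespace from above (the generated job and pod names vary per run, so a placeholder is used):

```bash
# List the job and pods created by the chart in the chaos-kraken namespace.
oc get jobs,pods -n chaos-kraken
# Tail the logs of a kraken pod once its generated name is known.
oc logs -n chaos-kraken <kraken-test-pod-name> --follow
```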