mirror of
https://github.com/krkn-chaos/krkn.git
synced 2026-03-15 08:00:37 +00:00
Compare commits
364 Commits
v1.5.6
...
custom_wei
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
b9d7c8ba12 | ||
|
|
e8075743ab | ||
|
|
ec5511b2db | ||
|
|
4e7dca9474 | ||
|
|
edf0f3d1c9 | ||
|
|
8c9bce6987 | ||
|
|
5608482f1b | ||
|
|
a14d3955a6 | ||
|
|
f655ec1a73 | ||
|
|
dfc350ac03 | ||
|
|
c474b810b2 | ||
|
|
072e8d0e87 | ||
|
|
aee61061ac | ||
|
|
544cac8bbb | ||
|
|
49b1affdb8 | ||
|
|
c1dd43fe87 | ||
|
|
8dad2a3996 | ||
|
|
cebc60f5a8 | ||
|
|
2065443622 | ||
|
|
b6ef7fa052 | ||
|
|
4f305e78aa | ||
|
|
b17e933134 | ||
|
|
beea484597 | ||
|
|
0222b0f161 | ||
|
|
f7e674d5ad | ||
|
|
7aea12ce6c | ||
|
|
625e1e90cf | ||
|
|
a9f1ce8f1b | ||
|
|
66e364e293 | ||
|
|
898ce76648 | ||
|
|
4a0f4e7cab | ||
|
|
819191866d | ||
|
|
37ca4bbce7 | ||
|
|
b9dd4e40d3 | ||
|
|
3fd249bb88 | ||
|
|
773107245c | ||
|
|
05bc201528 | ||
|
|
9a316550e1 | ||
|
|
9c261e2599 | ||
|
|
0cc82dc65d | ||
|
|
269e21e9eb | ||
|
|
d0dbe3354a | ||
|
|
4a0686daf3 | ||
|
|
822bebac0c | ||
|
|
a13150b0f5 | ||
|
|
0443637fe1 | ||
|
|
36585630f2 | ||
|
|
1401724312 | ||
|
|
fa204a515c | ||
|
|
b3a5fc2d53 | ||
|
|
05600b62b3 | ||
|
|
126599e02c | ||
|
|
b3d6a19d24 | ||
|
|
65100f26a7 | ||
|
|
10b6e4663e | ||
|
|
ce52183a26 | ||
|
|
e9ab3b47b3 | ||
|
|
3e14fe07b7 | ||
|
|
d9271a4bcc | ||
|
|
850930631e | ||
|
|
15eee80c55 | ||
|
|
ff3c4f5313 | ||
|
|
4c74df301f | ||
|
|
b60b66de43 | ||
|
|
2458022248 | ||
|
|
18385cba2b | ||
|
|
e7fa6bdebc | ||
|
|
c3f6b1a7ff | ||
|
|
f2ba8b85af | ||
|
|
ba3fdea403 | ||
|
|
42d18a8e04 | ||
|
|
4b3617bd8a | ||
|
|
eb7a1e243c | ||
|
|
197ce43f9a | ||
|
|
eecdeed73c | ||
|
|
ef606d0f17 | ||
|
|
9981c26304 | ||
|
|
4ebfc5dde5 | ||
|
|
4527d073c6 | ||
|
|
93d6967331 | ||
|
|
b462c46b28 | ||
|
|
ab4ae85896 | ||
|
|
6acd6f9bd3 | ||
|
|
787759a591 | ||
|
|
957cb355be | ||
|
|
35609484d4 | ||
|
|
959337eb63 | ||
|
|
f4bdbff9dc | ||
|
|
954202cab7 | ||
|
|
a373dcf453 | ||
|
|
d0c604a516 | ||
|
|
82582f5bc3 | ||
|
|
37f0f1eb8b | ||
|
|
d2eab21f95 | ||
|
|
d84910299a | ||
|
|
48f19c0a0e | ||
|
|
eb86885bcd | ||
|
|
967fd14bd7 | ||
|
|
5cefe80286 | ||
|
|
9ee76ce337 | ||
|
|
fd3e7ee2c8 | ||
|
|
c85c435b5d | ||
|
|
d5284ace25 | ||
|
|
c3098ec80b | ||
|
|
6629c7ec33 | ||
|
|
fb6af04b09 | ||
|
|
dc1215a61b | ||
|
|
f74aef18f8 | ||
|
|
166204e3c5 | ||
|
|
fc7667aef1 | ||
|
|
3eea42770f | ||
|
|
77a46e3869 | ||
|
|
b801308d4a | ||
|
|
97f4c1fd9c | ||
|
|
c54390d8b1 | ||
|
|
543729b18a | ||
|
|
a0ea4dc749 | ||
|
|
a5459792ef | ||
|
|
d434bb26fa | ||
|
|
fee41d404e | ||
|
|
8663ee8893 | ||
|
|
a072f0306a | ||
|
|
8221392356 | ||
|
|
671fc581dd | ||
|
|
11508ce017 | ||
|
|
0d78139fb6 | ||
|
|
a3baffe8ee | ||
|
|
438b08fcd5 | ||
|
|
9b930a02a5 | ||
|
|
194e3b87ee | ||
|
|
8c05e44c23 | ||
|
|
88f8cf49f1 | ||
|
|
015ba4d90d | ||
|
|
26fdbef144 | ||
|
|
d77e6dc79c | ||
|
|
2885645e77 | ||
|
|
84169e2d4e | ||
|
|
05bc404d32 | ||
|
|
e8fd432fc5 | ||
|
|
ec05675e3a | ||
|
|
c91648d35c | ||
|
|
24aa9036b0 | ||
|
|
816363d151 | ||
|
|
90c52f907f | ||
|
|
4f250c9601 | ||
|
|
6480adc00a | ||
|
|
5002f210ae | ||
|
|
62c5afa9a2 | ||
|
|
c109fc0b17 | ||
|
|
fff675f3dd | ||
|
|
c125e5acf7 | ||
|
|
ca6995a1a1 | ||
|
|
50cf91ac9e | ||
|
|
11069c6982 | ||
|
|
106d9bf1ae | ||
|
|
17f832637c | ||
|
|
0e5c8c55a4 | ||
|
|
9d9a6f9b80 | ||
|
|
f8fe2ae5b7 | ||
|
|
77b1dd32c7 | ||
|
|
9df727ccf5 | ||
|
|
70c8fec705 | ||
|
|
0731144a6b | ||
|
|
9337052e7b | ||
|
|
dc8d7ad75b | ||
|
|
1cc44e1f18 | ||
|
|
c8190fd1c1 | ||
|
|
9078b35e46 | ||
|
|
e6b1665aa1 | ||
|
|
c56819365c | ||
|
|
6a657576cb | ||
|
|
f04f1f1101 | ||
|
|
bddbd42f8c | ||
|
|
630dbd805b | ||
|
|
10d26ba50e | ||
|
|
d47286ae21 | ||
|
|
890e3012dd | ||
|
|
d0dafa872d | ||
|
|
149eb8fcd3 | ||
|
|
4c462a8971 | ||
|
|
5bdbf622c3 | ||
|
|
0dcb901da1 | ||
|
|
6e94df9cfc | ||
|
|
87c2b3c8fd | ||
|
|
7e4b2aff65 | ||
|
|
27f0845182 | ||
|
|
4c9cd5bced | ||
|
|
075dbd10c7 | ||
|
|
e080ad2ee2 | ||
|
|
693520f306 | ||
|
|
bf909a7c18 | ||
|
|
abbcfe09ec | ||
|
|
32fb6eec07 | ||
|
|
608b7c847f | ||
|
|
edd0159251 | ||
|
|
cf9f7702ed | ||
|
|
cfe624f153 | ||
|
|
62f50db195 | ||
|
|
aee838d3ac | ||
|
|
3b4d8a13f9 | ||
|
|
a86bb6ab95 | ||
|
|
7f0110972b | ||
|
|
126f4ebb35 | ||
|
|
83d99bbb02 | ||
|
|
2624102d65 | ||
|
|
02587bcbe6 | ||
|
|
c51bf04f9e | ||
|
|
41195b1a60 | ||
|
|
ab80acbee7 | ||
|
|
3573d13ea9 | ||
|
|
9c5251d52f | ||
|
|
a0bba27edc | ||
|
|
0d0143d1e0 | ||
|
|
0004c05f81 | ||
|
|
57a747a34a | ||
|
|
22108ae4e7 | ||
|
|
cecaa1eda3 | ||
|
|
5450ecb914 | ||
|
|
cad6b68f43 | ||
|
|
0eba329305 | ||
|
|
ce8593f2f0 | ||
|
|
9061ddbb5b | ||
|
|
dd4d0d0389 | ||
|
|
0cabe5e91d | ||
|
|
32fe0223ff | ||
|
|
a25736ad08 | ||
|
|
440890d252 | ||
|
|
69bf20fc76 | ||
|
|
2a42a2dc31 | ||
|
|
21ab8d475d | ||
|
|
b024cfde19 | ||
|
|
c7e068a562 | ||
|
|
64cfd2ca4d | ||
|
|
9cb701a616 | ||
|
|
0372013b67 | ||
|
|
4fea1a354d | ||
|
|
667798d588 | ||
|
|
0c30d89a1b | ||
|
|
2ba20fa483 | ||
|
|
97035a765c | ||
|
|
10ba53574e | ||
|
|
0ecba41082 | ||
|
|
491f59d152 | ||
|
|
2549c9a146 | ||
|
|
949f1f09e0 | ||
|
|
959766254d | ||
|
|
0e68dedb12 | ||
|
|
34a676a795 | ||
|
|
e5c5b35db3 | ||
|
|
93d2e60386 | ||
|
|
462c9ac67e | ||
|
|
04e44738d9 | ||
|
|
f810cadad2 | ||
|
|
4b869bad83 | ||
|
|
a36b0c76b2 | ||
|
|
a17e16390c | ||
|
|
f8534d616c | ||
|
|
9670ce82f5 | ||
|
|
95e4b68389 | ||
|
|
0aac6119b0 | ||
|
|
7e5bdfd5cf | ||
|
|
3c207ab2ea | ||
|
|
d91172d9b2 | ||
|
|
a13fb43d94 | ||
|
|
37ee7177bc | ||
|
|
32142cc159 | ||
|
|
34bfc0d3d9 | ||
|
|
736c90e937 | ||
|
|
5e7938ba4a | ||
|
|
b525f83261 | ||
|
|
26460a0dce | ||
|
|
7968c2a776 | ||
|
|
6186555c15 | ||
|
|
9cd086f59c | ||
|
|
1057917731 | ||
|
|
5484828b67 | ||
|
|
d18b6332e5 | ||
|
|
89a0e166f1 | ||
|
|
624f50acd1 | ||
|
|
e02c6d1287 | ||
|
|
04425a8d8a | ||
|
|
f3933f0e62 | ||
|
|
56ff0a8c72 | ||
|
|
9378cd74cd | ||
|
|
4d3491da0f | ||
|
|
d6ce66160b | ||
|
|
ef1a55438b | ||
|
|
d8f54b83a2 | ||
|
|
4870c86515 | ||
|
|
6ae17cf678 | ||
|
|
ce9f8aa050 | ||
|
|
05148317c1 | ||
|
|
5f836f294b | ||
|
|
cfa1bb09a0 | ||
|
|
5ddfff5a85 | ||
|
|
7d18487228 | ||
|
|
08de42c91a | ||
|
|
dc7d5bb01b | ||
|
|
ea3444d375 | ||
|
|
7b660a0878 | ||
|
|
5fe0655f22 | ||
|
|
5df343c183 | ||
|
|
f364e9f283 | ||
|
|
86a7427606 | ||
|
|
31266fbc3e | ||
|
|
57de3769e7 | ||
|
|
42fc8eea40 | ||
|
|
22d56e2cdc | ||
|
|
a259b68221 | ||
|
|
052f83e7d9 | ||
|
|
fb3bbe4e26 | ||
|
|
96ba9be4b8 | ||
|
|
58d5d1d8dc | ||
|
|
3fe22a0d8f | ||
|
|
21b89a32a7 | ||
|
|
dbe3ea9718 | ||
|
|
a142f6e7a4 | ||
|
|
2610a7af67 | ||
|
|
f827f65132 | ||
|
|
aa6cbbc11a | ||
|
|
e17354e54d | ||
|
|
2dfa5cb0cd | ||
|
|
0799008cd5 | ||
|
|
2327531e46 | ||
|
|
2c14c48a63 | ||
|
|
ab98e416a6 | ||
|
|
19ad2d1a3d | ||
|
|
804d7cbf58 | ||
|
|
54af2fc6ff | ||
|
|
b79e526cfd | ||
|
|
a5efd7d06c | ||
|
|
a1b81bd382 | ||
|
|
782440c8c4 | ||
|
|
7e2755cbb7 | ||
|
|
2babb53d6e | ||
|
|
85f76e9193 | ||
|
|
8bf21392f1 | ||
|
|
606fb60811 | ||
|
|
fac7c3c6fb | ||
|
|
8dd9b30030 | ||
|
|
2d99f17aaf | ||
|
|
50742a793c | ||
|
|
ba6a844544 | ||
|
|
7e7a917dba | ||
|
|
b9c0bb39c7 | ||
|
|
706a886151 | ||
|
|
a1cf9e2c00 | ||
|
|
0f5dfcb823 | ||
|
|
1e1015e6e7 | ||
|
|
c71ce31779 | ||
|
|
1298f220a6 | ||
|
|
24059fb731 | ||
|
|
ab951adb78 | ||
|
|
a9a7fb7e51 | ||
|
|
5a8d5b0fe1 | ||
|
|
c440dc4b51 | ||
|
|
b174c51ee0 | ||
|
|
fec0434ce1 | ||
|
|
1067d5ec8d | ||
|
|
85ea1ef7e1 | ||
|
|
2e38b8b033 | ||
|
|
c7ea366756 | ||
|
|
67d4ee9fa2 | ||
|
|
fa59834bae |
5
.coveragerc
Normal file
5
.coveragerc
Normal file
@@ -0,0 +1,5 @@
|
||||
[run]
|
||||
omit =
|
||||
tests/*
|
||||
krkn/tests/**
|
||||
CI/tests_v2/*
|
||||
1
.github/CODEOWNERS
vendored
Normal file
1
.github/CODEOWNERS
vendored
Normal file
@@ -0,0 +1 @@
|
||||
* @paigerube14 @tsebastiani @chaitanyaenr
|
||||
43
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
43
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
@@ -0,0 +1,43 @@
|
||||
---
|
||||
name: Bug report
|
||||
about: Create a report an issue
|
||||
title: "[BUG]"
|
||||
labels: bug
|
||||
---
|
||||
|
||||
# Bug Description
|
||||
|
||||
## **Describe the bug**
|
||||
|
||||
A clear and concise description of what the bug is.
|
||||
|
||||
## **To Reproduce**
|
||||
|
||||
Any specific steps used to reproduce the behavior
|
||||
|
||||
### Scenario File
|
||||
Scenario file(s) that were specified in your config file (can be starred (*) with confidential information )
|
||||
```yaml
|
||||
<config>
|
||||
|
||||
```
|
||||
|
||||
### Config File
|
||||
Config file you used when error was seen (the default used is config/config.yaml)
|
||||
|
||||
```yaml
|
||||
<config>
|
||||
|
||||
```
|
||||
|
||||
## **Expected behavior**
|
||||
|
||||
A clear and concise description of what you expected to happen.
|
||||
|
||||
## **Krkn Output**
|
||||
|
||||
Krkn output to help show your problem
|
||||
|
||||
## **Additional context**
|
||||
|
||||
Add any other context about the problem
|
||||
16
.github/ISSUE_TEMPLATE/feature.md
vendored
Normal file
16
.github/ISSUE_TEMPLATE/feature.md
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
---
|
||||
name: New Feature Request
|
||||
about: Suggest an idea for this project
|
||||
title: ''
|
||||
labels: enhancement
|
||||
assignees: ''
|
||||
---
|
||||
|
||||
**Is your feature request related to a problem? Please describe.**
|
||||
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
|
||||
|
||||
**Describe the solution you'd like**
|
||||
A clear and concise description of what you want to see added/changed. Ex. new parameter in [xxx] scenario, new scenario that does [xxx]
|
||||
|
||||
**Additional context**
|
||||
Add any other context about the feature request here.
|
||||
47
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
47
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
# Type of change
|
||||
|
||||
- [ ] Refactor
|
||||
- [ ] New feature
|
||||
- [ ] Bug fix
|
||||
- [ ] Optimization
|
||||
|
||||
# Description
|
||||
<-- Provide a brief description of the changes made in this PR. -->
|
||||
|
||||
## Related Tickets & Documents
|
||||
If no related issue, please create one and start the converasation on wants of
|
||||
|
||||
- Related Issue #:
|
||||
- Closes #:
|
||||
|
||||
# Documentation
|
||||
- [ ] **Is documentation needed for this update?**
|
||||
|
||||
If checked, a documentation PR must be created and merged in the [website repository](https://github.com/krkn-chaos/website/).
|
||||
|
||||
## Related Documentation PR (if applicable)
|
||||
<-- Add the link to the corresponding documentation PR in the website repository -->
|
||||
|
||||
# Checklist before requesting a review
|
||||
[ ] Ensure the changes and proposed solution have been discussed in the relevant issue and have received acknowledgment from the community or maintainers. See [contributing guidelines](https://krkn-chaos.dev/docs/contribution-guidelines/)
|
||||
See [testing your changes](https://krkn-chaos.dev/docs/developers-guide/testing-changes/) and run on any Kubernetes or OpenShift cluster to validate your changes
|
||||
- [ ] I have performed a self-review of my code by running krkn and specific scenario
|
||||
- [ ] If it is a core feature, I have added thorough unit tests with above 80% coverage
|
||||
|
||||
*REQUIRED*:
|
||||
Description of combination of tests performed and output of run
|
||||
|
||||
```bash
|
||||
python run_kraken.py
|
||||
...
|
||||
<---insert test results output--->
|
||||
```
|
||||
|
||||
OR
|
||||
|
||||
|
||||
```bash
|
||||
python -m coverage run -a -m unittest discover -s tests -v
|
||||
...
|
||||
<---insert test results output--->
|
||||
```
|
||||
7
.github/release-template.md
vendored
Normal file
7
.github/release-template.md
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
## Release {VERSION}
|
||||
|
||||
### Download Artifacts
|
||||
- 📦 Krkn sources (noarch): [krkn-{VERSION}-src.tar.gz](https://krkn-chaos.gateway.scarf.sh/krkn-src-{VERSION}.tar.gz)
|
||||
|
||||
### Changes
|
||||
{CHANGES}
|
||||
38
.github/workflows/docker-image.yml
vendored
38
.github/workflows/docker-image.yml
vendored
@@ -1,8 +1,7 @@
|
||||
name: Docker Image CI
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
tags: ['v[0-9].[0-9]+.[0-9]+']
|
||||
pull_request:
|
||||
|
||||
jobs:
|
||||
@@ -12,30 +11,45 @@ jobs:
|
||||
- name: Check out code
|
||||
uses: actions/checkout@v3
|
||||
- name: Build the Docker images
|
||||
if: startsWith(github.ref, 'refs/tags')
|
||||
run: |
|
||||
docker build --no-cache -t quay.io/krkn-chaos/krkn containers/
|
||||
./containers/compile_dockerfile.sh
|
||||
docker build --no-cache -t quay.io/krkn-chaos/krkn containers/ --build-arg TAG=${GITHUB_REF#refs/tags/}
|
||||
docker tag quay.io/krkn-chaos/krkn quay.io/redhat-chaos/krkn
|
||||
docker tag quay.io/krkn-chaos/krkn quay.io/krkn-chaos/krkn:${GITHUB_REF#refs/tags/}
|
||||
docker tag quay.io/krkn-chaos/krkn quay.io/redhat-chaos/krkn:${GITHUB_REF#refs/tags/}
|
||||
|
||||
- name: Test Build the Docker images
|
||||
if: ${{ github.event_name == 'pull_request' }}
|
||||
run: |
|
||||
./containers/compile_dockerfile.sh
|
||||
docker build --no-cache -t quay.io/krkn-chaos/krkn containers/ --build-arg PR_NUMBER=${{ github.event.pull_request.number }}
|
||||
- name: Login in quay
|
||||
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
|
||||
if: startsWith(github.ref, 'refs/tags')
|
||||
run: docker login quay.io -u ${QUAY_USER} -p ${QUAY_TOKEN}
|
||||
env:
|
||||
QUAY_USER: ${{ secrets.QUAY_USERNAME }}
|
||||
QUAY_TOKEN: ${{ secrets.QUAY_PASSWORD }}
|
||||
- name: Push the KrknChaos Docker images
|
||||
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
|
||||
run: docker push quay.io/krkn-chaos/krkn
|
||||
if: startsWith(github.ref, 'refs/tags')
|
||||
run: |
|
||||
docker push quay.io/krkn-chaos/krkn
|
||||
docker push quay.io/krkn-chaos/krkn:${GITHUB_REF#refs/tags/}
|
||||
- name: Login in to redhat-chaos quay
|
||||
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
|
||||
if: startsWith(github.ref, 'refs/tags/v')
|
||||
run: docker login quay.io -u ${QUAY_USER} -p ${QUAY_TOKEN}
|
||||
env:
|
||||
QUAY_USER: ${{ secrets.QUAY_USER_1 }}
|
||||
QUAY_TOKEN: ${{ secrets.QUAY_TOKEN_1 }}
|
||||
- name: Push the RedHat Chaos Docker images
|
||||
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
|
||||
run: docker push quay.io/redhat-chaos/krkn
|
||||
if: startsWith(github.ref, 'refs/tags')
|
||||
run: |
|
||||
docker push quay.io/redhat-chaos/krkn
|
||||
docker push quay.io/redhat-chaos/krkn:${GITHUB_REF#refs/tags/}
|
||||
- name: Rebuild krkn-hub
|
||||
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
|
||||
if: startsWith(github.ref, 'refs/tags')
|
||||
uses: redhat-chaos/actions/krkn-hub@main
|
||||
with:
|
||||
QUAY_USER: ${{ secrets.QUAY_USER_1 }}
|
||||
QUAY_TOKEN: ${{ secrets.QUAY_TOKEN_1 }}
|
||||
QUAY_USER: ${{ secrets.QUAY_USERNAME }}
|
||||
QUAY_TOKEN: ${{ secrets.QUAY_PASSWORD }}
|
||||
AUTOPUSH: ${{ secrets.AUTOPUSH }}
|
||||
|
||||
60
.github/workflows/release.yml
vendored
Normal file
60
.github/workflows/release.yml
vendored
Normal file
@@ -0,0 +1,60 @@
|
||||
name: Create Release
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'v*'
|
||||
jobs:
|
||||
release:
|
||||
permissions:
|
||||
contents: write
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: calculate previous tag
|
||||
run: |
|
||||
git fetch --tags origin
|
||||
PREVIOUS_TAG=$(git tag --sort=-creatordate | sed -n '2 p')
|
||||
echo $PREVIOUS_TAG
|
||||
echo "PREVIOUS_TAG=$PREVIOUS_TAG" >> "$GITHUB_ENV"
|
||||
|
||||
- name: generate release notes from template
|
||||
id: release-notes
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
NOTES=$(gh api \
|
||||
--method POST \
|
||||
-H "Accept: application/vnd.github+json" \
|
||||
-H "X-GitHub-Api-Version: 2022-11-28" \
|
||||
/repos/krkn-chaos/krkn/releases/generate-notes \
|
||||
-f "tag_name=${{ github.ref_name }}" -f "target_commitish=main" -f "previous_tag_name=${{ env.PREVIOUS_TAG }}" | jq -r .body)
|
||||
echo "NOTES<<EOF" >> $GITHUB_ENV
|
||||
echo "$NOTES" >> $GITHUB_ENV
|
||||
echo "EOF" >> $GITHUB_ENV
|
||||
|
||||
- name: replace placeholders in template
|
||||
run: |
|
||||
echo "${{ env.NOTES }}"
|
||||
TEMPLATE=$(cat .github/release-template.md)
|
||||
VERSION=${{ github.ref_name }}
|
||||
NOTES="${{ env.NOTES }}"
|
||||
OUTPUT=${TEMPLATE//\{VERSION\}/$VERSION}
|
||||
OUTPUT=${OUTPUT//\{CHANGES\}/$NOTES}
|
||||
echo "$OUTPUT" > release-notes.md
|
||||
- name: create release
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
gh release create ${{ github.ref_name }} --title "${{ github.ref_name }}" -F release-notes.md
|
||||
|
||||
- name: Install Syft
|
||||
run: |
|
||||
curl -sSfL https://raw.githubusercontent.com/anchore/syft/main/install.sh | sudo sh -s -- -b /usr/local/bin
|
||||
|
||||
- name: Generate SBOM
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
syft . --scope all-layers --output cyclonedx-json > sbom.json
|
||||
echo "SBOM generated successfully!"
|
||||
gh release upload ${{ github.ref_name }} sbom.json
|
||||
45
.github/workflows/require-docs.yml
vendored
Normal file
45
.github/workflows/require-docs.yml
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
name: Require Documentation Update
|
||||
on:
|
||||
pull_request:
|
||||
types: [opened, edited, synchronize]
|
||||
branches:
|
||||
- main
|
||||
jobs:
|
||||
check-docs:
|
||||
name: Check Documentation Update
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Check if Documentation is Required
|
||||
id: check_docs
|
||||
run: |
|
||||
echo "Checking PR body for documentation checkbox..."
|
||||
# Read the PR body from the GitHub event payload
|
||||
if echo "${{ github.event.pull_request.body }}" | grep -qi '\[x\].*documentation needed'; then
|
||||
echo "Documentation required detected."
|
||||
echo "docs_required=true" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "Documentation not required."
|
||||
echo "docs_required=false" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
- name: Enforce Documentation Update (if required)
|
||||
if: steps.check_docs.outputs.docs_required == 'true'
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
run: |
|
||||
# Retrieve feature branch and repository owner from the GitHub context
|
||||
FEATURE_BRANCH="${{ github.head_ref }}"
|
||||
REPO_OWNER="${{ github.repository_owner }}"
|
||||
WEBSITE_REPO="website"
|
||||
echo "Searching for a merged documentation PR for feature branch: $FEATURE_BRANCH in $REPO_OWNER/$WEBSITE_REPO..."
|
||||
MERGED_PR=$(gh pr list --repo "$REPO_OWNER/$WEBSITE_REPO" --state merged --json headRefName,title,url | jq -r \
|
||||
--arg FEATURE_BRANCH "$FEATURE_BRANCH" '.[] | select(.title | contains($FEATURE_BRANCH)) | .url')
|
||||
if [[ -z "$MERGED_PR" ]]; then
|
||||
echo ":x: Documentation PR for branch '$FEATURE_BRANCH' is required and has not been merged."
|
||||
exit 1
|
||||
else
|
||||
echo ":white_check_mark: Found merged documentation PR: $MERGED_PR"
|
||||
fi
|
||||
52
.github/workflows/stale.yml
vendored
Normal file
52
.github/workflows/stale.yml
vendored
Normal file
@@ -0,0 +1,52 @@
|
||||
name: Manage Stale Issues and Pull Requests
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# Run daily at 1:00 AM UTC
|
||||
- cron: '0 1 * * *'
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
issues: write
|
||||
pull-requests: write
|
||||
|
||||
jobs:
|
||||
stale:
|
||||
name: Mark and Close Stale Issues and PRs
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Mark and close stale issues and PRs
|
||||
uses: actions/stale@v9
|
||||
with:
|
||||
days-before-issue-stale: 60
|
||||
days-before-issue-close: 14
|
||||
stale-issue-label: 'stale'
|
||||
stale-issue-message: |
|
||||
This issue has been automatically marked as stale because it has not had any activity in the last 60 days.
|
||||
It will be closed in 14 days if no further activity occurs.
|
||||
If this issue is still relevant, please leave a comment or remove the stale label.
|
||||
Thank you for your contributions to krkn!
|
||||
close-issue-message: |
|
||||
This issue has been automatically closed due to inactivity.
|
||||
If you believe this issue is still relevant, please feel free to reopen it or create a new issue with updated information.
|
||||
Thank you for your understanding!
|
||||
close-issue-reason: 'not_planned'
|
||||
|
||||
days-before-pr-stale: 90
|
||||
days-before-pr-close: 14
|
||||
stale-pr-label: 'stale'
|
||||
stale-pr-message: |
|
||||
This pull request has been automatically marked as stale because it has not had any activity in the last 90 days.
|
||||
It will be closed in 14 days if no further activity occurs.
|
||||
If this PR is still relevant, please rebase it, address any pending reviews, or leave a comment.
|
||||
Thank you for your contributions to krkn!
|
||||
close-pr-message: |
|
||||
This pull request has been automatically closed due to inactivity.
|
||||
If you believe this PR is still relevant, please feel free to reopen it or create a new pull request with updated changes.
|
||||
Thank you for your understanding!
|
||||
|
||||
# Exempt labels
|
||||
exempt-issue-labels: 'bug,enhancement,good first issue'
|
||||
exempt-pr-labels: 'pending discussions,hold'
|
||||
|
||||
remove-stale-when-updated: true
|
||||
189
.github/workflows/tests.yml
vendored
189
.github/workflows/tests.yml
vendored
@@ -1,8 +1,12 @@
|
||||
name: Functional & Unit Tests
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
jobs:
|
||||
tests:
|
||||
# Common steps
|
||||
name: Functional & Unit Tests
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
@@ -10,47 +14,51 @@ jobs:
|
||||
uses: actions/checkout@v3
|
||||
- name: Create multi-node KinD cluster
|
||||
uses: redhat-chaos/actions/kind@main
|
||||
- name: Install Helm & add repos
|
||||
run: |
|
||||
curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
|
||||
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
|
||||
helm repo add stable https://charts.helm.sh/stable
|
||||
helm repo update
|
||||
- name: Deploy prometheus & Port Forwarding
|
||||
uses: redhat-chaos/actions/prometheus@main
|
||||
- name: Deploy Elasticsearch
|
||||
with:
|
||||
ELASTIC_PORT: ${{ env.ELASTIC_PORT }}
|
||||
RUN_ID: ${{ github.run_id }}
|
||||
uses: redhat-chaos/actions/elastic@main
|
||||
- name: Download elastic password
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: elastic_password_${{ github.run_id }}
|
||||
- name: Set elastic password on env
|
||||
run: |
|
||||
kubectl create namespace prometheus-k8s
|
||||
helm install \
|
||||
--wait --timeout 360s \
|
||||
kind-prometheus \
|
||||
prometheus-community/kube-prometheus-stack \
|
||||
--namespace prometheus-k8s \
|
||||
--set prometheus.service.nodePort=30000 \
|
||||
--set prometheus.service.type=NodePort \
|
||||
--set grafana.service.nodePort=31000 \
|
||||
--set grafana.service.type=NodePort \
|
||||
--set alertmanager.service.nodePort=32000 \
|
||||
--set alertmanager.service.type=NodePort \
|
||||
--set prometheus-node-exporter.service.nodePort=32001 \
|
||||
--set prometheus-node-exporter.service.type=NodePort
|
||||
|
||||
SELECTOR=`kubectl -n prometheus-k8s get service kind-prometheus-kube-prome-prometheus -o wide --no-headers=true | awk '{ print $7 }'`
|
||||
POD_NAME=`kubectl -n prometheus-k8s get pods --selector="$SELECTOR" --no-headers=true | awk '{ print $1 }'`
|
||||
kubectl -n prometheus-k8s port-forward $POD_NAME 9090:9090 &
|
||||
sleep 5
|
||||
ELASTIC_PASSWORD=$(cat elastic_password.txt)
|
||||
echo "ELASTIC_PASSWORD=$ELASTIC_PASSWORD" >> "$GITHUB_ENV"
|
||||
- name: Install Python
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: '3.9'
|
||||
python-version: '3.11'
|
||||
architecture: 'x64'
|
||||
- name: Install environment
|
||||
run: |
|
||||
sudo apt-get install build-essential python3-dev
|
||||
pip install --upgrade pip
|
||||
pip install -r requirements.txt
|
||||
# - name: Run unit tests
|
||||
# run: python -m coverage run -a -m unittest discover -s tests -v
|
||||
pip install coverage
|
||||
|
||||
- name: Deploy test workloads
|
||||
run: |
|
||||
# es_pod_name=$(kubectl get pods -l "app=elasticsearch-master" -o name)
|
||||
# echo "POD_NAME: $es_pod_name"
|
||||
# kubectl --namespace default port-forward $es_pod_name 9200 &
|
||||
# prom_name=$(kubectl get pods -n monitoring -l "app.kubernetes.io/name=prometheus" -o name)
|
||||
# kubectl --namespace monitoring port-forward $prom_name 9090 &
|
||||
|
||||
# Wait for Elasticsearch to be ready
|
||||
echo "Waiting for Elasticsearch to be ready..."
|
||||
for i in {1..30}; do
|
||||
if curl -k -s -u elastic:$ELASTIC_PASSWORD https://localhost:9200/_cluster/health > /dev/null 2>&1; then
|
||||
echo "Elasticsearch is ready!"
|
||||
break
|
||||
fi
|
||||
echo "Attempt $i: Elasticsearch not ready yet, waiting..."
|
||||
sleep 2
|
||||
done
|
||||
kubectl apply -f CI/templates/outage_pod.yaml
|
||||
kubectl wait --for=condition=ready pod -l scenario=outage --timeout=300s
|
||||
kubectl apply -f CI/templates/container_scenario_pod.yaml
|
||||
@@ -58,50 +66,145 @@ jobs:
|
||||
kubectl create namespace namespace-scenario
|
||||
kubectl apply -f CI/templates/time_pod.yaml
|
||||
kubectl wait --for=condition=ready pod -l scenario=time-skew --timeout=300s
|
||||
kubectl apply -f CI/templates/service_hijacking.yaml
|
||||
kubectl wait --for=condition=ready pod -l "app.kubernetes.io/name=proxy" --timeout=300s
|
||||
kubectl apply -f CI/legacy/scenarios/volume_scenario.yaml
|
||||
kubectl wait --for=condition=ready pod kraken-test-pod -n kraken --timeout=300s
|
||||
- name: Get Kind nodes
|
||||
run: |
|
||||
kubectl get nodes --show-labels=true
|
||||
# Pull request only steps
|
||||
- name: Run unit tests
|
||||
run: python -m coverage run -a -m unittest discover -s tests -v
|
||||
|
||||
- name: Setup Functional Tests
|
||||
run: |
|
||||
yq -i '.kraken.distribution="kubernetes"' CI/config/common_test_config.yaml
|
||||
yq -i '.kraken.port="8081"' CI/config/common_test_config.yaml
|
||||
yq -i '.kraken.signal_address="0.0.0.0"' CI/config/common_test_config.yaml
|
||||
yq -i '.kraken.performance_monitoring="localhost:9090"' CI/config/common_test_config.yaml
|
||||
yq -i '.elastic.elastic_port=9200' CI/config/common_test_config.yaml
|
||||
yq -i '.elastic.elastic_url="https://localhost"' CI/config/common_test_config.yaml
|
||||
yq -i '.elastic.enable_elastic=False' CI/config/common_test_config.yaml
|
||||
yq -i '.elastic.password="${{env.ELASTIC_PASSWORD}}"' CI/config/common_test_config.yaml
|
||||
yq -i '.performance_monitoring.prometheus_url="http://localhost:9090"' CI/config/common_test_config.yaml
|
||||
echo "test_app_outages" > ./CI/tests/functional_tests
|
||||
echo "test_container" >> ./CI/tests/functional_tests
|
||||
echo "test_namespace" >> ./CI/tests/functional_tests
|
||||
echo "test_net_chaos" >> ./CI/tests/functional_tests
|
||||
echo "test_time" >> ./CI/tests/functional_tests
|
||||
echo "test_arca_cpu_hog" >> ./CI/tests/functional_tests
|
||||
echo "test_arca_memory_hog" >> ./CI/tests/functional_tests
|
||||
echo "test_arca_io_hog" >> ./CI/tests/functional_tests
|
||||
echo "test_container" >> ./CI/tests/functional_tests
|
||||
echo "test_cpu_hog" >> ./CI/tests/functional_tests
|
||||
echo "test_customapp_pod" >> ./CI/tests/functional_tests
|
||||
echo "test_io_hog" >> ./CI/tests/functional_tests
|
||||
echo "test_memory_hog" >> ./CI/tests/functional_tests
|
||||
echo "test_namespace" >> ./CI/tests/functional_tests
|
||||
echo "test_net_chaos" >> ./CI/tests/functional_tests
|
||||
echo "test_node" >> ./CI/tests/functional_tests
|
||||
echo "test_service_hijacking" >> ./CI/tests/functional_tests
|
||||
echo "test_pod_network_filter" >> ./CI/tests/functional_tests
|
||||
echo "test_pod_server" >> ./CI/tests/functional_tests
|
||||
echo "test_time" >> ./CI/tests/functional_tests
|
||||
echo "test_node_network_chaos" >> ./CI/tests/functional_tests
|
||||
echo "test_pod_network_chaos" >> ./CI/tests/functional_tests
|
||||
echo "test_cerberus_unhealthy" >> ./CI/tests/functional_tests
|
||||
echo "test_pod_error" >> ./CI/tests/functional_tests
|
||||
echo "test_pod" >> ./CI/tests/functional_tests
|
||||
# echo "test_pvc" >> ./CI/tests/functional_tests
|
||||
|
||||
|
||||
# Push on main only steps + all other functional to collect coverage
|
||||
# for the badge
|
||||
- name: Configure AWS Credentials
|
||||
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
|
||||
uses: aws-actions/configure-aws-credentials@v4
|
||||
with:
|
||||
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
aws-region : ${{ secrets.AWS_REGION }}
|
||||
- name: Setup Post Merge Request Functional Tests
|
||||
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
|
||||
run: |
|
||||
yq -i '.telemetry.username="${{secrets.TELEMETRY_USERNAME}}"' CI/config/common_test_config.yaml
|
||||
yq -i '.telemetry.password="${{secrets.TELEMETRY_PASSWORD}}"' CI/config/common_test_config.yaml
|
||||
echo "test_telemetry" >> ./CI/tests/functional_tests
|
||||
# Final common steps
|
||||
- name: Run Functional tests
|
||||
env:
|
||||
AWS_BUCKET: ${{ secrets.AWS_BUCKET }}
|
||||
run: |
|
||||
./CI/run.sh
|
||||
cat ./CI/results.markdown >> $GITHUB_STEP_SUMMARY
|
||||
echo >> $GITHUB_STEP_SUMMARY
|
||||
- name: Run Unit tests
|
||||
run: python -m coverage run -a -m unittest discover -s tests -v
|
||||
- name: Upload CI logs
|
||||
uses: actions/upload-artifact@v3
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ci-logs
|
||||
path: CI/out
|
||||
if-no-files-found: error
|
||||
- name: Collect coverage report
|
||||
if: ${{ always() }}
|
||||
run: |
|
||||
python -m coverage html
|
||||
python -m coverage json
|
||||
- name: Publish coverage report to job summary
|
||||
if: ${{ always() }}
|
||||
run: |
|
||||
pip install html2text
|
||||
html2text --ignore-images --ignore-links -b 0 htmlcov/index.html >> $GITHUB_STEP_SUMMARY
|
||||
- name: Upload coverage data
|
||||
uses: actions/upload-artifact@v3
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: coverage
|
||||
path: htmlcov
|
||||
if-no-files-found: error
|
||||
- name: Upload json coverage
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: coverage.json
|
||||
path: coverage.json
|
||||
if-no-files-found: error
|
||||
- name: Check CI results
|
||||
run: grep Fail CI/results.markdown && false || true
|
||||
if: ${{ always() }}
|
||||
run: "! grep Fail CI/results.markdown"
|
||||
|
||||
badge:
|
||||
permissions:
|
||||
contents: write
|
||||
name: Generate Coverage Badge
|
||||
runs-on: ubuntu-latest
|
||||
needs:
|
||||
- tests
|
||||
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
|
||||
steps:
|
||||
- name: Check out doc repo
|
||||
uses: actions/checkout@master
|
||||
with:
|
||||
repository: krkn-chaos/krkn-lib-docs
|
||||
path: krkn-lib-docs
|
||||
ssh-key: ${{ secrets.KRKN_LIB_DOCS_PRIV_KEY }}
|
||||
- name: Download json coverage
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: coverage.json
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: '3.11'
|
||||
- name: Copy badge on GitHub Page Repo
|
||||
env:
|
||||
COLOR: yellow
|
||||
run: |
|
||||
# generate coverage badge on previously calculated total coverage
|
||||
# and copy in the docs page
|
||||
export TOTAL=$(python -c "import json;print(json.load(open('coverage.json'))['totals']['percent_covered_display'])")
|
||||
[[ $TOTAL > 40 ]] && COLOR=green
|
||||
echo "TOTAL: $TOTAL"
|
||||
echo "COLOR: $COLOR"
|
||||
curl "https://img.shields.io/badge/coverage-$TOTAL%25-$COLOR" > ./krkn-lib-docs/coverage_badge_krkn.svg
|
||||
- name: Push updated Coverage Badge
|
||||
run: |
|
||||
cd krkn-lib-docs
|
||||
git add .
|
||||
git config user.name "krkn-chaos"
|
||||
git config user.email "krkn-actions@users.noreply.github.com"
|
||||
git commit -m "[KRKN] Coverage Badge ${GITHUB_REF##*/}" || echo "no changes to commit"
|
||||
git push
|
||||
|
||||
|
||||
|
||||
53
.github/workflows/tests_v2.yml
vendored
Normal file
53
.github/workflows/tests_v2.yml
vendored
Normal file
@@ -0,0 +1,53 @@
|
||||
name: Tests v2 (pytest functional)
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
jobs:
|
||||
tests-v2:
|
||||
name: Tests v2 (pytest functional)
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Check out code
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Create KinD cluster
|
||||
uses: redhat-chaos/actions/kind@main
|
||||
|
||||
- name: Pre-load test images into KinD
|
||||
run: |
|
||||
docker pull nginx:alpine
|
||||
kind load docker-image nginx:alpine
|
||||
docker pull quay.io/krkn-chaos/krkn:tools
|
||||
kind load docker-image quay.io/krkn-chaos/krkn:tools
|
||||
|
||||
- name: Install Python
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: '3.11'
|
||||
architecture: 'x64'
|
||||
cache: 'pip'
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
sudo apt-get install -y build-essential python3-dev
|
||||
pip install --upgrade pip
|
||||
pip install -r requirements.txt
|
||||
pip install -r CI/tests_v2/requirements.txt
|
||||
|
||||
- name: Run tests_v2
|
||||
run: |
|
||||
KRKN_TEST_COVERAGE=1 python -m pytest CI/tests_v2/ -v --timeout=300 --reruns=1 --reruns-delay=5 \
|
||||
--html=CI/tests_v2/report.html -n auto --junitxml=CI/tests_v2/results.xml
|
||||
|
||||
- name: Upload tests_v2 artifacts
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: tests-v2-results
|
||||
path: |
|
||||
CI/tests_v2/report.html
|
||||
CI/tests_v2/results.xml
|
||||
CI/tests_v2/assets/
|
||||
if-no-files-found: ignore
|
||||
6
.gitignore
vendored
6
.gitignore
vendored
@@ -16,6 +16,8 @@ __pycache__/*
|
||||
*.out
|
||||
kube-burner*
|
||||
kube_burner*
|
||||
recommender_*.json
|
||||
resiliency*.json
|
||||
|
||||
# Project files
|
||||
.ropeproject
|
||||
@@ -63,6 +65,10 @@ CI/out/*
|
||||
CI/ci_results
|
||||
CI/legacy/*node.yaml
|
||||
CI/results.markdown
|
||||
# CI tests_v2 (pytest-html / pytest outputs)
|
||||
CI/tests_v2/results.xml
|
||||
CI/tests_v2/report.html
|
||||
CI/tests_v2/assets/
|
||||
|
||||
#env
|
||||
chaos/*
|
||||
|
||||
9
ADOPTERS.md
Normal file
9
ADOPTERS.md
Normal file
@@ -0,0 +1,9 @@
|
||||
# Krkn Adopters
|
||||
|
||||
This is a list of organizations that have publicly acknowledged usage of Krkn and shared details of how they are leveraging it in their environment for chaos engineering use cases. Do you want to add yourself to this list? Please fork the repository and open a PR with the required change.
|
||||
|
||||
| Organization | Since | Website | Use-Case |
|
||||
|:-|:-|:-|:-|
|
||||
| MarketAxess | 2024 | https://www.marketaxess.com/ | Kraken enables us to achieve our goal of increasing the reliability of our cloud products on Kubernetes. The tool allows us to automatically run various chaos scenarios, identify resilience and performance bottlenecks, and seamlessly restore the system to its original state once scenarios finish. These chaos scenarios include pod disruptions, node (EC2) outages, simulating availability zone (AZ) outages, and filling up storage spaces like EBS and EFS. The community is highly responsive to requests and works on expanding the tool's capabilities. MarketAxess actively contributes to the project, adding features such as the ability to leverage existing network ACLs and proposing several feature improvements to enhance test coverage. |
|
||||
| Red Hat Openshift | 2020 | https://www.redhat.com/ | Kraken is a highly reliable chaos testing tool used to ensure the quality and resiliency of Red Hat Openshift. The engineering team runs all the test scenarios under Kraken on different cloud platforms on both self-managed and cloud services environments prior to the release of a new version of the product. The team also contributes to the Kraken project consistently which helps the test scenarios to keep up with the new features introduced to the product. Inclusion of this test coverage has contributed to gaining the trust of new and existing customers of the product. |
|
||||
| IBM | 2023 | https://www.ibm.com/ | While working on AI for Chaos Testing at IBM Research, we closely collaborated with the Kraken (Krkn) team to advance intelligent chaos engineering. Our contributions included developing AI-enabled chaos injection strategies and integrating reinforcement learning (RL)-based fault search techniques into the Krkn tool, enabling it to identify and explore system vulnerabilities more efficiently. Kraken stands out as one of the most user-friendly and effective tools for chaos engineering, and the Kraken team’s deep technical involvement played a crucial role in the success of this collaboration—helping bridge cutting-edge AI research with practical, real-world system reliability testing. |
|
||||
@@ -1,27 +1,31 @@
|
||||
kraken:
|
||||
distribution: openshift # Distribution can be kubernetes or openshift.
|
||||
distribution: kubernetes # Distribution can be kubernetes or openshift.
|
||||
kubeconfig_path: ~/.kube/config # Path to kubeconfig.
|
||||
exit_on_failure: False # Exit when a post action scenario fails.
|
||||
litmus_version: v1.13.6 # Litmus version to install.
|
||||
litmus_uninstall: False # If you want to uninstall litmus if failure.
|
||||
publish_kraken_status: True # Can be accessed at http://0.0.0.0:8081
|
||||
signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer docs/signal.md for more details
|
||||
signal_address: 0.0.0.0 # Signal listening address
|
||||
port: 8081 # Signal port
|
||||
auto_rollback: True # Enable auto rollback for scenarios.
|
||||
rollback_versions_directory: /tmp/kraken-rollback # Directory to store rollback version files.
|
||||
chaos_scenarios: # List of policies/chaos scenarios to load.
|
||||
- $scenario_type: # List of chaos pod scenarios to load.
|
||||
- $scenario_file
|
||||
$post_config
|
||||
cerberus:
|
||||
cerberus_enabled: False # Enable it when cerberus is previously installed.
|
||||
cerberus_url: # When cerberus_enabled is set to True, provide the url where cerberus publishes go/no-go signal.
|
||||
|
||||
performance_monitoring:
|
||||
deploy_dashboards: False # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift.
|
||||
repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
|
||||
capture_metrics: False
|
||||
metrics_profile_path: config/metrics-aggregated.yaml
|
||||
prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
|
||||
prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
|
||||
uuid: # uuid for the run is generated by default if not set.
|
||||
enable_alerts: False # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error.
|
||||
alert_profile: config/alerts.yaml # Path to alert profile with the prometheus queries.
|
||||
enable_alerts: True # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
|
||||
enable_metrics: True
|
||||
alert_profile: config/alerts.yaml # Path or URL to alert profile with the prometheus queries
|
||||
metrics_profile: config/metrics-report.yaml
|
||||
check_critical_alerts: True # Path to alert profile with the prometheus queries.
|
||||
|
||||
tunings:
|
||||
wait_duration: 6 # Duration to wait between each chaos scenario.
|
||||
@@ -29,13 +33,42 @@ tunings:
|
||||
daemon_mode: False # Iterations are set to infinity which means that the kraken will cause chaos forever.
|
||||
telemetry:
|
||||
enabled: False # enable/disables the telemetry collection feature
|
||||
api_url: https://ulnmf9xv7j.execute-api.us-west-2.amazonaws.com/production #telemetry service endpoint
|
||||
username: username # telemetry service username
|
||||
password: password # telemetry service password
|
||||
api_url: https://yvnn4rfoi7.execute-api.us-west-2.amazonaws.com/test #telemetry service endpoint
|
||||
username: $TELEMETRY_USERNAME # telemetry service username
|
||||
password: $TELEMETRY_PASSWORD # telemetry service password
|
||||
prometheus_namespace: 'monitoring' # prometheus namespace
|
||||
prometheus_pod_name: 'prometheus-kind-prometheus-kube-prome-prometheus-0' # prometheus pod_name
|
||||
prometheus_container_name: 'prometheus'
|
||||
prometheus_backup: True # enables/disables prometheus data collection
|
||||
full_prometheus_backup: False # if is set to False only the /prometheus/wal folder will be downloaded.
|
||||
backup_threads: 5 # number of telemetry download/upload threads
|
||||
archive_path: /tmp # local path where the archive files will be temporarly stored
|
||||
archive_path: /tmp # local path where the archive files will be temporarily stored
|
||||
max_retries: 0 # maximum number of upload retries (if 0 will retry forever)
|
||||
run_tag: '' # if set, this will be appended to the run folder in the bucket (useful to group the runs)
|
||||
archive_size: 10000 # the size of the prometheus data archive size in KB. The lower the size of archive is
|
||||
logs_backup: True
|
||||
logs_filter_patterns:
|
||||
- "(\\w{3}\\s\\d{1,2}\\s\\d{2}:\\d{2}:\\d{2}\\.\\d+).+" # Sep 9 11:20:36.123425532
|
||||
- "kinit (\\d+/\\d+/\\d+\\s\\d{2}:\\d{2}:\\d{2})\\s+" # kinit 2023/09/15 11:20:36 log
|
||||
- "(\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d+Z).+" # 2023-09-15T11:20:36.123425532Z log
|
||||
oc_cli_path: /usr/bin/oc # optional, if not specified will be search in $PATH
|
||||
events_backup: True # enables/disables cluster events collection
|
||||
telemetry_group: "funtests"
|
||||
elastic:
|
||||
enable_elastic: False
|
||||
verify_certs: False
|
||||
elastic_url: "https://192.168.39.196" # To track results in elasticsearch, give url to server here; will post telemetry details when url and index not blank
|
||||
elastic_port: 32766
|
||||
username: "elastic"
|
||||
password: "test"
|
||||
metrics_index: "krkn-metrics"
|
||||
alerts_index: "krkn-alerts"
|
||||
telemetry_index: "krkn-telemetry"
|
||||
|
||||
health_checks: # Utilizing health check endpoints to observe application behavior during chaos injection.
|
||||
interval: # Interval in seconds to perform health checks, default value is 2 seconds
|
||||
config: # Provide list of health check configurations for applications
|
||||
- url: # Provide application endpoint
|
||||
bearer_token: # Bearer token for authentication if any
|
||||
auth: # Provide authentication credentials (username , password) in tuple format if any, ex:("admin","secretpassword")
|
||||
exit_on_failure: # If value is True exits when health check failed for application, values can be True/False
|
||||
|
||||
@@ -45,15 +45,45 @@ metadata:
|
||||
name: kraken-test-pod
|
||||
namespace: kraken
|
||||
spec:
|
||||
securityContext:
|
||||
fsGroup: 1001
|
||||
# initContainer to fix permissions on the mounted volume
|
||||
initContainers:
|
||||
- name: fix-permissions
|
||||
image: 'quay.io/centos7/httpd-24-centos7:centos7'
|
||||
command:
|
||||
- sh
|
||||
- -c
|
||||
- |
|
||||
echo "Setting up permissions for /home/kraken..."
|
||||
# Create the directory if it doesn't exist
|
||||
mkdir -p /home/kraken
|
||||
# Set ownership to user 1001 and group 1001
|
||||
chown -R 1001:1001 /home/kraken
|
||||
# Set permissions to allow read/write
|
||||
chmod -R 755 /home/kraken
|
||||
rm -rf /home/kraken/*
|
||||
echo "Permissions fixed. Current state:"
|
||||
ls -la /home/kraken
|
||||
volumeMounts:
|
||||
- mountPath: "/home/kraken"
|
||||
name: kraken-test-pv
|
||||
securityContext:
|
||||
runAsUser: 0 # Run as root to fix permissions
|
||||
volumes:
|
||||
- name: kraken-test-pv
|
||||
persistentVolumeClaim:
|
||||
claimName: kraken-test-pvc
|
||||
containers:
|
||||
- name: kraken-test-container
|
||||
image: 'quay.io/centos7/httpd-24-centos7:latest'
|
||||
volumeMounts:
|
||||
- mountPath: "/home/krake-dir/"
|
||||
name: kraken-test-pv
|
||||
image: 'quay.io/centos7/httpd-24-centos7:centos7'
|
||||
securityContext:
|
||||
privileged: true
|
||||
runAsUser: 1001
|
||||
runAsNonRoot: true
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
volumeMounts:
|
||||
- mountPath: "/home/kraken"
|
||||
name: kraken-test-pv
|
||||
|
||||
@@ -39,7 +39,7 @@ echo '-----------------------|--------|---------' >> $results
|
||||
failed_tests=()
|
||||
for test_name in `cat CI/tests/functional_tests`
|
||||
do
|
||||
wait_cluster_become_ready
|
||||
#wait_cluster_become_ready
|
||||
return_value=`./CI/run_test.sh $test_name $results`
|
||||
if [[ $return_value == 1 ]]
|
||||
then
|
||||
@@ -49,6 +49,7 @@ do
|
||||
wait_cluster_become_ready
|
||||
done
|
||||
|
||||
|
||||
if (( ${#failed_tests[@]}>0 ))
|
||||
then
|
||||
echo -e "\n\n======================================================================"
|
||||
|
||||
79
CI/templates/mock_cerberus.yaml
Normal file
79
CI/templates/mock_cerberus.yaml
Normal file
@@ -0,0 +1,79 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: mock-cerberus-server
|
||||
namespace: default
|
||||
data:
|
||||
server.py: |
|
||||
#!/usr/bin/env python3
|
||||
from http.server import HTTPServer, BaseHTTPRequestHandler
|
||||
import json
|
||||
|
||||
class MockCerberusHandler(BaseHTTPRequestHandler):
|
||||
def do_GET(self):
|
||||
if self.path == '/':
|
||||
# Return True to indicate cluster is healthy
|
||||
self.send_response(200)
|
||||
self.send_header('Content-type', 'text/plain')
|
||||
self.end_headers()
|
||||
self.wfile.write(b'True')
|
||||
elif self.path.startswith('/history'):
|
||||
# Return empty history (no failures)
|
||||
self.send_response(200)
|
||||
self.send_header('Content-type', 'application/json')
|
||||
self.end_headers()
|
||||
response = {
|
||||
"history": {
|
||||
"failures": []
|
||||
}
|
||||
}
|
||||
self.wfile.write(json.dumps(response).encode())
|
||||
else:
|
||||
self.send_response(404)
|
||||
self.end_headers()
|
||||
|
||||
def log_message(self, format, *args):
|
||||
print(f"[MockCerberus] {format % args}")
|
||||
|
||||
if __name__ == '__main__':
|
||||
server = HTTPServer(('0.0.0.0', 8080), MockCerberusHandler)
|
||||
print("[MockCerberus] Starting mock cerberus server on port 8080...")
|
||||
server.serve_forever()
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: mock-cerberus
|
||||
namespace: default
|
||||
labels:
|
||||
app: mock-cerberus
|
||||
spec:
|
||||
containers:
|
||||
- name: mock-cerberus
|
||||
image: python:3.9-slim
|
||||
command: ["python3", "/app/server.py"]
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
name: http
|
||||
volumeMounts:
|
||||
- name: server-script
|
||||
mountPath: /app
|
||||
volumes:
|
||||
- name: server-script
|
||||
configMap:
|
||||
name: mock-cerberus-server
|
||||
defaultMode: 0755
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: mock-cerberus
|
||||
namespace: default
|
||||
spec:
|
||||
selector:
|
||||
app: mock-cerberus
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 8080
|
||||
targetPort: 8080
|
||||
type: ClusterIP
|
||||
85
CI/templates/mock_cerberus_unhealthy.yaml
Normal file
85
CI/templates/mock_cerberus_unhealthy.yaml
Normal file
@@ -0,0 +1,85 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: mock-cerberus-unhealthy-server
|
||||
namespace: default
|
||||
data:
|
||||
server.py: |
|
||||
#!/usr/bin/env python3
|
||||
from http.server import HTTPServer, BaseHTTPRequestHandler
|
||||
import json
|
||||
|
||||
class MockCerberusUnhealthyHandler(BaseHTTPRequestHandler):
|
||||
def do_GET(self):
|
||||
if self.path == '/':
|
||||
# Return False to indicate cluster is unhealthy
|
||||
self.send_response(200)
|
||||
self.send_header('Content-type', 'text/plain')
|
||||
self.end_headers()
|
||||
self.wfile.write(b'False')
|
||||
elif self.path.startswith('/history'):
|
||||
# Return history with failures
|
||||
self.send_response(200)
|
||||
self.send_header('Content-type', 'application/json')
|
||||
self.end_headers()
|
||||
response = {
|
||||
"history": {
|
||||
"failures": [
|
||||
{
|
||||
"component": "node",
|
||||
"name": "test-node",
|
||||
"timestamp": "2024-01-01T00:00:00Z"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
self.wfile.write(json.dumps(response).encode())
|
||||
else:
|
||||
self.send_response(404)
|
||||
self.end_headers()
|
||||
|
||||
def log_message(self, format, *args):
|
||||
print(f"[MockCerberusUnhealthy] {format % args}")
|
||||
|
||||
if __name__ == '__main__':
|
||||
server = HTTPServer(('0.0.0.0', 8080), MockCerberusUnhealthyHandler)
|
||||
print("[MockCerberusUnhealthy] Starting mock cerberus unhealthy server on port 8080...")
|
||||
server.serve_forever()
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: mock-cerberus-unhealthy
|
||||
namespace: default
|
||||
labels:
|
||||
app: mock-cerberus-unhealthy
|
||||
spec:
|
||||
containers:
|
||||
- name: mock-cerberus-unhealthy
|
||||
image: python:3.9-slim
|
||||
command: ["python3", "/app/server.py"]
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
name: http
|
||||
volumeMounts:
|
||||
- name: server-script
|
||||
mountPath: /app
|
||||
volumes:
|
||||
- name: server-script
|
||||
configMap:
|
||||
name: mock-cerberus-unhealthy-server
|
||||
defaultMode: 0755
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: mock-cerberus-unhealthy
|
||||
namespace: default
|
||||
spec:
|
||||
selector:
|
||||
app: mock-cerberus-unhealthy
|
||||
ports:
|
||||
- protocol: TCP
|
||||
port: 8080
|
||||
targetPort: 8080
|
||||
type: ClusterIP
|
||||
@@ -8,9 +8,9 @@ spec:
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: fedtools
|
||||
image: docker.io/fedora/tools
|
||||
image: quay.io/krkn-chaos/krkn:tools
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- |
|
||||
sleep infinity
|
||||
sleep infinity
|
||||
|
||||
29
CI/templates/pod_network_filter.yaml
Normal file
29
CI/templates/pod_network_filter.yaml
Normal file
@@ -0,0 +1,29 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: pod-network-filter-test
|
||||
labels:
|
||||
app.kubernetes.io/name: pod-network-filter
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: quay.io/krkn-chaos/krkn-funtests:pod-network-filter
|
||||
ports:
|
||||
- containerPort: 5000
|
||||
name: pod-network-prt
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: pod-network-filter-service
|
||||
spec:
|
||||
selector:
|
||||
app.kubernetes.io/name: pod-network-filter
|
||||
type: NodePort
|
||||
ports:
|
||||
- name: pod-network-filter-svc
|
||||
protocol: TCP
|
||||
port: 80
|
||||
targetPort: pod-network-prt
|
||||
nodePort: 30037
|
||||
29
CI/templates/service_hijacking.yaml
Normal file
29
CI/templates/service_hijacking.yaml
Normal file
@@ -0,0 +1,29 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: nginx
|
||||
labels:
|
||||
app.kubernetes.io/name: proxy
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx:stable
|
||||
ports:
|
||||
- containerPort: 80
|
||||
name: http-web-svc
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: nginx-service
|
||||
spec:
|
||||
selector:
|
||||
app.kubernetes.io/name: proxy
|
||||
type: NodePort
|
||||
ports:
|
||||
- name: name-of-service-port
|
||||
protocol: TCP
|
||||
port: 80
|
||||
targetPort: http-web-svc
|
||||
nodePort: 30036
|
||||
@@ -8,9 +8,9 @@ spec:
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: fedtools
|
||||
image: docker.io/fedora/tools
|
||||
image: quay.io/krkn-chaos/krkn:tools
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- |
|
||||
sleep infinity
|
||||
sleep infinity
|
||||
|
||||
@@ -1,15 +1,23 @@
|
||||
ERRORED=false
|
||||
|
||||
function finish {
|
||||
if [ $? -eq 1 ] && [ $ERRORED != "true" ]
|
||||
if [ $? != 0 ] && [ $ERRORED != "true" ]
|
||||
then
|
||||
error
|
||||
fi
|
||||
}
|
||||
|
||||
function error {
|
||||
echo "Error caught."
|
||||
ERRORED=true
|
||||
exit_code=$?
|
||||
if [ $exit_code == 1 ]
|
||||
then
|
||||
echo "Error caught."
|
||||
ERRORED=true
|
||||
elif [ $exit_code == 2 ]
|
||||
then
|
||||
echo "Run with exit code 2 detected, it is expected, wrapping the exit code with 0 to avoid pipeline failure"
|
||||
exit 0
|
||||
fi
|
||||
}
|
||||
|
||||
function get_node {
|
||||
|
||||
@@ -10,10 +10,16 @@ function functional_test_app_outage {
|
||||
yq -i '.application_outage.duration=10' scenarios/openshift/app_outage.yaml
|
||||
yq -i '.application_outage.pod_selector={"scenario":"outage"}' scenarios/openshift/app_outage.yaml
|
||||
yq -i '.application_outage.namespace="default"' scenarios/openshift/app_outage.yaml
|
||||
export scenario_type="application_outages"
|
||||
export scenario_type="application_outages_scenarios"
|
||||
export scenario_file="scenarios/openshift/app_outage.yaml"
|
||||
export post_config=""
|
||||
|
||||
kubectl get services -A
|
||||
|
||||
kubectl get pods
|
||||
envsubst < CI/config/common_test_config.yaml > CI/config/app_outage.yaml
|
||||
cat $scenario_file
|
||||
cat CI/config/app_outage.yaml
|
||||
python3 -m coverage run -a run_kraken.py -c CI/config/app_outage.yaml
|
||||
echo "App outage scenario test: Success"
|
||||
}
|
||||
|
||||
@@ -1,19 +0,0 @@
|
||||
set -xeEo pipefail
|
||||
|
||||
source CI/tests/common.sh
|
||||
|
||||
trap error ERR
|
||||
trap finish EXIT
|
||||
|
||||
|
||||
function functional_test_arca_cpu_hog {
|
||||
yq -i '.input_list[0].node_selector={"kubernetes.io/hostname":"kind-worker2"}' scenarios/arcaflow/cpu-hog/input.yaml
|
||||
export scenario_type="arcaflow_scenarios"
|
||||
export scenario_file="scenarios/arcaflow/cpu-hog/input.yaml"
|
||||
export post_config=""
|
||||
envsubst < CI/config/common_test_config.yaml > CI/config/arca_cpu_hog.yaml
|
||||
python3 -m coverage run -a run_kraken.py -c CI/config/arca_cpu_hog.yaml
|
||||
echo "Arcaflow CPU Hog: Success"
|
||||
}
|
||||
|
||||
functional_test_arca_cpu_hog
|
||||
@@ -1,19 +0,0 @@
|
||||
set -xeEo pipefail
|
||||
|
||||
source CI/tests/common.sh
|
||||
|
||||
trap error ERR
|
||||
trap finish EXIT
|
||||
|
||||
|
||||
function functional_test_arca_io_hog {
|
||||
yq -i '.input_list[0].node_selector={"kubernetes.io/hostname":"kind-worker2"}' scenarios/arcaflow/io-hog/input.yaml
|
||||
export scenario_type="arcaflow_scenarios"
|
||||
export scenario_file="scenarios/arcaflow/io-hog/input.yaml"
|
||||
export post_config=""
|
||||
envsubst < CI/config/common_test_config.yaml > CI/config/arca_io_hog.yaml
|
||||
python3 -m coverage run -a run_kraken.py -c CI/config/arca_io_hog.yaml
|
||||
echo "Arcaflow IO Hog: Success"
|
||||
}
|
||||
|
||||
functional_test_arca_io_hog
|
||||
@@ -1,19 +0,0 @@
|
||||
set -xeEo pipefail
|
||||
|
||||
source CI/tests/common.sh
|
||||
|
||||
trap error ERR
|
||||
trap finish EXIT
|
||||
|
||||
|
||||
function functional_test_arca_memory_hog {
|
||||
yq -i '.input_list[0].node_selector={"kubernetes.io/hostname":"kind-worker2"}' scenarios/arcaflow/memory-hog/input.yaml
|
||||
export scenario_type="arcaflow_scenarios"
|
||||
export scenario_file="scenarios/arcaflow/memory-hog/input.yaml"
|
||||
export post_config=""
|
||||
envsubst < CI/config/common_test_config.yaml > CI/config/arca_memory_hog.yaml
|
||||
python3 -m coverage run -a run_kraken.py -c CI/config/arca_memory_hog.yaml
|
||||
echo "Arcaflow Memory Hog: Success"
|
||||
}
|
||||
|
||||
functional_test_arca_memory_hog
|
||||
79
CI/tests/test_cerberus_unhealthy.sh
Executable file
79
CI/tests/test_cerberus_unhealthy.sh
Executable file
@@ -0,0 +1,79 @@
|
||||
set -xeEo pipefail
|
||||
|
||||
source CI/tests/common.sh
|
||||
|
||||
trap error ERR
|
||||
trap finish EXIT
|
||||
|
||||
function functional_test_cerberus_unhealthy {
|
||||
echo "========================================"
|
||||
echo "Starting Cerberus Unhealthy Test"
|
||||
echo "========================================"
|
||||
|
||||
# Deploy mock cerberus unhealthy server
|
||||
echo "Deploying mock cerberus unhealthy server..."
|
||||
kubectl apply -f CI/templates/mock_cerberus_unhealthy.yaml
|
||||
|
||||
# Wait for mock cerberus unhealthy pod to be ready
|
||||
echo "Waiting for mock cerberus unhealthy to be ready..."
|
||||
kubectl wait --for=condition=ready pod -l app=mock-cerberus-unhealthy --timeout=300s
|
||||
|
||||
# Verify mock cerberus service is accessible
|
||||
echo "Verifying mock cerberus unhealthy service..."
|
||||
mock_cerberus_ip=$(kubectl get service mock-cerberus-unhealthy -o jsonpath='{.spec.clusterIP}')
|
||||
echo "Mock Cerberus Unhealthy IP: $mock_cerberus_ip"
|
||||
|
||||
# Test cerberus endpoint from within the cluster (should return False)
|
||||
kubectl run cerberus-unhealthy-test --image=curlimages/curl:latest --rm -i --restart=Never -- \
|
||||
curl -s http://mock-cerberus-unhealthy.default.svc.cluster.local:8080/ || echo "Cerberus unhealthy test curl completed"
|
||||
|
||||
# Configure scenario for pod disruption with cerberus enabled
|
||||
export scenario_type="pod_disruption_scenarios"
|
||||
export scenario_file="scenarios/kind/pod_etcd.yml"
|
||||
export post_config=""
|
||||
|
||||
# Generate config with cerberus enabled
|
||||
envsubst < CI/config/common_test_config.yaml > CI/config/cerberus_unhealthy_test_config.yaml
|
||||
|
||||
# Enable cerberus in the config but DON'T exit_on_failure (so the test can verify the behavior)
|
||||
# Using yq jq-wrapper syntax with -i -y
|
||||
yq -i '.cerberus.cerberus_enabled = true' CI/config/cerberus_unhealthy_test_config.yaml
|
||||
yq -i ".cerberus.cerberus_url = \"http://${mock_cerberus_ip}:8080\"" CI/config/cerberus_unhealthy_test_config.yaml
|
||||
yq -i '.kraken.exit_on_failure = false' CI/config/cerberus_unhealthy_test_config.yaml
|
||||
|
||||
echo "========================================"
|
||||
echo "Cerberus Unhealthy Configuration:"
|
||||
yq '.cerberus' CI/config/cerberus_unhealthy_test_config.yaml
|
||||
echo "exit_on_failure:"
|
||||
yq '.kraken.exit_on_failure' CI/config/cerberus_unhealthy_test_config.yaml
|
||||
echo "========================================"
|
||||
|
||||
# Run kraken with cerberus unhealthy (should detect unhealthy but not exit due to exit_on_failure=false)
|
||||
echo "Running kraken with cerberus unhealthy integration..."
|
||||
|
||||
# We expect this to complete (not exit 1) because exit_on_failure is false
|
||||
# But cerberus should log that the cluster is unhealthy
|
||||
python3 -m coverage run -a run_kraken.py -c CI/config/cerberus_unhealthy_test_config.yaml || {
|
||||
exit_code=$?
|
||||
echo "Kraken exited with code: $exit_code"
|
||||
# If exit_code is 1, that's expected when cerberus reports unhealthy and exit_on_failure would be true
|
||||
# But since we set exit_on_failure=false, it should not exit
|
||||
if [ $exit_code -eq 1 ]; then
|
||||
echo "WARNING: Kraken exited with 1, which may indicate cerberus detected unhealthy cluster"
|
||||
fi
|
||||
}
|
||||
|
||||
# Verify cerberus was called by checking mock cerberus logs
|
||||
echo "Checking mock cerberus unhealthy logs..."
|
||||
kubectl logs -l app=mock-cerberus-unhealthy --tail=50
|
||||
|
||||
# Cleanup
|
||||
echo "Cleaning up mock cerberus unhealthy..."
|
||||
kubectl delete -f CI/templates/mock_cerberus_unhealthy.yaml || true
|
||||
|
||||
echo "========================================"
|
||||
echo "Cerberus unhealthy functional test: Success"
|
||||
echo "========================================"
|
||||
}
|
||||
|
||||
functional_test_cerberus_unhealthy
|
||||
@@ -8,16 +8,18 @@ trap finish EXIT
|
||||
pod_file="CI/scenarios/hello_pod.yaml"
|
||||
|
||||
function functional_test_container_crash {
|
||||
yq -i '.scenarios[0].namespace="default"' scenarios/openshift/app_outage.yaml
|
||||
yq -i '.scenarios[0].label_selector="scenario=container"' scenarios/openshift/app_outage.yaml
|
||||
yq -i '.scenarios[0].container_name="fedtools"' scenarios/openshift/app_outage.yaml
|
||||
yq -i '.scenarios[0].namespace="default"' scenarios/openshift/container_etcd.yml
|
||||
yq -i '.scenarios[0].label_selector="scenario=container"' scenarios/openshift/container_etcd.yml
|
||||
yq -i '.scenarios[0].container_name="fedtools"' scenarios/openshift/container_etcd.yml
|
||||
export scenario_type="container_scenarios"
|
||||
export scenario_file="- scenarios/openshift/app_outage.yaml"
|
||||
export scenario_file="scenarios/openshift/container_etcd.yml"
|
||||
export post_config=""
|
||||
envsubst < CI/config/common_test_config.yaml > CI/config/container_config.yaml
|
||||
|
||||
python3 -m coverage run -a run_kraken.py -c CI/config/container_config.yaml
|
||||
python3 -m coverage run -a run_kraken.py -c CI/config/container_config.yaml -d True
|
||||
echo "Container scenario test: Success"
|
||||
|
||||
kubectl get pods -n kube-system -l component=etcd
|
||||
}
|
||||
|
||||
functional_test_container_crash
|
||||
|
||||
20
CI/tests/test_cpu_hog.sh
Normal file
20
CI/tests/test_cpu_hog.sh
Normal file
@@ -0,0 +1,20 @@
|
||||
set -xeEo pipefail
|
||||
|
||||
source CI/tests/common.sh
|
||||
|
||||
trap error ERR
|
||||
trap finish EXIT
|
||||
|
||||
|
||||
function functional_test_cpu_hog {
|
||||
yq -i '."node-selector"="kubernetes.io/hostname=kind-worker2"' scenarios/kube/cpu-hog.yml
|
||||
|
||||
export scenario_type="hog_scenarios"
|
||||
export scenario_file="scenarios/kube/cpu-hog.yml"
|
||||
export post_config=""
|
||||
envsubst < CI/config/common_test_config.yaml > CI/config/cpu_hog.yaml
|
||||
python3 -m coverage run -a run_kraken.py -c CI/config/cpu_hog.yaml
|
||||
echo "CPU Hog: Success"
|
||||
}
|
||||
|
||||
functional_test_cpu_hog
|
||||
18
CI/tests/test_customapp_pod.sh
Executable file
18
CI/tests/test_customapp_pod.sh
Executable file
@@ -0,0 +1,18 @@
|
||||
set -xeEo pipefail
|
||||
|
||||
source CI/tests/common.sh
|
||||
|
||||
trap error ERR
|
||||
trap finish EXIT
|
||||
|
||||
function functional_test_customapp_pod_node_selector {
|
||||
export scenario_type="pod_disruption_scenarios"
|
||||
export scenario_file="scenarios/openshift/customapp_pod.yaml"
|
||||
export post_config=""
|
||||
envsubst < CI/config/common_test_config.yaml > CI/config/customapp_pod_config.yaml
|
||||
|
||||
python3 -m coverage run -a run_kraken.py -c CI/config/customapp_pod_config.yaml -d True
|
||||
echo "Pod disruption with node_label_selector test: Success"
|
||||
}
|
||||
|
||||
functional_test_customapp_pod_node_selector
|
||||
20
CI/tests/test_io_hog.sh
Normal file
20
CI/tests/test_io_hog.sh
Normal file
@@ -0,0 +1,20 @@
|
||||
set -xeEo pipefail
|
||||
|
||||
source CI/tests/common.sh
|
||||
|
||||
trap error ERR
|
||||
trap finish EXIT
|
||||
|
||||
function functional_test_io_hog {
|
||||
yq -i '."node-selector"="kubernetes.io/hostname=kind-worker2"' scenarios/kube/io-hog.yml
|
||||
export scenario_type="hog_scenarios"
|
||||
export scenario_file="scenarios/kube/io-hog.yml"
|
||||
export post_config=""
|
||||
|
||||
cat $scenario_file
|
||||
envsubst < CI/config/common_test_config.yaml > CI/config/io_hog.yaml
|
||||
python3 -m coverage run -a run_kraken.py -c CI/config/io_hog.yaml
|
||||
echo "IO Hog: Success"
|
||||
}
|
||||
|
||||
functional_test_io_hog
|
||||
19
CI/tests/test_memory_hog.sh
Normal file
19
CI/tests/test_memory_hog.sh
Normal file
@@ -0,0 +1,19 @@
|
||||
set -xeEo pipefail
|
||||
|
||||
source CI/tests/common.sh
|
||||
|
||||
trap error ERR
|
||||
trap finish EXIT
|
||||
|
||||
|
||||
function functional_test_memory_hog {
|
||||
yq -i '."node-selector"="kubernetes.io/hostname=kind-worker2"' scenarios/kube/memory-hog.yml
|
||||
export scenario_type="hog_scenarios"
|
||||
export scenario_file="scenarios/kube/memory-hog.yml"
|
||||
export post_config=""
|
||||
envsubst < CI/config/common_test_config.yaml > CI/config/memory_hog.yaml
|
||||
python3 -m coverage run -a run_kraken.py -c CI/config/memory_hog.yaml
|
||||
echo "Memory Hog: Success"
|
||||
}
|
||||
|
||||
functional_test_memory_hog
|
||||
@@ -6,8 +6,8 @@ trap error ERR
|
||||
trap finish EXIT
|
||||
|
||||
function funtional_test_namespace_deletion {
|
||||
export scenario_type="namespace_scenarios"
|
||||
export scenario_file="- scenarios/openshift/ingress_namespace.yaml"
|
||||
export scenario_type="service_disruption_scenarios"
|
||||
export scenario_file="scenarios/openshift/ingress_namespace.yaml"
|
||||
export post_config=""
|
||||
yq '.scenarios[0].namespace="^namespace-scenario$"' -i scenarios/openshift/ingress_namespace.yaml
|
||||
yq '.scenarios[0].wait_time=30' -i scenarios/openshift/ingress_namespace.yaml
|
||||
|
||||
@@ -15,7 +15,7 @@ function functional_test_network_chaos {
|
||||
yq -i 'del(.network_chaos.egress.latency)' scenarios/openshift/network_chaos.yaml
|
||||
yq -i 'del(.network_chaos.egress.loss)' scenarios/openshift/network_chaos.yaml
|
||||
|
||||
export scenario_type="network_chaos"
|
||||
export scenario_type="network_chaos_scenarios"
|
||||
export scenario_file="scenarios/openshift/network_chaos.yaml"
|
||||
export post_config=""
|
||||
envsubst < CI/config/common_test_config.yaml > CI/config/network_chaos.yaml
|
||||
|
||||
18
CI/tests/test_node.sh
Executable file
18
CI/tests/test_node.sh
Executable file
@@ -0,0 +1,18 @@
|
||||
uset -xeEo pipefail
|
||||
|
||||
source CI/tests/common.sh
|
||||
|
||||
trap error ERR
|
||||
trap finish EXIT
|
||||
|
||||
function functional_test_node_stop_start {
|
||||
export scenario_type="node_scenarios"
|
||||
export scenario_file="scenarios/kind/node_scenarios_example.yml"
|
||||
export post_config=""
|
||||
envsubst < CI/config/common_test_config.yaml > CI/config/node_config.yaml
|
||||
cat CI/config/node_config.yaml
|
||||
python3 -m coverage run -a run_kraken.py -c CI/config/node_config.yaml
|
||||
echo "Node Stop/Start scenario test: Success"
|
||||
}
|
||||
|
||||
functional_test_node_stop_start
|
||||
165
CI/tests/test_node_network_chaos.sh
Executable file
165
CI/tests/test_node_network_chaos.sh
Executable file
@@ -0,0 +1,165 @@
|
||||
set -xeEo pipefail
|
||||
|
||||
source CI/tests/common.sh
|
||||
|
||||
trap error ERR
|
||||
trap finish EXIT
|
||||
|
||||
function functional_test_node_network_chaos {
|
||||
echo "Starting node network chaos functional test"
|
||||
|
||||
# Get a worker node
|
||||
get_node
|
||||
export TARGET_NODE=$(echo $WORKER_NODE | awk '{print $1}')
|
||||
echo "Target node: $TARGET_NODE"
|
||||
|
||||
# Deploy nginx workload on the target node
|
||||
echo "Deploying nginx workload on $TARGET_NODE..."
|
||||
kubectl create deployment nginx-node-net-chaos --image=nginx:latest
|
||||
|
||||
# Add node selector to ensure pod runs on target node
|
||||
kubectl patch deployment nginx-node-net-chaos -p '{"spec":{"template":{"spec":{"nodeSelector":{"kubernetes.io/hostname":"'$TARGET_NODE'"}}}}}'
|
||||
|
||||
# Expose service
|
||||
kubectl expose deployment nginx-node-net-chaos --port=80 --target-port=80 --name=nginx-node-net-chaos-svc
|
||||
|
||||
# Wait for nginx to be ready
|
||||
echo "Waiting for nginx pod to be ready on $TARGET_NODE..."
|
||||
kubectl wait --for=condition=ready pod -l app=nginx-node-net-chaos --timeout=120s
|
||||
|
||||
# Verify pod is on correct node
|
||||
export POD_NAME=$(kubectl get pods -l app=nginx-node-net-chaos -o jsonpath='{.items[0].metadata.name}')
|
||||
export POD_NODE=$(kubectl get pod $POD_NAME -o jsonpath='{.spec.nodeName}')
|
||||
echo "Pod $POD_NAME is running on node $POD_NODE"
|
||||
|
||||
if [ "$POD_NODE" != "$TARGET_NODE" ]; then
|
||||
echo "ERROR: Pod is not on target node (expected $TARGET_NODE, got $POD_NODE)"
|
||||
kubectl get pods -l app=nginx-node-net-chaos -o wide
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Setup port-forward to access nginx
|
||||
echo "Setting up port-forward to nginx service..."
|
||||
kubectl port-forward service/nginx-node-net-chaos-svc 8091:80 &
|
||||
PORT_FORWARD_PID=$!
|
||||
sleep 3 # Give port-forward time to start
|
||||
|
||||
# Test baseline connectivity
|
||||
echo "Testing baseline connectivity..."
|
||||
response=$(curl -s -o /dev/null -w "%{http_code}" --max-time 5 http://localhost:8091 || echo "000")
|
||||
if [ "$response" != "200" ]; then
|
||||
echo "ERROR: Nginx not responding correctly (got $response, expected 200)"
|
||||
kubectl get pods -l app=nginx-node-net-chaos
|
||||
kubectl describe pod $POD_NAME
|
||||
exit 1
|
||||
fi
|
||||
echo "Baseline test passed: nginx responding with 200"
|
||||
|
||||
# Measure baseline latency
|
||||
echo "Measuring baseline latency..."
|
||||
baseline_start=$(date +%s%3N)
|
||||
curl -s http://localhost:8091 > /dev/null || true
|
||||
baseline_end=$(date +%s%3N)
|
||||
baseline_latency=$((baseline_end - baseline_start))
|
||||
echo "Baseline latency: ${baseline_latency}ms"
|
||||
|
||||
# Configure node network chaos scenario
|
||||
echo "Configuring node network chaos scenario..."
|
||||
yq -i '.[0].config.target="'$TARGET_NODE'"' scenarios/kube/node-network-chaos.yml
|
||||
yq -i '.[0].config.namespace="default"' scenarios/kube/node-network-chaos.yml
|
||||
yq -i '.[0].config.test_duration=20' scenarios/kube/node-network-chaos.yml
|
||||
yq -i '.[0].config.latency="200ms"' scenarios/kube/node-network-chaos.yml
|
||||
yq -i '.[0].config.loss=15' scenarios/kube/node-network-chaos.yml
|
||||
yq -i '.[0].config.bandwidth="10mbit"' scenarios/kube/node-network-chaos.yml
|
||||
yq -i '.[0].config.ingress=true' scenarios/kube/node-network-chaos.yml
|
||||
yq -i '.[0].config.egress=true' scenarios/kube/node-network-chaos.yml
|
||||
yq -i '.[0].config.force=false' scenarios/kube/node-network-chaos.yml
|
||||
yq -i 'del(.[0].config.interfaces)' scenarios/kube/node-network-chaos.yml
|
||||
|
||||
# Prepare krkn config
|
||||
export scenario_type="network_chaos_ng_scenarios"
|
||||
export scenario_file="scenarios/kube/node-network-chaos.yml"
|
||||
export post_config=""
|
||||
envsubst < CI/config/common_test_config.yaml > CI/config/node_network_chaos_config.yaml
|
||||
|
||||
# Run krkn in background
|
||||
echo "Starting krkn with node network chaos scenario..."
|
||||
python3 -m coverage run -a run_kraken.py -c CI/config/node_network_chaos_config.yaml &
|
||||
KRKN_PID=$!
|
||||
echo "Krkn started with PID: $KRKN_PID"
|
||||
|
||||
# Wait for chaos to start (give it time to inject chaos)
|
||||
echo "Waiting for chaos injection to begin..."
|
||||
sleep 10
|
||||
|
||||
# Test during chaos - check for increased latency or packet loss effects
|
||||
echo "Testing network behavior during chaos..."
|
||||
chaos_test_count=0
|
||||
chaos_success=0
|
||||
|
||||
for i in {1..5}; do
|
||||
chaos_test_count=$((chaos_test_count + 1))
|
||||
chaos_start=$(date +%s%3N)
|
||||
response=$(curl -s -o /dev/null -w "%{http_code}" --max-time 10 http://localhost:8091 || echo "000")
|
||||
chaos_end=$(date +%s%3N)
|
||||
chaos_latency=$((chaos_end - chaos_start))
|
||||
|
||||
echo "Attempt $i: HTTP $response, latency: ${chaos_latency}ms"
|
||||
|
||||
# We expect either increased latency or some failures due to packet loss
|
||||
if [ "$response" == "200" ] || [ "$response" == "000" ]; then
|
||||
chaos_success=$((chaos_success + 1))
|
||||
fi
|
||||
|
||||
sleep 2
|
||||
done
|
||||
|
||||
echo "Chaos test results: $chaos_success/$chaos_test_count requests processed"
|
||||
|
||||
# Verify node-level chaos affects pod
|
||||
echo "Verifying node-level chaos affects pod on $TARGET_NODE..."
|
||||
# The node chaos should affect all pods on the node
|
||||
|
||||
# Wait for krkn to complete
|
||||
echo "Waiting for krkn to complete..."
|
||||
wait $KRKN_PID || true
|
||||
echo "Krkn completed"
|
||||
|
||||
# Wait a bit for cleanup
|
||||
sleep 5
|
||||
|
||||
# Verify recovery - nginx should respond normally again
|
||||
echo "Verifying service recovery..."
|
||||
recovery_attempts=0
|
||||
max_recovery_attempts=10
|
||||
|
||||
while [ $recovery_attempts -lt $max_recovery_attempts ]; do
|
||||
recovery_attempts=$((recovery_attempts + 1))
|
||||
response=$(curl -s -o /dev/null -w "%{http_code}" --max-time 5 http://localhost:8091 || echo "000")
|
||||
|
||||
if [ "$response" == "200" ]; then
|
||||
echo "Recovery verified: nginx responding normally (attempt $recovery_attempts)"
|
||||
break
|
||||
fi
|
||||
|
||||
echo "Recovery attempt $recovery_attempts/$max_recovery_attempts: got $response, retrying..."
|
||||
sleep 3
|
||||
done
|
||||
|
||||
if [ "$response" != "200" ]; then
|
||||
echo "ERROR: Service did not recover after chaos (got $response)"
|
||||
kubectl get pods -l app=nginx-node-net-chaos
|
||||
kubectl describe pod $POD_NAME
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Cleanup
|
||||
echo "Cleaning up test resources..."
|
||||
kill $PORT_FORWARD_PID 2>/dev/null || true
|
||||
kubectl delete deployment nginx-node-net-chaos --ignore-not-found=true
|
||||
kubectl delete service nginx-node-net-chaos-svc --ignore-not-found=true
|
||||
|
||||
echo "Node network chaos test: Success"
|
||||
}
|
||||
|
||||
functional_test_node_network_chaos
|
||||
21
CI/tests/test_pod.sh
Executable file
21
CI/tests/test_pod.sh
Executable file
@@ -0,0 +1,21 @@
|
||||
set -xeEo pipefail
|
||||
|
||||
source CI/tests/common.sh
|
||||
|
||||
trap error ERR
|
||||
trap finish EXIT
|
||||
|
||||
function functional_test_pod_crash {
|
||||
export scenario_type="pod_disruption_scenarios"
|
||||
export scenario_file="scenarios/kind/pod_path_provisioner.yml"
|
||||
|
||||
export post_config=""
|
||||
envsubst < CI/config/common_test_config.yaml > CI/config/pod_config.yaml
|
||||
|
||||
python3 -m coverage run -a run_kraken.py -c CI/config/pod_config.yaml
|
||||
echo "Pod disruption scenario test: Success"
|
||||
date
|
||||
kubectl get pods -n local-path-storage -l app=local-path-provisioner -o yaml
|
||||
}
|
||||
|
||||
functional_test_pod_crash
|
||||
31
CI/tests/test_pod_error.sh
Executable file
31
CI/tests/test_pod_error.sh
Executable file
@@ -0,0 +1,31 @@
|
||||
|
||||
|
||||
source CI/tests/common.sh
|
||||
|
||||
trap error ERR
|
||||
trap finish EXIT
|
||||
|
||||
function functional_test_pod_error {
|
||||
export scenario_type="pod_disruption_scenarios"
|
||||
export scenario_file="scenarios/kind/pod_etcd.yml"
|
||||
export post_config=""
|
||||
# this test will check if krkn exits with an error when too many pods are targeted
|
||||
yq -i '.[0].config.kill=5' scenarios/kind/pod_etcd.yml
|
||||
yq -i '.[0].config.krkn_pod_recovery_time=1' scenarios/kind/pod_etcd.yml
|
||||
envsubst < CI/config/common_test_config.yaml > CI/config/pod_config.yaml
|
||||
cat CI/config/pod_config.yaml
|
||||
|
||||
cat scenarios/kind/pod_etcd.yml
|
||||
python3 -m coverage run -a run_kraken.py -c CI/config/pod_config.yaml
|
||||
|
||||
ret=$?
|
||||
echo "\n\nret $ret"
|
||||
if [[ $ret -ge 1 ]]; then
|
||||
echo "Pod disruption error scenario test: Success"
|
||||
else
|
||||
echo "Pod disruption error scenario test: Failure"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
functional_test_pod_error
|
||||
143
CI/tests/test_pod_network_chaos.sh
Executable file
143
CI/tests/test_pod_network_chaos.sh
Executable file
@@ -0,0 +1,143 @@
|
||||
set -xeEo pipefail
|
||||
|
||||
source CI/tests/common.sh
|
||||
|
||||
trap error ERR
|
||||
trap finish EXIT
|
||||
|
||||
function functional_test_pod_network_chaos {
|
||||
echo "Starting pod network chaos functional test"
|
||||
|
||||
# Deploy nginx workload
|
||||
echo "Deploying nginx workload..."
|
||||
kubectl create deployment nginx-pod-net-chaos --image=nginx:latest
|
||||
kubectl expose deployment nginx-pod-net-chaos --port=80 --target-port=80 --name=nginx-pod-net-chaos-svc
|
||||
|
||||
# Wait for nginx to be ready
|
||||
echo "Waiting for nginx pod to be ready..."
|
||||
kubectl wait --for=condition=ready pod -l app=nginx-pod-net-chaos --timeout=120s
|
||||
|
||||
# Get pod name
|
||||
export POD_NAME=$(kubectl get pods -l app=nginx-pod-net-chaos -o jsonpath='{.items[0].metadata.name}')
|
||||
echo "Target pod: $POD_NAME"
|
||||
|
||||
# Setup port-forward to access nginx
|
||||
echo "Setting up port-forward to nginx service..."
|
||||
kubectl port-forward service/nginx-pod-net-chaos-svc 8090:80 &
|
||||
PORT_FORWARD_PID=$!
|
||||
sleep 3 # Give port-forward time to start
|
||||
|
||||
# Test baseline connectivity
|
||||
echo "Testing baseline connectivity..."
|
||||
response=$(curl -s -o /dev/null -w "%{http_code}" --max-time 5 http://localhost:8090 || echo "000")
|
||||
if [ "$response" != "200" ]; then
|
||||
echo "ERROR: Nginx not responding correctly (got $response, expected 200)"
|
||||
kubectl get pods -l app=nginx-pod-net-chaos
|
||||
kubectl describe pod $POD_NAME
|
||||
exit 1
|
||||
fi
|
||||
echo "Baseline test passed: nginx responding with 200"
|
||||
|
||||
# Measure baseline latency
|
||||
echo "Measuring baseline latency..."
|
||||
baseline_start=$(date +%s%3N)
|
||||
curl -s http://localhost:8090 > /dev/null || true
|
||||
baseline_end=$(date +%s%3N)
|
||||
baseline_latency=$((baseline_end - baseline_start))
|
||||
echo "Baseline latency: ${baseline_latency}ms"
|
||||
|
||||
# Configure pod network chaos scenario
|
||||
echo "Configuring pod network chaos scenario..."
|
||||
yq -i '.[0].config.target="'$POD_NAME'"' scenarios/kube/pod-network-chaos.yml
|
||||
yq -i '.[0].config.namespace="default"' scenarios/kube/pod-network-chaos.yml
|
||||
yq -i '.[0].config.test_duration=20' scenarios/kube/pod-network-chaos.yml
|
||||
yq -i '.[0].config.latency="200ms"' scenarios/kube/pod-network-chaos.yml
|
||||
yq -i '.[0].config.loss=15' scenarios/kube/pod-network-chaos.yml
|
||||
yq -i '.[0].config.bandwidth="10mbit"' scenarios/kube/pod-network-chaos.yml
|
||||
yq -i '.[0].config.ingress=true' scenarios/kube/pod-network-chaos.yml
|
||||
yq -i '.[0].config.egress=true' scenarios/kube/pod-network-chaos.yml
|
||||
yq -i 'del(.[0].config.interfaces)' scenarios/kube/pod-network-chaos.yml
|
||||
|
||||
# Prepare krkn config
|
||||
export scenario_type="network_chaos_ng_scenarios"
|
||||
export scenario_file="scenarios/kube/pod-network-chaos.yml"
|
||||
export post_config=""
|
||||
envsubst < CI/config/common_test_config.yaml > CI/config/pod_network_chaos_config.yaml
|
||||
|
||||
# Run krkn in background
|
||||
echo "Starting krkn with pod network chaos scenario..."
|
||||
python3 -m coverage run -a run_kraken.py -c CI/config/pod_network_chaos_config.yaml &
|
||||
KRKN_PID=$!
|
||||
echo "Krkn started with PID: $KRKN_PID"
|
||||
|
||||
# Wait for chaos to start (give it time to inject chaos)
|
||||
echo "Waiting for chaos injection to begin..."
|
||||
sleep 10
|
||||
|
||||
# Test during chaos - check for increased latency or packet loss effects
|
||||
echo "Testing network behavior during chaos..."
|
||||
chaos_test_count=0
|
||||
chaos_success=0
|
||||
|
||||
for i in {1..5}; do
|
||||
chaos_test_count=$((chaos_test_count + 1))
|
||||
chaos_start=$(date +%s%3N)
|
||||
response=$(curl -s -o /dev/null -w "%{http_code}" --max-time 10 http://localhost:8090 || echo "000")
|
||||
chaos_end=$(date +%s%3N)
|
||||
chaos_latency=$((chaos_end - chaos_start))
|
||||
|
||||
echo "Attempt $i: HTTP $response, latency: ${chaos_latency}ms"
|
||||
|
||||
# We expect either increased latency or some failures due to packet loss
|
||||
if [ "$response" == "200" ] || [ "$response" == "000" ]; then
|
||||
chaos_success=$((chaos_success + 1))
|
||||
fi
|
||||
|
||||
sleep 2
|
||||
done
|
||||
|
||||
echo "Chaos test results: $chaos_success/$chaos_test_count requests processed"
|
||||
|
||||
# Wait for krkn to complete
|
||||
echo "Waiting for krkn to complete..."
|
||||
wait $KRKN_PID || true
|
||||
echo "Krkn completed"
|
||||
|
||||
# Wait a bit for cleanup
|
||||
sleep 5
|
||||
|
||||
# Verify recovery - nginx should respond normally again
|
||||
echo "Verifying service recovery..."
|
||||
recovery_attempts=0
|
||||
max_recovery_attempts=10
|
||||
|
||||
while [ $recovery_attempts -lt $max_recovery_attempts ]; do
|
||||
recovery_attempts=$((recovery_attempts + 1))
|
||||
response=$(curl -s -o /dev/null -w "%{http_code}" --max-time 5 http://localhost:8090 || echo "000")
|
||||
|
||||
if [ "$response" == "200" ]; then
|
||||
echo "Recovery verified: nginx responding normally (attempt $recovery_attempts)"
|
||||
break
|
||||
fi
|
||||
|
||||
echo "Recovery attempt $recovery_attempts/$max_recovery_attempts: got $response, retrying..."
|
||||
sleep 3
|
||||
done
|
||||
|
||||
if [ "$response" != "200" ]; then
|
||||
echo "ERROR: Service did not recover after chaos (got $response)"
|
||||
kubectl get pods -l app=nginx-pod-net-chaos
|
||||
kubectl describe pod $POD_NAME
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Cleanup
|
||||
echo "Cleaning up test resources..."
|
||||
kill $PORT_FORWARD_PID 2>/dev/null || true
|
||||
kubectl delete deployment nginx-pod-net-chaos --ignore-not-found=true
|
||||
kubectl delete service nginx-pod-net-chaos-svc --ignore-not-found=true
|
||||
|
||||
echo "Pod network chaos test: Success"
|
||||
}
|
||||
|
||||
functional_test_pod_network_chaos
|
||||
62
CI/tests/test_pod_network_filter.sh
Executable file
62
CI/tests/test_pod_network_filter.sh
Executable file
@@ -0,0 +1,62 @@
|
||||
function functional_pod_network_filter {
|
||||
export SERVICE_URL="http://localhost:8889"
|
||||
export scenario_type="network_chaos_ng_scenarios"
|
||||
export scenario_file="scenarios/kube/pod-network-filter.yml"
|
||||
export post_config=""
|
||||
envsubst < CI/config/common_test_config.yaml > CI/config/pod_network_filter.yaml
|
||||
yq -i '.[0].test_duration=10' scenarios/kube/pod-network-filter.yml
|
||||
yq -i '.[0].label_selector=""' scenarios/kube/pod-network-filter.yml
|
||||
yq -i '.[0].ingress=false' scenarios/kube/pod-network-filter.yml
|
||||
yq -i '.[0].egress=true' scenarios/kube/pod-network-filter.yml
|
||||
yq -i '.[0].target="pod-network-filter-test"' scenarios/kube/pod-network-filter.yml
|
||||
yq -i '.[0].protocols=["tcp"]' scenarios/kube/pod-network-filter.yml
|
||||
yq -i '.[0].ports=[443]' scenarios/kube/pod-network-filter.yml
|
||||
yq -i '.performance_monitoring.check_critical_alerts=False' CI/config/pod_network_filter.yaml
|
||||
|
||||
## Test webservice deployment
|
||||
kubectl apply -f ./CI/templates/pod_network_filter.yaml
|
||||
COUNTER=0
|
||||
while true
|
||||
do
|
||||
curl $SERVICE_URL
|
||||
EXITSTATUS=$?
|
||||
if [ "$EXITSTATUS" -eq "0" ]
|
||||
then
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
COUNTER=$((COUNTER+1))
|
||||
[ $COUNTER -eq "100" ] && echo "maximum number of retry reached, test failed" && exit 1
|
||||
done
|
||||
|
||||
cat scenarios/kube/pod-network-filter.yml
|
||||
|
||||
python3 -m coverage run -a run_kraken.py -c CI/config/pod_network_filter.yaml > krkn_pod_network.out 2>&1 &
|
||||
PID=$!
|
||||
|
||||
# wait until the dns resolution starts failing and the service returns 400
|
||||
DNS_FAILURE_STATUS=0
|
||||
while true
|
||||
do
|
||||
OUT_STATUS_CODE=$(curl -X GET -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL)
|
||||
if [ "$OUT_STATUS_CODE" -eq "404" ]
|
||||
then
|
||||
DNS_FAILURE_STATUS=404
|
||||
fi
|
||||
|
||||
if [ "$DNS_FAILURE_STATUS" -eq "404" ] && [ "$OUT_STATUS_CODE" -eq "200" ]
|
||||
then
|
||||
echo "service restored"
|
||||
break
|
||||
fi
|
||||
COUNTER=$((COUNTER+1))
|
||||
[ $COUNTER -eq "100" ] && echo "maximum number of retry reached, test failed" && exit 1
|
||||
sleep 2
|
||||
done
|
||||
|
||||
wait $PID
|
||||
|
||||
}
|
||||
|
||||
functional_pod_network_filter
|
||||
|
||||
35
CI/tests/test_pod_server.sh
Executable file
35
CI/tests/test_pod_server.sh
Executable file
@@ -0,0 +1,35 @@
|
||||
set -xeEo pipefail
|
||||
|
||||
source CI/tests/common.sh
|
||||
|
||||
trap error ERR
|
||||
trap finish EXIT
|
||||
|
||||
function functional_test_pod_server {
|
||||
export scenario_type="pod_disruption_scenarios"
|
||||
export scenario_file="scenarios/kind/pod_etcd.yml"
|
||||
export post_config=""
|
||||
|
||||
envsubst < CI/config/common_test_config.yaml > CI/config/pod_config.yaml
|
||||
yq -i '.[0].config.kill=1' scenarios/kind/pod_etcd.yml
|
||||
|
||||
yq -i '.tunings.daemon_mode=True' CI/config/pod_config.yaml
|
||||
cat CI/config/pod_config.yaml
|
||||
python3 -m coverage run -a run_kraken.py -c CI/config/pod_config.yaml &
|
||||
sleep 15
|
||||
curl -X POST http:/0.0.0.0:8081/STOP
|
||||
|
||||
wait
|
||||
|
||||
yq -i '.kraken.signal_state="PAUSE"' CI/config/pod_config.yaml
|
||||
yq -i '.tunings.daemon_mode=False' CI/config/pod_config.yaml
|
||||
cat CI/config/pod_config.yaml
|
||||
python3 -m coverage run -a run_kraken.py -c CI/config/pod_config.yaml &
|
||||
sleep 5
|
||||
curl -X POST http:/0.0.0.0:8081/RUN
|
||||
wait
|
||||
|
||||
echo "Pod disruption with server scenario test: Success"
|
||||
}
|
||||
|
||||
functional_test_pod_server
|
||||
18
CI/tests/test_pvc.sh
Executable file
18
CI/tests/test_pvc.sh
Executable file
@@ -0,0 +1,18 @@
|
||||
set -xeEo pipefail
|
||||
|
||||
source CI/tests/common.sh
|
||||
|
||||
trap error ERR
|
||||
trap finish EXIT
|
||||
|
||||
function functional_test_pvc_fill {
|
||||
export scenario_type="pvc_scenarios"
|
||||
export scenario_file="scenarios/kind/pvc_scenario.yaml"
|
||||
export post_config=""
|
||||
envsubst < CI/config/common_test_config.yaml > CI/config/pvc_config.yaml
|
||||
cat CI/config/pvc_config.yaml
|
||||
python3 -m coverage run -a run_kraken.py -c CI/config/pvc_config.yaml --debug True
|
||||
echo "PVC Fill scenario test: Success"
|
||||
}
|
||||
|
||||
functional_test_pvc_fill
|
||||
119
CI/tests/test_service_hijacking.sh
Normal file
119
CI/tests/test_service_hijacking.sh
Normal file
@@ -0,0 +1,119 @@
|
||||
set -xeEo pipefail
|
||||
|
||||
source CI/tests/common.sh
|
||||
|
||||
trap error ERR
|
||||
trap finish EXIT
|
||||
# port mapping has been configured in kind-config.yml
|
||||
SERVICE_URL=http://localhost:8888
|
||||
PAYLOAD_GET_1="{ \
|
||||
\"status\":\"internal server error\" \
|
||||
}"
|
||||
STATUS_CODE_GET_1=500
|
||||
|
||||
PAYLOAD_PATCH_1="resource patched"
|
||||
STATUS_CODE_PATCH_1=201
|
||||
|
||||
PAYLOAD_POST_1="{ \
|
||||
\"status\": \"unauthorized\" \
|
||||
}"
|
||||
STATUS_CODE_POST_1=401
|
||||
|
||||
PAYLOAD_GET_2="{ \
|
||||
\"status\":\"resource created\" \
|
||||
}"
|
||||
STATUS_CODE_GET_2=201
|
||||
|
||||
PAYLOAD_PATCH_2="bad request"
|
||||
STATUS_CODE_PATCH_2=400
|
||||
|
||||
PAYLOAD_POST_2="not found"
|
||||
STATUS_CODE_POST_2=404
|
||||
|
||||
JSON_MIME="application/json"
|
||||
TEXT_MIME="text/plain; charset=utf-8"
|
||||
|
||||
function functional_test_service_hijacking {
|
||||
|
||||
export scenario_type="service_hijacking_scenarios"
|
||||
export scenario_file="scenarios/kube/service_hijacking.yaml"
|
||||
export post_config=""
|
||||
envsubst < CI/config/common_test_config.yaml > CI/config/service_hijacking.yaml
|
||||
python3 -m coverage run -a run_kraken.py -c CI/config/service_hijacking.yaml > /tmp/krkn.log 2>&1 &
|
||||
PID=$!
|
||||
#Waiting the hijacking to have effect
|
||||
COUNTER=0
|
||||
while [ `curl -X GET -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/list/index.php` == 404 ]
|
||||
do
|
||||
echo "waiting scenario to kick in."
|
||||
sleep 1
|
||||
COUNTER=$((COUNTER+1))
|
||||
[ $COUNTER -eq "100" ] && echo "maximum number of retry reached, test failed" && exit 1
|
||||
done
|
||||
|
||||
#Checking Step 1 GET on /list/index.php
|
||||
OUT_GET="`curl -X GET -s $SERVICE_URL/list/index.php`"
|
||||
OUT_CONTENT=`curl -X GET -s -o /dev/null -I -w "%{content_type}" $SERVICE_URL/list/index.php`
|
||||
OUT_STATUS_CODE=`curl -X GET -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/list/index.php`
|
||||
[ "${PAYLOAD_GET_1//[$'\t\r\n ']}" == "${OUT_GET//[$'\t\r\n ']}" ] && echo "Step 1 GET Payload OK" || (echo "Payload did not match. Test failed." && exit 1)
|
||||
[ "$OUT_STATUS_CODE" == "$STATUS_CODE_GET_1" ] && echo "Step 1 GET Status Code OK" || (echo " Step 1 GET status code did not match. Test failed." && exit 1)
|
||||
[ "$OUT_CONTENT" == "$JSON_MIME" ] && echo "Step 1 GET MIME OK" || (echo " Step 1 GET MIME did not match. Test failed." && exit 1)
|
||||
|
||||
#Checking Step 1 POST on /list/index.php
|
||||
OUT_POST="`curl -s -X POST $SERVICE_URL/list/index.php`"
|
||||
OUT_STATUS_CODE=`curl -X POST -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/list/index.php`
|
||||
OUT_CONTENT=`curl -X POST -s -o /dev/null -I -w "%{content_type}" $SERVICE_URL/list/index.php`
|
||||
[ "${PAYLOAD_POST_1//[$'\t\r\n ']}" == "${OUT_POST//[$'\t\r\n ']}" ] && echo "Step 1 POST Payload OK" || (echo "Payload did not match. Test failed." && exit 1)
|
||||
[ "$OUT_STATUS_CODE" == "$STATUS_CODE_POST_1" ] && echo "Step 1 POST Status Code OK" || (echo "Step 1 POST status code did not match. Test failed." && exit 1)
|
||||
[ "$OUT_CONTENT" == "$JSON_MIME" ] && echo "Step 1 POST MIME OK" || (echo " Step 1 POST MIME did not match. Test failed." && exit 1)
|
||||
|
||||
#Checking Step 1 PATCH on /patch
|
||||
OUT_PATCH="`curl -s -X PATCH $SERVICE_URL/patch`"
|
||||
OUT_STATUS_CODE=`curl -X PATCH -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/patch`
|
||||
OUT_CONTENT=`curl -X PATCH -s -o /dev/null -I -w "%{content_type}" $SERVICE_URL/patch`
|
||||
[ "${PAYLOAD_PATCH_1//[$'\t\r\n ']}" == "${OUT_PATCH//[$'\t\r\n ']}" ] && echo "Step 1 PATCH Payload OK" || (echo "Payload did not match. Test failed." && exit 1)
|
||||
[ "$OUT_STATUS_CODE" == "$STATUS_CODE_PATCH_1" ] && echo "Step 1 PATCH Status Code OK" || (echo "Step 1 PATCH status code did not match. Test failed." && exit 1)
|
||||
[ "$OUT_CONTENT" == "$TEXT_MIME" ] && echo "Step 1 PATCH MIME OK" || (echo " Step 1 PATCH MIME did not match. Test failed." && exit 1)
|
||||
# wait for the next step
|
||||
sleep 16
|
||||
|
||||
#Checking Step 2 GET on /list/index.php
|
||||
OUT_GET="`curl -X GET -s $SERVICE_URL/list/index.php`"
|
||||
OUT_CONTENT=`curl -X GET -s -o /dev/null -I -w "%{content_type}" $SERVICE_URL/list/index.php`
|
||||
OUT_STATUS_CODE=`curl -X GET -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/list/index.php`
|
||||
[ "${PAYLOAD_GET_2//[$'\t\r\n ']}" == "${OUT_GET//[$'\t\r\n ']}" ] && echo "Step 2 GET Payload OK" || (echo "Step 2 GET Payload did not match. Test failed." && exit 1)
|
||||
[ "$OUT_STATUS_CODE" == "$STATUS_CODE_GET_2" ] && echo "Step 2 GET Status Code OK" || (echo "Step 2 GET status code did not match. Test failed." && exit 1)
|
||||
[ "$OUT_CONTENT" == "$JSON_MIME" ] && echo "Step 2 GET MIME OK" || (echo " Step 2 GET MIME did not match. Test failed." && exit 1)
|
||||
|
||||
#Checking Step 2 POST on /list/index.php
|
||||
OUT_POST="`curl -s -X POST $SERVICE_URL/list/index.php`"
|
||||
OUT_CONTENT=`curl -X POST -s -o /dev/null -I -w "%{content_type}" $SERVICE_URL/list/index.php`
|
||||
OUT_STATUS_CODE=`curl -X POST -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/list/index.php`
|
||||
[ "${PAYLOAD_POST_2//[$'\t\r\n ']}" == "${OUT_POST//[$'\t\r\n ']}" ] && echo "Step 2 POST Payload OK" || (echo "Step 2 POST Payload did not match. Test failed." && exit 1)
|
||||
[ "$OUT_STATUS_CODE" == "$STATUS_CODE_POST_2" ] && echo "Step 2 POST Status Code OK" || (echo "Step 2 POST status code did not match. Test failed." && exit 1)
|
||||
[ "$OUT_CONTENT" == "$TEXT_MIME" ] && echo "Step 2 POST MIME OK" || (echo " Step 2 POST MIME did not match. Test failed." && exit 1)
|
||||
|
||||
#Checking Step 2 PATCH on /patch
|
||||
OUT_PATCH="`curl -s -X PATCH $SERVICE_URL/patch`"
|
||||
OUT_CONTENT=`curl -X PATCH -s -o /dev/null -I -w "%{content_type}" $SERVICE_URL/patch`
|
||||
OUT_STATUS_CODE=`curl -X PATCH -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL/patch`
|
||||
[ "${PAYLOAD_PATCH_2//[$'\t\r\n ']}" == "${OUT_PATCH//[$'\t\r\n ']}" ] && echo "Step 2 PATCH Payload OK" || (echo "Step 2 PATCH Payload did not match. Test failed." && exit 1)
|
||||
[ "$OUT_STATUS_CODE" == "$STATUS_CODE_PATCH_2" ] && echo "Step 2 PATCH Status Code OK" || (echo "Step 2 PATCH status code did not match. Test failed." && exit 1)
|
||||
[ "$OUT_CONTENT" == "$TEXT_MIME" ] && echo "Step 2 PATCH MIME OK" || (echo " Step 2 PATCH MIME did not match. Test failed." && exit 1)
|
||||
|
||||
|
||||
|
||||
wait $PID
|
||||
|
||||
cat /tmp/krkn.log
|
||||
|
||||
# now checking if service has been restore correctly and nginx responds correctly
|
||||
curl -s $SERVICE_URL | grep nginx! && echo "BODY: Service restored!" || (echo "BODY: failed to restore service" && exit 1)
|
||||
OUT_STATUS_CODE=`curl -X GET -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL`
|
||||
[ "$OUT_STATUS_CODE" == "200" ] && echo "STATUS_CODE: Service restored!" || (echo "STATUS_CODE: failed to restore service" && exit 1)
|
||||
|
||||
echo "Service Hijacking Chaos test: Success"
|
||||
}
|
||||
|
||||
|
||||
functional_test_service_hijacking
|
||||
37
CI/tests/test_telemetry.sh
Normal file
37
CI/tests/test_telemetry.sh
Normal file
@@ -0,0 +1,37 @@
|
||||
set -xeEo pipefail
|
||||
|
||||
source CI/tests/common.sh
|
||||
|
||||
trap error ERR
|
||||
trap finish EXIT
|
||||
|
||||
|
||||
function functional_test_telemetry {
|
||||
AWS_CLI=`which aws`
|
||||
[ -z "$AWS_CLI" ]&& echo "AWS cli not found in path" && exit 1
|
||||
[ -z "$AWS_BUCKET" ] && echo "AWS bucket not set in environment" && exit 1
|
||||
|
||||
export RUN_TAG="funtest-telemetry"
|
||||
yq -i '.telemetry.enabled=True' CI/config/common_test_config.yaml
|
||||
yq -i '.telemetry.full_prometheus_backup=True' CI/config/common_test_config.yaml
|
||||
yq -i '.performance_monitoring.check_critical_alerts=True' CI/config/common_test_config.yaml
|
||||
yq -i '.performance_monitoring.prometheus_url="http://localhost:9090"' CI/config/common_test_config.yaml
|
||||
yq -i '.telemetry.run_tag=env(RUN_TAG)' CI/config/common_test_config.yaml
|
||||
|
||||
export scenario_type="pod_disruption_scenarios"
|
||||
export scenario_file="scenarios/kind/pod_path_provisioner.yml"
|
||||
|
||||
export post_config=""
|
||||
envsubst < CI/config/common_test_config.yaml > CI/config/telemetry.yaml
|
||||
retval=$(python3 -m coverage run -a run_kraken.py -c CI/config/telemetry.yaml)
|
||||
RUN_FOLDER=`cat CI/out/test_telemetry.out | grep amazonaws.com | sed -rn "s#.*https:\/\/.*\/files/(.*)#\1#p" | sed 's/\x1b\[[0-9;]*m//g'`
|
||||
$AWS_CLI s3 ls "s3://$AWS_BUCKET/$RUN_FOLDER/" | awk '{ print $4 }' > s3_remote_files
|
||||
echo "checking if telemetry files are uploaded on s3"
|
||||
cat s3_remote_files | grep critical-alerts-00.log || ( echo "FAILED: critical-alerts-00.log not uploaded" && exit 1 )
|
||||
cat s3_remote_files | grep prometheus-00.tar || ( echo "FAILED: prometheus backup not uploaded" && exit 1 )
|
||||
cat s3_remote_files | grep telemetry.json || ( echo "FAILED: telemetry.json not uploaded" && exit 1 )
|
||||
echo "all files uploaded!"
|
||||
echo "Telemetry Collection: Success"
|
||||
}
|
||||
|
||||
functional_test_telemetry
|
||||
175
CI/tests_v2/CONTRIBUTING_TESTS.md
Normal file
175
CI/tests_v2/CONTRIBUTING_TESTS.md
Normal file
@@ -0,0 +1,175 @@
|
||||
# Adding a New Scenario Test (CI/tests_v2)
|
||||
|
||||
This guide explains how to add a new chaos scenario test to the v2 pytest framework. The layout is **folder-per-scenario**: each scenario has its own directory under `scenarios/<scenario_name>/` containing the test file, Kubernetes resources, and the Krkn scenario base YAML.
|
||||
|
||||
## Option 1: Scaffold script (recommended)
|
||||
|
||||
From the **repository root**:
|
||||
|
||||
```bash
|
||||
python CI/tests_v2/scaffold.py --scenario service_hijacking
|
||||
```
|
||||
|
||||
This creates:
|
||||
|
||||
- `CI/tests_v2/scenarios/service_hijacking/test_service_hijacking.py` — A test class extending `BaseScenarioTest` with a stub `test_happy_path` and `WORKLOAD_MANIFEST` pointing to the folder’s `resource.yaml`.
|
||||
- `CI/tests_v2/scenarios/service_hijacking/resource.yaml` — A placeholder Deployment (namespace is patched at deploy time).
|
||||
- `CI/tests_v2/scenarios/service_hijacking/scenario_base.yaml` — A placeholder Krkn scenario; edit this with the structure expected by your scenario type.
|
||||
|
||||
The script automatically registers the marker in `CI/tests_v2/pytest.ini`. For example, it adds:
|
||||
|
||||
```
|
||||
service_hijacking: marks a test as a service_hijacking scenario test
|
||||
```
|
||||
|
||||
**Next steps after scaffolding:**
|
||||
|
||||
1. Verify the marker was added to `pytest.ini` (the scaffold does this automatically).
|
||||
2. Edit `scenario_base.yaml` with the structure your Krkn scenario type expects (see `scenarios/application_outage/scenario_base.yaml` and `scenarios/pod_disruption/scenario_base.yaml` for examples). The top-level key should match `SCENARIO_NAME`.
|
||||
3. If your scenario uses a **list** structure (like pod_disruption) instead of a **dict** with a top-level key, set `NAMESPACE_KEY_PATH` (e.g. `[0, "config", "namespace_pattern"]`) and `NAMESPACE_IS_REGEX = True` if the namespace is a regex pattern.
|
||||
4. The generated `test_happy_path` already uses `self.run_scenario(self.tmp_path, ns)` and assertions. Add more test methods (e.g. negative tests with `@pytest.mark.no_workload`) as needed.
|
||||
5. Adjust `resource.yaml` if your scenario needs a different workload (e.g. specific image or labels).
|
||||
|
||||
If your Kraken scenario type string is not `<scenario>_scenarios`, pass it explicitly:
|
||||
|
||||
```bash
|
||||
python CI/tests_v2/scaffold.py --scenario node_disruption --scenario-type node_scenarios
|
||||
```
|
||||
|
||||
## Option 2: Manual setup
|
||||
|
||||
1. **Create the scenario folder**
|
||||
`CI/tests_v2/scenarios/<scenario_name>/`.
|
||||
|
||||
2. **Add resource.yaml**
|
||||
Kubernetes manifest(s) for the workload (Deployment or Pod). Use a distinct label (e.g. `app: <scenario>-target`). Omit or leave `metadata.namespace`; the framework patches it at deploy time.
|
||||
|
||||
3. **Add scenario_base.yaml**
|
||||
The canonical Krkn scenario structure. Tests will load this, patch namespace (and any overrides), write to `tmp_path`, and pass to `build_config`. See existing scenarios for the format your scenario type expects.
|
||||
|
||||
4. **Add test_<scenario>.py**
|
||||
- Import `BaseScenarioTest` from `lib.base` and helpers from `lib.utils` (e.g. `assert_kraken_success`, `get_pods_list`, `scenario_dir` if needed).
|
||||
- Define a class extending `BaseScenarioTest` with:
|
||||
- `WORKLOAD_MANIFEST = "CI/tests_v2/scenarios/<scenario_name>/resource.yaml"`
|
||||
- `WORKLOAD_IS_PATH = True`
|
||||
- `LABEL_SELECTOR = "app=<label>"`
|
||||
- `SCENARIO_NAME = "<scenario_name>"`
|
||||
- `SCENARIO_TYPE = "<scenario_type>"` (e.g. `application_outages_scenarios`)
|
||||
- `NAMESPACE_KEY_PATH`: path to the namespace field (e.g. `["application_outage", "namespace"]` for dict-based, or `[0, "config", "namespace_pattern"]` for list-based)
|
||||
- `NAMESPACE_IS_REGEX = False` (or `True` for regex patterns like pod_disruption)
|
||||
- `OVERRIDES_KEY_PATH = ["<top-level key>"]` if the scenario supports overrides (e.g. duration, block).
|
||||
- Add `@pytest.mark.functional` and `@pytest.mark.<scenario>` on the class.
|
||||
- In at least one test, call `self.run_scenario(self.tmp_path, self.ns)` and assert with `assert_kraken_success`, `assert_pod_count_unchanged`, and `assert_all_pods_running_and_ready`. Use `self.k8s_core`, `self.tmp_path`, etc. (injected by the base class).
|
||||
|
||||
5. **Register the marker**
|
||||
In `CI/tests_v2/pytest.ini`, under `markers`:
|
||||
```
|
||||
<scenario>: marks a test as a <scenario> scenario test
|
||||
```
|
||||
|
||||
## Conventions
|
||||
|
||||
- **Folder-per-scenario**: One directory per scenario under `scenarios/`. All assets (test, resource.yaml, scenario_base.yaml, and any extra YAMLs) live there for easy tracking and onboarding.
|
||||
- **Ephemeral namespace**: Every test gets a unique `krkn-test-<uuid>` namespace. The base class deploys the workload into it before the test; no manual deploy is required.
|
||||
- **Negative tests**: For tests that don’t need a workload (e.g. invalid scenario, bad namespace), use `@pytest.mark.no_workload`. The test will still get a namespace but no workload will be deployed.
|
||||
- **Scenario type**: `SCENARIO_TYPE` must match the key in Kraken’s config (e.g. `application_outages_scenarios`, `pod_disruption_scenarios`). See `CI/tests_v2/config/common_test_config.yaml` and the scenario plugin’s `get_scenario_types()`.
|
||||
- **Assertions**: Use `assert_kraken_success(result, context=f"namespace={ns}", tmp_path=self.tmp_path)` so failures include stdout/stderr and optional log files.
|
||||
- **Timeouts**: Use constants from `lib.base` (`READINESS_TIMEOUT`, `POLICY_WAIT_TIMEOUT`, etc.) instead of magic numbers.
|
||||
|
||||
## Exit Code Handling
|
||||
|
||||
Kraken uses the following exit codes: **0** = success; **1** = scenario failure (e.g. post scenarios still failing); **2** = critical alerts fired; **3+** = health check / KubeVirt check failures; **-1** = infrastructure error (bad config, no kubeconfig).
|
||||
|
||||
- **Happy-path tests**: Use `assert_kraken_success(result, ...)`. By default only exit code 0 is accepted.
|
||||
- **Alert-aware tests**: If you enable `check_critical_alerts` and expect alerts, use `assert_kraken_success(result, allowed_codes=(0, 2), ...)` so exit code 2 is treated as acceptable.
|
||||
- **Expected-failure tests**: Use `assert_kraken_failure(result, context=..., tmp_path=self.tmp_path)` for negative tests (invalid scenario, bad namespace, etc.). This gives the same diagnostic quality (log dump, tmp_path hint) as success assertions. Prefer this over a bare `assert result.returncode != 0`.
|
||||
|
||||
## Running your new tests
|
||||
|
||||
```bash
|
||||
pytest CI/tests_v2/ -v -m <scenario>
|
||||
```
|
||||
|
||||
For debugging with logs and keeping failed namespaces:
|
||||
|
||||
```bash
|
||||
pytest CI/tests_v2/ -v -m <scenario> --log-cli-level=DEBUG --keep-ns-on-fail
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Naming Conventions
|
||||
|
||||
Follow these conventions so the framework stays consistent as new scenarios are added.
|
||||
|
||||
### Quick Reference
|
||||
|
||||
| Element | Pattern | Example |
|
||||
|---|---|---|
|
||||
| Scenario folder | `scenarios/<snake_case>/` | `scenarios/node_disruption/` |
|
||||
| Test file | `test_<scenario>.py` | `test_node_disruption.py` |
|
||||
| Test class | `Test<CamelCase>(BaseScenarioTest)` | `TestNodeDisruption` |
|
||||
| Pytest marker | `@pytest.mark.<scenario>` (matches folder) | `@pytest.mark.node_disruption` |
|
||||
| Scenario YAML | `scenario_base.yaml` | — |
|
||||
| Workload YAML | `resource.yaml` | — |
|
||||
| Extra YAMLs | `<descriptive_name>.yaml` | `nginx_http.yaml` |
|
||||
| Lib modules | `lib/<concern>.py` | `lib/deploy.py` |
|
||||
| Public fixtures | `<verb>_<noun>` or `<noun>` | `run_kraken`, `test_namespace` |
|
||||
| Private/autouse fixtures | `_<descriptive>` | `_cleanup_stale_namespaces` |
|
||||
| Assertion helpers | `assert_<condition>` | `assert_pod_count_unchanged` |
|
||||
| Query helpers | `get_<resource>` or `find_<resource>_by_<criteria>` | `get_pods_list`, `find_network_policy_by_prefix` |
|
||||
| Env var overrides | `KRKN_TEST_<NAME>` | `KRKN_TEST_READINESS_TIMEOUT` |
|
||||
|
||||
### Folders
|
||||
|
||||
- One folder per scenario under `scenarios/`. The folder name is `snake_case` and must match the `SCENARIO_NAME` class attribute in the test.
|
||||
- Shared framework code lives in `lib/`. Each module covers a single concern (`k8s`, `namespace`, `deploy`, `kraken`, `utils`, `base`, `preflight`).
|
||||
- Do **not** add scenario-specific code to `lib/`; keep it in the scenario folder as module-level helpers.
|
||||
|
||||
### Files
|
||||
|
||||
- Test files: `test_<scenario>.py`. This is required for pytest discovery (`test_*.py`).
|
||||
- Workload manifests: always `resource.yaml`. If a scenario needs additional K8s resources (e.g. a Service for traffic testing), use a descriptive name like `nginx_http.yaml`.
|
||||
- Scenario config: always `scenario_base.yaml`. This is the template that `load_and_patch_scenario` loads and patches.
|
||||
|
||||
### Classes
|
||||
|
||||
- One test class per file: `Test<CamelCase>` extending `BaseScenarioTest`.
|
||||
- The CamelCase name must be the PascalCase equivalent of the folder name (e.g. `pod_disruption` -> `TestPodDisruption`).
|
||||
|
||||
### Test Methods
|
||||
|
||||
- Prefix: `test_` (pytest requirement).
|
||||
- Use descriptive names that convey **what is being verified**, not implementation details.
|
||||
- Good: `test_pod_crash_and_recovery`, `test_traffic_blocked_during_outage`, `test_invalid_scenario_fails`.
|
||||
- Avoid: `test_run_1`, `test_scenario`, `test_it_works`.
|
||||
|
||||
### Fixtures
|
||||
|
||||
- **Public fixtures** (intended for use in tests): use `<verb>_<noun>` or plain `<noun>`. Examples: `run_kraken`, `deploy_workload`, `test_namespace`, `kubectl`.
|
||||
- **Private/autouse fixtures** (framework internals): prefix with `_`. Examples: `_kube_config_loaded`, `_preflight_checks`, `_inject_common_fixtures`.
|
||||
- K8s client fixtures use the `k8s_` prefix: `k8s_core`, `k8s_apps`, `k8s_networking`, `k8s_client`.
|
||||
|
||||
### Helpers and Utilities
|
||||
|
||||
- **Assertions**: `assert_<what_is_expected>`. Always raise `AssertionError` with a message that includes the namespace.
|
||||
- **K8s queries**: `get_<resource>_list` for direct API calls, `find_<resource>_by_<criteria>` for filtered lookups.
|
||||
- **Private helpers**: prefix with `_` for module-internal functions (e.g. `_pods`, `_policies`, `_get_nested`).
|
||||
|
||||
### Constants and Environment Variables
|
||||
|
||||
- Timeout constants: `UPPER_CASE` in `lib/base.py`. Each is overridable via an env var prefixed `KRKN_TEST_`.
|
||||
- Feature flags: `KRKN_TEST_DRY_RUN`, `KRKN_TEST_COVERAGE`. Always use the `KRKN_TEST_` prefix so all tunables are discoverable with `grep KRKN_TEST_`.
|
||||
|
||||
### Markers
|
||||
|
||||
- Every test class gets `@pytest.mark.functional` (framework-wide) and `@pytest.mark.<scenario>` (scenario-specific).
|
||||
- The scenario marker name matches the folder name exactly.
|
||||
- Behavioral modifiers use plain descriptive names: `no_workload`, `order`.
|
||||
- Register all custom markers in `pytest.ini` to avoid warnings.
|
||||
|
||||
## Adding Dependencies
|
||||
|
||||
- **Runtime (Kraken needs it)**: Add to the **root** `requirements.txt`. Pin a version (e.g. `package==1.2.3` or `package>=1.2,<2`).
|
||||
- **Test-only (only CI/tests_v2 needs it)**: Add to **`CI/tests_v2/requirements.txt`**. Pin a version there as well.
|
||||
- After changing either file, run `make setup` (or `make -f CI/tests_v2/Makefile setup`) from the repo root to verify both files install cleanly together.
|
||||
97
CI/tests_v2/Makefile
Normal file
97
CI/tests_v2/Makefile
Normal file
@@ -0,0 +1,97 @@
|
||||
# CI/tests_v2 functional tests - single entry point.
# Run from repo root: make -f CI/tests_v2/Makefile <target>
# Or from CI/tests_v2: make <target> (REPO_ROOT is resolved automatically).

# Resolve repo root: go to Makefile dir then up two levels (CI/tests_v2 -> repo root)
REPO_ROOT := $(shell cd "$(dir $(firstword $(MAKEFILE_LIST)))" && cd ../.. && pwd)
VENV := $(REPO_ROOT)/venv
PYTHON := $(VENV)/bin/python
PIP := $(VENV)/bin/pip
CLUSTER_NAME ?= ci-krkn
TESTS_DIR := $(REPO_ROOT)/CI/tests_v2

.PHONY: setup preflight test test-fast test-debug test-scenario test-dry-run clean help

help:
	@echo "CI/tests_v2 functional tests - usage: make [target]"
	@echo ""
	@echo "Targets:"
	@echo "  setup          Create venv (if missing), install Python deps, create KinD cluster (kind-config-dev.yml)."
	@echo "                 Run once before first test. Override cluster config: KIND_CONFIG=path make setup"
	@echo ""
	@echo "  preflight      Check Python 3.9+, kind, kubectl, Docker, cluster reachability, test deps."
	@echo "                 Invoked automatically by test targets; run standalone to validate environment."
	@echo ""
	@echo "  test           Full run: retries (2), timeout 300s, HTML report, JUnit XML, coverage."
	@echo "                 Use for CI or final verification. Output: report.html, results.xml"
	@echo ""
	@echo "  test-fast      Quick run: no retries, 120s timeout, no report. For fast local iteration."
	@echo ""
	@echo "  test-debug     Debug run: verbose (-s), keep failed namespaces (--keep-ns-on-fail), DEBUG logging."
	@echo "                 Use when investigating failures; inspect kept namespaces with kubectl."
	@echo ""
	@echo "  test-scenario  Run only one scenario. Requires SCENARIO=<marker>."
	@echo "                 Example: make test-scenario SCENARIO=pod_disruption"
	@echo ""
	@echo "  test-dry-run   Validate scenario plumbing only (no Kraken execution). Sets KRKN_TEST_DRY_RUN=1."
	@echo ""
	@echo "  clean          Delete KinD cluster $(CLUSTER_NAME) and remove report.html, results.xml."
	@echo ""
	@echo "  help           Show this help."
	@echo ""
	@echo "Run from repo root: make -f CI/tests_v2/Makefile <target>"
	@echo "Or from CI/tests_v2: make <target>"

# Install deps first (sentinel prerequisite), then validate the environment,
# then create the cluster.
setup: $(VENV)/.installed
	@echo "Running cluster setup..."
	$(MAKE) -f $(TESTS_DIR)/Makefile preflight
	cd $(REPO_ROOT) && ./CI/tests_v2/setup_env.sh
	@echo "Setup complete. Run 'make test' or 'make -f CI/tests_v2/Makefile test' from repo root."

# Sentinel file: re-installs whenever either requirements file changes.
$(VENV)/.installed: $(REPO_ROOT)/requirements.txt $(TESTS_DIR)/requirements.txt
	@if [ ! -d "$(VENV)" ]; then python3 -m venv $(VENV); echo "Created venv at $(VENV)"; fi
	$(PYTHON) -m pip install -q --upgrade pip
	# Root = Kraken runtime; tests_v2 = test-only plugins; both required for functional tests.
	$(PIP) install -q -r $(REPO_ROOT)/requirements.txt
	$(PIP) install -q -r $(TESTS_DIR)/requirements.txt
	@touch $(VENV)/.installed
	@echo "Python deps installed."

preflight:
	@echo "Preflight: checking Python, tools, and cluster..."
	@command -v python3 >/dev/null 2>&1 || { echo "Error: python3 not found."; exit 1; }
	@python3 -c "import sys; exit(0 if sys.version_info >= (3, 9) else 1)" || { echo "Error: Python 3.9+ required."; exit 1; }
	@command -v kind >/dev/null 2>&1 || { echo "Error: kind not installed."; exit 1; }
	@command -v kubectl >/dev/null 2>&1 || { echo "Error: kubectl not installed."; exit 1; }
	@docker info >/dev/null 2>&1 || { echo "Error: Docker not running (required for KinD)."; exit 1; }
	@if kind get clusters 2>/dev/null | grep -qx "$(CLUSTER_NAME)"; then \
		kubectl cluster-info >/dev/null 2>&1 || { echo "Error: Cluster $(CLUSTER_NAME) exists but cluster-info failed."; exit 1; }; \
	else \
		echo "Note: Cluster $(CLUSTER_NAME) not found. Run 'make setup' to create it."; \
	fi
	# xdist included: 'test' and 'test-fast' pass -n auto, so a missing
	# pytest-xdist must fail preflight, not the test run.
	@$(PYTHON) -c "import pytest_rerunfailures, pytest_html, pytest_timeout, pytest_order, xdist" 2>/dev/null || \
		{ echo "Error: Install test deps with 'make setup' or pip install -r CI/tests_v2/requirements.txt"; exit 1; }
	@echo "Preflight OK."

test: preflight
	cd $(REPO_ROOT) && KRKN_TEST_COVERAGE=1 $(PYTHON) -m pytest $(TESTS_DIR)/ -v --timeout=300 --reruns=2 --reruns-delay=10 \
		--html=$(TESTS_DIR)/report.html -n auto --junitxml=$(TESTS_DIR)/results.xml

test-fast: preflight
	cd $(REPO_ROOT) && $(PYTHON) -m pytest $(TESTS_DIR)/ -v -p no:rerunfailures -n auto --timeout=120

test-debug: preflight
	cd $(REPO_ROOT) && $(PYTHON) -m pytest $(TESTS_DIR)/ -v -s -p no:rerunfailures --timeout=300 \
		--keep-ns-on-fail --log-cli-level=DEBUG

test-scenario: preflight
	@if [ -z "$(SCENARIO)" ]; then echo "Error: set SCENARIO=pod_disruption (or application_outage, etc.)"; exit 1; fi
	cd $(REPO_ROOT) && $(PYTHON) -m pytest $(TESTS_DIR)/ -v -m "$(SCENARIO)" --timeout=300 --reruns=2 --reruns-delay=10

test-dry-run: preflight
	cd $(REPO_ROOT) && KRKN_TEST_DRY_RUN=1 $(PYTHON) -m pytest $(TESTS_DIR)/ -v

clean:
	@kind delete cluster --name $(CLUSTER_NAME) 2>/dev/null || true
	@rm -f $(TESTS_DIR)/report.html $(TESTS_DIR)/results.xml
	@echo "Cleaned cluster and report artifacts."
|
||||
198
CI/tests_v2/README.md
Normal file
198
CI/tests_v2/README.md
Normal file
@@ -0,0 +1,198 @@
|
||||
# Pytest Functional Tests (tests_v2)
|
||||
|
||||
This directory contains a pytest-based functional test framework that runs **alongside** the existing bash tests in `CI/tests/`. It covers the **pod disruption** and **application outage** scenarios with proper assertions, retries, and reporting.
|
||||
|
||||
Each test runs in its **own ephemeral Kubernetes namespace** (`krkn-test-<uuid>`). Before the test, the framework creates the namespace, deploys the target workload, and waits for pods to be ready. After the test, the namespace is deleted (cascading all resources). **You do not need to deploy any workloads manually.**
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Without a cluster, tests that need one will **skip** with a clear message (e.g. *"Could not load kube config"*). No manual workload deployment is required; workloads are deployed automatically into ephemeral namespaces per test.
|
||||
|
||||
- **KinD cluster** (or any Kubernetes cluster) running with `kubectl` configured (e.g. `KUBECONFIG` or default `~/.kube/config`).
|
||||
- **Python 3.9+** and main repo deps: `pip install -r requirements.txt`.
|
||||
|
||||
### Supported clusters
|
||||
|
||||
- **KinD** (recommended): Use `make -f CI/tests_v2/Makefile setup` from the repo root. Fastest for local dev; uses a 2-node dev config by default. Override with `KIND_CONFIG=/path/to/kind-config.yml` for a larger cluster.
|
||||
- **Minikube**: Should work; ensure `kubectl` context is set. Not tested in CI.
|
||||
- **Remote/cloud cluster**: Tests create and delete namespaces; use with caution. Use `--require-kind` to avoid accidentally running against production (tests will skip unless context is kind/minikube).
|
||||
|
||||
### Setting up the cluster
|
||||
|
||||
**Option A: Use the setup script (recommended)**
|
||||
|
||||
From the repository root, with `kind` and `kubectl` installed:
|
||||
|
||||
```bash
|
||||
# Create KinD cluster (defaults to CI/tests_v2/kind-config-dev.yml; override with KIND_CONFIG=...)
|
||||
./CI/tests_v2/setup_env.sh
|
||||
```
|
||||
|
||||
Then in the same shell (or after `export KUBECONFIG=~/.kube/config` in another terminal), activate your venv and install Python deps:
|
||||
|
||||
```bash
|
||||
python3 -m venv venv
|
||||
source venv/bin/activate # or: source venv/Scripts/activate on Windows
|
||||
pip install -r requirements.txt
|
||||
pip install -r CI/tests_v2/requirements.txt
|
||||
```
|
||||
|
||||
**Option B: Manual setup**
|
||||
|
||||
1. Install [kind](https://kind.sigs.k8s.io/docs/user/quick-start/) and [kubectl](https://kubernetes.io/docs/tasks/tools/).
|
||||
2. Create a cluster (from repo root):
|
||||
```bash
|
||||
   kind create cluster --name ci-krkn --config kind-config.yml
|
||||
```
|
||||
3. Wait for the cluster:
|
||||
```bash
|
||||
kubectl wait --for=condition=Ready nodes --all --timeout=120s
|
||||
```
|
||||
4. Create a virtualenv, activate it, and install dependencies (as in Option A).
|
||||
5. Run tests from repo root: `pytest CI/tests_v2/ -v ...`
|
||||
|
||||
## Install test dependencies
|
||||
|
||||
From the repository root:
|
||||
|
||||
```bash
|
||||
pip install -r CI/tests_v2/requirements.txt
|
||||
```
|
||||
|
||||
This adds `pytest-rerunfailures`, `pytest-html`, `pytest-timeout`, `pytest-order`, and `pytest-xdist` (pytest and coverage come from the main `requirements.txt`).
|
||||
|
||||
## Dependency Management
|
||||
|
||||
Dependencies are split into two files:
|
||||
|
||||
- **Root `requirements.txt`** — Kraken runtime (cloud SDKs, Kubernetes client, krkn-lib, pytest, coverage, etc.). Required to run Kraken.
|
||||
- **`CI/tests_v2/requirements.txt`** — Test-only pytest plugins (rerunfailures, html, timeout, order, xdist). Not needed by Kraken itself.
|
||||
|
||||
**Rule of thumb:** If Kraken needs it at runtime, add to root. If only the functional tests need it, add to `CI/tests_v2/requirements.txt`.
|
||||
|
||||
Running `make -f CI/tests_v2/Makefile setup` (or `make setup` from `CI/tests_v2`) creates the venv and installs **both** files automatically; you do not need to install them separately. The Makefile re-installs when either file changes (via the `.installed` sentinel).
|
||||
|
||||
## Run tests
|
||||
|
||||
All commands below are from the **repository root**.
|
||||
|
||||
### Basic run (with retries and HTML report)
|
||||
|
||||
```bash
|
||||
pytest CI/tests_v2/ -v --timeout=300 --reruns=2 --reruns-delay=10 --html=CI/tests_v2/report.html --junitxml=CI/tests_v2/results.xml
|
||||
```
|
||||
|
||||
- Failed tests are **retried up to 2 times** with a 10s delay (configurable in `CI/tests_v2/pytest.ini`).
|
||||
- Each test has a **5-minute timeout**.
|
||||
- Open `CI/tests_v2/report.html` in a browser for a detailed report.
|
||||
|
||||
### Run in parallel (faster suite)
|
||||
|
||||
```bash
|
||||
pytest CI/tests_v2/ -v -n 4 --timeout=300
|
||||
```
|
||||
|
||||
Ephemeral namespaces make tests parallel-safe; use `-n` with the number of workers (e.g. 4).
|
||||
|
||||
### Run without retries (for debugging)
|
||||
|
||||
```bash
|
||||
pytest CI/tests_v2/ -v -p no:rerunfailures
|
||||
```
|
||||
|
||||
### Run with coverage
|
||||
|
||||
```bash
|
||||
python -m coverage run -m pytest CI/tests_v2/ -v
|
||||
python -m coverage report
|
||||
```
|
||||
|
||||
To append to existing coverage from unit tests, ensure coverage was started with `coverage run -a` for earlier runs, or run the full test suite in one go.
|
||||
|
||||
### Run only pod disruption tests
|
||||
|
||||
```bash
|
||||
pytest CI/tests_v2/ -v -m pod_disruption
|
||||
```
|
||||
|
||||
### Run only application outage tests
|
||||
|
||||
```bash
|
||||
pytest CI/tests_v2/ -v -m application_outage
|
||||
```
|
||||
|
||||
### Run with verbose output and no capture
|
||||
|
||||
```bash
|
||||
pytest CI/tests_v2/ -v -s
|
||||
```
|
||||
|
||||
### Keep failed test namespaces for debugging
|
||||
|
||||
When a test fails, its ephemeral namespace is normally deleted. To **keep** the namespace so you can inspect pods, logs, and network policies:
|
||||
|
||||
```bash
|
||||
pytest CI/tests_v2/ -v --keep-ns-on-fail
|
||||
```
|
||||
|
||||
On failure, the namespace name is printed (e.g. `[keep-ns-on-fail] Keeping namespace krkn-test-a1b2c3d4 for debugging`). Use `kubectl get pods -n krkn-test-a1b2c3d4` (and similar) to debug, then delete the namespace manually when done.
|
||||
|
||||
### Logging and cluster options
|
||||
|
||||
- **Structured logging**: Use `--log-cli-level=DEBUG` to see namespace creation, workload deploy, and readiness in the console. Use `--log-file=test.log` to capture logs to a file.
|
||||
- **Require dev cluster**: To avoid running against the wrong cluster, use `--require-kind`. Tests will skip unless the current kube context cluster name contains "kind" or "minikube".
|
||||
- **Stale namespace cleanup**: At session start, namespaces matching `krkn-test-*` that are older than 30 minutes are deleted (e.g. from a previous crashed run).
|
||||
- **Timeout overrides**: Set env vars to tune timeouts (e.g. in CI): `KRKN_TEST_READINESS_TIMEOUT`, `KRKN_TEST_DEPLOY_TIMEOUT`, `KRKN_TEST_NS_CLEANUP_TIMEOUT`, `KRKN_TEST_POLICY_WAIT_TIMEOUT`, `KRKN_TEST_KRAKEN_PROC_WAIT_TIMEOUT`, `KRKN_TEST_TIMEOUT_BUDGET`.
|
||||
|
||||
## Architecture
|
||||
|
||||
- **Folder-per-scenario**: Each scenario lives under `scenarios/<scenario_name>/` with:
|
||||
- **test_<scenario>.py** — Test class extending `BaseScenarioTest`; sets `WORKLOAD_MANIFEST`, `SCENARIO_NAME`, `SCENARIO_TYPE`, `NAMESPACE_KEY_PATH`, and optionally `OVERRIDES_KEY_PATH`.
|
||||
- **resource.yaml** — Kubernetes resources (Deployment/Pod) for the scenario; namespace is patched at deploy time.
|
||||
- **scenario_base.yaml** — Canonical Krkn scenario; the base class loads it, patches namespace (and overrides), and passes it to Kraken via `run_scenario()`. Optional extra YAMLs (e.g. `nginx_http.yaml` for application_outage) can live in the same folder.
|
||||
- **lib/**: Shared framework — `lib/base.py` defines `BaseScenarioTest`, timeout constants (env-overridable), and scenario helpers (`load_and_patch_scenario`, `run_scenario`); `lib/utils.py` provides assertion and K8s helpers; `lib/k8s.py` provides K8s client fixtures; `lib/namespace.py` provides namespace lifecycle; `lib/deploy.py` provides `deploy_workload`, `wait_for_pods_running`, `wait_for_deployment_replicas`; `lib/kraken.py` provides `run_kraken`, `build_config` (using `CI/tests_v2/config/common_test_config.yaml`).
|
||||
- **conftest.py**: Re-exports fixtures from the lib modules and defines `pytest_addoption`, logging, and `repo_root`.
|
||||
- **Adding a new scenario**: Use the scaffold script (see [CONTRIBUTING_TESTS.md](CONTRIBUTING_TESTS.md)) to create `scenarios/<name>/` with test file, `resource.yaml`, and `scenario_base.yaml`, or copy an existing scenario folder and adapt.
|
||||
|
||||
## What is tested
|
||||
|
||||
Each test runs in an isolated ephemeral namespace; workloads are deployed automatically before the test and the namespace is deleted after (unless `--keep-ns-on-fail` is set and the test failed).
|
||||
|
||||
- **scenarios/pod_disruption/**
|
||||
Pod disruption scenario. `resource.yaml` is a deployment with label `app=krkn-pod-disruption-target`; `scenario_base.yaml` is loaded and `namespace_pattern` is patched to the test namespace. The test:
|
||||
1. Records baseline pod UIDs and restart counts.
|
||||
2. Runs Kraken with the pod disruption scenario.
|
||||
3. Asserts that chaos had an effect (UIDs changed or restart count increased).
|
||||
4. Waits for pods to be Running and all containers Ready.
|
||||
5. Asserts pod count is unchanged and all pods are healthy.
|
||||
|
||||
- **scenarios/application_outage/**
|
||||
Application outage scenario (block Ingress/Egress to target pods, then restore). `resource.yaml` is the main workload (outage pod); `scenario_base.yaml` is loaded and patched with namespace (and duration/block as needed). Optional `nginx_http.yaml` is used by the traffic test. Tests include:
|
||||
- **test_app_outage_block_restore_and_variants**: Happy path with default, exclude_label, and block variants (Ingress, Egress, both); Krkn exit 0, pods still Running/Ready.
|
||||
- **test_network_policy_created_then_deleted**: Policy with prefix `krkn-deny-` appears during run and is gone after.
|
||||
- **test_traffic_blocked_during_outage** (disabled, planned): Deploys nginx with label `scenario=outage`, port-forwards; during outage curl fails, after run curl succeeds.
|
||||
- **test_invalid_scenario_fails**: Invalid scenario file (missing `application_outage` key) causes Kraken to exit non-zero.
|
||||
- **test_bad_namespace_fails**: Scenario targeting a non-existent namespace causes Kraken to exit non-zero.
|
||||
|
||||
## Configuration
|
||||
|
||||
- **pytest.ini**: Markers (`functional`, `pod_disruption`, `application_outage`, `no_workload`). Use `--timeout=300`, `--reruns=2`, `--reruns-delay=10` on the command line for full runs.
|
||||
- **conftest.py**: Re-exports fixtures from `lib/k8s.py`, `lib/namespace.py`, `lib/deploy.py`, `lib/kraken.py` (e.g. `test_namespace`, `deploy_workload`, `k8s_core`, `wait_for_pods_running`, `run_kraken`, `build_config`). Configs are built from `CI/tests_v2/config/common_test_config.yaml` with monitoring disabled for local runs. Timeout constants in `lib/base.py` can be overridden via env vars.
|
||||
- **Cluster access**: Reads and applies use the Kubernetes Python client; `kubectl` is still used for `port-forward` and for running Kraken.
|
||||
- **utils.py**: Pod/network policy helpers and assertion helpers (`assert_all_pods_running_and_ready`, `assert_pod_count_unchanged`, `assert_kraken_success`, `assert_kraken_failure`, `patch_namespace_in_docs`).
|
||||
|
||||
## Relationship to existing CI
|
||||
|
||||
- The **existing** bash tests in `CI/tests/` and `CI/run.sh` are **unchanged**. They continue to run as before in GitHub Actions.
|
||||
- This framework is **additive**. To run it in CI later, add a separate job or step that runs `pytest CI/tests_v2/ ...` from the repo root.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
- **`pytest.skip: Could not load kube config`** — No cluster or bad KUBECONFIG. Run `make -f CI/tests_v2/Makefile setup` (or `make setup` from `CI/tests_v2`) or check `kubectl cluster-info`.
|
||||
- **KinD cluster creation hangs** — Docker is not running. Start Docker Desktop or run `systemctl start docker`.
|
||||
- **`Bind for 0.0.0.0:9090 failed: port is already allocated`** — Another process (e.g. Prometheus) is using the port. The default dev config (`kind-config-dev.yml`) no longer maps host ports; if you use `KIND_CONFIG=kind-config.yml` or a custom config with `extraPortMappings`, free the port or switch to `kind-config-dev.yml`.
|
||||
- **`TimeoutError: Pods did not become ready`** — Slow image pull or node resource limits. Increase `KRKN_TEST_READINESS_TIMEOUT` or check node resources.
|
||||
- **`ModuleNotFoundError: pytest_rerunfailures`** — Missing test deps. Run `pip install -r CI/tests_v2/requirements.txt` (or `make setup`).
|
||||
- **Stale `krkn-test-*` namespaces** — Left over from a previous crashed run. They are auto-cleaned at session start (older than 30 min). To remove cluster and reports: `make -f CI/tests_v2/Makefile clean`.
|
||||
- **Wrong cluster targeted** — Multiple kube contexts. Use `--require-kind` to skip unless context is kind/minikube, or set context explicitly: `kubectl config use-context kind-ci-krkn`.
|
||||
- **`OSError: [Errno 48] Address already in use` when running tests in parallel** — Kraken normally starts an HTTP status server on port 8081. With `-n auto` (pytest-xdist), multiple Kraken processes would all try to bind to 8081. The test framework disables this server (`publish_kraken_status: False`) in the generated config, so parallel runs should not hit this. If you see it, ensure you're using the framework's `build_config` and not a config that has `publish_kraken_status: True`.
|
||||
74
CI/tests_v2/config/common_test_config.yaml
Normal file
74
CI/tests_v2/config/common_test_config.yaml
Normal file
@@ -0,0 +1,74 @@
|
||||
kraken:
|
||||
distribution: kubernetes # Distribution can be kubernetes or openshift.
|
||||
kubeconfig_path: ~/.kube/config # Path to kubeconfig.
|
||||
exit_on_failure: False # Exit when a post action scenario fails.
|
||||
publish_kraken_status: True # Can be accessed at http://0.0.0.0:8081
|
||||
signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer docs/signal.md for more details
|
||||
signal_address: 0.0.0.0 # Signal listening address
|
||||
port: 8081 # Signal port
|
||||
auto_rollback: True # Enable auto rollback for scenarios.
|
||||
rollback_versions_directory: /tmp/kraken-rollback # Directory to store rollback version files.
|
||||
chaos_scenarios: # List of policies/chaos scenarios to load.
|
||||
- $scenario_type: # List of chaos pod scenarios to load.
|
||||
- $scenario_file
|
||||
cerberus:
|
||||
cerberus_enabled: False # Enable it when cerberus is previously installed.
|
||||
cerberus_url: # When cerberus_enabled is set to True, provide the url where cerberus publishes go/no-go signal.
|
||||
|
||||
performance_monitoring:
|
||||
capture_metrics: False
|
||||
metrics_profile_path: config/metrics-aggregated.yaml
|
||||
prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
|
||||
prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
|
||||
uuid: # uuid for the run is generated by default if not set.
|
||||
enable_alerts: True # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
|
||||
enable_metrics: True
|
||||
alert_profile: config/alerts.yaml # Path or URL to alert profile with the prometheus queries
|
||||
metrics_profile: config/metrics-report.yaml
|
||||
check_critical_alerts: True # When enabled, the run exits with a failure if critical alerts are firing after chaos injection.
|
||||
|
||||
tunings:
|
||||
wait_duration: 6 # Duration to wait between each chaos scenario.
|
||||
iterations: 1 # Number of times to execute the scenarios.
|
||||
daemon_mode: False # Iterations are set to infinity which means that the kraken will cause chaos forever.
|
||||
telemetry:
|
||||
enabled: False # enable/disables the telemetry collection feature
|
||||
api_url: https://yvnn4rfoi7.execute-api.us-west-2.amazonaws.com/test #telemetry service endpoint
|
||||
username: $TELEMETRY_USERNAME # telemetry service username
|
||||
password: $TELEMETRY_PASSWORD # telemetry service password
|
||||
prometheus_namespace: 'monitoring' # prometheus namespace
|
||||
prometheus_pod_name: 'prometheus-kind-prometheus-kube-prome-prometheus-0' # prometheus pod_name
|
||||
prometheus_container_name: 'prometheus'
|
||||
prometheus_backup: True # enables/disables prometheus data collection
|
||||
full_prometheus_backup: False # if is set to False only the /prometheus/wal folder will be downloaded.
|
||||
backup_threads: 5 # number of telemetry download/upload threads
|
||||
archive_path: /tmp # local path where the archive files will be temporarily stored
|
||||
max_retries: 0 # maximum number of upload retries (if 0 will retry forever)
|
||||
run_tag: '' # if set, this will be appended to the run folder in the bucket (useful to group the runs)
|
||||
archive_size: 10000 # the size of each prometheus data archive chunk in KB; the lower the size, the more (smaller) archive files are produced
|
||||
logs_backup: True
|
||||
logs_filter_patterns:
|
||||
- "(\\w{3}\\s\\d{1,2}\\s\\d{2}:\\d{2}:\\d{2}\\.\\d+).+" # Sep 9 11:20:36.123425532
|
||||
- "kinit (\\d+/\\d+/\\d+\\s\\d{2}:\\d{2}:\\d{2})\\s+" # kinit 2023/09/15 11:20:36 log
|
||||
- "(\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d+Z).+" # 2023-09-15T11:20:36.123425532Z log
|
||||
oc_cli_path: /usr/bin/oc # optional, if not specified will be search in $PATH
|
||||
events_backup: True # enables/disables cluster events collection
|
||||
telemetry_group: "funtests"
|
||||
elastic:
|
||||
enable_elastic: False
|
||||
verify_certs: False
|
||||
elastic_url: "https://192.168.39.196" # To track results in elasticsearch, give url to server here; will post telemetry details when url and index not blank
|
||||
elastic_port: 32766
|
||||
username: "elastic"
|
||||
password: "test"
|
||||
metrics_index: "krkn-metrics"
|
||||
alerts_index: "krkn-alerts"
|
||||
telemetry_index: "krkn-telemetry"
|
||||
|
||||
health_checks: # Utilizing health check endpoints to observe application behavior during chaos injection.
|
||||
interval: # Interval in seconds to perform health checks, default value is 2 seconds
|
||||
config: # Provide list of health check configurations for applications
|
||||
- url: # Provide application endpoint
|
||||
bearer_token: # Bearer token for authentication if any
|
||||
auth: # Provide authentication credentials (username , password) in tuple format if any, ex:("admin","secretpassword")
|
||||
exit_on_failure: # If value is True exits when health check failed for application, values can be True/False
|
||||
67
CI/tests_v2/conftest.py
Normal file
67
CI/tests_v2/conftest.py
Normal file
@@ -0,0 +1,67 @@
|
||||
"""
|
||||
Shared fixtures for pytest functional tests (CI/tests_v2).
|
||||
Tests must be run from the repository root so run_kraken.py and config paths resolve.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
def pytest_addoption(parser):
    """Register CLI flags for the CI/tests_v2 suite.

    --keep-ns-on-fail: retain the ephemeral test namespace after a failing test.
    --require-kind: skip the session unless the active kube context looks like
        a disposable dev cluster (kind/minikube).
    """
    parser.addoption(
        "--keep-ns-on-fail",
        action="store_true",
        default=False,
        help="Don't delete test namespaces on failure (for debugging)",
    )
    parser.addoption(
        "--require-kind",
        action="store_true",
        default=False,
        help="Skip tests unless current context is a known dev cluster (kind, minikube)",
    )
|
||||
|
||||
|
||||
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    """Attach each phase report to the test item.

    Stored as item.rep_setup / item.rep_call / item.rep_teardown (rep.when is
    the phase name) so fixtures can inspect pass/fail state during teardown.
    """
    outcome = yield
    rep = outcome.get_result()
    setattr(item, f"rep_{rep.when}", rep)
|
||||
|
||||
|
||||
def _repo_root() -> Path:
|
||||
"""Repository root (directory containing run_kraken.py and CI/)."""
|
||||
return Path(__file__).resolve().parent.parent.parent
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
def repo_root():
    """Session-scoped fixture exposing the repository root as a Path."""
    return _repo_root()
|
||||
|
||||
|
||||
@pytest.fixture(scope="session", autouse=True)
def _configure_logging():
    """Set log format with timestamps for test runs."""
    # basicConfig is a no-op if the root logger already has handlers, so this
    # only takes effect for plain pytest invocations without prior log config.
    logging.basicConfig(
        format="%(asctime)s %(levelname)s [%(name)s] %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
        level=logging.INFO,
    )
|
||||
|
||||
|
||||
# Re-export fixtures from lib modules so pytest discovers them
|
||||
from lib.deploy import deploy_workload, wait_for_pods_running # noqa: E402, F401
|
||||
from lib.kraken import build_config, run_kraken, run_kraken_background # noqa: E402, F401
|
||||
from lib.k8s import ( # noqa: E402, F401
|
||||
_kube_config_loaded,
|
||||
_log_cluster_context,
|
||||
k8s_apps,
|
||||
k8s_client,
|
||||
k8s_core,
|
||||
k8s_networking,
|
||||
kubectl,
|
||||
)
|
||||
from lib.namespace import _cleanup_stale_namespaces, test_namespace # noqa: E402, F401
|
||||
from lib.preflight import _preflight_checks # noqa: E402, F401
|
||||
8
CI/tests_v2/kind-config-dev.yml
Normal file
8
CI/tests_v2/kind-config-dev.yml
Normal file
@@ -0,0 +1,8 @@
|
||||
# Lean KinD config for local dev (faster than full 5-node). Use KIND_CONFIG to override.
|
||||
# No extraPortMappings so setup works when 9090/30080 are in use (e.g. local Prometheus).
|
||||
# For Prometheus/ES port mapping, use the repo root kind-config.yml.
|
||||
kind: Cluster
|
||||
apiVersion: kind.x-k8s.io/v1alpha4
|
||||
nodes:
|
||||
- role: control-plane
|
||||
- role: worker
|
||||
7
CI/tests_v2/lib/__init__.py
Normal file
7
CI/tests_v2/lib/__init__.py
Normal file
@@ -0,0 +1,7 @@
|
||||
# Shared framework for CI/tests_v2 functional tests.
|
||||
# base: BaseScenarioTest, timeout constants
|
||||
# utils: assertions, K8s helpers, patch_namespace_in_docs
|
||||
# k8s: K8s client fixtures, cluster context checks
|
||||
# namespace: test_namespace, stale namespace cleanup
|
||||
# deploy: deploy_workload, wait_for_pods_running, wait_for_deployment_replicas
|
||||
# kraken: run_kraken, run_kraken_background, build_config
|
||||
155
CI/tests_v2/lib/base.py
Normal file
155
CI/tests_v2/lib/base.py
Normal file
@@ -0,0 +1,155 @@
|
||||
"""
|
||||
Base class for CI/tests_v2 scenario tests.
|
||||
Encapsulates the shared lifecycle: ephemeral namespace, optional workload deploy, teardown.
|
||||
"""
|
||||
|
||||
import copy
|
||||
import logging
|
||||
import os
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
import yaml
|
||||
|
||||
from lib.utils import load_scenario_base
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _get_nested(obj, path):
|
||||
"""Walk path (list of keys/indices) and return the value. Supports list and dict."""
|
||||
for key in path:
|
||||
obj = obj[key]
|
||||
return obj
|
||||
|
||||
|
||||
def _set_nested(obj, path, value):
|
||||
"""Walk path to the parent and set the last key to value."""
|
||||
if not path:
|
||||
return
|
||||
parent_path, last_key = path[:-1], path[-1]
|
||||
parent = obj
|
||||
for key in parent_path:
|
||||
parent = parent[key]
|
||||
parent[last_key] = value
|
||||
|
||||
|
||||
# Timeout constants (seconds). Override via env vars (e.g. KRKN_TEST_READINESS_TIMEOUT).
|
||||
# Coordinate with pytest-timeout budget (e.g. 300s).
|
||||
TIMEOUT_BUDGET = int(os.environ.get("KRKN_TEST_TIMEOUT_BUDGET", "300"))
|
||||
DEPLOY_TIMEOUT = int(os.environ.get("KRKN_TEST_DEPLOY_TIMEOUT", "90"))
|
||||
READINESS_TIMEOUT = int(os.environ.get("KRKN_TEST_READINESS_TIMEOUT", "90"))
|
||||
NS_CLEANUP_TIMEOUT = int(os.environ.get("KRKN_TEST_NS_CLEANUP_TIMEOUT", "60"))
|
||||
POLICY_WAIT_TIMEOUT = int(os.environ.get("KRKN_TEST_POLICY_WAIT_TIMEOUT", "30"))
|
||||
KRAKEN_PROC_WAIT_TIMEOUT = int(os.environ.get("KRKN_TEST_KRAKEN_PROC_WAIT_TIMEOUT", "60"))
|
||||
|
||||
|
||||
class BaseScenarioTest:
    """
    Base class for scenario tests. Subclasses set:
    - WORKLOAD_MANIFEST: path (str), or callable(namespace) -> YAML str for inline manifest
    - WORKLOAD_IS_PATH: True if WORKLOAD_MANIFEST is a file path, False if inline YAML
    - LABEL_SELECTOR: label selector for pods to wait on (e.g. "app=my-target")
    - SCENARIO_NAME: e.g. "pod_disruption", "application_outage"
    - SCENARIO_TYPE: e.g. "pod_disruption_scenarios", "application_outages_scenarios"
    - NAMESPACE_KEY_PATH: path to namespace field, e.g. [0, "config", "namespace_pattern"] or ["application_outage", "namespace"]
    - NAMESPACE_IS_REGEX: True to wrap namespace in ^...$
    - OVERRIDES_KEY_PATH: path to dict for **overrides (e.g. ["application_outage"]), or [] if none
    """

    # Subclass-configurable knobs; see class docstring for semantics.
    WORKLOAD_MANIFEST = None
    WORKLOAD_IS_PATH = True
    LABEL_SELECTOR = None
    SCENARIO_NAME = ""
    SCENARIO_TYPE = ""
    NAMESPACE_KEY_PATH = []
    NAMESPACE_IS_REGEX = False
    OVERRIDES_KEY_PATH = []

    @pytest.fixture(autouse=True)
    def _inject_common_fixtures(
        self,
        repo_root,
        tmp_path,
        build_config,
        run_kraken,
        run_kraken_background,
        k8s_core,
        k8s_apps,
        k8s_networking,
        k8s_client,
    ):
        """Inject common fixtures onto self so test methods don't need to declare them."""
        self.repo_root = repo_root
        self.tmp_path = tmp_path
        self.build_config = build_config
        self.run_kraken = run_kraken
        self.run_kraken_background = run_kraken_background
        self.k8s_core = k8s_core
        self.k8s_apps = k8s_apps
        self.k8s_networking = k8s_networking
        self.k8s_client = k8s_client
        yield

    @pytest.fixture(autouse=True)
    def _setup_workload(self, request, repo_root):
        """Create the test namespace and (unless marked no_workload) deploy WORKLOAD_MANIFEST into it."""
        # Tests marked @pytest.mark.no_workload get a namespace but no deployment.
        if "no_workload" in request.keywords:
            request.instance.ns = request.getfixturevalue("test_namespace")
            logger.debug("no_workload marker: skipping workload deploy, ns=%s", request.instance.ns)
            yield
            return
        deploy = request.getfixturevalue("deploy_workload")
        test_namespace = request.getfixturevalue("test_namespace")
        manifest = self.WORKLOAD_MANIFEST
        if callable(manifest):
            # Callable manifests render inline YAML for the generated namespace.
            manifest = manifest(test_namespace)
            is_path = False
            logger.info("Deploying inline workload in ns=%s, label_selector=%s", test_namespace, self.LABEL_SELECTOR)
        else:
            is_path = self.WORKLOAD_IS_PATH
            # Relative manifest paths are resolved against the repo root.
            if is_path and manifest and not Path(manifest).is_absolute():
                manifest = repo_root / manifest
            logger.info("Deploying workload from %s in ns=%s, label_selector=%s", manifest, test_namespace, self.LABEL_SELECTOR)
        ns = deploy(manifest, self.LABEL_SELECTOR, is_path=is_path, timeout=DEPLOY_TIMEOUT)
        request.instance.ns = ns
        yield

    def load_and_patch_scenario(self, repo_root, namespace, **overrides):
        """Load scenario_base.yaml and patch namespace (and overrides). Returns the scenario structure."""
        # Deep-copy so repeated tests never mutate the shared base scenario structure.
        scenario = copy.deepcopy(load_scenario_base(repo_root, self.SCENARIO_NAME))
        ns_value = f"^{namespace}$" if self.NAMESPACE_IS_REGEX else namespace
        if self.NAMESPACE_KEY_PATH:
            _set_nested(scenario, self.NAMESPACE_KEY_PATH, ns_value)
        if overrides and self.OVERRIDES_KEY_PATH:
            target = _get_nested(scenario, self.OVERRIDES_KEY_PATH)
            for key, value in overrides.items():
                target[key] = value
        return scenario

    def write_scenario(self, tmp_path, scenario_data, suffix=""):
        """Write scenario data to a YAML file in tmp_path. Returns the path."""
        filename = f"{self.SCENARIO_NAME}_scenario{suffix}.yaml"
        path = tmp_path / filename
        path.write_text(yaml.dump(scenario_data, default_flow_style=False, sort_keys=False))
        return path

    def run_scenario(self, tmp_path, namespace, *, overrides=None, config_filename=None):
        """Load, patch, write scenario; build config; run Kraken. Returns CompletedProcess."""
        scenario = self.load_and_patch_scenario(self.repo_root, namespace, **(overrides or {}))
        scenario_path = self.write_scenario(tmp_path, scenario)
        config_path = self.build_config(
            self.SCENARIO_TYPE,
            str(scenario_path),
            filename=config_filename or "test_config.yaml",
        )
        # KRKN_TEST_DRY_RUN=1 short-circuits before invoking Kraken (framework smoke tests).
        if os.environ.get("KRKN_TEST_DRY_RUN", "0") == "1":
            logger.info(
                "[dry-run] Would run Kraken with config=%s, scenario=%s",
                config_path,
                scenario_path,
            )
            return subprocess.CompletedProcess(
                args=[], returncode=0, stdout="[dry-run] skipped", stderr=""
            )
        return self.run_kraken(config_path)
|
||||
145
CI/tests_v2/lib/deploy.py
Normal file
145
CI/tests_v2/lib/deploy.py
Normal file
@@ -0,0 +1,145 @@
|
||||
"""
|
||||
Workload deploy and pod/deployment readiness fixtures for CI/tests_v2.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import time
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
import yaml
|
||||
from kubernetes import utils as k8s_utils
|
||||
|
||||
from lib.base import READINESS_TIMEOUT
|
||||
from lib.utils import patch_namespace_in_docs
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def wait_for_deployment_replicas(k8s_apps, namespace: str, name: str, timeout: int = 120) -> None:
    """
    Poll (every 2s) until the deployment reports ready_replicas >= spec.replicas.
    Raises TimeoutError with diagnostic details if the deadline passes first.
    """
    stop_at = time.monotonic() + timeout
    latest = None
    poll_count = 0
    while time.monotonic() < stop_at:
        try:
            observed = k8s_apps.read_namespaced_deployment(name=name, namespace=namespace)
        except Exception as e:
            # Transient read failure: log and fall through to the shared sleep/retry.
            logger.debug("Deployment %s/%s poll attempt %s failed: %s", namespace, name, poll_count, e)
        else:
            latest = observed
            have = observed.status.ready_replicas or 0
            want = observed.spec.replicas or 1
            if have >= want:
                logger.debug("Deployment %s/%s ready (%s/%s)", namespace, name, have, want)
                return
            logger.debug("Deployment %s/%s not ready yet: %s/%s", namespace, name, have, want)
        time.sleep(2)
        poll_count += 1
    diag = ""
    if latest is not None and latest.status:
        diag = f" ready_replicas={latest.status.ready_replicas}, desired={latest.spec.replicas}"
    raise TimeoutError(
        f"Deployment {namespace}/{name} did not become ready within {timeout}s.{diag}"
    )
|
||||
|
||||
|
||||
@pytest.fixture
def wait_for_pods_running(k8s_core):
    """
    Poll until all matching pods are Running and all containers ready.
    Uses exponential backoff: 1s, 2s, 4s, ... capped at 10s.
    Raises TimeoutError with diagnostic details on failure.
    """

    def _wait(namespace: str, label_selector: str, timeout: int = READINESS_TIMEOUT):
        deadline = time.monotonic() + timeout
        interval = 1.0
        max_interval = 10.0
        last_list = None
        while time.monotonic() < deadline:
            try:
                pod_list = k8s_core.list_namespaced_pod(
                    namespace=namespace,
                    label_selector=label_selector,
                )
            except Exception:
                # Transient API error: back off and retry until the deadline.
                time.sleep(min(interval, max_interval))
                interval = min(interval * 2, max_interval)
                continue
            last_list = pod_list
            items = pod_list.items or []
            if not items:
                # No pods matched yet (e.g. controller still creating them).
                time.sleep(min(interval, max_interval))
                interval = min(interval * 2, max_interval)
                continue
            all_running = all(
                (p.status and p.status.phase == "Running") for p in items
            )
            if not all_running:
                time.sleep(min(interval, max_interval))
                interval = min(interval * 2, max_interval)
                continue
            # All pods Running; additionally require every container to report ready.
            all_ready = True
            for p in items:
                if not p.status or not p.status.container_statuses:
                    all_ready = False
                    break
                for cs in p.status.container_statuses:
                    if not getattr(cs, "ready", False):
                        all_ready = False
                        break
            if all_ready:
                return
            time.sleep(min(interval, max_interval))
            interval = min(interval * 2, max_interval)

        # Include one example pod's phase in the error to aid debugging.
        diag = ""
        if last_list and last_list.items:
            p = last_list.items[0]
            diag = f" e.g. pod {p.metadata.name}: phase={getattr(p.status, 'phase', None)}"
        raise TimeoutError(
            f"Pods in {namespace} with label {label_selector} did not become ready within {timeout}s.{diag}"
        )

    return _wait
|
||||
|
||||
|
||||
@pytest.fixture(scope="function")
def deploy_workload(test_namespace, k8s_client, wait_for_pods_running, repo_root, tmp_path):
    """
    Helper that applies a manifest into the test namespace and waits for pods.
    Returns a callable: deploy(manifest_path_or_content, label_selector, *, is_path=True)
    which applies the manifest, waits for readiness, and returns the namespace name.
    """

    def _deploy(manifest_path_or_content, label_selector, *, is_path=True, timeout=READINESS_TIMEOUT):
        try:
            if is_path:
                path = Path(manifest_path_or_content)
                if not path.is_absolute():
                    # Relative manifest paths are resolved against the repo root.
                    path = repo_root / path
                with open(path) as f:
                    docs = list(yaml.safe_load_all(f))
            else:
                # Inline YAML content (possibly multi-document).
                docs = list(yaml.safe_load_all(manifest_path_or_content))
            # Force every document into the ephemeral test namespace.
            docs = patch_namespace_in_docs(docs, test_namespace)
            k8s_utils.create_from_yaml(
                k8s_client,
                yaml_objects=docs,
                namespace=test_namespace,
            )
        except k8s_utils.FailToCreateError as e:
            # Surface all per-resource API errors in a single message.
            msgs = [str(exc) for exc in e.api_exceptions]
            raise RuntimeError(f"Failed to create resources: {'; '.join(msgs)}") from e
        logger.info("Workload applied in namespace=%s, waiting for pods with selector=%s", test_namespace, label_selector)
        wait_for_pods_running(test_namespace, label_selector, timeout=timeout)
        logger.info("Pods ready in namespace=%s", test_namespace)
        return test_namespace

    return _deploy
|
||||
88
CI/tests_v2/lib/k8s.py
Normal file
88
CI/tests_v2/lib/k8s.py
Normal file
@@ -0,0 +1,88 @@
|
||||
"""
|
||||
Kubernetes client fixtures and cluster context checks for CI/tests_v2.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
from kubernetes import client, config
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
def _kube_config_loaded():
    """Load kubeconfig once per session. Skips if cluster unreachable."""
    try:
        config.load_kube_config()
        logger.info("Kube config loaded successfully")
    except config.ConfigException as e:
        logger.warning("Could not load kube config: %s", e)
        # Skip the whole session rather than failing every test individually.
        pytest.skip(f"Could not load kube config (is a cluster running?): {e}")
|
||||
|
||||
|
||||
# Session-scoped Kubernetes API client fixtures. All depend on _kube_config_loaded
# so the kubeconfig is loaded (or the session skipped) before any client is built.
@pytest.fixture(scope="session")
def k8s_core(_kube_config_loaded):
    """Kubernetes CoreV1Api for pods, etc. Uses default kubeconfig."""
    return client.CoreV1Api()


@pytest.fixture(scope="session")
def k8s_networking(_kube_config_loaded):
    """Kubernetes NetworkingV1Api for network policies."""
    return client.NetworkingV1Api()


@pytest.fixture(scope="session")
def k8s_client(_kube_config_loaded):
    """Kubernetes ApiClient for create_from_yaml and other generic API calls."""
    return client.ApiClient()


@pytest.fixture(scope="session")
def k8s_apps(_kube_config_loaded):
    """Kubernetes AppsV1Api for deployment status polling."""
    return client.AppsV1Api()
|
||||
|
||||
|
||||
@pytest.fixture(scope="session", autouse=True)
def _log_cluster_context(request):
    """Log current cluster context at session start; skip if --require-kind and not a dev cluster."""
    try:
        contexts, active = config.list_kube_config_contexts()
    except Exception as e:
        # Best-effort: context logging must never break the session.
        logger.warning("Could not list kube config contexts: %s", e)
        return
    if not active:
        return
    context_name = active.get("name", "?")
    cluster = (active.get("context") or {}).get("cluster", "?")
    logger.info("Running tests against cluster: context=%s cluster=%s", context_name, cluster)
    if not request.config.getoption("--require-kind", False):
        return
    # --require-kind: guard against accidentally targeting a non-disposable cluster.
    cluster_lower = (cluster or "").lower()
    if "kind" in cluster_lower or "minikube" in cluster_lower:
        return
    pytest.skip(
        f"Cluster '{cluster}' does not look like kind/minikube. "
        "Use default kubeconfig or pass --require-kind only on dev clusters."
    )
|
||||
|
||||
|
||||
@pytest.fixture
def kubectl(repo_root):
    """Run kubectl with given args from repo root. Returns CompletedProcess."""

    def run(args, timeout=120):
        # Accept any iterable of args; always prepend the kubectl binary.
        cmd = ["kubectl"] + (args if isinstance(args, list) else list(args))
        return subprocess.run(
            cmd,
            cwd=repo_root,
            capture_output=True,
            text=True,
            timeout=timeout,
        )

    return run
|
||||
94
CI/tests_v2/lib/kraken.py
Normal file
94
CI/tests_v2/lib/kraken.py
Normal file
@@ -0,0 +1,94 @@
|
||||
"""
|
||||
Kraken execution and config building fixtures for CI/tests_v2.
|
||||
"""
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
import yaml
|
||||
|
||||
|
||||
def _kraken_cmd(config_path: str, repo_root: Path):
|
||||
"""Use the same Python as the test process so venv/.venv and coverage match."""
|
||||
python = sys.executable
|
||||
if os.environ.get("KRKN_TEST_COVERAGE", "0") == "1":
|
||||
return [
|
||||
python, "-m", "coverage", "run", "-a",
|
||||
"run_kraken.py", "-c", str(config_path),
|
||||
]
|
||||
return [python, "run_kraken.py", "-c", str(config_path)]
|
||||
|
||||
|
||||
@pytest.fixture
def run_kraken(repo_root):
    """Run Kraken with the given config path. Returns CompletedProcess. Default timeout 300s."""

    def run(config_path, timeout=300, extra_args=None):
        cmd = _kraken_cmd(config_path, repo_root)
        if extra_args:
            cmd.extend(extra_args)
        # Run from repo root so run_kraken.py and relative config paths resolve.
        return subprocess.run(
            cmd,
            cwd=repo_root,
            capture_output=True,
            text=True,
            timeout=timeout,
        )

    return run
|
||||
|
||||
|
||||
@pytest.fixture
def run_kraken_background(repo_root):
    """Start Kraken in background. Returns Popen. Call proc.terminate() or proc.wait() to stop."""

    def start(config_path):
        cmd = _kraken_cmd(config_path, repo_root)
        # Caller owns the process lifecycle (terminate/wait) and its pipes.
        return subprocess.Popen(
            cmd,
            cwd=repo_root,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
        )

    return start
|
||||
|
||||
|
||||
@pytest.fixture
def build_config(repo_root, tmp_path):
    """
    Build a Kraken config from tests_v2's common_test_config.yaml with scenario_type and scenario_file
    substituted. Disables Prometheus/Elastic checks for local runs.
    Returns the path to the written config file.
    """
    common_path = repo_root / "CI" / "tests_v2" / "config" / "common_test_config.yaml"

    def _build(scenario_type: str, scenario_file: str, filename: str = "test_config.yaml"):
        # Template substitution is a plain string replace on the $-placeholders.
        content = common_path.read_text()
        content = content.replace("$scenario_type", scenario_type)
        content = content.replace("$scenario_file", scenario_file)
        content = content.replace("$post_config", "")

        config = yaml.safe_load(content)
        if "kraken" in config:
            # Disable status server so parallel test workers don't all bind to port 8081
            config["kraken"]["publish_kraken_status"] = False
        if "performance_monitoring" in config:
            # Local/dev clusters usually have no Prometheus; skip alert/metric collection.
            config["performance_monitoring"]["check_critical_alerts"] = False
            config["performance_monitoring"]["enable_alerts"] = False
            config["performance_monitoring"]["enable_metrics"] = False
        if "elastic" in config:
            config["elastic"]["enable_elastic"] = False
        if "tunings" in config:
            # Keep the inter-scenario wait short so tests stay fast.
            config["tunings"]["wait_duration"] = 1

        out_path = tmp_path / filename
        with open(out_path, "w") as f:
            yaml.dump(config, f, default_flow_style=False, sort_keys=False)
        return str(out_path)

    return _build
|
||||
114
CI/tests_v2/lib/namespace.py
Normal file
114
CI/tests_v2/lib/namespace.py
Normal file
@@ -0,0 +1,114 @@
|
||||
"""
|
||||
Namespace lifecycle fixtures for CI/tests_v2: create, delete, stale cleanup.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import time
|
||||
import uuid
|
||||
from datetime import datetime
|
||||
|
||||
import pytest
|
||||
from kubernetes import client
|
||||
from kubernetes.client.rest import ApiException
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
STALE_NS_AGE_MINUTES = 30
|
||||
|
||||
|
||||
def _namespace_age_minutes(metadata) -> float:
|
||||
"""Return age of namespace in minutes from its creation_timestamp."""
|
||||
if not metadata or not metadata.creation_timestamp:
|
||||
return 0.0
|
||||
created = metadata.creation_timestamp
|
||||
if hasattr(created, "timestamp"):
|
||||
created_ts = created.timestamp()
|
||||
else:
|
||||
try:
|
||||
dt = datetime.fromisoformat(created.replace("Z", "+00:00"))
|
||||
created_ts = dt.timestamp()
|
||||
except Exception:
|
||||
return 0.0
|
||||
return (time.time() - created_ts) / 60.0
|
||||
|
||||
|
||||
def _wait_for_namespace_gone(k8s_core, name: str, timeout: int = 60):
|
||||
"""Poll until the namespace no longer exists."""
|
||||
deadline = time.monotonic() + timeout
|
||||
while time.monotonic() < deadline:
|
||||
try:
|
||||
k8s_core.read_namespace(name=name)
|
||||
except ApiException as e:
|
||||
if e.status == 404:
|
||||
return
|
||||
raise
|
||||
time.sleep(1)
|
||||
raise TimeoutError(f"Namespace {name} did not disappear within {timeout}s")
|
||||
|
||||
|
||||
@pytest.fixture(scope="function")
def test_namespace(request, k8s_core):
    """
    Create an ephemeral namespace for the test. Deleted after the test unless
    --keep-ns-on-fail is set and the test failed.
    """
    # Short random suffix keeps names unique across parallel workers.
    name = f"krkn-test-{uuid.uuid4().hex[:8]}"
    ns = client.V1Namespace(
        metadata=client.V1ObjectMeta(
            name=name,
            labels={
                # Privileged pod-security labels so chaos workloads can run.
                "pod-security.kubernetes.io/audit": "privileged",
                "pod-security.kubernetes.io/enforce": "privileged",
                "pod-security.kubernetes.io/enforce-version": "v1.24",
                "pod-security.kubernetes.io/warn": "privileged",
                "security.openshift.io/scc.podSecurityLabelSync": "false",
            },
        )
    )
    k8s_core.create_namespace(body=ns)
    logger.info("Created test namespace: %s", name)

    yield name

    # rep_call is attached by the pytest_runtest_makereport hook in conftest.py.
    keep_on_fail = request.config.getoption("--keep-ns-on-fail", False)
    rep_call = getattr(request.node, "rep_call", None)
    failed = rep_call is not None and rep_call.failed
    if keep_on_fail and failed:
        logger.info("[keep-ns-on-fail] Keeping namespace %s for debugging", name)
        return

    try:
        # Background propagation: don't block test teardown on namespace finalization.
        k8s_core.delete_namespace(
            name=name,
            body=client.V1DeleteOptions(propagation_policy="Background"),
        )
        logger.debug("Scheduled background deletion for namespace: %s", name)
    except Exception as e:
        # Best-effort cleanup; stale namespaces are reaped at next session start.
        logger.warning("Failed to delete namespace %s: %s", name, e)
|
||||
|
||||
|
||||
@pytest.fixture(scope="session", autouse=True)
def _cleanup_stale_namespaces(k8s_core):
    """Delete krkn-test-* namespaces older than STALE_NS_AGE_MINUTES at session start."""
    # Under pytest-xdist, workers set PYTEST_XDIST_WORKER; let only one process clean up.
    if os.environ.get("PYTEST_XDIST_WORKER"):
        return
    try:
        namespaces = k8s_core.list_namespace()
    except Exception as e:
        logger.warning("Could not list namespaces for stale cleanup: %s", e)
        return
    for ns in namespaces.items or []:
        name = ns.metadata.name if ns.metadata else ""
        if not name.startswith("krkn-test-"):
            continue
        # Recent namespaces may belong to a concurrent run; only reap old ones.
        if _namespace_age_minutes(ns.metadata) <= STALE_NS_AGE_MINUTES:
            continue
        try:
            logger.warning("Deleting stale namespace: %s", name)
            k8s_core.delete_namespace(
                name=name,
                body=client.V1DeleteOptions(propagation_policy="Background"),
            )
        except Exception as e:
            logger.warning("Failed to delete stale namespace %s: %s", name, e)
|
||||
48
CI/tests_v2/lib/preflight.py
Normal file
48
CI/tests_v2/lib/preflight.py
Normal file
@@ -0,0 +1,48 @@
|
||||
"""
|
||||
Preflight checks for CI/tests_v2: cluster reachability and test deps at session start.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import subprocess
|
||||
|
||||
import pytest
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@pytest.fixture(scope="session", autouse=True)
def _preflight_checks(repo_root):
    """
    Verify cluster is reachable and test deps are importable at session start.
    Skips the session if cluster-info fails or required plugins are missing.
    """
    # Check test deps (pytest plugins)
    try:
        import pytest_rerunfailures  # noqa: F401
        import pytest_html  # noqa: F401
        import pytest_timeout  # noqa: F401
        import pytest_order  # noqa: F401
        import xdist  # noqa: F401
    except ImportError as e:
        pytest.skip(
            f"Missing test dependency: {e}. "
            "Run: pip install -r CI/tests_v2/requirements.txt"
        )

    # Check cluster reachable and log server URL
    result = subprocess.run(
        ["kubectl", "cluster-info"],
        cwd=repo_root,
        capture_output=True,
        text=True,
        timeout=10,
    )
    if result.returncode != 0:
        pytest.skip(
            f"Cluster not reachable (kubectl cluster-info failed). "
            f"Start a cluster (e.g. make setup) or check KUBECONFIG. stderr: {result.stderr or '(none)'}"
        )
    # Log first line of cluster-info (server URL) for debugging
    if result.stdout:
        first_line = result.stdout.strip().split("\n")[0]
        logger.info("Preflight: %s", first_line)
|
||||
212
CI/tests_v2/lib/utils.py
Normal file
212
CI/tests_v2/lib/utils.py
Normal file
@@ -0,0 +1,212 @@
|
||||
"""
|
||||
Shared helpers for CI/tests_v2 functional tests.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import List, Optional, Union
|
||||
|
||||
import pytest
|
||||
import yaml
|
||||
from kubernetes.client import V1NetworkPolicy, V1NetworkPolicyList, V1Pod, V1PodList
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _pods(pod_list: Union[V1PodList, List[V1Pod]]) -> List[V1Pod]:
|
||||
"""Normalize V1PodList or list of V1Pod to list of V1Pod."""
|
||||
return pod_list.items if hasattr(pod_list, "items") else pod_list
|
||||
|
||||
|
||||
def _policies(
|
||||
policy_list: Union[V1NetworkPolicyList, List[V1NetworkPolicy]],
|
||||
) -> List[V1NetworkPolicy]:
|
||||
"""Normalize V1NetworkPolicyList or list to list of V1NetworkPolicy."""
|
||||
return policy_list.items if hasattr(policy_list, "items") else policy_list
|
||||
|
||||
|
||||
def scenario_dir(repo_root: Path, scenario_name: str) -> Path:
|
||||
"""Return the path to a scenario folder under CI/tests_v2/scenarios/."""
|
||||
return repo_root / "CI" / "tests_v2" / "scenarios" / scenario_name
|
||||
|
||||
|
||||
def load_scenario_base(
|
||||
repo_root: Path,
|
||||
scenario_name: str,
|
||||
filename: str = "scenario_base.yaml",
|
||||
) -> Union[dict, list]:
|
||||
"""
|
||||
Load and parse the scenario base YAML for a scenario.
|
||||
Returns dict or list depending on the YAML structure.
|
||||
"""
|
||||
path = scenario_dir(repo_root, scenario_name) / filename
|
||||
text = path.read_text()
|
||||
data = yaml.safe_load(text)
|
||||
if data is None:
|
||||
raise ValueError(f"Empty or invalid YAML in {path}")
|
||||
return data
|
||||
|
||||
|
||||
def patch_namespace_in_docs(docs: list, namespace: str) -> list:
|
||||
"""Override metadata.namespace in each doc so create_from_yaml respects target namespace."""
|
||||
for doc in docs:
|
||||
if isinstance(doc, dict) and doc.get("metadata") is not None:
|
||||
doc["metadata"]["namespace"] = namespace
|
||||
return docs
|
||||
|
||||
|
||||
def get_pods_list(k8s_core, namespace: str, label_selector: str) -> V1PodList:
|
||||
"""Return V1PodList from the Kubernetes API."""
|
||||
return k8s_core.list_namespaced_pod(
|
||||
namespace=namespace,
|
||||
label_selector=label_selector,
|
||||
)
|
||||
|
||||
|
||||
def get_pods_or_skip(
|
||||
k8s_core,
|
||||
namespace: str,
|
||||
label_selector: str,
|
||||
no_pods_reason: Optional[str] = None,
|
||||
) -> V1PodList:
|
||||
"""
|
||||
Get pods via Kubernetes API or skip if cluster unreachable or no matching pods.
|
||||
Use at test start when prerequisites may be missing.
|
||||
no_pods_reason: message when no pods match; if None, a default message is used.
|
||||
"""
|
||||
try:
|
||||
pod_list = k8s_core.list_namespaced_pod(
|
||||
namespace=namespace,
|
||||
label_selector=label_selector,
|
||||
)
|
||||
except Exception as e:
|
||||
pytest.skip(f"Cluster unreachable: {e}")
|
||||
if not pod_list.items or len(pod_list.items) == 0:
|
||||
reason = (
|
||||
no_pods_reason
|
||||
if no_pods_reason
|
||||
else f"No pods in {namespace} with label {label_selector}. "
|
||||
"Start a KinD cluster with default storage (local-path-provisioner)."
|
||||
)
|
||||
pytest.skip(reason)
|
||||
return pod_list
|
||||
|
||||
|
||||
def pod_uids(pod_list: Union[V1PodList, List[V1Pod]]) -> list:
|
||||
"""Return list of pod UIDs from V1PodList or list of V1Pod."""
|
||||
return [p.metadata.uid for p in _pods(pod_list)]
|
||||
|
||||
|
||||
def restart_counts(pod_list: Union[V1PodList, List[V1Pod]]) -> int:
|
||||
"""Return total restart count across all containers in V1PodList or list of V1Pod."""
|
||||
total = 0
|
||||
for p in _pods(pod_list):
|
||||
if not p.status or not p.status.container_statuses:
|
||||
continue
|
||||
for cs in p.status.container_statuses:
|
||||
total += getattr(cs, "restart_count", 0)
|
||||
return total
|
||||
|
||||
|
||||
def get_network_policies_list(k8s_networking, namespace: str) -> V1NetworkPolicyList:
|
||||
"""Return V1NetworkPolicyList from the Kubernetes API."""
|
||||
return k8s_networking.list_namespaced_network_policy(namespace=namespace)
|
||||
|
||||
|
||||
def find_network_policy_by_prefix(
|
||||
policy_list: Union[V1NetworkPolicyList, List[V1NetworkPolicy]],
|
||||
name_prefix: str,
|
||||
) -> Optional[V1NetworkPolicy]:
|
||||
"""Return the first NetworkPolicy whose name starts with name_prefix, or None."""
|
||||
for policy in _policies(policy_list):
|
||||
if (
|
||||
policy.metadata
|
||||
and policy.metadata.name
|
||||
and policy.metadata.name.startswith(name_prefix)
|
||||
):
|
||||
return policy
|
||||
return None
|
||||
|
||||
|
||||
def assert_all_pods_running_and_ready(
|
||||
pod_list: Union[V1PodList, List[V1Pod]],
|
||||
namespace: str = "",
|
||||
) -> None:
|
||||
"""
|
||||
Assert all pods are Running and all containers Ready.
|
||||
Include namespace in assertion messages for debugging.
|
||||
"""
|
||||
ns_suffix = f" (namespace={namespace})" if namespace else ""
|
||||
for pod in _pods(pod_list):
|
||||
assert pod.status and pod.status.phase == "Running", (
|
||||
f"Pod {pod.metadata.name} not Running after scenario: {pod.status}{ns_suffix}"
|
||||
)
|
||||
if pod.status.container_statuses:
|
||||
for cs in pod.status.container_statuses:
|
||||
assert getattr(cs, "ready", False) is True, (
|
||||
f"Container {getattr(cs, 'name', '?')} not ready in pod {pod.metadata.name}{ns_suffix}"
|
||||
)
|
||||
|
||||
|
||||
def assert_pod_count_unchanged(
|
||||
before: Union[V1PodList, List[V1Pod]],
|
||||
after: Union[V1PodList, List[V1Pod]],
|
||||
namespace: str = "",
|
||||
) -> None:
|
||||
"""Assert pod count is unchanged; include namespace in failure message."""
|
||||
before_items = _pods(before)
|
||||
after_items = _pods(after)
|
||||
ns_suffix = f" (namespace={namespace})" if namespace else ""
|
||||
assert len(after_items) == len(before_items), (
|
||||
f"Pod count changed after scenario: expected {len(before_items)}, got {len(after_items)}.{ns_suffix}"
|
||||
)
|
||||
|
||||
|
||||
def assert_kraken_success(result, context: str = "", tmp_path=None, allowed_codes=(0,)) -> None:
|
||||
"""
|
||||
Assert Kraken run succeeded (returncode in allowed_codes). On failure, include stdout and stderr
|
||||
in the assertion message and optionally write full output to tmp_path.
|
||||
Default allowed_codes=(0,). For alert-aware tests, use allowed_codes=(0, 2).
|
||||
"""
|
||||
if result.returncode in allowed_codes:
|
||||
return
|
||||
if tmp_path is not None:
|
||||
try:
|
||||
(tmp_path / "kraken_stdout.log").write_text(result.stdout or "")
|
||||
(tmp_path / "kraken_stderr.log").write_text(result.stderr or "")
|
||||
except Exception as e:
|
||||
logger.warning("Could not write Kraken logs to tmp_path: %s", e)
|
||||
lines = (result.stdout or "").splitlines()
|
||||
tail_stdout = "\n".join(lines[-20:]) if lines else "(empty)"
|
||||
context_str = f" {context}" if context else ""
|
||||
path_hint = f"\nFull logs: {tmp_path}/kraken_stdout.log, {tmp_path}/kraken_stderr.log" if tmp_path else ""
|
||||
raise AssertionError(
|
||||
f"Krkn failed (rc={result.returncode}){context_str}.{path_hint}\n"
|
||||
f"--- stderr ---\n{result.stderr or '(empty)'}\n"
|
||||
f"--- stdout (last 20 lines) ---\n{tail_stdout}"
|
||||
)
|
||||
|
||||
|
||||
def assert_kraken_failure(result, context: str = "", tmp_path=None) -> None:
|
||||
"""
|
||||
Assert Kraken run failed (returncode != 0). On failure (Kraken unexpectedly succeeded),
|
||||
raise AssertionError with stdout/stderr and optional tmp_path log files for diagnostics.
|
||||
"""
|
||||
if result.returncode != 0:
|
||||
return
|
||||
if tmp_path is not None:
|
||||
try:
|
||||
(tmp_path / "kraken_stdout.log").write_text(result.stdout or "")
|
||||
(tmp_path / "kraken_stderr.log").write_text(result.stderr or "")
|
||||
except Exception as e:
|
||||
logger.warning("Could not write Kraken logs to tmp_path: %s", e)
|
||||
lines = (result.stdout or "").splitlines()
|
||||
tail_stdout = "\n".join(lines[-20:]) if lines else "(empty)"
|
||||
context_str = f" {context}" if context else ""
|
||||
path_hint = f"\nFull logs: {tmp_path}/kraken_stdout.log, {tmp_path}/kraken_stderr.log" if tmp_path else ""
|
||||
raise AssertionError(
|
||||
f"Expected Krkn to fail but it succeeded (rc=0){context_str}.{path_hint}\n"
|
||||
f"--- stderr ---\n{result.stderr or '(empty)'}\n"
|
||||
f"--- stdout (last 20 lines) ---\n{tail_stdout}"
|
||||
)
|
||||
14
CI/tests_v2/pytest.ini
Normal file
14
CI/tests_v2/pytest.ini
Normal file
@@ -0,0 +1,14 @@
|
||||
[pytest]
|
||||
testpaths = .
|
||||
python_files = test_*.py
|
||||
python_functions = test_*
|
||||
# Install CI/tests_v2/requirements.txt for --timeout, --reruns, --reruns-delay.
|
||||
# Example full run: pytest CI/tests_v2/ -v --timeout=300 --reruns=2 --reruns-delay=10 --html=... --junitxml=...
|
||||
addopts = -v
|
||||
markers =
|
||||
functional: marks a test as a functional test (deselect with '-m "not functional"')
|
||||
pod_disruption: marks a test as a pod disruption scenario test
|
||||
application_outage: marks a test as an application outage scenario test
|
||||
no_workload: skip workload deployment for this test (e.g. negative tests)
|
||||
order: set test order (pytest-order)
|
||||
junit_family = xunit2
|
||||
15
CI/tests_v2/requirements.txt
Normal file
15
CI/tests_v2/requirements.txt
Normal file
@@ -0,0 +1,15 @@
|
||||
# Pytest plugin deps for CI/tests_v2 functional tests.
|
||||
#
|
||||
# Kept separate from the root requirements.txt because:
|
||||
# - Root deps are Kraken runtime (cloud SDKs, K8s client, etc.)
|
||||
# - These are test-only plugins not needed by Kraken itself
|
||||
# - Merging would bloat installs for users who don't run functional tests
|
||||
# - Separate files reduce version-conflict risk between test and runtime deps
|
||||
#
|
||||
# pytest and coverage are already in root requirements.txt; do NOT duplicate here.
|
||||
# The Makefile installs both files automatically via `make setup`.
|
||||
pytest-rerunfailures>=14.0
|
||||
pytest-html>=4.1.0
|
||||
pytest-timeout>=2.2.0
|
||||
pytest-order>=1.2.0
|
||||
pytest-xdist>=3.5.0
|
||||
230
CI/tests_v2/scaffold.py
Normal file
230
CI/tests_v2/scaffold.py
Normal file
@@ -0,0 +1,230 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Generate boilerplate for a new scenario test in CI/tests_v2.
|
||||
|
||||
Usage (from repository root):
|
||||
python CI/tests_v2/scaffold.py --scenario service_hijacking
|
||||
python CI/tests_v2/scaffold.py --scenario node_disruption --scenario-type node_scenarios
|
||||
|
||||
Creates (folder-per-scenario layout):
|
||||
- CI/tests_v2/scenarios/<scenario>/test_<scenario>.py (BaseScenarioTest subclass + stub test)
|
||||
- CI/tests_v2/scenarios/<scenario>/resource.yaml (placeholder workload)
|
||||
- CI/tests_v2/scenarios/<scenario>/scenario_base.yaml (placeholder Krkn scenario; edit for your scenario_type)
|
||||
- Adds the scenario marker to pytest.ini (if not already present)
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import re
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def snake_to_camel(snake: str) -> str:
|
||||
"""Convert snake_case to CamelCase."""
|
||||
return "".join(word.capitalize() for word in snake.split("_"))
|
||||
|
||||
|
||||
def scenario_type_default(scenario: str) -> str:
|
||||
"""Default scenario_type for build_config (e.g. service_hijacking -> service_hijacking_scenarios)."""
|
||||
return f"{scenario}_scenarios"
|
||||
|
||||
|
||||
TEST_FILE_TEMPLATE = '''"""
|
||||
Functional test for {scenario} scenario.
|
||||
Each test runs in its own ephemeral namespace with workload deployed automatically.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
|
||||
from lib.base import BaseScenarioTest
|
||||
from lib.utils import (
|
||||
assert_all_pods_running_and_ready,
|
||||
assert_kraken_failure,
|
||||
assert_kraken_success,
|
||||
assert_pod_count_unchanged,
|
||||
get_pods_list,
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.functional
|
||||
@pytest.mark.{marker}
|
||||
class Test{class_name}(BaseScenarioTest):
|
||||
"""{scenario} scenario."""
|
||||
|
||||
WORKLOAD_MANIFEST = "CI/tests_v2/scenarios/{scenario}/resource.yaml"
|
||||
WORKLOAD_IS_PATH = True
|
||||
LABEL_SELECTOR = "app={app_label}"
|
||||
SCENARIO_NAME = "{scenario}"
|
||||
SCENARIO_TYPE = "{scenario_type}"
|
||||
NAMESPACE_KEY_PATH = {namespace_key_path}
|
||||
NAMESPACE_IS_REGEX = {namespace_is_regex}
|
||||
OVERRIDES_KEY_PATH = {overrides_key_path}
|
||||
|
||||
@pytest.mark.order(1)
|
||||
def test_happy_path(self):
|
||||
"""Run {scenario} scenario and assert pods remain healthy."""
|
||||
ns = self.ns
|
||||
before = get_pods_list(self.k8s_core, ns, self.LABEL_SELECTOR)
|
||||
|
||||
result = self.run_scenario(self.tmp_path, ns)
|
||||
assert_kraken_success(result, context=f"namespace={{ns}}", tmp_path=self.tmp_path)
|
||||
|
||||
after = get_pods_list(self.k8s_core, ns, self.LABEL_SELECTOR)
|
||||
assert_pod_count_unchanged(before, after, namespace=ns)
|
||||
assert_all_pods_running_and_ready(after, namespace=ns)
|
||||
'''
|
||||
|
||||
RESOURCE_YAML_TEMPLATE = '''# Target workload for {scenario} scenario tests.
|
||||
# Namespace is patched at deploy time by the test framework.
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: {app_label}
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: {app_label}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: {app_label}
|
||||
spec:
|
||||
containers:
|
||||
- name: app
|
||||
image: nginx:alpine
|
||||
ports:
|
||||
- containerPort: 80
|
||||
'''
|
||||
|
||||
SCENARIO_BASE_DICT_TEMPLATE = '''# Base scenario for {scenario} (used by build_config with scenario_type: {scenario_type}).
|
||||
# Edit this file with the structure expected by Krkn. Top-level key must match SCENARIO_NAME.
|
||||
# See scenarios/application_outage/scenario_base.yaml and scenarios/pod_disruption/scenario_base.yaml for examples.
|
||||
{scenario}:
|
||||
namespace: default
|
||||
# Add fields required by your scenario plugin.
|
||||
'''
|
||||
|
||||
SCENARIO_BASE_LIST_TEMPLATE = '''# Base scenario for {scenario} (list format). Tests patch config.namespace_pattern with ^<ns>$.
|
||||
# Edit with the structure expected by your scenario plugin. See scenarios/pod_disruption/scenario_base.yaml.
|
||||
- id: {scenario}-default
|
||||
config:
|
||||
namespace_pattern: "^default$"
|
||||
# Add fields required by your scenario plugin.
|
||||
'''
|
||||
|
||||
|
||||
def main() -> int:
|
||||
parser = argparse.ArgumentParser(description="Scaffold a new scenario test in CI/tests_v2 (folder-per-scenario)")
|
||||
parser.add_argument(
|
||||
"--scenario",
|
||||
required=True,
|
||||
help="Scenario name in snake_case (e.g. service_hijacking)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--scenario-type",
|
||||
default=None,
|
||||
help="Kraken scenario_type for build_config (default: <scenario>_scenarios)",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--list-based",
|
||||
action="store_true",
|
||||
help="Use list-based scenario (NAMESPACE_KEY_PATH [0, 'config', 'namespace_pattern'], OVERRIDES_KEY_PATH [0, 'config'])",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--regex-namespace",
|
||||
action="store_true",
|
||||
help="Set NAMESPACE_IS_REGEX = True (namespace wrapped in ^...$)",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
scenario = args.scenario.strip().lower()
|
||||
if not re.match(r"^[a-z][a-z0-9_]*$", scenario):
|
||||
print("Error: --scenario must be snake_case (e.g. service_hijacking)", file=sys.stderr)
|
||||
return 1
|
||||
|
||||
scenario_type = args.scenario_type or scenario_type_default(scenario)
|
||||
class_name = snake_to_camel(scenario)
|
||||
marker = scenario
|
||||
app_label = scenario.replace("_", "-")
|
||||
|
||||
if args.list_based:
|
||||
namespace_key_path = [0, "config", "namespace_pattern"]
|
||||
namespace_is_regex = True
|
||||
overrides_key_path = [0, "config"]
|
||||
scenario_base_template = SCENARIO_BASE_LIST_TEMPLATE
|
||||
else:
|
||||
namespace_key_path = [scenario, "namespace"]
|
||||
namespace_is_regex = args.regex_namespace
|
||||
overrides_key_path = [scenario]
|
||||
scenario_base_template = SCENARIO_BASE_DICT_TEMPLATE
|
||||
|
||||
repo_root = Path(__file__).resolve().parent.parent.parent
|
||||
scenario_dir_path = repo_root / "CI" / "tests_v2" / "scenarios" / scenario
|
||||
test_path = scenario_dir_path / f"test_{scenario}.py"
|
||||
resource_path = scenario_dir_path / "resource.yaml"
|
||||
scenario_base_path = scenario_dir_path / "scenario_base.yaml"
|
||||
|
||||
if scenario_dir_path.exists() and any(scenario_dir_path.iterdir()):
|
||||
print(f"Error: scenario directory already exists and is non-empty: {scenario_dir_path}", file=sys.stderr)
|
||||
return 1
|
||||
if test_path.exists():
|
||||
print(f"Error: {test_path} already exists", file=sys.stderr)
|
||||
return 1
|
||||
|
||||
scenario_dir_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
test_content = TEST_FILE_TEMPLATE.format(
|
||||
scenario=scenario,
|
||||
marker=marker,
|
||||
class_name=class_name,
|
||||
app_label=app_label,
|
||||
scenario_type=scenario_type,
|
||||
namespace_key_path=repr(namespace_key_path),
|
||||
namespace_is_regex=namespace_is_regex,
|
||||
overrides_key_path=repr(overrides_key_path),
|
||||
)
|
||||
resource_content = RESOURCE_YAML_TEMPLATE.format(scenario=scenario, app_label=app_label)
|
||||
scenario_base_content = scenario_base_template.format(
|
||||
scenario=scenario,
|
||||
scenario_type=scenario_type,
|
||||
)
|
||||
|
||||
test_path.write_text(test_content, encoding="utf-8")
|
||||
resource_path.write_text(resource_content, encoding="utf-8")
|
||||
scenario_base_path.write_text(scenario_base_content, encoding="utf-8")
|
||||
|
||||
# Auto-add marker to pytest.ini if not already present
|
||||
pytest_ini_path = repo_root / "CI" / "tests_v2" / "pytest.ini"
|
||||
marker_line = f" {marker}: marks a test as a {scenario} scenario test"
|
||||
if pytest_ini_path.exists():
|
||||
content = pytest_ini_path.read_text(encoding="utf-8")
|
||||
if f" {marker}:" not in content and f"{marker}: marks" not in content:
|
||||
lines = content.splitlines(keepends=True)
|
||||
insert_at = None
|
||||
for i, line in enumerate(lines):
|
||||
if re.match(r"^ \w+:\s*.+", line):
|
||||
insert_at = i + 1
|
||||
if insert_at is not None:
|
||||
lines.insert(insert_at, marker_line + "\n")
|
||||
pytest_ini_path.write_text("".join(lines), encoding="utf-8")
|
||||
print("Added marker to pytest.ini")
|
||||
else:
|
||||
print("Could not find markers block in pytest.ini; add manually:")
|
||||
print(marker_line)
|
||||
else:
|
||||
print("Marker already in pytest.ini")
|
||||
else:
|
||||
print("pytest.ini not found; add this marker under 'markers':")
|
||||
print(marker_line)
|
||||
|
||||
print(f"Created: {test_path}")
|
||||
print(f"Created: {resource_path}")
|
||||
print(f"Created: {scenario_base_path}")
|
||||
print()
|
||||
print("Then edit scenario_base.yaml with your scenario structure (top-level key should match SCENARIO_NAME).")
|
||||
return 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
34
CI/tests_v2/scenarios/application_outage/nginx_http.yaml
Normal file
34
CI/tests_v2/scenarios/application_outage/nginx_http.yaml
Normal file
@@ -0,0 +1,34 @@
|
||||
# Nginx Deployment + Service for application outage traffic test.
|
||||
# Namespace is patched at deploy time by the test framework.
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: nginx-outage-http
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: nginx-outage-http
|
||||
scenario: outage
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nginx-outage-http
|
||||
scenario: outage
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx:alpine
|
||||
ports:
|
||||
- containerPort: 80
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: nginx-outage-http
|
||||
spec:
|
||||
selector:
|
||||
app: nginx-outage-http
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 80
|
||||
15
CI/tests_v2/scenarios/application_outage/resource.yaml
Normal file
15
CI/tests_v2/scenarios/application_outage/resource.yaml
Normal file
@@ -0,0 +1,15 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: outage
|
||||
labels:
|
||||
scenario: outage
|
||||
spec:
|
||||
containers:
|
||||
- name: fedtools
|
||||
image: quay.io/krkn-chaos/krkn:tools
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- |
|
||||
sleep infinity
|
||||
10
CI/tests_v2/scenarios/application_outage/scenario_base.yaml
Normal file
10
CI/tests_v2/scenarios/application_outage/scenario_base.yaml
Normal file
@@ -0,0 +1,10 @@
|
||||
# Base application_outage scenario. Tests load this and patch namespace (and optionally duration, block, exclude_label).
|
||||
application_outage:
|
||||
duration: 10
|
||||
namespace: default
|
||||
pod_selector:
|
||||
scenario: outage
|
||||
block:
|
||||
- Ingress
|
||||
- Egress
|
||||
exclude_label: ""
|
||||
@@ -0,0 +1,229 @@
|
||||
"""
|
||||
Functional test for application outage scenario (block network to target pods, then restore).
|
||||
Equivalent to CI/tests/test_app_outages.sh with proper assertions.
|
||||
The main happy-path test reuses one namespace and workload for multiple scenario runs (default, exclude_label, block variants); other tests use their own ephemeral namespace as needed.
|
||||
"""
|
||||
|
||||
import time
|
||||
|
||||
import pytest
|
||||
|
||||
from lib.base import (
|
||||
BaseScenarioTest,
|
||||
KRAKEN_PROC_WAIT_TIMEOUT,
|
||||
POLICY_WAIT_TIMEOUT,
|
||||
)
|
||||
from lib.utils import (
|
||||
assert_all_pods_running_and_ready,
|
||||
assert_kraken_failure,
|
||||
assert_kraken_success,
|
||||
assert_pod_count_unchanged,
|
||||
find_network_policy_by_prefix,
|
||||
get_network_policies_list,
|
||||
get_pods_list,
|
||||
)
|
||||
|
||||
|
||||
def _wait_for_network_policy(k8s_networking, namespace: str, prefix: str, timeout: int = 30):
|
||||
"""Poll until a NetworkPolicy with name starting with prefix exists. Return its name."""
|
||||
deadline = time.monotonic() + timeout
|
||||
while time.monotonic() < deadline:
|
||||
policy_list = get_network_policies_list(k8s_networking, namespace)
|
||||
policy = find_network_policy_by_prefix(policy_list, prefix)
|
||||
if policy:
|
||||
return policy.metadata.name
|
||||
time.sleep(1)
|
||||
raise TimeoutError(f"No NetworkPolicy with prefix {prefix!r} in {namespace} within {timeout}s")
|
||||
|
||||
|
||||
def _assert_no_network_policy_with_prefix(k8s_networking, namespace: str, prefix: str):
|
||||
policy_list = get_network_policies_list(k8s_networking, namespace)
|
||||
policy = find_network_policy_by_prefix(policy_list, prefix)
|
||||
name = policy.metadata.name if policy and policy.metadata else "?"
|
||||
assert policy is None, (
|
||||
f"Expected no NetworkPolicy with prefix {prefix!r} in namespace={namespace}, found {name}"
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.functional
|
||||
@pytest.mark.application_outage
|
||||
class TestApplicationOutage(BaseScenarioTest):
|
||||
"""Application outage scenario: block network to target pods, then restore."""
|
||||
|
||||
WORKLOAD_MANIFEST = "CI/tests_v2/scenarios/application_outage/resource.yaml"
|
||||
WORKLOAD_IS_PATH = True
|
||||
LABEL_SELECTOR = "scenario=outage"
|
||||
POLICY_PREFIX = "krkn-deny-"
|
||||
SCENARIO_NAME = "application_outage"
|
||||
SCENARIO_TYPE = "application_outages_scenarios"
|
||||
NAMESPACE_KEY_PATH = ["application_outage", "namespace"]
|
||||
NAMESPACE_IS_REGEX = False
|
||||
OVERRIDES_KEY_PATH = ["application_outage"]
|
||||
|
||||
@pytest.mark.order(1)
|
||||
def test_app_outage_block_restore_and_variants(self):
|
||||
"""Default, exclude_label, and block-type variants (Ingress, Egress, both) run successfully in one namespace; each run restores and pods stay ready."""
|
||||
ns = self.ns
|
||||
before = get_pods_list(self.k8s_core, ns, self.LABEL_SELECTOR)
|
||||
|
||||
cases = [
|
||||
("default", {}, "app_outage_config.yaml"),
|
||||
("exclude_label", {"exclude_label": {"env": "prod"}}, "app_outage_exclude_config.yaml"),
|
||||
("block=Ingress", {"block": ["Ingress"]}, "app_outage_block_ingress_config.yaml"),
|
||||
("block=Egress", {"block": ["Egress"]}, "app_outage_block_egress_config.yaml"),
|
||||
("block=Ingress,Egress", {"block": ["Ingress", "Egress"]}, "app_outage_block_ingress_egress_config.yaml"),
|
||||
]
|
||||
for context_name, overrides, config_filename in cases:
|
||||
result = self.run_scenario(
|
||||
self.tmp_path, ns,
|
||||
overrides=overrides if overrides else None,
|
||||
config_filename=config_filename,
|
||||
)
|
||||
assert_kraken_success(
|
||||
result, context=f"{context_name} namespace={ns}", tmp_path=self.tmp_path
|
||||
)
|
||||
after = get_pods_list(self.k8s_core, ns, self.LABEL_SELECTOR)
|
||||
assert_pod_count_unchanged(before, after, namespace=ns)
|
||||
assert_all_pods_running_and_ready(after, namespace=ns)
|
||||
|
||||
def test_network_policy_created_then_deleted(self):
|
||||
"""NetworkPolicy with prefix krkn-deny- is created during run and deleted after."""
|
||||
ns = self.ns
|
||||
scenario = self.load_and_patch_scenario(self.repo_root, ns, duration=12)
|
||||
scenario_path = self.write_scenario(self.tmp_path, scenario, suffix="_np_lifecycle")
|
||||
config_path = self.build_config(
|
||||
self.SCENARIO_TYPE, str(scenario_path),
|
||||
filename="app_outage_np_lifecycle.yaml",
|
||||
)
|
||||
proc = self.run_kraken_background(config_path)
|
||||
try:
|
||||
policy_name = _wait_for_network_policy(
|
||||
self.k8s_networking, ns, self.POLICY_PREFIX, timeout=POLICY_WAIT_TIMEOUT
|
||||
)
|
||||
assert policy_name.startswith(self.POLICY_PREFIX), (
|
||||
f"Policy name {policy_name!r} should start with {self.POLICY_PREFIX!r} (namespace={ns})"
|
||||
)
|
||||
policy_list = get_network_policies_list(self.k8s_networking, ns)
|
||||
policy = find_network_policy_by_prefix(policy_list, self.POLICY_PREFIX)
|
||||
assert policy is not None and policy.spec is not None, (
|
||||
f"Expected NetworkPolicy with spec (namespace={ns})"
|
||||
)
|
||||
assert policy.spec.pod_selector is not None, f"Policy should have pod_selector (namespace={ns})"
|
||||
assert policy.spec.policy_types is not None, f"Policy should have policy_types (namespace={ns})"
|
||||
finally:
|
||||
proc.wait(timeout=KRAKEN_PROC_WAIT_TIMEOUT)
|
||||
_assert_no_network_policy_with_prefix(self.k8s_networking, ns, self.POLICY_PREFIX)
|
||||
|
||||
# def test_traffic_blocked_during_outage(self, request):
|
||||
# """During outage, ingress to target pods is blocked; after run, traffic is restored."""
|
||||
# ns = self.ns
|
||||
# nginx_path = scenario_dir(self.repo_root, "application_outage") / "nginx_http.yaml"
|
||||
# docs = list(yaml.safe_load_all(nginx_path.read_text()))
|
||||
# docs = patch_namespace_in_docs(docs, ns)
|
||||
# try:
|
||||
# k8s_utils.create_from_yaml(
|
||||
# self.k8s_client,
|
||||
# yaml_objects=docs,
|
||||
# namespace=ns,
|
||||
# )
|
||||
# except k8s_utils.FailToCreateError as e:
|
||||
# msgs = [str(exc) for exc in e.api_exceptions]
|
||||
# raise AssertionError(
|
||||
# f"Failed to create nginx resources (namespace={ns}): {'; '.join(msgs)}"
|
||||
# ) from e
|
||||
# wait_for_deployment_replicas(self.k8s_apps, ns, "nginx-outage-http", timeout=READINESS_TIMEOUT)
|
||||
# port = _get_free_port()
|
||||
# pf_ref = []
|
||||
|
||||
# def _kill_port_forward():
|
||||
# if pf_ref and pf_ref[0].poll() is None:
|
||||
# pf_ref[0].terminate()
|
||||
# try:
|
||||
# pf_ref[0].wait(timeout=5)
|
||||
# except subprocess.TimeoutExpired:
|
||||
# pf_ref[0].kill()
|
||||
|
||||
# request.addfinalizer(_kill_port_forward)
|
||||
# pf = subprocess.Popen(
|
||||
# ["kubectl", "port-forward", "-n", ns, "service/nginx-outage-http", f"{port}:80"],
|
||||
# cwd=self.repo_root,
|
||||
# stdout=subprocess.DEVNULL,
|
||||
# stderr=subprocess.DEVNULL,
|
||||
# )
|
||||
# pf_ref.append(pf)
|
||||
# url = f"http://127.0.0.1:{port}/"
|
||||
# try:
|
||||
# time.sleep(2)
|
||||
# baseline_ok = False
|
||||
# for _ in range(10):
|
||||
# try:
|
||||
# resp = requests.get(url, timeout=3)
|
||||
# if resp.ok:
|
||||
# baseline_ok = True
|
||||
# break
|
||||
# except (requests.ConnectionError, requests.Timeout):
|
||||
# pass
|
||||
# time.sleep(1)
|
||||
# assert baseline_ok, f"Baseline: HTTP request to nginx should succeed (namespace={ns})"
|
||||
|
||||
# scenario = self.load_and_patch_scenario(self.repo_root, ns, duration=15)
|
||||
# scenario_path = self.write_scenario(self.tmp_path, scenario, suffix="_traffic")
|
||||
# config_path = self.build_config(
|
||||
# self.SCENARIO_TYPE, str(scenario_path),
|
||||
# filename="app_outage_traffic_config.yaml",
|
||||
# )
|
||||
# proc = self.run_kraken_background(config_path)
|
||||
# policy_name = _wait_for_network_policy(
|
||||
# self.k8s_networking, ns, self.POLICY_PREFIX, timeout=POLICY_WAIT_TIMEOUT
|
||||
# )
|
||||
# assert policy_name, f"Expected policy to exist (namespace={ns})"
|
||||
# time.sleep(2)
|
||||
# failed = False
|
||||
# for _ in range(5):
|
||||
# try:
|
||||
# resp = requests.get(url, timeout=2)
|
||||
# if not resp.ok:
|
||||
# failed = True
|
||||
# break
|
||||
# except (requests.ConnectionError, requests.Timeout):
|
||||
# failed = True
|
||||
# break
|
||||
# time.sleep(1)
|
||||
# assert failed, f"During outage, HTTP request to nginx should fail (namespace={ns})"
|
||||
# proc.wait(timeout=KRAKEN_PROC_WAIT_TIMEOUT)
|
||||
# time.sleep(1)
|
||||
# resp = requests.get(url, timeout=5)
|
||||
# assert resp.ok, f"After scenario, HTTP request to nginx should succeed (namespace={ns})"
|
||||
# finally:
|
||||
# pf.terminate()
|
||||
# pf.wait(timeout=5)
|
||||
|
||||
@pytest.mark.no_workload
|
||||
def test_invalid_scenario_fails(self):
|
||||
"""Invalid scenario file (missing application_outage) causes Kraken to exit non-zero."""
|
||||
invalid_scenario_path = self.tmp_path / "invalid_scenario.yaml"
|
||||
invalid_scenario_path.write_text("foo: bar\n")
|
||||
config_path = self.build_config(
|
||||
self.SCENARIO_TYPE, str(invalid_scenario_path),
|
||||
filename="invalid_config.yaml",
|
||||
)
|
||||
result = self.run_kraken(config_path)
|
||||
assert_kraken_failure(
|
||||
result, context=f"namespace={self.ns}", tmp_path=self.tmp_path
|
||||
)
|
||||
|
||||
@pytest.mark.no_workload
|
||||
def test_bad_namespace_fails(self):
|
||||
"""Scenario targeting non-existent namespace causes Kraken to exit non-zero."""
|
||||
scenario = self.load_and_patch_scenario(self.repo_root, "nonexistent-namespace-xyz-12345")
|
||||
scenario_path = self.write_scenario(self.tmp_path, scenario, suffix="_bad_ns")
|
||||
config_path = self.build_config(
|
||||
self.SCENARIO_TYPE, str(scenario_path),
|
||||
filename="app_outage_bad_ns_config.yaml",
|
||||
)
|
||||
result = self.run_kraken(config_path)
|
||||
assert_kraken_failure(
|
||||
result,
|
||||
context=f"test namespace={self.ns}",
|
||||
tmp_path=self.tmp_path,
|
||||
)
|
||||
21
CI/tests_v2/scenarios/pod_disruption/resource.yaml
Normal file
21
CI/tests_v2/scenarios/pod_disruption/resource.yaml
Normal file
@@ -0,0 +1,21 @@
|
||||
# Single-pod deployment targeted by pod disruption scenario.
|
||||
# Namespace is patched at deploy time by the test framework.
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: krkn-pod-disruption-target
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: krkn-pod-disruption-target
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: krkn-pod-disruption-target
|
||||
spec:
|
||||
containers:
|
||||
- name: app
|
||||
image: nginx:alpine
|
||||
ports:
|
||||
- containerPort: 80
|
||||
7
CI/tests_v2/scenarios/pod_disruption/scenario_base.yaml
Normal file
7
CI/tests_v2/scenarios/pod_disruption/scenario_base.yaml
Normal file
@@ -0,0 +1,7 @@
|
||||
# Base pod_disruption scenario (list). Tests load this and patch namespace_pattern with ^<ns>$.
|
||||
- id: kill-pods
|
||||
config:
|
||||
namespace_pattern: "^default$"
|
||||
label_selector: app=krkn-pod-disruption-target
|
||||
krkn_pod_recovery_time: 5
|
||||
kill: 1
|
||||
58
CI/tests_v2/scenarios/pod_disruption/test_pod_disruption.py
Normal file
58
CI/tests_v2/scenarios/pod_disruption/test_pod_disruption.py
Normal file
@@ -0,0 +1,58 @@
|
||||
"""
|
||||
Functional test for pod disruption scenario (pod crash and recovery).
|
||||
Equivalent to CI/tests/test_pod.sh with proper before/after assertions.
|
||||
Each test runs in its own ephemeral namespace with workload deployed automatically.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
|
||||
from lib.base import BaseScenarioTest, READINESS_TIMEOUT
|
||||
from lib.utils import (
|
||||
assert_all_pods_running_and_ready,
|
||||
assert_kraken_success,
|
||||
assert_pod_count_unchanged,
|
||||
get_pods_list,
|
||||
pod_uids,
|
||||
restart_counts,
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.functional
|
||||
@pytest.mark.pod_disruption
|
||||
class TestPodDisruption(BaseScenarioTest):
|
||||
"""Pod disruption scenario: kill pods and verify recovery."""
|
||||
|
||||
WORKLOAD_MANIFEST = "CI/tests_v2/scenarios/pod_disruption/resource.yaml"
|
||||
WORKLOAD_IS_PATH = True
|
||||
LABEL_SELECTOR = "app=krkn-pod-disruption-target"
|
||||
SCENARIO_NAME = "pod_disruption"
|
||||
SCENARIO_TYPE = "pod_disruption_scenarios"
|
||||
NAMESPACE_KEY_PATH = [0, "config", "namespace_pattern"]
|
||||
NAMESPACE_IS_REGEX = True
|
||||
|
||||
@pytest.mark.order(1)
|
||||
def test_pod_crash_and_recovery(self, wait_for_pods_running):
|
||||
ns = self.ns
|
||||
before = get_pods_list(self.k8s_core, ns, self.LABEL_SELECTOR)
|
||||
before_uids = pod_uids(before)
|
||||
before_restarts = restart_counts(before)
|
||||
|
||||
result = self.run_scenario(self.tmp_path, ns)
|
||||
assert_kraken_success(result, context=f"namespace={ns}", tmp_path=self.tmp_path)
|
||||
|
||||
after = get_pods_list(self.k8s_core, ns, self.LABEL_SELECTOR)
|
||||
after_uids = pod_uids(after)
|
||||
after_restarts = restart_counts(after)
|
||||
uids_changed = set(after_uids) != set(before_uids)
|
||||
restarts_increased = after_restarts > before_restarts
|
||||
assert uids_changed or restarts_increased, (
|
||||
f"Chaos had no effect in namespace={ns}: pod UIDs unchanged and restart count did not increase. "
|
||||
f"Before UIDs: {before_uids}, restarts: {before_restarts}. "
|
||||
f"After UIDs: {after_uids}, restarts: {after_restarts}."
|
||||
)
|
||||
|
||||
wait_for_pods_running(ns, self.LABEL_SELECTOR, timeout=READINESS_TIMEOUT)
|
||||
|
||||
after_final = get_pods_list(self.k8s_core, ns, self.LABEL_SELECTOR)
|
||||
assert_pod_count_unchanged(before, after_final, namespace=ns)
|
||||
assert_all_pods_running_and_ready(after_final, namespace=ns)
|
||||
74
CI/tests_v2/setup_env.sh
Executable file
74
CI/tests_v2/setup_env.sh
Executable file
@@ -0,0 +1,74 @@
|
||||
#!/usr/bin/env bash
|
||||
# Setup environment for CI/tests_v2 pytest functional tests.
|
||||
# Run from the repository root: ./CI/tests_v2/setup_env.sh
|
||||
#
|
||||
# - Creates a KinD cluster using kind-config-dev.yml (override with KIND_CONFIG=...).
|
||||
# - Waits for the cluster and for local-path-provisioner pods (required by pod disruption test).
|
||||
# - Does not install Python deps; use a venv and pip install -r requirements.txt and CI/tests_v2/requirements.txt yourself.
|
||||
|
||||
set -e
|
||||
|
||||
REPO_ROOT="$(cd "$(dirname "$0")/../.." && pwd)"
|
||||
KIND_CONFIG="${KIND_CONFIG:-${REPO_ROOT}/CI/tests_v2/kind-config-dev.yml}"
|
||||
CLUSTER_NAME="${KIND_CLUSTER_NAME:-ci-krkn}"
|
||||
|
||||
echo "Repository root: $REPO_ROOT"
|
||||
cd "$REPO_ROOT"
|
||||
|
||||
# Check required tools
|
||||
command -v kind >/dev/null 2>&1 || { echo "Error: kind is not installed. Install from https://kind.sigs.k8s.io/docs/user/quick-start/"; exit 1; }
|
||||
command -v kubectl >/dev/null 2>&1 || { echo "Error: kubectl is not installed."; exit 1; }
|
||||
|
||||
# Python 3.9+
|
||||
python3 -c "import sys; exit(0 if sys.version_info >= (3, 9) else 1)" 2>/dev/null || { echo "Error: Python 3.9+ required. Check: python3 --version"; exit 1; }
|
||||
|
||||
# Docker running (required for KinD)
|
||||
docker info >/dev/null 2>&1 || { echo "Error: Docker is not running. Start Docker Desktop or run: systemctl start docker"; exit 1; }
|
||||
|
||||
# Tool versions for reproducibility
|
||||
echo "kind: $(kind --version 2>/dev/null || kind version 2>/dev/null)"
|
||||
echo "kubectl: $(kubectl version --client --short 2>/dev/null || kubectl version --client 2>/dev/null)"
|
||||
|
||||
# Create cluster if it doesn't exist (use "kind get clusters" so we skip when nodes exist even if kubeconfig check would fail)
|
||||
if kind get clusters 2>/dev/null | grep -qx "$CLUSTER_NAME"; then
|
||||
echo "KinD cluster '$CLUSTER_NAME' already exists, skipping creation."
|
||||
else
|
||||
echo "Creating KinD cluster '$CLUSTER_NAME' from $KIND_CONFIG ..."
|
||||
kind create cluster --name "$CLUSTER_NAME" --config "$KIND_CONFIG"
|
||||
fi
|
||||
|
||||
# echo "Pre-pulling test workload images into KinD cluster..."
|
||||
# docker pull nginx:alpine
|
||||
# kind load docker-image nginx:alpine --name "$CLUSTER_NAME"
|
||||
|
||||
# kind merges into default kubeconfig (~/.kube/config), so kubectl should work in this shell.
|
||||
# If you need to use this cluster from another terminal: export KUBECONFIG=~/.kube/config
|
||||
# and ensure context: kubectl config use-context kind-$CLUSTER_NAME
|
||||
|
||||
echo "Waiting for cluster nodes to be Ready..."
|
||||
kubectl wait --for=condition=Ready nodes --all --timeout=120s 2>/dev/null || true
|
||||
|
||||
echo "Waiting for local-path-provisioner pods (namespace local-path-storage, label app=local-path-provisioner)..."
|
||||
for i in {1..60}; do
|
||||
if kubectl get pods -n local-path-storage -l app=local-path-provisioner -o name 2>/dev/null | grep -q .; then
|
||||
echo "Found local-path-provisioner pod(s). Waiting for Ready..."
|
||||
kubectl wait --for=condition=ready pod -l app=local-path-provisioner -n local-path-storage --timeout=120s 2>/dev/null && break
|
||||
fi
|
||||
echo "Attempt $i: local-path-provisioner not ready yet..."
|
||||
sleep 3
|
||||
done
|
||||
|
||||
if ! kubectl get pods -n local-path-storage -l app=local-path-provisioner -o name 2>/dev/null | grep -q .; then
|
||||
echo "Warning: No pods with label app=local-path-provisioner in local-path-storage."
|
||||
echo "KinD usually deploys this by default. Check: kubectl get pods -n local-path-storage"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "Cluster is ready for CI/tests_v2."
|
||||
echo " kubectl uses the default kubeconfig (kind merged it). For another terminal: export KUBECONFIG=~/.kube/config"
|
||||
echo ""
|
||||
echo "Next: activate your venv, install deps, and run tests from repo root:"
|
||||
echo " pip install -r requirements.txt"
|
||||
echo " pip install -r CI/tests_v2/requirements.txt"
|
||||
echo " pytest CI/tests_v2/ -v --timeout=300 --reruns=2 --reruns-delay=10"
|
||||
273
CLAUDE.md
Normal file
273
CLAUDE.md
Normal file
@@ -0,0 +1,273 @@
|
||||
# CLAUDE.md - Krkn Chaos Engineering Framework
|
||||
|
||||
## Project Overview
|
||||
|
||||
Krkn (Kraken) is a chaos engineering tool for Kubernetes/OpenShift clusters. It injects deliberate failures to validate cluster resilience. Plugin-based architecture with multi-cloud support (AWS, Azure, GCP, IBM Cloud, VMware, Alibaba, OpenStack).
|
||||
|
||||
## Repository Structure
|
||||
|
||||
```
|
||||
krkn/
|
||||
├── krkn/
|
||||
│ ├── scenario_plugins/ # Chaos scenario plugins (pod, node, network, hogs, etc.)
|
||||
│ ├── utils/ # Utility functions
|
||||
│ ├── rollback/ # Rollback management
|
||||
│ ├── prometheus/ # Prometheus integration
|
||||
│ └── cerberus/ # Health monitoring
|
||||
├── tests/ # Unit tests (unittest framework)
|
||||
├── scenarios/ # Example scenario configs (openshift/, kube/, kind/)
|
||||
├── config/ # Configuration files
|
||||
└── CI/ # CI/CD test scripts
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
# Setup (ALWAYS use virtual environment)
|
||||
python3 -m venv venv
|
||||
source venv/bin/activate
|
||||
pip install -r requirements.txt
|
||||
|
||||
# Run Krkn
|
||||
python run_kraken.py --config config/config.yaml
|
||||
|
||||
# Note: Scenarios are specified in config.yaml under kraken.chaos_scenarios
|
||||
# There is no --scenario flag; edit config/config.yaml to select scenarios
|
||||
|
||||
# Run tests
|
||||
python -m unittest discover -s tests -v
|
||||
python -m coverage run -a -m unittest discover -s tests -v
|
||||
```
|
||||
|
||||
## Critical Requirements
|
||||
|
||||
### Python Environment
|
||||
- **Python 3.9+** required
|
||||
- **NEVER install packages globally** - always use virtual environment
|
||||
- **CRITICAL**: `docker` must be <7.0 and `requests` must be <2.32 (Unix socket compatibility)
|
||||
|
||||
### Key Dependencies
|
||||
- **krkn-lib** (5.1.13): Core library for Kubernetes/OpenShift operations
|
||||
- **kubernetes** (34.1.0): Kubernetes Python client
|
||||
- **docker** (<7.0), **requests** (<2.32): DO NOT upgrade without verifying compatibility
|
||||
- Cloud SDKs: boto3 (AWS), azure-mgmt-* (Azure), google-cloud-compute (GCP), ibm_vpc (IBM), pyVmomi (VMware)
|
||||
|
||||
## Plugin Architecture (CRITICAL)
|
||||
|
||||
**Strictly enforced naming conventions:**
|
||||
|
||||
### Naming Rules
|
||||
- **Module files**: Must end with `_scenario_plugin.py` and use snake_case
|
||||
- Example: `pod_disruption_scenario_plugin.py`
|
||||
- **Class names**: Must be CamelCase and end with `ScenarioPlugin`
|
||||
- Example: `PodDisruptionScenarioPlugin`
|
||||
- Must match module filename (snake_case ↔ CamelCase)
|
||||
- **Directory structure**: Plugin dirs CANNOT contain "scenario" or "plugin"
|
||||
- Location: `krkn/scenario_plugins/<plugin_name>/`
|
||||
|
||||
### Plugin Implementation
|
||||
Every plugin MUST:
|
||||
1. Extend `AbstractScenarioPlugin`
|
||||
2. Implement `run()` method
|
||||
3. Implement `get_scenario_types()` method
|
||||
|
||||
```python
|
||||
from krkn.scenario_plugins import AbstractScenarioPlugin
|
||||
|
||||
class PodDisruptionScenarioPlugin(AbstractScenarioPlugin):
|
||||
def run(self, config, scenarios_list, kubeconfig_path, wait_duration):
|
||||
pass
|
||||
|
||||
def get_scenario_types(self):
|
||||
return ["pod_scenarios", "pod_outage"]
|
||||
```
|
||||
|
||||
### Creating a New Plugin
|
||||
1. Create directory: `krkn/scenario_plugins/<plugin_name>/`
|
||||
2. Create module: `<plugin_name>_scenario_plugin.py`
|
||||
3. Create class: `<PluginName>ScenarioPlugin` extending `AbstractScenarioPlugin`
|
||||
4. Implement `run()` and `get_scenario_types()`
|
||||
5. Create unit test: `tests/test_<plugin_name>_scenario_plugin.py`
|
||||
6. Add example scenario: `scenarios/<platform>/<scenario>.yaml`
|
||||
|
||||
**DO NOT**: Violate naming conventions (factory will reject), include "scenario"/"plugin" in directory names, create plugins without tests.
|
||||
|
||||
## Testing
|
||||
|
||||
### Unit Tests
|
||||
```bash
|
||||
# Run all tests
|
||||
python -m unittest discover -s tests -v
|
||||
|
||||
# Specific test
|
||||
python -m unittest tests.test_pod_disruption_scenario_plugin
|
||||
|
||||
# With coverage
|
||||
python -m coverage run -a -m unittest discover -s tests -v
|
||||
python -m coverage html
|
||||
```
|
||||
|
||||
**Test requirements:**
|
||||
- Naming: `test_<module>_scenario_plugin.py`
|
||||
- Mock external dependencies (Kubernetes API, cloud providers)
|
||||
- Test success, failure, and edge cases
|
||||
- Keep tests isolated and independent
|
||||
|
||||
### Functional Tests
|
||||
Located in `CI/tests/`. Can be run locally on a kind cluster with Prometheus and Elasticsearch set up.
|
||||
|
||||
**Setup for local testing:**
|
||||
1. Deploy Prometheus and Elasticsearch on your kind cluster:
|
||||
- Prometheus setup: https://krkn-chaos.dev/docs/developers-guide/testing-changes/#prometheus
|
||||
- Elasticsearch setup: https://krkn-chaos.dev/docs/developers-guide/testing-changes/#elasticsearch
|
||||
|
||||
2. Or disable monitoring features in `config/config.yaml`:
|
||||
```yaml
|
||||
performance_monitoring:
|
||||
enable_alerts: False
|
||||
enable_metrics: False
|
||||
check_critical_alerts: False
|
||||
```
|
||||
|
||||
**Note:** Functional tests run automatically in CI with full monitoring enabled.
|
||||
|
||||
## Cloud Provider Implementations
|
||||
|
||||
Node chaos scenarios are cloud-specific. Each in `krkn/scenario_plugins/node_actions/<provider>_node_scenarios.py`:
|
||||
- AWS, Azure, GCP, IBM Cloud, VMware, Alibaba, OpenStack, Bare Metal
|
||||
|
||||
Implement: stop, start, reboot, terminate instances.
|
||||
|
||||
**When modifying**: Maintain consistency with other providers, handle API errors, add logging, update tests.
|
||||
|
||||
### Adding Cloud Provider Support
|
||||
1. Create: `krkn/scenario_plugins/node_actions/<provider>_node_scenarios.py`
|
||||
2. Extend: `abstract_node_scenarios.AbstractNodeScenarios`
|
||||
3. Implement: `stop_instances`, `start_instances`, `reboot_instances`, `terminate_instances`
|
||||
4. Add SDK to `requirements.txt`
|
||||
5. Create unit test with mocked SDK
|
||||
6. Add example scenario: `scenarios/openshift/<provider>_node_scenarios.yml`
|
||||
|
||||
## Configuration
|
||||
|
||||
**Main config**: `config/config.yaml`
|
||||
- `kraken`: Core settings
|
||||
- `cerberus`: Health monitoring
|
||||
- `performance_monitoring`: Prometheus
|
||||
- `elastic`: Elasticsearch telemetry
|
||||
|
||||
**Scenario configs**: `scenarios/` directory
|
||||
```yaml
|
||||
- config:
|
||||
scenario_type: <type> # Must match plugin's get_scenario_types()
|
||||
```
|
||||
|
||||
## Code Style
|
||||
|
||||
- **Import order**: Standard library, third-party, local imports
|
||||
- **Naming**: snake_case (functions/variables), CamelCase (classes)
|
||||
- **Logging**: Use Python's `logging` module
|
||||
- **Error handling**: Return appropriate exit codes
|
||||
- **Docstrings**: Required for public functions/classes
|
||||
|
||||
## Exit Codes
|
||||
|
||||
Krkn uses specific exit codes to communicate execution status:
|
||||
|
||||
- `0`: Success - all scenarios passed, no critical alerts
|
||||
- `1`: Scenario failure - one or more scenarios failed
|
||||
- `2`: Critical alerts fired during execution
|
||||
- `3+`: Health check failure (Cerberus monitoring detected issues)
|
||||
|
||||
**When implementing scenarios:**
|
||||
- Return `0` on success
|
||||
- Return `1` on scenario-specific failures
|
||||
- Propagate health check failures appropriately
|
||||
- Log exit code reasons clearly
|
||||
|
||||
## Container Support
|
||||
|
||||
Krkn can run inside a container. See `containers/` directory.
|
||||
|
||||
**Building custom image:**
|
||||
```bash
|
||||
cd containers
|
||||
./compile_dockerfile.sh # Generates Dockerfile from template
|
||||
docker build -t krkn:latest .
|
||||
```
|
||||
|
||||
**Running containerized:**
|
||||
```bash
|
||||
docker run -v ~/.kube:/root/.kube:Z \
|
||||
-v $(pwd)/config:/config:Z \
|
||||
-v $(pwd)/scenarios:/scenarios:Z \
|
||||
krkn:latest
|
||||
```
|
||||
|
||||
## Git Workflow
|
||||
|
||||
- **NEVER commit directly to main**
|
||||
- **NEVER use `--force` without approval**
|
||||
- **ALWAYS create feature branches**: `git checkout -b feature/description`
|
||||
- **ALWAYS run tests before pushing**
|
||||
|
||||
**Conventional commits**: `feat:`, `fix:`, `test:`, `docs:`, `refactor:`
|
||||
|
||||
```bash
|
||||
git checkout main && git pull origin main
|
||||
git checkout -b feature/your-feature-name
|
||||
# Make changes, write tests
|
||||
python -m unittest discover -s tests -v
|
||||
git add <specific-files>
|
||||
git commit -m "feat: description"
|
||||
git push -u origin feature/your-feature-name
|
||||
```
|
||||
|
||||
## Environment Variables
|
||||
|
||||
- `KUBECONFIG`: Path to kubeconfig
|
||||
- `AWS_*`, `AZURE_*`, `GOOGLE_APPLICATION_CREDENTIALS`: Cloud credentials
|
||||
- `PROMETHEUS_URL`, `ELASTIC_URL`, `ELASTIC_PASSWORD`: Monitoring config
|
||||
|
||||
**NEVER commit credentials or API keys.**
|
||||
|
||||
## Common Pitfalls
|
||||
|
||||
1. Missing virtual environment - always activate venv
|
||||
2. Running functional tests without cluster setup
|
||||
3. Ignoring exit codes
|
||||
4. Modifying krkn-lib directly (it's a separate package)
|
||||
5. Upgrading docker/requests beyond version constraints
|
||||
|
||||
## Before Writing Code
|
||||
|
||||
1. Check for existing implementations
|
||||
2. Review existing plugins as examples
|
||||
3. Maintain consistency with cloud provider patterns
|
||||
4. Plan rollback logic
|
||||
5. Write tests alongside code
|
||||
6. Update documentation
|
||||
|
||||
## When Adding Dependencies
|
||||
|
||||
1. Check if functionality exists in krkn-lib or current dependencies
|
||||
2. Verify compatibility with existing versions
|
||||
3. Pin specific versions in `requirements.txt`
|
||||
4. Check for security vulnerabilities
|
||||
5. Test thoroughly for conflicts
|
||||
|
||||
## Common Development Tasks
|
||||
|
||||
### Modifying Existing Plugin
|
||||
1. Read plugin code and corresponding test
|
||||
2. Make changes
|
||||
3. Update/add unit tests
|
||||
4. Run: `python -m unittest tests.test_<plugin>_scenario_plugin`
|
||||
|
||||
### Writing Unit Tests
|
||||
1. Create: `tests/test_<module>_scenario_plugin.py`
|
||||
2. Import `unittest` and plugin class
|
||||
3. Mock external dependencies
|
||||
4. Test success, failure, and edge cases
|
||||
5. Run: `python -m unittest tests.test_<module>_scenario_plugin`
|
||||
|
||||
83
GOVERNANCE.md
Normal file
83
GOVERNANCE.md
Normal file
@@ -0,0 +1,83 @@
|
||||
|
||||
|
||||
|
||||
The governance model adopted here is heavily influenced by a set of CNCF projects, especially drew
|
||||
reference from [Kubernetes governance](https://github.com/kubernetes/community/blob/master/governance.md).
|
||||
*For similar structures some of the same wordings from kubernetes governance are borrowed to adhere
|
||||
to the originally construed meaning.*
|
||||
|
||||
## Principles
|
||||
|
||||
- **Open**: Krkn is open source community.
|
||||
- **Welcoming and respectful**: See [Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
|
||||
- **Transparent and accessible**: Work and collaboration should be done in public.
|
||||
Changes to the Krkn organization, Krkn code repositories, and CNCF related activities (e.g.
|
||||
level, involvement, etc) are done in public.
|
||||
- **Merit**: Ideas and contributions are accepted according to their technical merit
|
||||
and alignment with project objectives, scope and design principles.
|
||||
|
||||
## Code of Conduct
|
||||
|
||||
Krkn follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
|
||||
Here is an excerpt:
|
||||
|
||||
> As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities.
|
||||
|
||||
## Maintainer Levels
|
||||
|
||||
### Contributor
|
||||
Contributors contribute to the community. Anyone can become a contributor by participating in discussions, reporting bugs, or contributing code or documentation.
|
||||
|
||||
#### Responsibilities:
|
||||
|
||||
Be active in the community and adhere to the Code of Conduct.
|
||||
|
||||
Report bugs and suggest new features.
|
||||
|
||||
Contribute high-quality code and documentation.
|
||||
|
||||
|
||||
### Member
|
||||
Members are active contributors to the community. Members have demonstrated a strong understanding of the project's codebase and conventions.
|
||||
|
||||
#### Responsibilities:
|
||||
|
||||
Review pull requests for correctness, quality, and adherence to project standards.
|
||||
|
||||
Provide constructive and timely feedback to contributors.
|
||||
|
||||
Ensure that all contributions are well-tested and documented.
|
||||
|
||||
Work with maintainers to ensure a smooth and efficient release process.
|
||||
|
||||
### Maintainer
|
||||
Maintainers are responsible for the overall health and direction of the project. They are long-standing contributors who have shown a deep commitment to the project's success.
|
||||
|
||||
#### Responsibilities:
|
||||
|
||||
Set the technical direction and vision for the project.
|
||||
|
||||
Manage releases and ensure the stability of the main branch.
|
||||
|
||||
Make decisions on feature inclusion and project priorities.
|
||||
|
||||
Mentor other contributors and help grow the community.
|
||||
|
||||
Resolve disputes and make final decisions when consensus cannot be reached.
|
||||
|
||||
### Owner
|
||||
Owners have administrative access to the project and are the final decision-makers.
|
||||
|
||||
#### Responsibilities:
|
||||
|
||||
Manage the core team of maintainers and approvers.
|
||||
|
||||
Set the overall vision and strategy for the project.
|
||||
|
||||
Handle administrative tasks, such as managing the project's repository and other resources.
|
||||
|
||||
Represent the project in the broader open-source community.
|
||||
|
||||
|
||||
# Credits
|
||||
Sections of this document have been borrowed from [Kubernetes governance](https://github.com/kubernetes/community/blob/master/governance.md)
|
||||
@@ -1,12 +1,34 @@
|
||||
## Overview
|
||||
|
||||
This document contains a list of maintainers in this repo.
|
||||
This file lists the maintainers and committers of the Krkn project.
|
||||
|
||||
In short, maintainers are people who are in charge of the maintenance of the Krkn project. Committers are active community members who have shown that they are committed to the continuous development of the project through ongoing engagement with the community.
|
||||
|
||||
For detailed description of the roles, see [Governance](./GOVERNANCE.md) page.
|
||||
|
||||
## Current Maintainers
|
||||
|
||||
| Maintainer | GitHub ID | Email |
|
||||
|---------------------| --------------------------------------------------------- | ----------------------- |
|
||||
| Ravi Elluri | [chaitanyaenr](https://github.com/chaitanyaenr) | nelluri@redhat.com |
|
||||
| Pradeep Surisetty | [psuriset](https://github.com/psuriset) | psuriset@redhat.com |
|
||||
| Paige Rubendall | [paigerube14](https://github.com/paigerube14) | prubenda@redhat.com |
|
||||
| Tullio Sebastiani | [tsebastiani](https://github.com/tsebastiani) | tsebasti@redhat.com |
|
||||
| Maintainer | GitHub ID | Email | Contribution Level |
|
||||
|---------------------| --------------------------------------------------------- | ----------------------- | ---------------------- |
|
||||
| Ravi Elluri | [chaitanyaenr](https://github.com/chaitanyaenr) | nelluri@redhat.com | Owner |
|
||||
| Pradeep Surisetty | [psuriset](https://github.com/psuriset) | psuriset@redhat.com | Owner |
|
||||
| Paige Patton | [paigerube14](https://github.com/paigerube14) | prubenda@redhat.com | Maintainer |
|
||||
| Tullio Sebastiani | [tsebastiani](https://github.com/tsebastiani) | tsebasti@redhat.com | Maintainer |
|
||||
| Yogananth Subramanian | [yogananth-subramanian](https://github.com/yogananth-subramanian) | ysubrama@redhat.com |Maintainer |
|
||||
| Sahil Shah | [shahsahil264](https://github.com/shahsahil264) | sahshah@redhat.com | Member |
|
||||
|
||||
|
||||
Note : It is mandatory for all Krkn community members to follow our [Code of Conduct](./CODE_OF_CONDUCT.md)
|
||||
|
||||
|
||||
## Contributor Ladder
|
||||
This project follows a contributor ladder model, where contributors can take on more responsibilities as they gain experience and demonstrate their commitment to the project.
|
||||
The roles are:
|
||||
* Contributor: A contributor to the community whether it be with code, docs or issues
|
||||
|
||||
* Member: A contributor who is active in the community and reviews pull requests.
|
||||
|
||||
* Maintainer: A contributor who is responsible for the overall health and direction of the project.
|
||||
|
||||
* Owner: A contributor who has administrative ownership of the project.
|
||||
|
||||
112
README.md
112
README.md
@@ -1,6 +1,8 @@
|
||||
# Krkn aka Kraken
|
||||
[](https://quay.io/repository/krkn-chaos/krkn?tab=tags&tag=latest)
|
||||

|
||||

|
||||

|
||||
[](https://www.bestpractices.dev/projects/10548)
|
||||
|
||||

|
||||
|
||||
@@ -9,111 +11,20 @@ Kraken injects deliberate failures into Kubernetes clusters to check if it is re
|
||||
|
||||
|
||||
### Workflow
|
||||

|
||||
|
||||
### Demo
|
||||
[](https://youtu.be/LN-fZywp_mo "Kraken Demo - Click to Watch!")
|
||||

|
||||
|
||||
|
||||
### Chaos Testing Guide
|
||||
[Guide](docs/index.md) encapsulates:
|
||||
- Test methodology that needs to be embraced.
|
||||
- Best practices that an Kubernetes cluster, platform and applications running on top of it should take into account for best user experience, performance, resilience and reliability.
|
||||
- Tooling.
|
||||
- Scenarios supported.
|
||||
- Test environment recommendations as to how and where to run chaos tests.
|
||||
- Chaos testing in practice.
|
||||
|
||||
The guide is hosted at https://krkn-chaos.github.io/krkn.
|
||||
<!-- ### Demo
|
||||
[](https://youtu.be/LN-fZywp_mo "Kraken Demo - Click to Watch!") -->
|
||||
|
||||
|
||||
### How to Get Started
|
||||
Instructions on how to setup, configure and run Kraken can be found at [Installation](docs/installation.md).
|
||||
|
||||
You may consider utilizing the chaos recommendation tool prior to initiating the chaos runs to profile the application service(s) under test. This tool discovers a list of Krkn scenarios with a high probability of causing failures or disruptions to your application service(s). The tool can be accessed at [Chaos-Recommender](utils/chaos_recommender/README.md).
|
||||
|
||||
See the [getting started doc](docs/getting_started.md) on support on how to get started with your own custom scenario or editing current scenarios for your specific usage.
|
||||
|
||||
After installation, refer back to the below sections for supported scenarios and how to tweak the kraken config to load them on your cluster.
|
||||
Instructions on how to setup, configure and run Kraken can be found in the [documentation](https://krkn-chaos.dev/docs/).
|
||||
|
||||
|
||||
#### Running Kraken with minimal configuration tweaks
|
||||
For cases where you want to run Kraken with minimal configuration changes, refer to [krkn-hub](https://github.com/krkn-chaos/krkn-hub). One use case is CI integration where you do not want to carry around different configuration files for the scenarios.
|
||||
### Blogs, podcasts and interviews
|
||||
Additional resources, including blog posts, podcasts, and community interviews, can be found on the [website](https://krkn-chaos.dev/blog)
|
||||
|
||||
### Setting up infrastructure dependencies
|
||||
Kraken indexes the metrics specified in the profile into Elasticsearch in addition to leveraging Cerberus for understanding the health of the Kubernetes cluster under test. More information on the features is documented below. The infrastructure pieces can be easily installed and uninstalled by running:
|
||||
|
||||
```
|
||||
$ cd kraken
|
||||
$ podman-compose up or $ docker-compose up # Spins up the containers specified in the docker-compose.yml file present in the run directory.
|
||||
$ podman-compose down or $ docker-compose down # Delete the containers installed.
|
||||
```
|
||||
This will manage the Cerberus and Elasticsearch containers on the host on which you are running Kraken.
|
||||
|
||||
**NOTE**: Make sure you have enough resources (memory and disk) on the machine on top of which the containers are running as Elasticsearch is resource intensive. Cerberus monitors the system components by default, the [config](config/cerberus.yaml) can be tweaked to add applications namespaces, routes and other components to monitor as well. The command will keep running until killed since detached mode is not supported as of now.
|
||||
|
||||
|
||||
### Config
|
||||
Instructions on how to setup the config and the options supported can be found at [Config](docs/config.md).
|
||||
|
||||
|
||||
### Kubernetes chaos scenarios supported
|
||||
|
||||
Scenario type | Kubernetes
|
||||
--------------------------- | ------------- |
|
||||
[Pod Scenarios](docs/pod_scenarios.md) | :heavy_check_mark: |
|
||||
[Pod Network Scenarios](docs/pod_network_scenarios.md) | :x: |
|
||||
[Container Scenarios](docs/container_scenarios.md) | :heavy_check_mark: |
|
||||
[Node Scenarios](docs/node_scenarios.md) | :heavy_check_mark: |
|
||||
[Time Scenarios](docs/time_scenarios.md) | :heavy_check_mark: |
|
||||
[Hog Scenarios: CPU, Memory](docs/arcaflow_scenarios.md) | :heavy_check_mark: |
|
||||
[Cluster Shut Down Scenarios](docs/cluster_shut_down_scenarios.md) | :heavy_check_mark: |
|
||||
[Service Disruption Scenarios](docs/service_disruption_scenarios.md.md) | :heavy_check_mark: |
|
||||
[Zone Outage Scenarios](docs/zone_outage.md) | :heavy_check_mark: |
|
||||
[Application_outages](docs/application_outages.md) | :heavy_check_mark: |
|
||||
[PVC scenario](docs/pvc_scenario.md) | :heavy_check_mark: |
|
||||
[Network_Chaos](docs/network_chaos.md) | :heavy_check_mark: |
|
||||
[ManagedCluster Scenarios](docs/managedcluster_scenarios.md) | :heavy_check_mark: |
|
||||
|
||||
|
||||
### Kraken scenario pass/fail criteria and report
|
||||
It is important to make sure to check if the targeted component recovered from the chaos injection and also if the Kubernetes cluster is healthy as failures in one component can have an adverse impact on other components. Kraken does this by:
|
||||
- Having built in checks for pod and node based scenarios to ensure the expected number of replicas and nodes are up. It also supports running custom scripts with the checks.
|
||||
- Leveraging [Cerberus](https://github.com/krkn-chaos/cerberus) to monitor the cluster under test and consuming the aggregated go/no-go signal to determine pass/fail post chaos. It is highly recommended to turn on the Cerberus health check feature available in Kraken. Instructions on installing and setting up Cerberus can be found [here](https://github.com/openshift-scale/cerberus#installation) or can be installed from Kraken using the [instructions](https://github.com/krkn-chaos/krkn#setting-up-infrastructure-dependencies). Once Cerberus is up and running, set cerberus_enabled to True and cerberus_url to the url where Cerberus publishes go/no-go signal in the Kraken config file. Cerberus can monitor [application routes](https://github.com/redhat-chaos/cerberus/blob/main/docs/config.md#watch-routes) during the chaos and fails the run if it encounters downtime as it is a potential downtime in a customers, or users environment as well. It is especially important during the control plane chaos scenarios including the API server, Etcd, Ingress etc. It can be enabled by setting `check_applicaton_routes: True` in the [Kraken config](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml) provided application routes are being monitored in the [cerberus config](https://github.com/redhat-chaos/krkn/blob/main/config/cerberus.yaml).
|
||||
- Leveraging built-in alert collection feature to fail the runs in case of critical alerts.
|
||||
|
||||
### Signaling
|
||||
In CI runs or any external job it is useful to stop Kraken once a certain test or state gets reached. We created a way to signal to kraken to pause the chaos or stop it completely using a signal posted to a port of your choice.
|
||||
|
||||
For example if we have a test run loading the cluster running and kraken separately running; we want to be able to know when to start/stop the kraken run based on when the test run completes or gets to a certain loaded state.
|
||||
|
||||
More detailed information on enabling and leveraging this feature can be found [here](docs/signal.md).
|
||||
|
||||
|
||||
### Performance monitoring
|
||||
Monitoring the Kubernetes/OpenShift cluster to observe the impact of Kraken chaos scenarios on various components is key to find out the bottlenecks as it is important to make sure the cluster is healthy in terms of both recovery as well as performance during/after the failure has been injected. Instructions on enabling it can be found [here](docs/performance_dashboards.md).
|
||||
|
||||
|
||||
### SLOs validation during and post chaos
|
||||
- In addition to checking the recovery and health of the cluster and components under test, Kraken takes in a profile with the Prometheus expressions to validate and alerts, exits with a non-zero return code depending on the severity set. This feature can be used to determine pass/fail or alert on abnormalities observed in the cluster based on the metrics.
|
||||
- Kraken also provides the ability to check if any critical alerts are firing in the cluster post chaos and passes or fails the run accordingly.
|
||||
|
||||
Information on enabling and leveraging this feature can be found [here](docs/SLOs_validation.md)
|
||||
|
||||
|
||||
### OCM / ACM integration
|
||||
|
||||
Kraken supports injecting faults into [Open Cluster Management (OCM)](https://open-cluster-management.io/) and [Red Hat Advanced Cluster Management for Kubernetes (ACM)](https://www.redhat.com/en/technologies/management/advanced-cluster-management) managed clusters through [ManagedCluster Scenarios](docs/managedcluster_scenarios.md).
|
||||
|
||||
|
||||
### Blogs and other useful resources
|
||||
- Blog post on introduction to Kraken: https://www.openshift.com/blog/introduction-to-kraken-a-chaos-tool-for-openshift/kubernetes
|
||||
- Discussion and demo on how Kraken can be leveraged to ensure OpenShift is reliable, performant and scalable: https://www.youtube.com/watch?v=s1PvupI5sD0&ab_channel=OpenShift
|
||||
- Blog post emphasizing the importance of making Chaos part of Performance and Scale runs to mimic the production environments: https://www.openshift.com/blog/making-chaos-part-of-kubernetes/openshift-performance-and-scalability-tests
|
||||
- Blog post on findings from Chaos test runs: https://cloud.redhat.com/blog/openshift/kubernetes-chaos-stories
|
||||
- Discussion with CNCF TAG App Delivery on Krkn workflow, features and addition to CNCF sandbox: [Github](https://github.com/cncf/sandbox/issues/44), [Tracker](https://github.com/cncf/tag-app-delivery/issues/465), [recording](https://www.youtube.com/watch?v=nXQkBFK_MWc&t=722s)
|
||||
- Blog post on supercharging chaos testing using AI integration in Krkn: https://www.redhat.com/en/blog/supercharging-chaos-testing-using-ai
|
||||
- Blog post announcing Krkn joining CNCF Sandbox: https://www.redhat.com/en/blog/krknchaos-joining-cncf-sandbox
|
||||
|
||||
### Roadmap
|
||||
Enhancements being planned can be found in the [roadmap](ROADMAP.md).
|
||||
@@ -122,10 +33,7 @@ Enhancements being planned can be found in the [roadmap](ROADMAP.md).
|
||||
### Contributions
|
||||
We are always looking for more enhancements, fixes to make it better, any contributions are most welcome. Feel free to report or work on the issues filed on github.
|
||||
|
||||
[More information on how to Contribute](docs/contribute.md)
|
||||
|
||||
If adding a new scenario or tweaking the main config, be sure to add in updates into the CI to be sure the CI is up to date.
|
||||
Please read [this file](CI/README.md#adding-a-test-case) for more information on updates.
|
||||
[More information on how to Contribute](https://krkn-chaos.dev/docs/contribution-guidelines/)
|
||||
|
||||
|
||||
### Community
|
||||
|
||||
55
RELEASE.md
Normal file
55
RELEASE.md
Normal file
@@ -0,0 +1,55 @@
|
||||
### Release Protocol: The Community-First Cycle
|
||||
|
||||
This document outlines the project's release protocol, a methodology designed to ensure a responsive and transparent development process that is closely aligned with the needs of our users and contributors. This protocol is tailored for projects in their early stages, prioritizing agility and community feedback over a rigid, time-boxed schedule.
|
||||
|
||||
#### 1. Key Principles
|
||||
|
||||
* **Community as the Compass:** The primary driver for all development is feedback from our user and contributor community.
|
||||
* **Prioritization by Impact:** Tasks are prioritized based on their impact on user experience, the urgency of bug fixes, and the value of community-contributed features.
|
||||
* **Event-Driven Releases:** Releases are not bound by a fixed calendar. New versions are published when a significant body of work is complete, a critical issue is resolved, or a new feature is ready for adoption.
|
||||
* **Transparency and Communication:** All development decisions, progress, and plans are communicated openly through our issue tracker, pull requests, and community channels.
|
||||
|
||||
#### 2. The Release Lifecycle
|
||||
|
||||
The release cycle is a continuous flow of activities rather than a series of sequential phases.
|
||||
|
||||
**2.1. Discovery & Prioritization**
|
||||
* New features and bug fixes are identified through user feedback on our issue tracker, community discussions, and direct contributions.
|
||||
* The core maintainers, in collaboration with the community, continuously evaluate and tag issues to create an open and dynamic backlog.
|
||||
|
||||
**2.2. Development & Code Review**
|
||||
* Work is initiated based on the highest-priority items in the backlog.
|
||||
* All code contributions are made via pull requests (PRs).
|
||||
* PRs are reviewed by maintainers and other contributors to ensure code quality, adherence to project standards, and overall stability.
|
||||
|
||||
**2.3. Release Readiness**
|
||||
A new release is considered ready when one of the following conditions is met:
|
||||
* A major new feature has been completed and thoroughly tested.
|
||||
* A critical security vulnerability or bug has been addressed.
|
||||
* A sufficient number of smaller improvements and fixes have been merged, providing meaningful value to users.
|
||||
|
||||
**2.4. Versioning**
|
||||
We adhere to [**Semantic Versioning 2.0.0**](https://semver.org/).
|
||||
* **Major version (`X.y.z`)**: Reserved for releases that introduce breaking changes.
|
||||
* **Minor version (`x.Y.z`)**: Used for new features or significant non-breaking changes.
|
||||
* **Patch version (`x.y.Z`)**: Used for bug fixes and small, non-functional improvements.
|
||||
|
||||
#### 3. Roles and Responsibilities
|
||||
|
||||
* **Members:** The [core team](https://github.com/krkn-chaos/krkn/blob/main/MAINTAINERS.md) responsible for the project's health. Their duties include:
|
||||
* Reviewing pull requests.
|
||||
* Contributing code and documentation via pull requests.
|
||||
* Engaging in discussions and providing feedback.
|
||||
* **Maintainers and Owners:** The [core team](https://github.com/krkn-chaos/krkn/blob/main/MAINTAINERS.md) responsible for the project's health. Their duties include:
|
||||
* Facilitating community discussions and prioritization.
|
||||
* Reviewing and merging pull requests.
|
||||
* Cutting and announcing official releases.
|
||||
* **Contributors:** The community. Their duties include:
|
||||
* Reporting bugs and suggesting new features.
|
||||
* Contributing code and documentation via pull requests.
|
||||
* Engaging in discussions and providing feedback.
|
||||
|
||||
#### 4. Adoption and Future Evolution
|
||||
|
||||
This protocol is designed for the current stage of the project. As the project matures and the contributor base grows, the maintainers will evaluate the need for a more structured methodology to ensure continued scalability and stability.
|
||||
|
||||
25
ROADMAP.md
25
ROADMAP.md
@@ -2,14 +2,25 @@
|
||||
|
||||
Following are a list of enhancements that we are planning to work on adding support in Krkn. Of course any help/contributions are greatly appreciated.
|
||||
|
||||
- [ ] [Ability to run multiple chaos scenarios in parallel under load to mimic real world outages](https://github.com/krkn-chaos/krkn/issues/424)
|
||||
- [x] [Ability to run multiple chaos scenarios in parallel under load to mimic real world outages](https://github.com/krkn-chaos/krkn/issues/424)
|
||||
- [x] [Centralized storage for chaos experiments artifacts](https://github.com/krkn-chaos/krkn/issues/423)
|
||||
- [ ] [Support for causing DNS outages](https://github.com/krkn-chaos/krkn/issues/394)
|
||||
- [x] [Support for causing DNS outages](https://github.com/krkn-chaos/krkn/issues/394)
|
||||
- [x] [Chaos recommender](https://github.com/krkn-chaos/krkn/tree/main/utils/chaos-recommender) to suggest scenarios having probability of impacting the service under test using profiling results
|
||||
- [ ] Chaos AI integration to improve and automate test coverage
|
||||
- [x] Chaos AI integration to improve test coverage while reducing fault space to save costs and execution time [krkn-chaos-ai](https://github.com/krkn-chaos/krkn-chaos-ai)
|
||||
- [x] [Support for pod level network traffic shaping](https://github.com/krkn-chaos/krkn/issues/393)
|
||||
- [ ] [Ability to visualize the metrics that are being captured by Kraken and stored in Elasticsearch](https://github.com/krkn-chaos/krkn/issues/124)
|
||||
- [ ] Support for running all the scenarios of Kraken on Kubernetes distribution - see https://github.com/krkn-chaos/krkn/issues/185, https://github.com/redhat-chaos/krkn/issues/186
|
||||
- [ ] Continue to improve [Chaos Testing Guide](https://krkn-chaos.github.io/krkn) in terms of adding best practices, test environment recommendations and scenarios to make sure the OpenShift platform, as well the applications running on top it, are resilient and performant under chaotic conditions.
|
||||
- [ ] [Switch documentation references to Kubernetes](https://github.com/krkn-chaos/krkn/issues/495)
|
||||
- [ ] [OCP and Kubernetes functionalities segregation](https://github.com/krkn-chaos/krkn/issues/497)
|
||||
- [x] Support for running all the scenarios of Kraken on Kubernetes distribution - see https://github.com/krkn-chaos/krkn/issues/185, https://github.com/redhat-chaos/krkn/issues/186
|
||||
- [x] Continue to improve [Chaos Testing Guide](https://krkn-chaos.github.io/krkn) in terms of adding best practices, test environment recommendations and scenarios to make sure the OpenShift platform, as well the applications running on top it, are resilient and performant under chaotic conditions.
|
||||
- [x] [Switch documentation references to Kubernetes](https://github.com/krkn-chaos/krkn/issues/495)
|
||||
- [x] [OCP and Kubernetes functionalities segregation](https://github.com/krkn-chaos/krkn/issues/497)
|
||||
- [x] [Krknctl - client for running Krkn scenarios with ease](https://github.com/krkn-chaos/krknctl)
|
||||
- [x] [AI Chat bot to help get started with Krkn and commands](https://github.com/krkn-chaos/krkn-lightspeed)
|
||||
- [ ] [Ability to roll back cluster to original state if chaos fails](https://github.com/krkn-chaos/krkn/issues/804)
|
||||
- [ ] Add recovery time metrics to each scenario for better regression analysis
|
||||
- [ ] [Add resiliency scoring to chaos scenarios ran on cluster](https://github.com/krkn-chaos/krkn/issues/125)
|
||||
- [ ] [Add AI-based Chaos Configuration Generator](https://github.com/krkn-chaos/krkn/issues/1166)
|
||||
- [ ] [Introduce Security Chaos Engineering Scenarios](https://github.com/krkn-chaos/krkn/issues/1165)
|
||||
- [ ] [Add AWS-native Chaos Scenarios (S3, Lambda, Networking)](https://github.com/krkn-chaos/krkn/issues/1164)
|
||||
- [ ] [Unify Krkn Ecosystem under krknctl for Enhanced UX](https://github.com/krkn-chaos/krknctl/issues/113)
|
||||
- [ ] [Build Web UI for Creating, Monitoring, and Reviewing Chaos Scenarios](https://github.com/krkn-chaos/krkn/issues/1167)
|
||||
- [ ] [Add Predefined Chaos Scenario Templates (KRKN Chaos Library)](https://github.com/krkn-chaos/krkn/issues/1168)
|
||||
|
||||
43
SECURITY.md
Normal file
43
SECURITY.md
Normal file
@@ -0,0 +1,43 @@
|
||||
# Security Policy
|
||||
|
||||
We attach great importance to code security. We are very grateful to the users, security vulnerability researchers, etc. for reporting security vulnerabilities to the Krkn community. All reported security vulnerabilities will be carefully assessed and addressed in a timely manner.
|
||||
|
||||
|
||||
## Security Checks
|
||||
|
||||
Krkn leverages [Snyk](https://snyk.io/) to ensure that any security vulnerabilities found
|
||||
in the code base and dependencies are fixed and published in the latest release. Security
|
||||
vulnerability checks are enabled for each pull request to enable developers to get insights
|
||||
and proactively fix them.
|
||||
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
The Krkn project treats security vulnerabilities seriously, so we
|
||||
strive to take action quickly when required.
|
||||
|
||||
The project requests that security issues be disclosed in a responsible
|
||||
manner to allow adequate time to respond. If a security issue or
|
||||
vulnerability has been found, please disclose the details to our
|
||||
dedicated email address:
|
||||
|
||||
cncf-krkn-maintainers@lists.cncf.io
|
||||
|
||||
You can also use the [GitHub vulnerability report mechanism](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability) to report the security vulnerability.
|
||||
|
||||
Please include as much information as possible with the report. The
|
||||
following details assist with analysis efforts:
|
||||
- Description of the vulnerability
|
||||
- Affected component (version, commit, branch etc)
|
||||
- Affected code (file path, line numbers)
|
||||
- Exploit code
|
||||
|
||||
|
||||
## Security Team
|
||||
|
||||
The security team currently consists of the [Maintainers of Krkn](https://github.com/krkn-chaos/krkn/blob/main/MAINTAINERS.md)
|
||||
|
||||
|
||||
## Process and Supported Releases
|
||||
|
||||
The Krkn security team will investigate and provide a fix in a timely manner depending on the severity. The fix will be included in the new release of Krkn and details will be included in the release notes.
|
||||
@@ -8,7 +8,7 @@
|
||||
description: 10 minutes avg. 99th etcd fsync latency on {{$labels.pod}} higher than 1s. {{$value}}s
|
||||
severity: error
|
||||
|
||||
- expr: avg_over_time(histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket[2m]))[10m:]) > 0.007
|
||||
- expr: avg_over_time(histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket[2m]))[10m:]) > 0.03
|
||||
description: 10 minutes avg. 99th etcd commit latency on {{$labels.pod}} higher than 30ms. {{$value}}s
|
||||
severity: warning
|
||||
|
||||
@@ -88,3 +88,42 @@
|
||||
- expr: ALERTS{severity="critical", alertstate="firing"} > 0
|
||||
description: Critical prometheus alert. {{$labels.alertname}}
|
||||
severity: warning
|
||||
|
||||
# etcd CPU and usage increase
|
||||
- expr: sum(rate(container_cpu_usage_seconds_total{image!='', namespace='openshift-etcd', container='etcd'}[1m])) * 100 / sum(machine_cpu_cores) > 5
|
||||
description: Etcd CPU usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
# etcd memory usage increase
|
||||
- expr: sum(deriv(container_memory_usage_bytes{image!='', namespace='openshift-etcd', container='etcd'}[5m])) * 100 / sum(node_memory_MemTotal_bytes) > 5
|
||||
description: Etcd memory usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
# Openshift API server CPU and memory usage increase
|
||||
- expr: sum(rate(container_cpu_usage_seconds_total{image!='', namespace='openshift-apiserver', container='openshift-apiserver'}[1m])) * 100 / sum(machine_cpu_cores) > 5
|
||||
description: openshift apiserver cpu usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
- expr: (sum(deriv(container_memory_usage_bytes{namespace='openshift-apiserver', container='openshift-apiserver'}[5m]))) * 100 / sum(node_memory_MemTotal_bytes) > 5
|
||||
description: openshift apiserver memory usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
# Openshift kube API server CPU and memory usage increase
|
||||
- expr: sum(rate(container_cpu_usage_seconds_total{image!='', namespace='openshift-kube-apiserver', container='kube-apiserver'}[1m])) * 100 / sum(machine_cpu_cores) > 5
|
||||
description: openshift apiserver cpu usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
- expr: (sum(deriv(container_memory_usage_bytes{namespace='openshift-kube-apiserver', container='kube-apiserver'}[5m]))) * 100 / sum(node_memory_MemTotal_bytes) > 5
|
||||
description: openshift apiserver memory usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
# Master node CPU usage increase
|
||||
- expr: (sum((sum(deriv(pod:container_cpu_usage:sum{container="",pod!=""}[5m])) BY (namespace, pod) * on(pod, namespace) group_left(node) (node_namespace_pod:kube_pod_info:) ) * on(node) group_left(role) (max by (node) (kube_node_role{role="master"})))) * 100 / sum(machine_cpu_cores) > 5
|
||||
description: master nodes cpu usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
# Master nodes memory usage increase
|
||||
- expr: (sum((sum(deriv(container_memory_usage_bytes{container="",pod!=""}[5m])) BY (namespace, pod) * on(pod, namespace) group_left(node) (node_namespace_pod:kube_pod_info:) ) * on(node) group_left(role) (max by (node) (kube_node_role{role="master"})))) * 100 / sum(node_memory_MemTotal_bytes) > 5
|
||||
description: master nodes memory usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
|
||||
@@ -99,3 +99,41 @@
|
||||
- expr: ALERTS{severity="critical", alertstate="firing"} > 0
|
||||
description: Critical prometheus alert. {{$labels.alertname}}
|
||||
severity: warning
|
||||
|
||||
# etcd CPU and usage increase
|
||||
- expr: sum(rate(container_cpu_usage_seconds_total{image!='', namespace='openshift-etcd', container='etcd'}[1m])) * 100 / sum(machine_cpu_cores) > 5
|
||||
description: Etcd CPU usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
# etcd memory usage increase
|
||||
- expr: sum(deriv(container_memory_usage_bytes{image!='', namespace='openshift-etcd', container='etcd'}[5m])) * 100 / sum(node_memory_MemTotal_bytes) > 5
|
||||
description: Etcd memory usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
# Openshift API server CPU and memory usage increase
|
||||
- expr: sum(rate(container_cpu_usage_seconds_total{image!='', namespace='openshift-apiserver', container='openshift-apiserver'}[1m])) * 100 / sum(machine_cpu_cores) > 5
|
||||
description: openshift apiserver cpu usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
- expr: (sum(deriv(container_memory_usage_bytes{namespace='openshift-apiserver', container='openshift-apiserver'}[5m]))) * 100 / sum(node_memory_MemTotal_bytes) > 5
|
||||
description: openshift apiserver memory usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
# Openshift kube API server CPU and memory usage increase
|
||||
- expr: sum(rate(container_cpu_usage_seconds_total{image!='', namespace='openshift-kube-apiserver', container='kube-apiserver'}[1m])) * 100 / sum(machine_cpu_cores) > 5
|
||||
description: openshift apiserver cpu usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
- expr: (sum(deriv(container_memory_usage_bytes{namespace='openshift-kube-apiserver', container='kube-apiserver'}[5m]))) * 100 / sum(node_memory_MemTotal_bytes) > 5
|
||||
description: openshift apiserver memory usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
# Master node CPU usage increase
|
||||
- expr: (sum((sum(deriv(pod:container_cpu_usage:sum{container="",pod!=""}[5m])) BY (namespace, pod) * on(pod, namespace) group_left(node) (node_namespace_pod:kube_pod_info:) ) * on(node) group_left(role) (max by (node) (kube_node_role{role="master"})))) * 100 / sum(machine_cpu_cores) > 5
|
||||
description: master nodes cpu usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
# Master nodes memory usage increase
|
||||
- expr: (sum((sum(deriv(container_memory_usage_bytes{container="",pod!=""}[5m])) BY (namespace, pod) * on(pod, namespace) group_left(node) (node_namespace_pod:kube_pod_info:) ) * on(node) group_left(role) (max by (node) (kube_node_role{role="master"})))) * 100 / sum(node_memory_MemTotal_bytes) > 5
|
||||
description: master nodes memory usage increased significantly by {{$value}}%
|
||||
severity: critical
|
||||
|
||||
@@ -39,7 +39,7 @@ cerberus:
|
||||
Sunday:
|
||||
slack_team_alias: # The slack team alias to be tagged while reporting failures in the slack channel when no watcher is assigned
|
||||
|
||||
custom_checks: # Relative paths of files conataining additional user defined checks
|
||||
custom_checks: # Relative paths of files containing additional user defined checks
|
||||
|
||||
tunings:
|
||||
timeout: 3 # Number of seconds before requests fail
|
||||
|
||||
@@ -1,78 +1,107 @@
|
||||
kraken:
|
||||
distribution: openshift # Distribution can be kubernetes or openshift
|
||||
kubeconfig_path: ~/.kube/config # Path to kubeconfig
|
||||
kubeconfig_path: ~/.kube/config # Path to kubeconfig
|
||||
exit_on_failure: False # Exit when a post action scenario fails
|
||||
auto_rollback: True # Enable auto rollback for scenarios.
|
||||
rollback_versions_directory: /tmp/kraken-rollback # Directory to store rollback version files.
|
||||
publish_kraken_status: True # Can be accessed at http://0.0.0.0:8081
|
||||
signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer docs/signal.md for more details
|
||||
signal_address: 0.0.0.0 # Signal listening address
|
||||
port: 8081 # Signal port
|
||||
chaos_scenarios:
|
||||
# List of policies/chaos scenarios to load
|
||||
- arcaflow_scenarios:
|
||||
- scenarios/arcaflow/cpu-hog/input.yaml
|
||||
- scenarios/arcaflow/memory-hog/input.yaml
|
||||
- scenarios/arcaflow/io-hog/input.yaml
|
||||
- application_outages:
|
||||
- scenarios/openshift/app_outage.yaml
|
||||
- container_scenarios: # List of chaos pod scenarios to load
|
||||
- - scenarios/openshift/container_etcd.yml
|
||||
- plugin_scenarios:
|
||||
- scenarios/openshift/etcd.yml
|
||||
- scenarios/openshift/regex_openshift_pod_kill.yml
|
||||
- scenarios/openshift/vmware_node_scenarios.yml
|
||||
- scenarios/openshift/network_chaos_ingress.yml
|
||||
- scenarios/openshift/prom_kill.yml
|
||||
- node_scenarios: # List of chaos node scenarios to load
|
||||
- scenarios/openshift/node_scenarios_example.yml
|
||||
- plugin_scenarios:
|
||||
- scenarios/openshift/openshift-apiserver.yml
|
||||
- scenarios/openshift/openshift-kube-apiserver.yml
|
||||
- time_scenarios: # List of chaos time scenarios to load
|
||||
- scenarios/openshift/time_scenarios_example.yml
|
||||
- cluster_shut_down_scenarios:
|
||||
- - scenarios/openshift/cluster_shut_down_scenario.yml
|
||||
- scenarios/openshift/post_action_shut_down.py
|
||||
- service_disruption_scenarios:
|
||||
- - scenarios/openshift/regex_namespace.yaml
|
||||
- - scenarios/openshift/ingress_namespace.yaml
|
||||
- scenarios/openshift/post_action_namespace.py
|
||||
- zone_outages:
|
||||
- scenarios/openshift/zone_outage.yaml
|
||||
- pvc_scenarios:
|
||||
- scenarios/openshift/pvc_scenario.yaml
|
||||
- network_chaos:
|
||||
- scenarios/openshift/network_chaos.yaml
|
||||
# List of policies/chaos scenarios to load
|
||||
- hog_scenarios:
|
||||
- scenarios/kube/cpu-hog.yml
|
||||
- scenarios/kube/memory-hog.yml
|
||||
- scenarios/kube/io-hog.yml
|
||||
- application_outages_scenarios:
|
||||
- scenarios/openshift/app_outage.yaml
|
||||
- container_scenarios: # List of chaos pod scenarios to load
|
||||
- scenarios/openshift/container_etcd.yml
|
||||
- pod_network_scenarios:
|
||||
- scenarios/openshift/network_chaos_ingress.yml
|
||||
- scenarios/openshift/pod_network_outage.yml
|
||||
- pod_disruption_scenarios:
|
||||
- scenarios/openshift/etcd.yml
|
||||
- scenarios/openshift/regex_openshift_pod_kill.yml
|
||||
- scenarios/openshift/prom_kill.yml
|
||||
- scenarios/openshift/openshift-apiserver.yml
|
||||
- scenarios/openshift/openshift-kube-apiserver.yml
|
||||
- node_scenarios: # List of chaos node scenarios to load
|
||||
- scenarios/openshift/aws_node_scenarios.yml
|
||||
- scenarios/openshift/vmware_node_scenarios.yml
|
||||
- scenarios/openshift/ibmcloud_node_scenarios.yml
|
||||
- time_scenarios: # List of chaos time scenarios to load
|
||||
- scenarios/openshift/time_scenarios_example.yml
|
||||
- cluster_shut_down_scenarios:
|
||||
- scenarios/openshift/cluster_shut_down_scenario.yml
|
||||
- service_disruption_scenarios:
|
||||
- scenarios/openshift/regex_namespace.yaml
|
||||
- scenarios/openshift/ingress_namespace.yaml
|
||||
- zone_outages_scenarios:
|
||||
- scenarios/openshift/zone_outage.yaml
|
||||
- pvc_scenarios:
|
||||
- scenarios/openshift/pvc_scenario.yaml
|
||||
- network_chaos_scenarios:
|
||||
- scenarios/openshift/network_chaos.yaml
|
||||
- service_hijacking_scenarios:
|
||||
- scenarios/kube/service_hijacking.yaml
|
||||
- syn_flood_scenarios:
|
||||
- scenarios/kube/syn_flood.yaml
|
||||
- network_chaos_ng_scenarios:
|
||||
- scenarios/kube/pod-network-filter.yml
|
||||
- scenarios/kube/node-network-filter.yml
|
||||
- scenarios/kube/node-network-chaos.yml
|
||||
- scenarios/kube/pod-network-chaos.yml
|
||||
- kubevirt_vm_outage:
|
||||
- scenarios/kubevirt/kubevirt-vm-outage.yaml
|
||||
|
||||
cerberus:
|
||||
cerberus_enabled: False # Enable it when cerberus is previously installed
|
||||
cerberus_url: # When cerberus_enabled is set to True, provide the url where cerberus publishes go/no-go signal
|
||||
check_applicaton_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
|
||||
check_application_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
|
||||
|
||||
performance_monitoring:
|
||||
deploy_dashboards: False # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift
|
||||
repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
|
||||
prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
|
||||
prometheus_url: '' # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
|
||||
prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
|
||||
uuid: # uuid for the run is generated by default if not set
|
||||
enable_alerts: False # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
|
||||
enable_metrics: False
|
||||
alert_profile: config/alerts.yaml # Path or URL to alert profile with the prometheus queries
|
||||
metrics_profile: config/metrics-report.yaml
|
||||
check_critical_alerts: False # When enabled will check prometheus for critical alerts firing post chaos
|
||||
elastic:
|
||||
enable_elastic: False
|
||||
verify_certs: False
|
||||
elastic_url: "" # To track results in elasticsearch, give url to server here; will post telemetry details when url and index not blank
|
||||
elastic_port: 32766
|
||||
username: "elastic"
|
||||
password: "test"
|
||||
metrics_index: "krkn-metrics"
|
||||
alerts_index: "krkn-alerts"
|
||||
telemetry_index: "krkn-telemetry"
|
||||
run_tag: ""
|
||||
|
||||
tunings:
|
||||
wait_duration: 60 # Duration to wait between each chaos scenario
|
||||
wait_duration: 1 # Duration to wait between each chaos scenario
|
||||
iterations: 1 # Number of times to execute the scenarios
|
||||
daemon_mode: False # Iterations are set to infinity which means that the kraken will cause chaos forever
|
||||
telemetry:
|
||||
enabled: False # enable/disables the telemetry collection feature
|
||||
api_url: https://ulnmf9xv7j.execute-api.us-west-2.amazonaws.com/production #telemetry service endpoint
|
||||
username: username # telemetry service username
|
||||
password: password # telemetry service password
|
||||
password: password # telemetry service password
|
||||
prometheus_backup: True # enables/disables prometheus data collection
|
||||
prometheus_namespace: "" # namespace where prometheus is deployed (if distribution is kubernetes)
|
||||
prometheus_container_name: "" # name of the prometheus container name (if distribution is kubernetes)
|
||||
prometheus_pod_name: "" # name of the prometheus pod (if distribution is kubernetes)
|
||||
full_prometheus_backup: False # if is set to False only the /prometheus/wal folder will be downloaded.
|
||||
backup_threads: 5 # number of telemetry download/upload threads
|
||||
archive_path: /tmp # local path where the archive files will be temporarly stored
|
||||
archive_path: /tmp # local path where the archive files will be temporarily stored
|
||||
max_retries: 0 # maximum number of upload retries (if 0 will retry forever)
|
||||
run_tag: '' # if set, this will be appended to the run folder in the bucket (useful to group the runs)
|
||||
archive_size: 500000 # the size of the prometheus data archive size in KB. The lower the size of archive is
|
||||
archive_size: 500000
|
||||
telemetry_group: '' # if set will archive the telemetry in the S3 bucket on a folder named after the value, otherwise will use "default"
|
||||
# the size of the prometheus data archive size in KB. The lower the size of archive is
|
||||
# the higher the number of archive files will be produced and uploaded (and processed by backup_threads
|
||||
# simultaneously).
|
||||
# For unstable/slow connection is better to keep this value low
|
||||
@@ -86,6 +115,21 @@ telemetry:
|
||||
oc_cli_path: /usr/bin/oc # optional, if not specified will be search in $PATH
|
||||
events_backup: True # enables/disables cluster events collection
|
||||
|
||||
health_checks: # Utilizing health check endpoints to observe application behavior during chaos injection.
|
||||
interval: # Interval in seconds to perform health checks, default value is 2 seconds
|
||||
config: # Provide list of health check configurations for applications
|
||||
- url: # Provide application endpoint
|
||||
bearer_token: # Bearer token for authentication if any
|
||||
auth: # Provide authentication credentials (username , password) in tuple format if any, ex:("admin","secretpassword")
|
||||
exit_on_failure: # If value is True exits when health check failed for application, values can be True/False
|
||||
|
||||
|
||||
|
||||
kubevirt_checks: # Utilizing virt check endpoints to observe ssh ability to VMI's during chaos injection.
|
||||
interval: 2 # Interval in seconds to perform virt checks, default value is 2 seconds
|
||||
namespace: # Namespace where to find VMI's
|
||||
name: # Regex Name style of VMI's to watch, optional, will watch all VMI names in the namespace if left blank
|
||||
only_failures: False # Boolean of whether to show all VMI's failures and successful ssh connection (False), or only failure status' (True)
|
||||
disconnected: False # Boolean of how to try to connect to the VMIs; if True will use the ip_address to try ssh from within a node, if false will use the name and uses virtctl to try to connect; Default is False
|
||||
ssh_node: "" # If set, will be a backup way to ssh to a node. Will want to set to a node that isn't targeted in chaos
|
||||
node_names: ""
|
||||
exit_on_failure: # If value is True and VMI's are failing post chaos returns failure, values can be True/False
|
||||
|
||||
|
||||
@@ -6,27 +6,34 @@ kraken:
|
||||
publish_kraken_status: True # Can be accessed at http://0.0.0.0:8081
|
||||
signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer docs/signal.md for more details
|
||||
signal_address: 0.0.0.0 # Signal listening address
|
||||
chaos_scenarios: # List of policies/chaos scenarios to load
|
||||
- plugin_scenarios:
|
||||
- scenarios/kind/scheduler.yml
|
||||
- node_scenarios:
|
||||
- scenarios/kind/node_scenarios_example.yml
|
||||
chaos_scenarios: # List of policies/chaos scenarios to load
|
||||
- pod_disruption_scenarios:
|
||||
- scenarios/kube/pod.yml
|
||||
|
||||
cerberus:
|
||||
cerberus_enabled: False # Enable it when cerberus is previously installed
|
||||
cerberus_url: # When cerberus_enabled is set to True, provide the url where cerberus publishes go/no-go signal
|
||||
check_applicaton_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
|
||||
check_application_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
|
||||
|
||||
performance_monitoring:
|
||||
deploy_dashboards: False # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift
|
||||
repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
|
||||
prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
|
||||
prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
|
||||
uuid: # uuid for the run is generated by default if not set
|
||||
enable_alerts: False # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
|
||||
alert_profile: config/alerts.yaml # Path to alert profile with the prometheus queries
|
||||
|
||||
elastic:
|
||||
enable_elastic: False
|
||||
|
||||
tunings:
|
||||
wait_duration: 60 # Duration to wait between each chaos scenario
|
||||
iterations: 1 # Number of times to execute the scenarios
|
||||
daemon_mode: False # Iterations are set to infinity which means that the kraken will cause chaos forever
|
||||
|
||||
telemetry:
|
||||
enabled: False # enable/disables the telemetry collection feature
|
||||
archive_path: /tmp # local path where the archive files will be temporarily stored
|
||||
events_backup: False # enables/disables cluster events collection
|
||||
logs_backup: False
|
||||
|
||||
health_checks: # Utilizing health check endpoints to observe application behavior during chaos injection.
|
||||
|
||||
@@ -7,18 +7,16 @@ kraken:
|
||||
signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer docs/signal.md for more details
|
||||
chaos_scenarios: # List of policies/chaos scenarios to load
|
||||
- container_scenarios: # List of chaos pod scenarios to load
|
||||
- - scenarios/kube/container_dns.yml
|
||||
- scenarios/kube/container_dns.yml
|
||||
- plugin_scenarios:
|
||||
- scenarios/kube/scheduler.yml
|
||||
|
||||
cerberus:
|
||||
cerberus_enabled: False # Enable it when cerberus is previously installed
|
||||
cerberus_url: # When cerberus_enabled is set to True, provide the url where cerberus publishes go/no-go signal
|
||||
check_applicaton_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
|
||||
check_application_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
|
||||
|
||||
performance_monitoring:
|
||||
deploy_dashboards: False # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift
|
||||
repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
|
||||
prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
|
||||
prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
|
||||
uuid: # uuid for the run is generated by default if not set
|
||||
|
||||
@@ -12,15 +12,14 @@ kraken:
|
||||
- scenarios/openshift/regex_openshift_pod_kill.yml
|
||||
- scenarios/openshift/prom_kill.yml
|
||||
- node_scenarios: # List of chaos node scenarios to load
|
||||
- scenarios/openshift/node_scenarios_example.yml
|
||||
- scenarios/openshift/node_scenarios_example.yml
|
||||
- plugin_scenarios:
|
||||
- scenarios/openshift/openshift-apiserver.yml
|
||||
- scenarios/openshift/openshift-kube-apiserver.yml
|
||||
- time_scenarios: # List of chaos time scenarios to load
|
||||
- scenarios/openshift/time_scenarios_example.yml
|
||||
- cluster_shut_down_scenarios:
|
||||
- - scenarios/openshift/cluster_shut_down_scenario.yml
|
||||
- scenarios/openshift/post_action_shut_down.py
|
||||
- scenarios/openshift/cluster_shut_down_scenario.yml
|
||||
- service_disruption_scenarios:
|
||||
- scenarios/openshift/regex_namespace.yaml
|
||||
- scenarios/openshift/ingress_namespace.yaml
|
||||
@@ -36,7 +35,7 @@ kraken:
|
||||
cerberus:
|
||||
cerberus_enabled: True # Enable it when cerberus is previously installed
|
||||
cerberus_url: http://0.0.0.0:8080 # When cerberus_enabled is set to True, provide the url where cerberus publishes go/no-go signal
|
||||
check_applicaton_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
|
||||
check_application_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
|
||||
|
||||
performance_monitoring:
|
||||
deploy_dashboards: True # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift
|
||||
@@ -62,7 +61,7 @@ telemetry:
|
||||
prometheus_backup: True # enables/disables prometheus data collection
|
||||
full_prometheus_backup: False # if is set to False only the /prometheus/wal folder will be downloaded.
|
||||
backup_threads: 5 # number of telemetry download/upload threads
|
||||
archive_path: /tmp # local path where the archive files will be temporarly stored
|
||||
archive_path: /tmp # local path where the archive files will be temporarily stored
|
||||
max_retries: 0 # maximum number of upload retries (if 0 will retry forever)
|
||||
run_tag: '' # if set, this will be appended to the run folder in the bucket (useful to group the runs)
|
||||
archive_size: 500000 # the size of the prometheus data archive size in KB. The lower the size of archive is
|
||||
@@ -77,3 +76,8 @@ telemetry:
|
||||
- "kinit (\\d+/\\d+/\\d+\\s\\d{2}:\\d{2}:\\d{2})\\s+" # kinit 2023/09/15 11:20:36 log
|
||||
- "(\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d+Z).+" # 2023-09-15T11:20:36.123425532Z log
|
||||
oc_cli_path: /usr/bin/oc # optional, if not specified will be search in $PATH
|
||||
elastic:
|
||||
elastic_url: "" # To track results in elasticsearch, give url to server here; will post telemetry details when url and index not blank
|
||||
elastic_index: "" # Elastic search index pattern to post results to
|
||||
|
||||
|
||||
|
||||
@@ -1,133 +1,126 @@
|
||||
metrics:
|
||||
# API server
|
||||
- query: histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{apiserver="kube-apiserver", verb!~"WATCH", subresource!="log"}[2m])) by (verb,resource,subresource,instance,le)) > 0
|
||||
metricName: API99thLatency
|
||||
|
||||
- query: sum(irate(apiserver_request_total{apiserver="kube-apiserver",verb!="WATCH",subresource!="log"}[2m])) by (verb,instance,resource,code) > 0
|
||||
metricName: APIRequestRate
|
||||
instant: True
|
||||
|
||||
- query: sum(apiserver_current_inflight_requests{}) by (request_kind) > 0
|
||||
metricName: APIInflightRequests
|
||||
instant: True
|
||||
|
||||
- query: histogram_quantile(0.99, rate(apiserver_current_inflight_requests[5m]))
|
||||
metricName: APIInflightRequests
|
||||
instant: True
|
||||
|
||||
# Container & pod metrics
|
||||
- query: (sum(container_memory_rss{name!="",container!="POD",namespace=~"openshift-(etcd|oauth-apiserver|.*apiserver|ovn-kubernetes|sdn|ingress|authentication|.*controller-manager|.*scheduler)"}) by (container, pod, namespace, node) and on (node) kube_node_role{role="master"}) > 0
|
||||
metricName: containerMemory-Masters
|
||||
instant: true
|
||||
|
||||
- query: (sum(irate(container_cpu_usage_seconds_total{name!="",container!="POD",namespace=~"openshift-(etcd|oauth-apiserver|sdn|ovn-kubernetes|.*apiserver|authentication|.*controller-manager|.*scheduler)"}[2m]) * 100) by (container, pod, namespace, node) and on (node) kube_node_role{role="master"}) > 0
|
||||
metricName: containerCPU-Masters
|
||||
instant: true
|
||||
|
||||
- query: (sum(irate(container_cpu_usage_seconds_total{pod!="",container="prometheus",namespace="openshift-monitoring"}[2m]) * 100) by (container, pod, namespace, node) and on (node) kube_node_role{role="infra"}) > 0
|
||||
metricName: containerCPU-Prometheus
|
||||
instant: true
|
||||
|
||||
- query: (avg(irate(container_cpu_usage_seconds_total{name!="",container!="POD",namespace=~"openshift-(sdn|ovn-kubernetes|ingress)"}[2m]) * 100 and on (node) kube_node_role{role="worker"}) by (namespace, container)) > 0
|
||||
metricName: containerCPU-AggregatedWorkers
|
||||
instant: true
|
||||
|
||||
- query: (avg(irate(container_cpu_usage_seconds_total{name!="",container!="POD",namespace=~"openshift-(sdn|ovn-kubernetes|ingress|monitoring|image-registry|logging)"}[2m]) * 100 and on (node) kube_node_role{role="infra"}) by (namespace, container)) > 0
|
||||
metricName: containerCPU-AggregatedInfra
|
||||
|
||||
- query: (sum(container_memory_rss{pod!="",namespace="openshift-monitoring",name!="",container="prometheus"}) by (container, pod, namespace, node) and on (node) kube_node_role{role="infra"}) > 0
|
||||
metricName: containerMemory-Prometheus
|
||||
instant: True
|
||||
|
||||
- query: avg(container_memory_rss{name!="",container!="POD",namespace=~"openshift-(sdn|ovn-kubernetes|ingress)"} and on (node) kube_node_role{role="worker"}) by (container, namespace)
|
||||
metricName: containerMemory-AggregatedWorkers
|
||||
instant: True
|
||||
|
||||
- query: avg(container_memory_rss{name!="",container!="POD",namespace=~"openshift-(sdn|ovn-kubernetes|ingress|monitoring|image-registry|logging)"} and on (node) kube_node_role{role="infra"}) by (container, namespace)
|
||||
metricName: containerMemory-AggregatedInfra
|
||||
instant: True
|
||||
|
||||
# Node metrics
|
||||
- query: (sum(irate(node_cpu_seconds_total[2m])) by (mode,instance) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")) > 0
|
||||
metricName: nodeCPU-Masters
|
||||
instant: True
|
||||
|
||||
- query: max(max_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
|
||||
metricName: maxCPU-Masters
|
||||
instant: true
|
||||
|
||||
- query: avg(avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
|
||||
metricName: nodeMemory-Masters
|
||||
instant: true
|
||||
|
||||
- query: (avg((sum(irate(node_cpu_seconds_total[2m])) by (mode,instance) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))) by (mode)) > 0
|
||||
metricName: nodeCPU-AggregatedWorkers
|
||||
instant: True
|
||||
|
||||
- query: (avg((sum(irate(node_cpu_seconds_total[2m])) by (mode,instance) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))) by (mode)) > 0
|
||||
metricName: nodeCPU-AggregatedInfra
|
||||
instant: True
|
||||
|
||||
- query: avg(node_memory_MemAvailable_bytes) by (instance) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
|
||||
metricName: nodeMemoryAvailable-Masters
|
||||
- query: avg(avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
|
||||
metricName: nodeMemory-Masters
|
||||
instant: true
|
||||
|
||||
- query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
|
||||
metricName: maxMemory-Masters
|
||||
instant: true
|
||||
|
||||
- query: avg(node_memory_MemAvailable_bytes and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
|
||||
metricName: nodeMemoryAvailable-AggregatedWorkers
|
||||
instant: True
|
||||
|
||||
- query: max(max_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
|
||||
metricName: maxCPU-Workers
|
||||
instant: true
|
||||
|
||||
- query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
|
||||
metricName: maxMemory-Workers
|
||||
instant: true
|
||||
|
||||
- query: avg(node_memory_MemAvailable_bytes and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))
|
||||
metricName: nodeMemoryAvailable-AggregatedInfra
|
||||
instant: True
|
||||
|
||||
- query: avg(node_memory_Active_bytes) by (instance) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
|
||||
metricName: nodeMemoryActive-Masters
|
||||
instant: True
|
||||
|
||||
- query: avg(node_memory_Active_bytes and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
|
||||
metricName: nodeMemoryActive-AggregatedWorkers
|
||||
instant: True
|
||||
|
||||
- query: avg(avg(node_memory_Active_bytes) by (instance) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))
|
||||
metricName: nodeMemoryActive-AggregatedInfra
|
||||
|
||||
- query: avg(node_memory_Cached_bytes) by (instance) + avg(node_memory_Buffers_bytes) by (instance) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
|
||||
metricName: nodeMemoryCached+nodeMemoryBuffers-Masters
|
||||
|
||||
- query: avg(node_memory_Cached_bytes + node_memory_Buffers_bytes and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
|
||||
metricName: nodeMemoryCached+nodeMemoryBuffers-AggregatedWorkers
|
||||
|
||||
- query: avg(node_memory_Cached_bytes + node_memory_Buffers_bytes and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))
|
||||
metricName: nodeMemoryCached+nodeMemoryBuffers-AggregatedInfra
|
||||
|
||||
- query: irate(node_network_receive_bytes_total{device=~"^(ens|eth|bond|team).*"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
|
||||
metricName: rxNetworkBytes-Masters
|
||||
|
||||
- query: avg(irate(node_network_receive_bytes_total{device=~"^(ens|eth|bond|team).*"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (device)
|
||||
metricName: rxNetworkBytes-AggregatedWorkers
|
||||
|
||||
- query: avg(irate(node_network_receive_bytes_total{device=~"^(ens|eth|bond|team).*"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (device)
|
||||
metricName: rxNetworkBytes-AggregatedInfra
|
||||
|
||||
- query: irate(node_network_transmit_bytes_total{device=~"^(ens|eth|bond|team).*"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
|
||||
metricName: txNetworkBytes-Masters
|
||||
|
||||
- query: avg(irate(node_network_transmit_bytes_total{device=~"^(ens|eth|bond|team).*"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (device)
|
||||
metricName: txNetworkBytes-AggregatedWorkers
|
||||
|
||||
- query: avg(irate(node_network_transmit_bytes_total{device=~"^(ens|eth|bond|team).*"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (device)
|
||||
metricName: txNetworkBytes-AggregatedInfra
|
||||
|
||||
- query: rate(node_disk_written_bytes_total{device!~"^(dm|rb).*"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
|
||||
metricName: nodeDiskWrittenBytes-Masters
|
||||
|
||||
- query: avg(rate(node_disk_written_bytes_total{device!~"^(dm|rb).*"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (device)
|
||||
metricName: nodeDiskWrittenBytes-AggregatedWorkers
|
||||
|
||||
- query: avg(rate(node_disk_written_bytes_total{device!~"^(dm|rb).*"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (device)
|
||||
metricName: nodeDiskWrittenBytes-AggregatedInfra
|
||||
|
||||
- query: rate(node_disk_read_bytes_total{device!~"^(dm|rb).*"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
|
||||
metricName: nodeDiskReadBytes-Masters
|
||||
|
||||
- query: avg(rate(node_disk_read_bytes_total{device!~"^(dm|rb).*"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (device)
|
||||
metricName: nodeDiskReadBytes-AggregatedWorkers
|
||||
|
||||
- query: avg(rate(node_disk_read_bytes_total{device!~"^(dm|rb).*"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (device)
|
||||
metricName: nodeDiskReadBytes-AggregatedInfra
|
||||
instant: True
|
||||
|
||||
# Etcd metrics
|
||||
- query: sum(rate(etcd_server_leader_changes_seen_total[2m]))
|
||||
metricName: etcdLeaderChangesRate
|
||||
instant: True
|
||||
|
||||
- query: etcd_server_is_leader > 0
|
||||
metricName: etcdServerIsLeader
|
||||
instant: True
|
||||
|
||||
- query: histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket[2m]))
|
||||
metricName: 99thEtcdDiskBackendCommitDurationSeconds
|
||||
instant: True
|
||||
|
||||
- query: histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[2m]))
|
||||
metricName: 99thEtcdDiskWalFsyncDurationSeconds
|
||||
instant: True
|
||||
|
||||
- query: histogram_quantile(0.99, rate(etcd_network_peer_round_trip_time_seconds_bucket[5m]))
|
||||
metricName: 99thEtcdRoundTripTimeSeconds
|
||||
|
||||
- query: etcd_mvcc_db_total_size_in_bytes
|
||||
metricName: etcdDBPhysicalSizeBytes
|
||||
|
||||
- query: etcd_mvcc_db_total_size_in_use_in_bytes
|
||||
metricName: etcdDBLogicalSizeBytes
|
||||
instant: True
|
||||
|
||||
- query: sum by (cluster_version)(etcd_cluster_version)
|
||||
metricName: etcdVersion
|
||||
@@ -135,83 +128,16 @@ metrics:
|
||||
|
||||
- query: sum(rate(etcd_object_counts{}[5m])) by (resource) > 0
|
||||
metricName: etcdObjectCount
|
||||
instant: True
|
||||
|
||||
- query: histogram_quantile(0.99,sum(rate(etcd_request_duration_seconds_bucket[2m])) by (le,operation,apiserver)) > 0
|
||||
metricName: P99APIEtcdRequestLatency
|
||||
|
||||
- query: sum(grpc_server_started_total{namespace="openshift-etcd",grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{namespace="openshift-etcd",grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"})
|
||||
metricName: ActiveWatchStreams
|
||||
|
||||
- query: sum(grpc_server_started_total{namespace="openshift-etcd",grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{namespace="openshift-etcd",grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"})
|
||||
metricName: ActiveLeaseStreams
|
||||
|
||||
- query: sum(rate(etcd_debugging_snap_save_total_duration_seconds_sum{namespace="openshift-etcd"}[2m]))
|
||||
metricName: snapshotSaveLatency
|
||||
|
||||
- query: sum(rate(etcd_server_heartbeat_send_failures_total{namespace="openshift-etcd"}[2m]))
|
||||
metricName: HeartBeatFailures
|
||||
|
||||
- query: sum(rate(etcd_server_health_failures{namespace="openshift-etcd"}[2m]))
|
||||
metricName: HealthFailures
|
||||
|
||||
- query: sum(rate(etcd_server_slow_apply_total{namespace="openshift-etcd"}[2m]))
|
||||
metricName: SlowApplies
|
||||
|
||||
- query: sum(rate(etcd_server_slow_read_indexes_total{namespace="openshift-etcd"}[2m]))
|
||||
metricName: SlowIndexRead
|
||||
|
||||
- query: sum(etcd_server_proposals_pending)
|
||||
metricName: PendingProposals
|
||||
|
||||
- query: histogram_quantile(1.0, sum(rate(etcd_debugging_mvcc_db_compaction_pause_duration_milliseconds_bucket[1m])) by (le, instance))
|
||||
metricName: CompactionMaxPause
|
||||
instant: True
|
||||
|
||||
- query: sum by (instance) (apiserver_storage_objects)
|
||||
metricName: etcdTotalObjectCount
|
||||
instant: True
|
||||
|
||||
- query: topk(500, max by(resource) (apiserver_storage_objects))
|
||||
metricName: etcdTopObectCount
|
||||
|
||||
# Cluster metrics
|
||||
- query: count(kube_namespace_created)
|
||||
metricName: namespaceCount
|
||||
|
||||
- query: sum(kube_pod_status_phase{}) by (phase)
|
||||
metricName: podStatusCount
|
||||
|
||||
- query: count(kube_secret_info{})
|
||||
metricName: secretCount
|
||||
|
||||
- query: count(kube_deployment_labels{})
|
||||
metricName: deploymentCount
|
||||
|
||||
- query: count(kube_configmap_info{})
|
||||
metricName: configmapCount
|
||||
|
||||
- query: count(kube_service_info{})
|
||||
metricName: serviceCount
|
||||
|
||||
- query: kube_node_role
|
||||
metricName: nodeRoles
|
||||
instant: true
|
||||
|
||||
- query: sum(kube_node_status_condition{status="true"}) by (condition)
|
||||
metricName: nodeStatus
|
||||
|
||||
- query: (sum(rate(container_fs_writes_bytes_total{container!="",device!~".+dm.+"}[5m])) by (device, container, node) and on (node) kube_node_role{role="master"}) > 0
|
||||
metricName: containerDiskUsage
|
||||
|
||||
- query: cluster_version{type="completed"}
|
||||
metricName: clusterVersion
|
||||
instant: true
|
||||
|
||||
# Golang metrics
|
||||
|
||||
- query: go_memstats_heap_alloc_bytes{job=~"apiserver|api|etcd"}
|
||||
metricName: goHeapAllocBytes
|
||||
|
||||
- query: go_memstats_heap_inuse_bytes{job=~"apiserver|api|etcd"}
|
||||
metricName: goHeapInuseBytes
|
||||
|
||||
- query: go_gc_duration_seconds{job=~"apiserver|api|etcd",quantile="1"}
|
||||
metricName: goGCDurationSeconds
|
||||
instant: True
|
||||
|
||||
248
config/metrics-report.yaml
Normal file
248
config/metrics-report.yaml
Normal file
@@ -0,0 +1,248 @@
|
||||
metrics:
|
||||
|
||||
# API server
|
||||
- query: sum(apiserver_current_inflight_requests{}) by (request_kind) > 0
|
||||
metricName: APIInflightRequests
|
||||
instant: true
|
||||
|
||||
# Kubelet & CRI-O
|
||||
|
||||
# Average and max of the CPU usage from all worker's kubelet
|
||||
- query: avg(avg_over_time(irate(process_cpu_seconds_total{service="kubelet",job="kubelet"}[2m])[.elapsed:]) and on (node) kube_node_role{role="worker"})
|
||||
metricName: cpu-kubelet
|
||||
instant: true
|
||||
|
||||
- query: max(max_over_time(irate(process_cpu_seconds_total{service="kubelet",job="kubelet"}[2m])[.elapsed:]) and on (node) kube_node_role{role="worker"})
|
||||
metricName: max-cpu-kubelet
|
||||
instant: true
|
||||
|
||||
# Average of the memory usage from all worker's kubelet
|
||||
- query: avg(avg_over_time(process_resident_memory_bytes{service="kubelet",job="kubelet"}[.elapsed:]) and on (node) kube_node_role{role="worker"})
|
||||
metricName: memory-kubelet
|
||||
instant: true
|
||||
|
||||
# Max of the memory usage from all worker's kubelet
|
||||
- query: max(max_over_time(process_resident_memory_bytes{service="kubelet",job="kubelet"}[.elapsed:]) and on (node) kube_node_role{role="worker"})
|
||||
metricName: max-memory-kubelet
|
||||
instant: true
|
||||
|
||||
- query: max_over_time(sum(process_resident_memory_bytes{service="kubelet",job="kubelet"} and on (node) kube_node_role{role="worker"})[.elapsed:])
|
||||
metricName: max-memory-sum-kubelet
|
||||
instant: true
|
||||
|
||||
# Average and max of the CPU usage from all worker's CRI-O
|
||||
- query: avg(avg_over_time(irate(process_cpu_seconds_total{service="kubelet",job="crio"}[2m])[.elapsed:]) and on (node) kube_node_role{role="worker"})
|
||||
metricName: cpu-crio
|
||||
instant: true
|
||||
|
||||
- query: max(max_over_time(irate(process_cpu_seconds_total{service="kubelet",job="crio"}[2m])[.elapsed:]) and on (node) kube_node_role{role="worker"})
|
||||
metricName: max-cpu-crio
|
||||
instant: true
|
||||
|
||||
# Average of the memory usage from all worker's CRI-O
|
||||
- query: avg(avg_over_time(process_resident_memory_bytes{service="kubelet",job="crio"}[.elapsed:]) and on (node) kube_node_role{role="worker"})
|
||||
metricName: memory-crio
|
||||
instant: true
|
||||
|
||||
# Max of the memory usage from all worker's CRI-O
|
||||
- query: max(max_over_time(process_resident_memory_bytes{service="kubelet",job="crio"}[.elapsed:]) and on (node) kube_node_role{role="worker"})
|
||||
metricName: max-memory-crio
|
||||
instant: true
|
||||
|
||||
# Etcd
|
||||
|
||||
- query: avg(avg_over_time(histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket[2m]))[.elapsed:]))
|
||||
metricName: 99thEtcdDiskBackendCommit
|
||||
instant: true
|
||||
|
||||
- query: avg(avg_over_time(histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[2m]))[.elapsed:]))
|
||||
metricName: 99thEtcdDiskWalFsync
|
||||
instant: true
|
||||
|
||||
- query: avg(avg_over_time(histogram_quantile(0.99, irate(etcd_network_peer_round_trip_time_seconds_bucket[2m]))[.elapsed:]))
|
||||
metricName: 99thEtcdRoundTripTime
|
||||
instant: true
|
||||
|
||||
# Control-plane
|
||||
|
||||
- query: avg(avg_over_time(topk(1, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-kube-controller-manager"}[2m])) by (pod))[.elapsed:]))
|
||||
metricName: cpu-kube-controller-manager
|
||||
instant: true
|
||||
|
||||
- query: max(max_over_time(topk(1, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-kube-controller-manager"}[2m])) by (pod))[.elapsed:]))
|
||||
metricName: max-cpu-kube-controller-manager
|
||||
instant: true
|
||||
|
||||
- query: avg(avg_over_time(topk(1, sum(container_memory_rss{name!="", namespace="openshift-kube-controller-manager"}) by (pod))[.elapsed:]))
|
||||
metricName: memory-kube-controller-manager
|
||||
instant: true
|
||||
|
||||
- query: max(max_over_time(topk(1, sum(container_memory_rss{name!="", namespace="openshift-kube-controller-manager"}) by (pod))[.elapsed:]))
|
||||
metricName: max-memory-kube-controller-manager
|
||||
instant: true
|
||||
|
||||
- query: avg(avg_over_time(topk(3, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-kube-apiserver"}[2m])) by (pod))[.elapsed:]))
|
||||
metricName: cpu-kube-apiserver
|
||||
instant: true
|
||||
|
||||
- query: avg(avg_over_time(topk(3, sum(container_memory_rss{name!="", namespace="openshift-kube-apiserver"}) by (pod))[.elapsed:]))
|
||||
metricName: memory-kube-apiserver
|
||||
instant: true
|
||||
|
||||
- query: avg(avg_over_time(topk(3, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-apiserver"}[2m])) by (pod))[.elapsed:]))
|
||||
metricName: cpu-openshift-apiserver
|
||||
instant: true
|
||||
|
||||
- query: avg(avg_over_time(topk(3, sum(container_memory_rss{name!="", namespace="openshift-apiserver"}) by (pod))[.elapsed:]))
|
||||
metricName: memory-openshift-apiserver
|
||||
instant: true
|
||||
|
||||
- query: avg(avg_over_time(topk(3, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-etcd"}[2m])) by (pod))[.elapsed:]))
|
||||
metricName: cpu-etcd
|
||||
instant: true
|
||||
|
||||
- query: avg(avg_over_time(topk(3,sum(container_memory_rss{name!="", namespace="openshift-etcd"}) by (pod))[.elapsed:]))
|
||||
metricName: memory-etcd
|
||||
instant: true
|
||||
|
||||
- query: avg(avg_over_time(topk(1, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-controller-manager"}[2m])) by (pod))[.elapsed:]))
|
||||
metricName: cpu-openshift-controller-manager
|
||||
instant: true
|
||||
|
||||
- query: avg(avg_over_time(topk(1, sum(container_memory_rss{name!="", namespace="openshift-controller-manager"}) by (pod))[.elapsed:]))
|
||||
metricName: memory-openshift-controller-manager
|
||||
instant: true
|
||||
|
||||
# multus
|
||||
|
||||
- query: avg(avg_over_time(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-multus", pod=~"(multus).+", container!="POD"}[2m])[.elapsed:])) by (container)
|
||||
metricName: cpu-multus
|
||||
instant: true
|
||||
|
||||
- query: avg(avg_over_time(container_memory_rss{name!="", namespace="openshift-multus", pod=~"(multus).+", container!="POD"}[.elapsed:])) by (container)
|
||||
metricName: memory-multus
|
||||
instant: true
|
||||
|
||||
# OVNKubernetes - standard & IC
|
||||
|
||||
- query: avg(avg_over_time(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-ovn-kubernetes", pod=~"(ovnkube-master|ovnkube-control-plane).+", container!="POD"}[2m])[.elapsed:])) by (container)
|
||||
metricName: cpu-ovn-control-plane
|
||||
instant: true
|
||||
|
||||
- query: avg(avg_over_time(container_memory_rss{name!="", namespace="openshift-ovn-kubernetes", pod=~"(ovnkube-master|ovnkube-control-plane).+", container!="POD"}[.elapsed:])) by (container)
|
||||
metricName: memory-ovn-control-plane
|
||||
instant: true
|
||||
|
||||
- query: avg(avg_over_time(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-ovn-kubernetes", pod=~"ovnkube-node.+", container!="POD"}[2m])[.elapsed:])) by (container)
|
||||
metricName: cpu-ovnkube-node
|
||||
instant: true
|
||||
|
||||
- query: avg(avg_over_time(container_memory_rss{name!="", namespace="openshift-ovn-kubernetes", pod=~"ovnkube-node.+", container!="POD"}[.elapsed:])) by (container)
|
||||
metricName: memory-ovnkube-node
|
||||
instant: true
|
||||
|
||||
# Nodes
|
||||
|
||||
- query: avg(avg_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
|
||||
metricName: cpu-masters
|
||||
instant: true
|
||||
|
||||
- query: avg(avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
|
||||
metricName: memory-masters
|
||||
instant: true
|
||||
|
||||
- query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
|
||||
metricName: max-memory-masters
|
||||
instant: true
|
||||
|
||||
- query: avg(avg_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
|
||||
metricName: cpu-workers
|
||||
instant: true
|
||||
|
||||
- query: max(max_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
|
||||
metricName: max-cpu-workers
|
||||
instant: true
|
||||
|
||||
- query: avg(avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
|
||||
metricName: memory-workers
|
||||
instant: true
|
||||
|
||||
- query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
|
||||
metricName: max-memory-workers
|
||||
instant: true
|
||||
|
||||
- query: sum( (node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)") )
|
||||
metricName: memory-sum-workers
|
||||
instant: true
|
||||
|
||||
|
||||
- query: avg(avg_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
|
||||
metricName: cpu-infra
|
||||
instant: true
|
||||
|
||||
- query: max(max_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
|
||||
metricName: max-cpu-infra
|
||||
instant: true
|
||||
|
||||
- query: avg(avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))
|
||||
metricName: memory-infra
|
||||
instant: true
|
||||
|
||||
- query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))
|
||||
metricName: max-memory-infra
|
||||
instant: true
|
||||
|
||||
- query: max_over_time(sum((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))[.elapsed:])
|
||||
metricName: max-memory-sum-infra
|
||||
instant: true
|
||||
|
||||
# Monitoring and ingress
|
||||
|
||||
- query: avg(avg_over_time(sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-monitoring", pod=~"prometheus-k8s.+"}[2m])) by (pod)[.elapsed:]))
|
||||
metricName: cpu-prometheus
|
||||
instant: true
|
||||
|
||||
- query: max(max_over_time(sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-monitoring", pod=~"prometheus-k8s.+"}[2m])) by (pod)[.elapsed:]))
|
||||
metricName: max-cpu-prometheus
|
||||
instant: true
|
||||
|
||||
- query: avg(avg_over_time(sum(container_memory_rss{name!="", namespace="openshift-monitoring", pod=~"prometheus-k8s.+"}) by (pod)[.elapsed:]))
|
||||
metricName: memory-prometheus
|
||||
instant: true
|
||||
|
||||
- query: max(max_over_time(sum(container_memory_rss{name!="", namespace="openshift-monitoring", pod=~"prometheus-k8s.+"}) by (pod)[.elapsed:]))
|
||||
metricName: max-memory-prometheus
|
||||
instant: true
|
||||
|
||||
- query: avg(avg_over_time(sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-ingress", pod=~"router-default.+"}[2m])) by (pod)[.elapsed:]))
|
||||
metricName: cpu-router
|
||||
instant: true
|
||||
|
||||
- query: avg(avg_over_time(sum(container_memory_rss{name!="", namespace="openshift-ingress", pod=~"router-default.+"}) by (pod)[.elapsed:]))
|
||||
metricName: memory-router
|
||||
instant: true
|
||||
|
||||
# Cluster
|
||||
|
||||
- query: avg_over_time(cluster:memory_usage:ratio[.elapsed:])
|
||||
metricName: memory-cluster-usage-ratio
|
||||
instant: true
|
||||
|
||||
- query: avg_over_time(cluster:node_cpu:ratio[.elapsed:])
|
||||
metricName: cpu-cluster-usage-ratio
|
||||
instant: true
|
||||
|
||||
# Retain the raw CPU seconds totals for comparison
|
||||
- query: sum(node_cpu_seconds_total and on (instance) label_replace(kube_node_role{role="worker",role!="infra"}, "instance", "$1", "node", "(.+)")) by (mode)
|
||||
metricName: nodeCPUSeconds-Workers
|
||||
instant: true
|
||||
|
||||
|
||||
- query: sum(node_cpu_seconds_total and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")) by (mode)
|
||||
metricName: nodeCPUSeconds-Masters
|
||||
instant: true
|
||||
|
||||
|
||||
- query: sum(node_cpu_seconds_total and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (mode)
|
||||
metricName: nodeCPUSeconds-Infra
|
||||
instant: true
|
||||
@@ -1,13 +1,7 @@
|
||||
metrics:
|
||||
# API server
|
||||
- query: histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{apiserver="kube-apiserver", verb!~"WATCH", subresource!="log"}[2m])) by (verb,resource,subresource,instance,le)) > 0
|
||||
metricName: API99thLatency
|
||||
|
||||
- query: sum(irate(apiserver_request_total{apiserver="kube-apiserver",verb!="WATCH",subresource!="log"}[2m])) by (verb,instance,resource,code) > 0
|
||||
metricName: APIRequestRate
|
||||
|
||||
- query: sum(apiserver_current_inflight_requests{}) by (request_kind) > 0
|
||||
metricName: APIInflightRequests
|
||||
- query: irate(apiserver_request_total{verb="POST", resource="pods", subresource="binding",code="201"}[2m]) > 0
|
||||
metricName: schedulingThroughput
|
||||
|
||||
# Containers & pod metrics
|
||||
- query: sum(irate(container_cpu_usage_seconds_total{name!="",namespace=~"openshift-(etcd|oauth-apiserver|.*apiserver|ovn-kubernetes|sdn|ingress|authentication|.*controller-manager|.*scheduler|monitoring|logging|image-registry)"}[2m]) * 100) by (pod, namespace, node)
|
||||
@@ -33,8 +27,17 @@ metrics:
|
||||
metricName: crioMemory
|
||||
|
||||
# Node metrics
|
||||
- query: sum(irate(node_cpu_seconds_total[2m])) by (mode,instance) > 0
|
||||
metricName: nodeCPU
|
||||
- query: (sum(irate(node_cpu_seconds_total[2m])) by (mode,instance) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")) > 0
|
||||
metricName: nodeCPU-Masters
|
||||
|
||||
- query: (avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
|
||||
metricName: nodeMemory-Masters
|
||||
|
||||
- query: (sum(irate(node_cpu_seconds_total[2m])) by (mode,instance) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) > 0
|
||||
metricName: nodeCPU-Workers
|
||||
|
||||
- query: (avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[2m:]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
|
||||
metricName: nodeMemory-Workers
|
||||
|
||||
- query: avg(node_memory_MemAvailable_bytes) by (instance)
|
||||
metricName: nodeMemoryAvailable
|
||||
@@ -42,6 +45,9 @@ metrics:
|
||||
- query: avg(node_memory_Active_bytes) by (instance)
|
||||
metricName: nodeMemoryActive
|
||||
|
||||
- query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
|
||||
metricName: maxMemory-Masters
|
||||
|
||||
- query: avg(node_memory_Cached_bytes) by (instance) + avg(node_memory_Buffers_bytes) by (instance)
|
||||
metricName: nodeMemoryCached+nodeMemoryBuffers
|
||||
|
||||
@@ -84,34 +90,4 @@ metrics:
|
||||
|
||||
- query: sum by (cluster_version)(etcd_cluster_version)
|
||||
metricName: etcdVersion
|
||||
instant: true
|
||||
|
||||
# Cluster metrics
|
||||
- query: count(kube_namespace_created)
|
||||
metricName: namespaceCount
|
||||
|
||||
- query: sum(kube_pod_status_phase{}) by (phase)
|
||||
metricName: podStatusCount
|
||||
|
||||
- query: count(kube_secret_info{})
|
||||
metricName: secretCount
|
||||
|
||||
- query: count(kube_deployment_labels{})
|
||||
metricName: deploymentCount
|
||||
|
||||
- query: count(kube_configmap_info{})
|
||||
metricName: configmapCount
|
||||
|
||||
- query: count(kube_service_info{})
|
||||
metricName: serviceCount
|
||||
|
||||
- query: kube_node_role
|
||||
metricName: nodeRoles
|
||||
instant: true
|
||||
|
||||
- query: sum(kube_node_status_condition{status="true"}) by (condition)
|
||||
metricName: nodeStatus
|
||||
|
||||
- query: cluster_version{type="completed"}
|
||||
metricName: clusterVersion
|
||||
instant: true
|
||||
instant: true
|
||||
@@ -1,5 +1,5 @@
|
||||
application: openshift-etcd
|
||||
namespace: openshift-etcd
|
||||
namespaces: openshift-etcd
|
||||
labels: app=openshift-etcd
|
||||
kubeconfig: ~/.kube/config.yaml
|
||||
prometheus_endpoint: <Prometheus_Endpoint>
|
||||
@@ -7,6 +7,8 @@ auth_token: <Auth_Token>
|
||||
scrape_duration: 10m
|
||||
chaos_library: "kraken"
|
||||
log_level: INFO
|
||||
json_output_file: False
|
||||
json_output_folder_path:
|
||||
|
||||
# for output purpose only do not change if not needed
|
||||
chaos_tests:
|
||||
@@ -26,4 +28,8 @@ chaos_tests:
|
||||
- pod_network_chaos
|
||||
MEM:
|
||||
- node_memory_hog
|
||||
- pvc_disk_fill
|
||||
- pvc_disk_fill
|
||||
|
||||
threshold: .7
|
||||
cpu_threshold: .5
|
||||
mem_threshold: .5
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
# Dockerfile for kraken
|
||||
|
||||
FROM mcr.microsoft.com/azure-cli:latest as azure-cli
|
||||
|
||||
FROM registry.access.redhat.com/ubi8/ubi:latest
|
||||
|
||||
ENV KUBECONFIG /root/.kube/config
|
||||
|
||||
# Copy azure client binary from azure-cli image
|
||||
COPY --from=azure-cli /usr/local/bin/az /usr/bin/az
|
||||
|
||||
# Install dependencies
|
||||
RUN yum install -y git python39 python3-pip jq gettext wget && \
|
||||
python3.9 -m pip install -U pip && \
|
||||
git clone https://github.com/krkn-chaos/krkn.git --branch v1.5.5 /root/kraken && \
|
||||
mkdir -p /root/.kube && cd /root/kraken && \
|
||||
pip3.9 install -r requirements.txt && \
|
||||
pip3.9 install virtualenv && \
|
||||
wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O /usr/bin/yq && chmod +x /usr/bin/yq
|
||||
|
||||
# Get Kubernetes and OpenShift clients from stable releases
|
||||
WORKDIR /tmp
|
||||
RUN wget https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable/openshift-client-linux.tar.gz && tar -xvf openshift-client-linux.tar.gz && cp oc /usr/local/bin/oc && cp kubectl /usr/local/bin/kubectl
|
||||
|
||||
WORKDIR /root/kraken
|
||||
|
||||
ENTRYPOINT ["python3.9", "run_kraken.py"]
|
||||
CMD ["--config=config/config.yaml"]
|
||||
@@ -1,29 +0,0 @@
|
||||
# Dockerfile for kraken
|
||||
|
||||
FROM ppc64le/centos:8
|
||||
|
||||
FROM mcr.microsoft.com/azure-cli:latest as azure-cli
|
||||
|
||||
LABEL org.opencontainers.image.authors="Red Hat OpenShift Chaos Engineering"
|
||||
|
||||
ENV KUBECONFIG /root/.kube/config
|
||||
|
||||
# Copy azure client binary from azure-cli image
|
||||
COPY --from=azure-cli /usr/local/bin/az /usr/bin/az
|
||||
|
||||
# Install dependencies
|
||||
RUN yum install -y git python39 python3-pip jq gettext wget && \
|
||||
python3.9 -m pip install -U pip && \
|
||||
git clone https://github.com/redhat-chaos/krkn.git --branch v1.5.5 /root/kraken && \
|
||||
mkdir -p /root/.kube && cd /root/kraken && \
|
||||
pip3.9 install -r requirements.txt && \
|
||||
pip3.9 install virtualenv && \
|
||||
wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O /usr/bin/yq && chmod +x /usr/bin/yq
|
||||
|
||||
# Get Kubernetes and OpenShift clients from stable releases
|
||||
WORKDIR /tmp
|
||||
RUN wget https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable/openshift-client-linux.tar.gz && tar -xvf openshift-client-linux.tar.gz && cp oc /usr/local/bin/oc && cp kubectl /usr/local/bin/kubectl
|
||||
|
||||
WORKDIR /root/kraken
|
||||
|
||||
ENTRYPOINT python3.9 run_kraken.py --config=config/config.yaml
|
||||
90
containers/Dockerfile.template
Normal file
90
containers/Dockerfile.template
Normal file
@@ -0,0 +1,90 @@
|
||||
# oc build
|
||||
FROM golang:1.24.9 AS oc-build
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends libkrb5-dev
|
||||
WORKDIR /tmp
|
||||
# oc build
|
||||
RUN git clone --branch release-4.18 https://github.com/openshift/oc.git
|
||||
WORKDIR /tmp/oc
|
||||
RUN go mod edit -go 1.24.9 &&\
|
||||
go mod edit -require github.com/moby/buildkit@v0.12.5 &&\
|
||||
go mod edit -require github.com/containerd/containerd@v1.7.29&&\
|
||||
go mod edit -require github.com/docker/docker@v27.5.1+incompatible&&\
|
||||
go mod edit -require github.com/opencontainers/runc@v1.2.8&&\
|
||||
go mod edit -require github.com/go-git/go-git/v5@v5.13.0&&\
|
||||
go mod edit -require github.com/opencontainers/selinux@v1.13.0&&\
|
||||
go mod edit -require github.com/ulikunitz/xz@v0.5.15&&\
|
||||
go mod edit -require golang.org/x/net@v0.38.0&&\
|
||||
go mod edit -require github.com/containerd/containerd@v1.7.27&&\
|
||||
go mod edit -require golang.org/x/oauth2@v0.27.0&&\
|
||||
go mod edit -require golang.org/x/crypto@v0.35.0&&\
|
||||
go mod edit -replace github.com/containerd/containerd@v1.7.27=github.com/containerd/containerd@v1.7.29&&\
|
||||
go mod tidy && go mod vendor
|
||||
|
||||
RUN make GO_REQUIRED_MIN_VERSION:= oc
|
||||
|
||||
# virtctl build
|
||||
WORKDIR /tmp
|
||||
RUN git clone https://github.com/kubevirt/kubevirt.git
|
||||
WORKDIR /tmp/kubevirt
|
||||
RUN go mod edit -go 1.24.9 &&\
|
||||
go work use &&\
|
||||
go build -o virtctl ./cmd/virtctl/
|
||||
|
||||
FROM fedora:40
|
||||
ARG PR_NUMBER
|
||||
ARG TAG
|
||||
RUN groupadd -g 1001 krkn && useradd -m -u 1001 -g krkn krkn
|
||||
RUN dnf update -y
|
||||
|
||||
ENV KUBECONFIG /home/krkn/.kube/config
|
||||
|
||||
|
||||
# This overwrites any existing configuration in /etc/yum.repos.d/kubernetes.repo
|
||||
RUN dnf update && dnf install -y --setopt=install_weak_deps=False \
|
||||
git python3.11 jq yq gettext wget which ipmitool openssh-server &&\
|
||||
dnf clean all
|
||||
|
||||
# copy oc client binary from oc-build image
|
||||
COPY --from=oc-build /tmp/oc/oc /usr/bin/oc
|
||||
COPY --from=oc-build /tmp/kubevirt/virtctl /usr/bin/virtctl
|
||||
|
||||
# krkn build
|
||||
RUN git clone https://github.com/krkn-chaos/krkn.git /home/krkn/kraken && \
|
||||
mkdir -p /home/krkn/.kube
|
||||
|
||||
RUN mkdir -p /home/krkn/.ssh && \
|
||||
chmod 700 /home/krkn/.ssh
|
||||
|
||||
WORKDIR /home/krkn/kraken
|
||||
|
||||
# default behaviour will be to build main
|
||||
# if it is a PR trigger the PR itself will be checked out
|
||||
RUN if [ -n "$PR_NUMBER" ]; then git fetch origin pull/${PR_NUMBER}/head:pr-${PR_NUMBER} && git checkout pr-${PR_NUMBER};fi
|
||||
# if it is a TAG trigger checkout the tag
|
||||
RUN if [ -n "$TAG" ]; then git checkout "$TAG";fi
|
||||
|
||||
RUN python3.11 -m ensurepip --upgrade --default-pip
|
||||
RUN python3.11 -m pip install --upgrade pip setuptools==78.1.1
|
||||
|
||||
# removes the the vulnerable versions of setuptools and pip
|
||||
RUN rm -rf "$(pip cache dir)"
|
||||
RUN rm -rf /tmp/*
|
||||
RUN rm -rf /usr/local/lib/python3.11/ensurepip/_bundled
|
||||
RUN pip3.11 install -r requirements.txt
|
||||
RUN pip3.11 install jsonschema
|
||||
|
||||
LABEL krknctl.title.global="Krkn Base Image"
|
||||
LABEL krknctl.description.global="This is the krkn base image."
|
||||
LABEL krknctl.input_fields.global='$KRKNCTL_INPUT'
|
||||
|
||||
# SSH setup script
|
||||
RUN chmod +x /home/krkn/kraken/containers/setup-ssh.sh
|
||||
|
||||
# Main entrypoint script
|
||||
RUN chmod +x /home/krkn/kraken/containers/entrypoint.sh
|
||||
|
||||
RUN chown -R krkn:krkn /home/krkn && chmod 755 /home/krkn
|
||||
USER krkn
|
||||
|
||||
ENTRYPOINT ["/bin/bash", "/home/krkn/kraken/containers/entrypoint.sh"]
|
||||
CMD ["--config=config/config.yaml"]
|
||||
@@ -6,41 +6,9 @@ Container image gets automatically built by quay.io at [Kraken image](https://qu
|
||||
|
||||
### Run containerized version
|
||||
|
||||
Refer [instructions](https://github.com/redhat-chaos/krkn/blob/main/docs/installation.md#run-containerized-version) for information on how to run the containerized version of kraken.
|
||||
Refer [instructions](https://krkn-chaos.dev/docs/installation/) for information on how to run the containerized version of kraken.
|
||||
|
||||
|
||||
### Run Custom Kraken Image
|
||||
|
||||
Refer to [instructions](https://github.com/redhat-chaos/krkn/blob/main/containers/build_own_image-README.md) for information on how to run a custom containerized version of kraken using podman.
|
||||
|
||||
|
||||
### Kraken as a KubeApp ( Unsupported and not recommended )
|
||||
|
||||
#### GENERAL NOTES:
|
||||
|
||||
- It is not generally recommended to run Kraken internal to the cluster as the pod which is running Kraken might get disrupted, the suggested use case to run kraken from inside k8s/OpenShift is to target **another** cluster (eg. to bypass network restrictions or to leverage cluster's computational resources)
|
||||
|
||||
- your kubeconfig might contain several cluster contexts and credentials so be sure, before creating the ConfigMap, to keep **only** the credentials related to the destination cluster. Please refer to the [Kubernetes documentation](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) for more details
|
||||
- to add privileges to the service account you must be logged in the cluster with an highly privileged account (ideally kubeadmin)
|
||||
|
||||
|
||||
|
||||
To run containerized Kraken as a Kubernetes/OpenShift Deployment, follow these steps:
|
||||
|
||||
1. Configure the [config.yaml](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml) file according to your requirements.
|
||||
|
||||
**NOTE**: both the scenarios ConfigMaps are needed regardless you're running kraken in Kubernetes or OpenShift
|
||||
|
||||
2. Create a namespace under which you want to run the kraken pod using `kubectl create ns <namespace>`.
|
||||
3. Switch to `<namespace>` namespace:
|
||||
- In Kubernetes, use `kubectl config set-context --current --namespace=<namespace>`
|
||||
- In OpenShift, use `oc project <namespace>`
|
||||
|
||||
4. Create a ConfigMap named kube-config using `kubectl create configmap kube-config --from-file=<path_to_kubeconfig>` *(eg. ~/.kube/config)*
|
||||
5. Create a ConfigMap named kraken-config using `kubectl create configmap kraken-config --from-file=<path_to_kraken>/config`
|
||||
6. Create a ConfigMap named scenarios-config using `kubectl create configmap scenarios-config --from-file=<path_to_kraken>/scenarios`
|
||||
7. Create a ConfigMap named scenarios-openshift-config using `kubectl create configmap scenarios-openshift-config --from-file=<path_to_kraken>/scenarios/openshift`
|
||||
8. Create a ConfigMap named scenarios-kube-config using `kubectl create configmap scenarios-kube-config --from-file=<path_to_kraken>/scenarios/kube`
|
||||
9. Create a service account to run the kraken pod `kubectl create serviceaccount useroot`.
|
||||
10. In Openshift, add privileges to service account and execute `oc adm policy add-scc-to-user privileged -z useroot`.
|
||||
11. Create a Job using `kubectl apply -f <path_to_kraken>/containers/kraken.yml` and monitor the status using `oc get jobs` and `oc get pods`.
|
||||
|
||||
5
containers/compile_dockerfile.sh
Executable file
5
containers/compile_dockerfile.sh
Executable file
@@ -0,0 +1,5 @@
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
cd "$SCRIPT_DIR"
|
||||
export KRKNCTL_INPUT=$(cat krknctl-input.json|tr -d "\n")
|
||||
|
||||
envsubst '${KRKNCTL_INPUT}' < Dockerfile.template > Dockerfile
|
||||
8
containers/entrypoint.sh
Normal file
8
containers/entrypoint.sh
Normal file
@@ -0,0 +1,8 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
# Run SSH setup
|
||||
./containers/setup-ssh.sh
|
||||
# Change to kraken directory
|
||||
|
||||
# Execute the main command
|
||||
exec python3.9 run_kraken.py "$@"
|
||||
@@ -1,49 +0,0 @@
|
||||
---
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: kraken
|
||||
spec:
|
||||
parallelism: 1
|
||||
completions: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
tool: Kraken
|
||||
spec:
|
||||
serviceAccountName: useroot
|
||||
containers:
|
||||
- name: kraken
|
||||
securityContext:
|
||||
privileged: true
|
||||
image: quay.io/redhat-chaos/krkn
|
||||
command: ["/bin/sh", "-c"]
|
||||
args: ["python3.9 run_kraken.py -c config/config.yaml"]
|
||||
volumeMounts:
|
||||
- mountPath: "/root/.kube"
|
||||
name: config
|
||||
- mountPath: "/root/kraken/config"
|
||||
name: kraken-config
|
||||
- mountPath: "/root/kraken/scenarios"
|
||||
name: scenarios-config
|
||||
- mountPath: "/root/kraken/scenarios/openshift"
|
||||
name: scenarios-openshift-config
|
||||
- mountPath: "/root/kraken/scenarios/kube"
|
||||
name: scenarios-kube-config
|
||||
restartPolicy: Never
|
||||
volumes:
|
||||
- name: config
|
||||
configMap:
|
||||
name: kube-config
|
||||
- name: kraken-config
|
||||
configMap:
|
||||
name: kraken-config
|
||||
- name: scenarios-config
|
||||
configMap:
|
||||
name: scenarios-config
|
||||
- name: scenarios-openshift-config
|
||||
configMap:
|
||||
name: scenarios-openshift-config
|
||||
- name: scenarios-kube-config
|
||||
configMap:
|
||||
name: scenarios-kube-config
|
||||
553
containers/krknctl-input.json
Normal file
553
containers/krknctl-input.json
Normal file
@@ -0,0 +1,553 @@
|
||||
[
|
||||
{
|
||||
"name": "cerberus-enabled",
|
||||
"short_description": "Enable Cerberus",
|
||||
"description": "Enables Cerberus Support",
|
||||
"variable": "CERBERUS_ENABLED",
|
||||
"type": "enum",
|
||||
"default": "False",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "cerberus-url",
|
||||
"short_description": "Cerberus URL",
|
||||
"description": "Cerberus http url",
|
||||
"variable": "CERBERUS_URL",
|
||||
"type": "string",
|
||||
"default": "http://0.0.0.0:8080",
|
||||
"validator": "^(http|https):\/\/.*",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "distribution",
|
||||
"short_description": "Orchestrator distribution",
|
||||
"description": "Selects the orchestrator distribution",
|
||||
"variable": "DISTRIBUTION",
|
||||
"type": "enum",
|
||||
"default": "openshift",
|
||||
"allowed_values": "openshift,kubernetes",
|
||||
"separator": ",",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "ssh-public-key",
|
||||
"short_description": "Krkn ssh public key path",
|
||||
"description": "Sets the path where krkn will search for ssh public key (in container)",
|
||||
"variable": "KRKN_SSH_PUBLIC",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "ssh-private-key",
|
||||
"short_description": "Krkn ssh private key path",
|
||||
"description": "Sets the path where krkn will search for ssh private key (in container)",
|
||||
"variable": "KRKN_SSH_PRIVATE",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "krkn-kubeconfig",
|
||||
"short_description": "Krkn kubeconfig path",
|
||||
"description": "Sets the path where krkn will search for kubeconfig (in container)",
|
||||
"variable": "KRKN_KUBE_CONFIG",
|
||||
"type": "string",
|
||||
"default": "/home/krkn/.kube/config",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "wait-duration",
|
||||
"short_description": "Post chaos wait duration",
|
||||
"description": "waits for a certain amount of time after the scenario",
|
||||
"variable": "WAIT_DURATION",
|
||||
"type": "number",
|
||||
"default": "1"
|
||||
},
|
||||
{
|
||||
"name": "iterations",
|
||||
"short_description": "Chaos scenario iterations",
|
||||
"description": "number of times the same chaos scenario will be executed",
|
||||
"variable": "ITERATIONS",
|
||||
"type": "number",
|
||||
"default": "1"
|
||||
},
|
||||
{
|
||||
"name": "daemon-mode",
|
||||
"short_description": "Sets krkn daemon mode",
|
||||
"description": "if set the scenario will execute forever",
|
||||
"variable": "DAEMON_MODE",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "prometheus-url",
|
||||
"short_description": "Prometheus url",
|
||||
"description": "Prometheus url for when running on kuberenetes",
|
||||
"variable": "PROMETHEUS_URL",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "prometheus-token",
|
||||
"short_description": "Prometheus bearer token",
|
||||
"description": "Prometheus bearer token for prometheus url authentication",
|
||||
"variable": "PROMETHEUS_TOKEN",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "uuid",
|
||||
"short_description": "Sets krkn run uuid",
|
||||
"description": "sets krkn run uuid instead of generating it",
|
||||
"variable": "UUID",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "capture-metrics",
|
||||
"short_description": "Enables metrics capture",
|
||||
"description": "Enables metrics capture",
|
||||
"variable": "CAPTURE_METRICS",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "enable-alerts",
|
||||
"short_description": "Enables cluster alerts check",
|
||||
"description": "Enables cluster alerts check",
|
||||
"variable": "ENABLE_ALERTS",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "alerts-path",
|
||||
"short_description": "Cluster alerts path file (in container)",
|
||||
"description": "Allows to specify a different alert file path",
|
||||
"variable": "ALERTS_PATH",
|
||||
"type": "string",
|
||||
"default": "config/alerts.yaml",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "metrics-path",
|
||||
"short_description": "Cluster metrics path file (in container)",
|
||||
"description": "Allows to specify a different metrics file path",
|
||||
"variable": "METRICS_PATH",
|
||||
"type": "string",
|
||||
"default": "config/metrics-aggregated.yaml",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "enable-es",
|
||||
"short_description": "Enables elastic search data collection",
|
||||
"description": "Enables elastic search data collection",
|
||||
"variable": "ENABLE_ES",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "es-server",
|
||||
"short_description": "Elasticsearch instance URL",
|
||||
"description": "Elasticsearch instance URL",
|
||||
"variable": "ES_SERVER",
|
||||
"type": "string",
|
||||
"default": "http://0.0.0.0",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "es-port",
|
||||
"short_description": "Elasticsearch instance port",
|
||||
"description": "Elasticsearch instance port",
|
||||
"variable": "ES_PORT",
|
||||
"type": "number",
|
||||
"default": "443",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "es-username",
|
||||
"short_description": "Elasticsearch instance username",
|
||||
"description": "Elasticsearch instance username",
|
||||
"variable": "ES_USERNAME",
|
||||
"type": "string",
|
||||
"default": "elastic",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "es-password",
|
||||
"short_description": "Elasticsearch instance password",
|
||||
"description": "Elasticsearch instance password",
|
||||
"variable": "ES_PASSWORD",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "es-verify-certs",
|
||||
"short_description": "Enables elasticsearch TLS certificate verification",
|
||||
"description": "Enables elasticsearch TLS certificate verification",
|
||||
"variable": "ES_VERIFY_CERTS",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "es-metrics-index",
|
||||
"short_description": "Elasticsearch metrics index",
|
||||
"description": "Index name for metrics in Elasticsearch",
|
||||
"variable": "ES_METRICS_INDEX",
|
||||
"type": "string",
|
||||
"default": "krkn-metrics",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "es-alerts-index",
|
||||
"short_description": "Elasticsearch alerts index",
|
||||
"description": "Index name for alerts in Elasticsearch",
|
||||
"variable": "ES_ALERTS_INDEX",
|
||||
"type": "string",
|
||||
"default": "krkn-alerts",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "es-telemetry-index",
|
||||
"short_description": "Elasticsearch telemetry index",
|
||||
"description": "Index name for telemetry in Elasticsearch",
|
||||
"variable": "ES_TELEMETRY_INDEX",
|
||||
"type": "string",
|
||||
"default": "krkn-telemetry",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "check-critical-alerts",
|
||||
"short_description": "Check critical alerts",
|
||||
"description": "Enables checking for critical alerts",
|
||||
"variable": "CHECK_CRITICAL_ALERTS",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-enabled",
|
||||
"short_description": "Enable telemetry",
|
||||
"description": "Enables telemetry support",
|
||||
"variable": "TELEMETRY_ENABLED",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-api-url",
|
||||
"short_description": "Telemetry API URL",
|
||||
"description": "API endpoint for telemetry data",
|
||||
"variable": "TELEMETRY_API_URL",
|
||||
"type": "string",
|
||||
"default": "https://ulnmf9xv7j.execute-api.us-west-2.amazonaws.com/production",
|
||||
"validator": "^(http|https):\/\/.*",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-username",
|
||||
"short_description": "Telemetry username",
|
||||
"description": "Username for telemetry authentication",
|
||||
"variable": "TELEMETRY_USERNAME",
|
||||
"type": "string",
|
||||
"default": "redhat-chaos",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-password",
|
||||
"short_description": "Telemetry password",
|
||||
"description": "Password for telemetry authentication",
|
||||
"variable": "TELEMETRY_PASSWORD",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-prometheus-backup",
|
||||
"short_description": "Prometheus backup for telemetry",
|
||||
"description": "Enables Prometheus backup for telemetry",
|
||||
"variable": "TELEMETRY_PROMETHEUS_BACKUP",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "True",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-full-prometheus-backup",
|
||||
"short_description": "Full Prometheus backup",
|
||||
"description": "Enables full Prometheus backup for telemetry",
|
||||
"variable": "TELEMETRY_FULL_PROMETHEUS_BACKUP",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-backup-threads",
|
||||
"short_description": "Telemetry backup threads",
|
||||
"description": "Number of threads for telemetry backup",
|
||||
"variable": "TELEMETRY_BACKUP_THREADS",
|
||||
"type": "number",
|
||||
"default": "5",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-archive-path",
|
||||
"short_description": "Telemetry archive path",
|
||||
"description": "Path to save telemetry archive",
|
||||
"variable": "TELEMETRY_ARCHIVE_PATH",
|
||||
"type": "string",
|
||||
"default": "/tmp",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-max-retries",
|
||||
"short_description": "Telemetry max retries",
|
||||
"description": "Maximum retries for telemetry operations",
|
||||
"variable": "TELEMETRY_MAX_RETRIES",
|
||||
"type": "number",
|
||||
"default": "0",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-run-tag",
|
||||
"short_description": "Telemetry run tag",
|
||||
"description": "Tag for telemetry run",
|
||||
"variable": "TELEMETRY_RUN_TAG",
|
||||
"type": "string",
|
||||
"default": "chaos",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-group",
|
||||
"short_description": "Telemetry group",
|
||||
"description": "Group name for telemetry data",
|
||||
"variable": "TELEMETRY_GROUP",
|
||||
"type": "string",
|
||||
"default": "default",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-archive-size",
|
||||
"short_description": "Telemetry archive size",
|
||||
"description": "Maximum size for telemetry archives",
|
||||
"variable": "TELEMETRY_ARCHIVE_SIZE",
|
||||
"type": "number",
|
||||
"default": "1000",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-logs-backup",
|
||||
"short_description": "Telemetry logs backup",
|
||||
"description": "Enables logs backup for telemetry",
|
||||
"variable": "TELEMETRY_LOGS_BACKUP",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-filter-pattern",
|
||||
"short_description": "Telemetry filter pattern",
|
||||
"description": "Filter pattern for telemetry logs",
|
||||
"variable": "TELEMETRY_FILTER_PATTERN",
|
||||
"type": "string",
|
||||
"default": "[\"(\\\\w{3}\\\\s\\\\d{1,2}\\\\s\\\\d{2}:\\\\d{2}:\\\\d{2}\\\\.\\\\d+).+\",\"kinit (\\\\d+/\\\\d+/\\\\d+\\\\s\\\\d{2}:\\\\d{2}:\\\\d{2})\\\\s+\",\"(\\\\d{4}-\\\\d{2}-\\\\d{2}T\\\\d{2}:\\\\d{2}:\\\\d{2}\\\\.\\\\d+Z).+\"]",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-cli-path",
|
||||
"short_description": "Telemetry CLI path (oc)",
|
||||
"description": "Path to telemetry CLI tool (oc)",
|
||||
"variable": "TELEMETRY_CLI_PATH",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-events-backup",
|
||||
"short_description": "Telemetry events backup",
|
||||
"description": "Enables events backup for telemetry",
|
||||
"variable": "TELEMETRY_EVENTS_BACKUP",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "True",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "health-check-interval",
|
||||
"short_description": "Heath check interval",
|
||||
"description": "How often to check the health check urls",
|
||||
"variable": "HEALTH_CHECK_INTERVAL",
|
||||
"type": "number",
|
||||
"default": "2",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "health-check-url",
|
||||
"short_description": "Health check url",
|
||||
"description": "Url to check the health of",
|
||||
"variable": "HEALTH_CHECK_URL",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "health-check-auth",
|
||||
"short_description": "Health check authentication tuple",
|
||||
"description": "Authentication tuple to authenticate into health check URL",
|
||||
"variable": "HEALTH_CHECK_AUTH",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "health-check-bearer-token",
|
||||
"short_description": "Health check bearer token",
|
||||
"description": "Bearer token to authenticate into health check URL",
|
||||
"variable": "HEALTH_CHECK_BEARER_TOKEN",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "health-check-exit",
|
||||
"short_description": "Health check exit on failure",
|
||||
"description": "Exit on failure when health check URL is not able to connect",
|
||||
"variable": "HEALTH_CHECK_EXIT_ON_FAILURE",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "health-check-verify",
|
||||
"short_description": "SSL Verification of health check url",
|
||||
"description": "SSL Verification to authenticate into health check URL",
|
||||
"variable": "HEALTH_CHECK_VERIFY",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "kubevirt-check-interval",
|
||||
"short_description": "Kube Virt check interval",
|
||||
"description": "How often to check the kube virt check Vms ssh status",
|
||||
"variable": "KUBE_VIRT_CHECK_INTERVAL",
|
||||
"type": "number",
|
||||
"default": "2",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "kubevirt-namespace",
|
||||
"short_description": "KubeVirt namespace to check",
|
||||
"description": "KubeVirt namespace to check the health of",
|
||||
"variable": "KUBE_VIRT_NAMESPACE",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "kubevirt-name",
|
||||
"short_description": "KubeVirt regex names to watch",
|
||||
"description": "KubeVirt regex names to check VMs",
|
||||
"variable": "KUBE_VIRT_NAME",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "kubevirt-only-failures",
|
||||
"short_description": "KubeVirt checks only report if failure occurs",
|
||||
"description": "KubeVirt checks only report if failure occurs",
|
||||
"variable": "KUBE_VIRT_FAILURES",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False,true,false",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "kubevirt-disconnected",
|
||||
"short_description": "KubeVirt checks in disconnected mode",
|
||||
"description": "KubeVirt checks in disconnected mode, bypassing the clusters Api",
|
||||
"variable": "KUBE_VIRT_DISCONNECTED",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False,true,false",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "kubevirt-ssh-node",
|
||||
"short_description": "KubeVirt node to ssh from",
|
||||
"description": "KubeVirt node to ssh from, should be available whole chaos run",
|
||||
"variable": "KUBE_VIRT_SSH_NODE",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "kubevirt-exit-on-failure",
|
||||
"short_description": "KubeVirt fail if failed vms at end of run",
|
||||
"description": "KubeVirt fails run if vms still have false status",
|
||||
"variable": "KUBE_VIRT_EXIT_ON_FAIL",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False,true,false",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "kubevirt-node-node",
|
||||
"short_description": "KubeVirt node to filter vms on",
|
||||
"description": "Only track VMs in KubeVirt on given node name",
|
||||
"variable": "KUBE_VIRT_NODE_NAME",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "krkn-debug",
|
||||
"short_description": "Krkn debug mode",
|
||||
"description": "Enables debug mode for Krkn",
|
||||
"variable": "KRKN_DEBUG",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
}
|
||||
]
|
||||
73
containers/setup-ssh.sh
Normal file
73
containers/setup-ssh.sh
Normal file
@@ -0,0 +1,73 @@
|
||||
#!/bin/bash
|
||||
# Setup SSH key if mounted
|
||||
# Support multiple mount locations
|
||||
MOUNTED_PRIVATE_KEY_ALT="/secrets/id_rsa"
|
||||
MOUNTED_PRIVATE_KEY="/home/krkn/.ssh/id_rsa"
|
||||
MOUNTED_PUBLIC_KEY="/home/krkn/.ssh/id_rsa.pub"
|
||||
WORKING_KEY="/home/krkn/.ssh/id_rsa.key"
|
||||
|
||||
# Determine which source to use
|
||||
SOURCE_KEY=""
|
||||
if [ -f "$MOUNTED_PRIVATE_KEY_ALT" ]; then
|
||||
SOURCE_KEY="$MOUNTED_PRIVATE_KEY_ALT"
|
||||
echo "Found SSH key at alternative location: $SOURCE_KEY"
|
||||
elif [ -f "$MOUNTED_PRIVATE_KEY" ]; then
|
||||
SOURCE_KEY="$MOUNTED_PRIVATE_KEY"
|
||||
echo "Found SSH key at default location: $SOURCE_KEY"
|
||||
fi
|
||||
|
||||
# Setup SSH private key and create config for outbound connections
|
||||
if [ -n "$SOURCE_KEY" ]; then
|
||||
echo "Setting up SSH private key from: $SOURCE_KEY"
|
||||
|
||||
# Check current permissions and ownership
|
||||
ls -la "$SOURCE_KEY"
|
||||
|
||||
# Since the mounted key might be owned by root and we run as krkn user,
|
||||
# we cannot modify it directly. Copy to a new location we can control.
|
||||
echo "Copying SSH key to working location: $WORKING_KEY"
|
||||
|
||||
# Try to copy - if readable by anyone, this will work
|
||||
if cp "$SOURCE_KEY" "$WORKING_KEY" 2>/dev/null || cat "$SOURCE_KEY" > "$WORKING_KEY" 2>/dev/null; then
|
||||
chmod 600 "$WORKING_KEY"
|
||||
echo "SSH key copied successfully"
|
||||
ls -la "$WORKING_KEY"
|
||||
|
||||
# Verify the key is readable
|
||||
if ssh-keygen -y -f "$WORKING_KEY" > /dev/null 2>&1; then
|
||||
echo "SSH private key verified successfully"
|
||||
else
|
||||
echo "Warning: SSH key verification failed, but continuing anyway"
|
||||
fi
|
||||
|
||||
# Create SSH config to use the working key
|
||||
cat > /home/krkn/.ssh/config <<EOF
|
||||
Host *
|
||||
IdentityFile $WORKING_KEY
|
||||
StrictHostKeyChecking no
|
||||
UserKnownHostsFile /dev/null
|
||||
EOF
|
||||
chmod 600 /home/krkn/.ssh/config
|
||||
echo "SSH config created with default identity: $WORKING_KEY"
|
||||
else
|
||||
echo "ERROR: Cannot read SSH key at $SOURCE_KEY"
|
||||
echo "Key is owned by: $(stat -c '%U:%G' "$SOURCE_KEY" 2>/dev/null || stat -f '%Su:%Sg' "$SOURCE_KEY" 2>/dev/null)"
|
||||
echo ""
|
||||
echo "Solutions:"
|
||||
echo "1. Mount with world-readable permissions (less secure): chmod 644 /path/to/key"
|
||||
echo "2. Mount to /secrets/id_rsa instead of /home/krkn/.ssh/id_rsa"
|
||||
echo "3. Change ownership on host: chown \$(id -u):\$(id -g) /path/to/key"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Setup SSH public key if mounted (for inbound server access)
|
||||
if [ -f "$MOUNTED_PUBLIC_KEY" ]; then
|
||||
echo "SSH public key already present at $MOUNTED_PUBLIC_KEY"
|
||||
# Try to fix permissions (will fail silently if file is mounted read-only or owned by another user)
|
||||
chmod 600 "$MOUNTED_PUBLIC_KEY" 2>/dev/null
|
||||
if [ ! -f "/home/krkn/.ssh/authorized_keys" ]; then
|
||||
cp "$MOUNTED_PUBLIC_KEY" /home/krkn/.ssh/authorized_keys
|
||||
chmod 600 /home/krkn/.ssh/authorized_keys
|
||||
fi
|
||||
fi
|
||||
@@ -1,31 +0,0 @@
|
||||
version: "3"
|
||||
services:
|
||||
elastic:
|
||||
image: docker.elastic.co/elasticsearch/elasticsearch:7.13.2
|
||||
deploy:
|
||||
replicas: 1
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
network_mode: host
|
||||
environment:
|
||||
discovery.type: single-node
|
||||
kibana:
|
||||
image: docker.elastic.co/kibana/kibana:7.13.2
|
||||
deploy:
|
||||
replicas: 1
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
network_mode: host
|
||||
environment:
|
||||
ELASTICSEARCH_HOSTS: "http://0.0.0.0:9200"
|
||||
cerberus:
|
||||
image: quay.io/openshift-scale/cerberus:latest
|
||||
privileged: true
|
||||
deploy:
|
||||
replicas: 1
|
||||
restart_policy:
|
||||
condition: on-failure
|
||||
network_mode: host
|
||||
volumes:
|
||||
- ./config/cerberus.yaml:/root/cerberus/config/config.yaml:Z # Modify the config in case of the need to monitor additional components
|
||||
- ${HOME}/.kube/config:/root/.kube/config:Z
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user