Mirror of https://github.com/krkn-chaos/krkn.git (synced 2026-02-15 10:29:59 +00:00)

Compare commits: 151 commits
Commits in this comparison (SHA1):

4f305e78aa, b17e933134, beea484597, 0222b0f161, f7e674d5ad, 7aea12ce6c, 625e1e90cf, a9f1ce8f1b, 66e364e293, 898ce76648,
4a0f4e7cab, 819191866d, 37ca4bbce7, b9dd4e40d3, 3fd249bb88, 773107245c, 05bc201528, 9a316550e1, 9c261e2599, 0cc82dc65d,
269e21e9eb, d0dbe3354a, 4a0686daf3, 822bebac0c, a13150b0f5, 0443637fe1, 36585630f2, 1401724312, fa204a515c, b3a5fc2d53,
05600b62b3, 126599e02c, b3d6a19d24, 65100f26a7, 10b6e4663e, ce52183a26, e9ab3b47b3, 3e14fe07b7, d9271a4bcc, 850930631e,
15eee80c55, ff3c4f5313, 4c74df301f, b60b66de43, 2458022248, 18385cba2b, e7fa6bdebc, c3f6b1a7ff, f2ba8b85af, ba3fdea403,
42d18a8e04, 4b3617bd8a, eb7a1e243c, 197ce43f9a, eecdeed73c, ef606d0f17, 9981c26304, 4ebfc5dde5, 4527d073c6, 93d6967331,
b462c46b28, ab4ae85896, 6acd6f9bd3, 787759a591, 957cb355be, 35609484d4, 959337eb63, f4bdbff9dc, 954202cab7, a373dcf453,
d0c604a516, 82582f5bc3, 37f0f1eb8b, d2eab21f95, d84910299a, 48f19c0a0e, eb86885bcd, 967fd14bd7, 5cefe80286, 9ee76ce337,
fd3e7ee2c8, c85c435b5d, d5284ace25, c3098ec80b, 6629c7ec33, fb6af04b09, dc1215a61b, f74aef18f8, 166204e3c5, fc7667aef1,
3eea42770f, 77a46e3869, b801308d4a, 97f4c1fd9c, c54390d8b1, 543729b18a, a0ea4dc749, a5459792ef, d434bb26fa, fee41d404e,
8663ee8893, a072f0306a, 8221392356, 671fc581dd, 11508ce017, 0d78139fb6, a3baffe8ee, 438b08fcd5, 9b930a02a5, 194e3b87ee,
8c05e44c23, 88f8cf49f1, 015ba4d90d, 26fdbef144, d77e6dc79c, 2885645e77, 84169e2d4e, 05bc404d32, e8fd432fc5, ec05675e3a,
c91648d35c, 24aa9036b0, 816363d151, 90c52f907f, 4f250c9601, 6480adc00a, 5002f210ae, 62c5afa9a2, c109fc0b17, fff675f3dd,
c125e5acf7, ca6995a1a1, 50cf91ac9e, 11069c6982, 106d9bf1ae, 17f832637c, 0e5c8c55a4, 9d9a6f9b80, f8fe2ae5b7, 77b1dd32c7,
9df727ccf5, 70c8fec705, 0731144a6b, 9337052e7b, dc8d7ad75b, 1cc44e1f18, c8190fd1c1, 9078b35e46, e6b1665aa1, c56819365c,
6a657576cb
.coveragerc (Normal file, +4)
@@ -0,0 +1,4 @@
[run]
omit =
    tests/*
    krkn/tests/**
.github/CODEOWNERS (vendored, Normal file, +1)
@@ -0,0 +1 @@
* @paigerube14 @tsebastiani @chaitanyaenr
.github/ISSUE_TEMPLATE/bug_report.md (vendored, Normal file, +43)
@@ -0,0 +1,43 @@
---
name: Bug report
about: Create a report for an issue
title: "[BUG]"
labels: bug
---

# Bug Description

## **Describe the bug**

A clear and concise description of what the bug is.

## **To Reproduce**

Any specific steps used to reproduce the behavior.

### Scenario File
Scenario file(s) that were specified in your config file (confidential information can be starred out with *):
```yaml
<config>

```

### Config File
Config file you used when the error was seen (the default is config/config.yaml):
```yaml
<config>

```

## **Expected behavior**

A clear and concise description of what you expected to happen.

## **Krkn Output**

Krkn output to help show your problem.

## **Additional context**

Add any other context about the problem.
.github/ISSUE_TEMPLATE/feature.md (vendored, Normal file, +16)
@@ -0,0 +1,16 @@
---
name: New Feature Request
about: Suggest an idea for this project
title: ''
labels: enhancement
assignees: ''
---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to see added/changed. Ex. new parameter in [xxx] scenario, new scenario that does [xxx]

**Additional context**
Add any other context about the feature request here.
.github/PULL_REQUEST_TEMPLATE.md (vendored, 45)
@@ -1,10 +1,47 @@
## Description
<!-- Provide a brief description of the changes made in this PR. -->
# Type of change

## Documentation
- [ ] Refactor
- [ ] New feature
- [ ] Bug fix
- [ ] Optimization

# Description
<-- Provide a brief description of the changes made in this PR. -->

## Related Tickets & Documents
If there is no related issue, please create one and start the conversation on the wants of the change.

- Related Issue #:
- Closes #:

# Documentation
- [ ] **Is documentation needed for this update?**

If checked, a documentation PR must be created and merged in the [website repository](https://github.com/krkn-chaos/website/).

## Related Documentation PR (if applicable)
<!-- Add the link to the corresponding documentation PR in the website repository -->
<-- Add the link to the corresponding documentation PR in the website repository -->

# Checklist before requesting a review
- [ ] Ensure the changes and proposed solution have been discussed in the relevant issue and have received acknowledgment from the community or maintainers. See [contributing guidelines](https://krkn-chaos.dev/docs/contribution-guidelines/)
See [testing your changes](https://krkn-chaos.dev/docs/developers-guide/testing-changes/) and run on any Kubernetes or OpenShift cluster to validate your changes
- [ ] I have performed a self-review of my code by running krkn and the specific scenario
- [ ] If it is a core feature, I have added thorough unit tests with above 80% coverage

*REQUIRED*:
Description of the combination of tests performed and the output of the run

```bash
python run_kraken.py
...
<---insert test results output--->
```

OR

```bash
python -m coverage run -a -m unittest discover -s tests -v
...
<---insert test results output--->
```
.github/workflows/release.yml (vendored, 13)
@@ -16,6 +16,7 @@ jobs:
          PREVIOUS_TAG=$(git tag --sort=-creatordate | sed -n '2 p')
          echo $PREVIOUS_TAG
          echo "PREVIOUS_TAG=$PREVIOUS_TAG" >> "$GITHUB_ENV"

      - name: generate release notes from template
        id: release-notes
        env:
@@ -45,3 +46,15 @@ jobs:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          gh release create ${{ github.ref_name }} --title "${{ github.ref_name }}" -F release-notes.md

      - name: Install Syft
        run: |
          curl -sSfL https://raw.githubusercontent.com/anchore/syft/main/install.sh | sudo sh -s -- -b /usr/local/bin

      - name: Generate SBOM
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          syft . --scope all-layers --output cyclonedx-json > sbom.json
          echo "SBOM generated successfully!"
          gh release upload ${{ github.ref_name }} sbom.json
.github/workflows/stale.yml (vendored, Normal file, +52)
@@ -0,0 +1,52 @@
name: Manage Stale Issues and Pull Requests

on:
  schedule:
    # Run daily at 1:00 AM UTC
    - cron: '0 1 * * *'
  workflow_dispatch:

permissions:
  issues: write
  pull-requests: write

jobs:
  stale:
    name: Mark and Close Stale Issues and PRs
    runs-on: ubuntu-latest
    steps:
      - name: Mark and close stale issues and PRs
        uses: actions/stale@v9
        with:
          days-before-issue-stale: 60
          days-before-issue-close: 14
          stale-issue-label: 'stale'
          stale-issue-message: |
            This issue has been automatically marked as stale because it has not had any activity in the last 60 days.
            It will be closed in 14 days if no further activity occurs.
            If this issue is still relevant, please leave a comment or remove the stale label.
            Thank you for your contributions to krkn!
          close-issue-message: |
            This issue has been automatically closed due to inactivity.
            If you believe this issue is still relevant, please feel free to reopen it or create a new issue with updated information.
            Thank you for your understanding!
          close-issue-reason: 'not_planned'

          days-before-pr-stale: 90
          days-before-pr-close: 14
          stale-pr-label: 'stale'
          stale-pr-message: |
            This pull request has been automatically marked as stale because it has not had any activity in the last 90 days.
            It will be closed in 14 days if no further activity occurs.
            If this PR is still relevant, please rebase it, address any pending reviews, or leave a comment.
            Thank you for your contributions to krkn!
          close-pr-message: |
            This pull request has been automatically closed due to inactivity.
            If you believe this PR is still relevant, please feel free to reopen it or create a new pull request with updated changes.
            Thank you for your understanding!

          # Exempt labels
          exempt-issue-labels: 'bug,enhancement,good first issue'
          exempt-pr-labels: 'pending discussions,hold'

          remove-stale-when-updated: true
.github/workflows/tests.yml (vendored, 115)
@@ -14,47 +14,51 @@ jobs:
        uses: actions/checkout@v3
      - name: Create multi-node KinD cluster
        uses: redhat-chaos/actions/kind@main
      - name: Install Helm & add repos
        run: |
          curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
          helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
          helm repo add stable https://charts.helm.sh/stable
          helm repo update
      - name: Deploy prometheus & Port Forwarding
        uses: redhat-chaos/actions/prometheus@main
      - name: Deploy Elasticsearch
        with:
          ELASTIC_PORT: ${{ env.ELASTIC_PORT }}
          RUN_ID: ${{ github.run_id }}
        uses: redhat-chaos/actions/elastic@main
      - name: Download elastic password
        uses: actions/download-artifact@v4
        with:
          name: elastic_password_${{ github.run_id }}
      - name: Set elastic password on env
        run: |
          kubectl create namespace prometheus-k8s
          helm install \
            --wait --timeout 360s \
            kind-prometheus \
            prometheus-community/kube-prometheus-stack \
            --namespace prometheus-k8s \
            --set prometheus.service.nodePort=30000 \
            --set prometheus.service.type=NodePort \
            --set grafana.service.nodePort=31000 \
            --set grafana.service.type=NodePort \
            --set alertmanager.service.nodePort=32000 \
            --set alertmanager.service.type=NodePort \
            --set prometheus-node-exporter.service.nodePort=32001 \
            --set prometheus-node-exporter.service.type=NodePort \
            --set prometheus.prometheusSpec.maximumStartupDurationSeconds=300

          SELECTOR=`kubectl -n prometheus-k8s get service kind-prometheus-kube-prome-prometheus -o wide --no-headers=true | awk '{ print $7 }'`
          POD_NAME=`kubectl -n prometheus-k8s get pods --selector="$SELECTOR" --no-headers=true | awk '{ print $1 }'`
          kubectl -n prometheus-k8s port-forward $POD_NAME 9090:9090 &
          sleep 5
          ELASTIC_PASSWORD=$(cat elastic_password.txt)
          echo "ELASTIC_PASSWORD=$ELASTIC_PASSWORD" >> "$GITHUB_ENV"
      - name: Install Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.9'
          python-version: '3.11'
          architecture: 'x64'
      - name: Install environment
        run: |
          sudo apt-get install build-essential python3-dev
          pip install --upgrade pip
          pip install -r requirements.txt
          pip install coverage

      - name: Deploy test workloads
        run: |
          es_pod_name=$(kubectl get pods -l "app=elasticsearch-master" -o name)
          echo "POD_NAME: $es_pod_name"
          kubectl --namespace default port-forward $es_pod_name 9200 &
          prom_name=$(kubectl get pods -n monitoring -l "app.kubernetes.io/name=prometheus" -o name)
          kubectl --namespace monitoring port-forward $prom_name 9090 &

          # Wait for Elasticsearch to be ready
          echo "Waiting for Elasticsearch to be ready..."
          for i in {1..30}; do
            if curl -k -s -u elastic:$ELASTIC_PASSWORD https://localhost:9200/_cluster/health > /dev/null 2>&1; then
              echo "Elasticsearch is ready!"
              break
            fi
            echo "Attempt $i: Elasticsearch not ready yet, waiting..."
            sleep 2
          done
          kubectl apply -f CI/templates/outage_pod.yaml
          kubectl wait --for=condition=ready pod -l scenario=outage --timeout=300s
          kubectl apply -f CI/templates/container_scenario_pod.yaml
@@ -64,31 +68,39 @@
          kubectl wait --for=condition=ready pod -l scenario=time-skew --timeout=300s
          kubectl apply -f CI/templates/service_hijacking.yaml
          kubectl wait --for=condition=ready pod -l "app.kubernetes.io/name=proxy" --timeout=300s
          kubectl apply -f CI/legacy/scenarios/volume_scenario.yaml
          kubectl wait --for=condition=ready pod kraken-test-pod -n kraken --timeout=300s
      - name: Get Kind nodes
        run: |
          kubectl get nodes --show-labels=true
      # Pull request only steps
      - name: Run unit tests
        if: github.event_name == 'pull_request'
        run: python -m coverage run -a -m unittest discover -s tests -v

      - name: Setup Pull Request Functional Tests
        if: |
          github.event_name == 'pull_request'
      - name: Setup Functional Tests
        run: |
          yq -i '.kraken.port="8081"' CI/config/common_test_config.yaml
          yq -i '.kraken.signal_address="0.0.0.0"' CI/config/common_test_config.yaml
          yq -i '.kraken.performance_monitoring="localhost:9090"' CI/config/common_test_config.yaml
          echo "test_service_hijacking" > ./CI/tests/functional_tests
          yq -i '.elastic.elastic_port=9200' CI/config/common_test_config.yaml
          yq -i '.elastic.elastic_url="https://localhost"' CI/config/common_test_config.yaml
          yq -i '.elastic.enable_elastic=False' CI/config/common_test_config.yaml
          yq -i '.elastic.password="${{env.ELASTIC_PASSWORD}}"' CI/config/common_test_config.yaml
          yq -i '.performance_monitoring.prometheus_url="http://localhost:9090"' CI/config/common_test_config.yaml
          echo "test_app_outages" >> ./CI/tests/functional_tests
          echo "test_container" >> ./CI/tests/functional_tests
          echo "test_namespace" >> ./CI/tests/functional_tests
          echo "test_net_chaos" >> ./CI/tests/functional_tests
          echo "test_time" >> ./CI/tests/functional_tests
          echo "test_container" >> ./CI/tests/functional_tests
          echo "test_cpu_hog" >> ./CI/tests/functional_tests
          echo "test_memory_hog" >> ./CI/tests/functional_tests
          echo "test_customapp_pod" >> ./CI/tests/functional_tests
          echo "test_io_hog" >> ./CI/tests/functional_tests

          echo "test_memory_hog" >> ./CI/tests/functional_tests
          echo "test_namespace" >> ./CI/tests/functional_tests
          echo "test_net_chaos" >> ./CI/tests/functional_tests
          echo "test_node" >> ./CI/tests/functional_tests
          echo "test_pod" >> ./CI/tests/functional_tests
          echo "test_pod_error" >> ./CI/tests/functional_tests
          echo "test_service_hijacking" >> ./CI/tests/functional_tests
          echo "test_pod_network_filter" >> ./CI/tests/functional_tests
          echo "test_pod_server" >> ./CI/tests/functional_tests
          echo "test_time" >> ./CI/tests/functional_tests
          # echo "test_pvc" >> ./CI/tests/functional_tests

      # Push on main only steps + all other functional to collect coverage
      # for the badge
@@ -102,22 +114,9 @@
      - name: Setup Post Merge Request Functional Tests
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        run: |
          yq -i '.kraken.port="8081"' CI/config/common_test_config.yaml
          yq -i '.kraken.signal_address="0.0.0.0"' CI/config/common_test_config.yaml
          yq -i '.kraken.performance_monitoring="localhost:9090"' CI/config/common_test_config.yaml
          yq -i '.telemetry.username="${{secrets.TELEMETRY_USERNAME}}"' CI/config/common_test_config.yaml
          yq -i '.telemetry.password="${{secrets.TELEMETRY_PASSWORD}}"' CI/config/common_test_config.yaml
          echo "test_telemetry" > ./CI/tests/functional_tests
          echo "test_service_hijacking" >> ./CI/tests/functional_tests
          echo "test_app_outages" >> ./CI/tests/functional_tests
          echo "test_container" >> ./CI/tests/functional_tests
          echo "test_namespace" >> ./CI/tests/functional_tests
          echo "test_net_chaos" >> ./CI/tests/functional_tests
          echo "test_time" >> ./CI/tests/functional_tests
          echo "test_cpu_hog" >> ./CI/tests/functional_tests
          echo "test_memory_hog" >> ./CI/tests/functional_tests
          echo "test_io_hog" >> ./CI/tests/functional_tests

          echo "test_telemetry" >> ./CI/tests/functional_tests
      # Final common steps
      - name: Run Functional tests
        env:
@@ -127,32 +126,38 @@
          cat ./CI/results.markdown >> $GITHUB_STEP_SUMMARY
          echo >> $GITHUB_STEP_SUMMARY
      - name: Upload CI logs
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: ci-logs
          path: CI/out
          if-no-files-found: error
      - name: Collect coverage report
        if: ${{ always() }}
        run: |
          python -m coverage html
          python -m coverage json
      - name: Publish coverage report to job summary
        if: ${{ always() }}
        run: |
          pip install html2text
          html2text --ignore-images --ignore-links -b 0 htmlcov/index.html >> $GITHUB_STEP_SUMMARY
      - name: Upload coverage data
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: coverage
          path: htmlcov
          if-no-files-found: error
      - name: Upload json coverage
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: coverage.json
          path: coverage.json
          if-no-files-found: error
      - name: Check CI results
        if: ${{ always() }}
        run: "! grep Fail CI/results.markdown"

  badge:
@@ -177,7 +182,7 @@
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: 3.9
          python-version: '3.11'
      - name: Copy badge on GitHub Page Repo
        env:
          COLOR: yellow
@@ -6,3 +6,4 @@ This is a list of organizations that have publicly acknowledged usage of Krkn an
|:-|:-|:-|:-|
| MarketAxess | 2024 | https://www.marketaxess.com/ | Kraken enables us to achieve our goal of increasing the reliability of our cloud products on Kubernetes. The tool allows us to automatically run various chaos scenarios, identify resilience and performance bottlenecks, and seamlessly restore the system to its original state once scenarios finish. These chaos scenarios include pod disruptions, node (EC2) outages, simulating availability zone (AZ) outages, and filling up storage spaces like EBS and EFS. The community is highly responsive to requests and works on expanding the tool's capabilities. MarketAxess actively contributes to the project, adding features such as the ability to leverage existing network ACLs and proposing several feature improvements to enhance test coverage. |
| Red Hat Openshift | 2020 | https://www.redhat.com/ | Kraken is a highly reliable chaos testing tool used to ensure the quality and resiliency of Red Hat Openshift. The engineering team runs all the test scenarios under Kraken on different cloud platforms on both self-managed and cloud services environments prior to the release of a new version of the product. The team also contributes to the Kraken project consistently which helps the test scenarios to keep up with the new features introduced to the product. Inclusion of this test coverage has contributed to gaining the trust of new and existing customers of the product. |
| IBM | 2023 | https://www.ibm.com/ | While working on AI for Chaos Testing at IBM Research, we closely collaborated with the Kraken (Krkn) team to advance intelligent chaos engineering. Our contributions included developing AI-enabled chaos injection strategies and integrating reinforcement learning (RL)-based fault search techniques into the Krkn tool, enabling it to identify and explore system vulnerabilities more efficiently. Kraken stands out as one of the most user-friendly and effective tools for chaos engineering, and the Kraken team's deep technical involvement played a crucial role in the success of this collaboration, helping bridge cutting-edge AI research with practical, real-world system reliability testing. |
@@ -2,26 +2,30 @@ kraken:
  distribution: kubernetes # Distribution can be kubernetes or openshift.
  kubeconfig_path: ~/.kube/config # Path to kubeconfig.
  exit_on_failure: False # Exit when a post action scenario fails.
  litmus_version: v1.13.6 # Litmus version to install.
  litmus_uninstall: False # If you want to uninstall litmus if failure.
  publish_kraken_status: True # Can be accessed at http://0.0.0.0:8081
  signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer docs/signal.md for more details
  signal_address: 0.0.0.0 # Signal listening address
  port: 8081 # Signal port
  auto_rollback: True # Enable auto rollback for scenarios.
  rollback_versions_directory: /tmp/kraken-rollback # Directory to store rollback version files.
  chaos_scenarios: # List of policies/chaos scenarios to load.
    - $scenario_type: # List of chaos pod scenarios to load.
      - $scenario_file
        $post_config
cerberus:
  cerberus_enabled: False # Enable it when cerberus is previously installed.
  cerberus_url: # When cerberus_enabled is set to True, provide the url where cerberus publishes go/no-go signal.

performance_monitoring:
  deploy_dashboards: False # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift.
  repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
  capture_metrics: False
  metrics_profile_path: config/metrics-aggregated.yaml
  prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
  prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
  uuid: # uuid for the run is generated by default if not set.
  enable_alerts: False # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error.
  alert_profile: config/alerts.yaml # Path to alert profile with the prometheus queries.
  enable_alerts: True # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
  enable_metrics: True
  alert_profile: config/alerts.yaml # Path or URL to alert profile with the prometheus queries
  metrics_profile: config/metrics-report.yaml
  check_critical_alerts: True # Path to alert profile with the prometheus queries.

tunings:
  wait_duration: 6 # Duration to wait between each chaos scenario.
@@ -32,13 +36,13 @@ telemetry:
  api_url: https://yvnn4rfoi7.execute-api.us-west-2.amazonaws.com/test #telemetry service endpoint
  username: $TELEMETRY_USERNAME # telemetry service username
  password: $TELEMETRY_PASSWORD # telemetry service password
  prometheus_namespace: 'prometheus-k8s' # prometheus namespace
  prometheus_namespace: 'monitoring' # prometheus namespace
  prometheus_pod_name: 'prometheus-kind-prometheus-kube-prome-prometheus-0' # prometheus pod_name
  prometheus_container_name: 'prometheus'
  prometheus_backup: True # enables/disables prometheus data collection
  full_prometheus_backup: False # if is set to False only the /prometheus/wal folder will be downloaded.
  backup_threads: 5 # number of telemetry download/upload threads
  archive_path: /tmp # local path where the archive files will be temporarly stored
  archive_path: /tmp # local path where the archive files will be temporarily stored
  max_retries: 0 # maximum number of upload retries (if 0 will retry forever)
  run_tag: '' # if set, this will be appended to the run folder in the bucket (useful to group the runs)
  archive_size: 10000 # the size of the prometheus data archive size in KB. The lower the size of archive is
@@ -52,8 +56,6 @@ telemetry:
  telemetry_group: "funtests"
elastic:
  enable_elastic: False
  collect_metrics: False
  collect_alerts: False
  verify_certs: False
  elastic_url: "https://192.168.39.196" # To track results in elasticsearch, give url to server here; will post telemetry details when url and index not blank
  elastic_port: 32766
@@ -45,15 +45,45 @@ metadata:
  name: kraken-test-pod
  namespace: kraken
spec:
  securityContext:
    fsGroup: 1001
  # initContainer to fix permissions on the mounted volume
  initContainers:
    - name: fix-permissions
      image: 'quay.io/centos7/httpd-24-centos7:centos7'
      command:
        - sh
        - -c
        - |
          echo "Setting up permissions for /home/kraken..."
          # Create the directory if it doesn't exist
          mkdir -p /home/kraken
          # Set ownership to user 1001 and group 1001
          chown -R 1001:1001 /home/kraken
          # Set permissions to allow read/write
          chmod -R 755 /home/kraken
          rm -rf /home/kraken/*
          echo "Permissions fixed. Current state:"
          ls -la /home/kraken
      volumeMounts:
        - mountPath: "/home/kraken"
          name: kraken-test-pv
      securityContext:
        runAsUser: 0 # Run as root to fix permissions
  volumes:
    - name: kraken-test-pv
      persistentVolumeClaim:
        claimName: kraken-test-pvc
  containers:
    - name: kraken-test-container
      image: 'quay.io/centos7/httpd-24-centos7:latest'
      volumeMounts:
        - mountPath: "/home/krake-dir/"
          name: kraken-test-pv
      image: 'quay.io/centos7/httpd-24-centos7:centos7'
      securityContext:
        privileged: true
        runAsUser: 1001
        runAsNonRoot: true
        allowPrivilegeEscalation: false
        capabilities:
          drop:
            - ALL
      volumeMounts:
        - mountPath: "/home/kraken"
          name: kraken-test-pv
@@ -8,9 +8,9 @@ spec:
  hostNetwork: true
  containers:
    - name: fedtools
      image: docker.io/fedora/tools
      image: quay.io/krkn-chaos/krkn:tools
      command:
        - /bin/sh
        - -c
        - |
          sleep infinity
          sleep infinity
CI/templates/pod_network_filter.yaml (Normal file, +29)
@@ -0,0 +1,29 @@
apiVersion: v1
kind: Pod
metadata:
  name: pod-network-filter-test
  labels:
    app.kubernetes.io/name: pod-network-filter
spec:
  containers:
    - name: nginx
      image: quay.io/krkn-chaos/krkn-funtests:pod-network-filter
      ports:
        - containerPort: 5000
          name: pod-network-prt

---
apiVersion: v1
kind: Service
metadata:
  name: pod-network-filter-service
spec:
  selector:
    app.kubernetes.io/name: pod-network-filter
  type: NodePort
  ports:
    - name: pod-network-filter-svc
      protocol: TCP
      port: 80
      targetPort: pod-network-prt
      nodePort: 30037
@@ -8,9 +8,9 @@ spec:
  hostNetwork: true
  containers:
    - name: fedtools
      image: docker.io/fedora/tools
      image: quay.io/krkn-chaos/krkn:tools
      command:
        - /bin/sh
        - -c
        - |
          sleep infinity
          sleep infinity
@@ -13,7 +13,13 @@ function functional_test_app_outage {
  export scenario_type="application_outages_scenarios"
  export scenario_file="scenarios/openshift/app_outage.yaml"
  export post_config=""

  kubectl get services -A

  kubectl get pods
  envsubst < CI/config/common_test_config.yaml > CI/config/app_outage.yaml
  cat $scenario_file
  cat CI/config/app_outage.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/app_outage.yaml
  echo "App outage scenario test: Success"
}
@@ -16,8 +16,10 @@ function functional_test_container_crash {
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/container_config.yaml

  python3 -m coverage run -a run_kraken.py -c CI/config/container_config.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/container_config.yaml -d True
  echo "Container scenario test: Success"

  kubectl get pods -n kube-system -l component=etcd
}

functional_test_container_crash
@@ -7,7 +7,7 @@ trap finish EXIT


function functional_test_cpu_hog {
  yq -i '.node_selector="kubernetes.io/hostname=kind-worker2"' scenarios/kube/cpu-hog.yml
  yq -i '."node-selector"="kubernetes.io/hostname=kind-worker2"' scenarios/kube/cpu-hog.yml

  export scenario_type="hog_scenarios"
  export scenario_file="scenarios/kube/cpu-hog.yml"
CI/tests/test_customapp_pod.sh (Executable file, +18)
@@ -0,0 +1,18 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT

function functional_test_customapp_pod_node_selector {
  export scenario_type="pod_disruption_scenarios"
  export scenario_file="scenarios/openshift/customapp_pod.yaml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/customapp_pod_config.yaml

  python3 -m coverage run -a run_kraken.py -c CI/config/customapp_pod_config.yaml -d True
  echo "Pod disruption with node_label_selector test: Success"
}

functional_test_customapp_pod_node_selector
@@ -5,12 +5,13 @@ source CI/tests/common.sh
trap error ERR
trap finish EXIT


function functional_test_io_hog {
  yq -i '.node_selector="kubernetes.io/hostname=kind-worker2"' scenarios/kube/io-hog.yml
  yq -i '."node-selector"="kubernetes.io/hostname=kind-worker2"' scenarios/kube/io-hog.yml
  export scenario_type="hog_scenarios"
  export scenario_file="scenarios/kube/io-hog.yml"
  export post_config=""

  cat $scenario_file
  envsubst < CI/config/common_test_config.yaml > CI/config/io_hog.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/io_hog.yaml
  echo "IO Hog: Success"
@@ -7,7 +7,7 @@ trap finish EXIT


function functional_test_memory_hog {
  yq -i '.node_selector="kubernetes.io/hostname=kind-worker2"' scenarios/kube/memory-hog.yml
  yq -i '."node-selector"="kubernetes.io/hostname=kind-worker2"' scenarios/kube/memory-hog.yml
  export scenario_type="hog_scenarios"
  export scenario_file="scenarios/kube/memory-hog.yml"
  export post_config=""
CI/tests/test_node.sh (Executable file, +18)
@@ -0,0 +1,18 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT

function functional_test_node_stop_start {
  export scenario_type="node_scenarios"
  export scenario_file="scenarios/kind/node_scenarios_example.yml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/node_config.yaml
  cat CI/config/node_config.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/node_config.yaml
  echo "Node Stop/Start scenario test: Success"
}

functional_test_node_stop_start
CI/tests/test_pod.sh (Executable file, +20)
@@ -0,0 +1,20 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT

function functional_test_pod_crash {
  export scenario_type="pod_disruption_scenarios"
  export scenario_file="scenarios/kind/pod_etcd.yml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/pod_config.yaml

  python3 -m coverage run -a run_kraken.py -c CI/config/pod_config.yaml
  echo "Pod disruption scenario test: Success"
  date
  kubectl get pods -n kube-system -l component=etcd -o yaml
}

functional_test_pod_crash
CI/tests/test_pod_error.sh (Executable file, +28)
@@ -0,0 +1,28 @@

source CI/tests/common.sh

trap error ERR
trap finish EXIT

function functional_test_pod_error {
  export scenario_type="pod_disruption_scenarios"
  export scenario_file="scenarios/kind/pod_etcd.yml"
  export post_config=""
  yq -i '.[0].config.kill=5' scenarios/kind/pod_etcd.yml
  envsubst < CI/config/common_test_config.yaml > CI/config/pod_config.yaml
  cat CI/config/pod_config.yaml

  cat scenarios/kind/pod_etcd.yml
  python3 -m coverage run -a run_kraken.py -c CI/config/pod_config.yaml

  ret=$?
  echo "\n\nret $ret"
  if [[ $ret -ge 1 ]]; then
    echo "Pod disruption error scenario test: Success"
  else
    echo "Pod disruption error scenario test: Failure"
    exit 1
  fi
}

functional_test_pod_error
CI/tests/test_pod_network_filter.sh (Executable file, +62)
@@ -0,0 +1,62 @@
function functional_pod_network_filter {
  export SERVICE_URL="http://localhost:8889"
  export scenario_type="network_chaos_ng_scenarios"
  export scenario_file="scenarios/kube/pod-network-filter.yml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/pod_network_filter.yaml
  yq -i '.[0].test_duration=10' scenarios/kube/pod-network-filter.yml
  yq -i '.[0].label_selector=""' scenarios/kube/pod-network-filter.yml
  yq -i '.[0].ingress=false' scenarios/kube/pod-network-filter.yml
  yq -i '.[0].egress=true' scenarios/kube/pod-network-filter.yml
  yq -i '.[0].target="pod-network-filter-test"' scenarios/kube/pod-network-filter.yml
  yq -i '.[0].protocols=["tcp"]' scenarios/kube/pod-network-filter.yml
  yq -i '.[0].ports=[443]' scenarios/kube/pod-network-filter.yml
  yq -i '.performance_monitoring.check_critical_alerts=False' CI/config/pod_network_filter.yaml

  ## Test webservice deployment
  kubectl apply -f ./CI/templates/pod_network_filter.yaml
  COUNTER=0
  while true
  do
    curl $SERVICE_URL
    EXITSTATUS=$?
    if [ "$EXITSTATUS" -eq "0" ]
    then
      break
    fi
    sleep 1
    COUNTER=$((COUNTER+1))
    [ $COUNTER -eq "100" ] && echo "maximum number of retry reached, test failed" && exit 1
  done

  cat scenarios/kube/pod-network-filter.yml

  python3 -m coverage run -a run_kraken.py -c CI/config/pod_network_filter.yaml > krkn_pod_network.out 2>&1 &
  PID=$!

  # wait until the dns resolution starts failing and the service returns 404
  DNS_FAILURE_STATUS=0
  while true
  do
    OUT_STATUS_CODE=$(curl -X GET -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL)
    if [ "$OUT_STATUS_CODE" -eq "404" ]
    then
      DNS_FAILURE_STATUS=404
    fi

    if [ "$DNS_FAILURE_STATUS" -eq "404" ] && [ "$OUT_STATUS_CODE" -eq "200" ]
    then
      echo "service restored"
      break
    fi
    COUNTER=$((COUNTER+1))
    [ $COUNTER -eq "100" ] && echo "maximum number of retry reached, test failed" && exit 1
    sleep 2
  done

  wait $PID

}

functional_pod_network_filter
CI/tests/test_pod_server.sh (Executable file, +35)
@@ -0,0 +1,35 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT

function functional_test_pod_server {
  export scenario_type="pod_disruption_scenarios"
  export scenario_file="scenarios/kind/pod_etcd.yml"
  export post_config=""

  envsubst < CI/config/common_test_config.yaml > CI/config/pod_config.yaml
  yq -i '.[0].config.kill=1' scenarios/kind/pod_etcd.yml

  yq -i '.tunings.daemon_mode=True' CI/config/pod_config.yaml
  cat CI/config/pod_config.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/pod_config.yaml &
  sleep 15
  curl -X POST http://0.0.0.0:8081/STOP

  wait

  yq -i '.kraken.signal_state="PAUSE"' CI/config/pod_config.yaml
  yq -i '.tunings.daemon_mode=False' CI/config/pod_config.yaml
  cat CI/config/pod_config.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/pod_config.yaml &
  sleep 5
  curl -X POST http://0.0.0.0:8081/RUN
  wait

  echo "Pod disruption with server scenario test: Success"
}

functional_test_pod_server
CI/tests/test_pvc.sh (Executable file, +18)
@@ -0,0 +1,18 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT

function functional_test_pvc_fill {
  export scenario_type="pvc_scenarios"
  export scenario_file="scenarios/kind/pvc_scenario.yaml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/pvc_config.yaml
  cat CI/config/pvc_config.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/pvc_config.yaml --debug True
  echo "PVC Fill scenario test: Success"
}

functional_test_pvc_fill
@@ -39,7 +39,7 @@ function functional_test_service_hijacking {
  export scenario_file="scenarios/kube/service_hijacking.yaml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/service_hijacking.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/service_hijacking.yaml > /dev/null 2>&1 &
  python3 -m coverage run -a run_kraken.py -c CI/config/service_hijacking.yaml > /tmp/krkn.log 2>&1 &
  PID=$!
  # Waiting for the hijacking to take effect
  COUNTER=0
@@ -100,8 +100,13 @@ function functional_test_service_hijacking {
  [ "${PAYLOAD_PATCH_2//[$'\t\r\n ']}" == "${OUT_PATCH//[$'\t\r\n ']}" ] && echo "Step 2 PATCH Payload OK" || (echo "Step 2 PATCH Payload did not match. Test failed." && exit 1)
  [ "$OUT_STATUS_CODE" == "$STATUS_CODE_PATCH_2" ] && echo "Step 2 PATCH Status Code OK" || (echo "Step 2 PATCH status code did not match. Test failed." && exit 1)
  [ "$OUT_CONTENT" == "$TEXT_MIME" ] && echo "Step 2 PATCH MIME OK" || (echo " Step 2 PATCH MIME did not match. Test failed." && exit 1)


  wait $PID

  cat /tmp/krkn.log

  # now checking if the service has been restored correctly and nginx responds correctly
  curl -s $SERVICE_URL | grep nginx! && echo "BODY: Service restored!" || (echo "BODY: failed to restore service" && exit 1)
  OUT_STATUS_CODE=`curl -X GET -s -o /dev/null -I -w "%{http_code}" $SERVICE_URL`
@@ -18,9 +18,8 @@ function functional_test_telemetry {
  yq -i '.performance_monitoring.prometheus_url="http://localhost:9090"' CI/config/common_test_config.yaml
  yq -i '.telemetry.run_tag=env(RUN_TAG)' CI/config/common_test_config.yaml

  export scenario_type="hog_scenarios"

  export scenario_file="scenarios/kube/cpu-hog.yml"
  export scenario_type="pod_disruption_scenarios"
  export scenario_file="scenarios/kind/pod_etcd.yml"

  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/telemetry.yaml
CLAUDE.md (Normal file, +273)
@@ -0,0 +1,273 @@
# CLAUDE.md - Krkn Chaos Engineering Framework

## Project Overview

Krkn (Kraken) is a chaos engineering tool for Kubernetes/OpenShift clusters. It injects deliberate failures to validate cluster resilience. Plugin-based architecture with multi-cloud support (AWS, Azure, GCP, IBM Cloud, VMware, Alibaba, OpenStack).

## Repository Structure

```
krkn/
├── krkn/
│   ├── scenario_plugins/   # Chaos scenario plugins (pod, node, network, hogs, etc.)
│   ├── utils/              # Utility functions
│   ├── rollback/           # Rollback management
│   ├── prometheus/         # Prometheus integration
│   └── cerberus/           # Health monitoring
├── tests/                  # Unit tests (unittest framework)
├── scenarios/              # Example scenario configs (openshift/, kube/, kind/)
├── config/                 # Configuration files
└── CI/                     # CI/CD test scripts
```

## Quick Start

```bash
# Setup (ALWAYS use virtual environment)
python3 -m venv venv
source venv/bin/activate
pip install -r requirements.txt

# Run Krkn
python run_kraken.py --config config/config.yaml

# Note: Scenarios are specified in config.yaml under kraken.chaos_scenarios
# There is no --scenario flag; edit config/config.yaml to select scenarios

# Run tests
python -m unittest discover -s tests -v
python -m coverage run -a -m unittest discover -s tests -v
```

## Critical Requirements

### Python Environment
- **Python 3.9+** required
- **NEVER install packages globally** - always use virtual environment
- **CRITICAL**: `docker` must be <7.0 and `requests` must be <2.32 (Unix socket compatibility)

### Key Dependencies
- **krkn-lib** (5.1.13): Core library for Kubernetes/OpenShift operations
- **kubernetes** (34.1.0): Kubernetes Python client
- **docker** (<7.0), **requests** (<2.32): DO NOT upgrade without verifying compatibility
- Cloud SDKs: boto3 (AWS), azure-mgmt-* (Azure), google-cloud-compute (GCP), ibm_vpc (IBM), pyVmomi (VMware)

## Plugin Architecture (CRITICAL)

**Strictly enforced naming conventions:**

### Naming Rules
- **Module files**: Must end with `_scenario_plugin.py` and use snake_case
  - Example: `pod_disruption_scenario_plugin.py`
- **Class names**: Must be CamelCase and end with `ScenarioPlugin`
  - Example: `PodDisruptionScenarioPlugin`
  - Must match module filename (snake_case ↔ CamelCase)
- **Directory structure**: Plugin dirs CANNOT contain "scenario" or "plugin"
  - Location: `krkn/scenario_plugins/<plugin_name>/`

### Plugin Implementation
Every plugin MUST:
1. Extend `AbstractScenarioPlugin`
2. Implement `run()` method
3. Implement `get_scenario_types()` method

```python
from krkn.scenario_plugins import AbstractScenarioPlugin


class PodDisruptionScenarioPlugin(AbstractScenarioPlugin):
    def run(self, config, scenarios_list, kubeconfig_path, wait_duration):
        pass

    def get_scenario_types(self):
        return ["pod_scenarios", "pod_outage"]
```

### Creating a New Plugin
1. Create directory: `krkn/scenario_plugins/<plugin_name>/`
2. Create module: `<plugin_name>_scenario_plugin.py`
3. Create class: `<PluginName>ScenarioPlugin` extending `AbstractScenarioPlugin`
4. Implement `run()` and `get_scenario_types()`
5. Create unit test: `tests/test_<plugin_name>_scenario_plugin.py`
6. Add example scenario: `scenarios/<platform>/<scenario>.yaml`

**DO NOT**: Violate naming conventions (factory will reject), include "scenario"/"plugin" in directory names, create plugins without tests.

## Testing

### Unit Tests
```bash
# Run all tests
python -m unittest discover -s tests -v

# Specific test
python -m unittest tests.test_pod_disruption_scenario_plugin

# With coverage
python -m coverage run -a -m unittest discover -s tests -v
python -m coverage html
```

**Test requirements:**
- Naming: `test_<module>_scenario_plugin.py`
- Mock external dependencies (Kubernetes API, cloud providers)
- Test success, failure, and edge cases
- Keep tests isolated and independent

### Functional Tests
Located in `CI/tests/`. Can be run locally on a kind cluster with Prometheus and Elasticsearch set up.

**Setup for local testing:**
1. Deploy Prometheus and Elasticsearch on your kind cluster:
   - Prometheus setup: https://krkn-chaos.dev/docs/developers-guide/testing-changes/#prometheus
   - Elasticsearch setup: https://krkn-chaos.dev/docs/developers-guide/testing-changes/#elasticsearch

2. Or disable monitoring features in `config/config.yaml`:
   ```yaml
   performance_monitoring:
     enable_alerts: False
     enable_metrics: False
     check_critical_alerts: False
   ```

**Note:** Functional tests run automatically in CI with full monitoring enabled.

## Cloud Provider Implementations

Node chaos scenarios are cloud-specific. Each in `krkn/scenario_plugins/node_actions/<provider>_node_scenarios.py`:
- AWS, Azure, GCP, IBM Cloud, VMware, Alibaba, OpenStack, Bare Metal

Implement: stop, start, reboot, terminate instances.

**When modifying**: Maintain consistency with other providers, handle API errors, add logging, update tests.

### Adding Cloud Provider Support
1. Create: `krkn/scenario_plugins/node_actions/<provider>_node_scenarios.py`
2. Extend: `abstract_node_scenarios.AbstractNodeScenarios`
3. Implement: `stop_instances`, `start_instances`, `reboot_instances`, `terminate_instances`
4. Add SDK to `requirements.txt`
5. Create unit test with mocked SDK
6. Add example scenario: `scenarios/openshift/<provider>_node_scenarios.yml`
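Following the steps above, a new provider module might be shaped roughly like the minimal sketch below. The base class and the four method names come from this checklist; the import path, the constructor, the `ExampleCloudClient` SDK wrapper, and the exact signatures are illustrative assumptions rather than the actual krkn API.

```python
# Illustrative sketch for a hypothetical "examplecloud" provider; not the real implementation.
import logging

# Assumption: AbstractNodeScenarios lives in the abstract_node_scenarios module named in step 2.
from krkn.scenario_plugins.node_actions.abstract_node_scenarios import AbstractNodeScenarios


class ExampleCloudNodeScenarios(AbstractNodeScenarios):
    def __init__(self, sdk_client):
        # sdk_client is a hypothetical wrapper around the provider SDK added in step 4.
        self.client = sdk_client

    def stop_instances(self, instance_ids):
        for instance_id in instance_ids:
            logging.info("Stopping instance %s", instance_id)
            self.client.stop(instance_id)

    def start_instances(self, instance_ids):
        for instance_id in instance_ids:
            logging.info("Starting instance %s", instance_id)
            self.client.start(instance_id)

    def reboot_instances(self, instance_ids):
        for instance_id in instance_ids:
            logging.info("Rebooting instance %s", instance_id)
            self.client.reboot(instance_id)

    def terminate_instances(self, instance_ids):
        for instance_id in instance_ids:
            logging.info("Terminating instance %s", instance_id)
            self.client.terminate(instance_id)
```

Per-call error handling and a mocked unit test (step 5) would follow the same pattern as the existing providers.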
## Configuration

**Main config**: `config/config.yaml`
- `kraken`: Core settings
- `cerberus`: Health monitoring
- `performance_monitoring`: Prometheus
- `elastic`: Elasticsearch telemetry

**Scenario configs**: `scenarios/` directory
```yaml
- config:
    scenario_type: <type> # Must match plugin's get_scenario_types()
```

## Code Style

- **Import order**: Standard library, third-party, local imports
- **Naming**: snake_case (functions/variables), CamelCase (classes)
- **Logging**: Use Python's `logging` module
- **Error handling**: Return appropriate exit codes
- **Docstrings**: Required for public functions/classes

## Exit Codes

Krkn uses specific exit codes to communicate execution status:

- `0`: Success - all scenarios passed, no critical alerts
- `1`: Scenario failure - one or more scenarios failed
- `2`: Critical alerts fired during execution
- `3+`: Health check failure (Cerberus monitoring detected issues)

**When implementing scenarios:**
- Return `0` on success
- Return `1` on scenario-specific failures
- Propagate health check failures appropriately
- Log exit code reasons clearly
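To illustrate the convention above, a scenario plugin's `run()` could map outcomes to these codes roughly as follows. Only the meaning of `0` and `1` is taken from this document; the plugin class, the `_inject()` helper, and the control flow are assumptions sketched on the plugin interface shown earlier.

```python
import logging

from krkn.scenario_plugins import AbstractScenarioPlugin


class ExampleScenarioPlugin(AbstractScenarioPlugin):
    def get_scenario_types(self):
        return ["example_scenarios"]

    def run(self, config, scenarios_list, kubeconfig_path, wait_duration):
        # Map outcomes onto the exit-code convention above: 0 = success, 1 = scenario failure.
        try:
            for scenario in scenarios_list:
                self._inject(scenario, kubeconfig_path, wait_duration)
        except Exception as exc:
            logging.error("Scenario failed, reporting exit code 1: %s", exc)
            return 1
        logging.info("All scenarios passed, reporting exit code 0")
        return 0

    def _inject(self, scenario, kubeconfig_path, wait_duration):
        # Hypothetical helper holding the actual chaos-injection logic.
        pass
```

Codes `2` and `3+` relate to alerting and Cerberus health checks and are not modeled in this sketch.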
## Container Support

Krkn can run inside a container. See `containers/` directory.

**Building custom image:**
```bash
cd containers
./compile_dockerfile.sh # Generates Dockerfile from template
docker build -t krkn:latest .
```

**Running containerized:**
```bash
docker run -v ~/.kube:/root/.kube:Z \
  -v $(pwd)/config:/config:Z \
  -v $(pwd)/scenarios:/scenarios:Z \
  krkn:latest
```

## Git Workflow

- **NEVER commit directly to main**
- **NEVER use `--force` without approval**
- **ALWAYS create feature branches**: `git checkout -b feature/description`
- **ALWAYS run tests before pushing**

**Conventional commits**: `feat:`, `fix:`, `test:`, `docs:`, `refactor:`

```bash
git checkout main && git pull origin main
git checkout -b feature/your-feature-name
# Make changes, write tests
python -m unittest discover -s tests -v
git add <specific-files>
git commit -m "feat: description"
git push -u origin feature/your-feature-name
```

## Environment Variables

- `KUBECONFIG`: Path to kubeconfig
- `AWS_*`, `AZURE_*`, `GOOGLE_APPLICATION_CREDENTIALS`: Cloud credentials
- `PROMETHEUS_URL`, `ELASTIC_URL`, `ELASTIC_PASSWORD`: Monitoring config

**NEVER commit credentials or API keys.**

## Common Pitfalls

1. Missing virtual environment - always activate venv
2. Running functional tests without cluster setup
3. Ignoring exit codes
4. Modifying krkn-lib directly (it's a separate package)
5. Upgrading docker/requests beyond version constraints

## Before Writing Code

1. Check for existing implementations
2. Review existing plugins as examples
3. Maintain consistency with cloud provider patterns
4. Plan rollback logic
5. Write tests alongside code
6. Update documentation

## When Adding Dependencies

1. Check if functionality exists in krkn-lib or current dependencies
2. Verify compatibility with existing versions
3. Pin specific versions in `requirements.txt`
4. Check for security vulnerabilities
5. Test thoroughly for conflicts

## Common Development Tasks

### Modifying Existing Plugin
1. Read plugin code and corresponding test
2. Make changes
3. Update/add unit tests
4. Run: `python -m unittest tests.test_<plugin>_scenario_plugin`

### Writing Unit Tests
1. Create: `tests/test_<module>_scenario_plugin.py`
2. Import `unittest` and plugin class
3. Mock external dependencies
4. Test success, failure, and edge cases
5. Run: `python -m unittest tests.test_<module>_scenario_plugin`
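A test following these steps could be sketched as below. `PodDisruptionScenarioPlugin`, its `run()` signature, and the scenario types come from the plugin example earlier in this file; the import path, the mocked config, and the expected return value are illustrative assumptions.

```python
# tests/test_pod_disruption_scenario_plugin.py (illustrative sketch)
import unittest
from unittest.mock import MagicMock

# Assumption: the plugin lives under krkn/scenario_plugins/pod_disruption/ per the naming rules.
from krkn.scenario_plugins.pod_disruption.pod_disruption_scenario_plugin import (
    PodDisruptionScenarioPlugin,
)


class TestPodDisruptionScenarioPlugin(unittest.TestCase):
    def setUp(self):
        self.plugin = PodDisruptionScenarioPlugin()

    def test_scenario_types_are_declared(self):
        # The plugin factory matches config scenario types against this list.
        self.assertIn("pod_scenarios", self.plugin.get_scenario_types())

    def test_run_with_no_scenarios_succeeds(self):
        # Mock external dependencies (config, Kubernetes access) instead of touching a cluster.
        config = MagicMock()
        result = self.plugin.run(config, [], kubeconfig_path="/dev/null", wait_duration=0)
        self.assertEqual(result, 0)  # assumed success exit code, see Exit Codes above


if __name__ == "__main__":
    unittest.main()
```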
GOVERNANCE.md (Normal file, +83)
@@ -0,0 +1,83 @@

The governance model adopted here is heavily influenced by a set of CNCF projects, drawing particular
reference from [Kubernetes governance](https://github.com/kubernetes/community/blob/master/governance.md).
*For similar structures, some of the same wording from the Kubernetes governance document is borrowed to adhere
to the originally construed meaning.*

## Principles

- **Open**: Krkn is an open source community.
- **Welcoming and respectful**: See [Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
- **Transparent and accessible**: Work and collaboration should be done in public.
  Changes to the Krkn organization, Krkn code repositories, and CNCF related activities (e.g.
  level, involvement, etc.) are done in public.
- **Merit**: Ideas and contributions are accepted according to their technical merit
  and alignment with project objectives, scope and design principles.

## Code of Conduct

Krkn follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).
Here is an excerpt:

> As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities.

## Maintainer Levels

### Contributor
Contributors contribute to the community. Anyone can become a contributor by participating in discussions, reporting bugs, or contributing code or documentation.

#### Responsibilities:

Be active in the community and adhere to the Code of Conduct.

Report bugs and suggest new features.

Contribute high-quality code and documentation.

### Member
Members are active contributors to the community. Members have demonstrated a strong understanding of the project's codebase and conventions.

#### Responsibilities:

Review pull requests for correctness, quality, and adherence to project standards.

Provide constructive and timely feedback to contributors.

Ensure that all contributions are well-tested and documented.

Work with maintainers to ensure a smooth and efficient release process.

### Maintainer
Maintainers are responsible for the overall health and direction of the project. They are long-standing contributors who have shown a deep commitment to the project's success.

#### Responsibilities:

Set the technical direction and vision for the project.

Manage releases and ensure the stability of the main branch.

Make decisions on feature inclusion and project priorities.

Mentor other contributors and help grow the community.

Resolve disputes and make final decisions when consensus cannot be reached.

### Owner
Owners have administrative access to the project and are the final decision-makers.

#### Responsibilities:

Manage the core team of maintainers and approvers.

Set the overall vision and strategy for the project.

Handle administrative tasks, such as managing the project's repository and other resources.

Represent the project in the broader open-source community.

# Credits
Sections of this document have been borrowed from [Kubernetes governance](https://github.com/kubernetes/community/blob/master/governance.md).
@@ -1,12 +1,34 @@
## Overview

This document contains a list of maintainers in this repo.
This file lists the maintainers and committers of the Krkn project.

In short, maintainers are people who are in charge of the maintenance of the Krkn project. Committers are active community members who have shown that they are committed to the continuous development of the project through ongoing engagement with the community.

For a detailed description of the roles, see the [Governance](./GOVERNANCE.md) page.

## Current Maintainers

| Maintainer | GitHub ID | Email |
|---------------------| --------------------------------------------------------- | ----------------------- |
| Ravi Elluri | [chaitanyaenr](https://github.com/chaitanyaenr) | nelluri@redhat.com |
| Pradeep Surisetty | [psuriset](https://github.com/psuriset) | psuriset@redhat.com |
| Paige Rubendall | [paigerube14](https://github.com/paigerube14) | prubenda@redhat.com |
| Tullio Sebastiani | [tsebastiani](https://github.com/tsebastiani) | tsebasti@redhat.com |
| Maintainer | GitHub ID | Email | Contribution Level |
|---------------------| --------------------------------------------------------- | ----------------------- | ---------------------- |
| Ravi Elluri | [chaitanyaenr](https://github.com/chaitanyaenr) | nelluri@redhat.com | Owner |
| Pradeep Surisetty | [psuriset](https://github.com/psuriset) | psuriset@redhat.com | Owner |
| Paige Patton | [paigerube14](https://github.com/paigerube14) | prubenda@redhat.com | Maintainer |
| Tullio Sebastiani | [tsebastiani](https://github.com/tsebastiani) | tsebasti@redhat.com | Maintainer |
| Yogananth Subramanian | [yogananth-subramanian](https://github.com/yogananth-subramanian) | ysubrama@redhat.com | Maintainer |
| Sahil Shah | [shahsahil264](https://github.com/shahsahil264) | sahshah@redhat.com | Member |

Note: It is mandatory for all Krkn community members to follow our [Code of Conduct](./CODE_OF_CONDUCT.md)

## Contributor Ladder
This project follows a contributor ladder model, where contributors can take on more responsibilities as they gain experience and demonstrate their commitment to the project.
The roles are:
* Contributor: A contributor to the community, whether through code, docs or issues

* Member: A contributor who is active in the community and reviews pull requests.

* Maintainer: A contributor who is responsible for the overall health and direction of the project.

* Owner: A contributor who has administrative ownership of the project.
README.md
@@ -22,14 +22,8 @@ Kraken injects deliberate failures into Kubernetes clusters to check if it is re
Instructions on how to setup, configure and run Kraken can be found in the [documentation](https://krkn-chaos.dev/docs/).

### Blogs and other useful resources
- Blog post on introduction to Kraken: https://www.openshift.com/blog/introduction-to-kraken-a-chaos-tool-for-openshift/kubernetes
- Discussion and demo on how Kraken can be leveraged to ensure OpenShift is reliable, performant and scalable: https://www.youtube.com/watch?v=s1PvupI5sD0&ab_channel=OpenShift
- Blog post emphasizing the importance of making Chaos part of Performance and Scale runs to mimic the production environments: https://www.openshift.com/blog/making-chaos-part-of-kubernetes/openshift-performance-and-scalability-tests
- Blog post on findings from Chaos test runs: https://cloud.redhat.com/blog/openshift/kubernetes-chaos-stories
- Discussion with CNCF TAG App Delivery on Krkn workflow, features and addition to CNCF sandbox: [Github](https://github.com/cncf/sandbox/issues/44), [Tracker](https://github.com/cncf/tag-app-delivery/issues/465), [recording](https://www.youtube.com/watch?v=nXQkBFK_MWc&t=722s)
- Blog post on supercharging chaos testing using AI integration in Krkn: https://www.redhat.com/en/blog/supercharging-chaos-testing-using-ai
- Blog post announcing Krkn joining CNCF Sandbox: https://www.redhat.com/en/blog/krknchaos-joining-cncf-sandbox
### Blogs, podcasts and interviews
Additional resources, including blog posts, podcasts, and community interviews, can be found on the [website](https://krkn-chaos.dev/blog)

### Roadmap
RELEASE.md (new file)
@@ -0,0 +1,55 @@
### Release Protocol: The Community-First Cycle

This document outlines the project's release protocol, a methodology designed to ensure a responsive and transparent development process that is closely aligned with the needs of our users and contributors. This protocol is tailored for projects in their early stages, prioritizing agility and community feedback over a rigid, time-boxed schedule.

#### 1. Key Principles

* **Community as the Compass:** The primary driver for all development is feedback from our user and contributor community.
* **Prioritization by Impact:** Tasks are prioritized based on their impact on user experience, the urgency of bug fixes, and the value of community-contributed features.
* **Event-Driven Releases:** Releases are not bound by a fixed calendar. New versions are published when a significant body of work is complete, a critical issue is resolved, or a new feature is ready for adoption.
* **Transparency and Communication:** All development decisions, progress, and plans are communicated openly through our issue tracker, pull requests, and community channels.

#### 2. The Release Lifecycle

The release cycle is a continuous flow of activities rather than a series of sequential phases.

**2.1. Discovery & Prioritization**
* New features and bug fixes are identified through user feedback on our issue tracker, community discussions, and direct contributions.
* The core maintainers, in collaboration with the community, continuously evaluate and tag issues to create an open and dynamic backlog.

**2.2. Development & Code Review**
* Work is initiated based on the highest-priority items in the backlog.
* All code contributions are made via pull requests (PRs).
* PRs are reviewed by maintainers and other contributors to ensure code quality, adherence to project standards, and overall stability.

**2.3. Release Readiness**
A new release is considered ready when one of the following conditions is met:
* A major new feature has been completed and thoroughly tested.
* A critical security vulnerability or bug has been addressed.
* A sufficient number of smaller improvements and fixes have been merged, providing meaningful value to users.

**2.4. Versioning**
We adhere to [**Semantic Versioning 2.0.0**](https://semver.org/).
* **Major version (`X.y.z`)**: Reserved for releases that introduce breaking changes.
* **Minor version (`x.Y.z`)**: Used for new features or significant non-breaking changes.
* **Patch version (`x.y.Z`)**: Used for bug fixes and small, non-functional improvements.
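For example, a bug-fix-only release after `1.4.2` would be published as `1.4.3`, a backwards-compatible feature release as `1.5.0`, and a breaking change as `2.0.0` (the version numbers here are illustrative).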

#### 3. Roles and Responsibilities

* **Members:** The [core team](https://github.com/krkn-chaos/krkn/blob/main/MAINTAINERS.md) responsible for the project's health. Their duties include:
    * Reviewing pull requests.
    * Contributing code and documentation via pull requests.
    * Engaging in discussions and providing feedback.
* **Maintainers and Owners:** The [core team](https://github.com/krkn-chaos/krkn/blob/main/MAINTAINERS.md) responsible for the project's health. Their duties include:
    * Facilitating community discussions and prioritization.
    * Reviewing and merging pull requests.
    * Cutting and announcing official releases.
* **Contributors:** The community. Their duties include:
    * Reporting bugs and suggesting new features.
    * Contributing code and documentation via pull requests.
    * Engaging in discussions and providing feedback.

#### 4. Adoption and Future Evolution

This protocol is designed for the current stage of the project. As the project matures and the contributor base grows, the maintainers will evaluate the need for a more structured methodology to ensure continued scalability and stability.
ROADMAP.md
@@ -2,11 +2,11 @@

Following are a list of enhancements that we are planning to work on adding support in Krkn. Of course any help/contributions are greatly appreciated.

- [ ] [Ability to run multiple chaos scenarios in parallel under load to mimic real world outages](https://github.com/krkn-chaos/krkn/issues/424)
- [x] [Ability to run multiple chaos scenarios in parallel under load to mimic real world outages](https://github.com/krkn-chaos/krkn/issues/424)
- [x] [Centralized storage for chaos experiments artifacts](https://github.com/krkn-chaos/krkn/issues/423)
- [ ] [Support for causing DNS outages](https://github.com/krkn-chaos/krkn/issues/394)
- [x] [Support for causing DNS outages](https://github.com/krkn-chaos/krkn/issues/394)
- [x] [Chaos recommender](https://github.com/krkn-chaos/krkn/tree/main/utils/chaos-recommender) to suggest scenarios having probability of impacting the service under test using profiling results
- [] Chaos AI integration to improve test coverage while reducing fault space to save costs and execution time
- [x] Chaos AI integration to improve test coverage while reducing fault space to save costs and execution time [krkn-chaos-ai](https://github.com/krkn-chaos/krkn-chaos-ai)
- [x] [Support for pod level network traffic shaping](https://github.com/krkn-chaos/krkn/issues/393)
- [ ] [Ability to visualize the metrics that are being captured by Kraken and stored in Elasticsearch](https://github.com/krkn-chaos/krkn/issues/124)
- [x] Support for running all the scenarios of Kraken on Kubernetes distribution - see https://github.com/krkn-chaos/krkn/issues/185, https://github.com/redhat-chaos/krkn/issues/186
@@ -14,3 +14,7 @@ Following are a list of enhancements that we are planning to work on adding supp
- [x] [Switch documentation references to Kubernetes](https://github.com/krkn-chaos/krkn/issues/495)
- [x] [OCP and Kubernetes functionalities segregation](https://github.com/krkn-chaos/krkn/issues/497)
- [x] [Krknctl - client for running Krkn scenarios with ease](https://github.com/krkn-chaos/krknctl)
- [x] [AI Chat bot to help get started with Krkn and commands](https://github.com/krkn-chaos/krkn-lightspeed)
- [ ] [Ability to roll back cluster to original state if chaos fails](https://github.com/krkn-chaos/krkn/issues/804)
- [ ] Add recovery time metrics to each scenario for better regression analysis
- [ ] [Add resiliency scoring to chaos scenarios ran on cluster](https://github.com/krkn-chaos/krkn/issues/125)
@@ -40,4 +40,4 @@ The security team currently consists of the [Maintainers of Krkn](https://github

## Process and Supported Releases

The Krkn security team will investigate and provide a fix in a timely mannner depending on the severity. The fix will be included in the new release of Krkn and details will be included in the release notes.
The Krkn security team will investigate and provide a fix in a timely manner depending on the severity. The fix will be included in the new release of Krkn and details will be included in the release notes.
@@ -39,7 +39,7 @@ cerberus:
|
||||
Sunday:
|
||||
slack_team_alias: # The slack team alias to be tagged while reporting failures in the slack channel when no watcher is assigned
|
||||
|
||||
custom_checks: # Relative paths of files conataining additional user defined checks
|
||||
custom_checks: # Relative paths of files containing additional user defined checks
|
||||
|
||||
tunings:
|
||||
timeout: 3 # Number of seconds before requests fail
|
||||
|
||||
@@ -1,62 +1,65 @@
|
||||
kraken:
|
||||
kubeconfig_path: ~/.kube/config # Path to kubeconfig
|
||||
exit_on_failure: False # Exit when a post action scenario fails
|
||||
auto_rollback: True # Enable auto rollback for scenarios.
|
||||
rollback_versions_directory: /tmp/kraken-rollback # Directory to store rollback version files.
|
||||
publish_kraken_status: True # Can be accessed at http://0.0.0.0:8081
|
||||
signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer docs/signal.md for more details
|
||||
signal_address: 0.0.0.0 # Signal listening address
|
||||
port: 8081 # Signal port
|
||||
chaos_scenarios:
|
||||
# List of policies/chaos scenarios to load
|
||||
- hog_scenarios:
|
||||
- scenarios/kube/cpu-hog.yml
|
||||
- scenarios/kube/memory-hog.yml
|
||||
- scenarios/kube/io-hog.yml
|
||||
- application_outages_scenarios:
|
||||
- scenarios/openshift/app_outage.yaml
|
||||
- container_scenarios: # List of chaos pod scenarios to load
|
||||
- scenarios/openshift/container_etcd.yml
|
||||
- pod_network_scenarios:
|
||||
- scenarios/openshift/network_chaos_ingress.yml
|
||||
- scenarios/openshift/pod_network_outage.yml
|
||||
- pod_disruption_scenarios:
|
||||
- scenarios/openshift/etcd.yml
|
||||
- scenarios/openshift/regex_openshift_pod_kill.yml
|
||||
- scenarios/openshift/prom_kill.yml
|
||||
- scenarios/openshift/openshift-apiserver.yml
|
||||
- scenarios/openshift/openshift-kube-apiserver.yml
|
||||
- node_scenarios: # List of chaos node scenarios to load
|
||||
- scenarios/openshift/aws_node_scenarios.yml
|
||||
- scenarios/openshift/vmware_node_scenarios.yml
|
||||
- scenarios/openshift/ibmcloud_node_scenarios.yml
|
||||
- time_scenarios: # List of chaos time scenarios to load
|
||||
- scenarios/openshift/time_scenarios_example.yml
|
||||
- cluster_shut_down_scenarios:
|
||||
- scenarios/openshift/cluster_shut_down_scenario.yml
|
||||
- service_disruption_scenarios:
|
||||
- scenarios/openshift/regex_namespace.yaml
|
||||
- scenarios/openshift/ingress_namespace.yaml
|
||||
- zone_outages_scenarios:
|
||||
- scenarios/openshift/zone_outage.yaml
|
||||
- pvc_scenarios:
|
||||
- scenarios/openshift/pvc_scenario.yaml
|
||||
- network_chaos_scenarios:
|
||||
- scenarios/openshift/network_chaos.yaml
|
||||
- service_hijacking_scenarios:
|
||||
- scenarios/kube/service_hijacking.yaml
|
||||
- syn_flood_scenarios:
|
||||
- scenarios/kube/syn_flood.yaml
|
||||
- network_chaos_ng_scenarios:
|
||||
- scenarios/kube/network-filter.yml
|
||||
# List of policies/chaos scenarios to load
|
||||
- hog_scenarios:
|
||||
- scenarios/kube/cpu-hog.yml
|
||||
- scenarios/kube/memory-hog.yml
|
||||
- scenarios/kube/io-hog.yml
|
||||
- application_outages_scenarios:
|
||||
- scenarios/openshift/app_outage.yaml
|
||||
- container_scenarios: # List of chaos pod scenarios to load
|
||||
- scenarios/openshift/container_etcd.yml
|
||||
- pod_network_scenarios:
|
||||
- scenarios/openshift/network_chaos_ingress.yml
|
||||
- scenarios/openshift/pod_network_outage.yml
|
||||
- pod_disruption_scenarios:
|
||||
- scenarios/openshift/etcd.yml
|
||||
- scenarios/openshift/regex_openshift_pod_kill.yml
|
||||
- scenarios/openshift/prom_kill.yml
|
||||
- scenarios/openshift/openshift-apiserver.yml
|
||||
- scenarios/openshift/openshift-kube-apiserver.yml
|
||||
- node_scenarios: # List of chaos node scenarios to load
|
||||
- scenarios/openshift/aws_node_scenarios.yml
|
||||
- scenarios/openshift/vmware_node_scenarios.yml
|
||||
- scenarios/openshift/ibmcloud_node_scenarios.yml
|
||||
- time_scenarios: # List of chaos time scenarios to load
|
||||
- scenarios/openshift/time_scenarios_example.yml
|
||||
- cluster_shut_down_scenarios:
|
||||
- scenarios/openshift/cluster_shut_down_scenario.yml
|
||||
- service_disruption_scenarios:
|
||||
- scenarios/openshift/regex_namespace.yaml
|
||||
- scenarios/openshift/ingress_namespace.yaml
|
||||
- zone_outages_scenarios:
|
||||
- scenarios/openshift/zone_outage.yaml
|
||||
- pvc_scenarios:
|
||||
- scenarios/openshift/pvc_scenario.yaml
|
||||
- network_chaos_scenarios:
|
||||
- scenarios/openshift/network_chaos.yaml
|
||||
- service_hijacking_scenarios:
|
||||
- scenarios/kube/service_hijacking.yaml
|
||||
- syn_flood_scenarios:
|
||||
- scenarios/kube/syn_flood.yaml
|
||||
- network_chaos_ng_scenarios:
|
||||
- scenarios/kube/pod-network-filter.yml
|
||||
- scenarios/kube/node-network-filter.yml
|
||||
- kubevirt_vm_outage:
|
||||
- scenarios/kubevirt/kubevirt-vm-outage.yaml
|
||||
|
||||
cerberus:
|
||||
cerberus_enabled: False # Enable it when cerberus is previously installed
|
||||
cerberus_url: # When cerberus_enabled is set to True, provide the url where cerberus publishes go/no-go signal
|
||||
check_applicaton_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
|
||||
check_application_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
|
||||
|
||||
performance_monitoring:
|
||||
deploy_dashboards: False # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift
|
||||
repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
|
||||
prometheus_url: '' # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
|
||||
prometheus_url: '' # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
|
||||
prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
|
||||
uuid: # uuid for the run is generated by default if not set
|
||||
enable_alerts: False # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
|
||||
@@ -76,7 +79,7 @@ elastic:
|
||||
telemetry_index: "krkn-telemetry"
|
||||
|
||||
tunings:
|
||||
wait_duration: 60 # Duration to wait between each chaos scenario
|
||||
wait_duration: 1 # Duration to wait between each chaos scenario
|
||||
iterations: 1 # Number of times to execute the scenarios
|
||||
daemon_mode: False # Iterations are set to infinity which means that the kraken will cause chaos forever
|
||||
telemetry:
|
||||
@@ -90,7 +93,7 @@ telemetry:
|
||||
prometheus_pod_name: "" # name of the prometheus pod (if distribution is kubernetes)
|
||||
full_prometheus_backup: False # if is set to False only the /prometheus/wal folder will be downloaded.
|
||||
backup_threads: 5 # number of telemetry download/upload threads
|
||||
archive_path: /tmp # local path where the archive files will be temporarly stored
|
||||
archive_path: /tmp # local path where the archive files will be temporarily stored
|
||||
max_retries: 0 # maximum number of upload retries (if 0 will retry forever)
|
||||
run_tag: '' # if set, this will be appended to the run folder in the bucket (useful to group the runs)
|
||||
archive_size: 500000
|
||||
@@ -116,3 +119,13 @@ health_checks: # Utilizing health c
|
||||
bearer_token: # Bearer token for authentication if any
|
||||
auth: # Provide authentication credentials (username , password) in tuple format if any, ex:("admin","secretpassword")
|
||||
exit_on_failure: # If value is True exits when health check failed for application, values can be True/False
|
||||
|
||||
kubevirt_checks: # Utilizing virt check endpoints to observe SSH reachability of VMIs during chaos injection.
|
||||
interval: 2 # Interval in seconds to perform virt checks, default value is 2 seconds
|
||||
namespace: # Namespace where to find VMI's
|
||||
name: # Regex Name style of VMI's to watch, optional, will watch all VMI names in the namespace if left blank
|
||||
only_failures: False # Boolean of whether to show all VMIs' failures and successful ssh connections (False), or only failure statuses (True)
|
||||
disconnected: False # Boolean of how to try to connect to the VMIs; if True will use the ip_address to try ssh from within a node, if false will use the name and uses virtctl to try to connect; Default is False
|
||||
ssh_node: "" # If set, will be a backup way to ssh to a node. Will want to set to a node that isn't targeted in chaos
|
||||
node_names: ""
|
||||
exit_on_failure: # If value is True and VMI's are failing post chaos returns failure, values can be True/False
|
||||
@@ -7,26 +7,33 @@ kraken:
|
||||
signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer docs/signal.md for more details
|
||||
signal_address: 0.0.0.0 # Signal listening address
|
||||
chaos_scenarios: # List of policies/chaos scenarios to load
|
||||
- plugin_scenarios:
|
||||
- scenarios/kind/scheduler.yml
|
||||
- node_scenarios:
|
||||
- scenarios/kind/node_scenarios_example.yml
|
||||
- pod_disruption_scenarios:
|
||||
- scenarios/kube/pod.yml
|
||||
|
||||
cerberus:
|
||||
cerberus_enabled: False # Enable it when cerberus is previously installed
|
||||
cerberus_url: # When cerberus_enabled is set to True, provide the url where cerberus publishes go/no-go signal
|
||||
check_applicaton_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
|
||||
check_application_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
|
||||
|
||||
performance_monitoring:
|
||||
deploy_dashboards: False # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift
|
||||
repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
|
||||
prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
|
||||
prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
|
||||
uuid: # uuid for the run is generated by default if not set
|
||||
enable_alerts: False # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
|
||||
alert_profile: config/alerts.yaml # Path to alert profile with the prometheus queries
|
||||
|
||||
elastic:
|
||||
enable_elastic: False
|
||||
|
||||
tunings:
|
||||
wait_duration: 60 # Duration to wait between each chaos scenario
|
||||
iterations: 1 # Number of times to execute the scenarios
|
||||
daemon_mode: False # Iterations are set to infinity which means that the kraken will cause chaos forever
|
||||
|
||||
telemetry:
|
||||
enabled: False # enable/disables the telemetry collection feature
|
||||
archive_path: /tmp # local path where the archive files will be temporarily stored
|
||||
events_backup: False # enables/disables cluster events collection
|
||||
logs_backup: False
|
||||
|
||||
health_checks: # Utilizing health check endpoints to observe application behavior during chaos injection.
|
||||
|
||||
@@ -14,11 +14,9 @@ kraken:
|
||||
cerberus:
|
||||
cerberus_enabled: False # Enable it when cerberus is previously installed
|
||||
cerberus_url: # When cerberus_enabled is set to True, provide the url where cerberus publishes go/no-go signal
|
||||
check_applicaton_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
|
||||
check_application_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
|
||||
|
||||
performance_monitoring:
|
||||
deploy_dashboards: False # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift
|
||||
repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
|
||||
prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
|
||||
prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
|
||||
uuid: # uuid for the run is generated by default if not set
|
||||
|
||||
@@ -35,7 +35,7 @@ kraken:
|
||||
cerberus:
|
||||
cerberus_enabled: True # Enable it when cerberus is previously installed
|
||||
cerberus_url: http://0.0.0.0:8080 # When cerberus_enabled is set to True, provide the url where cerberus publishes go/no-go signal
|
||||
check_applicaton_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
|
||||
check_application_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
|
||||
|
||||
performance_monitoring:
|
||||
deploy_dashboards: True # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift
|
||||
@@ -61,7 +61,7 @@ telemetry:
|
||||
prometheus_backup: True # enables/disables prometheus data collection
|
||||
full_prometheus_backup: False # if is set to False only the /prometheus/wal folder will be downloaded.
|
||||
backup_threads: 5 # number of telemetry download/upload threads
|
||||
archive_path: /tmp # local path where the archive files will be temporarly stored
|
||||
archive_path: /tmp # local path where the archive files will be temporarily stored
|
||||
max_retries: 0 # maximum number of upload retries (if 0 will retry forever)
|
||||
run_tag: '' # if set, this will be appended to the run folder in the bucket (useful to group the runs)
|
||||
archive_size: 500000 # the size of the prometheus data archive size in KB. The lower the size of archive is
|
||||
|
||||
@@ -1,22 +1,35 @@
|
||||
# oc build
|
||||
FROM golang:1.23.1 AS oc-build
|
||||
FROM golang:1.24.9 AS oc-build
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends libkrb5-dev
|
||||
WORKDIR /tmp
|
||||
# oc build
|
||||
RUN git clone --branch release-4.18 https://github.com/openshift/oc.git
|
||||
WORKDIR /tmp/oc
|
||||
RUN go mod edit -go 1.23.1 &&\
|
||||
go get github.com/moby/buildkit@v0.12.5 &&\
|
||||
go get github.com/containerd/containerd@v1.7.11&&\
|
||||
go get github.com/docker/docker@v25.0.6&&\
|
||||
go get github.com/opencontainers/runc@v1.1.14&&\
|
||||
go get github.com/go-git/go-git/v5@v5.13.0&&\
|
||||
go get golang.org/x/net@v0.36.0&&\
|
||||
go get github.com/containerd/containerd@v1.7.27&&\
|
||||
go get golang.org/x/oauth2@v0.27.0&&\
|
||||
go get golang.org/x/crypto@v0.35.0&&\
|
||||
RUN go mod edit -go 1.24.9 &&\
|
||||
go mod edit -require github.com/moby/buildkit@v0.12.5 &&\
|
||||
go mod edit -require github.com/containerd/containerd@v1.7.29&&\
|
||||
go mod edit -require github.com/docker/docker@v27.5.1+incompatible&&\
|
||||
go mod edit -require github.com/opencontainers/runc@v1.2.8&&\
|
||||
go mod edit -require github.com/go-git/go-git/v5@v5.13.0&&\
|
||||
go mod edit -require github.com/opencontainers/selinux@v1.13.0&&\
|
||||
go mod edit -require github.com/ulikunitz/xz@v0.5.15&&\
|
||||
go mod edit -require golang.org/x/net@v0.38.0&&\
|
||||
go mod edit -require github.com/containerd/containerd@v1.7.27&&\
|
||||
go mod edit -require golang.org/x/oauth2@v0.27.0&&\
|
||||
go mod edit -require golang.org/x/crypto@v0.35.0&&\
|
||||
go mod edit -replace github.com/containerd/containerd@v1.7.27=github.com/containerd/containerd@v1.7.29&&\
|
||||
go mod tidy && go mod vendor
|
||||
|
||||
RUN make GO_REQUIRED_MIN_VERSION:= oc
|
||||
|
||||
# virtctl build
|
||||
WORKDIR /tmp
|
||||
RUN git clone https://github.com/kubevirt/kubevirt.git
|
||||
WORKDIR /tmp/kubevirt
|
||||
RUN go mod edit -go 1.24.9 &&\
|
||||
go work use &&\
|
||||
go build -o virtctl ./cmd/virtctl/
|
||||
|
||||
FROM fedora:40
|
||||
ARG PR_NUMBER
|
||||
ARG TAG
|
||||
@@ -28,16 +41,20 @@ ENV KUBECONFIG /home/krkn/.kube/config
|
||||
|
||||
# This overwrites any existing configuration in /etc/yum.repos.d/kubernetes.repo
|
||||
RUN dnf update && dnf install -y --setopt=install_weak_deps=False \
|
||||
git python39 jq yq gettext wget which &&\
|
||||
git python3.11 jq yq gettext wget which ipmitool openssh-server &&\
|
||||
dnf clean all
|
||||
|
||||
# copy oc client binary from oc-build image
|
||||
COPY --from=oc-build /tmp/oc/oc /usr/bin/oc
|
||||
COPY --from=oc-build /tmp/kubevirt/virtctl /usr/bin/virtctl
|
||||
|
||||
# krkn build
|
||||
RUN git clone https://github.com/krkn-chaos/krkn.git /home/krkn/kraken && \
|
||||
mkdir -p /home/krkn/.kube
|
||||
|
||||
RUN mkdir -p /home/krkn/.ssh && \
|
||||
chmod 700 /home/krkn/.ssh
|
||||
|
||||
WORKDIR /home/krkn/kraken
|
||||
|
||||
# default behaviour will be to build main
|
||||
@@ -46,17 +63,28 @@ RUN if [ -n "$PR_NUMBER" ]; then git fetch origin pull/${PR_NUMBER}/head:pr-${PR
|
||||
# if it is a TAG trigger checkout the tag
|
||||
RUN if [ -n "$TAG" ]; then git checkout "$TAG";fi
|
||||
|
||||
RUN python3.9 -m ensurepip --upgrade --default-pip
|
||||
RUN python3.9 -m pip install --upgrade pip setuptools==70.0.0
|
||||
RUN pip3.9 install -r requirements.txt
|
||||
RUN pip3.9 install jsonschema
|
||||
RUN python3.11 -m ensurepip --upgrade --default-pip
|
||||
RUN python3.11 -m pip install --upgrade pip setuptools==78.1.1
|
||||
|
||||
# removes the vulnerable versions of setuptools and pip
|
||||
RUN rm -rf "$(pip cache dir)"
|
||||
RUN rm -rf /tmp/*
|
||||
RUN rm -rf /usr/local/lib/python3.11/ensurepip/_bundled
|
||||
RUN pip3.11 install -r requirements.txt
|
||||
RUN pip3.11 install jsonschema
|
||||
|
||||
LABEL krknctl.title.global="Krkn Base Image"
|
||||
LABEL krknctl.description.global="This is the krkn base image."
|
||||
LABEL krknctl.input_fields.global='$KRKNCTL_INPUT'
|
||||
|
||||
# SSH setup script
|
||||
RUN chmod +x /home/krkn/kraken/containers/setup-ssh.sh
|
||||
|
||||
# Main entrypoint script
|
||||
RUN chmod +x /home/krkn/kraken/containers/entrypoint.sh
|
||||
|
||||
RUN chown -R krkn:krkn /home/krkn && chmod 755 /home/krkn
|
||||
USER krkn
|
||||
ENTRYPOINT ["python3.9", "run_kraken.py"]
|
||||
|
||||
ENTRYPOINT ["/bin/bash", "/home/krkn/kraken/containers/entrypoint.sh"]
|
||||
CMD ["--config=config/config.yaml"]
|
||||
|
||||
containers/entrypoint.sh (new file)
@@ -0,0 +1,7 @@
|
||||
#!/bin/bash
|
||||
# Run SSH setup
|
||||
./containers/setup-ssh.sh
|
||||
# Change to kraken directory
|
||||
|
||||
# Execute the main command
|
||||
exec python3.9 run_kraken.py "$@"
|
||||
@@ -31,6 +31,24 @@
|
||||
"separator": ",",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "ssh-public-key",
|
||||
"short_description": "Krkn ssh public key path",
|
||||
"description": "Sets the path where krkn will search for ssh public key (in container)",
|
||||
"variable": "KRKN_SSH_PUBLIC",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "ssh-private-key",
|
||||
"short_description": "Krkn ssh private key path",
|
||||
"description": "Sets the path where krkn will search for ssh private key (in container)",
|
||||
"variable": "KRKN_SSH_PRIVATE",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "krkn-kubeconfig",
|
||||
"short_description": "Krkn kubeconfig path",
|
||||
@@ -67,6 +85,24 @@
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "prometheus-url",
|
||||
"short_description": "Prometheus url",
|
||||
"description": "Prometheus url for when running on kuberenetes",
|
||||
"variable": "PROMETHEUS_URL",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "prometheus-token",
|
||||
"short_description": "Prometheus bearer token",
|
||||
"description": "Prometheus bearer token for prometheus url authentication",
|
||||
"variable": "PROMETHEUS_TOKEN",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "uuid",
|
||||
"short_description": "Sets krkn run uuid",
|
||||
@@ -425,6 +461,84 @@
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "kubevirt-check-interval",
|
||||
"short_description": "Kube Virt check interval",
|
||||
"description": "How often to check the kube virt check Vms ssh status",
|
||||
"variable": "KUBE_VIRT_CHECK_INTERVAL",
|
||||
"type": "number",
|
||||
"default": "2",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "kubevirt-namespace",
|
||||
"short_description": "KubeVirt namespace to check",
|
||||
"description": "KubeVirt namespace to check the health of",
|
||||
"variable": "KUBE_VIRT_NAMESPACE",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "kubevirt-name",
|
||||
"short_description": "KubeVirt regex names to watch",
|
||||
"description": "KubeVirt regex names to check VMs",
|
||||
"variable": "KUBE_VIRT_NAME",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "kubevirt-only-failures",
|
||||
"short_description": "KubeVirt checks only report if failure occurs",
|
||||
"description": "KubeVirt checks only report if failure occurs",
|
||||
"variable": "KUBE_VIRT_FAILURES",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False,true,false",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "kubevirt-disconnected",
|
||||
"short_description": "KubeVirt checks in disconnected mode",
|
||||
"description": "KubeVirt checks in disconnected mode, bypassing the clusters Api",
|
||||
"variable": "KUBE_VIRT_DISCONNECTED",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False,true,false",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "kubevirt-ssh-node",
|
||||
"short_description": "KubeVirt node to ssh from",
|
||||
"description": "KubeVirt node to ssh from, should be available whole chaos run",
|
||||
"variable": "KUBE_VIRT_SSH_NODE",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "kubevirt-exit-on-failure",
|
||||
"short_description": "KubeVirt fail if failed vms at end of run",
|
||||
"description": "KubeVirt fails run if vms still have false status",
|
||||
"variable": "KUBE_VIRT_EXIT_ON_FAIL",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False,true,false",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "kubevirt-node-node",
|
||||
"short_description": "KubeVirt node to filter vms on",
|
||||
"description": "Only track VMs in KubeVirt on given node name",
|
||||
"variable": "KUBE_VIRT_NODE_NAME",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "krkn-debug",
|
||||
"short_description": "Krkn debug mode",
|
||||
|
||||
containers/setup-ssh.sh (new file)
@@ -0,0 +1,73 @@
|
||||
#!/bin/bash
|
||||
# Setup SSH key if mounted
|
||||
# Support multiple mount locations
|
||||
MOUNTED_PRIVATE_KEY_ALT="/secrets/id_rsa"
|
||||
MOUNTED_PRIVATE_KEY="/home/krkn/.ssh/id_rsa"
|
||||
MOUNTED_PUBLIC_KEY="/home/krkn/.ssh/id_rsa.pub"
|
||||
WORKING_KEY="/home/krkn/.ssh/id_rsa.key"
|
||||
|
||||
# Determine which source to use
|
||||
SOURCE_KEY=""
|
||||
if [ -f "$MOUNTED_PRIVATE_KEY_ALT" ]; then
|
||||
SOURCE_KEY="$MOUNTED_PRIVATE_KEY_ALT"
|
||||
echo "Found SSH key at alternative location: $SOURCE_KEY"
|
||||
elif [ -f "$MOUNTED_PRIVATE_KEY" ]; then
|
||||
SOURCE_KEY="$MOUNTED_PRIVATE_KEY"
|
||||
echo "Found SSH key at default location: $SOURCE_KEY"
|
||||
fi
|
||||
|
||||
# Setup SSH private key and create config for outbound connections
|
||||
if [ -n "$SOURCE_KEY" ]; then
|
||||
echo "Setting up SSH private key from: $SOURCE_KEY"
|
||||
|
||||
# Check current permissions and ownership
|
||||
ls -la "$SOURCE_KEY"
|
||||
|
||||
# Since the mounted key might be owned by root and we run as krkn user,
|
||||
# we cannot modify it directly. Copy to a new location we can control.
|
||||
echo "Copying SSH key to working location: $WORKING_KEY"
|
||||
|
||||
# Try to copy - if readable by anyone, this will work
|
||||
if cp "$SOURCE_KEY" "$WORKING_KEY" 2>/dev/null || cat "$SOURCE_KEY" > "$WORKING_KEY" 2>/dev/null; then
|
||||
chmod 600 "$WORKING_KEY"
|
||||
echo "SSH key copied successfully"
|
||||
ls -la "$WORKING_KEY"
|
||||
|
||||
# Verify the key is readable
|
||||
if ssh-keygen -y -f "$WORKING_KEY" > /dev/null 2>&1; then
|
||||
echo "SSH private key verified successfully"
|
||||
else
|
||||
echo "Warning: SSH key verification failed, but continuing anyway"
|
||||
fi
|
||||
|
||||
# Create SSH config to use the working key
|
||||
cat > /home/krkn/.ssh/config <<EOF
|
||||
Host *
|
||||
IdentityFile $WORKING_KEY
|
||||
StrictHostKeyChecking no
|
||||
UserKnownHostsFile /dev/null
|
||||
EOF
|
||||
chmod 600 /home/krkn/.ssh/config
|
||||
echo "SSH config created with default identity: $WORKING_KEY"
|
||||
else
|
||||
echo "ERROR: Cannot read SSH key at $SOURCE_KEY"
|
||||
echo "Key is owned by: $(stat -c '%U:%G' "$SOURCE_KEY" 2>/dev/null || stat -f '%Su:%Sg' "$SOURCE_KEY" 2>/dev/null)"
|
||||
echo ""
|
||||
echo "Solutions:"
|
||||
echo "1. Mount with world-readable permissions (less secure): chmod 644 /path/to/key"
|
||||
echo "2. Mount to /secrets/id_rsa instead of /home/krkn/.ssh/id_rsa"
|
||||
echo "3. Change ownership on host: chown \$(id -u):\$(id -g) /path/to/key"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Setup SSH public key if mounted (for inbound server access)
|
||||
if [ -f "$MOUNTED_PUBLIC_KEY" ]; then
|
||||
echo "SSH public key already present at $MOUNTED_PUBLIC_KEY"
|
||||
# Try to fix permissions (will fail silently if file is mounted read-only or owned by another user)
|
||||
chmod 600 "$MOUNTED_PUBLIC_KEY" 2>/dev/null
|
||||
if [ ! -f "/home/krkn/.ssh/authorized_keys" ]; then
|
||||
cp "$MOUNTED_PUBLIC_KEY" /home/krkn/.ssh/authorized_keys
|
||||
chmod 600 /home/krkn/.ssh/authorized_keys
|
||||
fi
|
||||
fi
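# Illustrative usage (not part of the script): the private key can be mounted at either
# location the script checks, for example when starting the krkn container:
#   podman run -v $HOME/.ssh/id_rsa:/secrets/id_rsa:ro,Z <krkn-image> --config=config/config.yaml
# The image name and host key path above are placeholders; adjust them to your setup.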
|
||||
@@ -5,6 +5,8 @@ nodes:
|
||||
extraPortMappings:
|
||||
- containerPort: 30036
|
||||
hostPort: 8888
|
||||
- containerPort: 30037
|
||||
hostPort: 8889
|
||||
- role: control-plane
|
||||
- role: control-plane
|
||||
- role: worker
|
||||
|
||||
@@ -14,7 +14,7 @@ def get_status(config, start_time, end_time):
|
||||
if config["cerberus"]["cerberus_enabled"]:
|
||||
cerberus_url = config["cerberus"]["cerberus_url"]
|
||||
check_application_routes = \
|
||||
config["cerberus"]["check_applicaton_routes"]
|
||||
config["cerberus"]["check_application_routes"]
|
||||
if not cerberus_url:
|
||||
logging.error(
|
||||
"url where Cerberus publishes True/False signal "
|
||||
|
||||
@@ -15,13 +15,11 @@ def invoke(command, timeout=None):
|
||||
|
||||
|
||||
# Invokes a given command and returns the stdout
|
||||
def invoke_no_exit(command, timeout=None):
|
||||
def invoke_no_exit(command, timeout=15):
|
||||
output = ""
|
||||
try:
|
||||
output = subprocess.check_output(command, shell=True, universal_newlines=True, timeout=timeout)
|
||||
logging.info("output " + str(output))
|
||||
output = subprocess.check_output(command, shell=True, universal_newlines=True, timeout=timeout, stderr=subprocess.DEVNULL)
|
||||
except Exception as e:
|
||||
logging.error("Failed to run %s, error: %s" % (command, e))
|
||||
return str(e)
|
||||
return output
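# Illustrative example (not from the diff): invoke_no_exit("kubectl get nodes", timeout=30)
# returns the command's stdout on success, or the exception text as a string on failure.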
|
||||
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
import subprocess
|
||||
import logging
|
||||
import git
|
||||
import sys
|
||||
|
||||
|
||||
# Installs a mutable grafana on the Kubernetes/OpenShift cluster and loads the performance dashboards
|
||||
def setup(repo, distribution):
|
||||
if distribution == "kubernetes":
|
||||
command = "cd performance-dashboards/dittybopper && ./k8s-deploy.sh"
|
||||
elif distribution == "openshift":
|
||||
command = "cd performance-dashboards/dittybopper && ./deploy.sh"
|
||||
else:
|
||||
logging.error("Provided distribution: %s is not supported" % (distribution))
|
||||
sys.exit(1)
|
||||
delete_repo = "rm -rf performance-dashboards || exit 0"
|
||||
logging.info(
|
||||
"Cloning, installing mutable grafana on the cluster and loading the dashboards"
|
||||
)
|
||||
try:
|
||||
# delete repo to clone the latest copy if exists
|
||||
subprocess.run(delete_repo, shell=True, universal_newlines=True, timeout=45)
|
||||
# clone the repo
|
||||
git.Repo.clone_from(repo, "performance-dashboards")
|
||||
# deploy performance dashboards
|
||||
subprocess.run(command, shell=True, universal_newlines=True)
|
||||
except Exception as e:
|
||||
logging.error("Failed to install performance-dashboards, error: %s" % (e))
|
||||
@@ -9,6 +9,7 @@ import logging
|
||||
import urllib3
|
||||
import sys
|
||||
import json
|
||||
import tempfile
|
||||
|
||||
import yaml
|
||||
from krkn_lib.elastic.krkn_elastic import KrknElastic
|
||||
@@ -74,10 +75,12 @@ def alerts(
|
||||
def critical_alerts(
|
||||
prom_cli: KrknPrometheus,
|
||||
summary: ChaosRunAlertSummary,
|
||||
elastic: KrknElastic,
|
||||
run_id,
|
||||
scenario,
|
||||
start_time,
|
||||
end_time,
|
||||
elastic_alerts_index
|
||||
):
|
||||
summary.scenario = scenario
|
||||
summary.run_id = run_id
|
||||
@@ -112,7 +115,6 @@ def critical_alerts(
|
||||
summary.chaos_alerts.append(alert)
|
||||
|
||||
post_critical_alerts = prom_cli.process_query(query)
|
||||
|
||||
for alert in post_critical_alerts:
|
||||
if "metric" in alert:
|
||||
alertname = (
|
||||
@@ -135,6 +137,21 @@ def critical_alerts(
|
||||
)
|
||||
alert = ChaosRunAlert(alertname, alertstate, namespace, severity)
|
||||
summary.post_chaos_alerts.append(alert)
|
||||
if elastic:
|
||||
elastic_alert = ElasticAlert(
|
||||
run_uuid=run_id,
|
||||
severity=severity,
|
||||
alert=alertname,
|
||||
created_at=end_time,
|
||||
namespace=namespace,
|
||||
alertstate=alertstate,
|
||||
phase="post_chaos"
|
||||
)
|
||||
result = elastic.push_alert(elastic_alert, elastic_alerts_index)
|
||||
if result == -1:
|
||||
logging.error("failed to save alert on ElasticSearch")
|
||||
pass
|
||||
|
||||
|
||||
during_critical_alerts_count = len(during_critical_alerts)
|
||||
post_critical_alerts_count = len(post_critical_alerts)
|
||||
@@ -148,8 +165,8 @@ def critical_alerts(
|
||||
|
||||
if not firing_alerts:
|
||||
logging.info("No critical alerts are firing!!")
|
||||
|
||||
|
||||
|
||||
|
||||
def metrics(
|
||||
prom_cli: KrknPrometheus,
|
||||
elastic: KrknElastic,
|
||||
@@ -197,7 +214,7 @@ def metrics(
|
||||
end_time=datetime.datetime.fromtimestamp(end_time), granularity=30
|
||||
)
|
||||
else:
|
||||
logging.info('didnt match keys')
|
||||
logging.info("didn't match keys")
|
||||
continue
|
||||
|
||||
for returned_metric in metrics_result:
|
||||
@@ -251,11 +268,37 @@ def metrics(
|
||||
metric[k] = v
|
||||
metric['timestamp'] = str(datetime.datetime.now())
|
||||
metrics_list.append(metric.copy())
|
||||
if elastic:
|
||||
if telemetry_json['virt_checks']:
|
||||
for virt_check in telemetry_json["virt_checks"]:
|
||||
metric_name = "virt_check_recovery"
|
||||
metric = {"metricName": metric_name}
|
||||
for k,v in virt_check.items():
|
||||
metric[k] = v
|
||||
metric['timestamp'] = str(datetime.datetime.now())
|
||||
metrics_list.append(metric.copy())
|
||||
|
||||
save_metrics = False
|
||||
if elastic is not None and elastic_metrics_index is not None:
|
||||
result = elastic.upload_metrics_to_elasticsearch(
|
||||
run_uuid=run_uuid, index=elastic_metrics_index, raw_data=metrics_list
|
||||
)
|
||||
if result == -1:
|
||||
logging.error("failed to save metrics on ElasticSearch")
|
||||
save_metrics = True
|
||||
else:
|
||||
save_metrics = True
|
||||
if save_metrics:
|
||||
local_dir = os.path.join(tempfile.gettempdir(), "krkn_metrics")
|
||||
os.makedirs(local_dir, exist_ok=True)
|
||||
local_file = os.path.join(local_dir, f"{elastic_metrics_index}_{run_uuid}.json")
|
||||
|
||||
try:
|
||||
with open(local_file, "w") as f:
|
||||
json.dump({
|
||||
"run_uuid": run_uuid,
|
||||
"metrics": metrics_list
|
||||
}, f, indent=2)
|
||||
logging.info(f"Metrics saved to {local_file}")
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to save metrics to {local_file}: {e}")
|
||||
return metrics_list
|
||||
|
||||
krkn/rollback/command.py (new file)
@@ -0,0 +1,113 @@
|
||||
import os
|
||||
import logging
|
||||
from typing import Optional, TYPE_CHECKING
|
||||
|
||||
from krkn.rollback.config import RollbackConfig
|
||||
from krkn.rollback.handler import execute_rollback_version_files
|
||||
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
|
||||
|
||||
|
||||
def list_rollback(run_uuid: Optional[str]=None, scenario_type: Optional[str]=None):
|
||||
"""
|
||||
List rollback version files in a tree-like format.
|
||||
|
||||
|
||||
:param run_uuid: Optional run UUID to filter by
|
||||
:param scenario_type: Optional scenario type to filter by
|
||||
:return: Exit code (0 for success, 1 for error)
|
||||
"""
|
||||
logging.info("Listing rollback version files")
|
||||
|
||||
versions_directory = RollbackConfig().versions_directory
|
||||
|
||||
logging.info(f"Rollback versions directory: {versions_directory}")
|
||||
|
||||
# Check if the directory exists first
|
||||
if not os.path.exists(versions_directory):
|
||||
logging.info(f"Rollback versions directory does not exist: {versions_directory}")
|
||||
return 0
|
||||
|
||||
# List all directories and files
|
||||
try:
|
||||
# Get all run directories
|
||||
run_dirs = []
|
||||
for item in os.listdir(versions_directory):
|
||||
item_path = os.path.join(versions_directory, item)
|
||||
if os.path.isdir(item_path):
|
||||
# Apply run_uuid filter if specified
|
||||
if run_uuid is None or run_uuid in item:
|
||||
run_dirs.append(item)
|
||||
|
||||
if not run_dirs:
|
||||
if run_uuid:
|
||||
logging.info(f"No rollback directories found for run_uuid: {run_uuid}")
|
||||
else:
|
||||
logging.info("No rollback directories found")
|
||||
return 0
|
||||
|
||||
# Sort directories for consistent output
|
||||
run_dirs.sort()
|
||||
|
||||
print(f"\n{versions_directory}/")
|
||||
for i, run_dir in enumerate(run_dirs):
|
||||
is_last_dir = (i == len(run_dirs) - 1)
|
||||
dir_prefix = "└── " if is_last_dir else "├── "
|
||||
print(f"{dir_prefix}{run_dir}/")
|
||||
|
||||
# List files in this directory
|
||||
run_dir_path = os.path.join(versions_directory, run_dir)
|
||||
try:
|
||||
files = []
|
||||
for file in os.listdir(run_dir_path):
|
||||
file_path = os.path.join(run_dir_path, file)
|
||||
if os.path.isfile(file_path):
|
||||
# Apply scenario_type filter if specified
|
||||
if scenario_type is None or file.startswith(scenario_type):
|
||||
files.append(file)
|
||||
|
||||
files.sort()
|
||||
for j, file in enumerate(files):
|
||||
is_last_file = (j == len(files) - 1)
|
||||
file_prefix = " └── " if is_last_dir else "│ └── " if is_last_file else ("│ ├── " if not is_last_dir else " ├── ")
|
||||
print(f"{file_prefix}{file}")
|
||||
|
||||
except PermissionError:
|
||||
file_prefix = " └── " if is_last_dir else "│ └── "
|
||||
print(f"{file_prefix}[Permission Denied]")
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"Error listing rollback directory: {e}")
|
||||
return 1
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
def execute_rollback(telemetry_ocp: "KrknTelemetryOpenshift", run_uuid: Optional[str]=None, scenario_type: Optional[str]=None):
|
||||
"""
|
||||
Execute rollback version files and cleanup if successful.
|
||||
|
||||
:param telemetry_ocp: Instance of KrknTelemetryOpenshift
|
||||
:param run_uuid: Optional run UUID to filter by
|
||||
:param scenario_type: Optional scenario type to filter by
|
||||
:return: Exit code (0 for success, 1 for error)
|
||||
"""
|
||||
logging.info("Executing rollback version files")
|
||||
logging.info(f"Executing rollback for run_uuid={run_uuid or '*'}, scenario_type={scenario_type or '*'}")
|
||||
|
||||
try:
|
||||
# Execute rollback version files
|
||||
execute_rollback_version_files(
|
||||
telemetry_ocp,
|
||||
run_uuid,
|
||||
scenario_type,
|
||||
ignore_auto_rollback_config=True
|
||||
)
|
||||
return 0
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"Error during rollback execution: {e}")
|
||||
return 1
|
||||
krkn/rollback/config.py (new file)
@@ -0,0 +1,259 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Callable, TYPE_CHECKING, Optional
|
||||
from typing_extensions import TypeAlias
|
||||
import time
|
||||
import os
|
||||
import logging
|
||||
|
||||
from krkn_lib.utils import get_random_string
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
|
||||
|
||||
RollbackCallable: TypeAlias = Callable[
|
||||
["RollbackContent", "KrknTelemetryOpenshift"], None
|
||||
]
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
|
||||
|
||||
RollbackCallable: TypeAlias = Callable[
|
||||
["RollbackContent", "KrknTelemetryOpenshift"], None
|
||||
]
|
||||
|
||||
|
||||
class SingletonMeta(type):
|
||||
_instances = {}
|
||||
|
||||
def __call__(cls, *args, **kwargs):
|
||||
if cls not in cls._instances:
|
||||
cls._instances[cls] = super().__call__(*args, **kwargs)
|
||||
return cls._instances[cls]
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class RollbackContent:
|
||||
"""
|
||||
RollbackContent is a dataclass that defines the necessary fields for rollback operations.
|
||||
"""
|
||||
|
||||
resource_identifier: str
|
||||
namespace: Optional[str] = None
|
||||
|
||||
def __str__(self):
|
||||
namespace = f'"{self.namespace}"' if self.namespace else "None"
|
||||
resource_identifier = f'"{self.resource_identifier}"'
|
||||
return f"RollbackContent(namespace={namespace}, resource_identifier={resource_identifier})"
|
||||
|
||||
|
||||
class RollbackContext(str):
|
||||
"""
|
||||
RollbackContext is a string formatted as '<timestamp (s) >-<run_uuid>'.
|
||||
It represents the context for rollback operations, uniquely identifying a run.
|
||||
"""
|
||||
|
||||
def __new__(cls, run_uuid: str):
|
||||
return super().__new__(cls, f"{time.time_ns()}-{run_uuid}")
|
||||
|
||||
|
||||
class RollbackConfig(metaclass=SingletonMeta):
|
||||
"""Configuration for the rollback scenarios."""
|
||||
|
||||
def __init__(self):
|
||||
self._auto = False
|
||||
self._versions_directory = ""
|
||||
self._registered = False
|
||||
|
||||
@property
|
||||
def auto(self):
|
||||
return self._auto
|
||||
|
||||
@auto.setter
|
||||
def auto(self, value):
|
||||
if self._registered:
|
||||
raise AttributeError("Can't modify 'auto' after registration")
|
||||
self._auto = value
|
||||
|
||||
@property
|
||||
def versions_directory(self):
|
||||
return self._versions_directory
|
||||
|
||||
@versions_directory.setter
|
||||
def versions_directory(self, value):
|
||||
if self._registered:
|
||||
raise AttributeError("Can't modify 'versions_directory' after registration")
|
||||
self._versions_directory = value
|
||||
@classmethod
|
||||
def register(cls, auto=False, versions_directory=""):
|
||||
"""Initialize and return the singleton instance with given configuration."""
|
||||
instance = cls()
|
||||
instance.auto = auto
|
||||
instance.versions_directory = versions_directory
|
||||
instance._registered = True
|
||||
return instance
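# Illustrative usage (not part of the module; values mirror the sample config above):
# RollbackConfig.register(auto=True, versions_directory="/tmp/kraken-rollback")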
|
||||
|
||||
@classmethod
|
||||
def get_rollback_versions_directory(cls, rollback_context: RollbackContext) -> str:
|
||||
"""
|
||||
Get the rollback context directory for a given rollback context.
|
||||
|
||||
:param rollback_context: The rollback context string.
|
||||
:return: The path to the rollback context directory.
|
||||
"""
|
||||
return f"{cls().versions_directory}/{rollback_context}"
|
||||
|
||||
@classmethod
|
||||
def is_rollback_version_file_format(cls, file_name: str, expected_scenario_type: str | None = None) -> bool:
|
||||
"""
|
||||
Validate the format of a rollback version file name.
|
||||
|
||||
Expected format: <scenario_type>_<timestamp>_<hash_suffix>.py
|
||||
where:
|
||||
- scenario_type: string (can include underscores)
|
||||
- timestamp: integer (nanoseconds since epoch)
|
||||
- hash_suffix: alphanumeric string (length 8)
|
||||
- .py: file extension
|
||||
|
||||
:param file_name: The name of the file to validate.
|
||||
:param expected_scenario_type: The expected scenario type (if any) to validate against.
|
||||
:return: True if the file name matches the expected format, False otherwise.
|
||||
"""
|
||||
if not file_name.endswith(".py"):
|
||||
return False
|
||||
|
||||
parts = file_name.split("_")
|
||||
if len(parts) < 3:
|
||||
return False
|
||||
|
||||
scenario_type = "_".join(parts[:-2])
|
||||
timestamp_str = parts[-2]
|
||||
hash_suffix_with_ext = parts[-1]
|
||||
hash_suffix = hash_suffix_with_ext[:-3]
|
||||
|
||||
if expected_scenario_type and scenario_type != expected_scenario_type:
|
||||
return False
|
||||
|
||||
if not timestamp_str.isdigit():
|
||||
return False
|
||||
|
||||
if len(hash_suffix) != 8 or not hash_suffix.isalnum():
|
||||
return False
|
||||
|
||||
return True
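# Illustrative file names (hypothetical): "pod_disruption_scenarios_1712000000000000000_a1b2c3d4.py"
# matches the expected <scenario_type>_<timestamp>_<hash_suffix>.py pattern, while "notes.txt" does not.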
|
||||
|
||||
@classmethod
|
||||
def is_rollback_context_directory_format(cls, directory_name: str, expected_run_uuid: str | None = None) -> bool:
|
||||
"""
|
||||
Validate the format of a rollback context directory name.
|
||||
|
||||
Expected format: <timestamp>-<run_uuid>
|
||||
where:
|
||||
- timestamp: integer (nanoseconds since epoch)
|
||||
- run_uuid: alphanumeric string
|
||||
|
||||
:param directory_name: The name of the directory to validate.
|
||||
:param expected_run_uuid: The expected run UUID (if any) to validate against.
|
||||
:return: True if the directory name matches the expected format, False otherwise.
|
||||
"""
|
||||
parts = directory_name.split("-", 1)
|
||||
if len(parts) != 2:
|
||||
return False
|
||||
|
||||
timestamp_str, run_uuid = parts
|
||||
|
||||
# Validate timestamp is numeric
|
||||
if not timestamp_str.isdigit():
|
||||
return False
|
||||
|
||||
# Validate run_uuid
|
||||
if expected_run_uuid and expected_run_uuid != run_uuid:
|
||||
return False
|
||||
|
||||
return True
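# Illustrative directory name (hypothetical): "1712000000000000000-9f2c1e7b" matches the
# expected <timestamp>-<run_uuid> pattern produced by RollbackContext.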
|
||||
|
||||
@classmethod
|
||||
def search_rollback_version_files(cls, run_uuid: str | None = None, scenario_type: str | None = None) -> list[str]:
|
||||
"""
|
||||
Search for rollback version files based on run_uuid and scenario_type.
|
||||
|
||||
1. Search directories with "run_uuid" in name under "cls.versions_directory".
|
||||
2. Search files in those directories that start with "scenario_type" in matched directories in step 1.
|
||||
|
||||
:param run_uuid: Unique identifier for the run.
|
||||
:param scenario_type: Type of the scenario.
|
||||
:return: List of version file paths.
|
||||
"""
|
||||
|
||||
if not os.path.exists(cls().versions_directory):
|
||||
return []
|
||||
|
||||
rollback_context_directories = []
|
||||
for dir in os.listdir(cls().versions_directory):
|
||||
if cls.is_rollback_context_directory_format(dir, run_uuid):
|
||||
rollback_context_directories.append(dir)
|
||||
else:
|
||||
logger.warning(f"Directory {dir} does not match expected pattern of <timestamp>-<run_uuid>")
|
||||
|
||||
if not rollback_context_directories:
|
||||
logger.warning(f"No rollback context directories found for run UUID {run_uuid}")
|
||||
return []
|
||||
|
||||
|
||||
version_files = []
|
||||
for rollback_context_dir in rollback_context_directories:
|
||||
rollback_context_dir = os.path.join(cls().versions_directory, rollback_context_dir)
|
||||
|
||||
for file in os.listdir(rollback_context_dir):
|
||||
# Skip known non-rollback files/directories
|
||||
if file == "__pycache__" or file.endswith(".executed"):
|
||||
continue
|
||||
|
||||
if cls.is_rollback_version_file_format(file, scenario_type):
|
||||
version_files.append(
|
||||
os.path.join(rollback_context_dir, file)
|
||||
)
|
||||
else:
|
||||
logger.warning(
|
||||
f"File {file} does not match expected pattern of <{scenario_type or '*'}>_<timestamp>_<hash_suffix>.py"
|
||||
)
|
||||
return version_files
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class Version:
|
||||
scenario_type: str
|
||||
rollback_context: RollbackContext
|
||||
timestamp: int = time.time_ns() # Get current timestamp in nanoseconds
|
||||
hash_suffix: str = get_random_string(8) # Generate a random string of 8 characters
|
||||
|
||||
@property
|
||||
def version_file_name(self) -> str:
|
||||
"""
|
||||
Generate a version file name based on the timestamp and hash suffix.
|
||||
:return: The generated version file name.
|
||||
"""
|
||||
return f"{self.scenario_type}_{self.timestamp}_{self.hash_suffix}.py"
|
||||
|
||||
@property
|
||||
def version_file_full_path(self) -> str:
|
||||
"""
|
||||
Get the full path for the version file based on the version object and current context.
|
||||
|
||||
:return: The generated version file full path.
|
||||
"""
|
||||
return f"{RollbackConfig.get_rollback_versions_directory(self.rollback_context)}/{self.version_file_name}"
|
||||
|
||||
@staticmethod
|
||||
def new_version(scenario_type: str, rollback_context: RollbackContext) -> "Version":
|
||||
"""
|
||||
Get the current version of the rollback configuration.
|
||||
:return: An instance of Version with the current timestamp and hash suffix.
|
||||
"""
|
||||
return Version(
|
||||
scenario_type=scenario_type,
|
||||
rollback_context=rollback_context,
|
||||
)
|
||||
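For reference, the two validators above compose into paths like `<versions_directory>/<timestamp>-<run_uuid>/<scenario_type>_<timestamp>_<hash_suffix>.py`. A minimal sketch of how a concrete name would be checked; the values below are made up purely to illustrate the documented formats, not taken from a real run:

```python
# Hypothetical names, only to illustrate the expected formats.
version_file = "application_outages_scenarios_1718000000000000000_a1b2c3d4.py"
context_dir = "1718000000000000000-9f3c2b1a"

stem = version_file[:-3]                      # drop the ".py" extension
parts = stem.split("_")
scenario_type = "_".join(parts[:-2])          # "application_outages_scenarios"
timestamp, hash_suffix = parts[-2], parts[-1]
assert timestamp.isdigit()                    # nanoseconds since epoch
assert len(hash_suffix) == 8 and hash_suffix.isalnum()

dir_timestamp, run_uuid = context_dir.split("-", 1)
assert dir_timestamp.isdigit()                # directory name is <timestamp>-<run_uuid>
```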
krkn/rollback/handler.py (new file, 255 lines)
@@ -0,0 +1,255 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import cast, TYPE_CHECKING
|
||||
import os
|
||||
import importlib.util
|
||||
import inspect
|
||||
|
||||
from krkn.rollback.config import RollbackConfig, RollbackContext, Version
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
|
||||
|
||||
from krkn.scenario_plugins.abstract_scenario_plugin import AbstractScenarioPlugin
|
||||
from krkn.rollback.config import RollbackContent, RollbackCallable
|
||||
from krkn.rollback.serialization import Serializer
|
||||
|
||||
|
||||
def set_rollback_context_decorator(func):
|
||||
"""
|
||||
Decorator to automatically set and clear rollback context.
|
||||
It extracts run_uuid from the function arguments and sets the context in rollback_handler
|
||||
before executing the function, and clears it after execution.
|
||||
|
||||
Usage:
|
||||
|
||||
.. code-block:: python
|
||||
from krkn.rollback.handler import set_rollback_context_decorator
|
||||
# for any scenario plugin that inherits from AbstractScenarioPlugin
|
||||
@set_rollback_context_decorator
|
||||
def run(
|
||||
self,
|
||||
run_uuid: str,
|
||||
scenario: str,
|
||||
krkn_config: dict[str, any],
|
||||
lib_telemetry: KrknTelemetryOpenshift,
|
||||
scenario_telemetry: ScenarioTelemetry,
|
||||
):
|
||||
# Your scenario logic here
|
||||
pass
|
||||
"""
|
||||
|
||||
def wrapper(self, *args, **kwargs):
|
||||
self = cast("AbstractScenarioPlugin", self)
|
||||
# Since `AbstractScenarioPlugin.run_scenarios` will call `self.run` and pass all parameters as `kwargs`
|
||||
logger.debug(f"kwargs of ScenarioPlugin.run: {kwargs}")
|
||||
run_uuid = kwargs.get("run_uuid", None)
|
||||
# so we can safely assume that `run_uuid` will be present in `kwargs`
|
||||
assert run_uuid is not None, "run_uuid must be provided in kwargs"
|
||||
|
||||
# Set context if run_uuid is available and rollback_handler exists
|
||||
if run_uuid and hasattr(self, "rollback_handler"):
|
||||
self.rollback_handler = cast("RollbackHandler", self.rollback_handler)
|
||||
self.rollback_handler.set_context(run_uuid)
|
||||
|
||||
try:
|
||||
# Execute the `run` method with the original arguments
|
||||
result = func(self, *args, **kwargs)
|
||||
return result
|
||||
finally:
|
||||
# Clear context after function execution, regardless of success or failure
|
||||
if hasattr(self, "rollback_handler"):
|
||||
self.rollback_handler = cast("RollbackHandler", self.rollback_handler)
|
||||
self.rollback_handler.clear_context()
|
||||
|
||||
return wrapper
|
||||
|
||||
def _parse_rollback_module(version_file_path: str) -> tuple[RollbackCallable, RollbackContent]:
|
||||
"""
|
||||
Parse a rollback module to extract the rollback function and RollbackContent.
|
||||
|
||||
:param version_file_path: Path to the rollback version file
|
||||
:return: Tuple of (rollback_callable, rollback_content)
|
||||
"""
|
||||
|
||||
# Create a unique module name based on the file path
|
||||
module_name = f"rollback_module_{os.path.basename(version_file_path).replace('.py', '').replace('-', '_')}"
|
||||
|
||||
# Load the module using importlib
|
||||
spec = importlib.util.spec_from_file_location(module_name, version_file_path)
|
||||
if spec is None or spec.loader is None:
|
||||
raise ImportError(f"Could not load module from {version_file_path}")
|
||||
|
||||
module = importlib.util.module_from_spec(spec)
|
||||
spec.loader.exec_module(module)
|
||||
|
||||
# Find the rollback function
|
||||
rollback_callable = None
|
||||
for name, obj in inspect.getmembers(module):
|
||||
if inspect.isfunction(obj) and name.startswith('rollback_'):
|
||||
# Check function signature
|
||||
sig = inspect.signature(obj)
|
||||
params = list(sig.parameters.values())
|
||||
if (len(params) == 2 and
|
||||
'RollbackContent' in str(params[0].annotation) and
|
||||
'KrknTelemetryOpenshift' in str(params[1].annotation)):
|
||||
rollback_callable = obj
|
||||
logger.debug(f"Found rollback function: {name}")
|
||||
break
|
||||
|
||||
if rollback_callable is None:
|
||||
raise ValueError(f"No valid rollback function found in {version_file_path}")
|
||||
|
||||
# Find the rollback_content variable
|
||||
if not hasattr(module, 'rollback_content'):
|
||||
raise ValueError("Could not find variable named 'rollback_content' in the module")
|
||||
|
||||
rollback_content = getattr(module, 'rollback_content', None)
|
||||
if rollback_content is None:
|
||||
raise ValueError("Variable 'rollback_content' is None")
|
||||
|
||||
logger.debug(f"Found rollback_content variable in module: {rollback_content}")
|
||||
return rollback_callable, rollback_content
|
||||
|
||||
|
||||
def execute_rollback_version_files(
|
||||
telemetry_ocp: "KrknTelemetryOpenshift",
|
||||
run_uuid: str | None = None,
|
||||
scenario_type: str | None = None,
|
||||
ignore_auto_rollback_config: bool = False
|
||||
):
|
||||
"""
|
||||
Execute rollback version files for the given run_uuid and scenario_type.
|
||||
This function is called when a signal is received to perform rollback operations.
|
||||
|
||||
:param run_uuid: Unique identifier for the run.
|
||||
:param scenario_type: Type of the scenario being rolled back.
|
||||
:param ignore_auto_rollback_config: Flag to ignore auto rollback configuration. Will be set to True for manual execute-rollback calls.
|
||||
"""
|
||||
if not ignore_auto_rollback_config and RollbackConfig().auto is False:
|
||||
logger.warning(f"Auto rollback is disabled, skipping execution for run_uuid={run_uuid or '*'}, scenario_type={scenario_type or '*'}")
|
||||
return
|
||||
|
||||
# Get the rollback versions directory
|
||||
version_files = RollbackConfig.search_rollback_version_files(run_uuid, scenario_type)
|
||||
if not version_files:
|
||||
logger.warning(f"Skip execution for run_uuid={run_uuid or '*'}, scenario_type={scenario_type or '*'}")
|
||||
return
|
||||
|
||||
# Execute all version files in the directory
|
||||
logger.info(f"Executing rollback version files for run_uuid={run_uuid or '*'}, scenario_type={scenario_type or '*'}")
|
||||
for version_file in version_files:
|
||||
try:
|
||||
logger.info(f"Executing rollback version file: {version_file}")
|
||||
|
||||
# Parse the rollback module to get function and content
|
||||
rollback_callable, rollback_content = _parse_rollback_module(version_file)
|
||||
# Execute the rollback function
|
||||
logger.info('Executing rollback callable...')
|
||||
rollback_callable(rollback_content, telemetry_ocp)
|
||||
logger.info('Rollback completed.')
|
||||
success = True
|
||||
except Exception as e:
|
||||
success = False
|
||||
logger.error(f"Failed to execute rollback version file {version_file}: {e}")
|
||||
raise
|
||||
|
||||
# Rename the version file with .executed suffix if successful
|
||||
if success:
|
||||
try:
|
||||
executed_file = f"{version_file}.executed"
|
||||
os.rename(version_file, executed_file)
|
||||
logger.info(f"Renamed {version_file} to {executed_file} successfully.")
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to rename rollback version file {version_file}: {e}")
|
||||
raise
|
||||
|
||||
def cleanup_rollback_version_files(run_uuid: str, scenario_type: str):
|
||||
"""
|
||||
Cleanup rollback version files for the given run_uuid and scenario_type.
|
||||
This function is called to remove the rollback version files after successful scenario execution in run_scenarios.
|
||||
|
||||
:param run_uuid: Unique identifier for the run.
|
||||
:param scenario_type: Type of the scenario being rolled back.
|
||||
"""
|
||||
|
||||
# Get the rollback versions directory
|
||||
version_files = RollbackConfig.search_rollback_version_files(run_uuid, scenario_type)
|
||||
if not version_files:
|
||||
logger.warning(f"Skip cleanup for run_uuid={run_uuid}, scenario_type={scenario_type or '*'}")
|
||||
return
|
||||
|
||||
# Remove all version files in the directory
|
||||
logger.info(f"Cleaning up rollback version files for run_uuid={run_uuid}, scenario_type={scenario_type}")
|
||||
for version_file in version_files:
|
||||
try:
|
||||
os.remove(version_file)
|
||||
logger.info(f"Removed {version_file} successfully.")
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to remove rollback version file {version_file}: {e}")
|
||||
raise
|
||||
|
||||
class RollbackHandler:
|
||||
def __init__(
|
||||
self,
|
||||
scenario_type: str,
|
||||
serializer: "Serializer",
|
||||
):
|
||||
self.scenario_type = scenario_type
|
||||
self.serializer = serializer
|
||||
self.rollback_context: RollbackContext | None = (
|
||||
None # will be set when `set_context` is called
|
||||
)
|
||||
|
||||
def set_context(self, run_uuid: str):
|
||||
"""
|
||||
Set the context for the rollback handler.
|
||||
:param run_uuid: Unique identifier for the run.
|
||||
"""
|
||||
self.rollback_context = RollbackContext(run_uuid)
|
||||
logger.info(
|
||||
f"Set rollback_context: {self.rollback_context} for scenario_type: {self.scenario_type} RollbackHandler"
|
||||
)
|
||||
|
||||
def clear_context(self):
|
||||
"""
|
||||
Clear the run_uuid context for the rollback handler.
|
||||
"""
|
||||
logger.debug(
|
||||
f"Clear rollback_context {self.rollback_context} for scenario type {self.scenario_type} RollbackHandler"
|
||||
)
|
||||
self.rollback_context = None
|
||||
|
||||
def set_rollback_callable(
|
||||
self,
|
||||
callable: "RollbackCallable",
|
||||
rollback_content: "RollbackContent",
|
||||
):
|
||||
"""
|
||||
Set the rollback callable to be executed after the scenario is finished.
|
||||
|
||||
:param callable: The rollback callable to be set.
|
||||
:param rollback_content: The rollback content for the callable.
|
||||
"""
|
||||
logger.debug(
|
||||
f"Rollback callable set to {callable.__name__} for version directory {RollbackConfig.get_rollback_versions_directory(self.rollback_context)}"
|
||||
)
|
||||
|
||||
version: Version = Version.new_version(
|
||||
scenario_type=self.scenario_type,
|
||||
rollback_context=self.rollback_context,
|
||||
)
|
||||
|
||||
# Serialize the callable to a file
|
||||
try:
|
||||
version_file = self.serializer.serialize_callable(
|
||||
callable, rollback_content, version
|
||||
)
|
||||
logger.info(f"Rollback callable serialized to {version_file}")
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to serialize rollback callable: {e}")
|
||||
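`_parse_rollback_module` above only accepts generated modules that expose a function whose name starts with `rollback_`, taking a `RollbackContent` and a `KrknTelemetryOpenshift`, plus a module-level `rollback_content` variable. A minimal sketch of a module that would satisfy that contract; the names and values are illustrative, not taken from a generated file:

```python
# Sketch of what execute_rollback_version_files expects to find inside a
# serialized version file (the real files are rendered from version_template.j2).
from dataclasses import dataclass
from typing import Optional


@dataclass(frozen=True)
class RollbackContent:
    resource_identifier: str
    namespace: Optional[str] = None


def rollback_example(rollback_content: "RollbackContent", lib_telemetry: "KrknTelemetryOpenshift"):
    # Undo whatever the scenario created, e.g. delete a resource by name.
    print(f"would delete {rollback_content.resource_identifier} in {rollback_content.namespace}")


rollback_content = RollbackContent(resource_identifier="krkn-deny-abcde", namespace="default")
```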
krkn/rollback/serialization.py (new file, 123 lines)
@@ -0,0 +1,123 @@
|
||||
import inspect
|
||||
import os
|
||||
import logging
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from jinja2 import Environment, FileSystemLoader
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from krkn.rollback.config import RollbackCallable, RollbackContent, Version
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Serializer:
|
||||
def __init__(self, scenario_type: str):
|
||||
self.scenario_type = scenario_type
|
||||
# Set up Jinja2 environment to load templates from the rollback directory
|
||||
template_dir = os.path.join(os.path.dirname(__file__))
|
||||
env = Environment(loader=FileSystemLoader(template_dir))
|
||||
self.template = env.get_template("version_template.j2")
|
||||
|
||||
def _parse_rollback_callable_code(
|
||||
self, rollback_callable: "RollbackCallable"
|
||||
) -> tuple[str, str]:
|
||||
"""
|
||||
Parse the rollback callable code to extract its implementation.
|
||||
:param rollback_callable: The callable function to parse (can be staticmethod or regular function).
|
||||
:return: A tuple containing (function_name, function_code).
|
||||
"""
|
||||
# Get the implementation code of the rollback_callable
|
||||
rollback_callable_code = inspect.getsource(rollback_callable)
|
||||
|
||||
# Split into lines for processing
|
||||
code_lines = rollback_callable_code.split("\n")
|
||||
cleaned_lines = []
|
||||
function_name = None
|
||||
|
||||
# Find the function definition line and extract function name
|
||||
def_line_index = None
|
||||
for i, line in enumerate(code_lines):
|
||||
# Skip decorators (including @staticmethod)
|
||||
if line.strip().startswith("@"):
|
||||
continue
|
||||
|
||||
# Look for function definition
|
||||
if line.strip().startswith("def "):
|
||||
def_line_index = i
|
||||
# Extract function name from the def line
|
||||
def_line = line.strip()
|
||||
if "(" in def_line:
|
||||
function_name = def_line.split("def ")[1].split("(")[0].strip()
|
||||
break
|
||||
|
||||
if def_line_index is None or function_name is None:
|
||||
raise ValueError(
|
||||
"Could not find function definition in callable source code"
|
||||
)
|
||||
|
||||
# Get the base indentation level from the def line
|
||||
def_line = code_lines[def_line_index]
|
||||
base_indent_level = len(def_line) - len(def_line.lstrip())
|
||||
|
||||
# Process all lines starting from the def line
|
||||
for i in range(def_line_index, len(code_lines)):
|
||||
line = code_lines[i]
|
||||
|
||||
# Handle empty lines
|
||||
if not line.strip():
|
||||
cleaned_lines.append("")
|
||||
continue
|
||||
|
||||
# Calculate current line's indentation
|
||||
current_indent = len(line) - len(line.lstrip())
|
||||
|
||||
# Remove the base indentation to normalize to function level
|
||||
if current_indent >= base_indent_level:
|
||||
# Remove base indentation
|
||||
normalized_line = line[base_indent_level:]
|
||||
cleaned_lines.append(normalized_line)
|
||||
else:
|
||||
# This shouldn't happen in well-formed code, but handle it gracefully
|
||||
cleaned_lines.append(line.lstrip())
|
||||
|
||||
# Reconstruct the code and clean up trailing whitespace
|
||||
function_code = "\n".join(cleaned_lines).rstrip()
|
||||
|
||||
return function_name, function_code
|
||||
|
||||
def serialize_callable(
|
||||
self,
|
||||
rollback_callable: "RollbackCallable",
|
||||
rollback_content: "RollbackContent",
|
||||
version: "Version",
|
||||
) -> str:
|
||||
"""
|
||||
Serialize a callable function to a file with its arguments and keyword arguments.
|
||||
:param rollback_callable: The callable to serialize.
|
||||
:param rollback_content: The rollback content for the callable.
|
||||
:param version: The version representing the rollback context and file path for the rollback.
|
||||
:return: Path to the serialized callable file.
|
||||
"""
|
||||
|
||||
rollback_callable_name, rollback_callable_code = (
|
||||
self._parse_rollback_callable_code(rollback_callable)
|
||||
)
|
||||
|
||||
# Render the template with the required variables
|
||||
file_content = self.template.render(
|
||||
rollback_callable_name=rollback_callable_name,
|
||||
rollback_callable_code=rollback_callable_code,
|
||||
rollback_content=str(rollback_content),
|
||||
)
|
||||
|
||||
# Write the file to the version directory
|
||||
os.makedirs(os.path.dirname(version.version_file_full_path), exist_ok=True)
|
||||
|
||||
logger.debug("Creating version file at %s", version.version_file_full_path)
|
||||
logger.debug("Version file content:\n%s", file_content)
|
||||
with open(version.version_file_full_path, "w") as f:
|
||||
f.write(file_content)
|
||||
logger.info(f"Serialized callable written to {version.version_file_full_path}")
|
||||
|
||||
return version.version_file_full_path
|
||||
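The de-indentation logic in `_parse_rollback_callable_code` exists because rollback callables are typically `@staticmethod`s defined inside a plugin class, so their source carries class-level indentation and a decorator line that must not appear in the generated module. A rough sketch of the same idea using only the standard library; this is an illustration of the intent, not the code above:

```python
import inspect
import textwrap


class SomePlugin:
    @staticmethod
    def rollback_something(rollback_content, lib_telemetry):
        print("undo", rollback_content)


src = inspect.getsource(SomePlugin.rollback_something)
# Drop decorator lines, then strip the common leading indentation.
body = "\n".join(l for l in src.splitlines() if not l.strip().startswith("@"))
body = textwrap.dedent(body)
name = body.split("def ", 1)[1].split("(", 1)[0].strip()
print(name)   # -> rollback_something
print(body)   # function source normalized to module-level indentation
```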
krkn/rollback/signal.py (new file, 106 lines)
@@ -0,0 +1,106 @@
from typing import Dict, Any, Optional
import threading
import signal
import sys
import logging
from contextlib import contextmanager

from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift

from krkn.rollback.handler import execute_rollback_version_files

logger = logging.getLogger(__name__)


class SignalHandler:
    # Class-level variables for signal handling (shared across all instances)
    _signal_handlers_installed = False  # No need for thread-safe variable due to _signal_lock
    _original_handlers: Dict[int, Any] = {}
    _signal_lock = threading.Lock()

    # Thread-local storage for context
    _local = threading.local()

    @classmethod
    def _set_context(cls, run_uuid: str, scenario_type: str, telemetry_ocp: KrknTelemetryOpenshift):
        """Set the current execution context for this thread."""
        cls._local.run_uuid = run_uuid
        cls._local.scenario_type = scenario_type
        cls._local.telemetry_ocp = telemetry_ocp
        logger.debug(f"Set signal context set for thread {threading.current_thread().name} - run_uuid={run_uuid}, scenario_type={scenario_type}")

    @classmethod
    def _get_context(cls) -> tuple[Optional[str], Optional[str], Optional[KrknTelemetryOpenshift]]:
        """Get the current execution context for this thread."""
        run_uuid = getattr(cls._local, 'run_uuid', None)
        scenario_type = getattr(cls._local, 'scenario_type', None)
        telemetry_ocp = getattr(cls._local, 'telemetry_ocp', None)
        return run_uuid, scenario_type, telemetry_ocp

    @classmethod
    def _signal_handler(cls, signum: int, frame):
        """Handle signals with current thread context information."""
        signal_name = signal.Signals(signum).name
        run_uuid, scenario_type, telemetry_ocp = cls._get_context()
        if not run_uuid or not scenario_type or not telemetry_ocp:
            logger.warning(f"Signal {signal_name} received without complete context, skipping rollback.")
            return

        # Clear the context for the next signal, as another signal may arrive before the rollback completes.
        # This ensures that the rollback is performed only once.
        cls._set_context(None, None, telemetry_ocp)

        # Perform rollback
        logger.info(f"Performing rollback for signal {signal_name} with run_uuid={run_uuid}, scenario_type={scenario_type}")
        execute_rollback_version_files(telemetry_ocp, run_uuid, scenario_type)

        # Call original handler if it exists
        if signum not in cls._original_handlers:
            logger.info(f"Signal {signal_name} has no registered handler, exiting...")
            return

        original_handler = cls._original_handlers[signum]
        if callable(original_handler):
            logger.info(f"Calling original handler for {signal_name}")
            original_handler(signum, frame)
        elif original_handler == signal.SIG_DFL:
            # Restore default behavior
            logger.info(f"Restoring default signal handler for {signal_name}")
            signal.signal(signum, signal.SIG_DFL)
            signal.raise_signal(signum)

    @classmethod
    def _register_signal_handler(cls):
        """Register signal handlers once (called by first instance)."""
        with cls._signal_lock:  # Lock protects _signal_handlers_installed from race conditions
            if cls._signal_handlers_installed:
                return

            signals_to_handle = [signal.SIGINT, signal.SIGTERM]
            if hasattr(signal, 'SIGHUP'):
                signals_to_handle.append(signal.SIGHUP)

            for sig in signals_to_handle:
                try:
                    original_handler = signal.signal(sig, cls._signal_handler)
                    cls._original_handlers[sig] = original_handler
                    logger.debug(f"SignalHandler: Registered signal handler for {signal.Signals(sig).name}")
                except (OSError, ValueError) as e:
                    logger.warning(f"AbstractScenarioPlugin: Could not register handler for signal {sig}: {e}")

            cls._signal_handlers_installed = True
            logger.info("Signal handlers registered globally")

    @classmethod
    @contextmanager
    def signal_context(cls, run_uuid: str, scenario_type: str, telemetry_ocp: KrknTelemetryOpenshift):
        """Context manager to set the signal context for the current thread."""
        cls._set_context(run_uuid, scenario_type, telemetry_ocp)
        cls._register_signal_handler()
        try:
            yield
        finally:
            # Clear context after exiting the context manager
            cls._set_context(None, None, telemetry_ocp)


signal_handler = SignalHandler()
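The handler above chains to whatever was installed before it: a callable previous handler is invoked directly, while `SIG_DFL` is restored and the signal re-raised so the default action (for example process termination) still happens after the rollback. A stripped-down sketch of that chaining pattern, for illustration only:

```python
import signal

_previous = {}

def _chaining_handler(signum, frame):
    print("cleanup before exiting")            # the rollback would run here
    prev = _previous.get(signum)
    if callable(prev):
        prev(signum, frame)                    # delegate to the earlier handler
    elif prev == signal.SIG_DFL:
        signal.signal(signum, signal.SIG_DFL)  # restore default and re-raise
        signal.raise_signal(signum)

_previous[signal.SIGINT] = signal.signal(signal.SIGINT, _chaining_handler)
```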
krkn/rollback/version_template.j2 (new file, 55 lines)
@@ -0,0 +1,55 @@
# This file is auto-generated by krkn-lib.
# It contains the rollback callable and its arguments for the scenario plugin.

from dataclasses import dataclass
import os
import logging
from typing import Optional

from krkn_lib.utils import SafeLogger
from krkn_lib.ocp import KrknOpenshift
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift

@dataclass(frozen=True)
class RollbackContent:
    resource_identifier: str
    namespace: Optional[str] = None

# Actual rollback callable
{{ rollback_callable_code }}

# Create necessary variables for execution
lib_openshift = None
lib_telemetry = None
rollback_content = {{ rollback_content }}


# Main entry point for execution
if __name__ == '__main__':
    # setup logging
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s [%(levelname)s] %(message)s",
        handlers=[
            logging.StreamHandler(),
        ]
    )

    # setup logging and get kubeconfig path
    kubeconfig_path = os.getenv("KUBECONFIG", "~/.kube/config")
    log_directory = os.path.dirname(os.path.abspath(__file__))
    os.makedirs(os.path.join(log_directory, 'logs'), exist_ok=True)
    # setup SafeLogger for telemetry
    telemetry_log_path = os.path.join(log_directory, 'logs', 'telemetry.log')
    safe_logger = SafeLogger(telemetry_log_path)
    # setup krkn-lib objects
    lib_openshift = KrknOpenshift(kubeconfig_path=kubeconfig_path)
    lib_telemetry = KrknTelemetryOpenshift(safe_logger=safe_logger, lib_openshift=lib_openshift)

    # execute
    logging.info('Executing rollback callable...')
    {{ rollback_callable_name }}(
        rollback_content,
        lib_telemetry
    )
    logging.info('Rollback completed.')
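The `Serializer` shown earlier fills this template with three variables: the rollback function's name, its de-indented source, and the string form of the `RollbackContent`. A minimal sketch of that rendering step in isolation; the path and values here are hypothetical and only illustrate the mechanism:

```python
# Render the rollback version template by hand (paths/values are hypothetical).
from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader("krkn/rollback"))
template = env.get_template("version_template.j2")
rendered = template.render(
    rollback_callable_name="rollback_network_policy",
    rollback_callable_code=(
        "def rollback_network_policy(rollback_content, lib_telemetry):\n"
        "    ...  # delete the resource named in rollback_content"
    ),
    rollback_content="RollbackContent(resource_identifier='krkn-deny-ab12c', namespace='default')",
)
print(rendered[:200])  # the generated module starts with the auto-generated header
```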
@@ -5,9 +5,26 @@ from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
|
||||
|
||||
from krkn import utils
|
||||
|
||||
from krkn.rollback.handler import (
|
||||
RollbackHandler,
|
||||
execute_rollback_version_files,
|
||||
cleanup_rollback_version_files
|
||||
)
|
||||
from krkn.rollback.signal import signal_handler
|
||||
from krkn.rollback.serialization import Serializer
|
||||
|
||||
class AbstractScenarioPlugin(ABC):
|
||||
|
||||
def __init__(self, scenario_type: str = "placeholder_scenario_type"):
|
||||
"""Initializes the AbstractScenarioPlugin with the scenario type and rollback configuration.
|
||||
|
||||
:param scenario_type: the scenario type defined in the config.yaml
|
||||
"""
|
||||
serializer = Serializer(
|
||||
scenario_type=scenario_type,
|
||||
)
|
||||
self.rollback_handler = RollbackHandler(scenario_type, serializer)
|
||||
|
||||
@abstractmethod
|
||||
def run(
|
||||
self,
|
||||
@@ -74,24 +91,39 @@ class AbstractScenarioPlugin(ABC):
|
||||
scenario_telemetry, scenario_config
|
||||
)
|
||||
|
||||
try:
|
||||
logging.info(
|
||||
f"Running {self.__class__.__name__}: {self.get_scenario_types()} -> {scenario_config}"
|
||||
)
|
||||
return_value = self.run(
|
||||
run_uuid,
|
||||
scenario_config,
|
||||
krkn_config,
|
||||
telemetry,
|
||||
scenario_telemetry,
|
||||
)
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
f"uncaught exception on scenario `run()` method: {e} "
|
||||
f"please report an issue on https://github.com/krkn-chaos/krkn"
|
||||
)
|
||||
return_value = 1
|
||||
with signal_handler.signal_context(
|
||||
run_uuid=run_uuid,
|
||||
scenario_type=scenario_telemetry.scenario_type,
|
||||
telemetry_ocp=telemetry
|
||||
):
|
||||
try:
|
||||
logging.info(
|
||||
f"Running {self.__class__.__name__}: {self.get_scenario_types()} -> {scenario_config}"
|
||||
)
|
||||
# pass all the parameters by kwargs to make `set_rollback_context_decorator` get the `run_uuid` and `scenario_type`
|
||||
return_value = self.run(
|
||||
run_uuid=run_uuid,
|
||||
scenario=scenario_config,
|
||||
krkn_config=krkn_config,
|
||||
lib_telemetry=telemetry,
|
||||
scenario_telemetry=scenario_telemetry,
|
||||
)
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
f"uncaught exception on scenario `run()` method: {e} "
|
||||
f"please report an issue on https://github.com/krkn-chaos/krkn"
|
||||
)
|
||||
return_value = 1
|
||||
|
||||
if return_value == 0:
|
||||
cleanup_rollback_version_files(
|
||||
run_uuid, scenario_telemetry.scenario_type
|
||||
)
|
||||
else:
|
||||
# execute rollback files based on the return value
|
||||
execute_rollback_version_files(
|
||||
telemetry, run_uuid, scenario_telemetry.scenario_type
|
||||
)
|
||||
scenario_telemetry.exit_status = return_value
|
||||
scenario_telemetry.end_timestamp = time.time()
|
||||
utils.collect_and_put_ocp_logs(
|
||||
@@ -114,6 +146,8 @@ class AbstractScenarioPlugin(ABC):
|
||||
if scenario_telemetry.exit_status != 0:
|
||||
failed_scenarios.append(scenario_config)
|
||||
scenario_telemetries.append(scenario_telemetry)
|
||||
logging.info(f"wating {wait_duration} before running the next scenario")
|
||||
logging.info(f"waiting {wait_duration} before running the next scenario")
|
||||
time.sleep(wait_duration)
|
||||
return failed_scenarios, scenario_telemetries
|
||||
|
||||
|
||||
|
||||
@@ -7,9 +7,12 @@ from krkn_lib.utils import get_yaml_item_value, get_random_string
|
||||
from jinja2 import Template
|
||||
from krkn import cerberus
|
||||
from krkn.scenario_plugins.abstract_scenario_plugin import AbstractScenarioPlugin
|
||||
from krkn.rollback.config import RollbackContent
|
||||
from krkn.rollback.handler import set_rollback_context_decorator
|
||||
|
||||
|
||||
class ApplicationOutageScenarioPlugin(AbstractScenarioPlugin):
|
||||
@set_rollback_context_decorator
|
||||
def run(
|
||||
self,
|
||||
run_uuid: str,
|
||||
@@ -31,6 +34,21 @@ class ApplicationOutageScenarioPlugin(AbstractScenarioPlugin):
|
||||
)
|
||||
namespace = get_yaml_item_value(scenario_config, "namespace", "")
|
||||
duration = get_yaml_item_value(scenario_config, "duration", 60)
|
||||
exclude_label = get_yaml_item_value(
|
||||
scenario_config, "exclude_label", None
|
||||
)
|
||||
match_expressions = self._build_exclude_expressions(exclude_label)
|
||||
if match_expressions:
|
||||
# Log the format being used for better clarity
|
||||
format_type = "dict" if isinstance(exclude_label, dict) else "string"
|
||||
logging.info(
|
||||
"Excluding pods with labels (%s format): %s",
|
||||
format_type,
|
||||
", ".join(
|
||||
f"{expr['key']} NOT IN {expr['values']}"
|
||||
for expr in match_expressions
|
||||
),
|
||||
)
|
||||
|
||||
start_time = int(time.time())
|
||||
policy_name = f"krkn-deny-{get_random_string(5)}"
|
||||
@@ -40,23 +58,42 @@ class ApplicationOutageScenarioPlugin(AbstractScenarioPlugin):
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
name: """
|
||||
+ policy_name
|
||||
+ """
|
||||
name: {{ policy_name }}
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels: {{ pod_selector }}
|
||||
{% if match_expressions %}
|
||||
matchExpressions:
|
||||
{% for expression in match_expressions %}
|
||||
- key: {{ expression["key"] }}
|
||||
operator: NotIn
|
||||
values:
|
||||
{% for value in expression["values"] %}
|
||||
- {{ value }}
|
||||
{% endfor %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
policyTypes: {{ traffic_type }}
|
||||
"""
|
||||
)
|
||||
t = Template(network_policy_template)
|
||||
rendered_spec = t.render(
|
||||
pod_selector=pod_selector, traffic_type=traffic_type
|
||||
pod_selector=pod_selector,
|
||||
traffic_type=traffic_type,
|
||||
match_expressions=match_expressions,
|
||||
policy_name=policy_name,
|
||||
)
|
||||
yaml_spec = yaml.safe_load(rendered_spec)
|
||||
# Block the traffic by creating network policy
|
||||
logging.info("Creating the network policy")
|
||||
|
||||
self.rollback_handler.set_rollback_callable(
|
||||
self.rollback_network_policy,
|
||||
RollbackContent(
|
||||
namespace=namespace,
|
||||
resource_identifier=policy_name,
|
||||
),
|
||||
)
|
||||
lib_telemetry.get_lib_kubernetes().create_net_policy(
|
||||
yaml_spec, namespace
|
||||
)
|
||||
@@ -89,5 +126,86 @@ class ApplicationOutageScenarioPlugin(AbstractScenarioPlugin):
|
||||
else:
|
||||
return 0
|
||||
|
||||
@staticmethod
|
||||
def rollback_network_policy(
|
||||
rollback_content: RollbackContent,
|
||||
lib_telemetry: KrknTelemetryOpenshift,
|
||||
):
|
||||
"""Rollback function to delete the network policy created during the scenario.
|
||||
|
||||
:param rollback_content: Rollback content containing namespace and resource_identifier.
|
||||
:param lib_telemetry: Instance of KrknTelemetryOpenshift for Kubernetes operations.
|
||||
"""
|
||||
try:
|
||||
namespace = rollback_content.namespace
|
||||
policy_name = rollback_content.resource_identifier
|
||||
logging.info(
|
||||
f"Rolling back network policy: {policy_name} in namespace: {namespace}"
|
||||
)
|
||||
lib_telemetry.get_lib_kubernetes().delete_net_policy(policy_name, namespace)
|
||||
logging.info("Network policy rollback completed successfully.")
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to rollback network policy: {e}")
|
||||
|
||||
def get_scenario_types(self) -> list[str]:
|
||||
return ["application_outages_scenarios"]
|
||||
|
||||
@staticmethod
|
||||
def _build_exclude_expressions(exclude_label) -> list[dict]:
|
||||
"""
|
||||
Build match expressions for NetworkPolicy from exclude_label.
|
||||
|
||||
Supports multiple formats:
|
||||
- Dict format (preferred, similar to pod_selector): {key1: value1, key2: [value2, value3]}
|
||||
Example: {tier: "gold", env: ["prod", "staging"]}
|
||||
- String format: "key1=value1,key2=value2" or "key1=value1|value2"
|
||||
Example: "tier=gold,env=prod" or "tier=gold|platinum"
|
||||
- List format (list of strings): ["key1=value1", "key2=value2"]
|
||||
Example: ["tier=gold", "env=prod"]
|
||||
Note: List elements must be strings in "key=value" format.
|
||||
|
||||
:param exclude_label: Can be dict, string, list of strings, or None
|
||||
:return: List of match expression dictionaries
|
||||
"""
|
||||
expressions: list[dict] = []
|
||||
|
||||
if not exclude_label:
|
||||
return expressions
|
||||
|
||||
def _append_expr(key: str, values):
|
||||
if not key or values is None:
|
||||
return
|
||||
if not isinstance(values, list):
|
||||
values = [values]
|
||||
cleaned_values = [str(v).strip() for v in values if str(v).strip()]
|
||||
if cleaned_values:
|
||||
expressions.append({"key": key.strip(), "values": cleaned_values})
|
||||
|
||||
if isinstance(exclude_label, dict):
|
||||
for k, v in exclude_label.items():
|
||||
_append_expr(str(k), v)
|
||||
return expressions
|
||||
|
||||
if isinstance(exclude_label, list):
|
||||
selectors = exclude_label
|
||||
else:
|
||||
selectors = [sel.strip() for sel in str(exclude_label).split(",")]
|
||||
|
||||
for selector in selectors:
|
||||
if not selector:
|
||||
continue
|
||||
if "=" not in selector:
|
||||
logging.warning(
|
||||
"exclude_label entry '%s' is invalid, expected key=value format",
|
||||
selector,
|
||||
)
|
||||
continue
|
||||
key, value = selector.split("=", 1)
|
||||
value_items = (
|
||||
[item.strip() for item in value.split("|") if item.strip()]
|
||||
if value
|
||||
else []
|
||||
)
|
||||
_append_expr(key, value_items or value)
|
||||
|
||||
return expressions
|
||||
|
||||
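As documented in `_build_exclude_expressions` above, `exclude_label` may be a dict, a comma-separated string, or a list of `key=value` strings, with `|` separating alternative values. All three forms below would normalize to the same match expressions; the label values are illustrative only:

```python
# Three equivalent ways to express the same exclusion (illustrative values):
exclude_as_dict = {"tier": "gold", "env": ["prod", "staging"]}
exclude_as_string = "tier=gold,env=prod|staging"
exclude_as_list = ["tier=gold", "env=prod|staging"]

# Each of them is normalized to NotIn match expressions of the form:
expected = [
    {"key": "tier", "values": ["gold"]},
    {"key": "env", "values": ["prod", "staging"]},
]
```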
@@ -1,15 +1,16 @@
|
||||
import logging
|
||||
import random
|
||||
import time
|
||||
|
||||
import traceback
|
||||
from asyncio import Future
|
||||
import yaml
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
from krkn_lib.k8s.pods_monitor_pool import PodsMonitorPool
|
||||
from krkn_lib.k8s.pod_monitor import select_and_monitor_by_namespace_pattern_and_label
|
||||
from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
|
||||
from krkn_lib.utils import get_yaml_item_value
|
||||
|
||||
from krkn import cerberus
|
||||
|
||||
from krkn.scenario_plugins.abstract_scenario_plugin import AbstractScenarioPlugin
|
||||
|
||||
|
||||
@@ -22,31 +23,27 @@ class ContainerScenarioPlugin(AbstractScenarioPlugin):
|
||||
lib_telemetry: KrknTelemetryOpenshift,
|
||||
scenario_telemetry: ScenarioTelemetry,
|
||||
) -> int:
|
||||
pool = PodsMonitorPool(lib_telemetry.get_lib_kubernetes())
|
||||
try:
|
||||
with open(scenario, "r") as f:
|
||||
cont_scenario_config = yaml.full_load(f)
|
||||
|
||||
for kill_scenario in cont_scenario_config["scenarios"]:
|
||||
self.start_monitoring(
|
||||
kill_scenario, pool
|
||||
future_snapshot = self.start_monitoring(
|
||||
kill_scenario,
|
||||
lib_telemetry
|
||||
)
|
||||
killed_containers = self.container_killing_in_pod(
|
||||
self.container_killing_in_pod(
|
||||
kill_scenario, lib_telemetry.get_lib_kubernetes()
|
||||
)
|
||||
result = pool.join()
|
||||
if result.error:
|
||||
logging.error(
|
||||
logging.error(
|
||||
f"ContainerScenarioPlugin pods failed to recovery: {result.error}"
|
||||
)
|
||||
)
|
||||
return 1
|
||||
scenario_telemetry.affected_pods = result
|
||||
|
||||
# publish cerberus status
|
||||
except (RuntimeError, Exception):
|
||||
logging.error("ContainerScenarioPlugin exiting due to Exception %s")
|
||||
snapshot = future_snapshot.result()
|
||||
result = snapshot.get_pods_status()
|
||||
scenario_telemetry.affected_pods = result
|
||||
if len(result.unrecovered) > 0:
|
||||
logging.info("ContainerScenarioPlugin failed with unrecovered containers")
|
||||
return 1
|
||||
except (RuntimeError, Exception) as e:
|
||||
logging.error("Stack trace:\n%s", traceback.format_exc())
|
||||
logging.error("ContainerScenarioPlugin exiting due to Exception %s" % e)
|
||||
return 1
|
||||
else:
|
||||
return 0
|
||||
@@ -54,16 +51,17 @@ class ContainerScenarioPlugin(AbstractScenarioPlugin):
|
||||
def get_scenario_types(self) -> list[str]:
|
||||
return ["container_scenarios"]
|
||||
|
||||
def start_monitoring(self, kill_scenario: dict, pool: PodsMonitorPool):
|
||||
|
||||
def start_monitoring(self, kill_scenario: dict, lib_telemetry: KrknTelemetryOpenshift) -> Future:
|
||||
namespace_pattern = f"^{kill_scenario['namespace']}$"
|
||||
label_selector = kill_scenario["label_selector"]
|
||||
recovery_time = kill_scenario["expected_recovery_time"]
|
||||
pool.select_and_monitor_by_namespace_pattern_and_label(
|
||||
future_snapshot = select_and_monitor_by_namespace_pattern_and_label(
|
||||
namespace_pattern=namespace_pattern,
|
||||
label_selector=label_selector,
|
||||
max_timeout=recovery_time,
|
||||
v1_client=lib_telemetry.get_lib_kubernetes().cli
|
||||
)
|
||||
return future_snapshot
|
||||
|
||||
def container_killing_in_pod(self, cont_scenario, kubecli: KrknKubernetes):
|
||||
scenario_name = get_yaml_item_value(cont_scenario, "name", "")
|
||||
@@ -73,6 +71,7 @@ class ContainerScenarioPlugin(AbstractScenarioPlugin):
|
||||
container_name = get_yaml_item_value(cont_scenario, "container_name", "")
|
||||
kill_action = get_yaml_item_value(cont_scenario, "action", 1)
|
||||
kill_count = get_yaml_item_value(cont_scenario, "count", 1)
|
||||
exclude_label = get_yaml_item_value(cont_scenario, "exclude_label", "")
|
||||
if not isinstance(kill_action, int):
|
||||
logging.error(
|
||||
"Please make sure the action parameter defined in the "
|
||||
@@ -94,7 +93,19 @@ class ContainerScenarioPlugin(AbstractScenarioPlugin):
|
||||
pods = kubecli.get_all_pods(label_selector)
|
||||
else:
|
||||
# Only returns pod names
|
||||
pods = kubecli.list_pods(namespace, label_selector)
|
||||
# Use list_pods with exclude_label parameter to exclude pods
|
||||
if exclude_label:
|
||||
logging.info(
|
||||
"Using exclude_label '%s' to exclude pods from container scenario %s in namespace %s",
|
||||
exclude_label,
|
||||
scenario_name,
|
||||
namespace,
|
||||
)
|
||||
pods = kubecli.list_pods(
|
||||
namespace=namespace,
|
||||
label_selector=label_selector,
|
||||
exclude_label=exclude_label if exclude_label else None
|
||||
)
|
||||
else:
|
||||
if namespace == "*":
|
||||
logging.error(
|
||||
@@ -105,6 +116,7 @@ class ContainerScenarioPlugin(AbstractScenarioPlugin):
|
||||
# sys.exit(1)
|
||||
raise RuntimeError()
|
||||
pods = pod_names
|
||||
|
||||
# get container and pod name
|
||||
container_pod_list = []
|
||||
for pod in pods:
|
||||
@@ -221,4 +233,5 @@ class ContainerScenarioPlugin(AbstractScenarioPlugin):
|
||||
timer += 5
|
||||
logging.info("Waiting 5 seconds for containers to become ready")
|
||||
time.sleep(5)
|
||||
|
||||
return killed_container_list
|
||||
|
||||
@@ -16,15 +16,23 @@ from krkn_lib.k8s import KrknKubernetes
|
||||
from krkn_lib.utils import get_random_string
|
||||
|
||||
from krkn.scenario_plugins.abstract_scenario_plugin import AbstractScenarioPlugin
|
||||
from krkn.rollback.config import RollbackContent
|
||||
from krkn.rollback.handler import set_rollback_context_decorator
|
||||
|
||||
|
||||
class HogsScenarioPlugin(AbstractScenarioPlugin):
|
||||
|
||||
@set_rollback_context_decorator
|
||||
def run(self, run_uuid: str, scenario: str, krkn_config: dict[str, any], lib_telemetry: KrknTelemetryOpenshift,
|
||||
scenario_telemetry: ScenarioTelemetry) -> int:
|
||||
try:
|
||||
with open(scenario, "r") as f:
|
||||
scenario = yaml.full_load(f)
|
||||
scenario_config = HogConfig.from_yaml_dict(scenario)
|
||||
|
||||
# Get node-name if provided
|
||||
node_name = scenario.get('node-name')
|
||||
|
||||
has_selector = True
|
||||
if not scenario_config.node_selector or not re.match("^.+=.*$", scenario_config.node_selector):
|
||||
if scenario_config.node_selector:
|
||||
@@ -33,13 +41,19 @@ class HogsScenarioPlugin(AbstractScenarioPlugin):
|
||||
else:
|
||||
node_selector = scenario_config.node_selector
|
||||
|
||||
available_nodes = lib_telemetry.get_lib_kubernetes().list_nodes(node_selector)
|
||||
if len(available_nodes) == 0:
|
||||
raise Exception("no available nodes to schedule workload")
|
||||
if node_name:
|
||||
logging.info(f"Using specific node: {node_name}")
|
||||
all_nodes = lib_telemetry.get_lib_kubernetes().list_nodes("")
|
||||
if node_name not in all_nodes:
|
||||
raise Exception(f"Specified node {node_name} not found or not available")
|
||||
available_nodes = [node_name]
|
||||
else:
|
||||
available_nodes = lib_telemetry.get_lib_kubernetes().list_nodes(node_selector)
|
||||
if len(available_nodes) == 0:
|
||||
raise Exception("no available nodes to schedule workload")
|
||||
|
||||
if not has_selector:
|
||||
# if selector not specified picks a random node between the available
|
||||
available_nodes = [available_nodes[random.randint(0, len(available_nodes))]]
|
||||
if not has_selector:
|
||||
available_nodes = [available_nodes[random.randint(0, len(available_nodes) - 1)]]
|
||||
|
||||
if scenario_config.number_of_nodes and len(available_nodes) > scenario_config.number_of_nodes:
|
||||
available_nodes = random.sample(available_nodes, scenario_config.number_of_nodes)
|
||||
@@ -69,6 +83,13 @@ class HogsScenarioPlugin(AbstractScenarioPlugin):
|
||||
config.node_selector = f"kubernetes.io/hostname={node}"
|
||||
pod_name = f"{config.type.value}-hog-{get_random_string(5)}"
|
||||
node_resources_start = lib_k8s.get_node_resources_info(node)
|
||||
self.rollback_handler.set_rollback_callable(
|
||||
self.rollback_hog_pod,
|
||||
RollbackContent(
|
||||
namespace=config.namespace,
|
||||
resource_identifier=pod_name,
|
||||
),
|
||||
)
|
||||
lib_k8s.deploy_hog(pod_name, config)
|
||||
start = time.time()
|
||||
# waiting 3 seconds before starting sample collection
|
||||
@@ -140,3 +161,22 @@ class HogsScenarioPlugin(AbstractScenarioPlugin):
|
||||
raise exception
|
||||
except queue.Empty:
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
def rollback_hog_pod(rollback_content: RollbackContent, lib_telemetry: KrknTelemetryOpenshift):
|
||||
"""
|
||||
Rollback function to delete hog pod.
|
||||
|
||||
:param rollback_content: Rollback content containing namespace and resource_identifier.
|
||||
:param lib_telemetry: Instance of KrknTelemetryOpenshift for Kubernetes operations
|
||||
"""
|
||||
try:
|
||||
namespace = rollback_content.namespace
|
||||
pod_name = rollback_content.resource_identifier
|
||||
logging.info(
|
||||
f"Rolling back hog pod: {pod_name} in namespace: {namespace}"
|
||||
)
|
||||
lib_telemetry.get_lib_kubernetes().delete_pod(pod_name, namespace)
|
||||
logging.info("Rollback of hog pod completed successfully.")
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to rollback hog pod: {e}")
|
||||
|
||||
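Both plugins above follow the same registration pattern: before creating the disruptive resource, they hand a static rollback function and a `RollbackContent` describing that resource to `self.rollback_handler`, so the undo step is serialized before the scenario has a chance to fail or be interrupted. A condensed sketch of the pattern for a hypothetical plugin; the class and resource names are made up, but the calls mirror the plugins above:

```python
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
from krkn_lib.utils import get_random_string

from krkn.scenario_plugins.abstract_scenario_plugin import AbstractScenarioPlugin
from krkn.rollback.config import RollbackContent
from krkn.rollback.handler import set_rollback_context_decorator


class MyScenarioPlugin(AbstractScenarioPlugin):
    @set_rollback_context_decorator
    def run(self, run_uuid, scenario, krkn_config, lib_telemetry, scenario_telemetry):
        resource_name = f"krkn-thing-{get_random_string(5)}"
        # Register the undo step *before* creating the resource.
        self.rollback_handler.set_rollback_callable(
            self.rollback_thing,
            RollbackContent(namespace="default", resource_identifier=resource_name),
        )
        # ... create the resource, run the chaos, return 0 on success ...
        return 0

    @staticmethod
    def rollback_thing(rollback_content: RollbackContent, lib_telemetry: KrknTelemetryOpenshift):
        # Delete whatever `run` created; must stay self-contained once serialized.
        lib_telemetry.get_lib_kubernetes().delete_pod(
            rollback_content.resource_identifier, rollback_content.namespace
        )
```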
@@ -0,0 +1,407 @@
|
||||
import logging
|
||||
import time
|
||||
from typing import Dict, Any, Optional
|
||||
import random
|
||||
import re
|
||||
import yaml
|
||||
from kubernetes.client.rest import ApiException
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
|
||||
from krkn_lib.utils import log_exception
|
||||
from krkn_lib.models.k8s import AffectedPod, PodsStatus
|
||||
|
||||
from krkn.scenario_plugins.abstract_scenario_plugin import AbstractScenarioPlugin
|
||||
|
||||
|
||||
class KubevirtVmOutageScenarioPlugin(AbstractScenarioPlugin):
|
||||
"""
|
||||
A scenario plugin that injects chaos by deleting a KubeVirt Virtual Machine Instance (VMI).
|
||||
This plugin simulates a VM crash or outage scenario and supports automated or manual recovery.
|
||||
"""
|
||||
|
||||
def __init__(self, scenario_type: str = None):
|
||||
scenario_type = self.get_scenario_types()[0]
|
||||
super().__init__(scenario_type)
|
||||
self.k8s_client = None
|
||||
self.original_vmi = None
|
||||
self.vmis_list = []
|
||||
|
||||
# Scenario type is handled directly in execute_scenario
|
||||
def get_scenario_types(self) -> list[str]:
|
||||
return ["kubevirt_vm_outage"]
|
||||
|
||||
def run(
|
||||
self,
|
||||
run_uuid: str,
|
||||
scenario: str,
|
||||
krkn_config: dict[str, any],
|
||||
lib_telemetry: KrknTelemetryOpenshift,
|
||||
scenario_telemetry: ScenarioTelemetry,
|
||||
) -> int:
|
||||
"""
|
||||
Main entry point for the plugin.
|
||||
Parses the scenario configuration and executes the chaos scenario.
|
||||
"""
|
||||
try:
|
||||
with open(scenario, "r") as f:
|
||||
scenario_config = yaml.full_load(f)
|
||||
|
||||
self.init_clients(lib_telemetry.get_lib_kubernetes())
|
||||
pods_status = PodsStatus()
|
||||
for config in scenario_config["scenarios"]:
|
||||
if config.get("scenario") == "kubevirt_vm_outage":
|
||||
single_pods_status = self.execute_scenario(config, scenario_telemetry)
|
||||
pods_status.merge(single_pods_status)
|
||||
|
||||
scenario_telemetry.affected_pods = pods_status
|
||||
if len(scenario_telemetry.affected_pods.unrecovered) > 0:
|
||||
return 1
|
||||
return 0
|
||||
except Exception as e:
|
||||
logging.error(f"KubeVirt VM Outage scenario failed: {e}")
|
||||
log_exception(e)
|
||||
return 1
|
||||
|
||||
def init_clients(self, k8s_client: KrknKubernetes):
|
||||
"""
|
||||
Initialize Kubernetes client for KubeVirt operations.
|
||||
"""
|
||||
self.k8s_client = k8s_client
|
||||
self.custom_object_client = k8s_client.custom_object_client
|
||||
logging.info("Successfully initialized Kubernetes client for KubeVirt operations")
|
||||
|
||||
def get_vmi(self, name: str, namespace: str) -> Optional[Dict]:
|
||||
"""
|
||||
Get a Virtual Machine Instance by name and namespace.
|
||||
|
||||
:param name: Name of the VMI to retrieve
|
||||
:param namespace: Namespace of the VMI
|
||||
:return: The VMI object if found, None otherwise
|
||||
"""
|
||||
try:
|
||||
vmi = self.custom_object_client.get_namespaced_custom_object(
|
||||
group="kubevirt.io",
|
||||
version="v1",
|
||||
namespace=namespace,
|
||||
plural="virtualmachineinstances",
|
||||
name=name
|
||||
)
|
||||
return vmi
|
||||
except ApiException as e:
|
||||
if e.status == 404:
|
||||
logging.warning(f"VMI {name} not found in namespace {namespace}")
|
||||
return None
|
||||
else:
|
||||
logging.error(f"Error getting VMI {name}: {e}")
|
||||
raise
|
||||
except Exception as e:
|
||||
logging.error(f"Unexpected error getting VMI {name}: {e}")
|
||||
raise
|
||||
|
||||
def get_vmis(self, regex_name: str, namespace: str) -> Optional[Dict]:
|
||||
"""
|
||||
Get a Virtual Machine Instance by name and namespace.
|
||||
|
||||
:param name: Name of the VMI to retrieve
|
||||
:param namespace: Namespace of the VMI
|
||||
:return: The VMI object if found, None otherwise
|
||||
"""
|
||||
try:
|
||||
namespaces = self.k8s_client.list_namespaces_by_regex(namespace)
|
||||
for namespace in namespaces:
|
||||
vmis = self.custom_object_client.list_namespaced_custom_object(
|
||||
group="kubevirt.io",
|
||||
version="v1",
|
||||
namespace=namespace,
|
||||
plural="virtualmachineinstances",
|
||||
)
|
||||
|
||||
for vmi in vmis.get("items"):
|
||||
vmi_name = vmi.get("metadata",{}).get("name")
|
||||
match = re.match(regex_name, vmi_name)
|
||||
if match:
|
||||
self.vmis_list.append(vmi)
|
||||
except ApiException as e:
|
||||
if e.status == 404:
|
||||
logging.warning(f"VMI {regex_name} not found in namespace {namespace}")
|
||||
return []
|
||||
else:
|
||||
logging.error(f"Error getting VMI {regex_name}: {e}")
|
||||
raise
|
||||
except Exception as e:
|
||||
logging.error(f"Unexpected error getting VMI {regex_name}: {e}")
|
||||
raise
|
||||
|
||||
def execute_scenario(self, config: Dict[str, Any], scenario_telemetry: ScenarioTelemetry) -> int:
|
||||
"""
|
||||
Execute a KubeVirt VM outage scenario based on the provided configuration.
|
||||
|
||||
:param config: The scenario configuration
|
||||
:param scenario_telemetry: The telemetry object for recording metrics
|
||||
:return: 0 for success, 1 for failure
|
||||
"""
|
||||
self.pods_status = PodsStatus()
|
||||
try:
|
||||
params = config.get("parameters", {})
|
||||
vm_name = params.get("vm_name")
|
||||
namespace = params.get("namespace", "default")
|
||||
timeout = params.get("timeout", 60)
|
||||
kill_count = params.get("kill_count", 1)
|
||||
disable_auto_restart = params.get("disable_auto_restart", False)
|
||||
|
||||
if not vm_name:
|
||||
logging.error("vm_name parameter is required")
|
||||
return 1
|
||||
self.pods_status = PodsStatus()
|
||||
self.get_vmis(vm_name,namespace)
|
||||
for _ in range(kill_count):
|
||||
|
||||
rand_int = random.randint(0, len(self.vmis_list) - 1)
|
||||
vmi = self.vmis_list[rand_int]
|
||||
|
||||
logging.info(f"Starting KubeVirt VM outage scenario for VM: {vm_name} in namespace: {namespace}")
|
||||
vmi_name = vmi.get("metadata").get("name")
|
||||
vmi_namespace = vmi.get("metadata").get("namespace")
|
||||
if not self.validate_environment(vmi_name, vmi_namespace):
|
||||
return 1
|
||||
|
||||
vmi = self.get_vmi(vmi_name, vmi_namespace)
|
||||
self.affected_pod = AffectedPod(
|
||||
pod_name=vmi_name,
|
||||
namespace=vmi_namespace,
|
||||
)
|
||||
if not vmi:
|
||||
logging.error(f"VMI {vm_name} not found in namespace {namespace}")
|
||||
return 1
|
||||
|
||||
self.original_vmi = vmi
|
||||
logging.info(f"Captured initial state of VMI: {vm_name}")
|
||||
result = self.delete_vmi(vmi_name, vmi_namespace, disable_auto_restart)
|
||||
if result != 0:
|
||||
self.pods_status.unrecovered.append(self.affected_pod)
|
||||
continue
|
||||
|
||||
result = self.wait_for_running(vmi_name,vmi_namespace, timeout)
|
||||
if result != 0:
|
||||
self.pods_status.unrecovered.append(self.affected_pod)
|
||||
continue
|
||||
|
||||
self.affected_pod.total_recovery_time = (
|
||||
self.affected_pod.pod_readiness_time
|
||||
+ self.affected_pod.pod_rescheduling_time
|
||||
)
|
||||
|
||||
self.pods_status.recovered.append(self.affected_pod)
|
||||
logging.info(f"Successfully completed KubeVirt VM outage scenario for VM: {vm_name}")
|
||||
|
||||
return self.pods_status
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"Error executing KubeVirt VM outage scenario: {e}")
|
||||
log_exception(e)
|
||||
return self.pods_status
|
||||
|
||||
def validate_environment(self, vm_name: str, namespace: str) -> bool:
|
||||
"""
|
||||
Validate that KubeVirt is installed and the specified VM exists.
|
||||
|
||||
:param vm_name: Name of the VM to validate
|
||||
:param namespace: Namespace of the VM
|
||||
:return: True if environment is valid, False otherwise
|
||||
"""
|
||||
try:
|
||||
# Check if KubeVirt CRDs exist
|
||||
crd_list = self.custom_object_client.list_namespaced_custom_object("kubevirt.io","v1",namespace,"virtualmachines")
|
||||
kubevirt_crds = [crd for crd in crd_list.items() ]
|
||||
|
||||
if not kubevirt_crds:
|
||||
logging.error("KubeVirt CRDs not found. Ensure KubeVirt/CNV is installed in the cluster")
|
||||
return False
|
||||
|
||||
# Check if VMI exists
|
||||
vmi = self.get_vmi(vm_name, namespace)
|
||||
if not vmi:
|
||||
logging.error(f"VMI {vm_name} not found in namespace {namespace}")
|
||||
return False
|
||||
|
||||
logging.info(f"Validated environment: KubeVirt is installed and VMI {vm_name} exists")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"Error validating environment: {e}")
|
||||
return False
|
||||
|
||||
def patch_vm_spec(self, vm_name: str, namespace: str, running: bool) -> bool:
|
||||
"""
|
||||
Patch the VM spec to enable/disable auto-restart.
|
||||
|
||||
:param vm_name: Name of the VM to patch
|
||||
:param namespace: Namespace of the VM
|
||||
:param running: Whether the VM should be set to running state
|
||||
:return: True if patch was successful, False otherwise
|
||||
"""
|
||||
try:
|
||||
# Get the VM object first to get its current spec
|
||||
vm = self.custom_object_client.get_namespaced_custom_object(
|
||||
group="kubevirt.io",
|
||||
version="v1",
|
||||
namespace=namespace,
|
||||
plural="virtualmachines",
|
||||
name=vm_name
|
||||
)
|
||||
|
||||
# Update the running state
|
||||
if 'spec' not in vm:
|
||||
vm['spec'] = {}
|
||||
vm['spec']['running'] = running
|
||||
|
||||
# Apply the patch
|
||||
self.custom_object_client.patch_namespaced_custom_object(
|
||||
group="kubevirt.io",
|
||||
version="v1",
|
||||
namespace=namespace,
|
||||
plural="virtualmachines",
|
||||
name=vm_name,
|
||||
body=vm
|
||||
)
|
||||
return True
|
||||
|
||||
except ApiException as e:
|
||||
logging.error(f"Failed to patch VM {vm_name}: {e}")
|
||||
return False
|
||||
except Exception as e:
|
||||
logging.error(f"Unexpected error patching VM {vm_name}: {e}")
|
||||
return False
|
||||
|
||||
def delete_vmi(self, vm_name: str, namespace: str, disable_auto_restart: bool = False, timeout: int = 120) -> int:
|
||||
"""
|
||||
Delete a Virtual Machine Instance to simulate a VM outage.
|
||||
|
||||
:param vm_name: Name of the VMI to delete
|
||||
:param namespace: Namespace of the VMI
|
||||
:return: 0 for success, 1 for failure
|
||||
"""
|
||||
try:
|
||||
logging.info(f"Injecting chaos: Deleting VMI {vm_name} in namespace {namespace}")
|
||||
|
||||
# If auto-restart should be disabled, patch the VM spec first
|
||||
if disable_auto_restart:
|
||||
logging.info(f"Disabling auto-restart for VM {vm_name} by setting spec.running=False")
|
||||
if not self.patch_vm_spec(vm_name, namespace, running=False):
|
||||
logging.error("Failed to disable auto-restart for VM"
|
||||
" - proceeding with deletion but VM may auto-restart")
|
||||
start_creation_time = self.original_vmi.get('metadata', {}).get('creationTimestamp')
|
||||
start_time = time.time()
|
||||
try:
|
||||
self.custom_object_client.delete_namespaced_custom_object(
|
||||
group="kubevirt.io",
|
||||
version="v1",
|
||||
namespace=namespace,
|
||||
plural="virtualmachineinstances",
|
||||
name=vm_name
|
||||
)
|
||||
except ApiException as e:
|
||||
if e.status == 404:
|
||||
logging.warning(f"VMI {vm_name} not found during deletion")
|
||||
return 1
|
||||
else:
|
||||
logging.error(f"API error during VMI deletion: {e}")
|
||||
return 1
|
||||
|
||||
# Wait for the VMI to be deleted
|
||||
|
||||
while time.time() - start_time < timeout:
|
||||
deleted_vmi = self.get_vmi(vm_name, namespace)
|
||||
if deleted_vmi:
|
||||
if start_creation_time != deleted_vmi.get('metadata', {}).get('creationTimestamp'):
|
||||
logging.info(f"VMI {vm_name} successfully recreated")
|
||||
self.affected_pod.pod_rescheduling_time = time.time() - start_time
|
||||
return 0
|
||||
else:
|
||||
logging.info(f"VMI {vm_name} successfully deleted")
|
||||
time.sleep(1)
|
||||
|
||||
logging.error(f"Timed out waiting for VMI {vm_name} to be deleted")
|
||||
self.pods_status.unrecovered.append(self.affected_pod)
|
||||
return 1
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"Error deleting VMI {vm_name}: {e}")
|
||||
log_exception(e)
|
||||
self.pods_status.unrecovered.append(self.affected_pod)
|
||||
return 1
|
||||
|
||||
def wait_for_running(self, vm_name: str, namespace: str, timeout: int = 120) -> int:
|
||||
start_time = time.time()
|
||||
while time.time() - start_time < timeout:
|
||||
|
||||
# Check current state once since we've already waited for the duration
|
||||
vmi = self.get_vmi(vm_name, namespace)
|
||||
|
||||
if vmi:
|
||||
if vmi.get('status', {}).get('phase') == "Running":
|
||||
end_time = time.time()
|
||||
self.affected_pod.pod_readiness_time = end_time - start_time
|
||||
|
||||
logging.info(f"VMI {vm_name} is already running")
|
||||
return 0
|
||||
logging.info(f"VMI {vm_name} exists but is not in Running state. Current state: {vmi.get('status', {}).get('phase')}")
|
||||
else:
|
||||
logging.info(f"VMI {vm_name} not yet recreated")
|
||||
time.sleep(1)
|
||||
return 1
|
||||
|
||||
|
||||
    def recover(self, vm_name: str, namespace: str, disable_auto_restart: bool = False) -> int:
        """
        Recover a deleted VMI, either by waiting for auto-recovery or manually recreating it.

        :param vm_name: Name of the VMI to recover
        :param namespace: Namespace of the VMI
        :param disable_auto_restart: Whether auto-restart was disabled during injection
        :return: 0 for success, 1 for failure
        """
        try:
            logging.info(f"Attempting to recover VMI {vm_name} in namespace {namespace}")

            if self.original_vmi:
                logging.info(f"Auto-recovery didn't occur for VMI {vm_name}. Attempting manual recreation")

                try:
                    # Clean up server-generated fields
                    vmi_dict = self.original_vmi.copy()
                    if 'metadata' in vmi_dict:
                        metadata = vmi_dict['metadata']
                        for field in ['resourceVersion', 'uid', 'creationTimestamp', 'generation']:
                            if field in metadata:
                                del metadata[field]

                    # Create the VMI
                    self.custom_object_client.create_namespaced_custom_object(
                        group="kubevirt.io",
                        version="v1",
                        namespace=namespace,
                        plural="virtualmachineinstances",
                        body=vmi_dict
                    )
                    logging.info(f"Successfully recreated VMI {vm_name}")

                    # Wait for the recreated VMI to reach the Running phase
                    if self.wait_for_running(vm_name, namespace) != 0:
                        logging.warning(f"VMI {vm_name} was recreated but didn't reach Running state in time")
                    return 0  # Still consider it a success as the VMI was recreated

                except Exception as e:
                    logging.error(f"Error recreating VMI {vm_name}: {e}")
                    log_exception(e)
                    return 1
            else:
                logging.error(f"Failed to recover VMI {vm_name}: No original state captured and auto-recovery did not occur")
                return 1

        except Exception as e:
            logging.error(f"Unexpected error recovering VMI {vm_name}: {e}")
            log_exception(e)
            return 1
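
# Editor's note: a minimal, self-contained sketch of the "strip server-generated
# fields before re-creating the object" step used in recover() above. The helper
# name and the sample dict are illustrative assumptions, not repository code.
def strip_server_generated_fields(vmi_dict: dict) -> dict:
    cleaned = dict(vmi_dict)
    metadata = dict(cleaned.get('metadata', {}))
    for field in ('resourceVersion', 'uid', 'creationTimestamp', 'generation'):
        metadata.pop(field, None)  # drop fields the API server will regenerate
    cleaned['metadata'] = metadata
    return cleaned

# Example: strip_server_generated_fields({'metadata': {'name': 'vm-a', 'uid': 'x'}})
# returns {'metadata': {'name': 'vm-a'}}
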
@@ -1,6 +1,5 @@
|
||||
from krkn.scenario_plugins.abstract_scenario_plugin import AbstractScenarioPlugin
|
||||
from krkn.scenario_plugins.native.plugins import PLUGINS
|
||||
from krkn_lib.k8s.pods_monitor_pool import PodsMonitorPool
|
||||
from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
|
||||
from typing import Any
|
||||
@@ -17,76 +16,23 @@ class NativeScenarioPlugin(AbstractScenarioPlugin):
|
||||
lib_telemetry: KrknTelemetryOpenshift,
|
||||
scenario_telemetry: ScenarioTelemetry,
|
||||
) -> int:
|
||||
pool = PodsMonitorPool(lib_telemetry.get_lib_kubernetes())
|
||||
kill_scenarios = [
|
||||
kill_scenario
|
||||
for kill_scenario in PLUGINS.unserialize_scenario(scenario)
|
||||
if kill_scenario["id"] == "kill-pods"
|
||||
]
|
||||
|
||||
try:
|
||||
self.start_monitoring(pool, kill_scenarios)
|
||||
PLUGINS.run(
|
||||
scenario,
|
||||
lib_telemetry.get_lib_kubernetes().get_kubeconfig_path(),
|
||||
krkn_config,
|
||||
run_uuid,
|
||||
)
|
||||
result = pool.join()
|
||||
scenario_telemetry.affected_pods = result
|
||||
if result.error:
|
||||
logging.error(f"NativeScenarioPlugin unrecovered pods: {result.error}")
|
||||
return 1
|
||||
|
||||
except Exception as e:
|
||||
logging.error("NativeScenarioPlugin exiting due to Exception %s" % e)
|
||||
pool.cancel()
|
||||
return 1
|
||||
else:
|
||||
return 0
|
||||
|
||||
def get_scenario_types(self) -> list[str]:
|
||||
return [
|
||||
"pod_disruption_scenarios",
|
||||
"pod_network_scenarios",
|
||||
"ingress_node_scenarios"
|
||||
]
|
||||
|
||||
def start_monitoring(self, pool: PodsMonitorPool, scenarios: list[Any]):
|
||||
for kill_scenario in scenarios:
|
||||
recovery_time = kill_scenario["config"]["krkn_pod_recovery_time"]
|
||||
if (
|
||||
"namespace_pattern" in kill_scenario["config"]
|
||||
and "label_selector" in kill_scenario["config"]
|
||||
):
|
||||
namespace_pattern = kill_scenario["config"]["namespace_pattern"]
|
||||
label_selector = kill_scenario["config"]["label_selector"]
|
||||
pool.select_and_monitor_by_namespace_pattern_and_label(
|
||||
namespace_pattern=namespace_pattern,
|
||||
label_selector=label_selector,
|
||||
max_timeout=recovery_time,
|
||||
)
|
||||
logging.info(
|
||||
f"waiting {recovery_time} seconds for pod recovery, "
|
||||
f"pod label selector: {label_selector} namespace pattern: {namespace_pattern}"
|
||||
)
|
||||
|
||||
elif (
|
||||
"namespace_pattern" in kill_scenario["config"]
|
||||
and "name_pattern" in kill_scenario["config"]
|
||||
):
|
||||
namespace_pattern = kill_scenario["config"]["namespace_pattern"]
|
||||
name_pattern = kill_scenario["config"]["name_pattern"]
|
||||
pool.select_and_monitor_by_name_pattern_and_namespace_pattern(
|
||||
pod_name_pattern=name_pattern,
|
||||
namespace_pattern=namespace_pattern,
|
||||
max_timeout=recovery_time,
|
||||
)
|
||||
logging.info(
|
||||
f"waiting {recovery_time} seconds for pod recovery, "
|
||||
f"pod name pattern: {name_pattern} namespace pattern: {namespace_pattern}"
|
||||
)
|
||||
else:
|
||||
raise Exception(
|
||||
f"impossible to determine monitor parameters, check {kill_scenario} configuration"
|
||||
)
|
||||
|
||||
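
# Editor's note: an illustrative kill-pods entry that satisfies the selection logic
# in start_monitoring() above (the namespace pattern, label selector and recovery
# time values are assumptions, not taken from the repository):
kill_scenario_example = {
    "id": "kill-pods",
    "config": {
        "namespace_pattern": "^openshift-etcd$",
        "label_selector": "app=etcd",
        "krkn_pod_recovery_time": 120,
    },
}
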
@@ -27,7 +27,7 @@ def get_status(config, start_time, end_time):
    application_routes_status = True
    if config["cerberus"]["cerberus_enabled"]:
        cerberus_url = config["cerberus"]["cerberus_url"]
        check_application_routes = config["cerberus"]["check_applicaton_routes"]
        check_application_routes = config["cerberus"]["check_application_routes"]
        if not cerberus_url:
            logging.error("url where Cerberus publishes True/False signal is not provided.")
            sys.exit(1)

@@ -5,6 +5,7 @@ import time
|
||||
import sys
|
||||
import os
|
||||
import re
|
||||
import random
|
||||
from traceback import format_exc
|
||||
from jinja2 import Environment, FileSystemLoader
|
||||
from . import kubernetes_functions as kube_helper
|
||||
@@ -28,6 +29,14 @@ class NetworkScenarioConfig:
|
||||
},
|
||||
)
|
||||
|
||||
image: typing.Annotated[str, validation.min(1)]= field(
|
||||
default="quay.io/krkn-chaos/krkn:tools",
|
||||
metadata={
|
||||
"name": "Image",
|
||||
"description": "Image of krkn tools to run"
|
||||
}
|
||||
)
|
||||
|
||||
label_selector: typing.Annotated[
|
||||
typing.Optional[str], validation.required_if_not("node_interface_name")
|
||||
] = field(
|
||||
@@ -142,7 +151,7 @@ class NetworkScenarioErrorOutput:
|
||||
)
|
||||
|
||||
|
||||
def get_default_interface(node: str, pod_template, cli: CoreV1Api) -> str:
|
||||
def get_default_interface(node: str, pod_template, cli: CoreV1Api, image: str) -> str:
|
||||
"""
|
||||
Function that returns a random interface from a node
|
||||
|
||||
@@ -160,14 +169,14 @@ def get_default_interface(node: str, pod_template, cli: CoreV1Api) -> str:
|
||||
Returns:
|
||||
Default interface (string) belonging to the node
|
||||
"""
|
||||
|
||||
pod_body = yaml.safe_load(pod_template.render(nodename=node))
|
||||
pod_name_regex = str(random.randint(0, 10000))
|
||||
pod_body = yaml.safe_load(pod_template.render(regex_name=pod_name_regex,nodename=node, image=image))
|
||||
logging.info("Creating pod to query interface on node %s" % node)
|
||||
kube_helper.create_pod(cli, pod_body, "default", 300)
|
||||
|
||||
pod_name = f"fedtools-{pod_name_regex}"
|
||||
try:
|
||||
cmd = ["ip", "r"]
|
||||
output = kube_helper.exec_cmd_in_pod(cli, cmd, "fedtools", "default")
|
||||
output = kube_helper.exec_cmd_in_pod(cli, cmd, pod_name, "default")
|
||||
|
||||
if not output:
|
||||
logging.error("Exception occurred while executing command in pod")
|
||||
@@ -183,13 +192,13 @@ def get_default_interface(node: str, pod_template, cli: CoreV1Api) -> str:
|
||||
|
||||
finally:
|
||||
logging.info("Deleting pod to query interface on node")
|
||||
kube_helper.delete_pod(cli, "fedtools", "default")
|
||||
kube_helper.delete_pod(cli, pod_name, "default")
|
||||
|
||||
return interfaces
|
||||
|
||||
|
||||
def verify_interface(
|
||||
input_interface_list: typing.List[str], node: str, pod_template, cli: CoreV1Api
|
||||
input_interface_list: typing.List[str], node: str, pod_template, cli: CoreV1Api, image: str
|
||||
) -> typing.List[str]:
|
||||
"""
|
||||
Function that verifies whether a list of interfaces is present in the node.
|
||||
@@ -212,13 +221,15 @@ def verify_interface(
|
||||
Returns:
|
||||
The interface list for the node
|
||||
"""
|
||||
pod_body = yaml.safe_load(pod_template.render(nodename=node))
|
||||
pod_name_regex = str(random.randint(0, 10000))
|
||||
pod_body = yaml.safe_load(pod_template.render(regex_name=pod_name_regex,nodename=node, image=image))
|
||||
logging.info("Creating pod to query interface on node %s" % node)
|
||||
kube_helper.create_pod(cli, pod_body, "default", 300)
|
||||
pod_name = f"fedtools-{pod_name_regex}"
|
||||
try:
|
||||
if input_interface_list == []:
|
||||
cmd = ["ip", "r"]
|
||||
output = kube_helper.exec_cmd_in_pod(cli, cmd, "fedtools", "default")
|
||||
output = kube_helper.exec_cmd_in_pod(cli, cmd, pod_name, "default")
|
||||
|
||||
if not output:
|
||||
logging.error("Exception occurred while executing command in pod")
|
||||
@@ -234,7 +245,7 @@ def verify_interface(
|
||||
|
||||
else:
|
||||
cmd = ["ip", "-br", "addr", "show"]
|
||||
output = kube_helper.exec_cmd_in_pod(cli, cmd, "fedtools", "default")
|
||||
output = kube_helper.exec_cmd_in_pod(cli, cmd, pod_name, "default")
|
||||
|
||||
if not output:
|
||||
logging.error("Exception occurred while executing command in pod")
|
||||
@@ -257,7 +268,7 @@ def verify_interface(
|
||||
)
|
||||
finally:
|
||||
logging.info("Deleting pod to query interface on node")
|
||||
kube_helper.delete_pod(cli, "fedtools", "default")
|
||||
kube_helper.delete_pod(cli, pod_name, "default")
|
||||
|
||||
return input_interface_list
|
||||
|
||||
@@ -268,6 +279,7 @@ def get_node_interfaces(
|
||||
instance_count: int,
|
||||
pod_template,
|
||||
cli: CoreV1Api,
|
||||
image: str
|
||||
) -> typing.Dict[str, typing.List[str]]:
|
||||
"""
|
||||
Function that is used to process the input dictionary with the nodes and
|
||||
@@ -309,7 +321,7 @@ def get_node_interfaces(
|
||||
nodes = kube_helper.get_node(None, label_selector, instance_count, cli)
|
||||
node_interface_dict = {}
|
||||
for node in nodes:
|
||||
node_interface_dict[node] = get_default_interface(node, pod_template, cli)
|
||||
node_interface_dict[node] = get_default_interface(node, pod_template, cli, image)
|
||||
else:
|
||||
node_name_list = node_interface_dict.keys()
|
||||
filtered_node_list = []
|
||||
@@ -321,7 +333,7 @@ def get_node_interfaces(
|
||||
|
||||
for node in filtered_node_list:
|
||||
node_interface_dict[node] = verify_interface(
|
||||
node_interface_dict[node], node, pod_template, cli
|
||||
node_interface_dict[node], node, pod_template, cli, image
|
||||
)
|
||||
|
||||
return node_interface_dict
|
||||
@@ -337,6 +349,7 @@ def apply_ingress_filter(
|
||||
cli: CoreV1Api,
|
||||
create_interfaces: bool = True,
|
||||
param_selector: str = "all",
|
||||
image:str = "quay.io/krkn-chaos/krkn:tools",
|
||||
) -> str:
|
||||
"""
|
||||
Function that applies the filters to shape incoming traffic to
|
||||
@@ -382,14 +395,14 @@ def apply_ingress_filter(
|
||||
network_params = {param_selector: cfg.network_params[param_selector]}
|
||||
|
||||
if create_interfaces:
|
||||
create_virtual_interfaces(cli, interface_list, node, pod_template)
|
||||
create_virtual_interfaces(cli, interface_list, node, pod_template, image)
|
||||
|
||||
exec_cmd = get_ingress_cmd(
|
||||
interface_list, network_params, duration=cfg.test_duration
|
||||
)
|
||||
logging.info("Executing %s on node %s" % (exec_cmd, node))
|
||||
job_body = yaml.safe_load(
|
||||
job_template.render(jobname=str(hash(node))[:5], nodename=node, cmd=exec_cmd)
|
||||
job_template.render(jobname=str(hash(node))[:5], nodename=node, image=image, cmd=exec_cmd)
|
||||
)
|
||||
api_response = kube_helper.create_job(batch_cli, job_body)
|
||||
|
||||
@@ -400,7 +413,7 @@ def apply_ingress_filter(
|
||||
|
||||
|
||||
def create_virtual_interfaces(
|
||||
cli: CoreV1Api, interface_list: typing.List[str], node: str, pod_template
|
||||
cli: CoreV1Api, interface_list: typing.List[str], node: str, pod_template, image: str
|
||||
) -> None:
|
||||
"""
|
||||
Function that creates a privileged pod and uses it to create
|
||||
@@ -421,20 +434,22 @@ def create_virtual_interfaces(
|
||||
- The YAML template used to instantiate a pod to create
|
||||
virtual interfaces on the node
|
||||
"""
|
||||
pod_body = yaml.safe_load(pod_template.render(nodename=node))
|
||||
pod_name_regex = str(random.randint(0, 10000))
|
||||
pod_body = yaml.safe_load(pod_template.render(regex_name=pod_name_regex,nodename=node, image=image))
|
||||
kube_helper.create_pod(cli, pod_body, "default", 300)
|
||||
logging.info(
|
||||
"Creating {0} virtual interfaces on node {1} using a pod".format(
|
||||
len(interface_list), node
|
||||
)
|
||||
)
|
||||
create_ifb(cli, len(interface_list), "modtools")
|
||||
pod_name = f"modtools-{pod_name_regex}"
|
||||
create_ifb(cli, len(interface_list), pod_name)
|
||||
logging.info("Deleting pod used to create virtual interfaces")
|
||||
kube_helper.delete_pod(cli, "modtools", "default")
|
||||
kube_helper.delete_pod(cli, pod_name, "default")
|
||||
|
||||
|
||||
def delete_virtual_interfaces(
|
||||
cli: CoreV1Api, node_list: typing.List[str], pod_template
|
||||
cli: CoreV1Api, node_list: typing.List[str], pod_template, image: str
|
||||
):
|
||||
"""
|
||||
Function that creates a privileged pod and uses it to delete all
|
||||
@@ -457,11 +472,13 @@ def delete_virtual_interfaces(
|
||||
"""
|
||||
|
||||
for node in node_list:
|
||||
pod_body = yaml.safe_load(pod_template.render(nodename=node))
|
||||
pod_name_regex = str(random.randint(0, 10000))
|
||||
pod_body = yaml.safe_load(pod_template.render(regex_name=pod_name_regex,nodename=node, image=image))
|
||||
kube_helper.create_pod(cli, pod_body, "default", 300)
|
||||
logging.info("Deleting all virtual interfaces on node {0}".format(node))
|
||||
delete_ifb(cli, "modtools")
|
||||
kube_helper.delete_pod(cli, "modtools", "default")
|
||||
pod_name = f"modtools-{pod_name_regex}"
|
||||
delete_ifb(cli, pod_name)
|
||||
kube_helper.delete_pod(cli, pod_name, "default")
|
||||
|
||||
|
||||
def create_ifb(cli: CoreV1Api, number: int, pod_name: str):
|
||||
@@ -700,7 +717,7 @@ def network_chaos(
|
||||
pod_interface_template = env.get_template("pod_interface.j2")
|
||||
pod_module_template = env.get_template("pod_module.j2")
|
||||
cli, batch_cli = kube_helper.setup_kubernetes(cfg.kubeconfig_path)
|
||||
|
||||
test_image = cfg.image
|
||||
logging.info("Starting Ingress Network Chaos")
|
||||
try:
|
||||
node_interface_dict = get_node_interfaces(
|
||||
@@ -709,6 +726,7 @@ def network_chaos(
|
||||
cfg.instance_count,
|
||||
pod_interface_template,
|
||||
cli,
|
||||
test_image
|
||||
)
|
||||
except Exception:
|
||||
return "error", NetworkScenarioErrorOutput(format_exc())
|
||||
@@ -726,6 +744,7 @@ def network_chaos(
|
||||
job_template,
|
||||
batch_cli,
|
||||
cli,
|
||||
test_image
|
||||
)
|
||||
)
|
||||
logging.info("Waiting for parallel job to finish")
|
||||
@@ -746,6 +765,7 @@ def network_chaos(
|
||||
cli,
|
||||
create_interfaces=create_interfaces,
|
||||
param_selector=param,
|
||||
image=test_image
|
||||
)
|
||||
)
|
||||
logging.info("Waiting for serial job to finish")
|
||||
@@ -772,6 +792,6 @@ def network_chaos(
|
||||
logging.error("Ingress Network Chaos exiting due to Exception - %s" % e)
|
||||
return "error", NetworkScenarioErrorOutput(format_exc())
|
||||
finally:
|
||||
delete_virtual_interfaces(cli, node_interface_dict.keys(), pod_module_template)
|
||||
delete_virtual_interfaces(cli, node_interface_dict.keys(), pod_module_template, test_image)
|
||||
logging.info("Deleting jobs(if any)")
|
||||
delete_jobs(cli, batch_cli, job_list[:])
|
||||
|
||||
@@ -9,7 +9,7 @@ spec:
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: networkchaos
|
||||
image: docker.io/fedora/tools
|
||||
image: {{image}}
|
||||
command: ["/bin/sh", "-c", "{{cmd}}"]
|
||||
securityContext:
|
||||
privileged: true
|
||||
@@ -22,4 +22,4 @@ spec:
|
||||
hostPath:
|
||||
path: /lib/modules
|
||||
restartPolicy: Never
|
||||
backoffLimit: 0
|
||||
backoffLimit: 0
|
||||
|
||||
@@ -1,13 +1,13 @@
apiVersion: v1
kind: Pod
metadata:
  name: fedtools
  name: fedtools-{{regex_name}}
spec:
  hostNetwork: true
  nodeName: {{nodename}}
  containers:
  - name: fedtools
    image: docker.io/fedora/tools
    image: {{image}}
    command:
    - /bin/sh
    - -c

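
# Editor's note: a sketch of how the parametrized pod template above is consumed,
# mirroring the render calls shown earlier in this changeset; the node name and
# image value below are illustrative assumptions:
#
#   suffix = str(random.randint(0, 10000))
#   pod_body = yaml.safe_load(
#       pod_template.render(regex_name=suffix, nodename="worker-0",
#                           image="quay.io/krkn-chaos/krkn:tools")
#   )
#   pod_body["metadata"]["name"]   # -> f"fedtools-{suffix}"
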
@@ -1,12 +1,12 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: modtools
|
||||
name: modtools-{{regex_name}}
|
||||
spec:
|
||||
nodeName: {{nodename}}
|
||||
containers:
|
||||
- name: modtools
|
||||
image: docker.io/fedora/tools
|
||||
image: {{image}}
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- /bin/sh
|
||||
@@ -27,4 +27,4 @@ spec:
|
||||
hostNetwork: true
|
||||
hostIPC: true
|
||||
hostPID: true
|
||||
restartPolicy: Never
|
||||
restartPolicy: Never
|
||||
|
||||
@@ -4,7 +4,6 @@ import logging
|
||||
from os.path import abspath
|
||||
from typing import List, Any, Dict
|
||||
from krkn.scenario_plugins.native.run_python_plugin import run_python_file
|
||||
from arcaflow_plugin_kill_pod import kill_pods, wait_for_pods
|
||||
from krkn.scenario_plugins.native.network.ingress_shaping import network_chaos
|
||||
from krkn.scenario_plugins.native.pod_network_outage.pod_network_outage_plugin import (
|
||||
pod_outage,
|
||||
@@ -148,13 +147,6 @@ class Plugins:
|
||||
|
||||
PLUGINS = Plugins(
|
||||
[
|
||||
PluginStep(
|
||||
kill_pods,
|
||||
[
|
||||
"error",
|
||||
],
|
||||
),
|
||||
PluginStep(wait_for_pods, ["error"]),
|
||||
PluginStep(run_python_file, ["error"]),
|
||||
PluginStep(network_chaos, ["error"]),
|
||||
PluginStep(pod_outage, ["error"]),
|
||||
|
||||
@@ -27,7 +27,7 @@ def get_status(config, start_time, end_time):
|
||||
application_routes_status = True
|
||||
if config["cerberus"]["cerberus_enabled"]:
|
||||
cerberus_url = config["cerberus"]["cerberus_url"]
|
||||
check_application_routes = config["cerberus"]["check_applicaton_routes"]
|
||||
check_application_routes = config["cerberus"]["check_application_routes"]
|
||||
if not cerberus_url:
|
||||
logging.error(
|
||||
"url where Cerberus publishes True/False signal is not provided.")
|
||||
|
||||
@@ -9,7 +9,7 @@ spec:
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: networkchaos
|
||||
image: docker.io/fedora/tools
|
||||
image: {{image}}
|
||||
command: ["chroot", "/host", "/bin/sh", "-c", "{{cmd}}"]
|
||||
securityContext:
|
||||
privileged: true
|
||||
|
||||
@@ -23,8 +23,7 @@ def create_job(batch_cli, body, namespace="default"):
|
||||
"""
|
||||
|
||||
try:
|
||||
api_response = batch_cli.create_namespaced_job(
|
||||
body=body, namespace=namespace)
|
||||
api_response = batch_cli.create_namespaced_job(body=body, namespace=namespace)
|
||||
return api_response
|
||||
except ApiException as api:
|
||||
logging.warning(
|
||||
@@ -71,7 +70,8 @@ def create_pod(cli, body, namespace, timeout=120):
|
||||
end_time = time.time() + timeout
|
||||
while True:
|
||||
pod_stat = cli.read_namespaced_pod(
|
||||
name=body["metadata"]["name"], namespace=namespace)
|
||||
name=body["metadata"]["name"], namespace=namespace
|
||||
)
|
||||
if pod_stat.status.phase == "Running":
|
||||
break
|
||||
if time.time() > end_time:
|
||||
@@ -121,16 +121,18 @@ def exec_cmd_in_pod(cli, command, pod_name, namespace, container=None):
|
||||
return ret
|
||||
|
||||
|
||||
def list_pods(cli, namespace, label_selector=None):
|
||||
def list_pods(cli, namespace, label_selector=None, exclude_label=None):
|
||||
"""
|
||||
Function used to list pods in a given namespace and having a certain label
|
||||
Function used to list pods in a given namespace and having a certain label and excluding pods with exclude_label
|
||||
and excluding pods with exclude_label
|
||||
"""
|
||||
|
||||
pods = []
|
||||
try:
|
||||
if label_selector:
|
||||
ret = cli.list_namespaced_pod(
|
||||
namespace, pretty=True, label_selector=label_selector)
|
||||
namespace, pretty=True, label_selector=label_selector
|
||||
)
|
||||
else:
|
||||
ret = cli.list_namespaced_pod(namespace, pretty=True)
|
||||
except ApiException as e:
|
||||
@@ -140,7 +142,16 @@ def list_pods(cli, namespace, label_selector=None):
|
||||
% e
|
||||
)
|
||||
raise e
|
||||
|
||||
for pod in ret.items:
|
||||
# Skip pods with the exclude label if specified
|
||||
if exclude_label and pod.metadata.labels:
|
||||
exclude_key, exclude_value = exclude_label.split("=", 1)
|
||||
if (
|
||||
exclude_key in pod.metadata.labels
|
||||
and pod.metadata.labels[exclude_key] == exclude_value
|
||||
):
|
||||
continue
|
||||
pods.append(pod.metadata.name)
|
||||
|
||||
return pods
|
||||
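
# Editor's note: the exclude_label handling above reduces to a key=value match on
# the pod's labels; a minimal standalone illustration (label values are made up):
def _is_excluded(labels: dict, exclude_label: str) -> bool:
    key, value = exclude_label.split("=", 1)
    return bool(labels) and labels.get(key) == value

# _is_excluded({"app": "web", "krkn": "skip"}, "krkn=skip")  -> True
# _is_excluded({"app": "web"}, "krkn=skip")                  -> False
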
@@ -152,8 +163,7 @@ def get_job_status(batch_cli, name, namespace="default"):
|
||||
"""
|
||||
|
||||
try:
|
||||
return batch_cli.read_namespaced_job_status(
|
||||
name=name, namespace=namespace)
|
||||
return batch_cli.read_namespaced_job_status(name=name, namespace=namespace)
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Exception when calling \
|
||||
@@ -169,7 +179,10 @@ def get_pod_log(cli, name, namespace="default"):
|
||||
"""
|
||||
|
||||
return cli.read_namespaced_pod_log(
|
||||
name=name, namespace=namespace, _return_http_data_only=True, _preload_content=False
|
||||
name=name,
|
||||
namespace=namespace,
|
||||
_return_http_data_only=True,
|
||||
_preload_content=False,
|
||||
)
|
||||
|
||||
|
||||
@@ -191,7 +204,8 @@ def delete_job(batch_cli, name, namespace="default"):
|
||||
name=name,
|
||||
namespace=namespace,
|
||||
body=client.V1DeleteOptions(
|
||||
propagation_policy="Foreground", grace_period_seconds=0),
|
||||
propagation_policy="Foreground", grace_period_seconds=0
|
||||
),
|
||||
)
|
||||
logging.debug("Job deleted. status='%s'" % str(api_response.status))
|
||||
return api_response
|
||||
@@ -247,11 +261,8 @@ def get_node(node_name, label_selector, instance_kill_count, cli):
|
||||
)
|
||||
nodes = list_ready_nodes(cli, label_selector)
|
||||
if not nodes:
|
||||
raise Exception(
|
||||
"Ready nodes with the provided label selector do not exist")
|
||||
logging.info(
|
||||
"Ready nodes with the label selector %s: %s" % (label_selector, nodes)
|
||||
)
|
||||
raise Exception("Ready nodes with the provided label selector do not exist")
|
||||
logging.info("Ready nodes with the label selector %s: %s" % (label_selector, nodes))
|
||||
number_of_nodes = len(nodes)
|
||||
if instance_kill_count == number_of_nodes:
|
||||
return nodes
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: modtools
|
||||
name: modtools-{{regex_name}}
|
||||
spec:
|
||||
nodeName: {{nodename}}
|
||||
containers:
|
||||
- name: modtools
|
||||
image: docker.io/fedora/tools
|
||||
image: {{image}}
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- /bin/sh
|
||||
@@ -27,4 +27,4 @@ spec:
|
||||
hostNetwork: true
|
||||
hostIPC: true
|
||||
hostPID: true
|
||||
restartPolicy: Never
|
||||
restartPolicy: Never
|
||||
|
||||
@@ -19,7 +19,11 @@ from . import cerberus
|
||||
|
||||
|
||||
def get_test_pods(
|
||||
pod_name: str, pod_label: str, namespace: str, kubecli: KrknKubernetes
|
||||
pod_name: str,
|
||||
pod_label: str,
|
||||
namespace: str,
|
||||
kubecli: KrknKubernetes,
|
||||
exclude_label: str = None,
|
||||
) -> typing.List[str]:
|
||||
"""
|
||||
Function that returns a list of pods to apply network policy
|
||||
@@ -32,17 +36,22 @@ def get_test_pods(
|
||||
- pods matching the label on which network policy
|
||||
need to be applied
|
||||
|
||||
namepsace (string)
|
||||
namespace (string)
|
||||
- namespace in which the pod is present
|
||||
|
||||
kubecli (KrknKubernetes)
|
||||
- Object to interact with Kubernetes Python client
|
||||
|
||||
exclude_label (string)
|
||||
- pods matching this label will be excluded from the outage
|
||||
|
||||
Returns:
|
||||
pod names (string) in the namespace
|
||||
"""
|
||||
pods_list = []
|
||||
pods_list = kubecli.list_pods(label_selector=pod_label, namespace=namespace)
|
||||
pods_list = kubecli.list_pods(
|
||||
label_selector=pod_label, namespace=namespace, exclude_label=exclude_label
|
||||
)
|
||||
if pod_name and pod_name not in pods_list:
|
||||
raise Exception("pod name not found in namespace ")
|
||||
elif pod_name and pod_name in pods_list:
|
||||
@@ -192,6 +201,7 @@ def apply_outage_policy(
|
||||
duration: str,
|
||||
bridge_name: str,
|
||||
kubecli: KrknKubernetes,
|
||||
image: str
|
||||
) -> typing.List[str]:
|
||||
"""
|
||||
Function that applies filters(ingress or egress) to block traffic.
|
||||
@@ -223,6 +233,12 @@ def apply_outage_policy(
|
||||
batch_cli (BatchV1Api)
|
||||
- Object to interact with Kubernetes Python client's BatchV1Api API
|
||||
|
||||
image (string)
|
||||
- Image of network chaos tool
|
||||
|
||||
exclude_label (string)
|
||||
- pods matching this label will be excluded from the outage
|
||||
|
||||
Returns:
|
||||
The name of the job created that executes the commands on a node
|
||||
for ingress chaos scenario
|
||||
@@ -239,7 +255,7 @@ def apply_outage_policy(
|
||||
br = "br-int"
|
||||
table = 8
|
||||
for node, ips in node_dict.items():
|
||||
while len(check_cookie(node, pod_template, br, cookie, kubecli)) > 2 or cookie in cookie_list:
|
||||
while len(check_cookie(node, pod_template, br, cookie, kubecli, image)) > 2 or cookie in cookie_list:
|
||||
cookie = random.randint(100, 10000)
|
||||
exec_cmd = ""
|
||||
for ip in ips:
|
||||
@@ -257,6 +273,7 @@ def apply_outage_policy(
|
||||
job_template.render(
|
||||
jobname=str(hash(node))[:5] + str(random.randint(0, 10000)),
|
||||
nodename=node,
|
||||
image=image,
|
||||
cmd=exec_cmd,
|
||||
)
|
||||
)
|
||||
@@ -281,6 +298,7 @@ def apply_ingress_policy(
|
||||
bridge_name: str,
|
||||
kubecli: KrknKubernetes,
|
||||
test_execution: str,
|
||||
image: str,
|
||||
) -> typing.List[str]:
|
||||
"""
|
||||
Function that applies ingress traffic shaping to pod interface.
|
||||
@@ -319,6 +337,9 @@ def apply_ingress_policy(
|
||||
test_execution (String)
|
||||
- The order in which the filters are applied
|
||||
|
||||
exclude_label (string)
|
||||
- pods matching this label will be excluded from the outage
|
||||
|
||||
Returns:
|
||||
The name of the job created that executes the traffic shaping
|
||||
filter
|
||||
@@ -327,22 +348,23 @@ def apply_ingress_policy(
|
||||
job_list = []
|
||||
yml_list = []
|
||||
|
||||
create_virtual_interfaces(kubecli, len(ips), node, pod_template)
|
||||
create_virtual_interfaces(kubecli, len(ips), node, pod_template, image)
|
||||
|
||||
for count, pod_ip in enumerate(set(ips)):
|
||||
pod_inf = get_pod_interface(node, pod_ip, pod_template, bridge_name, kubecli)
|
||||
pod_inf = get_pod_interface(node, pod_ip, pod_template, bridge_name, kubecli, image)
|
||||
exec_cmd = get_ingress_cmd(
|
||||
test_execution, pod_inf, mod, count, network_params, duration
|
||||
)
|
||||
logging.info("Executing %s on pod %s in node %s" % (exec_cmd, pod_ip, node))
|
||||
job_body = yaml.safe_load(
|
||||
job_template.render(jobname=mod + str(pod_ip), nodename=node, cmd=exec_cmd)
|
||||
job_template.render(jobname=mod + str(pod_ip), nodename=node, image=image, cmd=exec_cmd)
|
||||
)
|
||||
yml_list.append(job_body)
|
||||
if pod_ip == node:
|
||||
break
|
||||
|
||||
for job_body in yml_list:
|
||||
print('job body ' + str(job_body))
|
||||
api_response = kubecli.create_job(job_body)
|
||||
if api_response is None:
|
||||
raise Exception("Error creating job")
|
||||
@@ -362,6 +384,7 @@ def apply_net_policy(
|
||||
bridge_name: str,
|
||||
kubecli: KrknKubernetes,
|
||||
test_execution: str,
|
||||
image: str,
|
||||
) -> typing.List[str]:
|
||||
"""
|
||||
Function that applies egress traffic shaping to pod interface.
|
||||
@@ -400,6 +423,9 @@ def apply_net_policy(
|
||||
test_execution (String)
|
||||
- The order in which the filters are applied
|
||||
|
||||
exclude_label (string)
|
||||
- pods matching this label will be excluded from the outage
|
||||
|
||||
Returns:
|
||||
The name of the job created that executes the traffic shaping
|
||||
filter
|
||||
@@ -415,7 +441,7 @@ def apply_net_policy(
|
||||
)
|
||||
logging.info("Executing %s on pod %s in node %s" % (exec_cmd, pod_ip, node))
|
||||
job_body = yaml.safe_load(
|
||||
job_template.render(jobname=mod + str(pod_ip), nodename=node, cmd=exec_cmd)
|
||||
job_template.render(jobname=mod + str(pod_ip), nodename=node, image=image, cmd=exec_cmd)
|
||||
)
|
||||
yml_list.append(job_body)
|
||||
|
||||
@@ -459,6 +485,9 @@ def get_ingress_cmd(
|
||||
duration (str):
|
||||
- Duration for which the traffic control is to be done
|
||||
|
||||
exclude_label (string)
|
||||
- pods matching this label will be excluded from the outage
|
||||
|
||||
Returns:
|
||||
str: ingress filter
|
||||
"""
|
||||
@@ -510,6 +539,9 @@ def get_egress_cmd(
|
||||
duration (str):
|
||||
- Duration for which the traffic control is to be done
|
||||
|
||||
exclude_label (string)
|
||||
- pods matching this label will be excluded from the outage
|
||||
|
||||
Returns:
|
||||
str: egress filter
|
||||
"""
|
||||
@@ -530,7 +562,7 @@ def get_egress_cmd(
|
||||
|
||||
|
||||
def create_virtual_interfaces(
|
||||
kubecli: KrknKubernetes, nummber: int, node: str, pod_template
|
||||
kubecli: KrknKubernetes, number: int, node: str, pod_template, image: str,
|
||||
) -> None:
|
||||
"""
|
||||
Function that creates a privileged pod and uses it to create
|
||||
@@ -550,19 +582,24 @@ def create_virtual_interfaces(
|
||||
pod_template (jinja2.environment.Template))
|
||||
- The YAML template used to instantiate a pod to create
|
||||
virtual interfaces on the node
|
||||
|
||||
image (string)
|
||||
- Image of network chaos tool
|
||||
"""
|
||||
pod_body = yaml.safe_load(pod_template.render(nodename=node))
|
||||
pod_name_regex = str(random.randint(0, 10000))
|
||||
pod_body = yaml.safe_load(pod_template.render(regex_name=pod_name_regex, nodename=node, image=image))
|
||||
kubecli.create_pod(pod_body, "default", 300)
|
||||
pod_name = f"modtools-{pod_name_regex}"
|
||||
logging.info(
|
||||
"Creating {0} virtual interfaces on node {1} using a pod".format(nummber, node)
|
||||
"Creating {0} virtual interfaces on node {1} using a pod".format(number, node)
|
||||
)
|
||||
create_ifb(kubecli, nummber, "modtools")
|
||||
create_ifb(kubecli, number, pod_name)
|
||||
logging.info("Deleting pod used to create virtual interfaces")
|
||||
kubecli.delete_pod("modtools", "default")
|
||||
kubecli.delete_pod(pod_name, "default")
|
||||
|
||||
|
||||
def delete_virtual_interfaces(
|
||||
kubecli: KrknKubernetes, node_list: typing.List[str], pod_template
|
||||
kubecli: KrknKubernetes, node_list: typing.List[str], pod_template, image: str,
|
||||
):
|
||||
"""
|
||||
Function that creates a privileged pod and uses it to delete all
|
||||
@@ -582,14 +619,18 @@ def delete_virtual_interfaces(
|
||||
pod_template (jinja2.environment.Template))
|
||||
- The YAML template used to instantiate a pod to delete
|
||||
virtual interfaces on the node
|
||||
|
||||
image (string)
|
||||
- Image of network chaos tool
|
||||
"""
|
||||
|
||||
for node in node_list:
|
||||
pod_body = yaml.safe_load(pod_template.render(nodename=node))
|
||||
pod_name_regex = str(random.randint(0, 10000))
|
||||
pod_body = yaml.safe_load(pod_template.render(regex_name=pod_name_regex, nodename=node, image=image))
|
||||
kubecli.create_pod(pod_body, "default", 300)
|
||||
logging.info("Deleting all virtual interfaces on node {0}".format(node))
|
||||
delete_ifb(kubecli, "modtools")
|
||||
kubecli.delete_pod("modtools", "default")
|
||||
delete_ifb(kubecli, "modtools-" + pod_name_regex)
|
||||
kubecli.delete_pod("modtools-" + pod_name_regex, "default")
|
||||
|
||||
|
||||
def create_ifb(kubecli: KrknKubernetes, number: int, pod_name: str):
|
||||
@@ -619,7 +660,7 @@ def delete_ifb(kubecli: KrknKubernetes, pod_name: str):
|
||||
kubecli.exec_cmd_in_pod(exec_command, pod_name, "default", base_command="chroot")
|
||||
|
||||
|
||||
def list_bridges(node: str, pod_template, kubecli: KrknKubernetes) -> typing.List[str]:
|
||||
def list_bridges(node: str, pod_template, kubecli: KrknKubernetes, image: str) -> typing.List[str]:
|
||||
"""
|
||||
Function that returns a list of bridges on the node
|
||||
|
||||
@@ -634,18 +675,24 @@ def list_bridges(node: str, pod_template, kubecli: KrknKubernetes) -> typing.Lis
|
||||
kubecli (KrknKubernetes)
|
||||
- Object to interact with Kubernetes Python client
|
||||
|
||||
image (string)
|
||||
- Image of network chaos tool
|
||||
|
||||
exclude_label (string)
|
||||
- pods matching this label will be excluded from the outage
|
||||
|
||||
Returns:
|
||||
List of bridges on the node.
|
||||
"""
|
||||
|
||||
pod_body = yaml.safe_load(pod_template.render(nodename=node))
|
||||
pod_name_regex = str(random.randint(0, 10000))
|
||||
pod_body = yaml.safe_load(pod_template.render(regex_name=pod_name_regex, nodename=node, image=image))
|
||||
logging.info("Creating pod to query bridge on node %s" % node)
|
||||
kubecli.create_pod(pod_body, "default", 300)
|
||||
|
||||
pod_name = f"modtools-{pod_name_regex}"
|
||||
try:
|
||||
cmd = ["/host", "ovs-vsctl", "list-br"]
|
||||
output = kubecli.exec_cmd_in_pod(
|
||||
cmd, "modtools", "default", base_command="chroot"
|
||||
cmd, pod_name, "default", base_command="chroot"
|
||||
)
|
||||
|
||||
if not output:
|
||||
@@ -656,13 +703,13 @@ def list_bridges(node: str, pod_template, kubecli: KrknKubernetes) -> typing.Lis
|
||||
|
||||
finally:
|
||||
logging.info("Deleting pod to query interface on node")
|
||||
kubecli.delete_pod("modtools", "default")
|
||||
kubecli.delete_pod(pod_name, "default")
|
||||
|
||||
return bridges
|
||||
|
||||
|
||||
def check_cookie(
|
||||
node: str, pod_template, br_name, cookie, kubecli: KrknKubernetes
|
||||
node: str, pod_template, br_name, cookie, kubecli: KrknKubernetes, image: str
|
||||
) -> str:
|
||||
"""
|
||||
Function to check for matching flow rules
|
||||
@@ -684,14 +731,16 @@ def check_cookie(
|
||||
cli (CoreV1Api)
|
||||
- Object to interact with Kubernetes Python client's CoreV1 API
|
||||
|
||||
image (string)
|
||||
- Image of network chaos tool
|
||||
Returns
|
||||
Returns the matching flow rules
|
||||
"""
|
||||
|
||||
pod_body = yaml.safe_load(pod_template.render(nodename=node))
|
||||
pod_name_regex = str(random.randint(0, 10000))
|
||||
pod_body = yaml.safe_load(pod_template.render(regex_name = pod_name_regex,nodename=node, image=image))
|
||||
logging.info("Creating pod to query duplicate rules on node %s" % node)
|
||||
kubecli.create_pod(pod_body, "default", 300)
|
||||
|
||||
pod_name = f"modtools-{pod_name_regex}"
|
||||
try:
|
||||
cmd = [
|
||||
"chroot",
|
||||
@@ -704,7 +753,7 @@ def check_cookie(
|
||||
f"cookie={cookie}/-1",
|
||||
]
|
||||
output = kubecli.exec_cmd_in_pod(
|
||||
cmd, "modtools", "default", base_command="chroot"
|
||||
cmd, pod_name, "default", base_command="chroot"
|
||||
)
|
||||
|
||||
if not output:
|
||||
@@ -715,13 +764,13 @@ def check_cookie(
|
||||
|
||||
finally:
|
||||
logging.info("Deleting pod to query interface on node")
|
||||
kubecli.delete_pod("modtools", "default")
|
||||
kubecli.delete_pod(pod_name, "default")
|
||||
|
||||
return flow_list
|
||||
|
||||
|
||||
def get_pod_interface(
|
||||
node: str, ip: str, pod_template, br_name, kubecli: KrknKubernetes
|
||||
node: str, ip: str, pod_template, br_name, kubecli: KrknKubernetes, image: str = "quay.io/krkn-chaos/krkn:tools"
|
||||
) -> str:
|
||||
"""
|
||||
Function to query the pod interface on a node
|
||||
@@ -746,12 +795,12 @@ def get_pod_interface(
|
||||
Returns
|
||||
Returns the pod interface name
|
||||
"""
|
||||
|
||||
pod_body = yaml.safe_load(pod_template.render(nodename=node))
|
||||
pod_name_regex = str(random.randint(0, 10000))
|
||||
pod_body = yaml.safe_load(pod_template.render(regex_name=pod_name_regex, nodename=node, image=image))
|
||||
logging.info("Creating pod to query pod interface on node %s" % node)
|
||||
kubecli.create_pod(pod_body, "default", 300)
|
||||
inf = ""
|
||||
|
||||
pod_name = f"modtools-{pod_name_regex}"
|
||||
try:
|
||||
if br_name == "br-int":
|
||||
find_ip = f"external-ids:ip_addresses={ip}/23"
|
||||
@@ -769,12 +818,12 @@ def get_pod_interface(
|
||||
]
|
||||
|
||||
output = kubecli.exec_cmd_in_pod(
|
||||
cmd, "modtools", "default", base_command="chroot"
|
||||
cmd, pod_name, "default", base_command="chroot"
|
||||
)
|
||||
if not output:
|
||||
cmd = ["/host", "ip", "addr", "show"]
|
||||
output = kubecli.exec_cmd_in_pod(
|
||||
cmd, "modtools", "default", base_command="chroot"
|
||||
cmd, pod_name, "default", base_command="chroot"
|
||||
)
|
||||
for if_str in output.split("\n"):
|
||||
if re.search(ip, if_str):
|
||||
@@ -783,12 +832,13 @@ def get_pod_interface(
|
||||
inf = output
|
||||
finally:
|
||||
logging.info("Deleting pod to query interface on node")
|
||||
kubecli.delete_pod("modtools", "default")
|
||||
kubecli.delete_pod(pod_name, "default")
|
||||
return inf
|
||||
|
||||
|
||||
def check_bridge_interface(
|
||||
node_name: str, pod_template, bridge_name: str, kubecli: KrknKubernetes
|
||||
node_name: str, pod_template, bridge_name: str, kubecli: KrknKubernetes,
|
||||
image: str = "quay.io/krkn-chaos/krkn:tools"
|
||||
) -> bool:
|
||||
"""
|
||||
Function is used to check if the required OVS or OVN bridge is found in
|
||||
@@ -808,13 +858,16 @@ def check_bridge_interface(
|
||||
kubecli (KrknKubernetes)
|
||||
- Object to interact with Kubernetes Python client
|
||||
|
||||
exclude_label (string)
|
||||
- pods matching this label will be excluded from the outage
|
||||
|
||||
Returns:
|
||||
Returns True if the bridge is found in the node.
|
||||
"""
|
||||
nodes = kubecli.get_node(node_name, None, 1)
|
||||
node_bridge = []
|
||||
for node in nodes:
|
||||
node_bridge = list_bridges(node, pod_template, kubecli)
|
||||
node_bridge = list_bridges(node, pod_template, kubecli, image=image)
|
||||
if bridge_name not in node_bridge:
|
||||
raise Exception(f"OVS bridge {bridge_name} not found on the node ")
|
||||
|
||||
@@ -835,6 +888,14 @@ class InputParams:
|
||||
}
|
||||
)
|
||||
|
||||
image: typing.Annotated[str, validation.min(1)]= field(
|
||||
default="quay.io/krkn-chaos/krkn:tools",
|
||||
metadata={
|
||||
"name": "Image",
|
||||
"description": "Image of krkn tools to run"
|
||||
}
|
||||
)
|
||||
|
||||
direction: typing.List[str] = field(
|
||||
default_factory=lambda: ["ingress", "egress"],
|
||||
metadata={
|
||||
@@ -893,6 +954,15 @@ class InputParams:
|
||||
},
|
||||
)
|
||||
|
||||
exclude_label: typing.Optional[str] = field(
|
||||
default=None,
|
||||
metadata={
|
||||
"name": "Exclude label",
|
||||
"description": "Kubernetes label selector for pods to exclude from the chaos. "
|
||||
"Pods matching this label will be excluded even if they match the label_selector",
|
||||
},
|
||||
)
|
||||
|
||||
kraken_config: typing.Dict[str, typing.Any] = field(
|
||||
default=None,
|
||||
metadata={
|
||||
@@ -1004,6 +1074,7 @@ def pod_outage(
|
||||
test_namespace = params.namespace
|
||||
test_label_selector = params.label_selector
|
||||
test_pod_name = params.pod_name
|
||||
test_image = params.image
|
||||
filter_dict = {}
|
||||
job_list = []
|
||||
publish = False
|
||||
@@ -1025,7 +1096,11 @@ def pod_outage(
|
||||
|
||||
br_name = get_bridge_name(api_ext, custom_obj)
|
||||
pods_list = get_test_pods(
|
||||
test_pod_name, test_label_selector, test_namespace, kubecli
|
||||
test_pod_name,
|
||||
test_label_selector,
|
||||
test_namespace,
|
||||
kubecli,
|
||||
params.exclude_label,
|
||||
)
|
||||
|
||||
while not len(pods_list) <= params.instance_count:
|
||||
@@ -1040,7 +1115,7 @@ def pod_outage(
|
||||
label_set.add("%s=%s" % (key, value))
|
||||
|
||||
check_bridge_interface(
|
||||
list(node_dict.keys())[0], pod_module_template, br_name, kubecli
|
||||
list(node_dict.keys())[0], pod_module_template, br_name, kubecli, test_image
|
||||
)
|
||||
|
||||
for direction, ports in filter_dict.items():
|
||||
@@ -1055,6 +1130,7 @@ def pod_outage(
|
||||
params.test_duration,
|
||||
br_name,
|
||||
kubecli,
|
||||
test_image
|
||||
)
|
||||
)
|
||||
|
||||
@@ -1095,7 +1171,16 @@ class EgressParams:
|
||||
}
|
||||
)
|
||||
|
||||
image: typing.Annotated[str, validation.min(1)]= field(
|
||||
default="quay.io/krkn-chaos/krkn:tools",
|
||||
metadata={
|
||||
"name": "Image",
|
||||
"description": "Image of krkn tools to run"
|
||||
}
|
||||
)
|
||||
|
||||
network_params: typing.Dict[str, str] = field(
|
||||
default=None,
|
||||
metadata={
|
||||
"name": "Network Parameters",
|
||||
"description": "The network filters that are applied on the interface. "
|
||||
@@ -1136,6 +1221,15 @@ class EgressParams:
|
||||
},
|
||||
)
|
||||
|
||||
exclude_label: typing.Optional[str] = field(
|
||||
default=None,
|
||||
metadata={
|
||||
"name": "Exclude label",
|
||||
"description": "Kubernetes label selector for pods to exclude from the chaos. "
|
||||
"Pods matching this label will be excluded even if they match the label_selector",
|
||||
},
|
||||
)
|
||||
|
||||
kraken_config: typing.Dict[str, typing.Any] = field(
|
||||
default=None,
|
||||
metadata={
|
||||
@@ -1254,6 +1348,7 @@ def pod_egress_shaping(
|
||||
test_namespace = params.namespace
|
||||
test_label_selector = params.label_selector
|
||||
test_pod_name = params.pod_name
|
||||
test_image = params.image
|
||||
job_list = []
|
||||
publish = False
|
||||
|
||||
@@ -1273,7 +1368,11 @@ def pod_egress_shaping(
|
||||
|
||||
br_name = get_bridge_name(api_ext, custom_obj)
|
||||
pods_list = get_test_pods(
|
||||
test_pod_name, test_label_selector, test_namespace, kubecli
|
||||
test_pod_name,
|
||||
test_label_selector,
|
||||
test_namespace,
|
||||
kubecli,
|
||||
params.exclude_label,
|
||||
)
|
||||
|
||||
while not len(pods_list) <= params.instance_count:
|
||||
@@ -1287,7 +1386,7 @@ def pod_egress_shaping(
|
||||
label_set.add("%s=%s" % (key, value))
|
||||
|
||||
check_bridge_interface(
|
||||
list(node_dict.keys())[0], pod_module_template, br_name, kubecli
|
||||
list(node_dict.keys())[0], pod_module_template, br_name, kubecli, test_image
|
||||
)
|
||||
|
||||
for mod in mod_lst:
|
||||
@@ -1304,6 +1403,7 @@ def pod_egress_shaping(
|
||||
br_name,
|
||||
kubecli,
|
||||
params.execution_type,
|
||||
test_image
|
||||
)
|
||||
)
|
||||
if params.execution_type == "serial":
|
||||
@@ -1357,8 +1457,17 @@ class IngressParams:
|
||||
"for details.",
|
||||
}
|
||||
)
|
||||
|
||||
image: typing.Annotated[str, validation.min(1)] = field(
|
||||
default="quay.io/krkn-chaos/krkn:tools",
|
||||
metadata={
|
||||
"name": "Image",
|
||||
"description": "Image to use for injecting network chaos",
|
||||
}
|
||||
)
|
||||
|
||||
network_params: typing.Dict[str, str] = field(
|
||||
default=None,
|
||||
metadata={
|
||||
"name": "Network Parameters",
|
||||
"description": "The network filters that are applied on the interface. "
|
||||
@@ -1399,6 +1508,15 @@ class IngressParams:
|
||||
},
|
||||
)
|
||||
|
||||
exclude_label: typing.Optional[str] = field(
|
||||
default=None,
|
||||
metadata={
|
||||
"name": "Exclude label",
|
||||
"description": "Kubernetes label selector for pods to exclude from the chaos. "
|
||||
"Pods matching this label will be excluded even if they match the label_selector",
|
||||
},
|
||||
)
|
||||
|
||||
kraken_config: typing.Dict[str, typing.Any] = field(
|
||||
default=None,
|
||||
metadata={
|
||||
@@ -1518,6 +1636,7 @@ def pod_ingress_shaping(
|
||||
test_namespace = params.namespace
|
||||
test_label_selector = params.label_selector
|
||||
test_pod_name = params.pod_name
|
||||
test_image = params.image
|
||||
job_list = []
|
||||
publish = False
|
||||
|
||||
@@ -1537,7 +1656,11 @@ def pod_ingress_shaping(
|
||||
|
||||
br_name = get_bridge_name(api_ext, custom_obj)
|
||||
pods_list = get_test_pods(
|
||||
test_pod_name, test_label_selector, test_namespace, kubecli
|
||||
test_pod_name,
|
||||
test_label_selector,
|
||||
test_namespace,
|
||||
kubecli,
|
||||
params.exclude_label,
|
||||
)
|
||||
|
||||
while not len(pods_list) <= params.instance_count:
|
||||
@@ -1551,7 +1674,7 @@ def pod_ingress_shaping(
|
||||
label_set.add("%s=%s" % (key, value))
|
||||
|
||||
check_bridge_interface(
|
||||
list(node_dict.keys())[0], pod_module_template, br_name, kubecli
|
||||
list(node_dict.keys())[0], pod_module_template, br_name, kubecli, test_image
|
||||
)
|
||||
|
||||
for mod in mod_lst:
|
||||
@@ -1568,6 +1691,7 @@ def pod_ingress_shaping(
|
||||
br_name,
|
||||
kubecli,
|
||||
params.execution_type,
|
||||
image=test_image
|
||||
)
|
||||
)
|
||||
if params.execution_type == "serial":
|
||||
@@ -1604,6 +1728,6 @@ def pod_ingress_shaping(
|
||||
logging.error("Pod network Shaping scenario exiting due to Exception - %s" % e)
|
||||
return "error", PodIngressNetShapingErrorOutput(format_exc())
|
||||
finally:
|
||||
delete_virtual_interfaces(kubecli, node_dict.keys(), pod_module_template)
|
||||
delete_virtual_interfaces(kubecli, node_dict.keys(), pod_module_template, test_image)
|
||||
logging.info("Deleting jobs(if any)")
|
||||
delete_jobs(kubecli, job_list[:])
|
||||
|
||||
@@ -9,7 +9,7 @@ spec:
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: networkchaos
|
||||
image: docker.io/fedora/tools
|
||||
image: {{image}}
|
||||
command: ["/bin/sh", "-c", "{{cmd}}"]
|
||||
securityContext:
|
||||
privileged: true
|
||||
|
||||
@@ -42,7 +42,9 @@ class NetworkChaosScenarioPlugin(AbstractScenarioPlugin):
|
||||
test_egress = get_yaml_item_value(
|
||||
test_dict, "egress", {"bandwidth": "100mbit"}
|
||||
)
|
||||
|
||||
test_image = get_yaml_item_value(
|
||||
test_dict, "image", "quay.io/krkn-chaos/krkn:tools"
|
||||
)
|
||||
if test_node:
|
||||
node_name_list = test_node.split(",")
|
||||
nodelst = common_node_functions.get_node_by_name(node_name_list, lib_telemetry.get_lib_kubernetes())
|
||||
@@ -60,6 +62,7 @@ class NetworkChaosScenarioPlugin(AbstractScenarioPlugin):
|
||||
nodelst,
|
||||
pod_template,
|
||||
lib_telemetry.get_lib_kubernetes(),
|
||||
image=test_image
|
||||
)
|
||||
joblst = []
|
||||
egress_lst = [i for i in param_lst if i in test_egress]
|
||||
@@ -71,6 +74,7 @@ class NetworkChaosScenarioPlugin(AbstractScenarioPlugin):
|
||||
"execution": test_execution,
|
||||
"instance_count": test_instance_count,
|
||||
"egress": test_egress,
|
||||
"image": test_image
|
||||
}
|
||||
}
|
||||
logging.info(
|
||||
@@ -94,6 +98,7 @@ class NetworkChaosScenarioPlugin(AbstractScenarioPlugin):
|
||||
jobname=i + str(hash(node))[:5],
|
||||
nodename=node,
|
||||
cmd=exec_cmd,
|
||||
image=test_image
|
||||
)
|
||||
)
|
||||
joblst.append(job_body["metadata"]["name"])
|
||||
@@ -153,20 +158,22 @@ class NetworkChaosScenarioPlugin(AbstractScenarioPlugin):
|
||||
return 0
|
||||
|
||||
def verify_interface(
|
||||
self, test_interface, nodelst, template, kubecli: KrknKubernetes
|
||||
self, test_interface, nodelst, template, kubecli: KrknKubernetes, image: str
|
||||
):
|
||||
pod_index = random.randint(0, len(nodelst) - 1)
|
||||
pod_body = yaml.safe_load(template.render(nodename=nodelst[pod_index]))
|
||||
pod_name_regex = str(random.randint(0, 10000))
|
||||
pod_body = yaml.safe_load(template.render(regex_name=pod_name_regex,nodename=nodelst[pod_index], image=image))
|
||||
logging.info("Creating pod to query interface on node %s" % nodelst[pod_index])
|
||||
kubecli.create_pod(pod_body, "default", 300)
|
||||
pod_name = f"fedtools-{pod_name_regex}"
|
||||
try:
|
||||
if test_interface == []:
|
||||
cmd = "ip r | grep default | awk '/default/ {print $5}'"
|
||||
output = kubecli.exec_cmd_in_pod(cmd, "fedtools", "default")
|
||||
output = kubecli.exec_cmd_in_pod(cmd, pod_name, "default")
|
||||
test_interface = [output.replace("\n", "")]
|
||||
else:
|
||||
cmd = "ip -br addr show|awk -v ORS=',' '{print $1}'"
|
||||
output = kubecli.exec_cmd_in_pod(cmd, "fedtools", "default")
|
||||
output = kubecli.exec_cmd_in_pod(cmd, pod_name, "default")
|
||||
interface_lst = output[:-1].split(",")
|
||||
for interface in test_interface:
|
||||
if interface not in interface_lst:
|
||||
@@ -177,8 +184,8 @@ class NetworkChaosScenarioPlugin(AbstractScenarioPlugin):
|
||||
raise RuntimeError()
|
||||
return test_interface
|
||||
finally:
|
||||
logging.info("Deleteing pod to query interface on node")
|
||||
kubecli.delete_pod("fedtools", "default")
|
||||
logging.info("Deleting pod to query interface on node")
|
||||
kubecli.delete_pod(pod_name, "default")
|
||||
|
||||
# krkn_lib
|
||||
def get_job_pods(self, api_response, kubecli: KrknKubernetes):
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: fedtools
|
||||
name: fedtools-{{regex_name}}
|
||||
spec:
|
||||
hostNetwork: true
|
||||
nodeName: {{nodename}}
|
||||
containers:
|
||||
- name: fedtools
|
||||
image: docker.io/fedora/tools
|
||||
image: {{image}}
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
|
||||
@@ -14,9 +14,11 @@ class BaseNetworkChaosConfig:
|
||||
wait_duration: int
|
||||
test_duration: int
|
||||
label_selector: str
|
||||
service_account: str
|
||||
instance_count: int
|
||||
execution: str
|
||||
namespace: str
|
||||
taints: list[str]
|
||||
|
||||
def validate(self) -> list[str]:
|
||||
errors = []
|
||||
@@ -28,8 +30,12 @@ class BaseNetworkChaosConfig:
|
||||
errors.append(
|
||||
f"{self.execution} is not in supported execution mod: {','.join(self.supported_execution)}"
|
||||
)
|
||||
if self.label_selector is None:
|
||||
if self.id == "node_network_filter" and self.label_selector is None:
|
||||
errors.append("label_selector cannot be None")
|
||||
if not isinstance(self.wait_duration, int):
|
||||
errors.append("wait_duration must be an int")
|
||||
if not isinstance(self.test_duration, int):
|
||||
errors.append("test_duration must be an int")
|
||||
return errors
|
||||
|
||||
|
||||
@@ -41,8 +47,14 @@ class NetworkFilterConfig(BaseNetworkChaosConfig):
|
||||
target: str
|
||||
ports: list[int]
|
||||
image: str
|
||||
protocols: list[str]
|
||||
|
||||
def validate(self) -> list[str]:
|
||||
errors = super().validate()
|
||||
# here further validations
|
||||
allowed_protocols = {"tcp", "udp"}
|
||||
if not set(self.protocols).issubset(allowed_protocols):
|
||||
errors.append(
|
||||
f"{self.protocols} contains not allowed protocols only tcp and udp is allowed"
|
||||
)
|
||||
return errors
|
||||
|
||||
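
# Editor's note: an illustrative parameter set for the node_network_filter scenario,
# assembled only from configuration fields referenced above; the concrete values are
# assumptions for illustration, not defaults from the repository:
network_filter_config_example = {
    "id": "node_network_filter",
    "wait_duration": 60,
    "test_duration": 120,
    "label_selector": "node-role.kubernetes.io/worker",
    "instance_count": 1,
    "execution": "parallel",
    "namespace": "default",
    "ports": [80, 443],
    "protocols": ["tcp"],
    "image": "quay.io/krkn-chaos/krkn:tools",
}
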
@@ -3,19 +3,25 @@ import logging
|
||||
import queue
|
||||
|
||||
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
|
||||
from krkn.scenario_plugins.network_chaos_ng.models import BaseNetworkChaosConfig, NetworkChaosScenarioType
|
||||
from krkn.scenario_plugins.network_chaos_ng.models import (
|
||||
BaseNetworkChaosConfig,
|
||||
NetworkChaosScenarioType,
|
||||
)
|
||||
|
||||
|
||||
class AbstractNetworkChaosModule(abc.ABC):
|
||||
"""
|
||||
The abstract class that needs to be implemented by each Network Chaos Scenario
|
||||
"""
|
||||
|
||||
kubecli: KrknTelemetryOpenshift
|
||||
base_network_config: BaseNetworkChaosConfig
|
||||
|
||||
@abc.abstractmethod
|
||||
def run(self, target: str, kubecli: KrknTelemetryOpenshift, error_queue: queue.Queue = None):
|
||||
def run(self, target: str, error_queue: queue.Queue = None):
|
||||
"""
|
||||
the entrypoint method for the Network Chaos Scenario
|
||||
:param target: The resource name that will be targeted by the scenario (Node Name, Pod Name etc.)
|
||||
:param kubecli: The `KrknTelemetryOpenshift` needed by the scenario to access to the krkn-lib methods
|
||||
:param error_queue: A queue that will be used by the plugin to push the errors raised during the execution of parallel modules
|
||||
"""
|
||||
pass
|
||||
@@ -28,31 +34,17 @@ class AbstractNetworkChaosModule(abc.ABC):
|
||||
"""
|
||||
pass
|
||||
|
||||
def get_targets(self) -> list[str]:
|
||||
"""
|
||||
checks and returns the targets based on the common scenario configuration
|
||||
"""
|
||||
|
||||
def log_info(self, message: str, parallel: bool = False, node_name: str = ""):
|
||||
"""
|
||||
log helper method for INFO severity to be used in the scenarios
|
||||
"""
|
||||
if parallel:
|
||||
logging.info(f"[{node_name}]: {message}")
|
||||
else:
|
||||
logging.info(message)
|
||||
pass
|
||||
|
||||
def log_warning(self, message: str, parallel: bool = False, node_name: str = ""):
|
||||
"""
|
||||
log helper method for WARNING severity to be used in the scenarios
|
||||
"""
|
||||
if parallel:
|
||||
logging.warning(f"[{node_name}]: {message}")
|
||||
else:
|
||||
logging.warning(message)
|
||||
|
||||
|
||||
def log_error(self, message: str, parallel: bool = False, node_name: str = ""):
|
||||
"""
|
||||
log helper method for ERROR severity to be used in the scenarios
|
||||
"""
|
||||
if parallel:
|
||||
logging.error(f"[{node_name}]: {message}")
|
||||
else:
|
||||
logging.error(message)
|
||||
def __init__(
|
||||
self,
|
||||
base_network_config: BaseNetworkChaosConfig,
|
||||
kubecli: KrknTelemetryOpenshift,
|
||||
):
|
||||
self.kubecli = kubecli
|
||||
self.base_network_config = base_network_config
|
||||
|
||||
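
# Editor's note: a minimal concrete module under the refactored interface above.
# The class name, the no-op behaviour and the exact log_info signature are
# illustrative assumptions, not code from the repository:
#
#   class NoOpNetworkChaosModule(AbstractNetworkChaosModule):
#       def run(self, target: str, error_queue: queue.Queue = None):
#           log_info(f"would disturb the network of {target}",
#                    error_queue is not None, target)
#
#       def get_config(self) -> (NetworkChaosScenarioType, BaseNetworkChaosConfig):
#           return NetworkChaosScenarioType.Node, self.base_network_config
#
#       def get_targets(self) -> list[str]:
#           return self.kubecli.get_lib_kubernetes().list_nodes(
#               self.base_network_config.label_selector
#           )
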
@@ -1,11 +1,6 @@
|
||||
import os
|
||||
import queue
|
||||
import time
|
||||
|
||||
import yaml
|
||||
from jinja2 import Environment, FileSystemLoader
|
||||
|
||||
|
||||
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
|
||||
from krkn_lib.utils import get_random_string
|
||||
from krkn.scenario_plugins.network_chaos_ng.models import (
|
||||
@@ -16,88 +11,92 @@ from krkn.scenario_plugins.network_chaos_ng.models import (
|
||||
from krkn.scenario_plugins.network_chaos_ng.modules.abstract_network_chaos_module import (
|
||||
AbstractNetworkChaosModule,
|
||||
)
|
||||
from krkn.scenario_plugins.network_chaos_ng.modules.utils import log_info
|
||||
|
||||
from krkn.scenario_plugins.network_chaos_ng.modules.utils_network_filter import (
|
||||
deploy_network_filter_pod,
|
||||
apply_network_rules,
|
||||
clean_network_rules,
|
||||
generate_rules,
|
||||
get_default_interface,
|
||||
)
|
||||
|
||||
|
||||
class NodeNetworkFilterModule(AbstractNetworkChaosModule):
|
||||
config: NetworkFilterConfig
|
||||
kubecli: KrknTelemetryOpenshift
|
||||
|
||||
def run(
|
||||
self,
|
||||
target: str,
|
||||
kubecli: KrknTelemetryOpenshift,
|
||||
error_queue: queue.Queue = None,
|
||||
):
|
||||
def run(self, target: str, error_queue: queue.Queue = None):
|
||||
parallel = False
|
||||
if error_queue:
|
||||
parallel = True
|
||||
try:
|
||||
file_loader = FileSystemLoader(os.path.abspath(os.path.dirname(__file__)))
|
||||
env = Environment(loader=file_loader, autoescape=True)
|
||||
pod_name = f"node-filter-{get_random_string(5)}"
|
||||
pod_template = env.get_template("templates/network-chaos.j2")
|
||||
pod_body = yaml.safe_load(
|
||||
pod_template.render(
|
||||
pod_name=pod_name,
|
||||
namespace=self.config.namespace,
|
||||
host_network=True,
|
||||
target=target,
|
||||
workload_image=self.config.image,
|
||||
)
|
||||
)
|
||||
self.log_info(
|
||||
f"creating pod to filter "
|
||||
log_info(
|
||||
f"creating workload to filter node {target} network"
|
||||
f"ports {','.join([str(port) for port in self.config.ports])}, "
|
||||
f"ingress:{str(self.config.ingress)}, "
|
||||
f"egress:{str(self.config.egress)}",
|
||||
parallel,
|
||||
target,
|
||||
)
|
||||
kubecli.get_lib_kubernetes().create_pod(
|
||||
pod_body, self.config.namespace, 300
|
||||
|
||||
pod_name = f"node-filter-{get_random_string(5)}"
|
||||
deploy_network_filter_pod(
|
||||
self.config,
|
||||
target,
|
||||
pod_name,
|
||||
self.kubecli.get_lib_kubernetes(),
|
||||
)
|
||||
|
||||
if len(self.config.interfaces) == 0:
|
||||
interfaces = [
|
||||
self.get_default_interface(pod_name, self.config.namespace, kubecli)
|
||||
get_default_interface(
|
||||
pod_name,
|
||||
self.config.namespace,
|
||||
self.kubecli.get_lib_kubernetes(),
|
||||
)
|
||||
]
|
||||
self.log_info(f"detected default interface {interfaces[0]}")
|
||||
|
||||
log_info(
|
||||
f"detected default interface {interfaces[0]}", parallel, target
|
||||
)
|
||||
|
||||
else:
|
||||
interfaces = self.config.interfaces
|
||||
|
||||
input_rules, output_rules = self.generate_rules(interfaces)
|
||||
input_rules, output_rules = generate_rules(interfaces, self.config)
|
||||
|
||||
for rule in input_rules:
|
||||
self.log_info(f"applying iptables INPUT rule: {rule}", parallel, target)
|
||||
kubecli.get_lib_kubernetes().exec_cmd_in_pod(
|
||||
[rule], pod_name, self.config.namespace
|
||||
)
|
||||
for rule in output_rules:
|
||||
self.log_info(
|
||||
f"applying iptables OUTPUT rule: {rule}", parallel, target
|
||||
)
|
||||
kubecli.get_lib_kubernetes().exec_cmd_in_pod(
|
||||
[rule], pod_name, self.config.namespace
|
||||
)
|
||||
self.log_info(
|
||||
f"waiting {self.config.test_duration} seconds before removing the iptables rules"
|
||||
apply_network_rules(
|
||||
self.kubecli.get_lib_kubernetes(),
|
||||
input_rules,
|
||||
output_rules,
|
||||
pod_name,
|
||||
self.config.namespace,
|
||||
parallel,
|
||||
target,
|
||||
)
|
||||
|
||||
log_info(
|
||||
f"waiting {self.config.test_duration} seconds before removing the iptables rules",
|
||||
parallel,
|
||||
target,
|
||||
)
|
||||
|
||||
time.sleep(self.config.test_duration)
|
||||
self.log_info("removing iptables rules")
|
||||
for _ in input_rules:
|
||||
# always deleting the first rule since has been inserted from the top
|
||||
kubecli.get_lib_kubernetes().exec_cmd_in_pod(
|
||||
[f"iptables -D INPUT 1"], pod_name, self.config.namespace
|
||||
)
|
||||
for _ in output_rules:
|
||||
# always deleting the first rule since has been inserted from the top
|
||||
kubecli.get_lib_kubernetes().exec_cmd_in_pod(
|
||||
[f"iptables -D OUTPUT 1"], pod_name, self.config.namespace
|
||||
)
|
||||
self.log_info(
|
||||
f"deleting network chaos pod {pod_name} from {self.config.namespace}"
|
||||
|
||||
log_info("removing iptables rules", parallel, target)
|
||||
|
||||
clean_network_rules(
|
||||
self.kubecli.get_lib_kubernetes(),
|
||||
input_rules,
|
||||
output_rules,
|
||||
pod_name,
|
||||
self.config.namespace,
|
||||
)
|
||||
|
||||
kubecli.get_lib_kubernetes().delete_pod(pod_name, self.config.namespace)
|
||||
self.kubecli.get_lib_kubernetes().delete_pod(
|
||||
pod_name, self.config.namespace
|
||||
)
|
||||
|
||||
        except Exception as e:
            if error_queue is None:
@@ -105,33 +104,25 @@ class NodeNetworkFilterModule(AbstractNetworkChaosModule):
            else:
                error_queue.put(str(e))

    def __init__(self, config: NetworkFilterConfig):
    def __init__(self, config: NetworkFilterConfig, kubecli: KrknTelemetryOpenshift):
        super().__init__(config, kubecli)
        self.config = config

    def get_config(self) -> (NetworkChaosScenarioType, BaseNetworkChaosConfig):
        return NetworkChaosScenarioType.Node, self.config

    def get_default_interface(
        self, pod_name: str, namespace: str, kubecli: KrknTelemetryOpenshift
    ) -> str:
        cmd = "ip r | grep default | awk '/default/ {print $5}'"
        output = kubecli.get_lib_kubernetes().exec_cmd_in_pod(
            [cmd], pod_name, namespace
        )
        return output.replace("\n", "")
    def get_targets(self) -> list[str]:
        if self.base_network_config.label_selector:
            return self.kubecli.get_lib_kubernetes().list_nodes(
                self.base_network_config.label_selector
            )
        else:
            if not self.config.target:
                raise Exception(
                    "neither node selector nor node_name (target) specified, aborting."
                )
            node_info = self.kubecli.get_lib_kubernetes().list_nodes()
            if self.config.target not in node_info:
                raise Exception(f"node {self.config.target} not found, aborting")

    def generate_rules(self, interfaces: list[str]) -> (list[str], list[str]):
        input_rules = []
        output_rules = []
        for interface in interfaces:
            for port in self.config.ports:
                if self.config.egress:
                    output_rules.append(
                        f"iptables -I OUTPUT 1 -p tcp --dport {port} -m state --state NEW,RELATED,ESTABLISHED -j DROP"
                    )

                if self.config.ingress:
                    input_rules.append(
                        f"iptables -I INPUT 1 -i {interface} -p tcp --dport {port} -m state --state NEW,RELATED,ESTABLISHED -j DROP"
                    )
        return input_rules, output_rules
        return [self.config.target]
@@ -0,0 +1,182 @@
import logging
import queue
import time

from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
from krkn_lib.utils import get_random_string

from krkn.scenario_plugins.network_chaos_ng.models import (
    NetworkChaosScenarioType,
    BaseNetworkChaosConfig,
    NetworkFilterConfig,
)
from krkn.scenario_plugins.network_chaos_ng.modules.abstract_network_chaos_module import (
    AbstractNetworkChaosModule,
)
from krkn.scenario_plugins.network_chaos_ng.modules.utils import log_info, log_error
from krkn.scenario_plugins.network_chaos_ng.modules.utils_network_filter import (
    deploy_network_filter_pod,
    generate_namespaced_rules,
    apply_network_rules,
    clean_network_rules_namespaced,
)


class PodNetworkFilterModule(AbstractNetworkChaosModule):
    config: NetworkFilterConfig
def run(self, target: str, error_queue: queue.Queue = None):
|
||||
parallel = False
|
||||
if error_queue:
|
||||
parallel = True
|
||||
try:
|
||||
pod_name = f"pod-filter-{get_random_string(5)}"
|
||||
container_name = f"fedora-container-{get_random_string(5)}"
|
||||
pod_info = self.kubecli.get_lib_kubernetes().get_pod_info(
|
||||
target, self.config.namespace
|
||||
)
|
||||
|
||||
log_info(
|
||||
f"creating workload to filter pod {self.config.target} network"
|
||||
f"ports {','.join([str(port) for port in self.config.ports])}, "
|
||||
f"ingress:{str(self.config.ingress)}, "
|
||||
f"egress:{str(self.config.egress)}",
|
||||
parallel,
|
||||
pod_name,
|
||||
)
|
||||
|
||||
if not pod_info:
|
||||
raise Exception(
|
||||
f"impossible to retrieve infos for pod {self.config.target} namespace {self.config.namespace}"
|
||||
)
|
||||
|
||||
deploy_network_filter_pod(
|
||||
self.config,
|
||||
pod_info.nodeName,
|
||||
pod_name,
|
||||
self.kubecli.get_lib_kubernetes(),
|
||||
container_name,
|
||||
host_network=False,
|
||||
)
|
||||
|
||||
if len(self.config.interfaces) == 0:
|
||||
interfaces = (
|
||||
self.kubecli.get_lib_kubernetes().list_pod_network_interfaces(
|
||||
target, self.config.namespace
|
||||
)
|
||||
)
|
||||
|
||||
if len(interfaces) == 0:
|
||||
log_error(
|
||||
"no network interface found in pod, impossible to execute the network filter scenario",
|
||||
parallel,
|
||||
pod_name,
|
||||
)
|
||||
return
|
||||
log_info(
|
||||
f"detected network interfaces: {','.join(interfaces)}",
|
||||
parallel,
|
||||
pod_name,
|
||||
)
|
||||
else:
|
||||
interfaces = self.config.interfaces
|
||||
|
||||
container_ids = self.kubecli.get_lib_kubernetes().get_container_ids(
|
||||
target, self.config.namespace
|
||||
)
|
||||
|
||||
if len(container_ids) == 0:
|
||||
raise Exception(
|
||||
f"impossible to resolve container id for pod {target} namespace {self.config.namespace}"
|
||||
)
|
||||
|
||||
log_info(f"targeting container {container_ids[0]}", parallel, pod_name)
|
||||
|
||||
pids = self.kubecli.get_lib_kubernetes().get_pod_pids(
|
||||
base_pod_name=pod_name,
|
||||
base_pod_namespace=self.config.namespace,
|
||||
base_pod_container_name=container_name,
|
||||
pod_name=target,
|
||||
pod_namespace=self.config.namespace,
|
||||
pod_container_id=container_ids[0],
|
||||
)
|
||||
|
||||
if not pids:
|
||||
raise Exception(f"impossible to resolve pid for pod {target}")
|
||||
|
||||
log_info(
|
||||
f"resolved pids {pids} in node {pod_info.nodeName} for pod {target}",
|
||||
parallel,
|
||||
pod_name,
|
||||
)
|
||||
|
||||
input_rules, output_rules = generate_namespaced_rules(
|
||||
interfaces, self.config, pids
|
||||
)
|
||||
|
||||
apply_network_rules(
|
||||
self.kubecli.get_lib_kubernetes(),
|
||||
input_rules,
|
||||
output_rules,
|
||||
pod_name,
|
||||
self.config.namespace,
|
||||
parallel,
|
||||
target,
|
||||
)
|
||||
|
||||
log_info(
|
||||
f"waiting {self.config.test_duration} seconds before removing the iptables rules",
|
||||
parallel,
|
||||
pod_name,
|
||||
)
|
||||
|
||||
time.sleep(self.config.test_duration)
|
||||
|
||||
log_info("removing iptables rules", parallel, pod_name)
|
||||
|
||||
clean_network_rules_namespaced(
|
||||
self.kubecli.get_lib_kubernetes(),
|
||||
input_rules,
|
||||
output_rules,
|
||||
pod_name,
|
||||
self.config.namespace,
|
||||
pids,
|
||||
)
|
||||
|
||||
self.kubecli.get_lib_kubernetes().delete_pod(
|
||||
pod_name, self.config.namespace
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
if error_queue is None:
|
||||
raise e
|
||||
else:
|
||||
error_queue.put(str(e))
|
||||
|
||||
def __init__(self, config: NetworkFilterConfig, kubecli: KrknTelemetryOpenshift):
|
||||
super().__init__(config, kubecli)
|
||||
self.config = config
|
||||
|
||||
def get_config(self) -> (NetworkChaosScenarioType, BaseNetworkChaosConfig):
|
||||
return NetworkChaosScenarioType.Pod, self.config
|
||||
|
||||
def get_targets(self) -> list[str]:
|
||||
if not self.config.namespace:
|
||||
raise Exception("namespace not specified, aborting")
|
||||
if self.base_network_config.label_selector:
|
||||
return self.kubecli.get_lib_kubernetes().list_pods(
|
||||
self.config.namespace, self.config.label_selector
|
||||
)
|
||||
else:
|
||||
if not self.config.target:
|
||||
raise Exception(
|
||||
"neither node selector nor node_name (target) specified, aborting."
|
||||
)
|
||||
if not self.kubecli.get_lib_kubernetes().check_if_pod_exists(
|
||||
self.config.target, self.config.namespace
|
||||
):
|
||||
raise Exception(
|
||||
f"pod {self.config.target} not found in namespace {self.config.namespace}"
|
||||
)
|
||||
|
||||
return [self.config.target]
|
||||
@@ -4,13 +4,28 @@ metadata:
  name: {{pod_name}}
  namespace: {{namespace}}
spec:
  {% if service_account %}
  serviceAccountName: {{ service_account }}
  {%endif%}
  {% if host_network %}
  hostNetwork: true
  {%endif%}
  {% if taints %}
  tolerations:
  {% for toleration in taints %}
    - key: "{{ toleration.key }}"
      operator: "{{ toleration.operator }}"
      {% if toleration.value %}
      value: "{{ toleration.value }}"
      {% endif %}
      effect: "{{ toleration.effect }}"
  {% endfor %}
  {% endif %}
  hostPID: true
  nodeSelector:
    kubernetes.io/hostname: {{target}}
  containers:
    - name: fedora
    - name: {{container_name}}
      imagePullPolicy: Always
      image: {{workload_image}}
      securityContext:
krkn/scenario_plugins/network_chaos_ng/modules/utils.py (new file, 31 lines)
@@ -0,0 +1,31 @@
import logging


def log_info(message: str, parallel: bool = False, node_name: str = ""):
    """
    log helper method for INFO severity to be used in the scenarios
    """
    if parallel:
        logging.info(f"[{node_name}]: {message}")
    else:
        logging.info(message)


def log_error(message: str, parallel: bool = False, node_name: str = ""):
    """
    log helper method for ERROR severity to be used in the scenarios
    """
    if parallel:
        logging.error(f"[{node_name}]: {message}")
    else:
        logging.error(message)


def log_warning(message: str, parallel: bool = False, node_name: str = ""):
    """
    log helper method for WARNING severity to be used in the scenarios
    """
    if parallel:
        logging.warning(f"[{node_name}]: {message}")
    else:
        logging.warning(message)
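These helpers exist so that, when a module runs against several targets in parallel threads, each log line can be traced back to the node or pod it refers to. A minimal usage sketch (the target names here are illustrative):

    from krkn.scenario_plugins.network_chaos_ng.modules.utils import log_info, log_error

    # serial execution: plain message
    log_info("applying iptables rules")
    # parallel execution: the target name is prepended so interleaved
    # lines from different threads stay readable
    log_info("applying iptables rules", True, "worker-0")   # "[worker-0]: applying iptables rules"
    log_error("rule injection failed", True, "worker-1")    # "[worker-1]: rule injection failed"
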
@@ -0,0 +1,161 @@
import os

import yaml
from jinja2 import FileSystemLoader, Environment
from krkn_lib.k8s import KrknKubernetes

from krkn.scenario_plugins.network_chaos_ng.models import NetworkFilterConfig
from krkn.scenario_plugins.network_chaos_ng.modules.utils import log_info


def generate_rules(
    interfaces: list[str], config: NetworkFilterConfig
) -> (list[str], list[str]):
    input_rules = []
    output_rules = []
    for interface in interfaces:
        for port in config.ports:
            if config.egress:
                for protocol in set(config.protocols):
                    output_rules.append(
                        f"iptables -I OUTPUT 1 -p {protocol} --dport {port} -m state --state NEW,RELATED,ESTABLISHED -j DROP"
                    )

            if config.ingress:
                for protocol in set(config.protocols):
                    input_rules.append(
                        f"iptables -I INPUT 1 -i {interface} -p {protocol} --dport {port} -m state --state NEW,RELATED,ESTABLISHED -j DROP"
                    )
    return input_rules, output_rules
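For a concrete sense of the output, here is a sketch of what the generator yields for one interface, one port and both tcp and udp; the config object is a tiny stand-in rather than a real NetworkFilterConfig, since only the attributes read above matter:

    from types import SimpleNamespace

    cfg = SimpleNamespace(ports=[5000], protocols=["tcp", "udp"], ingress=True, egress=True)
    input_rules, output_rules = generate_rules(["eth0"], cfg)
    # output_rules (egress), one per protocol, inserted at the top of the OUTPUT chain:
    #   iptables -I OUTPUT 1 -p tcp --dport 5000 -m state --state NEW,RELATED,ESTABLISHED -j DROP
    #   iptables -I OUTPUT 1 -p udp --dport 5000 -m state --state NEW,RELATED,ESTABLISHED -j DROP
    # input_rules (ingress) additionally pin the rule to the interface:
    #   iptables -I INPUT 1 -i eth0 -p tcp --dport 5000 -m state --state NEW,RELATED,ESTABLISHED -j DROP
    #   iptables -I INPUT 1 -i eth0 -p udp --dport 5000 -m state --state NEW,RELATED,ESTABLISHED -j DROP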


def generate_namespaced_rules(
    interfaces: list[str], config: NetworkFilterConfig, pids: list[str]
) -> (list[str], list[str]):
    namespaced_input_rules: list[str] = []
    namespaced_output_rules: list[str] = []
    input_rules, output_rules = generate_rules(interfaces, config)
    for pid in pids:
        ns_input_rules = [
            f"nsenter --target {pid} --net -- {rule}" for rule in input_rules
        ]
        ns_output_rules = [
            f"nsenter --target {pid} --net -- {rule}" for rule in output_rules
        ]
        namespaced_input_rules.extend(ns_input_rules)
        namespaced_output_rules.extend(ns_output_rules)

    return namespaced_input_rules, namespaced_output_rules
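The pod variant does not run iptables against the host chains directly: each rule is wrapped in nsenter so it executes inside the network namespace of the target pod's container process. A sketch of the transformation for one rule and one resolved pid (the pid is hypothetical):

    rule = "iptables -I INPUT 1 -i eth0 -p tcp --dport 5000 -m state --state NEW,RELATED,ESTABLISHED -j DROP"
    namespaced = f"nsenter --target 4242 --net -- {rule}"
    # -> nsenter --target 4242 --net -- iptables -I INPUT 1 -i eth0 -p tcp --dport 5000 ... -j DROP
    # with several pids the same rule is emitted once per pid
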
def deploy_network_filter_pod(
    config: NetworkFilterConfig,
    target_node: str,
    pod_name: str,
    kubecli: KrknKubernetes,
    container_name: str = "fedora",
    host_network: bool = True,
):
    file_loader = FileSystemLoader(os.path.abspath(os.path.dirname(__file__)))
    env = Environment(loader=file_loader, autoescape=True)
    pod_template = env.get_template("templates/network-chaos.j2")
    tolerations = []

    for taint in config.taints:
        key_value_part, effect = taint.split(":", 1)
        if "=" in key_value_part:
            key, value = key_value_part.split("=", 1)
            operator = "Equal"
        else:
            key = key_value_part
            value = None
            operator = "Exists"
        toleration = {
            "key": key,
            "operator": operator,
            "effect": effect,
        }
        if value is not None:
            toleration["value"] = value
        tolerations.append(toleration)

    pod_body = yaml.safe_load(
        pod_template.render(
            pod_name=pod_name,
            namespace=config.namespace,
            host_network=host_network,
            target=target_node,
            container_name=container_name,
            workload_image=config.image,
            taints=tolerations,
            service_account=config.service_account,
        )
    )

    kubecli.create_pod(pod_body, config.namespace, 300)
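The taints accepted in the config are plain strings in the kubectl-style "key[=value]:effect" form; the loop above turns them into toleration dicts for the Jinja template. A quick illustration with made-up values:

    taint = "dedicated=chaos:NoSchedule"
    key_value_part, effect = taint.split(":", 1)   # ("dedicated=chaos", "NoSchedule")
    # "=" present, so the toleration rendered into the template is
    #   {"key": "dedicated", "operator": "Equal", "value": "chaos", "effect": "NoSchedule"}
    # a bare key such as "node-role.kubernetes.io/master:NoSchedule" instead yields
    #   {"key": "node-role.kubernetes.io/master", "operator": "Exists", "effect": "NoSchedule"}
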
def apply_network_rules(
    kubecli: KrknKubernetes,
    input_rules: list[str],
    output_rules: list[str],
    pod_name: str,
    namespace: str,
    parallel: bool,
    node_name: str,
):
    for rule in input_rules:
        log_info(f"applying iptables INPUT rule: {rule}", parallel, node_name)
        kubecli.exec_cmd_in_pod([rule], pod_name, namespace)
    for rule in output_rules:
        log_info(f"applying iptables OUTPUT rule: {rule}", parallel, node_name)
        kubecli.exec_cmd_in_pod([rule], pod_name, namespace)


def clean_network_rules(
    kubecli: KrknKubernetes,
    input_rules: list[str],
    output_rules: list[str],
    pod_name: str,
    namespace: str,
):
    for _ in input_rules:
        # always deleting the first rule since has been inserted from the top
        kubecli.exec_cmd_in_pod([f"iptables -D INPUT 1"], pod_name, namespace)
    for _ in output_rules:
        # always deleting the first rule since has been inserted from the top
        kubecli.exec_cmd_in_pod([f"iptables -D OUTPUT 1"], pod_name, namespace)


def clean_network_rules_namespaced(
    kubecli: KrknKubernetes,
    input_rules: list[str],
    output_rules: list[str],
    pod_name: str,
    namespace: str,
    pids: list[str],
):
    for _ in input_rules:
        for pid in pids:
            # always deleting the first rule since has been inserted from the top
            kubecli.exec_cmd_in_pod(
                [f"nsenter --target {pid} --net -- iptables -D INPUT 1"],
                pod_name,
                namespace,
            )
    for _ in output_rules:
        for pid in pids:
            # always deleting the first rule since has been inserted from the top
            kubecli.exec_cmd_in_pod(
                [f"nsenter --target {pid} --net -- iptables -D OUTPUT 1"],
                pod_name,
                namespace,
            )


def get_default_interface(
    pod_name: str, namespace: str, kubecli: KrknKubernetes
) -> str:
    cmd = "ip r | grep default | awk '/default/ {print $5}'"
    output = kubecli.exec_cmd_in_pod([cmd], pod_name, namespace)
    return output.replace("\n", "")
@@ -1,14 +1,25 @@
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift

from krkn.scenario_plugins.network_chaos_ng.models import NetworkFilterConfig
from krkn.scenario_plugins.network_chaos_ng.modules.abstract_network_chaos_module import AbstractNetworkChaosModule
from krkn.scenario_plugins.network_chaos_ng.modules.node_network_filter import NodeNetworkFilterModule
from krkn.scenario_plugins.network_chaos_ng.modules.abstract_network_chaos_module import (
    AbstractNetworkChaosModule,
)
from krkn.scenario_plugins.network_chaos_ng.modules.node_network_filter import (
    NodeNetworkFilterModule,
)
from krkn.scenario_plugins.network_chaos_ng.modules.pod_network_filter import (
    PodNetworkFilterModule,
)

supported_modules = ["node_network_filter", "pod_network_filter"]

supported_modules = ["node_network_filter"]


class NetworkChaosFactory:

    @staticmethod
    def get_instance(config: dict[str, str]) -> AbstractNetworkChaosModule:
    def get_instance(
        config: dict[str, str], kubecli: KrknTelemetryOpenshift
    ) -> AbstractNetworkChaosModule:
        if config["id"] is None:
            raise Exception("network chaos id cannot be None")
        if config["id"] not in supported_modules:
@@ -19,6 +30,10 @@ class NetworkChaosFactory:
            errors = config.validate()
            if len(errors) > 0:
                raise Exception(f"config validation errors: [{';'.join(errors)}]")
            return NodeNetworkFilterModule(config)

            return NodeNetworkFilterModule(config, kubecli)
        if config["id"] == "pod_network_filter":
            config = NetworkFilterConfig(**config)
            errors = config.validate()
            if len(errors) > 0:
                raise Exception(f"config validation errors: [{';'.join(errors)}]")
            return PodNetworkFilterModule(config, kubecli)
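The factory is what the scenario plugin feeds each entry of the scenario file into: the raw dict is validated as a NetworkFilterConfig and mapped to the matching module. A hedged usage sketch; the field values are illustrative and only keys actually read in this diff are shown:

    config = {
        "id": "pod_network_filter",        # must be one of supported_modules
        "namespace": "default",
        "target": "my-app-0",              # or a label_selector instead
        # plus the NetworkFilterConfig fields used above: ports, protocols,
        # ingress, egress, interfaces, test_duration, image, taints, service_account ...
    }
    module = NetworkChaosFactory.get_instance(config, lib_telemetry)  # lib_telemetry: KrknTelemetryOpenshift
    for target in module.get_targets():
        module.run(target)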
@@ -9,10 +9,6 @@ from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
|
||||
|
||||
from krkn.scenario_plugins.abstract_scenario_plugin import AbstractScenarioPlugin
|
||||
from krkn.scenario_plugins.network_chaos_ng.models import (
|
||||
NetworkChaosScenarioType,
|
||||
BaseNetworkChaosConfig,
|
||||
)
|
||||
from krkn.scenario_plugins.network_chaos_ng.modules.abstract_network_chaos_module import (
|
||||
AbstractNetworkChaosModule,
|
||||
)
|
||||
@@ -39,56 +35,52 @@ class NetworkChaosNgScenarioPlugin(AbstractScenarioPlugin):
|
||||
)
|
||||
return 1
|
||||
for config in scenario_config:
|
||||
network_chaos = NetworkChaosFactory.get_instance(config)
|
||||
network_chaos_config = network_chaos.get_config()
|
||||
logging.info(
|
||||
f"running network_chaos scenario: {network_chaos_config[1].id}"
|
||||
network_chaos = NetworkChaosFactory.get_instance(
|
||||
config, lib_telemetry
|
||||
)
|
||||
if network_chaos_config[0] == NetworkChaosScenarioType.Node:
|
||||
targets = lib_telemetry.get_lib_kubernetes().list_nodes(
|
||||
network_chaos_config[1].label_selector
|
||||
)
|
||||
else:
|
||||
targets = lib_telemetry.get_lib_kubernetes().list_pods(
|
||||
network_chaos_config[1].namespace,
|
||||
network_chaos_config[1].label_selector,
|
||||
)
|
||||
network_chaos_type, network_chaos_config = (
|
||||
network_chaos.get_config()
|
||||
)
|
||||
logging.info(
|
||||
f"running network_chaos scenario: {network_chaos_config.id}"
|
||||
)
|
||||
targets = network_chaos.get_targets()
|
||||
if len(targets) == 0:
|
||||
logging.warning(
|
||||
f"no targets found for {network_chaos_config[1].id} "
|
||||
f"network chaos scenario with selector {network_chaos_config[1].label_selector} "
|
||||
f"with target type {network_chaos_config[0]}"
|
||||
f"no targets found for {network_chaos_config.id} "
|
||||
f"network chaos scenario with selector {network_chaos_config.label_selector} "
|
||||
f"with target type {network_chaos_type}"
|
||||
)
|
||||
|
||||
if network_chaos_config[1].instance_count != 0 and network_chaos_config[1].instance_count > len(targets):
|
||||
targets = random.sample(targets, network_chaos_config[1].instance_count)
|
||||
if (
|
||||
network_chaos_config.instance_count != 0
|
||||
and network_chaos_config.instance_count > len(targets)
|
||||
):
|
||||
targets = random.sample(
|
||||
targets, network_chaos_config.instance_count
|
||||
)
|
||||
|
||||
if network_chaos_config[1].execution == "parallel":
|
||||
self.run_parallel(targets, network_chaos, lib_telemetry)
|
||||
if network_chaos_config.execution == "parallel":
|
||||
self.run_parallel(targets, network_chaos)
|
||||
else:
|
||||
self.run_serial(targets, network_chaos, lib_telemetry)
|
||||
self.run_serial(targets, network_chaos)
|
||||
if len(config) > 1:
|
||||
logging.info(f"waiting {network_chaos_config[1].wait_duration} seconds before running the next "
|
||||
f"Network Chaos NG Module")
|
||||
time.sleep(network_chaos_config[1].wait_duration)
|
||||
logging.info(
|
||||
f"waiting {network_chaos_config.wait_duration} seconds before running the next "
|
||||
f"Network Chaos NG Module"
|
||||
)
|
||||
time.sleep(network_chaos_config.wait_duration)
|
||||
except Exception as e:
|
||||
logging.error(str(e))
|
||||
return 1
|
||||
return 0
|
||||
|
||||
def run_parallel(
|
||||
self,
|
||||
targets: list[str],
|
||||
module: AbstractNetworkChaosModule,
|
||||
lib_telemetry: KrknTelemetryOpenshift,
|
||||
):
|
||||
def run_parallel(self, targets: list[str], module: AbstractNetworkChaosModule):
|
||||
error_queue = queue.Queue()
|
||||
threads = []
|
||||
errors = []
|
||||
for target in targets:
|
||||
thread = threading.Thread(
|
||||
target=module.run, args=[target, lib_telemetry, error_queue]
|
||||
)
|
||||
thread = threading.Thread(target=module.run, args=[target, error_queue])
|
||||
thread.start()
|
||||
threads.append(thread)
|
||||
for thread in threads:
|
||||
@@ -103,14 +95,9 @@ class NetworkChaosNgScenarioPlugin(AbstractScenarioPlugin):
|
||||
f"module {module.get_config()[1].id} execution failed: [{';'.join(errors)}]"
|
||||
)
|
||||
|
||||
def run_serial(
|
||||
self,
|
||||
targets: list[str],
|
||||
module: AbstractNetworkChaosModule,
|
||||
lib_telemetry: KrknTelemetryOpenshift,
|
||||
):
|
||||
def run_serial(self, targets: list[str], module: AbstractNetworkChaosModule):
|
||||
for target in targets:
|
||||
module.run(target, lib_telemetry)
|
||||
module.run(target)
|
||||
|
||||
def get_scenario_types(self) -> list[str]:
|
||||
return ["network_chaos_ng_scenarios"]
|
||||
|
||||
@@ -18,20 +18,20 @@ class abstract_node_scenarios:
|
||||
self.node_action_kube_check = node_action_kube_check
|
||||
|
||||
# Node scenario to start the node
|
||||
def node_start_scenario(self, instance_kill_count, node, timeout):
|
||||
def node_start_scenario(self, instance_kill_count, node, timeout, poll_interval):
|
||||
pass
|
||||
|
||||
# Node scenario to stop the node
|
||||
def node_stop_scenario(self, instance_kill_count, node, timeout):
|
||||
def node_stop_scenario(self, instance_kill_count, node, timeout, poll_interval):
|
||||
pass
|
||||
|
||||
# Node scenario to stop and then start the node
|
||||
def node_stop_start_scenario(self, instance_kill_count, node, timeout, duration):
|
||||
def node_stop_start_scenario(self, instance_kill_count, node, timeout, duration, poll_interval):
|
||||
logging.info("Starting node_stop_start_scenario injection")
|
||||
self.node_stop_scenario(instance_kill_count, node, timeout)
|
||||
self.node_stop_scenario(instance_kill_count, node, timeout, poll_interval)
|
||||
logging.info("Waiting for %s seconds before starting the node" % (duration))
|
||||
time.sleep(duration)
|
||||
self.node_start_scenario(instance_kill_count, node, timeout)
|
||||
self.node_start_scenario(instance_kill_count, node, timeout, poll_interval)
|
||||
self.affected_nodes_status.merge_affected_nodes()
|
||||
logging.info("node_stop_start_scenario has been successfully injected!")
|
||||
|
||||
@@ -56,11 +56,11 @@ class abstract_node_scenarios:
|
||||
logging.error("node_disk_detach_attach_scenario failed!")
|
||||
|
||||
# Node scenario to terminate the node
|
||||
def node_termination_scenario(self, instance_kill_count, node, timeout):
|
||||
def node_termination_scenario(self, instance_kill_count, node, timeout, poll_interval):
|
||||
pass
|
||||
|
||||
# Node scenario to reboot the node
|
||||
def node_reboot_scenario(self, instance_kill_count, node, timeout):
|
||||
def node_reboot_scenario(self, instance_kill_count, node, timeout, soft_reboot=False):
|
||||
pass
|
||||
|
||||
# Node scenario to stop the kubelet
|
||||
@@ -76,7 +76,7 @@ class abstract_node_scenarios:
|
||||
nodeaction.wait_for_unknown_status(node, timeout, self.kubecli, affected_node)
|
||||
|
||||
logging.info("The kubelet of the node %s has been stopped" % (node))
|
||||
logging.info("stop_kubelet_scenario has been successfuly injected!")
|
||||
logging.info("stop_kubelet_scenario has been successfully injected!")
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to stop the kubelet of the node. Encountered following "
|
||||
@@ -84,7 +84,7 @@ class abstract_node_scenarios:
|
||||
)
|
||||
logging.error("stop_kubelet_scenario injection failed!")
|
||||
raise e
|
||||
self.add_affected_node(affected_node)
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
# Node scenario to stop and start the kubelet
|
||||
def stop_start_kubelet_scenario(self, instance_kill_count, node, timeout):
|
||||
@@ -106,10 +106,9 @@ class abstract_node_scenarios:
|
||||
+ node
|
||||
+ " -- chroot /host systemctl restart kubelet &"
|
||||
)
|
||||
nodeaction.wait_for_not_ready_status(node, timeout, self.kubecli, affected_node)
|
||||
nodeaction.wait_for_ready_status(node, timeout, self.kubecli,affected_node)
|
||||
logging.info("The kubelet of the node %s has been restarted" % (node))
|
||||
logging.info("restart_kubelet_scenario has been successfuly injected!")
|
||||
logging.info("restart_kubelet_scenario has been successfully injected!")
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to restart the kubelet of the node. Encountered following "
|
||||
@@ -117,7 +116,7 @@ class abstract_node_scenarios:
|
||||
)
|
||||
logging.error("restart_kubelet_scenario injection failed!")
|
||||
raise e
|
||||
self.add_affected_node(affected_node)
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
# Node scenario to crash the node
|
||||
def node_crash_scenario(self, instance_kill_count, node, timeout):
|
||||
@@ -125,18 +124,18 @@ class abstract_node_scenarios:
|
||||
try:
|
||||
logging.info("Starting node_crash_scenario injection")
|
||||
logging.info("Crashing the node %s" % (node))
|
||||
runcommand.invoke(
|
||||
runcommand.run(
|
||||
"oc debug node/" + node + " -- chroot /host "
|
||||
"dd if=/dev/urandom of=/proc/sysrq-trigger"
|
||||
)
|
||||
logging.info("node_crash_scenario has been successfuly injected!")
|
||||
logging.info("node_crash_scenario has been successfully injected!")
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to crash the node. Encountered following exception: %s. "
|
||||
"Test Failed" % (e)
|
||||
)
|
||||
logging.error("node_crash_scenario injection failed!")
|
||||
raise e
|
||||
return 1
|
||||
|
||||
# Node scenario to check service status on helper node
|
||||
def node_service_status(self, node, service, ssh_private_key, timeout):
|
||||
|
||||
@@ -234,7 +234,7 @@ class alibaba_node_scenarios(abstract_node_scenarios):
|
||||
|
||||
|
||||
# Node scenario to start the node
|
||||
def node_start_scenario(self, instance_kill_count, node, timeout):
|
||||
def node_start_scenario(self, instance_kill_count, node, timeout, poll_interval):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
@@ -260,7 +260,7 @@ class alibaba_node_scenarios(abstract_node_scenarios):
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
# Node scenario to stop the node
|
||||
def node_stop_scenario(self, instance_kill_count, node, timeout):
|
||||
def node_stop_scenario(self, instance_kill_count, node, timeout, poll_interval):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
@@ -286,7 +286,7 @@ class alibaba_node_scenarios(abstract_node_scenarios):
|
||||
|
||||
# Might need to stop and then release the instance
|
||||
# Node scenario to terminate the node
|
||||
def node_termination_scenario(self, instance_kill_count, node, timeout):
|
||||
def node_termination_scenario(self, instance_kill_count, node, timeout, poll_interval):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
@@ -316,7 +316,7 @@ class alibaba_node_scenarios(abstract_node_scenarios):
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
# Node scenario to reboot the node
|
||||
def node_reboot_scenario(self, instance_kill_count, node, timeout):
|
||||
def node_reboot_scenario(self, instance_kill_count, node, timeout, soft_reboot=False):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
|
||||
@@ -77,10 +77,21 @@ class AWS:
    # until a successful state is reached. An error is returned after 40 failed checks
    # Setting timeout for consistency with other cloud functions
    # Wait until the node instance is running
    def wait_until_running(self, instance_id, timeout=600, affected_node=None):
    def wait_until_running(self, instance_id, timeout=600, affected_node=None, poll_interval=15):
        try:
            start_time = time.time()
            self.boto_instance.wait_until_running(InstanceIds=[instance_id])
            if timeout > 0:
                max_attempts = max(1, int(timeout / poll_interval))
            else:
                max_attempts = 40

            self.boto_instance.wait_until_running(
                InstanceIds=[instance_id],
                WaiterConfig={
                    'Delay': poll_interval,
                    'MaxAttempts': max_attempts
                }
            )
            end_time = time.time()
            if affected_node:
                affected_node.set_affected_node_status("running", end_time - start_time)
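The waiter change makes the overall timeout explicit: boto3 polls every poll_interval seconds and gives up after MaxAttempts polls, so MaxAttempts is derived from the requested timeout. A quick check of the arithmetic with the defaults above:

    timeout, poll_interval = 600, 15
    max_attempts = max(1, int(timeout / poll_interval))   # 40 attempts
    # 40 attempts * 15 s delay = 600 s worst-case wait, matching the old
    # boto3 default of 40 checks; a timeout of 0 (or less) falls back to 40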
@@ -93,10 +104,21 @@ class AWS:
|
||||
return False
|
||||
|
||||
# Wait until the node instance is stopped
|
||||
def wait_until_stopped(self, instance_id, timeout=600, affected_node= None):
|
||||
def wait_until_stopped(self, instance_id, timeout=600, affected_node= None, poll_interval=15):
|
||||
try:
|
||||
start_time = time.time()
|
||||
self.boto_instance.wait_until_stopped(InstanceIds=[instance_id])
|
||||
if timeout > 0:
|
||||
max_attempts = max(1, int(timeout / poll_interval))
|
||||
else:
|
||||
max_attempts = 40
|
||||
|
||||
self.boto_instance.wait_until_stopped(
|
||||
InstanceIds=[instance_id],
|
||||
WaiterConfig={
|
||||
'Delay': poll_interval,
|
||||
'MaxAttempts': max_attempts
|
||||
}
|
||||
)
|
||||
end_time = time.time()
|
||||
if affected_node:
|
||||
affected_node.set_affected_node_status("stopped", end_time - start_time)
|
||||
@@ -109,10 +131,21 @@ class AWS:
|
||||
return False
|
||||
|
||||
# Wait until the node instance is terminated
|
||||
def wait_until_terminated(self, instance_id, timeout=600, affected_node= None):
|
||||
def wait_until_terminated(self, instance_id, timeout=600, affected_node= None, poll_interval=15):
|
||||
try:
|
||||
start_time = time.time()
|
||||
self.boto_instance.wait_until_terminated(InstanceIds=[instance_id])
|
||||
if timeout > 0:
|
||||
max_attempts = max(1, int(timeout / poll_interval))
|
||||
else:
|
||||
max_attempts = 40
|
||||
|
||||
self.boto_instance.wait_until_terminated(
|
||||
InstanceIds=[instance_id],
|
||||
WaiterConfig={
|
||||
'Delay': poll_interval,
|
||||
'MaxAttempts': max_attempts
|
||||
}
|
||||
)
|
||||
end_time = time.time()
|
||||
if affected_node:
|
||||
affected_node.set_affected_node_status("terminated", end_time - start_time)
|
||||
@@ -267,7 +300,7 @@ class aws_node_scenarios(abstract_node_scenarios):
|
||||
self.node_action_kube_check = node_action_kube_check
|
||||
|
||||
# Node scenario to start the node
|
||||
def node_start_scenario(self, instance_kill_count, node, timeout):
|
||||
def node_start_scenario(self, instance_kill_count, node, timeout, poll_interval):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
@@ -278,7 +311,7 @@ class aws_node_scenarios(abstract_node_scenarios):
|
||||
"Starting the node %s with instance ID: %s " % (node, instance_id)
|
||||
)
|
||||
self.aws.start_instances(instance_id)
|
||||
self.aws.wait_until_running(instance_id, affected_node=affected_node)
|
||||
self.aws.wait_until_running(instance_id, timeout=timeout, affected_node=affected_node, poll_interval=poll_interval)
|
||||
if self.node_action_kube_check:
|
||||
nodeaction.wait_for_ready_status(node, timeout, self.kubecli, affected_node)
|
||||
logging.info(
|
||||
@@ -296,7 +329,7 @@ class aws_node_scenarios(abstract_node_scenarios):
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
# Node scenario to stop the node
|
||||
def node_stop_scenario(self, instance_kill_count, node, timeout):
|
||||
def node_stop_scenario(self, instance_kill_count, node, timeout, poll_interval):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
@@ -307,7 +340,7 @@ class aws_node_scenarios(abstract_node_scenarios):
|
||||
"Stopping the node %s with instance ID: %s " % (node, instance_id)
|
||||
)
|
||||
self.aws.stop_instances(instance_id)
|
||||
self.aws.wait_until_stopped(instance_id, affected_node=affected_node)
|
||||
self.aws.wait_until_stopped(instance_id, timeout=timeout, affected_node=affected_node, poll_interval=poll_interval)
|
||||
logging.info(
|
||||
"Node with instance ID: %s is in stopped state" % (instance_id)
|
||||
)
|
||||
@@ -324,7 +357,7 @@ class aws_node_scenarios(abstract_node_scenarios):
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
# Node scenario to terminate the node
|
||||
def node_termination_scenario(self, instance_kill_count, node, timeout):
|
||||
def node_termination_scenario(self, instance_kill_count, node, timeout, poll_interval):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
@@ -336,7 +369,7 @@ class aws_node_scenarios(abstract_node_scenarios):
|
||||
% (node, instance_id)
|
||||
)
|
||||
self.aws.terminate_instances(instance_id)
|
||||
self.aws.wait_until_terminated(instance_id, affected_node=affected_node)
|
||||
self.aws.wait_until_terminated(instance_id, timeout=timeout, affected_node=affected_node, poll_interval=poll_interval)
|
||||
for _ in range(timeout):
|
||||
if node not in self.kubecli.list_nodes():
|
||||
break
|
||||
@@ -346,7 +379,7 @@ class aws_node_scenarios(abstract_node_scenarios):
|
||||
logging.info(
|
||||
"Node with instance ID: %s has been terminated" % (instance_id)
|
||||
)
|
||||
logging.info("node_termination_scenario has been successfuly injected!")
|
||||
logging.info("node_termination_scenario has been successfully injected!")
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to terminate node instance. Encountered following exception:"
|
||||
@@ -358,7 +391,7 @@ class aws_node_scenarios(abstract_node_scenarios):
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
# Node scenario to reboot the node
|
||||
def node_reboot_scenario(self, instance_kill_count, node, timeout):
|
||||
def node_reboot_scenario(self, instance_kill_count, node, timeout, soft_reboot=False):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
@@ -375,7 +408,7 @@ class aws_node_scenarios(abstract_node_scenarios):
|
||||
logging.info(
|
||||
"Node with instance ID: %s has been rebooted" % (instance_id)
|
||||
)
|
||||
logging.info("node_reboot_scenario has been successfuly injected!")
|
||||
logging.info("node_reboot_scenario has been successfully injected!")
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to reboot node instance. Encountered following exception:"
|
||||
|
||||
@@ -18,8 +18,6 @@ class Azure:
|
||||
logging.info("azure " + str(self))
|
||||
# Acquire a credential object using CLI-based authentication.
|
||||
credentials = DefaultAzureCredential()
|
||||
# az_account = runcommand.invoke("az account list -o yaml")
|
||||
# az_account_yaml = yaml.safe_load(az_account, Loader=yaml.FullLoader)
|
||||
logger = logging.getLogger("azure")
|
||||
logger.setLevel(logging.WARNING)
|
||||
subscription_id = os.getenv("AZURE_SUBSCRIPTION_ID")
|
||||
@@ -218,7 +216,7 @@ class azure_node_scenarios(abstract_node_scenarios):
|
||||
|
||||
|
||||
# Node scenario to start the node
|
||||
def node_start_scenario(self, instance_kill_count, node, timeout):
|
||||
def node_start_scenario(self, instance_kill_count, node, timeout, poll_interval):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
@@ -246,7 +244,7 @@ class azure_node_scenarios(abstract_node_scenarios):
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
# Node scenario to stop the node
|
||||
def node_stop_scenario(self, instance_kill_count, node, timeout):
|
||||
def node_stop_scenario(self, instance_kill_count, node, timeout, poll_interval):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
@@ -273,7 +271,7 @@ class azure_node_scenarios(abstract_node_scenarios):
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
# Node scenario to terminate the node
|
||||
def node_termination_scenario(self, instance_kill_count, node, timeout):
|
||||
def node_termination_scenario(self, instance_kill_count, node, timeout, poll_interval):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
@@ -308,7 +306,7 @@ class azure_node_scenarios(abstract_node_scenarios):
|
||||
|
||||
|
||||
# Node scenario to reboot the node
|
||||
def node_reboot_scenario(self, instance_kill_count, node, timeout):
|
||||
def node_reboot_scenario(self, instance_kill_count, node, timeout, soft_reboot=False):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
|
||||
@@ -10,6 +10,7 @@ import time
|
||||
import traceback
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
from krkn_lib.models.k8s import AffectedNode, AffectedNodeStatus
|
||||
from krkn_lib.utils import get_random_string
|
||||
|
||||
class BM:
|
||||
def __init__(self, bm_info, user, passwd):
|
||||
@@ -21,6 +22,17 @@ class BM:
|
||||
with oc.project("openshift-machine-api"):
|
||||
return oc.selector("node/" + node_name).object()
|
||||
|
||||
def get_bm_disks(self, node_name):
|
||||
if (
|
||||
self.bm_info is not None
|
||||
and node_name in self.bm_info
|
||||
and "disks" in self.bm_info[node_name]
|
||||
):
|
||||
return self.bm_info[node_name]["disks"]
|
||||
else:
|
||||
return []
|
||||
|
||||
|
||||
# Get the ipmi or other BMC address of the baremetal node
|
||||
def get_bmc_addr(self, node_name):
|
||||
# Addresses in the config get higher priority.
|
||||
@@ -141,7 +153,7 @@ class bm_node_scenarios(abstract_node_scenarios):
|
||||
self.node_action_kube_check = node_action_kube_check
|
||||
|
||||
# Node scenario to start the node
|
||||
def node_start_scenario(self, instance_kill_count, node, timeout):
|
||||
def node_start_scenario(self, instance_kill_count, node, timeout, poll_interval):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
@@ -170,7 +182,7 @@ class bm_node_scenarios(abstract_node_scenarios):
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
# Node scenario to stop the node
|
||||
def node_stop_scenario(self, instance_kill_count, node, timeout):
|
||||
def node_stop_scenario(self, instance_kill_count, node, timeout, poll_interval):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
@@ -198,11 +210,11 @@ class bm_node_scenarios(abstract_node_scenarios):
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
# Node scenario to terminate the node
|
||||
def node_termination_scenario(self, instance_kill_count, node, timeout):
|
||||
def node_termination_scenario(self, instance_kill_count, node, timeout, poll_interval):
|
||||
logging.info("Node termination scenario is not supported on baremetal")
|
||||
|
||||
# Node scenario to reboot the node
|
||||
def node_reboot_scenario(self, instance_kill_count, node, timeout):
|
||||
def node_reboot_scenario(self, instance_kill_count, node, timeout, soft_reboot=False):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
@@ -217,7 +229,7 @@ class bm_node_scenarios(abstract_node_scenarios):
|
||||
nodeaction.wait_for_unknown_status(node, timeout, self.kubecli, affected_node)
|
||||
nodeaction.wait_for_ready_status(node, timeout, self.kubecli, affected_node)
|
||||
logging.info("Node with bmc address: %s has been rebooted" % (bmc_addr))
|
||||
logging.info("node_reboot_scenario has been successfuly injected!")
|
||||
logging.info("node_reboot_scenario has been successfully injected!")
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to reboot node instance. Encountered following exception:"
|
||||
@@ -228,3 +240,104 @@ class bm_node_scenarios(abstract_node_scenarios):
|
||||
logging.error("node_reboot_scenario injection failed!")
|
||||
raise e
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
def node_disk_detach_attach_scenario(self, instance_kill_count, node, timeout, duration):
|
||||
logging.info("Starting disk_detach_attach_scenario injection")
|
||||
disk_attachment_details = self.get_disk_attachment_info(instance_kill_count, node)
|
||||
if disk_attachment_details:
|
||||
self.disk_detach_scenario(instance_kill_count, node, disk_attachment_details, timeout)
|
||||
logging.info("Waiting for %s seconds before attaching the disk" % (duration))
|
||||
time.sleep(duration)
|
||||
self.disk_attach_scenario(instance_kill_count, node, disk_attachment_details)
|
||||
logging.info("node_disk_detach_attach_scenario has been successfully injected!")
|
||||
else:
|
||||
logging.error("Node %s has only root disk attached" % (node))
|
||||
logging.error("node_disk_detach_attach_scenario failed!")
|
||||
|
||||
|
||||
# Get volume attachment info
|
||||
def get_disk_attachment_info(self, instance_kill_count, node):
|
||||
for _ in range(instance_kill_count):
|
||||
try:
|
||||
logging.info("Obtaining disk attachment information")
|
||||
user_disks= self.bm.get_bm_disks(node)
|
||||
disk_pod_name = f"disk-pod-{get_random_string(5)}"
|
||||
cmd = '''bootdev=$(lsblk -no PKNAME $(findmnt -no SOURCE /boot));
|
||||
for path in /sys/block/*/device/state; do
|
||||
dev=$(basename $(dirname $(dirname "$path")));
|
||||
[[ "$dev" != "$bootdev" ]] && echo "$dev";
|
||||
done'''
|
||||
pod_command = ["chroot /host /bin/sh -c '" + cmd + "'"]
|
||||
disk_response = self.kubecli.exec_command_on_node(
|
||||
node, pod_command, disk_pod_name, "default"
|
||||
)
|
||||
logging.info("Disk response: %s" % (disk_response))
|
||||
node_disks = [disk for disk in disk_response.split("\n") if disk]
|
||||
logging.info("Node disks: %s" % (node_disks))
|
||||
offline_disks = [disk for disk in user_disks if disk in node_disks]
|
||||
return offline_disks if offline_disks else node_disks
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to obtain disk attachment information of %s node. "
|
||||
"Encounteres following exception: %s." % (node, e)
|
||||
)
|
||||
raise RuntimeError()
|
||||
finally:
|
||||
self.kubecli.delete_pod(disk_pod_name, "default")
|
||||
|
||||
# Node scenario to detach the volume
|
||||
def disk_detach_scenario(self, instance_kill_count, node, disk_attachment_details, timeout):
|
||||
for _ in range(instance_kill_count):
|
||||
try:
|
||||
logging.info("Starting disk_detach_scenario injection")
|
||||
logging.info(
|
||||
"Detaching the %s disks from instance %s "
|
||||
% (disk_attachment_details, node)
|
||||
)
|
||||
disk_pod_name = f"detach-disk-pod-{get_random_string(5)}"
|
||||
detach_disk_command=''
|
||||
for disk in disk_attachment_details:
|
||||
detach_disk_command = detach_disk_command + "echo offline > /sys/block/" + disk + "/device/state;"
|
||||
pod_command = ["chroot /host /bin/sh -c '" + detach_disk_command + "'"]
|
||||
cmd_output = self.kubecli.exec_command_on_node(
|
||||
node, pod_command, disk_pod_name, "default"
|
||||
)
|
||||
logging.info("Disk command output: %s" % (cmd_output))
|
||||
logging.info("Disk %s has been detached from %s node" % (disk_attachment_details, node))
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to detach disk from %s node. Encountered following"
|
||||
"exception: %s." % (node, e)
|
||||
)
|
||||
logging.debug("")
|
||||
raise RuntimeError()
|
||||
finally:
|
||||
self.kubecli.delete_pod(disk_pod_name, "default")
|
||||
|
||||
# Node scenario to attach the volume
|
||||
def disk_attach_scenario(self, instance_kill_count, node, disk_attachment_details):
|
||||
for _ in range(instance_kill_count):
|
||||
try:
|
||||
logging.info(
|
||||
"Attaching the %s disks from instance %s "
|
||||
% (disk_attachment_details, node)
|
||||
)
|
||||
disk_pod_name = f"attach-disk-pod-{get_random_string(5)}"
|
||||
attach_disk_command=''
|
||||
for disk in disk_attachment_details:
|
||||
attach_disk_command = attach_disk_command + "echo running > /sys/block/" + disk + "/device/state;"
|
||||
pod_command = ["chroot /host /bin/sh -c '" + attach_disk_command + "'"]
|
||||
cmd_output = self.kubecli.exec_command_on_node(
|
||||
node, pod_command, disk_pod_name, "default"
|
||||
)
|
||||
logging.info("Disk command output: %s" % (cmd_output))
|
||||
logging.info("Disk %s has been attached to %s node" % (disk_attachment_details, node))
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to attach disk to %s node. Encountered following"
|
||||
"exception: %s." % (node, e)
|
||||
)
|
||||
logging.debug("")
|
||||
raise RuntimeError()
|
||||
finally:
|
||||
self.kubecli.delete_pod(disk_pod_name, "default")
|
||||
|
||||
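The baremetal disk scenario simulates detach/attach purely through sysfs on the node: each targeted disk's device state is flipped to offline and later back to running. A sketch of the command string built above, for two hypothetical disks:

    disks = ["sdb", "sdc"]
    detach = "".join(f"echo offline > /sys/block/{d}/device/state;" for d in disks)
    # run as: chroot /host /bin/sh -c 'echo offline > /sys/block/sdb/device/state;echo offline > /sys/block/sdc/device/state;'
    attach = "".join(f"echo running > /sys/block/{d}/device/state;" for d in disks)
    # the attach command is identical except it writes "running" instead of "offline"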
@@ -1,41 +1,37 @@
|
||||
import datetime
|
||||
import time
|
||||
import random
|
||||
import logging
|
||||
import paramiko
|
||||
from krkn_lib.models.k8s import AffectedNode
|
||||
import krkn.invoke.command as runcommand
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
from krkn_lib.models.k8s import AffectedNode, AffectedNodeStatus
|
||||
from krkn_lib.models.k8s import AffectedNode
|
||||
|
||||
node_general = False
|
||||
|
||||
|
||||
def get_node_by_name(node_name_list, kubecli: KrknKubernetes):
|
||||
killable_nodes = kubecli.list_killable_nodes()
|
||||
for node_name in node_name_list:
|
||||
if node_name not in killable_nodes:
|
||||
logging.info(
|
||||
f"Node with provided ${node_name} does not exist or the node might "
|
||||
f"Node with provided {node_name} does not exist or the node might "
|
||||
"be in NotReady state."
|
||||
)
|
||||
return
|
||||
return node_name_list
|
||||
|
||||
|
||||
|
||||
# Pick a random node with specified label selector
|
||||
def get_node(label_selector, instance_kill_count, kubecli: KrknKubernetes):
|
||||
|
||||
label_selector_list = label_selector.split(",")
|
||||
label_selector_list = label_selector.split(",")
|
||||
nodes = []
|
||||
for label_selector in label_selector_list:
|
||||
for label_selector in label_selector_list:
|
||||
nodes.extend(kubecli.list_killable_nodes(label_selector))
|
||||
if not nodes:
|
||||
raise Exception("Ready nodes with the provided label selector do not exist")
|
||||
logging.info("Ready nodes with the label selector %s: %s" % (label_selector_list, nodes))
|
||||
logging.info(
|
||||
"Ready nodes with the label selector %s: %s" % (label_selector_list, nodes)
|
||||
)
|
||||
number_of_nodes = len(nodes)
|
||||
if instance_kill_count == number_of_nodes:
|
||||
if instance_kill_count == number_of_nodes or instance_kill_count == 0:
|
||||
return nodes
|
||||
nodes_to_return = []
|
||||
for i in range(instance_kill_count):
|
||||
@@ -44,35 +40,34 @@ def get_node(label_selector, instance_kill_count, kubecli: KrknKubernetes):
|
||||
nodes.remove(node_to_add)
|
||||
return nodes_to_return
|
||||
|
||||
|
||||
# krkn_lib
|
||||
# Wait until the node status becomes Ready
|
||||
def wait_for_ready_status(node, timeout, kubecli: KrknKubernetes, affected_node: AffectedNode = None):
|
||||
affected_node = kubecli.watch_node_status(node, "True", timeout, affected_node)
|
||||
def wait_for_ready_status(
|
||||
node, timeout, kubecli: KrknKubernetes, affected_node: AffectedNode = None
|
||||
):
|
||||
affected_node = kubecli.watch_node_status(node, "True", timeout, affected_node)
|
||||
return affected_node
|
||||
|
||||
|
||||
|
||||
# krkn_lib
|
||||
# Wait until the node status becomes Not Ready
|
||||
def wait_for_not_ready_status(node, timeout, kubecli: KrknKubernetes, affected_node: AffectedNode = None):
|
||||
def wait_for_not_ready_status(
|
||||
node, timeout, kubecli: KrknKubernetes, affected_node: AffectedNode = None
|
||||
):
|
||||
affected_node = kubecli.watch_node_status(node, "False", timeout, affected_node)
|
||||
return affected_node
|
||||
|
||||
|
||||
|
||||
# krkn_lib
|
||||
# Wait until the node status becomes Unknown
|
||||
def wait_for_unknown_status(node, timeout, kubecli: KrknKubernetes, affected_node: AffectedNode = None):
|
||||
def wait_for_unknown_status(
|
||||
node, timeout, kubecli: KrknKubernetes, affected_node: AffectedNode = None
|
||||
):
|
||||
affected_node = kubecli.watch_node_status(node, "Unknown", timeout, affected_node)
|
||||
return affected_node
|
||||
|
||||
|
||||
# Get the ip of the cluster node
|
||||
def get_node_ip(node):
|
||||
return runcommand.invoke(
|
||||
"kubectl get node %s -o "
|
||||
"jsonpath='{.status.addresses[?(@.type==\"InternalIP\")].address}'" % (node)
|
||||
)
|
||||
|
||||
|
||||
def check_service_status(node, service, ssh_private_key, timeout):
|
||||
ssh = paramiko.SSHClient()
|
||||
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
|
||||
|
||||
@@ -2,49 +2,176 @@ import krkn.scenario_plugins.node_actions.common_node_functions as nodeaction
from krkn.scenario_plugins.node_actions.abstract_node_scenarios import (
    abstract_node_scenarios,
)
import os
import platform
import logging
import docker
from krkn_lib.k8s import KrknKubernetes
from krkn_lib.models.k8s import AffectedNode, AffectedNodeStatus


class Docker:
    """
    Container runtime client wrapper supporting both Docker and Podman.

    This class automatically detects and connects to either Docker or Podman
    container runtimes using the Docker-compatible API. It tries multiple
    connection methods in order of preference:

    1. Docker Unix socket (unix:///var/run/docker.sock)
    2. Platform-specific Podman sockets:
       - macOS: ~/.local/share/containers/podman/machine/podman.sock
       - Linux rootful: unix:///run/podman/podman.sock
       - Linux rootless: unix:///run/user/<uid>/podman/podman.sock
    3. Environment variables (DOCKER_HOST or CONTAINER_HOST)

    The runtime type (docker/podman) is auto-detected and logged for debugging.
    Supports Kind clusters running on Podman.

    Assisted By: Claude Code
    """
def __init__(self):
|
||||
self.client = docker.from_env()
|
||||
self.client = None
|
||||
self.runtime = 'unknown'
|
||||
|
||||
|
||||
# Try multiple connection methods in order of preference
|
||||
# Supports both Docker and Podman
|
||||
connection_methods = [
|
||||
('unix:///var/run/docker.sock', 'Docker Unix socket'),
|
||||
]
|
||||
|
||||
# Add platform-specific Podman sockets
|
||||
if platform.system() == 'Darwin': # macOS
|
||||
# On macOS, Podman uses podman-machine with socket typically at:
|
||||
# ~/.local/share/containers/podman/machine/podman.sock
|
||||
# This is often symlinked to /var/run/docker.sock
|
||||
podman_machine_sock = os.path.expanduser('~/.local/share/containers/podman/machine/podman.sock')
|
||||
if os.path.exists(podman_machine_sock):
|
||||
connection_methods.append((f'unix://{podman_machine_sock}', 'Podman machine socket (macOS)'))
|
||||
else: # Linux
|
||||
connection_methods.extend([
|
||||
('unix:///run/podman/podman.sock', 'Podman Unix socket (rootful)'),
|
||||
('unix:///run/user/{uid}/podman/podman.sock', 'Podman Unix socket (rootless)'),
|
||||
])
|
||||
|
||||
# Always try from_env as last resort
|
||||
connection_methods.append(('from_env', 'Environment variables (DOCKER_HOST/CONTAINER_HOST)'))
|
||||
|
||||
for method, description in connection_methods:
|
||||
try:
|
||||
# Handle rootless Podman socket path with {uid} placeholder
|
||||
if '{uid}' in method:
|
||||
uid = os.getuid()
|
||||
method = method.format(uid=uid)
|
||||
logging.info(f'Attempting to connect using {description}: {method}')
|
||||
|
||||
if method == 'from_env':
|
||||
logging.info(f'Attempting to connect using {description}')
|
||||
self.client = docker.from_env()
|
||||
else:
|
||||
logging.info(f'Attempting to connect using {description}: {method}')
|
||||
self.client = docker.DockerClient(base_url=method)
|
||||
|
||||
# Test the connection
|
||||
self.client.ping()
|
||||
|
||||
# Detect runtime type
|
||||
try:
|
||||
version_info = self.client.version()
|
||||
version_str = version_info.get('Version', '')
|
||||
if 'podman' in version_str.lower():
|
||||
self.runtime = 'podman'
|
||||
else:
|
||||
self.runtime = 'docker'
|
||||
logging.debug(f'Runtime version info: {version_str}')
|
||||
except Exception as version_err:
|
||||
logging.warning(f'Could not detect runtime version: {version_err}')
|
||||
self.runtime = 'unknown'
|
||||
|
||||
logging.info(f'Successfully connected to {self.runtime} using {description}')
|
||||
|
||||
# Log available containers for debugging
|
||||
try:
|
||||
containers = self.client.containers.list(all=True)
|
||||
logging.info(f'Found {len(containers)} total containers')
|
||||
for container in containers[:5]: # Log first 5
|
||||
logging.debug(f' Container: {container.name} ({container.status})')
|
||||
except Exception as list_err:
|
||||
logging.warning(f'Could not list containers: {list_err}')
|
||||
|
||||
break
|
||||
|
||||
except Exception as e:
|
||||
logging.warning(f'Failed to connect using {description}: {e}')
|
||||
continue
|
||||
|
||||
if self.client is None:
|
||||
error_msg = 'Failed to initialize container runtime client (Docker/Podman) with any connection method'
|
||||
logging.error(error_msg)
|
||||
logging.error('Attempted connection methods:')
|
||||
for method, desc in connection_methods:
|
||||
logging.error(f' - {desc}: {method}')
|
||||
raise RuntimeError(error_msg)
|
||||
|
||||
logging.info(f'Container runtime client initialized successfully: {self.runtime}')
|
||||
|
||||
def get_container_id(self, node_name):
"""Get the container ID for a given node name."""
container = self.client.containers.get(node_name)
logging.info(f'Found {self.runtime} container for node {node_name}: {container.id}')
return container.id

# Start the node instance
def start_instances(self, node_name):
"""Start a container instance (works with both Docker and Podman)."""
logging.info(f'Starting {self.runtime} container for node: {node_name}')
container = self.client.containers.get(node_name)
container.start()
logging.info(f'Container {container.id} started successfully')

# Stop the node instance
def stop_instances(self, node_name):
"""Stop a container instance (works with both Docker and Podman)."""
logging.info(f'Stopping {self.runtime} container for node: {node_name}')
container = self.client.containers.get(node_name)
container.stop()
logging.info(f'Container {container.id} stopped successfully')

# Reboot the node instance
def reboot_instances(self, node_name):
"""Restart a container instance (works with both Docker and Podman)."""
logging.info(f'Restarting {self.runtime} container for node: {node_name}')
container = self.client.containers.get(node_name)
container.restart()
logging.info(f'Container {container.id} restarted successfully')

# Terminate the node instance
def terminate_instances(self, node_name):
"""Stop and remove a container instance (works with both Docker and Podman)."""
logging.info(f'Terminating {self.runtime} container for node: {node_name}')
container = self.client.containers.get(node_name)
container.stop()
container.remove()
logging.info(f'Container {container.id} terminated and removed successfully')


class docker_node_scenarios(abstract_node_scenarios):
"""
Node chaos scenarios for containerized Kubernetes nodes.

Supports both Docker and Podman container runtimes. This class provides
methods to inject chaos into Kubernetes nodes running as containers
(e.g., Kind clusters, Podman-based clusters).
"""
def __init__(self, kubecli: KrknKubernetes, node_action_kube_check: bool, affected_nodes_status: AffectedNodeStatus):
logging.info('Initializing docker_node_scenarios (supports Docker and Podman)')
super().__init__(kubecli, node_action_kube_check, affected_nodes_status)
self.docker = Docker()
self.node_action_kube_check = node_action_kube_check
logging.info(f'Node scenarios initialized successfully using {self.docker.runtime} runtime')

# Node scenario to start the node
def node_start_scenario(self, instance_kill_count, node, timeout):
def node_start_scenario(self, instance_kill_count, node, timeout, poll_interval):
for _ in range(instance_kill_count):
affected_node = AffectedNode(node)
try:
@@ -71,7 +198,7 @@ class docker_node_scenarios(abstract_node_scenarios):
self.affected_nodes_status.affected_nodes.append(affected_node)

# Node scenario to stop the node
def node_stop_scenario(self, instance_kill_count, node, timeout):
def node_stop_scenario(self, instance_kill_count, node, timeout, poll_interval):
for _ in range(instance_kill_count):
affected_node = AffectedNode(node)
try:
@@ -97,7 +224,7 @@ class docker_node_scenarios(abstract_node_scenarios):
self.affected_nodes_status.affected_nodes.append(affected_node)

# Node scenario to terminate the node
def node_termination_scenario(self, instance_kill_count, node, timeout):
def node_termination_scenario(self, instance_kill_count, node, timeout, poll_interval):
for _ in range(instance_kill_count):
try:
logging.info("Starting node_termination_scenario injection")
@@ -110,7 +237,7 @@ class docker_node_scenarios(abstract_node_scenarios):
logging.info(
"Node with container ID: %s has been terminated" % (container_id)
)
logging.info("node_termination_scenario has been successfuly injected!")
logging.info("node_termination_scenario has been successfully injected!")
except Exception as e:
logging.error(
"Failed to terminate node instance. Encountered following exception:"
@@ -120,7 +247,7 @@ class docker_node_scenarios(abstract_node_scenarios):
raise e

# Node scenario to reboot the node
def node_reboot_scenario(self, instance_kill_count, node, timeout):
def node_reboot_scenario(self, instance_kill_count, node, timeout, soft_reboot=False):
for _ in range(instance_kill_count):
affected_node = AffectedNode(node)
try:
@@ -137,7 +264,7 @@ class docker_node_scenarios(abstract_node_scenarios):
logging.info(
"Node with container ID: %s has been rebooted" % (container_id)
)
logging.info("node_reboot_scenario has been successfuly injected!")
logging.info("node_reboot_scenario has been successfully injected!")
except Exception as e:
logging.error(
"Failed to reboot node instance. Encountered following exception:"

@@ -227,7 +227,7 @@ class gcp_node_scenarios(abstract_node_scenarios):
self.node_action_kube_check = node_action_kube_check

# Node scenario to start the node
def node_start_scenario(self, instance_kill_count, node, timeout):
def node_start_scenario(self, instance_kill_count, node, timeout, poll_interval):
for _ in range(instance_kill_count):
affected_node = AffectedNode(node)
try:
@@ -257,7 +257,7 @@ class gcp_node_scenarios(abstract_node_scenarios):
self.affected_nodes_status.affected_nodes.append(affected_node)

# Node scenario to stop the node
def node_stop_scenario(self, instance_kill_count, node, timeout):
def node_stop_scenario(self, instance_kill_count, node, timeout, poll_interval):
for _ in range(instance_kill_count):
affected_node = AffectedNode(node)
try:
@@ -286,7 +286,7 @@ class gcp_node_scenarios(abstract_node_scenarios):
self.affected_nodes_status.affected_nodes.append(affected_node)

# Node scenario to terminate the node
def node_termination_scenario(self, instance_kill_count, node, timeout):
def node_termination_scenario(self, instance_kill_count, node, timeout, poll_interval):
for _ in range(instance_kill_count):
affected_node = AffectedNode(node)
try:
@@ -309,7 +309,7 @@ class gcp_node_scenarios(abstract_node_scenarios):
logging.info(
"Node with instance ID: %s has been terminated" % instance_id
)
logging.info("node_termination_scenario has been successfuly injected!")
logging.info("node_termination_scenario has been successfully injected!")
except Exception as e:
logging.error(
"Failed to terminate node instance. Encountered following exception:"
@@ -321,7 +321,7 @@ class gcp_node_scenarios(abstract_node_scenarios):
self.affected_nodes_status.affected_nodes.append(affected_node)

# Node scenario to reboot the node
def node_reboot_scenario(self, instance_kill_count, node, timeout):
def node_reboot_scenario(self, instance_kill_count, node, timeout, soft_reboot=False):
for _ in range(instance_kill_count):
affected_node = AffectedNode(node)
try:
@@ -341,7 +341,7 @@ class gcp_node_scenarios(abstract_node_scenarios):
logging.info(
"Node with instance ID: %s has been rebooted" % instance_id
)
logging.info("node_reboot_scenario has been successfuly injected!")
logging.info("node_reboot_scenario has been successfully injected!")
except Exception as e:
logging.error(
"Failed to reboot node instance. Encountered following exception:"

@@ -18,28 +18,28 @@ class general_node_scenarios(abstract_node_scenarios):
self.node_action_kube_check = node_action_kube_check

# Node scenario to start the node
def node_start_scenario(self, instance_kill_count, node, timeout):
def node_start_scenario(self, instance_kill_count, node, timeout, poll_interval):
logging.info(
"Node start is not set up yet for this cloud type, "
"no action is going to be taken"
)

# Node scenario to stop the node
def node_stop_scenario(self, instance_kill_count, node, timeout):
def node_stop_scenario(self, instance_kill_count, node, timeout, poll_interval):
logging.info(
"Node stop is not set up yet for this cloud type,"
" no action is going to be taken"
)

# Node scenario to terminate the node
def node_termination_scenario(self, instance_kill_count, node, timeout):
def node_termination_scenario(self, instance_kill_count, node, timeout, poll_interval):
logging.info(
"Node termination is not set up yet for this cloud type, "
"no action is going to be taken"
)

# Node scenario to reboot the node
def node_reboot_scenario(self, instance_kill_count, node, timeout):
def node_reboot_scenario(self, instance_kill_count, node, timeout, soft_reboot=False):
logging.info(
"Node reboot is not set up yet for this cloud type,"
" no action is going to be taken"

@@ -36,10 +36,25 @@ class IbmCloud:
self.service = VpcV1(authenticator=authenticator)

self.service.set_service_url(service_url)

except Exception as e:
logging.error("error authenticating" + str(e))


def configure_ssl_verification(self, disable_ssl_verification):
"""
Configure SSL verification for IBM Cloud VPC service.

Args:
disable_ssl_verification: If True, disables SSL verification.
"""
logging.info(f"Configuring SSL verification: disable_ssl_verification={disable_ssl_verification}")
if disable_ssl_verification:
self.service.set_disable_ssl_verification(True)
logging.info("SSL verification disabled for IBM Cloud VPC service")
else:
self.service.set_disable_ssl_verification(False)
logging.info("SSL verification enabled for IBM Cloud VPC service")

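# Illustrative flow, not part of this diff: the flag originates in the node
# scenario config (key "disable_ssl_verification", read further below in
# NodeActionsScenarioPlugin.get_node_scenario_object) and is forwarded to this
# method by ibm_node_scenarios.__init__, e.g.:
#   ibm = IbmCloud()
#   ibm.configure_ssl_verification(disable_ssl_verification=True)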
# Get the instance ID of the node
def get_instance_id(self, node_name):
node_list = self.list_instances()
@@ -260,12 +275,16 @@ class IbmCloud:

@dataclass
class ibm_node_scenarios(abstract_node_scenarios):
def __init__(self, kubecli: KrknKubernetes, node_action_kube_check: bool, affected_nodes_status: AffectedNodeStatus):
def __init__(self, kubecli: KrknKubernetes, node_action_kube_check: bool, affected_nodes_status: AffectedNodeStatus, disable_ssl_verification: bool):
super().__init__(kubecli, node_action_kube_check, affected_nodes_status)
self.ibmcloud = IbmCloud()

# Configure SSL verification
self.ibmcloud.configure_ssl_verification(disable_ssl_verification)

self.node_action_kube_check = node_action_kube_check

def node_start_scenario(self, instance_kill_count, node, timeout):
def node_start_scenario(self, instance_kill_count, node, timeout, poll_interval):
try:
instance_id = self.ibmcloud.get_instance_id( node)
affected_node = AffectedNode(node, node_id=instance_id)
@@ -298,7 +317,7 @@ class ibm_node_scenarios(abstract_node_scenarios):
self.affected_nodes_status.affected_nodes.append(affected_node)


def node_stop_scenario(self, instance_kill_count, node, timeout):
def node_stop_scenario(self, instance_kill_count, node, timeout, poll_interval):
try:
instance_id = self.ibmcloud.get_instance_id(node)
for _ in range(instance_kill_count):
@@ -308,46 +327,59 @@ class ibm_node_scenarios(abstract_node_scenarios):
vm_stopped = self.ibmcloud.stop_instances(instance_id)
if vm_stopped:
self.ibmcloud.wait_until_stopped(instance_id, timeout, affected_node)
logging.info(
"Node with instance ID: %s is in stopped state" % node
)
logging.info(
"node_stop_scenario has been successfully injected!"
)
logging.info(
"Node with instance ID: %s is in stopped state" % node
)
logging.info(
"node_stop_scenario has been successfully injected!"
)
else:
logging.error(
"Failed to stop node instance %s. Stop command failed." % instance_id
)
raise Exception("Stop command failed for instance %s" % instance_id)
self.affected_nodes_status.affected_nodes.append(affected_node)
except Exception as e:
logging.error("Failed to stop node instance. Test Failed")
logging.error("Failed to stop node instance. Test Failed: %s" % str(e))
logging.error("node_stop_scenario injection failed!")


def node_reboot_scenario(self, instance_kill_count, node, timeout):
def node_reboot_scenario(self, instance_kill_count, node, timeout, soft_reboot=False):
try:
instance_id = self.ibmcloud.get_instance_id(node)
for _ in range(instance_kill_count):
affected_node = AffectedNode(node, node_id=instance_id)
logging.info("Starting node_reboot_scenario injection")
logging.info("Rebooting the node %s " % (node))
self.ibmcloud.reboot_instances(instance_id)
self.ibmcloud.wait_until_rebooted(instance_id, timeout)
if self.node_action_kube_check:
nodeaction.wait_for_unknown_status(
node, timeout, affected_node
vm_rebooted = self.ibmcloud.reboot_instances(instance_id)
if vm_rebooted:
self.ibmcloud.wait_until_rebooted(instance_id, timeout, affected_node)
if self.node_action_kube_check:
nodeaction.wait_for_unknown_status(
node, timeout, self.kubecli, affected_node
)
nodeaction.wait_for_ready_status(
node, timeout, self.kubecli, affected_node
)
logging.info(
"Node with instance ID: %s has rebooted successfully" % node
)
nodeaction.wait_for_ready_status(
node, timeout, affected_node
logging.info(
"node_reboot_scenario has been successfully injected!"
)
logging.info(
"Node with instance ID: %s has rebooted successfully" % node
)
logging.info(
"node_reboot_scenario has been successfully injected!"
)
else:
logging.error(
"Failed to reboot node instance %s. Reboot command failed." % instance_id
)
raise Exception("Reboot command failed for instance %s" % instance_id)
self.affected_nodes_status.affected_nodes.append(affected_node)

except Exception as e:
logging.error("Failed to reboot node instance. Test Failed")
logging.error("Failed to reboot node instance. Test Failed: %s" % str(e))
logging.error("node_reboot_scenario injection failed!")


def node_terminate_scenario(self, instance_kill_count, node, timeout):
def node_terminate_scenario(self, instance_kill_count, node, timeout, poll_interval):
try:
instance_id = self.ibmcloud.get_instance_id(node)
for _ in range(instance_kill_count):
@@ -364,7 +396,8 @@ class ibm_node_scenarios(abstract_node_scenarios):
logging.info(
"node_terminate_scenario has been successfully injected!"
)
self.affected_nodes_status.affected_nodes.append(affected_node)
except Exception as e:
logging.error("Failed to terminate node instance. Test Failed")
logging.error("Failed to terminate node instance. Test Failed: %s" % str(e))
logging.error("node_terminate_scenario injection failed!")


@@ -0,0 +1,403 @@
#!/usr/bin/env python
import time
from os import environ
from dataclasses import dataclass
import logging

from krkn_lib.k8s import KrknKubernetes
import krkn.scenario_plugins.node_actions.common_node_functions as nodeaction
from krkn.scenario_plugins.node_actions.abstract_node_scenarios import (
abstract_node_scenarios,
)
import requests
import sys
import json


# -o, --operation string Operation to be done in a PVM server instance.
# Valid values are: hard-reboot, immediate-shutdown, soft-reboot, reset-state, start, stop.

from krkn_lib.models.k8s import AffectedNodeStatus, AffectedNode


class IbmCloudPower:
def __init__(self):
"""
Initialize the IBM Cloud Power client by using the env variables:
'IBMC_APIKEY', 'IBMC_POWER_URL' and 'IBMC_POWER_CRN'
"""
self.api_key = environ.get("IBMC_APIKEY")
self.service_url = environ.get("IBMC_POWER_URL")
self.CRN = environ.get("IBMC_POWER_CRN")
self.cloud_instance_id = self.CRN.split(":")[-3]
print(self.cloud_instance_id)
self.headers = None
self.token = None
if not self.api_key:
raise Exception("Environmental variable 'IBMC_APIKEY' is not set")
if not self.service_url:
raise Exception("Environmental variable 'IBMC_POWER_URL' is not set")
if not self.CRN:
raise Exception("Environmental variable 'IBMC_POWER_CRN' is not set")
try:
self.authenticate()

except Exception as e:
logging.error("error authenticating" + str(e))

def authenticate(self):
url = "https://iam.cloud.ibm.com/identity/token"
iam_auth_headers = {
"content-type": "application/x-www-form-urlencoded",
"accept": "application/json",
}
data = {
"grant_type": "urn:ibm:params:oauth:grant-type:apikey",
"apikey": self.api_key,
}

response = self._make_request("POST", url, data=data, headers=iam_auth_headers)
if response.status_code == 200:
self.token = response.json()
self.headers = {
"Authorization": f"Bearer {self.token['access_token']}",
"Content-Type": "application/json",
"CRN": self.CRN,
}
else:
logging.error(f"Authentication Error: {response.status_code}")
return None, None


def _make_request(self,method, url, data=None, headers=None):
try:
response = requests.request(method, url, data=data, headers=headers)
response.raise_for_status()
return response
except Exception as e:
raise Exception(f"API Error: {e}")

# Get the instance ID of the node
def get_instance_id(self, node_name):

url = f"{self.service_url}/pcloud/v1/cloud-instances/{self.cloud_instance_id}/pvm-instances/"
response = self._make_request("GET", url, headers=self.headers)
for node in response.json()["pvmInstances"]:
if node_name == node["serverName"]:
return node["pvmInstanceID"]
logging.error("Couldn't find node with name " + str(node_name) + ", you could try another region")
sys.exit(1)

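# Illustrative response shape, not part of this diff: the parsing above and in
# list_instances() below assumes the Power Cloud list endpoint returns JSON of
# roughly this form (field names taken from the code, values are examples only);
# the per-instance endpoint additionally exposes a "status" field read by
# get_instance_status():
#   {
#       "pvmInstances": [
#           {"serverName": "worker-0", "pvmInstanceID": "example-id-1234"}
#       ]
#   }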
def delete_instance(self, instance_id):
"""
Deletes the Instance whose name is given by 'instance_id'
"""
try:
url = f"{self.service_url}/pcloud/v1/cloud-instances/{self.cloud_instance_id}/pvm-instances/{instance_id}/action"
self._make_request("POST", url, headers=self.headers, data=json.dumps({"action": "immediate-shutdown"}))
logging.info("Deleted Instance -- '{}'".format(instance_id))
except Exception as e:
logging.info("Instance '{}' could not be deleted. ".format(instance_id))
return False

def reboot_instances(self, instance_id, soft=False):
"""
Reboots the Instance whose name is given by 'instance_id'. Returns True if successful, or
returns False if the Instance is not powered on
"""

try:
if soft:
action = "soft-reboot"
else:
action = "hard-reboot"
url = f"{self.service_url}/pcloud/v1/cloud-instances/{self.cloud_instance_id}/pvm-instances/{instance_id}/action"
self._make_request("POST", url, headers=self.headers, data=json.dumps({"action": action}))
logging.info("Reset Instance -- '{}'".format(instance_id))
return True
except Exception as e:
logging.info("Instance '{}' could not be rebooted".format(instance_id))
return False

def stop_instances(self, instance_id):
"""
Stops the Instance whose name is given by 'instance_id'. Returns True if successful, or
returns False if the Instance is already stopped
"""

try:
url = f"{self.service_url}/pcloud/v1/cloud-instances/{self.cloud_instance_id}/pvm-instances/{instance_id}/action"
self._make_request("POST", url, headers=self.headers, data=json.dumps({"action": "stop"}))
logging.info("Stopped Instance -- '{}'".format(instance_id))
return True
except Exception as e:
logging.info("Instance '{}' could not be stopped".format(instance_id))
logging.info("error" + str(e))
return False

def start_instances(self, instance_id):
"""
Starts the Instance whose name is given by 'instance_id'. Returns True if successful, or
returns False if the Instance is already running
"""

try:
url = f"{self.service_url}/pcloud/v1/cloud-instances/{self.cloud_instance_id}/pvm-instances/{instance_id}/action"
self._make_request("POST", url, headers=self.headers, data=json.dumps({"action": "start"}))
logging.info("Started Instance -- '{}'".format(instance_id))
return True
except Exception as e:
logging.info("Instance '{}' could not start running".format(instance_id))
return False

def list_instances(self):
"""
Returns a list of Instances present in the datacenter
"""
instance_names = []
try:
url = f"{self.service_url}/pcloud/v1/cloud-instances/{self.cloud_instance_id}/pvm-instances/"
response = self._make_request("GET", url, headers=self.headers)
for pvm in response.json()["pvmInstances"]:
instance_names.append({"serverName": pvm["serverName"], "pvmInstanceID": pvm["pvmInstanceID"]})
except Exception as e:
logging.error("Error listing out instances: " + str(e))
sys.exit(1)
return instance_names

def find_id_in_list(self, name, vpc_list):
for vpc in vpc_list:
if vpc["vpc_name"] == name:
return vpc["vpc_id"]

def get_instance_status(self, instance_id):
"""
Returns the status of the Instance whose name is given by 'instance_id'
"""

try:
url = f"{self.service_url}/pcloud/v1/cloud-instances/{self.cloud_instance_id}/pvm-instances/{instance_id}"
response = self._make_request("GET", url, headers=self.headers)
state = response.json()["status"]
return state
except Exception as e:
logging.error(
"Failed to get node instance status %s. Encountered following "
"exception: %s." % (instance_id, e)
)
return None

def wait_until_deleted(self, instance_id, timeout, affected_node=None):
"""
Waits until the instance is deleted or until the timeout. Returns True if
the instance is successfully deleted, else returns False
"""
start_time = time.time()
time_counter = 0
vpc = self.get_instance_status(instance_id)
while vpc is not None:
vpc = self.get_instance_status(instance_id)
logging.info(
"Instance %s is still being deleted, sleeping for 5 seconds"
% instance_id
)
time.sleep(5)
time_counter += 5
if time_counter >= timeout:
logging.info(
"Instance %s is still not deleted in allotted time" % instance_id
)
return False
end_time = time.time()
if affected_node:
affected_node.set_affected_node_status("terminated", end_time - start_time)
return True

def wait_until_running(self, instance_id, timeout, affected_node=None):
"""
Waits until the Instance switches to running state or until the timeout.
Returns True if the Instance switches to running, else returns False
"""
start_time = time.time()
time_counter = 0
status = self.get_instance_status(instance_id)
while status != "ACTIVE":
status = self.get_instance_status(instance_id)
logging.info(
"Instance %s is still not running, sleeping for 5 seconds" % instance_id
)
time.sleep(5)
time_counter += 5
if time_counter >= timeout:
logging.info(
"Instance %s is still not ready in allotted time" % instance_id
)
return False
end_time = time.time()
if affected_node:
affected_node.set_affected_node_status("running", end_time - start_time)
return True

def wait_until_stopped(self, instance_id, timeout, affected_node):
"""
Waits until the Instance switches to stopped state or until the timeout.
Returns True if the Instance switches to stopped, else returns False
"""
start_time = time.time()
time_counter = 0
status = self.get_instance_status(instance_id)
while status != "STOPPED":
status = self.get_instance_status(instance_id)
logging.info(
"Instance %s is still not stopped, sleeping for 5 seconds" % instance_id
)
time.sleep(5)
time_counter += 5
if time_counter >= timeout:
logging.info(
"Instance %s is still not stopped in allotted time" % instance_id
)
return False
end_time = time.time()
print('affected_node' + str(affected_node))
if affected_node:
affected_node.set_affected_node_status("stopped", end_time - start_time)
return True


def wait_until_rebooted(self, instance_id, timeout, affected_node):
"""
Waits until the Instance switches to restarting state and then running state or until the timeout.
Returns True if the Instance switches back to running, else returns False
"""

time_counter = 0
status = self.get_instance_status(instance_id)
while status == "HARD_REBOOT" or status == "SOFT_REBOOT":
status = self.get_instance_status(instance_id)
logging.info(
"Instance %s is still restarting, sleeping for 5 seconds" % instance_id
)
time.sleep(5)
time_counter += 5
if time_counter >= timeout:
logging.info(
"Instance %s is still restarting after allotted time" % instance_id
)
return False
self.wait_until_running(instance_id, timeout, affected_node)
print('affected_node' + str(affected_node))
return True

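# Illustrative consolidation, not part of this diff: the wait_until_* helpers
# above all poll get_instance_status() on a fixed 5-second cadence until a
# target condition holds or the timeout elapses. A generic sketch of that
# shared pattern (names are hypothetical) would be:
def _poll_until(get_status, is_done, timeout, poll_seconds=5):
    """Poll get_status() until is_done(status) is true or the timeout elapses."""
    waited = 0
    while not is_done(get_status()):
        time.sleep(poll_seconds)
        waited += poll_seconds
        if waited >= timeout:
            return False
    return True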
@dataclass
class ibmcloud_power_node_scenarios(abstract_node_scenarios):
def __init__(self, kubecli: KrknKubernetes, node_action_kube_check: bool, affected_nodes_status: AffectedNodeStatus, disable_ssl_verification: bool):
super().__init__(kubecli, node_action_kube_check, affected_nodes_status)
self.ibmcloud_power = IbmCloudPower()

self.node_action_kube_check = node_action_kube_check

def node_start_scenario(self, instance_kill_count, node, timeout, poll_interval):
try:
instance_id = self.ibmcloud_power.get_instance_id( node)
affected_node = AffectedNode(node, node_id=instance_id)
for _ in range(instance_kill_count):
logging.info("Starting node_start_scenario injection")
logging.info("Starting the node %s " % (node))

if instance_id:
vm_started = self.ibmcloud_power.start_instances(instance_id)
if vm_started:
self.ibmcloud_power.wait_until_running(instance_id, timeout, affected_node)
if self.node_action_kube_check:
nodeaction.wait_for_ready_status(
node, timeout, self.kubecli, affected_node
)
logging.info(
"Node with instance ID: %s is in running state" % node
)
logging.info(
"node_start_scenario has been successfully injected!"
)
else:
logging.error(
"Failed to find node that matched instances on ibm cloud in region"
)

except Exception as e:
logging.error("Failed to start node instance. Test Failed")
logging.error("node_start_scenario injection failed!")
self.affected_nodes_status.affected_nodes.append(affected_node)


def node_stop_scenario(self, instance_kill_count, node, timeout, poll_interval):
try:
instance_id = self.ibmcloud_power.get_instance_id(node)
for _ in range(instance_kill_count):
affected_node = AffectedNode(node, instance_id)
logging.info("Starting node_stop_scenario injection")
logging.info("Stopping the node %s " % (node))
vm_stopped = self.ibmcloud_power.stop_instances(instance_id)
if vm_stopped:
self.ibmcloud_power.wait_until_stopped(instance_id, timeout, affected_node)
logging.info(
"Node with instance ID: %s is in stopped state" % node
)
logging.info(
"node_stop_scenario has been successfully injected!"
)
except Exception as e:
logging.error("Failed to stop node instance. Test Failed")
logging.error("node_stop_scenario injection failed!")


def node_reboot_scenario(self, instance_kill_count, node, timeout, soft_reboot=False):
try:
instance_id = self.ibmcloud_power.get_instance_id(node)
for _ in range(instance_kill_count):
affected_node = AffectedNode(node, node_id=instance_id)
logging.info("Starting node_reboot_scenario injection")
logging.info("Rebooting the node %s " % (node))
self.ibmcloud_power.reboot_instances(instance_id, soft_reboot)
self.ibmcloud_power.wait_until_rebooted(instance_id, timeout, affected_node)
if self.node_action_kube_check:
nodeaction.wait_for_unknown_status(
node, timeout, affected_node
)
nodeaction.wait_for_ready_status(
node, timeout, affected_node
)
logging.info(
"Node with instance ID: %s has rebooted successfully" % node
)
logging.info(
"node_reboot_scenario has been successfully injected!"
)

except Exception as e:
logging.error("Failed to reboot node instance. Test Failed")
logging.error("node_reboot_scenario injection failed!")


def node_terminate_scenario(self, instance_kill_count, node, timeout, poll_interval):
try:
instance_id = self.ibmcloud_power.get_instance_id(node)
for _ in range(instance_kill_count):
affected_node = AffectedNode(node, node_id=instance_id)
logging.info(
"Starting node_termination_scenario injection by first stopping the node"
)
logging.info("Deleting the node with instance ID: %s " % (node))
self.ibmcloud_power.delete_instance(instance_id)
self.ibmcloud_power.wait_until_deleted(node, timeout, affected_node)
logging.info(
"Node with instance ID: %s has been released" % node
)
logging.info(
"node_terminate_scenario has been successfully injected!"
)
except Exception as e:
logging.error("Failed to terminate node instance. Test Failed")
logging.error("node_terminate_scenario injection failed!")

@@ -22,8 +22,16 @@ from krkn.scenario_plugins.node_actions.gcp_node_scenarios import gcp_node_scena
from krkn.scenario_plugins.node_actions.general_cloud_node_scenarios import (
general_node_scenarios,
)
from krkn.scenario_plugins.node_actions.vmware_node_scenarios import vmware_node_scenarios
from krkn.scenario_plugins.node_actions.ibmcloud_node_scenarios import ibm_node_scenarios
from krkn.scenario_plugins.node_actions.vmware_node_scenarios import (
vmware_node_scenarios,
)
from krkn.scenario_plugins.node_actions.ibmcloud_node_scenarios import (
ibm_node_scenarios,
)

from krkn.scenario_plugins.node_actions.ibmcloud_power_node_scenarios import (
ibmcloud_power_node_scenarios,
)
node_general = False


@@ -63,29 +71,39 @@ class NodeActionsScenarioPlugin(AbstractScenarioPlugin):
def get_node_scenario_object(self, node_scenario, kubecli: KrknKubernetes):
affected_nodes_status = AffectedNodeStatus()

node_action_kube_check = get_yaml_item_value(node_scenario,"kube_check",True)
node_action_kube_check = get_yaml_item_value(node_scenario, "kube_check", True)
if (
"cloud_type" not in node_scenario.keys()
or node_scenario["cloud_type"] == "generic"
):
global node_general
node_general = True
return general_node_scenarios(kubecli, node_action_kube_check, affected_nodes_status)
return general_node_scenarios(
kubecli, node_action_kube_check, affected_nodes_status
)
if node_scenario["cloud_type"].lower() == "aws":
return aws_node_scenarios(kubecli, node_action_kube_check, affected_nodes_status)
return aws_node_scenarios(
kubecli, node_action_kube_check, affected_nodes_status
)
elif node_scenario["cloud_type"].lower() == "gcp":
return gcp_node_scenarios(kubecli, node_action_kube_check, affected_nodes_status)
return gcp_node_scenarios(
kubecli, node_action_kube_check, affected_nodes_status
)
elif node_scenario["cloud_type"].lower() == "openstack":
from krkn.scenario_plugins.node_actions.openstack_node_scenarios import (
openstack_node_scenarios,
)

return openstack_node_scenarios(kubecli, node_action_kube_check, affected_nodes_status)
return openstack_node_scenarios(
kubecli, node_action_kube_check, affected_nodes_status
)
elif (
node_scenario["cloud_type"].lower() == "azure"
or node_scenario["cloud_type"].lower() == "az"
):
return azure_node_scenarios(kubecli, node_action_kube_check, affected_nodes_status)
return azure_node_scenarios(
kubecli, node_action_kube_check, affected_nodes_status
)
elif (
node_scenario["cloud_type"].lower() == "alibaba"
or node_scenario["cloud_type"].lower() == "alicloud"
@@ -94,7 +112,9 @@ class NodeActionsScenarioPlugin(AbstractScenarioPlugin):
alibaba_node_scenarios,
)

return alibaba_node_scenarios(kubecli, node_action_kube_check, affected_nodes_status)
return alibaba_node_scenarios(
kubecli, node_action_kube_check, affected_nodes_status
)
elif node_scenario["cloud_type"].lower() == "bm":
from krkn.scenario_plugins.node_actions.bm_node_scenarios import (
bm_node_scenarios,
@@ -106,21 +126,31 @@ class NodeActionsScenarioPlugin(AbstractScenarioPlugin):
node_scenario.get("bmc_password", None),
kubecli,
node_action_kube_check,
affected_nodes_status
affected_nodes_status,
)
elif node_scenario["cloud_type"].lower() == "docker":
return docker_node_scenarios(kubecli,node_action_kube_check,
affected_nodes_status)
return docker_node_scenarios(
kubecli, node_action_kube_check, affected_nodes_status
)
elif (
node_scenario["cloud_type"].lower() == "vsphere"
or node_scenario["cloud_type"].lower() == "vmware"
):
return vmware_node_scenarios(kubecli, node_action_kube_check,affected_nodes_status)
return vmware_node_scenarios(
kubecli, node_action_kube_check, affected_nodes_status
)
elif (
node_scenario["cloud_type"].lower() == "ibm"
or node_scenario["cloud_type"].lower() == "ibmcloud"
):
return ibm_node_scenarios(kubecli, node_action_kube_check, affected_nodes_status)
disable_ssl_verification = get_yaml_item_value(node_scenario, "disable_ssl_verification", True)
return ibm_node_scenarios(kubecli, node_action_kube_check, affected_nodes_status, disable_ssl_verification)
elif (
node_scenario["cloud_type"].lower() == "ibmpower"
or node_scenario["cloud_type"].lower() == "ibmcloudpower"
):
disable_ssl_verification = get_yaml_item_value(node_scenario, "disable_ssl_verification", True)
return ibmcloud_power_node_scenarios(kubecli, node_action_kube_check, affected_nodes_status, disable_ssl_verification)
else:
logging.error(
"Cloud type "
@@ -138,16 +168,22 @@ class NodeActionsScenarioPlugin(AbstractScenarioPlugin):
)

def inject_node_scenario(
self, action, node_scenario, node_scenario_object, kubecli: KrknKubernetes, scenario_telemetry: ScenarioTelemetry
self,
action,
node_scenario,
node_scenario_object,
kubecli: KrknKubernetes,
scenario_telemetry: ScenarioTelemetry,
):


# Get the node scenario configurations for setting nodes


instance_kill_count = get_yaml_item_value(node_scenario, "instance_count", 1)
node_name = get_yaml_item_value(node_scenario, "node_name", "")
label_selector = get_yaml_item_value(node_scenario, "label_selector", "")
exclude_label = get_yaml_item_value(node_scenario, "exclude_label", "")
parallel_nodes = get_yaml_item_value(node_scenario, "parallel", False)


# Get the node to apply the scenario
if node_name:
node_name_list = node_name.split(",")
@@ -156,11 +192,22 @@ class NodeActionsScenarioPlugin(AbstractScenarioPlugin):
nodes = common_node_functions.get_node(
label_selector, instance_kill_count, kubecli
)

# GCP api doesn't support multiprocessing calls, will only actually run 1
if exclude_label:
exclude_nodes = common_node_functions.get_node(
exclude_label, 0, kubecli
)

for node in nodes:
if node in exclude_nodes:
logging.info(
f"excluding node {node} with exclude label {exclude_nodes}"
)
nodes.remove(node)

# GCP api doesn't support multiprocessing calls, will only actually run 1
if parallel_nodes:
self.multiprocess_nodes(nodes, node_scenario_object, action, node_scenario)
else:
else:
for single_node in nodes:
self.run_node(single_node, node_scenario_object, action, node_scenario)
affected_nodes_status = node_scenario_object.affected_nodes_status
@@ -170,21 +217,29 @@ class NodeActionsScenarioPlugin(AbstractScenarioPlugin):
try:
# pool object with number of element
pool = ThreadPool(processes=len(nodes))

pool.starmap(self.run_node,zip(nodes, repeat(node_scenario_object), repeat(action), repeat(node_scenario)))

pool.starmap(
self.run_node,
zip(
nodes,
repeat(node_scenario_object),
repeat(action),
repeat(node_scenario),
),
)

pool.close()
except Exception as e:
logging.info("Error on pool multiprocessing: " + str(e))


def run_node(self, single_node, node_scenario_object, action, node_scenario):
# Get the scenario specifics for running action nodes
run_kill_count = get_yaml_item_value(node_scenario, "runs", 1)
duration = get_yaml_item_value(node_scenario, "duration", 120)

poll_interval = get_yaml_item_value(node_scenario, "poll_interval", 15)
timeout = get_yaml_item_value(node_scenario, "timeout", 120)
service = get_yaml_item_value(node_scenario, "service", "")
soft_reboot = get_yaml_item_value(node_scenario, "soft_reboot", False)
ssh_private_key = get_yaml_item_value(
node_scenario, "ssh_private_key", "~/.ssh/id_rsa"
)
@@ -199,27 +254,28 @@ class NodeActionsScenarioPlugin(AbstractScenarioPlugin):
else:
if action == "node_start_scenario":
node_scenario_object.node_start_scenario(
run_kill_count, single_node, timeout
run_kill_count, single_node, timeout, poll_interval
)
elif action == "node_stop_scenario":
node_scenario_object.node_stop_scenario(
run_kill_count, single_node, timeout
run_kill_count, single_node, timeout, poll_interval
)
elif action == "node_stop_start_scenario":
node_scenario_object.node_stop_start_scenario(
run_kill_count, single_node, timeout, duration
run_kill_count, single_node, timeout, duration, poll_interval
)
elif action == "node_termination_scenario":
node_scenario_object.node_termination_scenario(
run_kill_count, single_node, timeout
run_kill_count, single_node, timeout, poll_interval
)
elif action == "node_reboot_scenario":
node_scenario_object.node_reboot_scenario(
run_kill_count, single_node, timeout
run_kill_count, single_node, timeout, soft_reboot
)
elif action == "node_disk_detach_attach_scenario":
node_scenario_object.node_disk_detach_attach_scenario(
run_kill_count, single_node, timeout, duration)
run_kill_count, single_node, timeout, duration
)
elif action == "stop_start_kubelet_scenario":
node_scenario_object.stop_start_kubelet_scenario(
run_kill_count, single_node, timeout
@@ -247,9 +303,7 @@ class NodeActionsScenarioPlugin(AbstractScenarioPlugin):
else:
if not node_scenario["helper_node_ip"]:
logging.error("Helper node IP address is not provided")
raise Exception(
"Helper node IP address is not provided"
)
raise Exception("Helper node IP address is not provided")
node_scenario_object.helper_node_stop_start_scenario(
run_kill_count, node_scenario["helper_node_ip"], timeout
)
@@ -269,6 +323,5 @@ class NodeActionsScenarioPlugin(AbstractScenarioPlugin):
% action
)


def get_scenario_types(self) -> list[str]:
return ["node_scenarios"]

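# Illustrative example, not part of this diff: a node scenario config entry
# built only from the keys this plugin reads via get_yaml_item_value above;
# values are examples, and the surrounding schema (e.g. how actions are listed
# per scenario file) is not shown in this hunk.
example_node_scenario = {
    "cloud_type": "ibmcloud",
    "node_name": "",                     # or select nodes via label_selector
    "label_selector": "node-role.kubernetes.io/worker",
    "exclude_label": "",
    "instance_count": 1,
    "runs": 1,
    "timeout": 120,
    "duration": 120,
    "poll_interval": 15,
    "soft_reboot": False,
    "parallel": False,
    "kube_check": True,
    "disable_ssl_verification": True,
}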
@@ -122,7 +122,7 @@ class openstack_node_scenarios(abstract_node_scenarios):
self.node_action_kube_check = node_action_kube_check

# Node scenario to start the node
def node_start_scenario(self, instance_kill_count, node, timeout):
def node_start_scenario(self, instance_kill_count, node, timeout, poll_interval):
for _ in range(instance_kill_count):
affected_node = AffectedNode(node)
try:
@@ -147,7 +147,7 @@ class openstack_node_scenarios(abstract_node_scenarios):
self.affected_nodes_status.affected_nodes.append(affected_node)

# Node scenario to stop the node
def node_stop_scenario(self, instance_kill_count, node, timeout):
def node_stop_scenario(self, instance_kill_count, node, timeout, poll_interval):
for _ in range(instance_kill_count):
affected_node = AffectedNode(node)
try:
@@ -171,7 +171,7 @@ class openstack_node_scenarios(abstract_node_scenarios):
self.affected_nodes_status.affected_nodes.append(affected_node)

# Node scenario to reboot the node
def node_reboot_scenario(self, instance_kill_count, node, timeout):
def node_reboot_scenario(self, instance_kill_count, node, timeout, soft_reboot=False):
for _ in range(instance_kill_count):
affected_node = AffectedNode(node)
try:
@@ -184,7 +184,7 @@ class openstack_node_scenarios(abstract_node_scenarios):
nodeaction.wait_for_unknown_status(node, timeout, self.kubecli, affected_node)
nodeaction.wait_for_ready_status(node, timeout, self.kubecli, affected_node)
logging.info("Node with instance name: %s has been rebooted" % (node))
logging.info("node_reboot_scenario has been successfuly injected!")
logging.info("node_reboot_scenario has been successfully injected!")
except Exception as e:
logging.error(
"Failed to reboot node instance. Encountered following exception:"
@@ -249,7 +249,7 @@ class openstack_node_scenarios(abstract_node_scenarios):
node_ip.strip(), service, ssh_private_key, timeout
)
logging.info("Service status checked on %s" % (node_ip))
logging.info("Check service status is successfuly injected!")
logging.info("Check service status is successfully injected!")
except Exception as e:
logging.error(
"Failed to check service status. Encountered following exception:"

@@ -73,7 +73,7 @@ class vSphere:
vms = self.client.vcenter.VM.list(VM.FilterSpec(names=names))

if len(vms) == 0:
logging.info("VM with name ({}) not found", instance_id)
logging.info("VM with name ({}) not found".format(instance_id))
return None
vm = vms[0].vm

@@ -97,7 +97,7 @@ class vSphere:
self.client.vcenter.vm.Power.start(vm)
self.client.vcenter.vm.Power.stop(vm)
self.client.vcenter.VM.delete(vm)
logging.info("Deleted VM -- '{}-({})'", instance_id, vm)
logging.info("Deleted VM -- '{}-({})'".format(instance_id, vm))

def reboot_instances(self, instance_id):
"""
@@ -108,11 +108,11 @@ class vSphere:
vm = self.get_vm(instance_id)
try:
self.client.vcenter.vm.Power.reset(vm)
logging.info("Reset VM -- '{}-({})'", instance_id, vm)
logging.info("Reset VM -- '{}-({})'".format(instance_id, vm))
return True
except NotAllowedInCurrentState:
logging.info(
"VM '{}'-'({})' is not Powered On. Cannot reset it", instance_id, vm
"VM '{}'-'({})' is not Powered On. Cannot reset it".format(instance_id, vm)
)
return False

@@ -158,7 +158,7 @@ class vSphere:
try:
datacenter_id = datacenter_summaries[0].datacenter
except IndexError:
logging.error("Datacenter '{}' doesn't exist", datacenter)
logging.error("Datacenter '{}' doesn't exist".format(datacenter))
sys.exit(1)

vm_filter = self.client.vcenter.VM.FilterSpec(datacenters={datacenter_id})
@@ -389,7 +389,7 @@ class vmware_node_scenarios(abstract_node_scenarios):
self.vsphere = vSphere()
self.node_action_kube_check = node_action_kube_check

def node_start_scenario(self, instance_kill_count, node, timeout):
def node_start_scenario(self, instance_kill_count, node, timeout, poll_interval):
try:
for _ in range(instance_kill_count):
affected_node = AffectedNode(node)
@@ -409,7 +409,7 @@ class vmware_node_scenarios(abstract_node_scenarios):
f"node_start_scenario injection failed! " f"Error was: {str(e)}"
)

def node_stop_scenario(self, instance_kill_count, node, timeout):
def node_stop_scenario(self, instance_kill_count, node, timeout, poll_interval):
try:
for _ in range(instance_kill_count):
affected_node = AffectedNode(node)
@@ -432,7 +432,7 @@ class vmware_node_scenarios(abstract_node_scenarios):
)


def node_reboot_scenario(self, instance_kill_count, node, timeout):
def node_reboot_scenario(self, instance_kill_count, node, timeout, soft_reboot=False):
try:
for _ in range(instance_kill_count):
affected_node = AffectedNode(node)
@@ -456,7 +456,7 @@ class vmware_node_scenarios(abstract_node_scenarios):
)


def node_terminate_scenario(self, instance_kill_count, node, timeout):
def node_terminate_scenario(self, instance_kill_count, node, timeout, poll_interval):
try:
for _ in range(instance_kill_count):
affected_node = AffectedNode(node)
