Mirror of https://github.com/krkn-chaos/krkn.git (synced 2026-02-17 03:19:54 +00:00)

Compare commits (75 commits; author, date, and message columns were empty in the source, SHA1s only): 890e3012dd, d0dafa872d, 149eb8fcd3, 4c462a8971, 5bdbf622c3, 0dcb901da1, 6e94df9cfc, 87c2b3c8fd, 7e4b2aff65, 27f0845182, 4c9cd5bced, 075dbd10c7, e080ad2ee2, 693520f306, bf909a7c18, abbcfe09ec, 32fb6eec07, 608b7c847f, edd0159251, cf9f7702ed, cfe624f153, 62f50db195, aee838d3ac, 3b4d8a13f9, a86bb6ab95, 7f0110972b, 126f4ebb35, 83d99bbb02, 2624102d65, 02587bcbe6, c51bf04f9e, 41195b1a60, ab80acbee7, 3573d13ea9, 9c5251d52f, a0bba27edc, 0d0143d1e0, 0004c05f81, 57a747a34a, 22108ae4e7, cecaa1eda3, 5450ecb914, cad6b68f43, 0eba329305, ce8593f2f0, 9061ddbb5b, dd4d0d0389, 0cabe5e91d, 32fe0223ff, a25736ad08, 440890d252, 69bf20fc76, 2a42a2dc31, 21ab8d475d, b024cfde19, c7e068a562, 64cfd2ca4d, 9cb701a616, 0372013b67, 4fea1a354d, 667798d588, 0c30d89a1b, 2ba20fa483, 97035a765c, 10ba53574e, 0ecba41082, 491f59d152, 2549c9a146, 949f1f09e0, 959766254d, 0e68dedb12, 34a676a795, e5c5b35db3, 93d2e60386, 462c9ac67e
.github/PULL_REQUEST_TEMPLATE.md (vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
## Description
<!-- Provide a brief description of the changes made in this PR. -->

## Documentation
- [ ] **Is documentation needed for this update?**

If checked, a documentation PR must be created and merged in the [website repository](https://github.com/krkn-chaos/website/).

## Related Documentation PR (if applicable)
<!-- Add the link to the corresponding documentation PR in the website repository -->
.github/release-template.md (vendored, new file, 7 lines)
@@ -0,0 +1,7 @@
## Release {VERSION}

### Download Artifacts
- 📦 Krkn sources (noarch): [krkn-{VERSION}-src.tar.gz](https://krkn-chaos.gateway.scarf.sh/krkn-src-{VERSION}.tar.gz)

### Changes
{CHANGES}
.github/workflows/docker-image.yml (vendored, 2 lines changed)
@@ -13,6 +13,7 @@ jobs:
      - name: Build the Docker images
        if: startsWith(github.ref, 'refs/tags')
        run: |
          ./containers/compile_dockerfile.sh
          docker build --no-cache -t quay.io/krkn-chaos/krkn containers/ --build-arg TAG=${GITHUB_REF#refs/tags/}
          docker tag quay.io/krkn-chaos/krkn quay.io/redhat-chaos/krkn
          docker tag quay.io/krkn-chaos/krkn quay.io/krkn-chaos/krkn:${GITHUB_REF#refs/tags/}
@@ -21,6 +22,7 @@ jobs:
      - name: Test Build the Docker images
        if: ${{ github.event_name == 'pull_request' }}
        run: |
          ./containers/compile_dockerfile.sh
          docker build --no-cache -t quay.io/krkn-chaos/krkn containers/ --build-arg PR_NUMBER=${{ github.event.pull_request.number }}
      - name: Login in quay
        if: startsWith(github.ref, 'refs/tags')
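The image tag in the build step comes from the Git ref. A quick local sketch of the bash prefix-stripping expansion used above (the tag value below is an illustrative example; in CI, GitHub sets `GITHUB_REF` automatically):

```bash
# ${GITHUB_REF#refs/tags/} removes the leading "refs/tags/" from the ref,
# leaving only the tag name that is appended to the image tag.
GITHUB_REF="refs/tags/v1.2.3"     # example value
echo "${GITHUB_REF#refs/tags/}"   # prints: v1.2.3
```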
.github/workflows/release.yml (vendored, new file, 47 lines)
@@ -0,0 +1,47 @@
name: Create Release
on:
  push:
    tags:
      - 'v*'
jobs:
  release:
    permissions:
      contents: write
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: calculate previous tag
        run: |
          git fetch --tags origin
          PREVIOUS_TAG=$(git tag --sort=-creatordate | sed -n '2 p')
          echo $PREVIOUS_TAG
          echo "PREVIOUS_TAG=$PREVIOUS_TAG" >> "$GITHUB_ENV"
      - name: generate release notes from template
        id: release-notes
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          NOTES=$(gh api \
            --method POST \
            -H "Accept: application/vnd.github+json" \
            -H "X-GitHub-Api-Version: 2022-11-28" \
            /repos/krkn-chaos/krkn/releases/generate-notes \
            -f "tag_name=${{ github.ref_name }}" -f "target_commitish=main" -f "previous_tag_name=${{ env.PREVIOUS_TAG }}" | jq -r .body)
          echo "NOTES<<EOF" >> $GITHUB_ENV
          echo "$NOTES" >> $GITHUB_ENV
          echo "EOF" >> $GITHUB_ENV

      - name: replace placeholders in template
        run: |
          echo "${{ env.NOTES }}"
          TEMPLATE=$(cat .github/release-template.md)
          VERSION=${{ github.ref_name }}
          NOTES="${{ env.NOTES }}"
          OUTPUT=${TEMPLATE//\{VERSION\}/$VERSION}
          OUTPUT=${OUTPUT//\{CHANGES\}/$NOTES}
          echo "$OUTPUT" > release-notes.md
      - name: create release
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          gh release create ${{ github.ref_name }} --title "${{ github.ref_name }}" -F release-notes.md
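The "replace placeholders in template" step relies on plain bash pattern substitution to fill the `{VERSION}` and `{CHANGES}` placeholders from the release template above. A minimal stand-alone sketch (the version and notes values are made up for illustration; in CI they come from `github.ref_name` and the generated notes):

```bash
# Stand-alone version of the placeholder replacement done in the workflow.
TEMPLATE=$(cat .github/release-template.md)
VERSION="v1.2.3"                           # example tag name
NOTES="* example change"                   # example generated release notes body
OUTPUT=${TEMPLATE//\{VERSION\}/$VERSION}   # replace every {VERSION} occurrence
OUTPUT=${OUTPUT//\{CHANGES\}/$NOTES}       # replace every {CHANGES} occurrence
echo "$OUTPUT" > release-notes.md
```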
.github/workflows/require-docs.yml (vendored, new file, 45 lines)
@@ -0,0 +1,45 @@
name: Require Documentation Update
on:
  pull_request:
    types: [opened, edited, synchronize]
    branches:
      - main
jobs:
  check-docs:
    name: Check Documentation Update
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Check if Documentation is Required
        id: check_docs
        run: |
          echo "Checking PR body for documentation checkbox..."
          # Read the PR body from the GitHub event payload
          if echo "${{ github.event.pull_request.body }}" | grep -qi '\[x\].*documentation needed'; then
            echo "Documentation required detected."
            echo "docs_required=true" >> $GITHUB_OUTPUT
          else
            echo "Documentation not required."
            echo "docs_required=false" >> $GITHUB_OUTPUT
          fi

      - name: Enforce Documentation Update (if required)
        if: steps.check_docs.outputs.docs_required == 'true'
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          # Retrieve feature branch and repository owner from the GitHub context
          FEATURE_BRANCH="${{ github.head_ref }}"
          REPO_OWNER="${{ github.repository_owner }}"
          WEBSITE_REPO="website"
          echo "Searching for a merged documentation PR for feature branch: $FEATURE_BRANCH in $REPO_OWNER/$WEBSITE_REPO..."
          MERGED_PR=$(gh pr list --repo "$REPO_OWNER/$WEBSITE_REPO" --state merged --json headRefName,title,url | jq -r \
            --arg FEATURE_BRANCH "$FEATURE_BRANCH" '.[] | select(.title | contains($FEATURE_BRANCH)) | .url')
          if [[ -z "$MERGED_PR" ]]; then
            echo ":x: Documentation PR for branch '$FEATURE_BRANCH' is required and has not been merged."
            exit 1
          else
            echo ":white_check_mark: Found merged documentation PR: $MERGED_PR"
          fi
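The docs requirement is detected from the checkbox added in the PR template above. A small sketch of the grep test used in the check_docs step; the PR body below is an example built from that template, not taken from a real PR:

```bash
# The workflow marks docs as required when the template checkbox is ticked.
BODY='- [x] **Is documentation needed for this update?**'
if echo "$BODY" | grep -qi '\[x\].*documentation needed'; then
  echo "docs_required=true"      # this is what gets written to $GITHUB_OUTPUT
else
  echo "docs_required=false"
fi
```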
.github/workflows/tests.yml (vendored, 21 lines changed)
@@ -35,7 +35,8 @@ jobs:
            --set alertmanager.service.nodePort=32000 \
            --set alertmanager.service.type=NodePort \
            --set prometheus-node-exporter.service.nodePort=32001 \
            --set prometheus-node-exporter.service.type=NodePort
            --set prometheus-node-exporter.service.type=NodePort \
            --set prometheus.prometheusSpec.maximumStartupDurationSeconds=300

          SELECTOR=`kubectl -n prometheus-k8s get service kind-prometheus-kube-prome-prometheus -o wide --no-headers=true | awk '{ print $7 }'`
          POD_NAME=`kubectl -n prometheus-k8s get pods --selector="$SELECTOR" --no-headers=true | awk '{ print $1 }'`
@@ -84,9 +85,9 @@ jobs:
          echo "test_namespace" >> ./CI/tests/functional_tests
          echo "test_net_chaos" >> ./CI/tests/functional_tests
          echo "test_time" >> ./CI/tests/functional_tests
          echo "test_arca_cpu_hog" >> ./CI/tests/functional_tests
          echo "test_arca_memory_hog" >> ./CI/tests/functional_tests
          echo "test_arca_io_hog" >> ./CI/tests/functional_tests
          echo "test_cpu_hog" >> ./CI/tests/functional_tests
          echo "test_memory_hog" >> ./CI/tests/functional_tests
          echo "test_io_hog" >> ./CI/tests/functional_tests

      # Push on main only steps + all other functional to collect coverage
@@ -113,9 +114,9 @@ jobs:
          echo "test_namespace" >> ./CI/tests/functional_tests
          echo "test_net_chaos" >> ./CI/tests/functional_tests
          echo "test_time" >> ./CI/tests/functional_tests
          echo "test_arca_cpu_hog" >> ./CI/tests/functional_tests
          echo "test_arca_memory_hog" >> ./CI/tests/functional_tests
          echo "test_arca_io_hog" >> ./CI/tests/functional_tests
          echo "test_cpu_hog" >> ./CI/tests/functional_tests
          echo "test_memory_hog" >> ./CI/tests/functional_tests
          echo "test_io_hog" >> ./CI/tests/functional_tests

      # Final common steps
      - name: Run Functional tests
@@ -152,7 +153,8 @@ jobs:
          path: coverage.json
          if-no-files-found: error
      - name: Check CI results
        run: grep Fail CI/results.markdown && false || true
        run: "! grep Fail CI/results.markdown"

  badge:
    permissions:
      contents: write
@@ -192,7 +194,8 @@ jobs:
          cd krkn-lib-docs
          git add .
          git config user.name "krkn-chaos"
          git config user.email "<>"
          git config user.email "krkn-actions@users.noreply.github.com"
          git commit -m "[KRKN] Coverage Badge ${GITHUB_REF##*/}" || echo "no changes to commit"
          git push
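The updated "Check CI results" command inverts grep's exit status, so the step now fails exactly when a "Fail" entry is present; the previous `grep Fail ... && false || true` form always exited 0. A quick illustration, using a made-up results file instead of the real CI/results.markdown:

```bash
printf 'test_telemetry | Pass\n' > results.markdown
! grep Fail results.markdown && echo "step passes: no failures found"

printf 'test_telemetry | Fail\n' > results.markdown
! grep Fail results.markdown || echo "step fails: a Fail entry was found"
```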
ADOPTERS.md (new file, 8 lines)
@@ -0,0 +1,8 @@
# Krkn Adopters

This is a list of organizations that have publicly acknowledged usage of Krkn and shared details of how they are leveraging it in their environment for chaos engineering use cases. Do you want to add yourself to this list? Please fork the repository and open a PR with the required change.

| Organization | Since | Website | Use-Case |
|:-|:-|:-|:-|
| MarketAxess | 2024 | https://www.marketaxess.com/ | Kraken enables us to achieve our goal of increasing the reliability of our cloud products on Kubernetes. The tool allows us to automatically run various chaos scenarios, identify resilience and performance bottlenecks, and seamlessly restore the system to its original state once scenarios finish. These chaos scenarios include pod disruptions, node (EC2) outages, simulating availability zone (AZ) outages, and filling up storage spaces like EBS and EFS. The community is highly responsive to requests and works on expanding the tool's capabilities. MarketAxess actively contributes to the project, adding features such as the ability to leverage existing network ACLs and proposing several feature improvements to enhance test coverage. |
| Red Hat Openshift | 2020 | https://www.redhat.com/ | Kraken is a highly reliable chaos testing tool used to ensure the quality and resiliency of Red Hat Openshift. The engineering team runs all the test scenarios under Kraken on different cloud platforms on both self-managed and cloud services environments prior to the release of a new version of the product. The team also contributes to the Kraken project consistently which helps the test scenarios to keep up with the new features introduced to the product. Inclusion of this test coverage has contributed to gaining the trust of new and existing customers of the product. |
@@ -62,3 +62,11 @@ elastic:
  metrics_index: "krkn-metrics"
  alerts_index: "krkn-alerts"
  telemetry_index: "krkn-telemetry"

health_checks: # Utilizing health check endpoints to observe application behavior during chaos injection.
  interval: # Interval in seconds to perform health checks, default value is 2 seconds
  config: # Provide list of health check configurations for applications
    - url: # Provide application endpoint
      bearer_token: # Bearer token for authentication if any
      auth: # Provide authentication credentials (username , password) in tuple format if any, ex:("admin","secretpassword")
      exit_on_failure: # If value is True exits when health check failed for application, values can be True/False
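A minimal sketch of what a populated health_checks block could look like, using only the keys shown in the hunk above; the endpoint, token, and credentials are placeholder values, not real settings:

```yaml
health_checks:
  interval: 2                                              # seconds between checks (2 is the documented default)
  config:
    - url: "http://my-app.my-namespace.svc:8080/healthz"   # placeholder application endpoint
      bearer_token: ""                                     # set only if the endpoint requires one
      auth: ("admin","secretpassword")                     # optional (username, password) tuple, per the inline comment
      exit_on_failure: True                                # abort the chaos run if this check fails
```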
@@ -1,19 +0,0 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT


function functional_test_arca_cpu_hog {
  yq -i '.input_list[0].node_selector={"kubernetes.io/hostname":"kind-worker2"}' scenarios/kube/cpu-hog/input.yaml
  export scenario_type="hog_scenarios"
  export scenario_file="scenarios/kube/cpu-hog/input.yaml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/arca_cpu_hog.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/arca_cpu_hog.yaml
  echo "Arcaflow CPU Hog: Success"
}

functional_test_arca_cpu_hog
@@ -1,19 +0,0 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT


function functional_test_arca_io_hog {
  yq -i '.input_list[0].node_selector={"kubernetes.io/hostname":"kind-worker2"}' scenarios/kube/io-hog/input.yaml
  export scenario_type="hog_scenarios"
  export scenario_file="scenarios/kube/io-hog/input.yaml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/arca_io_hog.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/arca_io_hog.yaml
  echo "Arcaflow IO Hog: Success"
}

functional_test_arca_io_hog
@@ -1,19 +0,0 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT


function functional_test_arca_memory_hog {
  yq -i '.input_list[0].node_selector={"kubernetes.io/hostname":"kind-worker2"}' scenarios/kube/memory-hog/input.yaml
  export scenario_type="hog_scenarios"
  export scenario_file="scenarios/kube/memory-hog/input.yaml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/arca_memory_hog.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/arca_memory_hog.yaml
  echo "Arcaflow Memory Hog: Success"
}

functional_test_arca_memory_hog
CI/tests/test_cpu_hog.sh (new file, 20 lines)
@@ -0,0 +1,20 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT


function functional_test_cpu_hog {
  yq -i '.node_selector="kubernetes.io/hostname=kind-worker2"' scenarios/kube/cpu-hog.yml

  export scenario_type="hog_scenarios"
  export scenario_file="scenarios/kube/cpu-hog.yml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/cpu_hog.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/cpu_hog.yaml
  echo "CPU Hog: Success"
}

functional_test_cpu_hog
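These CI scripts all follow the same pattern: export the scenario variables, then let envsubst render the shared template into a per-test config. A tiny illustration of the substitution, assuming the template references the variables as `${scenario_type}` and `${scenario_file}` (the template line below is illustrative, not taken from the real common_test_config.yaml):

```bash
export scenario_type="hog_scenarios"
export scenario_file="scenarios/kube/cpu-hog.yml"
# envsubst copies its input, replacing ${VAR} references with the exported values.
echo '  - ${scenario_type}: ["${scenario_file}"]' | envsubst
# prints:   - hog_scenarios: ["scenarios/kube/cpu-hog.yml"]
```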
CI/tests/test_io_hog.sh (new file, 19 lines)
@@ -0,0 +1,19 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT


function functional_test_io_hog {
  yq -i '.node_selector="kubernetes.io/hostname=kind-worker2"' scenarios/kube/io-hog.yml
  export scenario_type="hog_scenarios"
  export scenario_file="scenarios/kube/io-hog.yml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/io_hog.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/io_hog.yaml
  echo "IO Hog: Success"
}

functional_test_io_hog
CI/tests/test_memory_hog.sh (new file, 19 lines)
@@ -0,0 +1,19 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT


function functional_test_memory_hog {
  yq -i '.node_selector="kubernetes.io/hostname=kind-worker2"' scenarios/kube/memory-hog.yml
  export scenario_type="hog_scenarios"
  export scenario_file="scenarios/kube/memory-hog.yml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/memory_hog.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/memory_hog.yaml
  echo "Memory Hog: Success"
}

functional_test_memory_hog
@@ -19,7 +19,9 @@ function functional_test_telemetry {
  yq -i '.telemetry.run_tag=env(RUN_TAG)' CI/config/common_test_config.yaml

  export scenario_type="hog_scenarios"
  export scenario_file="scenarios/kube/cpu-hog/input.yaml"

  export scenario_file="scenarios/kube/cpu-hog.yml"

  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/telemetry.yaml
  retval=$(python3 -m coverage run -a run_kraken.py -c CI/config/telemetry.yaml)
README.md (97 lines changed)
@@ -2,6 +2,7 @@
[badge images]
[OpenSSF Best Practices](https://www.bestpractices.dev/projects/10548)

@@ -10,91 +11,15 @@ Kraken injects deliberate failures into Kubernetes clusters to check if it is re

### Workflow
[workflow diagram]

### Demo
[Kraken Demo - Click to Watch!](https://youtu.be/LN-fZywp_mo)

### Chaos Testing Guide
[Guide](docs/index.md) encapsulates:
- Test methodology that needs to be embraced.
- Best practices that a Kubernetes cluster, platform and applications running on top of it should take into account for the best user experience, performance, resilience and reliability.
- Tooling.
- Scenarios supported.
- Test environment recommendations as to how and where to run chaos tests.
- Chaos testing in practice.

The guide is hosted at https://krkn-chaos.github.io/krkn.
<!-- ### Demo
[Kraken Demo - Click to Watch!](https://youtu.be/LN-fZywp_mo) -->

### How to Get Started
Instructions on how to setup, configure and run Kraken can be found at [Installation](docs/installation.md).

You may consider utilizing the chaos recommendation tool prior to initiating the chaos runs to profile the application service(s) under test. This tool discovers a list of Krkn scenarios with a high probability of causing failures or disruptions to your application service(s). The tool can be accessed at [Chaos-Recommender](utils/chaos_recommender/README.md).

See the [getting started doc](docs/getting_started.md) for how to get started with your own custom scenario or editing current scenarios for your specific usage.

After installation, refer back to the below sections for supported scenarios and how to tweak the kraken config to load them on your cluster.

#### Running Kraken with minimal configuration tweaks
For cases where you want to run Kraken with minimal configuration changes, refer to [krkn-hub](https://github.com/krkn-chaos/krkn-hub). One use case is CI integration where you do not want to carry around different configuration files for the scenarios.

### Config
Instructions on how to setup the config and the options supported can be found at [Config](docs/config.md).

### Kubernetes chaos scenarios supported

Scenario type | Kubernetes
--------------------------- | ------------- |
[Pod Scenarios](docs/pod_scenarios.md) | :heavy_check_mark: |
[Pod Network Scenarios](docs/pod_network_scenarios.md) | :x: |
[Container Scenarios](docs/container_scenarios.md) | :heavy_check_mark: |
[Node Scenarios](docs/node_scenarios.md) | :heavy_check_mark: |
[Time Scenarios](docs/time_scenarios.md) | :heavy_check_mark: |
[Hog Scenarios: CPU, Memory](docs/arcaflow_scenarios.md) | :heavy_check_mark: |
[Cluster Shut Down Scenarios](docs/cluster_shut_down_scenarios.md) | :heavy_check_mark: |
[Service Disruption Scenarios](docs/service_disruption_scenarios.md.md) | :heavy_check_mark: |
[Zone Outage Scenarios](docs/zone_outage.md) | :heavy_check_mark: |
[Application_outages](docs/application_outages.md) | :heavy_check_mark: |
[PVC scenario](docs/pvc_scenario.md) | :heavy_check_mark: |
[Network_Chaos](docs/network_chaos.md) | :heavy_check_mark: |
[ManagedCluster Scenarios](docs/managedcluster_scenarios.md) | :heavy_check_mark: |
[Service Hijacking Scenarios](docs/service_hijacking_scenarios.md) | :heavy_check_mark: |
[SYN Flood Scenarios](docs/syn_flood_scenarios.md) | :heavy_check_mark: |

### Kraken scenario pass/fail criteria and report
It is important to check whether the targeted component recovered from the chaos injection and whether the Kubernetes cluster is healthy, as failures in one component can have an adverse impact on other components. Kraken does this by:
- Having built-in checks for pod and node based scenarios to ensure the expected number of replicas and nodes are up. It also supports running custom scripts with the checks.
- Leveraging [Cerberus](https://github.com/krkn-chaos/cerberus) to monitor the cluster under test and consuming the aggregated go/no-go signal to determine pass/fail post chaos. It is highly recommended to turn on the Cerberus health check feature available in Kraken. Instructions on installing and setting up Cerberus can be found [here](https://github.com/openshift-scale/cerberus#installation) or it can be installed from Kraken using the [instructions](https://github.com/krkn-chaos/krkn#setting-up-infrastructure-dependencies). Once Cerberus is up and running, set cerberus_enabled to True and cerberus_url to the url where Cerberus publishes the go/no-go signal in the Kraken config file (a sketch follows this list). Cerberus can monitor [application routes](https://github.com/redhat-chaos/cerberus/blob/main/docs/config.md#watch-routes) during the chaos and fails the run if it encounters downtime, as that is a potential downtime in a customer's or user's environment as well. It is especially important during the control plane chaos scenarios including the API server, Etcd, Ingress etc. It can be enabled by setting `check_applicaton_routes: True` in the [Kraken config](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml) provided application routes are being monitored in the [cerberus config](https://github.com/redhat-chaos/krkn/blob/main/config/cerberus.yaml).
- Leveraging the built-in alert collection feature to fail the runs in case of critical alerts.
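A minimal sketch of the Cerberus-related settings referenced in the list above; the URL is a placeholder value, and `check_applicaton_routes` is spelled exactly as it appears in the Kraken config:

```yaml
cerberus:
  cerberus_enabled: True              # consume Cerberus' go/no-go signal post chaos
  cerberus_url: http://0.0.0.0:8080   # wherever Cerberus publishes its signal (placeholder)
  check_applicaton_routes: True       # requires routes to be monitored in the cerberus config
```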
### Signaling
In CI runs or any external job it is useful to stop Kraken once a certain test or state is reached. We created a way to signal Kraken to pause the chaos or stop it completely using a signal posted to a port of your choice.

For example, if we have a test run loading the cluster and Kraken running separately, we want to know when to start/stop the Kraken run based on when the test run completes or reaches a certain loaded state.

More detailed information on enabling and leveraging this feature can be found [here](docs/signal.md).

### Performance monitoring
Monitoring the Kubernetes/OpenShift cluster to observe the impact of Kraken chaos scenarios on various components is key to finding bottlenecks, as it is important to make sure the cluster is healthy in terms of both recovery and performance during/after the failure has been injected. Instructions on enabling it can be found [here](docs/performance_dashboards.md).

### SLOs validation during and post chaos
- In addition to checking the recovery and health of the cluster and components under test, Kraken takes in a profile with Prometheus expressions to validate, and exits with a non-zero return code depending on the severity set. This feature can be used to determine pass/fail or alert on abnormalities observed in the cluster based on the metrics.
- Kraken also provides the ability to check if any critical alerts are firing in the cluster post chaos and passes/fails the run accordingly.

Information on enabling and leveraging this feature can be found [here](docs/SLOs_validation.md).

### OCM / ACM integration

Kraken supports injecting faults into [Open Cluster Management (OCM)](https://open-cluster-management.io/) and [Red Hat Advanced Cluster Management for Kubernetes (ACM)](https://www.krkn.com/en/technologies/management/advanced-cluster-management) managed clusters through [ManagedCluster Scenarios](docs/managedcluster_scenarios.md).
Instructions on how to setup, configure and run Kraken can be found in the [documentation](https://krkn-chaos.dev/docs/).

### Blogs and other useful resources
@@ -106,6 +31,7 @@ Kraken supports injecting faults into [Open Cluster Management (OCM)](https://op
- Blog post on supercharging chaos testing using AI integration in Krkn: https://www.redhat.com/en/blog/supercharging-chaos-testing-using-ai
- Blog post announcing Krkn joining CNCF Sandbox: https://www.redhat.com/en/blog/krknchaos-joining-cncf-sandbox

### Roadmap
Enhancements being planned can be found in the [roadmap](ROADMAP.md).

@@ -113,17 +39,8 @@ Enhancements being planned can be found in the [roadmap](ROADMAP.md).
### Contributions
We are always looking for more enhancements and fixes to make it better; any contributions are most welcome. Feel free to report or work on the issues filed on GitHub.

[More information on how to Contribute](docs/contribute.md)
[More information on how to Contribute](https://krkn-chaos.dev/docs/contribution-guidelines/)

If adding a new scenario or tweaking the main config, be sure to add updates to the CI so the CI stays up to date.
Please read [this file](CI/README.md#adding-a-test-case) for more information on updates.

### Scenario Plugin Development

If you're gearing up to develop new scenarios, take a moment to review our
[Scenario Plugin API Documentation](docs/scenario_plugin_api.md).
It's the perfect starting point to tap into your chaotic creativity!

### Community
Key Members (slack_usernames/full name): paigerube14/Paige Rubendall, mffiedler/Mike Fiedler, tsebasti/Tullio Sebastiani, yogi/Yogananth Subramanian, sahil/Sahil Shah, pradeep/Pradeep Surisetty and ravielluri/Naga Ravi Chaitanya Elluri.
ROADMAP.md (11 lines changed)
@@ -6,10 +6,11 @@ Following are a list of enhancements that we are planning to work on adding supp
- [x] [Centralized storage for chaos experiments artifacts](https://github.com/krkn-chaos/krkn/issues/423)
- [ ] [Support for causing DNS outages](https://github.com/krkn-chaos/krkn/issues/394)
- [x] [Chaos recommender](https://github.com/krkn-chaos/krkn/tree/main/utils/chaos-recommender) to suggest scenarios having probability of impacting the service under test using profiling results
- [ ] Chaos AI integration to improve and automate test coverage
- [ ] Chaos AI integration to improve test coverage while reducing fault space to save costs and execution time
- [x] [Support for pod level network traffic shaping](https://github.com/krkn-chaos/krkn/issues/393)
- [ ] [Ability to visualize the metrics that are being captured by Kraken and stored in Elasticsearch](https://github.com/krkn-chaos/krkn/issues/124)
- [ ] Support for running all the scenarios of Kraken on Kubernetes distribution - see https://github.com/krkn-chaos/krkn/issues/185, https://github.com/redhat-chaos/krkn/issues/186
- [ ] Continue to improve [Chaos Testing Guide](https://krkn-chaos.github.io/krkn) in terms of adding best practices, test environment recommendations and scenarios to make sure the OpenShift platform, as well as the applications running on top of it, are resilient and performant under chaotic conditions.
- [ ] [Switch documentation references to Kubernetes](https://github.com/krkn-chaos/krkn/issues/495)
- [ ] [OCP and Kubernetes functionalities segregation](https://github.com/krkn-chaos/krkn/issues/497)
- [x] Support for running all the scenarios of Kraken on Kubernetes distribution - see https://github.com/krkn-chaos/krkn/issues/185, https://github.com/redhat-chaos/krkn/issues/186
- [x] Continue to improve [Chaos Testing Guide](https://krkn-chaos.github.io/krkn) in terms of adding best practices, test environment recommendations and scenarios to make sure the OpenShift platform, as well as the applications running on top of it, are resilient and performant under chaotic conditions.
- [x] [Switch documentation references to Kubernetes](https://github.com/krkn-chaos/krkn/issues/495)
- [x] [OCP and Kubernetes functionalities segregation](https://github.com/krkn-chaos/krkn/issues/497)
- [x] [Krknctl - client for running Krkn scenarios with ease](https://github.com/krkn-chaos/krknctl)
SECURITY.md (new file, 43 lines)
@@ -0,0 +1,43 @@
# Security Policy

We attach great importance to code security. We are very grateful to users, security vulnerability researchers, and others for reporting security vulnerabilities to the Krkn community. All reported security vulnerabilities will be carefully assessed and addressed in a timely manner.


## Security Checks

Krkn leverages [Snyk](https://snyk.io/) to ensure that any security vulnerabilities found
in the code base and dependencies are fixed and published in the latest release. Security
vulnerability checks are enabled for each pull request to enable developers to get insights
and proactively fix them.


## Reporting a Vulnerability

The Krkn project treats security vulnerabilities seriously, so we
strive to take action quickly when required.

The project requests that security issues be disclosed in a responsible
manner to allow adequate time to respond. If a security issue or
vulnerability has been found, please disclose the details to our
dedicated email address:

cncf-krkn-maintainers@lists.cncf.io

You can also use the [GitHub vulnerability report mechanism](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability) to report the security vulnerability.

Please include as much information as possible with the report. The
following details assist with analysis efforts:
- Description of the vulnerability
- Affected component (version, commit, branch, etc.)
- Affected code (file path, line numbers)
- Exploit code


## Security Team

The security team currently consists of the [Maintainers of Krkn](https://github.com/krkn-chaos/krkn/blob/main/MAINTAINERS.md).


## Process and Supported Releases

The Krkn security team will investigate and provide a fix in a timely manner depending on the severity. The fix will be included in the new release of Krkn and details will be included in the release notes.
@@ -1,5 +1,4 @@
kraken:
  distribution: kubernetes # Distribution can be kubernetes or openshift
  kubeconfig_path: ~/.kube/config # Path to kubeconfig
  exit_on_failure: False # Exit when a post action scenario fails
  publish_kraken_status: True # Can be accessed at http://0.0.0.0:8081
@@ -9,10 +8,9 @@ kraken:
  chaos_scenarios:
    # List of policies/chaos scenarios to load
    - hog_scenarios:
        - scenarios/kube/cpu-hog/input.yaml
        - scenarios/kube/memory-hog/input.yaml
        - scenarios/kube/io-hog/input.yaml
        - scenarios/kube/io-hog/input.yaml
        - scenarios/kube/cpu-hog.yml
        - scenarios/kube/memory-hog.yml
        - scenarios/kube/io-hog.yml
    - application_outages_scenarios:
        - scenarios/openshift/app_outage.yaml
    - container_scenarios: # List of chaos pod scenarios to load
@@ -26,12 +24,10 @@ kraken:
        - scenarios/openshift/prom_kill.yml
        - scenarios/openshift/openshift-apiserver.yml
        - scenarios/openshift/openshift-kube-apiserver.yml
    - vmware_node_scenarios:
        - scenarios/openshift/vmware_node_scenarios.yml
    - ibmcloud_node_scenarios:
        - scenarios/openshift/ibmcloud_node_scenarios.yml
    - node_scenarios: # List of chaos node scenarios to load
        - scenarios/openshift/aws_node_scenarios.yml
        - scenarios/openshift/vmware_node_scenarios.yml
        - scenarios/openshift/ibmcloud_node_scenarios.yml
    - time_scenarios: # List of chaos time scenarios to load
        - scenarios/openshift/time_scenarios_example.yml
    - cluster_shut_down_scenarios:
@@ -49,6 +45,8 @@ kraken:
        - scenarios/kube/service_hijacking.yaml
    - syn_flood_scenarios:
        - scenarios/kube/syn_flood.yaml
    - network_chaos_ng_scenarios:
        - scenarios/kube/network-filter.yml

cerberus:
  cerberus_enabled: False # Enable it when cerberus is previously installed
@@ -64,12 +62,10 @@ performance_monitoring:
  enable_alerts: False # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
  enable_metrics: False
  alert_profile: config/alerts.yaml # Path or URL to alert profile with the prometheus queries
  metrics_profile: config/metrics.yaml
  metrics_profile: config/metrics-report.yaml
  check_critical_alerts: False # When enabled will check prometheus for critical alerts firing post chaos
elastic:
  enable_elastic: False
  collect_metrics: False
  collect_alerts: False
  verify_certs: False
  elastic_url: "" # To track results in elasticsearch, give url to server here; will post telemetry details when url and index not blank
  elastic_port: 32766
@@ -113,7 +109,10 @@ telemetry:
  oc_cli_path: /usr/bin/oc # optional, if not specified it will be searched for in $PATH
  events_backup: True # enables/disables cluster events collection

health_checks: # Utilizing health check endpoints to observe application behavior during chaos injection.
  interval: # Interval in seconds to perform health checks, default value is 2 seconds
  config: # Provide list of health check configurations for applications
    - url: # Provide application endpoint
      bearer_token: # Bearer token for authentication if any
      auth: # Provide authentication credentials (username , password) in tuple format if any, ex:("admin","secretpassword")
      exit_on_failure: # If value is True exits when health check failed for application, values can be True/False
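Putting the hunks above together, a minimal sketch of a config that runs only the renamed hog scenarios with the new metrics-report profile; all keys appear in the hunks above, and the values shown are illustrative:

```yaml
kraken:
  distribution: kubernetes
  kubeconfig_path: ~/.kube/config
  exit_on_failure: False
  chaos_scenarios:
    - hog_scenarios:
        - scenarios/kube/cpu-hog.yml
        - scenarios/kube/memory-hog.yml
        - scenarios/kube/io-hog.yml
performance_monitoring:
  enable_alerts: False
  alert_profile: config/alerts.yaml
  metrics_profile: config/metrics-report.yaml
```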
@@ -1,133 +1,126 @@
metrics:
  # API server
  - query: histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{apiserver="kube-apiserver", verb!~"WATCH", subresource!="log"}[2m])) by (verb,resource,subresource,instance,le)) > 0
    metricName: API99thLatency

  - query: sum(irate(apiserver_request_total{apiserver="kube-apiserver",verb!="WATCH",subresource!="log"}[2m])) by (verb,instance,resource,code) > 0
    metricName: APIRequestRate
    instant: True

  - query: sum(apiserver_current_inflight_requests{}) by (request_kind) > 0
    metricName: APIInflightRequests
    instant: True

  - query: histogram_quantile(0.99, rate(apiserver_current_inflight_requests[5m]))
    metricName: APIInflightRequests
    instant: True

  # Container & pod metrics
  - query: (sum(container_memory_rss{name!="",container!="POD",namespace=~"openshift-(etcd|oauth-apiserver|.*apiserver|ovn-kubernetes|sdn|ingress|authentication|.*controller-manager|.*scheduler)"}) by (container, pod, namespace, node) and on (node) kube_node_role{role="master"}) > 0
    metricName: containerMemory-Masters
    instant: true

  - query: (sum(irate(container_cpu_usage_seconds_total{name!="",container!="POD",namespace=~"openshift-(etcd|oauth-apiserver|sdn|ovn-kubernetes|.*apiserver|authentication|.*controller-manager|.*scheduler)"}[2m]) * 100) by (container, pod, namespace, node) and on (node) kube_node_role{role="master"}) > 0
    metricName: containerCPU-Masters
    instant: true

  - query: (sum(irate(container_cpu_usage_seconds_total{pod!="",container="prometheus",namespace="openshift-monitoring"}[2m]) * 100) by (container, pod, namespace, node) and on (node) kube_node_role{role="infra"}) > 0
    metricName: containerCPU-Prometheus
    instant: true

  - query: (avg(irate(container_cpu_usage_seconds_total{name!="",container!="POD",namespace=~"openshift-(sdn|ovn-kubernetes|ingress)"}[2m]) * 100 and on (node) kube_node_role{role="worker"}) by (namespace, container)) > 0
    metricName: containerCPU-AggregatedWorkers
    instant: true

  - query: (avg(irate(container_cpu_usage_seconds_total{name!="",container!="POD",namespace=~"openshift-(sdn|ovn-kubernetes|ingress|monitoring|image-registry|logging)"}[2m]) * 100 and on (node) kube_node_role{role="infra"}) by (namespace, container)) > 0
    metricName: containerCPU-AggregatedInfra

  - query: (sum(container_memory_rss{pod!="",namespace="openshift-monitoring",name!="",container="prometheus"}) by (container, pod, namespace, node) and on (node) kube_node_role{role="infra"}) > 0
    metricName: containerMemory-Prometheus
    instant: True

  - query: avg(container_memory_rss{name!="",container!="POD",namespace=~"openshift-(sdn|ovn-kubernetes|ingress)"} and on (node) kube_node_role{role="worker"}) by (container, namespace)
    metricName: containerMemory-AggregatedWorkers
    instant: True

  - query: avg(container_memory_rss{name!="",container!="POD",namespace=~"openshift-(sdn|ovn-kubernetes|ingress|monitoring|image-registry|logging)"} and on (node) kube_node_role{role="infra"}) by (container, namespace)
    metricName: containerMemory-AggregatedInfra
    instant: True

  # Node metrics
  - query: (sum(irate(node_cpu_seconds_total[2m])) by (mode,instance) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")) > 0
    metricName: nodeCPU-Masters
    instant: True

  - query: max(max_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
    metricName: maxCPU-Masters
    instant: true

  - query: avg(avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
    metricName: nodeMemory-Masters
    instant: true

  - query: (avg((sum(irate(node_cpu_seconds_total[2m])) by (mode,instance) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))) by (mode)) > 0
    metricName: nodeCPU-AggregatedWorkers
    instant: True

  - query: (avg((sum(irate(node_cpu_seconds_total[2m])) by (mode,instance) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))) by (mode)) > 0
    metricName: nodeCPU-AggregatedInfra
    instant: True

  - query: avg(node_memory_MemAvailable_bytes) by (instance) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
    metricName: nodeMemoryAvailable-Masters
  - query: avg(avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
    metricName: nodeMemory-Masters
    instant: true

  - query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
    metricName: maxMemory-Masters
    instant: true

  - query: avg(node_memory_MemAvailable_bytes and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
    metricName: nodeMemoryAvailable-AggregatedWorkers
    instant: True

  - query: max(max_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
    metricName: maxCPU-Workers
    instant: true

  - query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
    metricName: maxMemory-Workers
    instant: true

  - query: avg(node_memory_MemAvailable_bytes and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))
    metricName: nodeMemoryAvailable-AggregatedInfra
    instant: True

  - query: avg(node_memory_Active_bytes) by (instance) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
    metricName: nodeMemoryActive-Masters
    instant: True

  - query: avg(node_memory_Active_bytes and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
    metricName: nodeMemoryActive-AggregatedWorkers
    instant: True

  - query: avg(avg(node_memory_Active_bytes) by (instance) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))
    metricName: nodeMemoryActive-AggregatedInfra

  - query: avg(node_memory_Cached_bytes) by (instance) + avg(node_memory_Buffers_bytes) by (instance) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
    metricName: nodeMemoryCached+nodeMemoryBuffers-Masters

  - query: avg(node_memory_Cached_bytes + node_memory_Buffers_bytes and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
    metricName: nodeMemoryCached+nodeMemoryBuffers-AggregatedWorkers

  - query: avg(node_memory_Cached_bytes + node_memory_Buffers_bytes and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))
    metricName: nodeMemoryCached+nodeMemoryBuffers-AggregatedInfra

  - query: irate(node_network_receive_bytes_total{device=~"^(ens|eth|bond|team).*"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
    metricName: rxNetworkBytes-Masters

  - query: avg(irate(node_network_receive_bytes_total{device=~"^(ens|eth|bond|team).*"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (device)
    metricName: rxNetworkBytes-AggregatedWorkers

  - query: avg(irate(node_network_receive_bytes_total{device=~"^(ens|eth|bond|team).*"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (device)
    metricName: rxNetworkBytes-AggregatedInfra

  - query: irate(node_network_transmit_bytes_total{device=~"^(ens|eth|bond|team).*"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
    metricName: txNetworkBytes-Masters

  - query: avg(irate(node_network_transmit_bytes_total{device=~"^(ens|eth|bond|team).*"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (device)
    metricName: txNetworkBytes-AggregatedWorkers

  - query: avg(irate(node_network_transmit_bytes_total{device=~"^(ens|eth|bond|team).*"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (device)
    metricName: txNetworkBytes-AggregatedInfra

  - query: rate(node_disk_written_bytes_total{device!~"^(dm|rb).*"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
    metricName: nodeDiskWrittenBytes-Masters

  - query: avg(rate(node_disk_written_bytes_total{device!~"^(dm|rb).*"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (device)
    metricName: nodeDiskWrittenBytes-AggregatedWorkers

  - query: avg(rate(node_disk_written_bytes_total{device!~"^(dm|rb).*"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (device)
    metricName: nodeDiskWrittenBytes-AggregatedInfra

  - query: rate(node_disk_read_bytes_total{device!~"^(dm|rb).*"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")
    metricName: nodeDiskReadBytes-Masters

  - query: avg(rate(node_disk_read_bytes_total{device!~"^(dm|rb).*"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (device)
    metricName: nodeDiskReadBytes-AggregatedWorkers

  - query: avg(rate(node_disk_read_bytes_total{device!~"^(dm|rb).*"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (device)
    metricName: nodeDiskReadBytes-AggregatedInfra
    instant: True

  # Etcd metrics
  - query: sum(rate(etcd_server_leader_changes_seen_total[2m]))
    metricName: etcdLeaderChangesRate
    instant: True

  - query: etcd_server_is_leader > 0
    metricName: etcdServerIsLeader
    instant: True

  - query: histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket[2m]))
    metricName: 99thEtcdDiskBackendCommitDurationSeconds
    instant: True

  - query: histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[2m]))
    metricName: 99thEtcdDiskWalFsyncDurationSeconds
    instant: True

  - query: histogram_quantile(0.99, rate(etcd_network_peer_round_trip_time_seconds_bucket[5m]))
    metricName: 99thEtcdRoundTripTimeSeconds

  - query: etcd_mvcc_db_total_size_in_bytes
    metricName: etcdDBPhysicalSizeBytes

  - query: etcd_mvcc_db_total_size_in_use_in_bytes
    metricName: etcdDBLogicalSizeBytes
    instant: True

  - query: sum by (cluster_version)(etcd_cluster_version)
    metricName: etcdVersion
@@ -135,83 +128,16 @@ metrics:

  - query: sum(rate(etcd_object_counts{}[5m])) by (resource) > 0
    metricName: etcdObjectCount
    instant: True

  - query: histogram_quantile(0.99,sum(rate(etcd_request_duration_seconds_bucket[2m])) by (le,operation,apiserver)) > 0
    metricName: P99APIEtcdRequestLatency

  - query: sum(grpc_server_started_total{namespace="openshift-etcd",grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{namespace="openshift-etcd",grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"})
    metricName: ActiveWatchStreams

  - query: sum(grpc_server_started_total{namespace="openshift-etcd",grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{namespace="openshift-etcd",grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"})
    metricName: ActiveLeaseStreams

  - query: sum(rate(etcd_debugging_snap_save_total_duration_seconds_sum{namespace="openshift-etcd"}[2m]))
    metricName: snapshotSaveLatency

  - query: sum(rate(etcd_server_heartbeat_send_failures_total{namespace="openshift-etcd"}[2m]))
    metricName: HeartBeatFailures

  - query: sum(rate(etcd_server_health_failures{namespace="openshift-etcd"}[2m]))
    metricName: HealthFailures

  - query: sum(rate(etcd_server_slow_apply_total{namespace="openshift-etcd"}[2m]))
    metricName: SlowApplies

  - query: sum(rate(etcd_server_slow_read_indexes_total{namespace="openshift-etcd"}[2m]))
    metricName: SlowIndexRead

  - query: sum(etcd_server_proposals_pending)
    metricName: PendingProposals

  - query: histogram_quantile(1.0, sum(rate(etcd_debugging_mvcc_db_compaction_pause_duration_milliseconds_bucket[1m])) by (le, instance))
    metricName: CompactionMaxPause
    instant: True

  - query: sum by (instance) (apiserver_storage_objects)
    metricName: etcdTotalObjectCount
    instant: True

  - query: topk(500, max by(resource) (apiserver_storage_objects))
    metricName: etcdTopObectCount

  # Cluster metrics
  - query: count(kube_namespace_created)
    metricName: namespaceCount

  - query: sum(kube_pod_status_phase{}) by (phase)
    metricName: podStatusCount

  - query: count(kube_secret_info{})
    metricName: secretCount

  - query: count(kube_deployment_labels{})
    metricName: deploymentCount

  - query: count(kube_configmap_info{})
    metricName: configmapCount

  - query: count(kube_service_info{})
    metricName: serviceCount

  - query: kube_node_role
    metricName: nodeRoles
    instant: true

  - query: sum(kube_node_status_condition{status="true"}) by (condition)
    metricName: nodeStatus

  - query: (sum(rate(container_fs_writes_bytes_total{container!="",device!~".+dm.+"}[5m])) by (device, container, node) and on (node) kube_node_role{role="master"}) > 0
    metricName: containerDiskUsage

  - query: cluster_version{type="completed"}
    metricName: clusterVersion
    instant: true

  # Golang metrics

  - query: go_memstats_heap_alloc_bytes{job=~"apiserver|api|etcd"}
    metricName: goHeapAllocBytes

  - query: go_memstats_heap_inuse_bytes{job=~"apiserver|api|etcd"}
    metricName: goHeapInuseBytes

  - query: go_gc_duration_seconds{job=~"apiserver|api|etcd",quantile="1"}
    metricName: goGCDurationSeconds
    instant: True
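Each entry in these profiles is a Prometheus expression plus a name, with `instant` controlling whether it is evaluated as a single instant query; `[.elapsed:]` appears to be a placeholder that Krkn substitutes with the elapsed run duration before querying (this is an assumption based on how it is used above, not something stated in the diff). A minimal entry, reusing a query already present in the profile:

```yaml
metrics:
  - query: histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[2m]))
    metricName: 99thEtcdDiskWalFsyncDurationSeconds
    instant: True
```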
config/metrics-report.yaml (new file, 248 lines)
@@ -0,0 +1,248 @@
metrics:

  # API server
  - query: sum(apiserver_current_inflight_requests{}) by (request_kind) > 0
    metricName: APIInflightRequests
    instant: true

  # Kubelet & CRI-O

  # Average and max of the CPU usage from all worker's kubelet
  - query: avg(avg_over_time(irate(process_cpu_seconds_total{service="kubelet",job="kubelet"}[2m])[.elapsed:]) and on (node) kube_node_role{role="worker"})
    metricName: cpu-kubelet
    instant: true

  - query: max(max_over_time(irate(process_cpu_seconds_total{service="kubelet",job="kubelet"}[2m])[.elapsed:]) and on (node) kube_node_role{role="worker"})
    metricName: max-cpu-kubelet
    instant: true

  # Average of the memory usage from all worker's kubelet
  - query: avg(avg_over_time(process_resident_memory_bytes{service="kubelet",job="kubelet"}[.elapsed:]) and on (node) kube_node_role{role="worker"})
    metricName: memory-kubelet
    instant: true

  # Max of the memory usage from all worker's kubelet
  - query: max(max_over_time(process_resident_memory_bytes{service="kubelet",job="kubelet"}[.elapsed:]) and on (node) kube_node_role{role="worker"})
    metricName: max-memory-kubelet
    instant: true

  - query: max_over_time(sum(process_resident_memory_bytes{service="kubelet",job="kubelet"} and on (node) kube_node_role{role="worker"})[.elapsed:])
    metricName: max-memory-sum-kubelet
    instant: true

  # Average and max of the CPU usage from all worker's CRI-O
  - query: avg(avg_over_time(irate(process_cpu_seconds_total{service="kubelet",job="crio"}[2m])[.elapsed:]) and on (node) kube_node_role{role="worker"})
    metricName: cpu-crio
    instant: true

  - query: max(max_over_time(irate(process_cpu_seconds_total{service="kubelet",job="crio"}[2m])[.elapsed:]) and on (node) kube_node_role{role="worker"})
    metricName: max-cpu-crio
    instant: true

  # Average of the memory usage from all worker's CRI-O
  - query: avg(avg_over_time(process_resident_memory_bytes{service="kubelet",job="crio"}[.elapsed:]) and on (node) kube_node_role{role="worker"})
    metricName: memory-crio
    instant: true

  # Max of the memory usage from all worker's CRI-O
  - query: max(max_over_time(process_resident_memory_bytes{service="kubelet",job="crio"}[.elapsed:]) and on (node) kube_node_role{role="worker"})
    metricName: max-memory-crio
    instant: true

  # Etcd

  - query: avg(avg_over_time(histogram_quantile(0.99, rate(etcd_disk_backend_commit_duration_seconds_bucket[2m]))[.elapsed:]))
    metricName: 99thEtcdDiskBackendCommit
    instant: true

  - query: avg(avg_over_time(histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[2m]))[.elapsed:]))
    metricName: 99thEtcdDiskWalFsync
    instant: true

  - query: avg(avg_over_time(histogram_quantile(0.99, irate(etcd_network_peer_round_trip_time_seconds_bucket[2m]))[.elapsed:]))
    metricName: 99thEtcdRoundTripTime
    instant: true

  # Control-plane

  - query: avg(avg_over_time(topk(1, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-kube-controller-manager"}[2m])) by (pod))[.elapsed:]))
    metricName: cpu-kube-controller-manager
    instant: true

  - query: max(max_over_time(topk(1, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-kube-controller-manager"}[2m])) by (pod))[.elapsed:]))
    metricName: max-cpu-kube-controller-manager
    instant: true

  - query: avg(avg_over_time(topk(1, sum(container_memory_rss{name!="", namespace="openshift-kube-controller-manager"}) by (pod))[.elapsed:]))
    metricName: memory-kube-controller-manager
    instant: true

  - query: max(max_over_time(topk(1, sum(container_memory_rss{name!="", namespace="openshift-kube-controller-manager"}) by (pod))[.elapsed:]))
    metricName: max-memory-kube-controller-manager
    instant: true

  - query: avg(avg_over_time(topk(3, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-kube-apiserver"}[2m])) by (pod))[.elapsed:]))
    metricName: cpu-kube-apiserver
    instant: true

  - query: avg(avg_over_time(topk(3, sum(container_memory_rss{name!="", namespace="openshift-kube-apiserver"}) by (pod))[.elapsed:]))
    metricName: memory-kube-apiserver
    instant: true

  - query: avg(avg_over_time(topk(3, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-apiserver"}[2m])) by (pod))[.elapsed:]))
    metricName: cpu-openshift-apiserver
    instant: true

  - query: avg(avg_over_time(topk(3, sum(container_memory_rss{name!="", namespace="openshift-apiserver"}) by (pod))[.elapsed:]))
    metricName: memory-openshift-apiserver
    instant: true

  - query: avg(avg_over_time(topk(3, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-etcd"}[2m])) by (pod))[.elapsed:]))
    metricName: cpu-etcd
    instant: true

  - query: avg(avg_over_time(topk(3,sum(container_memory_rss{name!="", namespace="openshift-etcd"}) by (pod))[.elapsed:]))
    metricName: memory-etcd
    instant: true

  - query: avg(avg_over_time(topk(1, sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-controller-manager"}[2m])) by (pod))[.elapsed:]))
    metricName: cpu-openshift-controller-manager
    instant: true

  - query: avg(avg_over_time(topk(1, sum(container_memory_rss{name!="", namespace="openshift-controller-manager"}) by (pod))[.elapsed:]))
    metricName: memory-openshift-controller-manager
    instant: true

  # multus

  - query: avg(avg_over_time(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-multus", pod=~"(multus).+", container!="POD"}[2m])[.elapsed:])) by (container)
    metricName: cpu-multus
    instant: true

  - query: avg(avg_over_time(container_memory_rss{name!="", namespace="openshift-multus", pod=~"(multus).+", container!="POD"}[.elapsed:])) by (container)
    metricName: memory-multus
    instant: true

  # OVNKubernetes - standard & IC

  - query: avg(avg_over_time(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-ovn-kubernetes", pod=~"(ovnkube-master|ovnkube-control-plane).+", container!="POD"}[2m])[.elapsed:])) by (container)
    metricName: cpu-ovn-control-plane
    instant: true

  - query: avg(avg_over_time(container_memory_rss{name!="", namespace="openshift-ovn-kubernetes", pod=~"(ovnkube-master|ovnkube-control-plane).+", container!="POD"}[.elapsed:])) by (container)
    metricName: memory-ovn-control-plane
    instant: true

  - query: avg(avg_over_time(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-ovn-kubernetes", pod=~"ovnkube-node.+", container!="POD"}[2m])[.elapsed:])) by (container)
    metricName: cpu-ovnkube-node
    instant: true

  - query: avg(avg_over_time(container_memory_rss{name!="", namespace="openshift-ovn-kubernetes", pod=~"ovnkube-node.+", container!="POD"}[.elapsed:])) by (container)
    metricName: memory-ovnkube-node
    instant: true

  # Nodes

  - query: avg(avg_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
    metricName: cpu-masters
    instant: true

  - query: avg(avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
    metricName: memory-masters
    instant: true

  - query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
    metricName: max-memory-masters
    instant: true

  - query: avg(avg_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
|
||||
metricName: cpu-workers
|
||||
instant: true
|
||||
|
||||
- query: max(max_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
|
||||
metricName: max-cpu-workers
|
||||
instant: true
|
||||
|
||||
- query: avg(avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
|
||||
metricName: memory-workers
|
||||
instant: true
|
||||
|
||||
- query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
|
||||
metricName: max-memory-workers
|
||||
instant: true
|
||||
|
||||
- query: sum( (node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)") )
|
||||
metricName: memory-sum-workers
|
||||
instant: true
|
||||
|
||||
|
||||
- query: avg(avg_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
|
||||
metricName: cpu-infra
|
||||
instant: true
|
||||
|
||||
- query: max(max_over_time(sum(irate(node_cpu_seconds_total{mode!="idle", mode!="steal"}[2m]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (instance)[.elapsed:]))
|
||||
metricName: max-cpu-infra
|
||||
instant: true
|
||||
|
||||
- query: avg(avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))
|
||||
metricName: memory-infra
|
||||
instant: true
|
||||
|
||||
- query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))
|
||||
metricName: max-memory-infra
|
||||
instant: true
|
||||
|
||||
- query: max_over_time(sum((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes) and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)"))[.elapsed:])
|
||||
metricName: max-memory-sum-infra
|
||||
instant: true
|
||||
|
||||
# Monitoring and ingress
|
||||
|
||||
- query: avg(avg_over_time(sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-monitoring", pod=~"prometheus-k8s.+"}[2m])) by (pod)[.elapsed:]))
|
||||
metricName: cpu-prometheus
|
||||
instant: true
|
||||
|
||||
- query: max(max_over_time(sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-monitoring", pod=~"prometheus-k8s.+"}[2m])) by (pod)[.elapsed:]))
|
||||
metricName: max-cpu-prometheus
|
||||
instant: true
|
||||
|
||||
- query: avg(avg_over_time(sum(container_memory_rss{name!="", namespace="openshift-monitoring", pod=~"prometheus-k8s.+"}) by (pod)[.elapsed:]))
|
||||
metricName: memory-prometheus
|
||||
instant: true
|
||||
|
||||
- query: max(max_over_time(sum(container_memory_rss{name!="", namespace="openshift-monitoring", pod=~"prometheus-k8s.+"}) by (pod)[.elapsed:]))
|
||||
metricName: max-memory-prometheus
|
||||
instant: true
|
||||
|
||||
- query: avg(avg_over_time(sum(irate(container_cpu_usage_seconds_total{name!="", namespace="openshift-ingress", pod=~"router-default.+"}[2m])) by (pod)[.elapsed:]))
|
||||
metricName: cpu-router
|
||||
instant: true
|
||||
|
||||
- query: avg(avg_over_time(sum(container_memory_rss{name!="", namespace="openshift-ingress", pod=~"router-default.+"}) by (pod)[.elapsed:]))
|
||||
metricName: memory-router
|
||||
instant: true
|
||||
|
||||
# Cluster
|
||||
|
||||
- query: avg_over_time(cluster:memory_usage:ratio[.elapsed:])
|
||||
metricName: memory-cluster-usage-ratio
|
||||
instant: true
|
||||
|
||||
- query: avg_over_time(cluster:node_cpu:ratio[.elapsed:])
|
||||
metricName: cpu-cluster-usage-ratio
|
||||
instant: true
|
||||
|
||||
# Retain the raw CPU seconds totals for comparison
|
||||
- query: sum(node_cpu_seconds_total and on (instance) label_replace(kube_node_role{role="worker",role!="infra"}, "instance", "$1", "node", "(.+)")) by (mode)
|
||||
metricName: nodeCPUSeconds-Workers
|
||||
instant: true
|
||||
|
||||
|
||||
- query: sum(node_cpu_seconds_total and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")) by (mode)
|
||||
metricName: nodeCPUSeconds-Masters
|
||||
instant: true
|
||||
|
||||
|
||||
- query: sum(node_cpu_seconds_total and on (instance) label_replace(kube_node_role{role="infra"}, "instance", "$1", "node", "(.+)")) by (mode)
|
||||
metricName: nodeCPUSeconds-Infra
|
||||
instant: true
|
||||
@@ -1,13 +1,7 @@
|
||||
metrics:
|
||||
# API server
|
||||
- query: histogram_quantile(0.99, sum(rate(apiserver_request_duration_seconds_bucket{apiserver="kube-apiserver", verb!~"WATCH", subresource!="log"}[2m])) by (verb,resource,subresource,instance,le)) > 0
|
||||
metricName: API99thLatency
|
||||
|
||||
- query: sum(irate(apiserver_request_total{apiserver="kube-apiserver",verb!="WATCH",subresource!="log"}[2m])) by (verb,instance,resource,code) > 0
|
||||
metricName: APIRequestRate
|
||||
|
||||
- query: sum(apiserver_current_inflight_requests{}) by (request_kind) > 0
|
||||
metricName: APIInflightRequests
|
||||
- query: irate(apiserver_request_total{verb="POST", resource="pods", subresource="binding",code="201"}[2m]) > 0
|
||||
metricName: schedulingThroughput
|
||||
|
||||
# Containers & pod metrics
|
||||
- query: sum(irate(container_cpu_usage_seconds_total{name!="",namespace=~"openshift-(etcd|oauth-apiserver|.*apiserver|ovn-kubernetes|sdn|ingress|authentication|.*controller-manager|.*scheduler|monitoring|logging|image-registry)"}[2m]) * 100) by (pod, namespace, node)
|
||||
@@ -33,8 +27,17 @@ metrics:
|
||||
metricName: crioMemory
|
||||
|
||||
# Node metrics
|
||||
- query: sum(irate(node_cpu_seconds_total[2m])) by (mode,instance) > 0
|
||||
metricName: nodeCPU
|
||||
- query: (sum(irate(node_cpu_seconds_total[2m])) by (mode,instance) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)")) > 0
|
||||
metricName: nodeCPU-Masters
|
||||
|
||||
- query: (avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
|
||||
metricName: nodeMemory-Masters
|
||||
|
||||
- query: (sum(irate(node_cpu_seconds_total[2m])) by (mode,instance) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)")) > 0
|
||||
metricName: nodeCPU-Workers
|
||||
|
||||
- query: (avg_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[2m:]) and on (instance) label_replace(kube_node_role{role="worker"}, "instance", "$1", "node", "(.+)"))
|
||||
metricName: nodeMemory-Workers
|
||||
|
||||
- query: avg(node_memory_MemAvailable_bytes) by (instance)
|
||||
metricName: nodeMemoryAvailable
|
||||
@@ -42,6 +45,9 @@ metrics:
|
||||
- query: avg(node_memory_Active_bytes) by (instance)
|
||||
metricName: nodeMemoryActive
|
||||
|
||||
- query: max(max_over_time((node_memory_MemTotal_bytes - node_memory_MemAvailable_bytes)[.elapsed:]) and on (instance) label_replace(kube_node_role{role="master"}, "instance", "$1", "node", "(.+)"))
|
||||
metricName: maxMemory-Masters
|
||||
|
||||
- query: avg(node_memory_Cached_bytes) by (instance) + avg(node_memory_Buffers_bytes) by (instance)
|
||||
metricName: nodeMemoryCached+nodeMemoryBuffers
|
||||
|
||||
@@ -84,34 +90,4 @@ metrics:
|
||||
|
||||
- query: sum by (cluster_version)(etcd_cluster_version)
|
||||
metricName: etcdVersion
|
||||
instant: true
|
||||
|
||||
# Cluster metrics
|
||||
- query: count(kube_namespace_created)
|
||||
metricName: namespaceCount
|
||||
|
||||
- query: sum(kube_pod_status_phase{}) by (phase)
|
||||
metricName: podStatusCount
|
||||
|
||||
- query: count(kube_secret_info{})
|
||||
metricName: secretCount
|
||||
|
||||
- query: count(kube_deployment_labels{})
|
||||
metricName: deploymentCount
|
||||
|
||||
- query: count(kube_configmap_info{})
|
||||
metricName: configmapCount
|
||||
|
||||
- query: count(kube_service_info{})
|
||||
metricName: serviceCount
|
||||
|
||||
- query: kube_node_role
|
||||
metricName: nodeRoles
|
||||
instant: true
|
||||
|
||||
- query: sum(kube_node_status_condition{status="true"}) by (condition)
|
||||
metricName: nodeStatus
|
||||
|
||||
- query: cluster_version{type="completed"}
|
||||
metricName: clusterVersion
|
||||
instant: true
|
||||
instant: true
|
||||
@@ -1,14 +1,19 @@
|
||||
# oc build
|
||||
FROM golang:1.22.5 AS oc-build
|
||||
FROM golang:1.23.1 AS oc-build
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends libkrb5-dev
|
||||
WORKDIR /tmp
|
||||
RUN git clone --branch release-4.18 https://github.com/openshift/oc.git
|
||||
WORKDIR /tmp/oc
|
||||
RUN go mod edit -go 1.22.5 &&\
|
||||
RUN go mod edit -go 1.23.1 &&\
|
||||
go get github.com/moby/buildkit@v0.12.5 &&\
|
||||
go get github.com/containerd/containerd@v1.7.11&&\
|
||||
go get github.com/docker/docker@v25.0.6&&\
|
||||
go get github.com/opencontainers/runc@v1.1.14&&\
|
||||
go get github.com/go-git/go-git/v5@v5.13.0&&\
|
||||
go get golang.org/x/net@v0.36.0&&\
|
||||
go get github.com/containerd/containerd@v1.7.27&&\
|
||||
go get golang.org/x/oauth2@v0.27.0&&\
|
||||
go get golang.org/x/crypto@v0.35.0&&\
|
||||
go mod tidy && go mod vendor
|
||||
RUN make GO_REQUIRED_MIN_VERSION:= oc
|
||||
|
||||
@@ -20,10 +25,6 @@ RUN dnf update -y
|
||||
|
||||
ENV KUBECONFIG /home/krkn/.kube/config
|
||||
|
||||
# install kubectl
|
||||
RUN curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" &&\
|
||||
cp kubectl /usr/local/bin/kubectl && chmod +x /usr/local/bin/kubectl &&\
|
||||
cp kubectl /usr/bin/kubectl && chmod +x /usr/bin/kubectl
|
||||
|
||||
# This overwrites any existing configuration in /etc/yum.repos.d/kubernetes.repo
|
||||
RUN dnf update && dnf install -y --setopt=install_weak_deps=False \
|
||||
@@ -45,10 +46,16 @@ RUN if [ -n "$PR_NUMBER" ]; then git fetch origin pull/${PR_NUMBER}/head:pr-${PR
|
||||
# if it is a TAG trigger checkout the tag
|
||||
RUN if [ -n "$TAG" ]; then git checkout "$TAG";fi
|
||||
|
||||
RUN python3.9 -m ensurepip
|
||||
RUN python3.9 -m ensurepip --upgrade --default-pip
|
||||
RUN python3.9 -m pip install --upgrade pip setuptools==70.0.0
|
||||
RUN pip3.9 install -r requirements.txt
|
||||
RUN pip3.9 install jsonschema
|
||||
|
||||
LABEL krknctl.title.global="Krkn Base Image"
|
||||
LABEL krknctl.description.global="This is the krkn base image."
|
||||
LABEL krknctl.input_fields.global='$KRKNCTL_INPUT'
|
||||
|
||||
|
||||
RUN chown -R krkn:krkn /home/krkn && chmod 755 /home/krkn
|
||||
USER krkn
|
||||
ENTRYPOINT ["python3.9", "run_kraken.py"]
|
||||
@@ -6,7 +6,7 @@ Container image gets automatically built by quay.io at [Kraken image](https://qu
|
||||
|
||||
### Run containerized version
|
||||
|
||||
Refer [instructions](https://github.com/redhat-chaos/krkn/blob/main/docs/installation.md#run-containerized-version) for information on how to run the containerized version of kraken.
|
||||
Refer [instructions](https://krkn-chaos.dev/docs/installation/) for information on how to run the containerized version of kraken.
|
||||
|
||||
|
||||
### Run Custom Kraken Image
|
||||
|
||||
5
containers/compile_dockerfile.sh
Executable file
@@ -0,0 +1,5 @@
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
cd "$SCRIPT_DIR"
|
||||
export KRKNCTL_INPUT=$(cat krknctl-input.json|tr -d "\n")
|
||||
|
||||
envsubst '${KRKNCTL_INPUT}' < Dockerfile.template > Dockerfile
|
||||
439
containers/krknctl-input.json
Normal file
@@ -0,0 +1,439 @@
|
||||
[
|
||||
{
|
||||
"name": "cerberus-enabled",
|
||||
"short_description": "Enable Cerberus",
|
||||
"description": "Enables Cerberus Support",
|
||||
"variable": "CERBERUS_ENABLED",
|
||||
"type": "enum",
|
||||
"default": "False",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "cerberus-url",
|
||||
"short_description": "Cerberus URL",
|
||||
"description": "Cerberus http url",
|
||||
"variable": "CERBERUS_URL",
|
||||
"type": "string",
|
||||
"default": "http://0.0.0.0:8080",
|
||||
"validator": "^(http|https):\/\/.*",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "distribution",
|
||||
"short_description": "Orchestrator distribution",
|
||||
"description": "Selects the orchestrator distribution",
|
||||
"variable": "DISTRIBUTION",
|
||||
"type": "enum",
|
||||
"default": "openshift",
|
||||
"allowed_values": "openshift,kubernetes",
|
||||
"separator": ",",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "krkn-kubeconfig",
|
||||
"short_description": "Krkn kubeconfig path",
|
||||
"description": "Sets the path where krkn will search for kubeconfig (in container)",
|
||||
"variable": "KRKN_KUBE_CONFIG",
|
||||
"type": "string",
|
||||
"default": "/home/krkn/.kube/config",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "wait-duration",
|
||||
"short_description": "Post chaos wait duration",
|
||||
"description": "waits for a certain amount of time after the scenario",
|
||||
"variable": "WAIT_DURATION",
|
||||
"type": "number",
|
||||
"default": "1"
|
||||
},
|
||||
{
|
||||
"name": "iterations",
|
||||
"short_description": "Chaos scenario iterations",
|
||||
"description": "number of times the same chaos scenario will be executed",
|
||||
"variable": "ITERATIONS",
|
||||
"type": "number",
|
||||
"default": "1"
|
||||
},
|
||||
{
|
||||
"name": "daemon-mode",
|
||||
"short_description": "Sets krkn daemon mode",
|
||||
"description": "if set the scenario will execute forever",
|
||||
"variable": "DAEMON_MODE",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "uuid",
|
||||
"short_description": "Sets krkn run uuid",
|
||||
"description": "sets krkn run uuid instead of generating it",
|
||||
"variable": "UUID",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "capture-metrics",
|
||||
"short_description": "Enables metrics capture",
|
||||
"description": "Enables metrics capture",
|
||||
"variable": "CAPTURE_METRICS",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "enable-alerts",
|
||||
"short_description": "Enables cluster alerts check",
|
||||
"description": "Enables cluster alerts check",
|
||||
"variable": "ENABLE_ALERTS",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "alerts-path",
|
||||
"short_description": "Cluster alerts path file (in container)",
|
||||
"description": "Allows to specify a different alert file path",
|
||||
"variable": "ALERTS_PATH",
|
||||
"type": "string",
|
||||
"default": "config/alerts.yaml",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "metrics-path",
|
||||
"short_description": "Cluster metrics path file (in container)",
|
||||
"description": "Allows to specify a different metrics file path",
|
||||
"variable": "METRICS_PATH",
|
||||
"type": "string",
|
||||
"default": "config/metrics-aggregated.yaml",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "enable-es",
|
||||
"short_description": "Enables elastic search data collection",
|
||||
"description": "Enables elastic search data collection",
|
||||
"variable": "ENABLE_ES",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "es-server",
|
||||
"short_description": "Elasticsearch instance URL",
|
||||
"description": "Elasticsearch instance URL",
|
||||
"variable": "ES_SERVER",
|
||||
"type": "string",
|
||||
"default": "http://0.0.0.0",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "es-port",
|
||||
"short_description": "Elasticsearch instance port",
|
||||
"description": "Elasticsearch instance port",
|
||||
"variable": "ES_PORT",
|
||||
"type": "number",
|
||||
"default": "443",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "es-username",
|
||||
"short_description": "Elasticsearch instance username",
|
||||
"description": "Elasticsearch instance username",
|
||||
"variable": "ES_USERNAME",
|
||||
"type": "string",
|
||||
"default": "elastic",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "es-password",
|
||||
"short_description": "Elasticsearch instance password",
|
||||
"description": "Elasticsearch instance password",
|
||||
"variable": "ES_PASSWORD",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "es-verify-certs",
|
||||
"short_description": "Enables elasticsearch TLS certificate verification",
|
||||
"description": "Enables elasticsearch TLS certificate verification",
|
||||
"variable": "ES_VERIFY_CERTS",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "es-metrics-index",
|
||||
"short_description": "Elasticsearch metrics index",
|
||||
"description": "Index name for metrics in Elasticsearch",
|
||||
"variable": "ES_METRICS_INDEX",
|
||||
"type": "string",
|
||||
"default": "krkn-metrics",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "es-alerts-index",
|
||||
"short_description": "Elasticsearch alerts index",
|
||||
"description": "Index name for alerts in Elasticsearch",
|
||||
"variable": "ES_ALERTS_INDEX",
|
||||
"type": "string",
|
||||
"default": "krkn-alerts",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "es-telemetry-index",
|
||||
"short_description": "Elasticsearch telemetry index",
|
||||
"description": "Index name for telemetry in Elasticsearch",
|
||||
"variable": "ES_TELEMETRY_INDEX",
|
||||
"type": "string",
|
||||
"default": "krkn-telemetry",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "check-critical-alerts",
|
||||
"short_description": "Check critical alerts",
|
||||
"description": "Enables checking for critical alerts",
|
||||
"variable": "CHECK_CRITICAL_ALERTS",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-enabled",
|
||||
"short_description": "Enable telemetry",
|
||||
"description": "Enables telemetry support",
|
||||
"variable": "TELEMETRY_ENABLED",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-api-url",
|
||||
"short_description": "Telemetry API URL",
|
||||
"description": "API endpoint for telemetry data",
|
||||
"variable": "TELEMETRY_API_URL",
|
||||
"type": "string",
|
||||
"default": "https://ulnmf9xv7j.execute-api.us-west-2.amazonaws.com/production",
|
||||
"validator": "^(http|https):\/\/.*",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-username",
|
||||
"short_description": "Telemetry username",
|
||||
"description": "Username for telemetry authentication",
|
||||
"variable": "TELEMETRY_USERNAME",
|
||||
"type": "string",
|
||||
"default": "redhat-chaos",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-password",
|
||||
"short_description": "Telemetry password",
|
||||
"description": "Password for telemetry authentication",
|
||||
"variable": "TELEMETRY_PASSWORD",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-prometheus-backup",
|
||||
"short_description": "Prometheus backup for telemetry",
|
||||
"description": "Enables Prometheus backup for telemetry",
|
||||
"variable": "TELEMETRY_PROMETHEUS_BACKUP",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "True",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-full-prometheus-backup",
|
||||
"short_description": "Full Prometheus backup",
|
||||
"description": "Enables full Prometheus backup for telemetry",
|
||||
"variable": "TELEMETRY_FULL_PROMETHEUS_BACKUP",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-backup-threads",
|
||||
"short_description": "Telemetry backup threads",
|
||||
"description": "Number of threads for telemetry backup",
|
||||
"variable": "TELEMETRY_BACKUP_THREADS",
|
||||
"type": "number",
|
||||
"default": "5",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-archive-path",
|
||||
"short_description": "Telemetry archive path",
|
||||
"description": "Path to save telemetry archive",
|
||||
"variable": "TELEMETRY_ARCHIVE_PATH",
|
||||
"type": "string",
|
||||
"default": "/tmp",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-max-retries",
|
||||
"short_description": "Telemetry max retries",
|
||||
"description": "Maximum retries for telemetry operations",
|
||||
"variable": "TELEMETRY_MAX_RETRIES",
|
||||
"type": "number",
|
||||
"default": "0",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-run-tag",
|
||||
"short_description": "Telemetry run tag",
|
||||
"description": "Tag for telemetry run",
|
||||
"variable": "TELEMETRY_RUN_TAG",
|
||||
"type": "string",
|
||||
"default": "chaos",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-group",
|
||||
"short_description": "Telemetry group",
|
||||
"description": "Group name for telemetry data",
|
||||
"variable": "TELEMETRY_GROUP",
|
||||
"type": "string",
|
||||
"default": "default",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-archive-size",
|
||||
"short_description": "Telemetry archive size",
|
||||
"description": "Maximum size for telemetry archives",
|
||||
"variable": "TELEMETRY_ARCHIVE_SIZE",
|
||||
"type": "number",
|
||||
"default": "1000",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-logs-backup",
|
||||
"short_description": "Telemetry logs backup",
|
||||
"description": "Enables logs backup for telemetry",
|
||||
"variable": "TELEMETRY_LOGS_BACKUP",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-filter-pattern",
|
||||
"short_description": "Telemetry filter pattern",
|
||||
"description": "Filter pattern for telemetry logs",
|
||||
"variable": "TELEMETRY_FILTER_PATTERN",
|
||||
"type": "string",
|
||||
"default": "[\"(\\\\w{3}\\\\s\\\\d{1,2}\\\\s\\\\d{2}:\\\\d{2}:\\\\d{2}\\\\.\\\\d+).+\",\"kinit (\\\\d+/\\\\d+/\\\\d+\\\\s\\\\d{2}:\\\\d{2}:\\\\d{2})\\\\s+\",\"(\\\\d{4}-\\\\d{2}-\\\\d{2}T\\\\d{2}:\\\\d{2}:\\\\d{2}\\\\.\\\\d+Z).+\"]",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-cli-path",
|
||||
"short_description": "Telemetry CLI path (oc)",
|
||||
"description": "Path to telemetry CLI tool (oc)",
|
||||
"variable": "TELEMETRY_CLI_PATH",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "telemetry-events-backup",
|
||||
"short_description": "Telemetry events backup",
|
||||
"description": "Enables events backup for telemetry",
|
||||
"variable": "TELEMETRY_EVENTS_BACKUP",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "True",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "health-check-interval",
|
||||
"short_description": "Heath check interval",
|
||||
"description": "How often to check the health check urls",
|
||||
"variable": "HEALTH_CHECK_INTERVAL",
|
||||
"type": "number",
|
||||
"default": "2",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "health-check-url",
|
||||
"short_description": "Health check url",
|
||||
"description": "Url to check the health of",
|
||||
"variable": "HEALTH_CHECK_URL",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "health-check-auth",
|
||||
"short_description": "Health check authentication tuple",
|
||||
"description": "Authentication tuple to authenticate into health check URL",
|
||||
"variable": "HEALTH_CHECK_AUTH",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "health-check-bearer-token",
|
||||
"short_description": "Health check bearer token",
|
||||
"description": "Bearer token to authenticate into health check URL",
|
||||
"variable": "HEALTH_CHECK_BEARER_TOKEN",
|
||||
"type": "string",
|
||||
"default": "",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "health-check-exit",
|
||||
"short_description": "Health check exit on failure",
|
||||
"description": "Exit on failure when health check URL is not able to connect",
|
||||
"variable": "HEALTH_CHECK_EXIT_ON_FAILURE",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "health-check-verify",
|
||||
"short_description": "SSL Verification of health check url",
|
||||
"description": "SSL Verification to authenticate into health check URL",
|
||||
"variable": "HEALTH_CHECK_VERIFY",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
},
|
||||
{
|
||||
"name": "krkn-debug",
|
||||
"short_description": "Krkn debug mode",
|
||||
"description": "Enables debug mode for Krkn",
|
||||
"variable": "KRKN_DEBUG",
|
||||
"type": "enum",
|
||||
"allowed_values": "True,False",
|
||||
"separator": ",",
|
||||
"default": "False",
|
||||
"required": "false"
|
||||
}
|
||||
]
|
||||
@@ -1,48 +0,0 @@
|
||||
## SLOs validation
|
||||
|
||||
In addition to checking the health status and recovery, pass/fail evaluation based on metrics captured from the cluster is important. Kraken supports:
|
||||
|
||||
### Checking for critical alerts post chaos
|
||||
If enabled, the check runs at the end of each scenario (post chaos) and Kraken exits if critical alerts are firing, to allow the user to debug. You can enable it in the config:
|
||||
|
||||
```
|
||||
performance_monitoring:
|
||||
check_critical_alerts: False # When enabled will check prometheus for critical alerts firing post chaos
|
||||
```
|
||||
|
||||
### Validation and alerting based on the queries defined by the user during chaos
|
||||
Takes PromQL queries as input and modifies the return code of the run to determine pass/fail. It is especially useful for automated runs in CI where the user won't be able to monitor the system. This feature can be enabled in the [config](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml) by setting the following:
|
||||
|
||||
```
|
||||
performance_monitoring:
|
||||
prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
|
||||
prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
|
||||
enable_alerts: True # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error.
|
||||
alert_profile: config/alerts.yaml # Path to alert profile with the prometheus queries.
|
||||
```
|
||||
|
||||
#### Alert profile
|
||||
A couple of [alert profiles](https://github.com/redhat-chaos/krkn/tree/main/config), such as [alerts.yaml](https://github.com/redhat-chaos/krkn/blob/main/config/alerts.yaml), are shipped by default and can be tweaked to add more queries to alert on. The user can provide a URL or a path to the file in the [config](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml). The following are a few alert examples:
|
||||
|
||||
```
|
||||
- expr: avg_over_time(histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[2m]))[5m:]) > 0.01
|
||||
description: 5 minutes avg. etcd fsync latency on {{$labels.pod}} higher than 10ms {{$value}}
|
||||
severity: error
|
||||
|
||||
- expr: avg_over_time(histogram_quantile(0.99, rate(etcd_network_peer_round_trip_time_seconds_bucket[5m]))[5m:]) > 0.1
|
||||
description: 5 minutes avg. etcd network peer round trip on {{$labels.pod}} higher than 100ms {{$value}}
|
||||
severity: info
|
||||
|
||||
- expr: increase(etcd_server_leader_changes_seen_total[2m]) > 0
|
||||
description: etcd leader changes observed
|
||||
severity: critical
|
||||
```
|
||||
|
||||
Kube-burner supports setting a severity for each alert, with each severity having a different effect:
|
||||
|
||||
```
|
||||
info: Prints an info message with the alarm description to stdout. By default all expressions have this severity.
|
||||
warning: Prints a warning message with the alarm description to stdout.
|
||||
error: Prints an error message with the alarm description to stdout and makes kube-burner rc = 1
critical: Prints a fatal message with the alarm description to stdout and exits execution immediately with rc != 0
|
||||
```
|
||||
@@ -1 +0,0 @@
|
||||
theme: jekyll-theme-cayman
|
||||
@@ -1,17 +0,0 @@
|
||||
### Application outages
|
||||
Scenario to block the traffic (Ingress/Egress) of an application matching the labels for the specified duration, to understand the behavior of the service, and of other services that depend on it, during downtime. This helps with planning the requirements accordingly, be it improving the timeouts or tweaking the alerts, etc.
|
||||
|
||||
##### Sample scenario config
|
||||
```
|
||||
application_outage: # Scenario to create an outage of an application by blocking traffic
|
||||
duration: 600 # Duration in seconds after which the routes will be accessible
|
||||
namespace: <namespace-with-application> # Namespace to target - all application routes will go inaccessible if pod selector is empty
|
||||
pod_selector: {app: foo} # Pods to target
|
||||
block: [Ingress, Egress] # It can be Ingress or Egress or Ingress, Egress
|
||||
```
|
||||
|
||||
##### Debugging steps in case of failures
|
||||
Kraken creates a network policy blocking the ingress/egress traffic to create an outage. If Kraken fails before reverting the network policy, you can delete it manually by executing the following command to stop the outage:
|
||||
```
|
||||
$ oc delete networkpolicy/kraken-deny -n <targeted-namespace>
|
||||
```
|
||||
@@ -1,70 +0,0 @@
|
||||
## Arcaflow Scenarios
|
||||
Arcaflow is a workflow engine in development that provides the ability to execute workflow steps in sequence, in parallel, repeatedly, etc. The main difference from competitors such as Netflix Conductor is the ability to run ad-hoc workflows without requiring an infrastructure setup.
|
||||
|
||||
The engine uses containers to execute plugins and runs them either locally in Docker/Podman or remotely on a Kubernetes cluster. The workflow system is strongly typed and allows for generating JSON schema and OpenAPI documents for all data formats involved.
|
||||
|
||||
### Available Scenarios
|
||||
#### Hog scenarios:
|
||||
- [CPU Hog](arcaflow_scenarios/cpu_hog.md)
|
||||
- [Memory Hog](arcaflow_scenarios/memory_hog.md)
|
||||
- [I/O Hog](arcaflow_scenarios/io_hog.md)
|
||||
|
||||
|
||||
### Prerequisites
|
||||
Arcaflow supports three deployment technologies:
|
||||
- Docker
|
||||
- Podman
|
||||
- Kubernetes
|
||||
|
||||
#### Docker
|
||||
In order to run Arcaflow Scenarios with the Docker deployer, be sure that:
|
||||
- Docker is correctly installed in your Operating System (to find instructions on how to install docker please refer to [Docker Documentation](https://www.docker.com/))
|
||||
- The Docker daemon is running
|
||||
|
||||
#### Podman
|
||||
The podman deployer is built around the podman CLI and does not necessarily need to run alongside the podman daemon.
To run Arcaflow Scenarios on your operating system, be sure that:
|
||||
- podman is correctly installed in your Operating System (to find instructions on how to install podman refer to [Podman Documentation](https://podman.io/))
|
||||
- the podman CLI is in your shell PATH
|
||||
|
||||
#### Kubernetes
|
||||
The kubernetes deployer integrates the Kubernetes API client directly and only needs a valid kubeconfig file and a reachable Kubernetes/OpenShift cluster.
|
||||
|
||||
### Usage
|
||||
|
||||
To enable arcaflow scenarios, edit the kraken config file: go to the `kraken -> chaos_scenarios` section of the yaml structure,
add a new element to the list named `arcaflow_scenarios`, and then add the desired scenario
pointing to the `input.yaml` file.
|
||||
```
|
||||
kraken:
|
||||
...
|
||||
chaos_scenarios:
|
||||
- arcaflow_scenarios:
|
||||
- scenarios/arcaflow/cpu-hog/input.yaml
|
||||
```
|
||||
|
||||
#### input.yaml
|
||||
The implemented scenarios can be found in the *scenarios/arcaflow/<scenario_name>* folder.
The entrypoint of each scenario is the *input.yaml* file.
This file contains all the options to set up the scenario according to the desired target.
|
||||
### config.yaml
|
||||
The arcaflow config file. Here you can set the arcaflow deployer and the arcaflow log level.
|
||||
The supported deployers are:
|
||||
- Docker
|
||||
- Podman (podman daemon not needed, suggested option)
|
||||
- Kubernetes
|
||||
|
||||
The supported log levels are:
|
||||
- debug
|
||||
- info
|
||||
- warning
|
||||
- error
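A minimal sketch of what such a config.yaml might contain is shown below. The key names are an assumption for illustration only and may differ between Arcaflow engine versions, so verify against the config.yaml shipped with each scenario:

```
# Sketch only -- key names are assumptions, verify against the shipped config.yaml
log:
  level: info          # debug | info | warning | error
deployer:
  type: podman         # docker | podman | kubernetes
```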
|
||||
### workflow.yaml
|
||||
This file contains the steps that will be executed to perform the scenario against the target.
|
||||
Each step is represented by a container that will be executed by the deployer, together with its options.
|
||||
Note that we provide the scenarios as a template, but they can be manipulated to define more complex workflows.
|
||||
For more details regarding the arcaflow workflow architecture and syntax, refer to the [Arcaflow Documentation](https://arcalot.io/arcaflow/).
|
||||
|
||||
This edit is no longer in the quay image.
Working on a fix in ticket: https://issues.redhat.com/browse/CHAOS-494
This will affect all versions 4.12 and higher of OpenShift.
|
||||
@@ -1,19 +0,0 @@
|
||||
# CPU Hog
|
||||
This scenario is based on the arcaflow [arcaflow-plugin-stressng](https://github.com/arcalot/arcaflow-plugin-stressng) plugin.
|
||||
The purpose of this scenario is to create CPU pressure on a particular node of the Kubernetes/OpenShift cluster for a given time span.
|
||||
To enable this plugin add the pointer to the scenario input file `scenarios/arcaflow/cpu-hog/input.yaml` as described in the
|
||||
Usage section.
|
||||
This scenario takes a list of objects named `input_list` with the following properties:
|
||||
|
||||
- **kubeconfig :** *string* the kubeconfig needed by the deployer to deploy the sysbench plugin in the target cluster
|
||||
- **namespace :** *string* the namespace where the scenario container will be deployed
|
||||
**Note:** this parameter will be automatically filled by kraken if the `kubeconfig_path` property is correctly set
|
||||
- **node_selector :** *key-value map* the node label that will be used as `nodeSelector` by the pod to target a specific cluster node
|
||||
- **duration :** *string* stop stress test after N seconds. One can also specify the units of time in seconds, minutes, hours, days or years with the suffix s, m, h, d or y.
|
||||
- **cpu_count :** *int* the number of CPU cores to be used (0 means all)
|
||||
- **cpu_method :** *string* a fine-grained control of which cpu stressors to use (ackermann, cfloat etc. see [manpage](https://manpages.org/sysbench) for all the cpu_method options)
|
||||
- **cpu_load_percentage :** *int* the CPU load by percentage
|
||||
|
||||
To perform several load tests simultaneously in the same run (e.g. stress two or more nodes in the same run), add another item
to the `input_list` with the same properties (and possibly different values, e.g. different node_selectors
to schedule the pods on different nodes). To reduce (or increase) the parallelism, change the `parallelism` value in the `workload.yaml` file.
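For illustration, a hypothetical `input_list` entry based on the properties above might look like the sketch below; all values are placeholders, not recommendations, and the surrounding layout should be checked against the shipped `scenarios/arcaflow/cpu-hog/input.yaml`:

```
# Hypothetical sketch -- values are placeholders
input_list:
  - kubeconfig: ""                        # filled in automatically by kraken when kubeconfig_path is set
    namespace: default
    node_selector:
      node-role.kubernetes.io/worker: ""
    duration: 60s
    cpu_count: 0                          # 0 means all cores
    cpu_method: ackermann
    cpu_load_percentage: 80
```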
|
||||
@@ -1,21 +0,0 @@
|
||||
# I/O Hog
|
||||
This scenario is based on the arcaflow [arcaflow-plugin-stressng](https://github.com/arcalot/arcaflow-plugin-stressng) plugin.
|
||||
The purpose of this scenario is to create disk pressure on a particular node of the Kubernetes/OpenShift cluster for a given time span.
|
||||
The scenario allows attaching a node path to the pod as a `hostPath` volume.
|
||||
To enable this plugin add the pointer to the scenario input file `scenarios/arcaflow/io-hog/input.yaml` as described in the
|
||||
Usage section.
|
||||
This scenario takes a list of objects named `input_list` with the following properties:
|
||||
|
||||
- **kubeconfig :** *string* the kubeconfig needed by the deployer to deploy the sysbench plugin in the target cluster
|
||||
- **namespace :** *string* the namespace where the scenario container will be deployed
|
||||
**Note:** this parameter will be automatically filled by kraken if the `kubeconfig_path` property is correctly set
|
||||
- **node_selector :** *key-value map* the node label that will be used as `nodeSelector` by the pod to target a specific cluster node
|
||||
- **duration :** *string* stop stress test after N seconds. One can also specify the units of time in seconds, minutes, hours, days or years with the suffix s, m, h, d or y.
|
||||
- **target_pod_folder :** *string* the path in the pod where the volume is mounted
|
||||
- **target_pod_volume :** *object* the `hostPath` volume definition in the [Kubernetes/OpenShift](https://docs.openshift.com/container-platform/3.11/install_config/persistent_storage/using_hostpath.html) format, that will be attached to the pod as a volume
|
||||
- **io_write_bytes :** *string* writes N bytes for each hdd process. The size can be expressed as % of free space on the file system or in units of Bytes, KBytes, MBytes and GBytes using the suffix b, k, m or g
|
||||
- **io_block_size :** *string* size of each write in bytes. Size can be from 1 byte to 4m.
|
||||
|
||||
To perform several load tests simultaneously in the same run (e.g. stress two or more nodes in the same run), add another item
to the `input_list` with the same properties (and possibly different values, e.g. different node_selectors
to schedule the pods on different nodes). To reduce (or increase) the parallelism, change the `parallelism` value in the `workload.yaml` file.
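For illustration, a hypothetical `input_list` entry based on the properties above might look like the sketch below; all values are placeholders, and in particular the structure of `target_pod_volume` is an assumption (a generic Kubernetes hostPath definition), so check the shipped `scenarios/arcaflow/io-hog/input.yaml`:

```
# Hypothetical sketch -- values are placeholders
input_list:
  - kubeconfig: ""                        # filled in automatically by kraken when kubeconfig_path is set
    namespace: default
    node_selector:
      node-role.kubernetes.io/worker: ""
    duration: 60s
    target_pod_folder: /hog-data
    target_pod_volume:                    # generic hostPath volume definition (structure assumed)
      name: node-volume
      hostPath:
        path: /tmp
    io_write_bytes: 10m
    io_block_size: 1m
```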
|
||||
@@ -1,18 +0,0 @@
|
||||
# Memory Hog
|
||||
This scenario is based on the arcaflow [arcaflow-plugin-stressng](https://github.com/arcalot/arcaflow-plugin-stressng) plugin.
|
||||
The purpose of this scenario is to create Virtual Memory pressure on a particular node of the Kubernetes/OpenShift cluster for a given time span.
|
||||
To enable this plugin add the pointer to the scenario input file `scenarios/arcaflow/memory-hog/input.yaml` as described in the
|
||||
Usage section.
|
||||
This scenario takes a list of objects named `input_list` with the following properties:
|
||||
|
||||
- **kubeconfig :** *string* the kubeconfig needed by the deployer to deploy the sysbench plugin in the target cluster
|
||||
- **namespace :** *string* the namespace where the scenario container will be deployed
|
||||
**Note:** this parameter will be automatically filled by kraken if the `kubeconfig_path` property is correctly set
|
||||
- **node_selector :** *key-value map* the node label that will be used as `nodeSelector` by the pod to target a specific cluster node
|
||||
- **duration :** *string* stop stress test after N seconds. One can also specify the units of time in seconds, minutes, hours, days or years with the suffix s, m, h, d or y.
|
||||
- **vm_bytes :** *string* N bytes per vm process or percentage of memory used (using the % symbol). The size can be expressed in units of Bytes, KBytes, MBytes and GBytes using the suffix b, k, m or g.
|
||||
- **vm_workers :** *int* Number of VM stressors to be run (0 means 1 stressor per CPU)
|
||||
|
||||
To perform several load tests simultaneously in the same run (e.g. stress two or more nodes in the same run), add another item
to the `input_list` with the same properties (and possibly different values, e.g. different node_selectors
to schedule the pods on different nodes). To reduce (or increase) the parallelism, change the `parallelism` value in the `workload.yaml` file.
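For illustration, a hypothetical `input_list` entry based on the properties above might look like the sketch below; all values are placeholders, and the layout should be checked against the shipped `scenarios/arcaflow/memory-hog/input.yaml`:

```
# Hypothetical sketch -- values are placeholders
input_list:
  - kubeconfig: ""                        # filled in automatically by kraken when kubeconfig_path is set
    namespace: default
    node_selector:
      node-role.kubernetes.io/worker: ""
    duration: 60s
    vm_bytes: 90%
    vm_workers: 2
```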
|
||||
@@ -1,87 +0,0 @@
|
||||
Supported Cloud Providers:
|
||||
|
||||
- [AWS](#aws)
|
||||
- [GCP](#gcp)
|
||||
- [Openstack](#openstack)
|
||||
- [Azure](#azure)
|
||||
- [Alibaba](#alibaba)
|
||||
- [VMware](#vmware)
|
||||
- [IBMCloud](#ibmcloud)
|
||||
|
||||
## AWS
|
||||
|
||||
**NOTE**: For clusters with AWS make sure [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) is installed and properly [configured](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html) using an AWS account
|
||||
|
||||
## GCP
|
||||
**NOTE**: For clusters with GCP make sure [GCP CLI](https://cloud.google.com/sdk/docs/install#linux) is installed.
|
||||
|
||||
A google service account is required to give proper authentication to GCP for node actions. See [here](https://cloud.google.com/docs/authentication/getting-started) for how to create a service account.
|
||||
|
||||
**NOTE**: A user with 'resourcemanager.projects.setIamPolicy' permission is required to grant project-level permissions to the service account.
|
||||
|
||||
After creating the service account you will need to enable the account using the following: ```export GOOGLE_APPLICATION_CREDENTIALS="<serviceaccount.json>"```
|
||||
|
||||
## Openstack
|
||||
|
||||
**NOTE**: For clusters with Openstack Cloud, make sure to create and source the [OPENSTACK RC file](https://docs.openstack.org/newton/user-guide/common/cli-set-environment-variables-using-openstack-rc.html) to set the OPENSTACK environment variables on the server where Kraken runs.
|
||||
|
||||
## Azure
|
||||
|
||||
**NOTE**: You will need to create a service principal and give it the correct access, see [here](https://docs.openshift.com/container-platform/4.5/installing/installing_azure/installing-azure-account.html) for creating the service principal and setting the proper permissions.
|
||||
|
||||
To run properly, the service principal requires the “Azure Active Directory Graph/Application.ReadWrite.OwnedBy” API permission and the “User Access Administrator” role.
|
||||
|
||||
Before running you will need to set the following:
|
||||
1. ```export AZURE_SUBSCRIPTION_ID=<subscription_id>```
|
||||
|
||||
2. ```export AZURE_TENANT_ID=<tenant_id>```
|
||||
|
||||
3. ```export AZURE_CLIENT_SECRET=<client secret>```
|
||||
|
||||
4. ```export AZURE_CLIENT_ID=<client id>```
|
||||
|
||||
## Alibaba
|
||||
|
||||
See the [Installation guide](https://www.alibabacloud.com/help/en/alibaba-cloud-cli/latest/installation-guide) to install alicloud cli.
|
||||
|
||||
1. ```export ALIBABA_ID=<access_key_id>```
|
||||
|
||||
2. ```export ALIBABA_SECRET=<access key secret>```
|
||||
|
||||
3. ```export ALIBABA_REGION_ID=<region id>```
|
||||
|
||||
Refer to [region and zone page](https://www.alibabacloud.com/help/en/elastic-compute-service/latest/regions-and-zones#concept-2459516) to get the region id for the region you are running on.
|
||||
|
||||
Set cloud_type to either alibaba or alicloud in your node scenario yaml file.
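For example, a node scenario file could select the provider as in the sketch below; the keys other than `cloud_type` follow the generic node scenario layout and are shown only for context (values are placeholders):

```
node_scenarios:
  - actions:
      - node_stop_start_scenario
    node_name: <node-name>
    instance_count: 1
    timeout: 120
    cloud_type: alibaba        # or alicloud
```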
|
||||
|
||||
## VMware
|
||||
|
||||
Set the following environment variables
|
||||
|
||||
1. ```export VSPHERE_IP=<vSphere_client_IP_address>```
|
||||
|
||||
2. ```export VSPHERE_USERNAME=<vSphere_client_username>```
|
||||
|
||||
3. ```export VSPHERE_PASSWORD=<vSphere_client_password>```
|
||||
|
||||
These are the credentials that you would normally use to access the vSphere client.
|
||||
|
||||
|
||||
## IBMCloud
|
||||
If no API key with the proper VPC resource permissions is set up, create the following:
|
||||
* Access group
|
||||
* Service id with the following access
|
||||
* With policy **VPC Infrastructure Services**
|
||||
* Resources = All
|
||||
* Roles:
|
||||
* Editor
|
||||
* Administrator
|
||||
* Operator
|
||||
* Viewer
|
||||
* API Key
|
||||
|
||||
Set the following environment variables
|
||||
|
||||
1. ```export IBMC_URL=https://<region>.iaas.cloud.ibm.com/v1```
|
||||
|
||||
2. ```export IBMC_APIKEY=<ibmcloud_api_key>```
|
||||
@@ -1,18 +0,0 @@
|
||||
#### Kubernetes cluster shut down scenario
|
||||
Scenario to shut down all the nodes, including the masters, and restart them after the specified duration. The cluster shut down scenario can be injected by placing the shut_down config file under the cluster_shut_down_scenario option in the kraken config. Refer to the [cluster_shut_down_scenario](https://github.com/krkn-chaos/krkn/blob/main/scenarios/cluster_shut_down_scenario.yml) config file.
|
||||
|
||||
Refer to [cloud setup](cloud_setup.md) to configure your cli properly for the cloud provider of the cluster you want to shut down.
|
||||
|
||||
Current accepted cloud types:
|
||||
* [Azure](cloud_setup.md#azure)
|
||||
* [GCP](cloud_setup.md#gcp)
|
||||
* [AWS](cloud_setup.md#aws)
|
||||
* [Openstack](cloud_setup.md#openstack)
|
||||
|
||||
|
||||
```
|
||||
cluster_shut_down_scenario: # Scenario to stop all the nodes for specified duration and restart the nodes.
|
||||
runs: 1 # Number of times to execute the cluster_shut_down scenario.
|
||||
shut_down_duration: 120 # Duration in seconds to shut down the cluster.
|
||||
cloud_type: aws # Cloud type on which Kubernetes/OpenShift runs.
|
||||
```
|
||||
@@ -1,65 +0,0 @@
|
||||
### Config
|
||||
Set the scenarios to inject and the tunings like duration to wait between each scenario in the config file located at [config/config.yaml](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml).
|
||||
|
||||
**NOTE**: [config](https://github.com/redhat-chaos/krkn/blob/main/config/config_performance.yaml) can be used if leveraging the [automated way](https://github.com/redhat-chaos/krkn#setting-up-infrastructure-dependencies) to install the infrastructure pieces.
|
||||
|
||||
Config components:
|
||||
* [Kraken](#kraken)
|
||||
* [Cerberus](#cerberus)
|
||||
* [Performance Monitoring](#performance-monitoring)
|
||||
* [Tunings](#tunings)
|
||||
|
||||
# Kraken
|
||||
This section defines the scenarios and the data specific to the chaos run.
|
||||
|
||||
## Distribution
|
||||
Either **openshift** or **kubernetes** depending on the type of cluster you want to run chaos on.
|
||||
The prometheus url/route and bearer token are automatically obtained in case of OpenShift; please set them when the distribution is Kubernetes.
|
||||
|
||||
## Exit on failure
|
||||
**exit_on_failure**: Exit when a post action check or cerberus run fails
|
||||
|
||||
## Publish kraken status
|
||||
**publish_kraken_status**: When enabled, the kraken status can be accessed at http://0.0.0.0:8081 (or at whatever signal_address and port you set in the signal address section)
|
||||
**signal_state**: State you want kraken to start in; when set to PAUSE, kraken will wait for the RUN signal before running a chaos iteration. Refer to [signal.md](signal.md) for more details
|
||||
|
||||
## Signal Address
|
||||
**signal_address**: Address to listen on and post the signal state to
**port**: Port to listen on and post the signal state to
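As an illustration only, the settings described above map onto the config roughly as in the sketch below; the values are placeholders and config/config.yaml remains the authoritative layout:

```
kraken:
  distribution: openshift          # or kubernetes
  exit_on_failure: False
  publish_kraken_status: True
  signal_state: RUN                # or PAUSE to wait for the RUN signal
  signal_address: 0.0.0.0
  port: 8081
```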
|
||||
|
||||
## Chaos Scenarios
|
||||
|
||||
**chaos_scenarios**: List of different types of chaos scenarios you want to run with paths to their specific yaml file configurations
|
||||
|
||||
If a scenario has a post action check script, it will be run before and after each scenario to validate that the component under test starts and ends in the same state.
|
||||
|
||||
Currently the scenarios are run one after another (in sequence), and the run will exit if one of the scenarios fails, without moving on to the next one.
|
||||
|
||||
Chaos scenario types:
|
||||
- container_scenarios
|
||||
- plugin_scenarios
|
||||
- node_scenarios
|
||||
- time_scenarios
|
||||
- cluster_shut_down_scenarios
|
||||
- namespace_scenarios
|
||||
- zone_outages
|
||||
- application_outages
|
||||
- pvc_scenarios
|
||||
- network_chaos
|
||||
|
||||
|
||||
# Cerberus
|
||||
Parameters for enabling cerberus checks at the end of each executed scenario. The given url will be pinged after the scenario and the post action check have completed for each scenario and iteration.
|
||||
**cerberus_enabled**: Enable this when cerberus has already been installed
**cerberus_url**: When cerberus_enabled is set to True, provide the url where cerberus publishes the go/no-go signal
**check_applicaton_routes**: When enabled, looks for application unavailability using the routes specified in the cerberus config and fails the run
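A minimal sketch of this section, using the keys described above (values are placeholders):

```
cerberus:
  cerberus_enabled: True
  cerberus_url: http://0.0.0.0:8080
  check_applicaton_routes: False
```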
|
||||
|
||||
|
||||
# Performance Monitoring
|
||||
There are 2 main sections defined in this part of the config: [metrics](metrics.md) and [alerts](alerts.md). Read more about each of these configurations in their respective docs.
|
||||
|
||||
# Tunings
|
||||
**wait_duration**: Duration to wait between each chaos scenario
|
||||
**iterations**: Number of times to execute the scenarios
|
||||
**daemon_mode**: True or False; if True, iterations are set to infinity, which means kraken will cause chaos forever and the number of iterations is ignored
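A minimal sketch of this section, assuming the keys match the names above (values are placeholders):

```
tunings:
  wait_duration: 60
  iterations: 1
  daemon_mode: False
```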
|
||||
|
||||
@@ -1,40 +0,0 @@
|
||||
### Container Scenarios
|
||||
Kraken uses the `oc exec` command to `kill` specific containers in a pod.
|
||||
This can be based on the pods namespace or labels. If you know the exact object you want to kill, you can also specify the specific container name or pod name in the scenario yaml file.
|
||||
These scenarios are in a simple yaml format that you can manipulate to run your specific tests or use the pre-existing scenarios to see how it works.
|
||||
|
||||
#### Example Config
|
||||
The following are the components of Kubernetes for which a basic chaos scenario config exists today.
|
||||
|
||||
```
|
||||
scenarios:
|
||||
- name: "<name of scenario>"
|
||||
namespace: "<specific namespace>" # can specify "*" if you want to find in all namespaces
|
||||
label_selector: "<label of pod(s)>"
|
||||
container_name: "<specific container name>" # This is optional, can take out and will kill all containers in all pods found under namespace and label
|
||||
pod_names: # This is optional, can take out and will select all pods with given namespace and label
|
||||
- <pod_name>
|
||||
count: <number of containers to disrupt, default=1>
|
||||
action: <kill signal to run. For example 1 ( hang up ) or 9. Default is set to 1>
|
||||
expected_recovery_time: <number of seconds to wait for container to be running again> (defaults to 120seconds)
|
||||
```
|
||||
|
||||
#### Post Action
|
||||
In all scenarios we run a post chaos check to wait for and verify that the specific component recovers.
|
||||
|
||||
Here there are two options:
|
||||
1. Pass a custom script in the main config scenario list that will run before the chaos and verify that its output matches after the chaos scenario.
|
||||
|
||||
See [scenarios/post_action_etcd_container.py](https://github.com/krkn-chaos/krkn/blob/main/scenarios/post_action_etcd_container.py) for an example.
|
||||
```
|
||||
- container_scenarios: # List of chaos pod scenarios to load.
|
||||
- - scenarios/container_etcd.yml
|
||||
- scenarios/post_action_etcd_container.py
|
||||
```
|
||||
|
||||
2. Allow kraken to wait and check the killed containers until they become ready again. Kraken keeps a list of the specific
|
||||
containers that were killed as well as the namespaces and pods to verify all containers that were affected recover properly.
|
||||
|
||||
```
|
||||
expected_recovery_time: <seconds to wait for container to recover>
|
||||
```
|
||||
@@ -1,95 +0,0 @@
|
||||
# How to contribute
|
||||
|
||||
Contributions are always appreciated.
|
||||
|
||||
How to:
|
||||
* [Submit Pull Request](#pull-request)
|
||||
* [Fix Formatting](#fix-formatting)
|
||||
* [Squash Commits](#squash-commits)
|
||||
* [Rebase Upstream](#rebase-with-upstream)
|
||||
|
||||
## Pull request
|
||||
|
||||
In order to submit a change or a PR, please fork the project and follow these instructions:
|
||||
```bash
|
||||
$ git clone https://github.com/<me>/krkn
|
||||
$ cd krkn
|
||||
$ git checkout -b <branch_name>
|
||||
$ <make change>
|
||||
$ git add <changes>
|
||||
$ git commit -a
|
||||
$ <insert good message>
|
||||
$ git push
|
||||
```
|
||||
|
||||
## Fix Formatting
|
||||
Kraken uses the [pre-commit](https://pre-commit.com) framework to maintain code linting and Python code styling.
|
||||
The CI runs the pre-commit check on each pull request.
|
||||
We encourage our contributors to follow the same pattern while contributing to the code.
|
||||
|
||||
The pre-commit configuration file is present in the repository `.pre-commit-config.yaml`.
|
||||
It contains the different code styling and linting guides which we use for the application.
|
||||
|
||||
The following command can be used to run the pre-commit:
|
||||
`pre-commit run --all-files`
|
||||
|
||||
If pre-commit is not installed in your system, it can be installed with `pip install pre-commit`.
|
||||
|
||||
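For example, a typical local setup looks like this (standard pre-commit usage):

```bash
pip install pre-commit       # install the framework if it is not already present
pre-commit install           # optional: run the configured hooks automatically on every commit
pre-commit run --all-files   # run all hooks against the whole tree
```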
## Squash Commits
|
||||
If there are multiple commits, please rebase/squash them before creating the PR:
|
||||
|
||||
```bash
|
||||
$ git checkout <my-working-branch>
|
||||
$ git rebase -i HEAD~<num_of_commits_to_merge>
|
||||
-OR-
|
||||
$ git rebase -i <commit_id_of_first_change_commit>
|
||||
```
|
||||
|
||||
In the interactive rebase screen, set the first commit to `pick`, and all others to `squash`, or whatever else you may need to do.
|
||||
|
||||
|
||||
Push your rebased commits (you may need to force), then issue your PR.
|
||||
|
||||
```
|
||||
$ git push origin <my-working-branch> --force
|
||||
```
|
||||
|
||||
## Rebase with Upstream
|
||||
|
||||
If changes go into the main repository while you're working on your code it is best to rebase your code with the
|
||||
upstream, so you stay up to date with all changes and fix any conflicting code changes.
|
||||
|
||||
If not already configured, set the upstream URL for kraken.
|
||||
```
|
||||
git remote add upstream https://github.com/krkn-chaos/krkn.git
|
||||
```
|
||||
|
||||
Rebase onto the upstream master branch.
|
||||
```
|
||||
git fetch upstream
|
||||
git rebase upstream/master
|
||||
git push origin <branch_name> --force
|
||||
```
|
||||
|
||||
If any errors occur, Git will list the files that have merge conflicts.
Edit the files with the code you want to keep, then work through the conflicts as follows:
1. `vi <file(s)>` (or your preferred editor)
2. Resolve the merge conflicts using the command line (see GitHub's guide on resolving a merge conflict using the command line)
3. `git add <all files you edited>`
4. `git rebase --continue`
5. Repeat steps 2 through 4 until the rebase completes
6. `git status` (this will also tell you if you have other files to edit)
7. `git push origin <branch_name> --force` (push the changes to the GitHub remote)
|
||||
|
||||
|
||||
Merge Conflicts Example
|
||||
```
|
||||
1. git rebase upstream/kraken
|
||||
2. vi run_kraken.py [edit at the indicated places, get rid of arrowed lines and dashes, and apply correct changes]
|
||||
3. git add run_kraken.py
|
||||
4. git rebase --continue
|
||||
5. repeat 2-4 until done
|
||||
6. git status <this will also tell you if you have other files to edit>
|
||||
7. git push origin <branch_name> --force [push the changes to github remote]
|
||||
```
|
||||
@@ -1,51 +0,0 @@
|
||||
## Getting Started Running Chaos Scenarios
|
||||
|
||||
#### Adding New Scenarios
|
||||
Adding a new scenario is as simple as adding a new config file under [scenarios directory](https://github.com/redhat-chaos/krkn/tree/main/scenarios) and defining it in the main kraken [config](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml#L8).
|
||||
You can either copy an existing yaml file and make it your own, or fill in one of the templates below to suit your needs.
|
||||
|
||||
### Templates
|
||||
#### Pod Scenario Yaml Template
|
||||
For example, to add a pod-level scenario for a new application, refer to the sample scenario below to see which fields are necessary and what to add in each location:
|
||||
```
|
||||
# yaml-language-server: $schema=../plugin.schema.json
|
||||
- id: kill-pods
|
||||
config:
|
||||
namespace_pattern: ^<namespace>$
|
||||
label_selector: <pod label>
|
||||
kill: <number of pods to kill>
|
||||
krkn_pod_recovery_time: <expected time for the pod to become ready>
|
||||
```
|
||||
|
||||
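A filled-in example, assuming a hypothetical application running in the namespace `my-app` with the label `app=nginx`:

```yaml
# yaml-language-server: $schema=../plugin.schema.json
- id: kill-pods
  config:
    namespace_pattern: ^my-app$   # hypothetical namespace
    label_selector: app=nginx     # hypothetical pod label
    kill: 1
    krkn_pod_recovery_time: 120
```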
#### Node Scenario Yaml Template
|
||||
|
||||
```
|
||||
node_scenarios:
|
||||
- actions: # Node chaos scenarios to be injected.
|
||||
- <chaos scenario>
|
||||
- <chaos scenario>
|
||||
node_name: <node name> # Can be left blank.
|
||||
label_selector: <node label>
|
||||
instance_kill_count: <number of nodes on which to perform action>
|
||||
timeout: <duration to wait for completion>
|
||||
cloud_type: <cloud provider>
|
||||
```
|
||||
|
||||
|
||||
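For instance, a filled-in node scenario targeting one AWS worker node might look like the sketch below; the label, timeout, and action are illustrative and follow the template field names above.

```yaml
node_scenarios:
  - actions:
      - node_stop_start_scenario
    node_name:                                      # left blank, node selected via label
    label_selector: node-role.kubernetes.io/worker
    instance_kill_count: 1
    timeout: 360
    cloud_type: aws
```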
#### Time Chaos Scenario Template
|
||||
```
|
||||
time_scenarios:
|
||||
- action: 'skew_time' or 'skew_date'
|
||||
object_type: 'pod' or 'node'
|
||||
label_selector: <label of pod or node>
|
||||
```
|
||||
|
||||
|
||||
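And a filled-in time chaos example that skews the date on pods matching a hypothetical label:

```yaml
time_scenarios:
  - action: skew_date
    object_type: pod
    label_selector: app=my-app   # hypothetical pod label
```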
### Common Scenario Edits
|
||||
If you just want to make small changes to pre-existing scenarios, feel free to edit the scenario file itself.
|
||||
|
||||
#### Example of Quick Pod Scenario Edit:
|
||||
If you want to kill 2 pods instead of 1 in any of the pre-existing scenarios, edit the `kill` value in the scenario file (see the pod scenario template above).
|
||||
|
||||
#### Example of Quick Nodes Scenario Edit:
|
||||
If your cluster is built on GCP instead of AWS, just change the cloud type in the node_scenarios_example.yml file.
|
||||
@@ -1,310 +0,0 @@
|
||||
## Chaos Testing Guide
|
||||
|
||||
|
||||
### Table of Contents
|
||||
* [Introduction](#introduction)
|
||||
* [Test Strategies and Methodology](#test-strategies-and-methodology)
|
||||
* [Best Practices](#best-practices)
|
||||
* [Tooling](#tooling)
|
||||
* [Workflow](#workflow)
|
||||
* [Cluster recovery checks, metrics evaluation and pass/fail criteria](#cluster-recovery-checks-metrics-evaluation-and-passfail-criteria)
|
||||
* [Scenarios](#scenarios)
|
||||
* [Test Environment Recommendations - how and where to run chaos tests](#test-environment-recommendations---how-and-where-to-run-chaos-tests)
|
||||
* [Chaos testing in Practice](#chaos-testing-in-practice)
|
||||
* [OpenShift organization](#openshift-organization)
|
||||
* [startx-lab](#startx-lab)
|
||||
|
||||
|
||||
### Introduction
|
||||
There are a number of false assumptions that users might have when operating and running their applications in distributed systems:
|
||||
|
||||
- The network is reliable.
- There is zero latency.
- Bandwidth is infinite.
- The network is secure.
- Topology never changes.
- The network is homogeneous.
- Consistent resource usage with no spikes.
- All shared resources are available from all places.
|
||||
|
||||
These assumptions have led to a number of outages in production environments in the past. The services suffered from poor performance or were inaccessible to the customers, leading to missed Service Level Agreement uptime promises, revenue loss, and a degradation in the perceived reliability of said services.
|
||||
|
||||
How can we best avoid this from happening? This is where Chaos testing can add value.
|
||||
|
||||
|
||||
|
||||
### Test Strategies and Methodology
|
||||
Failures in production are costly. To help mitigate risk to service health, consider the following strategies and approaches to service testing:
|
||||
|
||||
- Be proactive vs reactive. We have different types of test suites in place - unit, integration and end-to-end - that help expose bugs in code in a controlled environment. Through implementation of a chaos engineering strategy, we can discover potential causes of service degradation. We need to understand the systems' behavior under unpredictable conditions in order to find the areas to harden, and use performance data points to size the clusters to handle failures in order to keep downtime to a minimum.
|
||||
|
||||
- Test the resiliency of a system under turbulent conditions by running tests that are designed to disrupt while monitoring the systems adaptability and performance:
|
||||
- Establish and define your steady state and metrics - understand the behavior and performance under stable conditions and define the metrics that will be used to evaluate the system’s behavior. Then decide on acceptable outcomes before injecting chaos.
|
||||
- Analyze the statuses and metrics of all components during the chaos test runs.
|
||||
- Improve the areas that are not resilient and performant by comparing the key metrics and Service Level Objectives (SLOs) to the stable conditions before the chaos.
|
||||
For example: evaluating the API server latency or application uptime to see if the key performance indicators and service level indicators are still within acceptable limits.
|
||||
|
||||
|
||||
|
||||
|
||||
### Best Practices
|
||||
Now that we understand the test methodology, let us take a look at the best practices for a Kubernetes cluster. On that platform there are user applications and cluster workloads that need to be designed for stability and to provide the best user experience possible:
|
||||
|
||||
- Alerts with appropriate severity should get fired.
|
||||
- Alerts are key to identify when a component starts degrading, and can help focus the investigation effort on affected system components.
|
||||
- Alerts should have proper severity, description, notification policy, escalation policy, and SOP in order to reduce MTTR for responding SRE or Ops resources.
|
||||
- Detailed information on the alerts consistency can be found [here](https://github.com/openshift/enhancements/blob/master/enhancements/monitoring/alerting-consistency.md).
|
||||
|
||||
- Minimal performance impact - Network, CPU, Memory, Disk, Throughput etc.
|
||||
- The system, as well as the applications, should be designed to have minimal performance impact during disruptions to ensure stability and also to avoid hogging resources that other applications can use.
|
||||
We want to look at this in terms of CPU, Memory, Disk, Throughput, Network etc.
|
||||
|
||||
|
||||
- Appropriate CPU/Memory limits set to avoid performance throttling and OOM kills.
|
||||
- There might be rogue applications hogging resources ( CPU/Memory ) on the nodes which might lead to applications underperforming or worse getting OOM killed. It is important to ensure that applications and system components have reserved resources for the kube-scheduler to take into consideration in order to keep them performing at the expected levels.
|
||||
|
||||
- Services dependent on the system under test need to handle the failure gracefully to avoid performance degradation and downtime - appropriate timeouts.
|
||||
- In a distributed system, services deployed coordinate with each other and might have external dependencies. Each of the services deployed as a deployment, pod, or container, need to handle the downtime of other dependent services gracefully instead of crashing due to not having appropriate timeouts, fallback logic etc.
|
||||
|
||||
- Proper node sizing to avoid cascading failures and ensure cluster stability especially when the cluster is large and dense
|
||||
- The platform needs to be sized taking into account the resource usage spikes that might occur during chaotic events. For example, if one of the control plane nodes goes down, the other two control plane nodes need to have enough resources to handle the load. The resource usage depends on the load or the number of objects being managed by the Control Plane (API Server, Etcd, Controller and Scheduler). As such, it’s critical to test such conditions, understand the behavior, and leverage the data to size the platform appropriately. This can help keep the applications stable during unplanned events without the control plane undergoing cascading failures which can potentially bring down the entire cluster.
|
||||
|
||||
- Proper node sizing to avoid application failures and maintain stability.
|
||||
- An application pod might use more resources during reinitialization after a crash, so it is important to take that into account for sizing the nodes in the cluster to accommodate it. For example, monitoring solutions like Prometheus need high amounts of memory to replay the write ahead log ( WAL ) when it restarts. As such, it’s critical to test such conditions, understand the behavior, and leverage the data to size the platform appropriately. This can help keep the application stable during unplanned events without undergoing degradation in performance or even worse hog the resources on the node which can impact other applications and system pods.
|
||||
|
||||
|
||||
- Minimal initialization time and fast recovery logic.
|
||||
- The controller watching the component should recognize a failure as soon as possible. The component needs to have minimal initialization time to avoid extended downtime or overloading the replicas if it is a highly available configuration. The cause of failure can be because of issues with the infrastructure on top of which it is running, application failures, or because of service failures that it depends on.
|
||||
|
||||
- High Availability deployment strategy.
|
||||
- There should be multiple replicas ( both Kubernetes and application control planes ) running preferably in different availability zones to survive outages while still serving the user/system requests. Avoid single points of failure.
|
||||
- Backed by persistent storage
|
||||
- It is important to have the system/application backed by persistent storage. This is especially important in cases where the application is a database or a stateful application given that a node, pod, or container failure will wipe off the data.
|
||||
|
||||
- There should be fallback routes to the backend in case of using CDN, for example, Akamai in case of console.redhat.com - a managed service deployed on top of Kubernetes dedicated:
|
||||
- Content delivery networks (CDNs) are commonly used to host resources such as images, JavaScript files, and CSS. The average web page is nearly 2 MB in size, and offloading heavy resources to third-parties is extremely effective for reducing backend server traffic and latency. However, this makes each CDN an additional point of failure for every site that relies on it. If the CDN fails, its customers could also fail.
|
||||
- To test how the application reacts to failures, drop all network traffic between the system and CDN. The application should still serve the content to the user irrespective of the failure.
|
||||
|
||||
- Appropriate caching and Content Delivery Network should be enabled to be performant and usable when there is a latency on the client side.
|
||||
- Not every user or machine has access to unlimited bandwidth, there might be a delay on the user side ( client ) to access the API’s due to limited bandwidth, throttling or latency depending on the geographic location. It is important to inject latency between the client and API calls to understand the behavior and optimize things including caching wherever possible, using CDN’s or opting for different protocols like HTTP/2 or HTTP/3 vs HTTP.
|
||||
|
||||
|
||||
|
||||
|
||||
### Tooling
|
||||
Now that we have looked at the best practices, this section goes through how [Kraken](https://github.com/redhat-chaos/krkn), a chaos testing framework, can help test the resilience of Kubernetes and make sure the applications and services follow the best practices.
|
||||
|
||||
#### Workflow
|
||||
Let us start by understanding the workflow of Kraken: the user points Kraken at a specific Kubernetes cluster using a kubeconfig so it can talk to the platform on top of which the cluster is hosted, either through the oc/kubectl API or the cloud API. Based on its configuration, Kraken injects the chosen chaos scenarios as shown below, talks to [Cerberus](https://github.com/redhat-chaos/cerberus) to get the go/no-go signal representing the overall health of the cluster (optional, can be turned off), scrapes metrics from the in-cluster Prometheus given a metrics profile with the PromQL queries and stores them long term in the configured Elasticsearch (optional, can be turned off), evaluates the PromQL expressions specified in the alerts profile (optional, can be turned off), and aggregates everything to set the pass/fail, i.e. exits 0 or 1. More about the metrics collection, Cerberus, and metrics evaluation can be found in the next section.
|
||||
|
||||

|
||||
|
||||
#### Cluster recovery checks, metrics evaluation and pass/fail criteria
|
||||
- Most of the scenarios have built in checks to verify if the targeted component recovered from the failure after the specified duration of time but there might be cases where other components might have an impact because of a certain failure and it’s extremely important to make sure that the system/application is healthy as a whole post chaos. This is exactly where [Cerberus](https://github.com/redhat-chaos/cerberus) comes to the rescue.
|
||||
If the monitoring tool Cerberus is enabled, Kraken consumes its health signal and decides whether to continue running chaos based on it.
|
||||
|
||||
- Apart from checking the recovery and cluster health status, it’s equally important to evaluate performance metrics like latency, resource usage spikes, throughput, and etcd health (disk fsync, leader elections etc.). To help with this, Kraken can evaluate PromQL expressions from the in-cluster Prometheus and set the exit status to 0 or 1 based on the severity set for each query. Details on how to use this feature can be found [here](https://github.com/redhat-chaos/krkn#alerts).
|
||||
|
||||
- The overall pass or fail of Kraken is based on the recovery of the specific component (within a certain amount of time), the Cerberus health signal which tracks the health of the entire cluster, and the metrics evaluation from the in-cluster Prometheus.
|
||||
|
||||
|
||||
|
||||
|
||||
### Scenarios
|
||||
|
||||
Let us take a look at how to run the chaos scenarios on your Kubernetes clusters using Kraken-hub, a lightweight wrapper around Kraken that eases the runs by letting you launch container images with podman and pass parameters as environment variables. This eliminates the need to carry around and edit configuration files and makes it easy to integrate with any CI framework. Here are the scenarios supported:
|
||||
|
||||
- Pod Scenarios ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/pod-scenarios.md))
|
||||
- Disrupts Kubernetes/OpenShift components and applications deployed as pods:
|
||||
- Helps understand the availability of the application, the initialization timing and recovery status.
|
||||
- [Demo](https://asciinema.org/a/452351?speed=3&theme=solarized-dark)
|
||||
|
||||
- Container Scenarios ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/container-scenarios.md))
|
||||
- Disrupts Kubernetes/OpenShift components and applications deployed as containers running as part of a pod(s), using a specified kill signal to mimic failures:
|
||||
- Helps understand the impact and recovery timing when the program/process running in the containers are disrupted - hangs, paused, killed etc., using various kill signals, i.e. SIGHUP, SIGTERM, SIGKILL etc.
|
||||
- [Demo](https://asciinema.org/a/BXqs9JSGDSEKcydTIJ5LpPZBM?speed=3&theme=solarized-dark)
|
||||
|
||||
- Node Scenarios ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/node-scenarios.md))
|
||||
- Disrupts nodes as part of the cluster infrastructure by talking to the cloud API. AWS, Azure, GCP, OpenStack and Baremetal are the supported platforms as of now. Possible disruptions include:
|
||||
- Terminate nodes
|
||||
- Fork bomb inside the node
|
||||
- Stop the node
|
||||
- Crash the kubelet running on the node
|
||||
- etc.
|
||||
- [Demo](https://asciinema.org/a/ANZY7HhPdWTNaWt4xMFanF6Q5)
|
||||
|
||||
- Zone Outages ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/zone-outages.md))
|
||||
- Creates an outage of availability zone(s) in a targeted region of the public cloud where the Kubernetes cluster is running by tweaking the zone's network ACL to simulate the failure; this stops both ingress and egress traffic from all nodes in that zone for the specified duration and then reverts to the previous state.
|
||||
- Helps understand the impact on both the Kubernetes/OpenShift control plane as well as applications and services running on the worker nodes in that zone.
|
||||
- Currently, only set up for the AWS cloud platform: 1 VPC and multiple subnets within the VPC can be specified.
|
||||
- [Demo](https://asciinema.org/a/452672?speed=3&theme=solarized-dark)
|
||||
|
||||
- Application Outages ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/application-outages.md))
|
||||
- Scenario to block the traffic ( Ingress/Egress ) of an application matching the labels for the specified duration of time to understand the behavior of the service/other services which depend on it during the downtime.
|
||||
- Helps understand how the dependent services react to the unavailability.
|
||||
- [Demo](https://asciinema.org/a/452403?speed=3&theme=solarized-dark)
|
||||
|
||||
- Power Outages ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/power-outages.md))
|
||||
- This scenario imitates a power outage by shutting down the entire cluster for a specified duration of time, then restarts all the nodes and checks the health of the cluster.
|
||||
- There are various use cases in customer environments. For example, some clusters are shut down during periods when the applications do not need to run, in order to save costs.
|
||||
- The nodes are stopped in parallel to mimic a power outage i.e., pulling off the plug
|
||||
- [Demo](https://asciinema.org/a/r0zLbh70XK7gnc4s5v0ZzSXGo)
|
||||
|
||||
- Resource Hog
|
||||
- Hogs CPU, Memory and IO on the targeted nodes
|
||||
- Helps understand if the application/system components have reserved resources to not get disrupted because of rogue applications, or get performance throttled.
|
||||
- CPU Hog ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/node-cpu-hog.md), [Demo](https://asciinema.org/a/452762))
|
||||
- Memory Hog ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/node-memory-hog.md), [Demo](https://asciinema.org/a/452742?speed=3&theme=solarized-dark))
|
||||
|
||||
- Time Skewing ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/time-scenarios.md))
|
||||
- Manipulate the system time and/or date of specific pods/nodes.
|
||||
- Verify scheduling of objects so they continue to work.
|
||||
- Verify time gets reset properly.
|
||||
|
||||
- Namespace Failures ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/namespace-scenarios.md))
|
||||
- Delete namespaces for the specified duration.
|
||||
- Helps understand the impact on other components and tests/improves recovery time of the components in the targeted namespace.
|
||||
|
||||
- Persistent Volume Fill ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/pvc-scenarios.md))
|
||||
- Fills up the persistent volumes, up to a given percentage, used by the pod for the specified duration.
|
||||
- Helps understand how an application deals when it is no longer able to write data to the disk. For example, kafka’s behavior when it is not able to commit data to the disk.
|
||||
|
||||
- Network Chaos ([Documentation](https://github.com/redhat-chaos/krkn-hub/blob/main/docs/network-chaos.md))
|
||||
- Scenarios supported includes:
|
||||
- Network latency
|
||||
- Packet loss
|
||||
- Interface flapping
|
||||
- DNS errors
|
||||
- Packet corruption
|
||||
- Bandwidth limitation
|
||||
|
||||
|
||||
|
||||
|
||||
### Test Environment Recommendations - how and where to run chaos tests
|
||||
|
||||
Let us take a look at few recommendations on how and where to run the chaos tests:
|
||||
|
||||
- Run the chaos tests continuously in your test pipelines:
|
||||
- Software, systems, and infrastructure do change, and the condition/health of each can change pretty rapidly. A good place to run tests is in your CI/CD pipeline running on a regular cadence.
|
||||
|
||||
- Run the chaos tests manually to learn from the system:
|
||||
- When running a Chaos scenario or Fault tests, it is more important to understand how the system responds and reacts, rather than mark the execution as pass or fail.
|
||||
- It is important to define the scope of the test before the execution to avoid some issues from masking others.
|
||||
|
||||
- Run the chaos tests in production environments or mimic the load in staging environments:
|
||||
- As scary as a thought about testing in production is, production is the environment that users are in and traffic spikes/load are real. To fully test the robustness/resilience of a production system, running Chaos Engineering experiments in a production environment will provide needed insights. A couple of things to keep in mind:
|
||||
- Minimize blast radius and have a backup plan in place to make sure the users and customers do not undergo downtime.
|
||||
- Mimic the load in a staging environment in case Service Level Agreements are too tight to cover any downtime.
|
||||
|
||||
- Enable Observability:
|
||||
- Chaos Engineering Without Observability ... Is Just Chaos.
|
||||
- Make sure to have logging and monitoring installed on the cluster to help understand the behaviour and why it is happening. When running the tests in CI, where it is not humanly possible to monitor the cluster all the time, it is recommended to leverage Cerberus to capture the state during the runs and the metrics collection in Kraken to store metrics long term, even after the cluster is gone.
|
||||
- Kraken ships with dashboards that will help understand API, Etcd and Kubernetes cluster level stats and performance metrics.
|
||||
- Pay attention to Prometheus alerts. Check if they are firing as expected.
|
||||
|
||||
- Run multiple chaos tests at once to mimic the production outages:
|
||||
- For example, hogging both IO and Network at the same time instead of running them separately to observe the impact.
|
||||
- You might have existing test cases, be it related to Performance, Scalability or QE. Run the chaos in the background during the test runs to observe the impact. Signaling feature in Kraken can help with coordinating the chaos runs i.e., start, stop, pause the scenarios based on the state of the other test jobs.
|
||||
|
||||
|
||||
#### Chaos testing in Practice
|
||||
|
||||
##### OpenShift organization
|
||||
Within the OpenShift organization we use kraken to perform chaos testing throughout a release before the code is available to customers.
|
||||
|
||||
1. We execute kraken during our regression test suite.
|
||||
|
||||
i. We cover each of the chaos scenarios across different clouds.
|
||||
|
||||
a. Our testing is predominantly done on AWS, Azure and GCP.
|
||||
|
||||
2. We run the chaos scenarios during a long running reliability test.
|
||||
|
||||
i. During this test we perform different types of tasks by different users on the cluster.
|
||||
|
||||
ii. We have added the execution of kraken to perform at certain times throughout the long running test and monitor the health of the cluster.
|
||||
|
||||
iii. This test can be seen here: https://github.com/openshift/svt/tree/master/reliability-v2
|
||||
|
||||
3. We are starting to add in test cases that perform chaos testing during an upgrade (not many iterations of this have been completed).
|
||||
|
||||
|
||||
##### startx-lab
|
||||
|
||||
**NOTE**: Requests for enhancements and any issues need to be filed at the mentioned links given that they are not natively supported in Kraken.
|
||||
|
||||
The following content covers the implementation details around how Startx is leveraging Kraken:
|
||||
|
||||
* Using kraken as part of a tekton pipeline
|
||||
|
||||
You can find on [artifacthub.io](https://artifacthub.io/packages/search?kind=7&ts_query_web=kraken) the
|
||||
[kraken-scenario](https://artifacthub.io/packages/tekton-task/startx-tekton-catalog/kraken-scenario) `tekton-task`
|
||||
which can be used to start a kraken chaos scenarios as part of a chaos pipeline.
|
||||
|
||||
To use this task, you must have:
|
||||
|
||||
- Openshift pipeline enabled (or tekton CRD loaded for Kubernetes clusters)
|
||||
- 1 Secret named `kraken-aws-creds` for scenarios using aws
|
||||
- 1 ConfigMap named `kraken-kubeconfig` with credentials to the targeted cluster
|
||||
- 1 ConfigMap named `kraken-config-example` with kraken configuration file (config.yaml)
|
||||
- 1 ConfigMap named `kraken-common-example` with all kraken related files
|
||||
- The `pipeline` SA will be authorized to run with the privileged SCC
|
||||
|
||||
You can create these resources using the following sequence:
|
||||
|
||||
```bash
|
||||
oc project default
|
||||
oc adm policy add-scc-to-user privileged -z pipeline
|
||||
oc apply -f https://github.com/startxfr/tekton-catalog/raw/stable/task/kraken-scenario/0.1/samples/common.yaml
|
||||
```
|
||||
|
||||
Then you must change the content of the `kraken-aws-creds` secret, and the `kraken-kubeconfig` and `kraken-config-example` ConfigMaps,
|
||||
to reflect your cluster configuration. Refer to the [kraken configuration](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml)
|
||||
and [configuration examples](https://github.com/startxfr/tekton-catalog/blob/stable/task/kraken-scenario/0.1/samples/)
|
||||
for details on how to configure these resources.
|
||||
|
||||
* Start as a single taskrun
|
||||
|
||||
```bash
|
||||
oc apply -f https://github.com/startxfr/tekton-catalog/raw/stable/task/kraken-scenario/0.1/samples/taskrun.yaml
|
||||
```
|
||||
|
||||
* Start as a pipelinerun
|
||||
|
||||
```bash
|
||||
oc apply -f https://github.com/startxfr/tekton-catalog/raw/stable/task/kraken-scenario/0.1/samples/pipelinerun.yaml
|
||||
```
|
||||
|
||||
* Deploying kraken using a helm-chart
|
||||
|
||||
You can find on [artifacthub.io](https://artifacthub.io/packages/search?kind=0&ts_query_web=kraken) the
|
||||
[chaos-kraken](https://artifacthub.io/packages/helm/startx/chaos-kraken) `helm-chart`
|
||||
which can be used to deploy a kraken chaos scenarios.
|
||||
|
||||
The default configuration creates the following resources:
|
||||
|
||||
- 1 project named **chaos-kraken**
|
||||
- 1 scc with privileged context for kraken deployment
|
||||
- 1 configmap with 21 generic kraken scenarios, various scripts and configuration
|
||||
- 1 configmap with kubeconfig of the targeted cluster
|
||||
- 1 job named kraken-test-xxx
|
||||
- 1 service to the kraken pods
|
||||
- 1 route to the kraken service
|
||||
|
||||
```bash
|
||||
# Install the startx helm repository
|
||||
helm repo add startx https://startxfr.github.io/helm-repository/packages/
|
||||
# Install the kraken project
|
||||
helm install --set project.enabled=true chaos-kraken-project startx/chaos-kraken
|
||||
# Deploy the kraken instance
|
||||
helm install \
|
||||
--set kraken.enabled=true \
|
||||
--set kraken.aws.credentials.region="eu-west-3" \
|
||||
--set kraken.aws.credentials.key_id="AKIAXXXXXXXXXXXXXXXX" \
|
||||
--set kraken.aws.credentials.secret="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" \
|
||||
--set kraken.kubeconfig.token.server="https://api.mycluster:6443" \
|
||||
--set kraken.kubeconfig.token.token="sha256~XXXXXXXXXX_PUT_YOUR_TOKEN_HERE_XXXXXXXXXXXX" \
|
||||
-n chaos-kraken \
|
||||
chaos-kraken-instance startx/chaos-kraken
|
||||
```
|
||||
@@ -1,45 +0,0 @@
|
||||
## Installation
|
||||
|
||||
The following ways are supported to run Kraken:
|
||||
|
||||
- Standalone python program through Git.
|
||||
- Containerized version using either Podman or Docker as the runtime via [Krkn-hub](https://github.com/krkn-chaos/krkn-hub)
|
||||
- Kubernetes or OpenShift deployment ( unsupported )
|
||||
|
||||
**NOTE**: It is recommended to run Kraken external to the cluster ( Standalone or Containerized ) hitting the Kubernetes/OpenShift API as running it internal to the cluster might be disruptive to itself and also might not report back the results if the chaos leads to cluster's API server instability.
|
||||
|
||||
**NOTE**: To run Kraken on Power (ppc64le) architecture, build and run a containerized version by following the
|
||||
instructions given [here](https://github.com/krkn-chaos/krkn/blob/main/containers/build_own_image-README.md).
|
||||
|
||||
**NOTE**: Helper functions for interactions in Krkn are part of [krkn-lib](https://github.com/redhat-chaos/krkn-lib).
|
||||
Please feel free to reuse and expand them as you see fit when adding a new scenario or expanding
|
||||
the capabilities of the current supported scenarios.
|
||||
|
||||
|
||||
### Git
|
||||
|
||||
#### Clone the repository
|
||||
Pick the latest stable release to install [here](https://github.com/krkn-chaos/krkn/releases).
|
||||
```
|
||||
$ git clone https://github.com/krkn-chaos/krkn.git --branch <release version>
|
||||
$ cd krkn
|
||||
```
|
||||
|
||||
#### Install the dependencies
|
||||
```
|
||||
$ python3.9 -m venv chaos
|
||||
$ source chaos/bin/activate
|
||||
$ pip3.9 install -r requirements.txt
|
||||
```
|
||||
|
||||
**NOTE**: Make sure python3-devel and the latest pip are installed on the system. Dependency installation has been tested with pip >= 21.1.3.
|
||||
|
||||
#### Run
|
||||
```
|
||||
$ python3.9 run_kraken.py --config <config_file_location>
|
||||
```
|
||||
|
||||
### Run containerized version
|
||||
[Krkn-hub](https://github.com/krkn-chaos/krkn-hub) is a wrapper that allows running Krkn chaos scenarios via podman or docker runtime with scenario parameters/configuration defined as environment variables.
|
||||
|
||||
Refer [instructions](https://github.com/krkn-chaos/krkn-hub#supported-chaos-scenarios) to get started.
|
||||
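As an illustration, a containerized run typically looks like the sketch below. The image tag, kubeconfig mount path, and environment variable names vary per scenario and are assumptions here; confirm them against the Krkn-hub documentation before running.

```bash
# Run the pod-scenarios image against the cluster pointed to by the local kubeconfig.
# Scenario parameters are passed as environment variables (-e KEY=VALUE),
# as documented per scenario in the krkn-hub repository.
podman run --net=host \
    -v "$HOME/.kube/config:/home/krkn/.kube/config:Z" \
    quay.io/krkn-chaos/krkn-hub:pod-scenarios
```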
@@ -1,36 +0,0 @@
|
||||
### ManagedCluster Scenarios
|
||||
|
||||
[ManagedCluster](https://open-cluster-management.io/concepts/managedcluster/) scenarios provide a way to integrate kraken with [Open Cluster Management (OCM)](https://open-cluster-management.io/) and [Red Hat Advanced Cluster Management for Kubernetes (ACM)](https://www.redhat.com/en/technologies/management/advanced-cluster-management).
|
||||
|
||||
ManagedCluster scenarios leverage [ManifestWorks](https://open-cluster-management.io/concepts/manifestwork/) to inject faults into the ManagedClusters.
|
||||
|
||||
The following ManagedCluster chaos scenarios are supported:
|
||||
|
||||
1. **managedcluster_start_scenario**: Scenario to start the ManagedCluster instance.
|
||||
2. **managedcluster_stop_scenario**: Scenario to stop the ManagedCluster instance.
|
||||
3. **managedcluster_stop_start_scenario**: Scenario to stop and then start the ManagedCluster instance.
|
||||
4. **start_klusterlet_scenario**: Scenario to start the klusterlet of the ManagedCluster instance.
|
||||
5. **stop_klusterlet_scenario**: Scenario to stop the klusterlet of the ManagedCluster instance.
|
||||
6. **stop_start_klusterlet_scenario**: Scenario to stop and start the klusterlet of the ManagedCluster instance.
|
||||
|
||||
ManagedCluster scenarios can be injected by placing the ManagedCluster scenarios config files under `managedcluster_scenarios` option in the Kraken config. Refer to [managedcluster_scenarios_example](https://github.com/redhat-chaos/krkn/blob/main/scenarios/kube/managedcluster_scenarios_example.yml) config file.
|
||||
|
||||
```
|
||||
managedcluster_scenarios:
|
||||
- actions: # ManagedCluster chaos scenarios to be injected
|
||||
- managedcluster_stop_start_scenario
|
||||
managedcluster_name: cluster1 # ManagedCluster on which scenario has to be injected; can set multiple names separated by comma
|
||||
# label_selector: # When managedcluster_name is not specified, a ManagedCluster with matching label_selector is selected for ManagedCluster chaos scenario injection
|
||||
instance_count: 1 # Number of managedcluster to perform action/select that match the label selector
|
||||
runs: 1 # Number of times to inject each scenario under actions (will perform on same ManagedCluster each time)
|
||||
timeout: 420 # Duration to wait for completion of ManagedCluster scenario injection
|
||||
# For OCM to detect a ManagedCluster as unavailable, have to wait 5*leaseDurationSeconds
|
||||
# (default leaseDurationSeconds = 60 sec)
|
||||
- actions:
|
||||
- stop_start_klusterlet_scenario
|
||||
managedcluster_name: cluster1
|
||||
# label_selector:
|
||||
instance_count: 1
|
||||
runs: 1
|
||||
timeout: 60
|
||||
```
|
||||
@@ -1,49 +0,0 @@
|
||||
### Network chaos
|
||||
Scenario to introduce network latency, packet loss, and bandwidth restriction in the Node's host network interface. The purpose of this scenario is to observe faults caused by random variations in the network.
|
||||
|
||||
##### Sample scenario config for egress traffic shaping
|
||||
```
|
||||
network_chaos: # Scenario to create an outage by simulating random variations in the network.
|
||||
duration: 300 # In seconds - duration network chaos will be applied.
|
||||
node_name: # Comma separated node names on which scenario has to be injected.
|
||||
label_selector: node-role.kubernetes.io/master # When node_name is not specified, a node with matching label_selector is selected for running the scenario.
|
||||
instance_count: 1 # Number of nodes in which to execute network chaos.
|
||||
interfaces: # List of interface on which to apply the network restriction.
|
||||
- "ens5" # Interface name would be the Kernel host network interface name.
|
||||
execution: serial|parallel # Execute each of the egress options as a single scenario(parallel) or as separate scenario(serial).
|
||||
egress:
|
||||
latency: 500ms
|
||||
loss: 50% # percentage
|
||||
bandwidth: 10mbit
|
||||
```
|
||||
|
||||
##### Sample scenario config for ingress traffic shaping (using a plugin)
|
||||
```
|
||||
- id: network_chaos
|
||||
config:
|
||||
node_interface_name: # Dictionary with key as node name(s) and value as a list of its interfaces to test
|
||||
ip-10-0-128-153.us-west-2.compute.internal:
|
||||
- ens5
|
||||
- genev_sys_6081
|
||||
label_selector: node-role.kubernetes.io/master # When node_interface_name is not specified, nodes with matching label_selector is selected for node chaos scenario injection
|
||||
instance_count: 1 # Number of nodes to perform action/select that match the label selector
|
||||
kubeconfig_path: ~/.kube/config # Path to kubernetes config file. If not specified, it defaults to ~/.kube/config
|
||||
execution_type: parallel # Execute each of the ingress options as a single scenario(parallel) or as separate scenario(serial).
|
||||
network_params:
|
||||
latency: 500ms
|
||||
loss: '50%'
|
||||
bandwidth: 10mbit
|
||||
wait_duration: 120
|
||||
test_duration: 60
|
||||
```
|
||||
|
||||
Note: For ingress traffic shaping, ensure that your node doesn't have any [IFB](https://wiki.linuxfoundation.org/networking/ifb) interfaces already present. The scenario relies on creating IFBs to do the shaping, and they are deleted at the end of the scenario.
|
||||
|
||||
|
||||
##### Steps
|
||||
- Pick the nodes to introduce the network anomaly either from node_name or label_selector.
|
||||
- Verify the interface list on one of the nodes, or use the interface with the default route as the test interface if no interface is specified by the user.
|
||||
- Set traffic shaping config on node's interface using tc and netem.
|
||||
- Wait for the duration time.
|
||||
- Remove traffic shaping config on node's interface.
|
||||
- Remove the job that spawned the pod.
|
||||
@@ -1,116 +0,0 @@
|
||||
### Node Scenarios
|
||||
|
||||
The following node chaos scenarios are supported:
|
||||
|
||||
1. **node_start_scenario**: Scenario to start the node instance.
|
||||
2. **node_stop_scenario**: Scenario to stop the node instance.
|
||||
3. **node_stop_start_scenario**: Scenario to stop and then start the node instance. Not supported on VMware.
|
||||
4. **node_termination_scenario**: Scenario to terminate the node instance.
|
||||
5. **node_reboot_scenario**: Scenario to reboot the node instance.
|
||||
6. **stop_kubelet_scenario**: Scenario to stop the kubelet of the node instance.
|
||||
7. **stop_start_kubelet_scenario**: Scenario to stop and start the kubelet of the node instance.
|
||||
8. **restart_kubelet_scenario**: Scenario to restart the kubelet of the node instance.
|
||||
9. **node_crash_scenario**: Scenario to crash the node instance.
|
||||
10. **stop_start_helper_node_scenario**: Scenario to stop and start the helper node and check service status.
|
||||
|
||||
|
||||
**NOTE**: If the node does not recover from the node_crash_scenario injection, reboot the node to get it back to Ready state.
|
||||
|
||||
**NOTE**: node_start_scenario, node_stop_scenario, node_stop_start_scenario, node_termination_scenario, node_reboot_scenario and stop_start_kubelet_scenario are supported on AWS, Azure, OpenStack, BareMetal, GCP, VMware and Alibaba.
|
||||
|
||||
|
||||
#### AWS
|
||||
|
||||
Cloud setup instructions can be found [here](cloud_setup.md#aws). Sample scenario config can be found [here](https://github.com/krkn-chaos/krkn/blob/main/scenarios/openshift/aws_node_scenarios.yml).
|
||||
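A hedged sketch of what an AWS node scenario entry can look like, using the field names from the node scenario template in this documentation (the linked sample file is authoritative and may use newer names):

```yaml
node_scenarios:
  - actions:
      - node_reboot_scenario
    label_selector: node-role.kubernetes.io/worker   # illustrative label
    instance_kill_count: 1
    timeout: 360
    cloud_type: aws
```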
|
||||
|
||||
|
||||
#### Baremetal
|
||||
|
||||
Sample scenario config can be found [here](https://github.com/krkn-chaos/krkn/blob/main/scenarios/openshift/baremetal_node_scenarios.yml).
|
||||
|
||||
**NOTE**: Baremetal requires setting the IPMI user and password to power on, off, and reboot nodes, using the config options `bm_user` and `bm_password`. It can either be set in the root of the entry in the scenarios config, or it can be set per machine.
|
||||
|
||||
If no per-machine addresses are specified, kraken attempts to use the BMC value in the BareMetalHost object. To list them, you can do 'oc get bmh -o wide --all-namespaces'. If the BMC values are blank, you must specify them per-machine using the config option 'bmc_addr' as specified below.
|
||||
|
||||
For per-machine settings, add a "bmc_info" section to the entry in the scenarios config. Inside there, add a configuration section using the node name. In that, add per-machine settings. Valid settings are 'bmc_user', 'bmc_password', and 'bmc_addr'.
|
||||
See the example node scenario or the example below.
|
||||
|
||||
**NOTE**: Baremetal requires oc (openshift client) be installed on the machine running Kraken.
|
||||
|
||||
**NOTE**: Baremetal machines are fragile. Some node actions can occasionally corrupt the filesystem if it does not shut down properly, and sometimes the kubelet does not start properly.
|
||||
|
||||
|
||||
|
||||
#### Docker
|
||||
|
||||
The Docker provider can be used to run node scenarios against kind clusters.
|
||||
|
||||
[kind](https://kind.sigs.k8s.io/) is a tool for running local Kubernetes clusters using Docker container "nodes".
|
||||
|
||||
kind was primarily designed for testing Kubernetes itself, but may be used for local development or CI.
|
||||
|
||||
|
||||
|
||||
#### GCP
|
||||
Cloud setup instructions can be found [here](cloud_setup.md#gcp). Sample scenario config can be found [here](https://github.com/krkn-chaos/krkn/blob/main/scenarios/openshift/gcp_node_scenarios.yml).
|
||||
|
||||
|
||||
#### Openstack
|
||||
|
||||
How to set up Openstack cli to run node scenarios is defined [here](cloud_setup.md#openstack).
|
||||
|
||||
The supported node level chaos scenarios on an OPENSTACK cloud are `node_stop_start_scenario`, `stop_start_kubelet_scenario` and `node_reboot_scenario`.
|
||||
|
||||
**NOTE**: For `stop_start_helper_node_scenario`, visit [here](https://github.com/redhat-cop/ocp4-helpernode) to learn more about the helper node and its usage.
|
||||
|
||||
To execute the scenario, ensure the value for `ssh_private_key` in the node scenarios config file is set with the correct private key file path for ssh connection to the helper node. Ensure passwordless ssh is configured on the host running Kraken and the helper node to avoid connection errors.
|
||||
|
||||
|
||||
|
||||
#### Azure
|
||||
|
||||
Cloud setup instructions can be found [here](cloud_setup.md#azure). Sample scenario config can be found [here](https://github.com/krkn-chaos/krkn/blob/main/scenarios/openshift/azure_node_scenarios.yml).
|
||||
|
||||
|
||||
|
||||
#### Alibaba
|
||||
|
||||
How to set up Alibaba cli to run node scenarios is defined [here](cloud_setup.md#alibaba).
|
||||
|
||||
**NOTE**: There is no "terminate" concept in Alibaba, so any scenario with terminate will "release" the node. Releasing a node is a two-step process: the node is stopped first and then released.
|
||||
|
||||
|
||||
|
||||
#### VMware
|
||||
How to set up VMware vSphere to run node scenarios is defined [here](cloud_setup.md#vmware)
|
||||
|
||||
This cloud type uses a different configuration style, see actions below and [example config file](../scenarios/openshift/vmware_node_scenarios.yml)
|
||||
|
||||
- vmware-node-terminate
|
||||
- vmware-node-reboot
|
||||
- vmware-node-stop
|
||||
- vmware-node-start
|
||||
|
||||
|
||||
|
||||
#### IBMCloud
|
||||
How to set up IBMCloud to run node scenarios is defined [here](cloud_setup.md#ibmcloud)
|
||||
|
||||
This cloud type uses a different configuration style, see actions below and [example config file](../scenarios/openshift/ibmcloud_node_scenarios.yml)
|
||||
|
||||
- ibmcloud-node-terminate
|
||||
- ibmcloud-node-reboot
|
||||
- ibmcloud-node-stop
|
||||
- ibmcloud-node-start
|
||||
|
||||
|
||||
|
||||
#### General
|
||||
|
||||
**NOTE**: The `node_crash_scenario` and `stop_kubelet_scenario` scenarios are supported independent of the cloud platform.
|
||||
|
||||
Use 'generic' or do not add the 'cloud_type' key to your scenario if your cluster is not set up using one of the currently supported cloud types.
|
||||
@@ -1,12 +0,0 @@
|
||||
## Performance dashboards
|
||||
|
||||
Kraken supports installing a mutable grafana on the cluster with the dashboards loaded to help with monitoring the cluster for things like resource usage to find the outliers, API stats, Etcd health, Critical alerts etc. It can be deployed by enabling the following in the config:
|
||||
|
||||
```
|
||||
performance_monitoring:
|
||||
deploy_dashboards: True
|
||||
```
|
||||
|
||||
The route and credentials to access the dashboards will be printed on the stdout before Kraken starts creating chaos. The dashboards can be edited/modified to include your queries of interest.
|
||||
|
||||
**NOTE**: The dashboards leverage Prometheus for scraping the metrics off of the cluster and currently only support OpenShift, since Prometheus is set up on the cluster by default and the routes object is leveraged to expose the Grafana dashboards externally.
|
||||
@@ -1,46 +0,0 @@
|
||||
## Pod network Scenarios
|
||||
|
||||
### Pod outage
|
||||
Scenario to block the traffic ( Ingress/Egress ) of a pod matching the labels for the specified duration of time to understand the behavior of the service/other services which depend on it during downtime. This helps with planning the requirements accordingly, be it improving the timeouts or tweaking the alerts etc.
|
||||
With the current network policies, it is not possible to explicitly block ports which are enabled by an allowed network policy rule. This chaos scenario addresses this issue by using OVS flow rules to block ports related to the pod. It supports OpenShiftSDN and OVNKubernetes based networks.
|
||||
|
||||
##### Sample scenario config (using a plugin)
|
||||
```
|
||||
- id: pod_network_outage
|
||||
config:
|
||||
namespace: openshift-console # Required - Namespace of the pod to which filter need to be applied
|
||||
direction: # Optional - List of directions to apply filters
|
||||
- ingress # Blocks ingress traffic, Default both egress and ingress
|
||||
ingress_ports: # Optional - List of ports to block traffic on
|
||||
- 8443 # Blocks 8443, Default [], i.e. all ports.
|
||||
label_selector: 'component=ui' # Blocks access to openshift console
|
||||
```
|
||||
### Pod Network shaping
|
||||
Scenario to introduce network latency, packet loss, and bandwidth restriction in the Pod's network interface. The purpose of this scenario is to observe faults caused by random variations in the network.
|
||||
|
||||
##### Sample scenario config for egress traffic shaping (using plugin)
|
||||
```
|
||||
- id: pod_egress_shaping
|
||||
config:
|
||||
namespace: openshift-console # Required - Namespace of the pod to which filter need to be applied.
|
||||
label_selector: 'component=ui' # Applies traffic shaping to access openshift console.
|
||||
network_params:
|
||||
latency: 500ms # Add 500ms latency to egress traffic from the pod.
|
||||
```
|
||||
##### Sample scenario config for ingress traffic shaping (using plugin)
|
||||
```
|
||||
- id: pod_ingress_shaping
|
||||
config:
|
||||
namespace: openshift-console # Required - Namespace of the pod to which filter need to be applied.
|
||||
label_selector: 'component=ui' # Applies traffic shaping to access openshift console.
|
||||
network_params:
|
||||
latency: 500ms # Add 500ms latency to ingress traffic to the pod.
|
||||
```
|
||||
|
||||
##### Steps
|
||||
- Pick the pods to introduce the network anomaly either from label_selector or pod_name.
|
||||
- Identify the pod interface name on the node.
|
||||
- Set traffic shaping config on pod's interface using tc and netem.
|
||||
- Wait for the duration time.
|
||||
- Remove traffic shaping config on pod's interface.
|
||||
- Remove the job that spawned the pod.
|
||||
@@ -1,37 +0,0 @@
|
||||
### Pod Scenarios
|
||||
|
||||
Krkn recently replaced PowerfulSeal with its own internal pod scenarios using a plugin system. You can run pod scenarios by adding the following config to Krkn:
|
||||
|
||||
```yaml
|
||||
kraken:
|
||||
chaos_scenarios:
|
||||
- plugin_scenarios:
|
||||
- path/to/scenario.yaml
|
||||
```
|
||||
|
||||
You can then create the scenario file with the following contents:
|
||||
|
||||
```yaml
|
||||
# yaml-language-server: $schema=../plugin.schema.json
|
||||
- id: kill-pods
|
||||
config:
|
||||
namespace_pattern: ^kube-system$
|
||||
label_selector: k8s-app=kube-scheduler
|
||||
krkn_pod_recovery_time: 120
|
||||
|
||||
```
|
||||
|
||||
Please adjust the schema reference to point to the [schema file](../scenarios/plugin.schema.json). This file will give you code completion and documentation for the available options in your IDE.
|
||||
|
||||
#### Pod Chaos Scenarios
|
||||
|
||||
The following are the components of Kubernetes/OpenShift for which a basic chaos scenario config exists today.
|
||||
|
||||
| Component | Description | Working |
|
||||
| ------------------------ |-------------| -------- |
|
||||
| [Basic pod scenario](../scenarios/kube/pod.yml) | Kill a pod. | :heavy_check_mark: |
|
||||
| [Etcd](../scenarios/openshift/etcd.yml) | Kills a single/multiple etcd replicas. | :heavy_check_mark: |
|
||||
| [Kube ApiServer](../scenarios/openshift/openshift-kube-apiserver.yml)| Kills a single/multiple kube-apiserver replicas. | :heavy_check_mark: |
|
||||
| [ApiServer](../scenarios/openshift/openshift-apiserver.yml) | Kills a single/multiple apiserver replicas. | :heavy_check_mark: |
|
||||
| [Prometheus](../scenarios/openshift/prometheus.yml) | Kills a single/multiple prometheus replicas. | :heavy_check_mark: |
|
||||
| [OpenShift System Pods](../scenarios/openshift/regex_openshift_pod_kill.yml) | Kills random pods running in the OpenShift system namespaces. | :heavy_check_mark: |
|
||||
@@ -1,26 +0,0 @@
|
||||
### PVC scenario
|
||||
Scenario to fill up a given PersistentVolumeClaim by creating a temp file on the PVC from a pod associated with it. The purpose of this scenario is to fill up a volume to understand faults caused by the application using this volume.
|
||||
|
||||
##### Sample scenario config
|
||||
```
|
||||
pvc_scenario:
|
||||
pvc_name: <pvc_name> # Name of the target PVC.
|
||||
pod_name: <pod_name> # Name of the pod where the PVC is mounted. It will be ignored if the pvc_name is defined.
|
||||
namespace: <namespace_name> # Namespace where the PVC is.
|
||||
fill_percentage: 50 # Target percentage to fill up the PVC. Value must be higher than the current usage percentage. Valid values are between 0 and 99.
|
||||
duration: 60 # Duration in seconds for the fault.
|
||||
```
|
||||
|
||||
##### Steps
|
||||
- Get the pod name where the PVC is mounted.
|
||||
- Get the volume name mounted in the container pod.
|
||||
- Get the container name where the PVC is mounted.
|
||||
- Get the mount path where the PVC is mounted in the pod.
|
||||
- Get the PVC capacity and current used capacity.
|
||||
- Calculate file size to fill the PVC to the target fill_percentage.
|
||||
- Connect to the pod.
|
||||
- Create a temp file `kraken.tmp` with random data on the mount path:
|
||||
- `dd bs=1024 count=$file_size </dev/urandom > /mount_path/kraken.tmp`
|
||||
- Wait for the duration time.
|
||||
- Remove the temp file created:
|
||||
- `rm kraken.tmp`
|
||||
@@ -1,136 +0,0 @@
|
||||
# Scenario Plugin API:
|
||||
|
||||
This API enables seamless integration of Scenario Plugins for Krkn. Plugins are automatically
|
||||
detected and loaded by the plugin loader, provided they extend the `AbstractPluginScenario`
|
||||
abstract class, implement the required methods, and adhere to the specified [naming conventions](#naming-conventions).
|
||||
|
||||
## Plugin folder:
|
||||
|
||||
The plugin loader automatically loads plugins found in the `krkn/scenario_plugins` directory,
|
||||
relative to the Krkn root folder. Each plugin must reside in its own directory and can consist
|
||||
of one or more Python files. The entry point for each plugin is a Python class that extends the
|
||||
[AbstractPluginScenario](../krkn/scenario_plugins/abstract_scenario_plugin.py) abstract class and implements its required methods.
|
||||
|
||||
## `AbstractPluginScenario` abstract class:
|
||||
|
||||
This [abstract class](../krkn/scenario_plugins/abstract_scenario_plugin.py) defines the contract between the plugin and krkn.
|
||||
It consists of two methods:
|
||||
- `run(...)`
|
||||
- `get_scenario_types()`
|
||||
|
||||
Most IDEs can automatically suggest and implement the abstract methods defined in `AbstractPluginScenario`:
|
||||

|
||||
_(IntelliJ PyCharm)_
|
||||
|
||||
### `run(...)`
|
||||
|
||||
```python
|
||||
def run(
|
||||
self,
|
||||
run_uuid: str,
|
||||
scenario: str,
|
||||
krkn_config: dict[str, any],
|
||||
lib_telemetry: KrknTelemetryOpenshift,
|
||||
scenario_telemetry: ScenarioTelemetry,
|
||||
) -> int:
|
||||
|
||||
```
|
||||
|
||||
This method represents the entry point of the plugin and the first method
|
||||
that will be executed.
|
||||
#### Parameters:
|
||||
|
||||
- `run_uuid`:
|
||||
- the uuid of the chaos run generated by krkn for every single run.
|
||||
- `scenario`:
|
||||
- the config file of the scenario that is currently executed
|
||||
- `krkn_config`:
|
||||
- the full dictionary representation of the `config.yaml`
|
||||
- `lib_telemetry`
|
||||
- it is a composite object of all the [krkn-lib](https://krkn-chaos.github.io/krkn-lib-docs/modules.html) objects and methods needed by a krkn plugin to run.
|
||||
- `scenario_telemetry`
|
||||
- the `ScenarioTelemetry` object of the scenario that is currently executed
|
||||
|
||||
### Return value:
|
||||
Returns 0 if the scenario succeeds and 1 if it fails.
|
||||
> [!WARNING]
|
||||
> All exceptions must be handled __inside__ the run method and not propagated.
|
||||
|
||||
### `get_scenario_types()`:
|
||||
|
||||
```python
def get_scenario_types(self) -> list[str]:
```
|
||||
|
||||
Indicates the scenario types specified in the `config.yaml`. For the plugin to be properly
|
||||
loaded, recognized and executed, it must be implemented and must return one or more
|
||||
strings matching `scenario_type` strings set in the config.
|
||||
> [!WARNING]
|
||||
> Multiple strings can map to a *single* `ScenarioPlugin` but the same string cannot map
|
||||
> to different plugins; an exception will be thrown for scenario_type redefinition.
|
||||
|
||||
> [!Note]
|
||||
> The `scenario_type` strings must be unique across all plugins; otherwise, an exception will be thrown.
|
||||
|
||||
## Naming conventions:
|
||||
A key requirement for developing a plugin that will be properly loaded
|
||||
by the plugin loader is following the established naming conventions.
|
||||
These conventions are enforced to maintain a uniform and readable codebase,
|
||||
making it easier to onboard new developers from the community.
|
||||
|
||||
### plugin folder:
|
||||
- the plugin folder must be placed in the `krkn/scenario_plugins` folder, relative to the krkn root folder
|
||||
- the plugin folder __cannot__ contain the words
|
||||
- `plugin`
|
||||
- `scenario`
|
||||
### plugin file name and class name:
|
||||
- the plugin file containing the main plugin class must be named in _snake case_ and must have the suffix `_scenario_plugin`:
|
||||
- `example_scenario_plugin.py`
|
||||
- the main plugin class must be named in _upper camel case_ and must have the suffix `ScenarioPlugin`:
|
||||
- `ExampleScenarioPlugin`
|
||||
- the file name must match the class name in the respective syntax:
|
||||
- `example_scenario_plugin.py` -> `ExampleScenarioPlugin`
|
||||
|
||||
### scenario type:
|
||||
- the scenario type __must__ be unique across all the scenarios.
|
||||
|
||||
### logging:
|
||||
If your new scenario does not adhere to the naming conventions, an error log will be generated in the Krkn standard output,
|
||||
providing details about the issue:
|
||||
|
||||
```commandline
|
||||
2024-10-03 18:06:31,136 [INFO] 📣 `ScenarioPluginFactory`: types from config.yaml mapped to respective classes for execution:
|
||||
2024-10-03 18:06:31,136 [INFO] ✅ type: application_outages_scenarios ➡️ `ApplicationOutageScenarioPlugin`
|
||||
2024-10-03 18:06:31,136 [INFO] ✅ types: [hog_scenarios, arcaflow_scenario] ➡️ `ArcaflowScenarioPlugin`
|
||||
2024-10-03 18:06:31,136 [INFO] ✅ type: container_scenarios ➡️ `ContainerScenarioPlugin`
|
||||
2024-10-03 18:06:31,136 [INFO] ✅ type: managedcluster_scenarios ➡️ `ManagedClusterScenarioPlugin`
|
||||
2024-10-03 18:06:31,137 [INFO] ✅ types: [pod_disruption_scenarios, pod_network_scenario, vmware_node_scenarios, ibmcloud_node_scenarios] ➡️ `NativeScenarioPlugin`
|
||||
2024-10-03 18:06:31,137 [INFO] ✅ type: network_chaos_scenarios ➡️ `NetworkChaosScenarioPlugin`
|
||||
2024-10-03 18:06:31,137 [INFO] ✅ type: node_scenarios ➡️ `NodeActionsScenarioPlugin`
|
||||
2024-10-03 18:06:31,137 [INFO] ✅ type: pvc_scenarios ➡️ `PvcScenarioPlugin`
|
||||
2024-10-03 18:06:31,137 [INFO] ✅ type: service_disruption_scenarios ➡️ `ServiceDisruptionScenarioPlugin`
|
||||
2024-10-03 18:06:31,137 [INFO] ✅ type: service_hijacking_scenarios ➡️ `ServiceHijackingScenarioPlugin`
|
||||
2024-10-03 18:06:31,137 [INFO] ✅ type: cluster_shut_down_scenarios ➡️ `ShutDownScenarioPlugin`
|
||||
2024-10-03 18:06:31,137 [INFO] ✅ type: syn_flood_scenarios ➡️ `SynFloodScenarioPlugin`
|
||||
2024-10-03 18:06:31,137 [INFO] ✅ type: time_scenarios ➡️ `TimeActionsScenarioPlugin`
|
||||
2024-10-03 18:06:31,137 [INFO] ✅ type: zone_outages_scenarios ➡️ `ZoneOutageScenarioPlugin`
|
||||
|
||||
2024-09-18 14:48:41,735 [INFO] Failed to load Scenario Plugins:
|
||||
|
||||
2024-09-18 14:48:41,735 [ERROR] ⛔ Class: ExamplePluginScenario Module: krkn.scenario_plugins.example.example_scenario_plugin
|
||||
2024-09-18 14:48:41,735 [ERROR] ⚠️ scenario plugin class name must start with a capital letter, end with `ScenarioPlugin`, and cannot be just `ScenarioPlugin`.
|
||||
```
|
||||
|
||||
>[!NOTE]
|
||||
>If you're trying to understand how the scenario types in the config.yaml are mapped to
|
||||
> their corresponding plugins, this log will guide you!
|
||||
> Each scenario plugin class mentioned can be found in the `krkn/scenario_plugins` folder:
> simply convert the camel case notation to snake case and remove the `ScenarioPlugin` suffix from the class name,
> e.g. the `ShutDownScenarioPlugin` class can be found in the `krkn/scenario_plugins/shut_down` folder.
|
||||
|
||||
## ExampleScenarioPlugin
|
||||
The [ExampleScenarioPlugin](../krkn/tests/test_classes/example_scenario_plugin.py) class included in the tests folder can be used as scaffolding for new plugins and is considered
|
||||
part of the documentation.
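
For orientation, here is a minimal, hypothetical plugin sketch that follows the API and naming conventions described above (the scenario type string and log messages are made up; refer to the actual `ExampleScenarioPlugin` for the canonical scaffolding):

```python
# krkn/scenario_plugins/example/example_scenario_plugin.py (hypothetical layout)
import logging

from krkn_lib.models.telemetry import ScenarioTelemetry
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift

from krkn.scenario_plugins.abstract_scenario_plugin import AbstractScenarioPlugin


class ExampleScenarioPlugin(AbstractScenarioPlugin):
    def run(
        self,
        run_uuid: str,
        scenario: str,
        krkn_config: dict[str, any],
        lib_telemetry: KrknTelemetryOpenshift,
        scenario_telemetry: ScenarioTelemetry,
    ) -> int:
        try:
            # read the scenario config and inject the chaos here
            logging.info("running example scenario %s for run %s", scenario, run_uuid)
            return 0
        except Exception as e:
            # exceptions must be handled inside run() and never propagated
            logging.error("ExampleScenarioPlugin failed: %s", e)
            return 1

    def get_scenario_types(self) -> list[str]:
        # must match the scenario_type string(s) used in config.yaml
        return ["example_scenarios"]
```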
|
||||
|
||||
For any questions or further guidance, feel free to reach out to us on the
|
||||
[Kubernetes workspace](https://kubernetes.slack.com/) in the `#krkn` channel.
|
||||
We’re happy to assist. Now, __release the Krkn!__
|
||||
|
||||
Binary file not shown.
|
Before Width: | Height: | Size: 340 KiB |
@@ -1,63 +0,0 @@
|
||||
### Service Disruption Scenarios (Previously Delete Namespace Scenario)
|
||||
|
||||
Using this type of scenario configuration one is able to delete crucial objects in a specific namespace, or a namespace matching a certain regex string.
|
||||
|
||||
Configuration Options:
|
||||
|
||||
**namespace:** Specific namespace or regex style namespace of what you want to delete. Gets all namespaces if not specified; set to "" if you want to use the label_selector field.
|
||||
|
||||
Set to '^.*$' and label_selector to "" to randomly select any namespace in your cluster.
|
||||
|
||||
**label_selector:** Label on the namespace you want to delete. Set to "" if you are using the namespace variable.
|
||||
|
||||
**delete_count:** Number of namespaces to kill in each run. Based on matching namespace and label specified, default is 1.
|
||||
|
||||
**runs:** Number of runs/iterations to kill namespaces, default is 1.
|
||||
|
||||
**sleep:** Number of seconds to wait between each iteration/count of killing namespaces. Defaults to 10 seconds if not set
|
||||
|
||||
Refer to [namespace_scenarios_example](https://github.com/krkn-chaos/krkn/blob/main/scenarios/regex_namespace.yaml) config file.
|
||||
|
||||
```
|
||||
scenarios:
|
||||
- namespace: "^.*$"
|
||||
runs: 1
|
||||
- namespace: "^.*ingress.*$"
|
||||
runs: 1
|
||||
sleep: 15
|
||||
```
|
||||
|
||||
|
||||
### Steps
|
||||
|
||||
This scenario selects one or more namespaces, depending on the configuration, kills all of the object types listed below in each selected namespace, and waits for them to be Running again in the post action:
|
||||
1. Services
|
||||
2. Daemonsets
|
||||
3. Statefulsets
|
||||
4. Replicasets
|
||||
5. Deployments
|
||||
|
||||
|
||||
#### Post Action
|
||||
|
||||
We run a post-chaos check to wait and verify that the specific objects in each namespace are Ready.
|
||||
|
||||
Here there are two options:
|
||||
|
||||
1. Pass a custom script in the main config scenario list. It runs before the chaos, and its output must match the output of the same script after the chaos scenario (a minimal sketch of such a script is shown after this list).
|
||||
|
||||
See [scenarios/post_action_namespace.py](https://github.com/cloud-bulldozer/kraken/tree/master/scenarios/post_action_namespace.py) for an example
|
||||
|
||||
```
|
||||
- namespace_scenarios:
    - - scenarios/regex_namespace.yaml
      - scenarios/post_action_namespace.py
|
||||
```
|
||||
|
||||
|
||||
2. Allow kraken to wait and check that all killed objects in the namespaces become 'Running' again. Kraken keeps a list of the specific
objects that were killed in each namespace to verify that everything affected recovers properly.
|
||||
|
||||
```
|
||||
wait_time: <seconds to wait for namespace to recover>
|
||||
```
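
For illustration, a post-action script can be any executable whose output can be compared before and after the chaos. Below is a minimal, hypothetical sketch that simply counts the deployments in a namespace using the `kubernetes` Python client (the namespace and the check itself are assumptions, not what `post_action_namespace.py` actually does):

```python
#!/usr/bin/env python3
# Hypothetical post-action check: print the number of deployments in a namespace.
# Kraken compares the output captured before the chaos with the output after it.
from kubernetes import client, config

config.load_kube_config()  # uses the local kubeconfig
apps = client.AppsV1Api()
deployments = apps.list_namespaced_deployment("default")  # namespace is an assumption
print(len(deployments.items))
```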
|
||||
@@ -1,80 +0,0 @@
|
||||
### Service Hijacking Scenarios
|
||||
|
||||
Service Hijacking Scenarios aim to simulate fake HTTP responses from a workload targeted by a
|
||||
`Service` already deployed in the cluster.
|
||||
This scenario is executed by deploying a custom-made web service and modifying the target `Service`
|
||||
selector to direct traffic to this web service for a specified duration.
|
||||
|
||||
The web service's source code is available [here](https://github.com/krkn-chaos/krkn-service-hijacking).
|
||||
It employs a time-based test plan from the scenario configuration file, which specifies the behavior of resources during the chaos scenario as follows:
|
||||
|
||||
```yaml
|
||||
service_target_port: http-web-svc # The port of the service to be hijacked (can be named or numeric, based on the workload and service configuration).
|
||||
service_name: nginx-service # The name of the service that will be hijacked.
|
||||
service_namespace: default # The namespace where the target service is located.
|
||||
image: quay.io/krkn-chaos/krkn-service-hijacking:v0.1.3 # Image of the krkn web service to be deployed to receive traffic.
|
||||
chaos_duration: 30 # Total duration of the chaos scenario in seconds.
|
||||
plan:
|
||||
- resource: "/list/index.php" # Specifies the resource or path to respond to in the scenario. For paths, both the path and query parameters are captured but ignored. For resources, only query parameters are captured.
|
||||
|
||||
steps: # A time-based plan consisting of steps can be defined for each resource.
|
||||
GET: # One or more HTTP methods can be specified for each step. Note: Non-standard methods are supported for fully custom web services (e.g., using NONEXISTENT instead of POST).
|
||||
|
||||
- duration: 15 # Duration in seconds for this step before moving to the next one, if defined. Otherwise, this step will continue until the chaos scenario ends.
|
||||
|
||||
status: 500 # HTTP status code to be returned in this step.
|
||||
mime_type: "application/json" # MIME type of the response for this step.
|
||||
payload: | # The response payload for this step.
|
||||
{
|
||||
"status":"internal server error"
|
||||
}
|
||||
- duration: 15
|
||||
status: 201
|
||||
mime_type: "application/json"
|
||||
payload: |
|
||||
{
|
||||
"status":"resource created"
|
||||
}
|
||||
POST:
|
||||
- duration: 15
|
||||
status: 401
|
||||
mime_type: "application/json"
|
||||
payload: |
|
||||
{
|
||||
"status": "unauthorized"
|
||||
}
|
||||
- duration: 15
|
||||
status: 404
|
||||
mime_type: "text/plain"
|
||||
payload: "not found"
|
||||
|
||||
|
||||
```
|
||||
The scenario will focus on the `service_name` within the `service_namespace`,
|
||||
substituting the selector with a randomly generated one, which is added as a label in the mock service manifest.
|
||||
This allows multiple scenarios to be executed in the same namespace, each targeting different services without
|
||||
causing conflicts.
|
||||
|
||||
The newly deployed mock web service will expose a `service_target_port`,
|
||||
which can be either a named or numeric port based on the service configuration.
|
||||
This ensures that the Service correctly routes HTTP traffic to the mock web service during the chaos run.
|
||||
|
||||
Each step will last for `duration` seconds from the deployment of the mock web service in the cluster.
|
||||
For each HTTP resource, defined as a top-level YAML property of the plan
|
||||
(it could be a specific resource, e.g., /list/index.php, or a path-based resource typical in MVC frameworks),
|
||||
one or more HTTP request methods can be specified. Both standard and custom request methods are supported.
|
||||
|
||||
During this time frame, the web service will respond with:
|
||||
|
||||
- `status`: The [HTTP status code](https://datatracker.ietf.org/doc/html/rfc7231#section-6) (can be standard or custom).
|
||||
- `mime_type`: The [MIME type](https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types) (can be standard or custom).
|
||||
- `payload`: The response body to be returned to the client.
|
||||
|
||||
At the end of the step `duration`, the web service will proceed to the next step (if available) until
|
||||
the global `chaos_duration` concludes. At this point, the original service will be restored,
|
||||
and the custom web service and its resources will be undeployed.
|
||||
|
||||
__NOTE__: Some clients (e.g., cURL, jQuery) may optimize queries using lightweight methods (like HEAD or OPTIONS)
|
||||
to probe API behavior. If these methods are not defined in the test plan, the web service may respond with
|
||||
a `405` or `404` status code. If you encounter unexpected behavior, consider this use case.
|
||||
|
||||
@@ -1,71 +0,0 @@
|
||||
### Signaling to Kraken
|
||||
This functionality allows a user to pause or stop the kraken run at any time, regardless of the number of iterations or the daemon_mode set in the config.
|
||||
|
||||
If publish_kraken_status is set to True in the config, kraken will serve its run status at a URL on the configured port and use the signal posted there to decide if it should continue running.
|
||||
|
||||
By default, it will get posted to http://0.0.0.0:8081/
|
||||
|
||||
An example use case for this feature would be coordinating kraken runs based on the status of the service installation or load on the cluster.
|
||||
|
||||
|
||||
|
||||
#### States
|
||||
There are 3 states in the kraken status:
|
||||
|
||||
```PAUSE```: When the Kraken signal is 'PAUSE', this will pause the kraken test and wait for the wait_duration until the signal returns to RUN.
|
||||
|
||||
```STOP```: When the Kraken signal is 'STOP', end the kraken run and print out report.
|
||||
|
||||
```RUN```: When the Kraken signal is 'RUN', continue kraken run based on iterations.
|
||||
|
||||
|
||||
|
||||
#### Configuration
|
||||
|
||||
In the config you need to set these parameters to tell kraken which port to post the kraken run status to,
and whether you want to publish the status and stop running based on it.
The signal is set to `RUN` by default, meaning it will continue to run the scenarios. It can be set to `PAUSE` for Kraken to act as a listener and wait until it is set to `RUN` before injecting chaos.
|
||||
```
|
||||
port: 8081
|
||||
publish_kraken_status: True
|
||||
signal_state: RUN
|
||||
```
|
||||
|
||||
|
||||
#### Setting Signal
|
||||
|
||||
You can reset the kraken status during kraken execution with a `set_stop_signal.py` script with the following contents:
|
||||
|
||||
```
|
||||
import http.client as cli
|
||||
|
||||
conn = cli.HTTPConnection("0.0.0.0", "<port>")
|
||||
|
||||
conn.request("POST", "/STOP", {})
|
||||
|
||||
# conn.request('POST', '/PAUSE', {})
|
||||
|
||||
# conn.request('POST', '/RUN', {})
|
||||
|
||||
response = conn.getresponse()
|
||||
print(response.read().decode())
|
||||
```
|
||||
|
||||
Make sure to set the correct port number in your set_stop_signal script.
|
||||
|
||||
##### Url Examples
|
||||
To stop run:
|
||||
|
||||
```
|
||||
curl -X POST http://0.0.0.0:8081/STOP
|
||||
```
|
||||
|
||||
To pause run:
|
||||
```
|
||||
curl -X POST http://0.0.0.0:8081/PAUSE
|
||||
```
|
||||
|
||||
To start running again:
|
||||
```
|
||||
curl -X POST http://0.0.0.0:8081/RUN
|
||||
```
|
||||
@@ -1,33 +0,0 @@
|
||||
### SYN Flood Scenarios
|
||||
|
||||
This scenario generates a substantial amount of TCP traffic directed at one or more Kubernetes services within
|
||||
the cluster to test the server's resiliency under extreme traffic conditions.
|
||||
It can also target hosts outside the cluster by specifying a reachable IP address or hostname.
|
||||
This scenario leverages the distributed nature of Kubernetes clusters to instantiate multiple instances
|
||||
of the same pod against a single host, significantly increasing the effectiveness of the attack.
|
||||
The configuration also allows for the specification of multiple node selectors, enabling Kubernetes to schedule
|
||||
the attacker pods on a user-defined subset of nodes to make the test more realistic.
|
||||
|
||||
```yaml
|
||||
packet-size: 120 # hping3 packet size
|
||||
window-size: 64 # hping3 TCP window size
|
||||
duration: 10 # chaos scenario duration
|
||||
namespace: default # namespace where the target service(s) are deployed
|
||||
target-service: target-svc # target service name (if set target-service-label must be empty)
|
||||
target-port: 80 # target service TCP port
|
||||
target-service-label: "" # target service label, can be used to target multiple services at the same time
|
||||
# if they have the same label set (if set target-service must be empty)
|
||||
number-of-pods: 2 # number of attacker pods instantiated per target
|
||||
image: quay.io/krkn-chaos/krkn-syn-flood # syn flood attacker container image
|
||||
attacker-nodes: # sets the node affinity used to schedule the attacker pods. For each node label selector,
# multiple values can be specified so that the kube scheduler can place the attacker pods
# as effectively as possible based on the provided labels. Multiple labels can be specified
|
||||
kubernetes.io/hostname:
|
||||
- host_1
|
||||
- host_2
|
||||
kubernetes.io/os:
|
||||
- linux
|
||||
|
||||
```
|
||||
|
||||
The attacker container source code is available [here](https://github.com/krkn-chaos/krkn-syn-flood).
|
||||
@@ -1,44 +0,0 @@
|
||||
# How to Test Your Changes/Additions
|
||||
|
||||
## Current list of Scenario Types
|
||||
|
||||
Scenario Types:
|
||||
* pod-scenarios
|
||||
* node-scenarios
|
||||
* zone-outages
|
||||
* time-scenarios
|
||||
* cluster-shutdown
|
||||
* container-scenarios
|
||||
* node-cpu-hog
|
||||
* node-io-hog
|
||||
* node-memory-hog
|
||||
* application-outages
|
||||
|
||||
## Adding a New Scenario
|
||||
1. Create a folder under [kraken/kraken](../kraken) with a name pertinent to your scenario.
|
||||
|
||||
2. Create a python file that will have a generic run function to be the base of your scenario (a rough sketch is shown after this list).
|
||||
|
||||
a. See [shut_down.py](../kraken/shut_down/common_shut_down_func.py) for example.
|
||||
|
||||
3. Add in a scenario yaml file to run your specific scenario under [scenarios](../scenarios).
|
||||
|
||||
a. Try to add as many parameters as possible and be sure to give them default values in your run function.
|
||||
|
||||
4. Add all functionality and helper functions in the file you made above (Step 2).
|
||||
|
||||
5. Add a caller for the new scenario type in [run_kraken.py](../run_kraken.py) (around line 154).
|
||||
|
||||
a. This will also require you to add the new scenario python script to your imports.
|
||||
|
||||
6. Add scenario type and scenario yaml to the scenario list in [config](../config/config.yaml) and [config_performance](../config/config_performance.yaml).
|
||||
|
||||
7. Update this doc and main README with new scenario type.
|
||||
|
||||
8. Add CI test for new scenario.
|
||||
|
||||
a. Refer to test [Readme](../CI/README.md#adding-a-test-case) for more details.
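
As a rough sketch of step 2, a scenario module's generic run function might look like the following (the signature, config keys, and defaults are hypothetical; use the existing scenarios such as shut_down.py as the authoritative reference):

```python
# kraken/my_scenario/my_scenario_funcs.py (hypothetical module and signature)
import logging
import time

import yaml


def run(scenarios_list, config, wait_duration):
    """Generic entry point: load each scenario yaml, inject the chaos, then wait."""
    for scenario_file in scenarios_list:
        with open(scenario_file) as f:
            scenario_config = yaml.safe_load(f)
        # give every optional parameter a sensible default
        duration = scenario_config.get("duration", 60)
        logging.info("Injecting chaos from %s for %ss", scenario_file, duration)
        # ... perform the actual disruption here ...
        logging.info("Waiting for the specified duration: %s", wait_duration)
        time.sleep(wait_duration)
```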
|
||||
|
||||
## Follow Contribute guide
|
||||
|
||||
Once you are happy with your changes, follow the [contribution](#docs/contribute.md) guide to create your own branch and squash your commits.
|
||||
@@ -1,33 +0,0 @@
|
||||
### Time/Date Skew Scenarios
|
||||
|
||||
Using this type of scenario configuration, one is able to change the time and/or date of the system for pods or nodes.
|
||||
|
||||
Configuration Options:
|
||||
|
||||
**action:** skew_time or skew_date.
|
||||
|
||||
**object_type:** pod or node.
|
||||
|
||||
**namespace:** namespace of the pods you want to skew. Needs to be set if setting a specific pod name.
|
||||
|
||||
**label_selector:** Label on the nodes or pods you want to skew.
|
||||
|
||||
**container_name:** Container name in pod you want to reset time on. If left blank it will randomly select one.
|
||||
|
||||
**object_name:** List of the names of pods or nodes you want to skew.
|
||||
|
||||
Refer to [time_scenarios_example](https://github.com/krkn-chaos/krkn/blob/main/scenarios/time_scenarios_example.yml) config file.
|
||||
|
||||
```
|
||||
time_scenarios:
|
||||
- action: skew_time
|
||||
object_type: pod
|
||||
object_name:
|
||||
- apiserver-868595fcbb-6qnsc
|
||||
- apiserver-868595fcbb-mb9j5
|
||||
namespace: openshift-apiserver
|
||||
container_name: openshift-apiserver
|
||||
- action: skew_date
|
||||
object_type: node
|
||||
label_selector: node-role.kubernetes.io/worker
|
||||
```
|
||||
@@ -1,26 +0,0 @@
|
||||
### Zone outage scenario
|
||||
Scenario to create an outage in a targeted zone in the public cloud to understand the impact on both the Kubernetes/OpenShift control plane and the applications running on the worker nodes in that zone. It tweaks the network ACL of the zone to simulate the failure, which stops both ingress and egress traffic from all the nodes in that zone for the specified duration, and then reverts the ACL to its previous state. Zone outage can be injected by placing the zone_outage config file under the zone_outages option in the [kraken config](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml). Refer to the [zone_outage_scenario](https://github.com/redhat-chaos/krkn/blob/main/scenarios/zone_outage.yaml) config file for the parameters that need to be defined.
|
||||
|
||||
Refer to [cloud setup](cloud_setup.md) to configure your CLI properly for the cloud provider of the targeted cluster.
|
||||
|
||||
##### Current accepted cloud types:
|
||||
* [AWS](cloud_setup.md#aws)
|
||||
|
||||
##### Sample scenario config
|
||||
```
|
||||
zone_outage: # Scenario to create an outage of a zone by tweaking network ACL.
|
||||
cloud_type: aws # Cloud type on which Kubernetes/OpenShift runs. aws is the only platform supported currently for this scenario.
|
||||
duration: 600 # Duration in seconds after which the zone will be back online.
|
||||
vpc_id: # Cluster virtual private network to target.
|
||||
subnet_id: [subnet1, subnet2] # List of subnet-id's to deny both ingress and egress traffic.
|
||||
```
|
||||
|
||||
**NOTE**: vpc_id and subnet_id can be obtained from the cloud web console by selecting one of the instances in the targeted zone (us-west-2a, for example).
**NOTE**: Multiple zones will experience downtime if multiple subnets are targeted, which might have an impact on cluster health, especially if the zones have control plane components deployed.
|
||||
|
||||
##### Debugging steps in case of failures
|
||||
If the steps that revert the network ACL to re-allow traffic and bring back the cluster nodes fail, the nodes in the targeted zone will be left in the `NotReady` condition. Here is how to fix it:
|
||||
- OpenShift by default deploys the nodes in different zones for fault tolerance, for example us-west-2a, us-west-2b, us-west-2c. The cluster is associated with a virtual private network, and each zone has its own subnet with a network ACL that defines the ingress and egress traffic rules at the zone level, unlike security groups, which operate at the instance level.
|
||||
- From the cloud web console, select one of the instances in the zone which is down and go to the subnet_id specified in the config.
|
||||
- Look at the network ACL associated with the subnet and you will see both ingress and egress traffic being denied, which is expected since Kraken deliberately injects it.
|
||||
- Kraken only switches the network ACL while keeping the original or default network ACL around; switching back to the default network ACL from the drop-down menu will bring the nodes in the targeted zone back into the Ready state (a minimal sketch of this swap is shown after this list).
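
Here is a minimal sketch of that ACL swap using the AWS `boto3` client, for reference when restoring traffic manually (the subnet and ACL IDs are placeholders and the region is an assumption):

```python
import boto3  # assumes AWS credentials are configured for the target account

ec2 = boto3.client("ec2", region_name="us-west-2")  # region is an assumption

subnet_id = "subnet-0123456789abcdef0"   # placeholder: subnet of the affected zone
default_acl_id = "acl-0123456789abcdef0" # placeholder: the original/default network ACL

# Find the ACL association currently attached to the subnet.
acl = ec2.describe_network_acls(
    Filters=[{"Name": "association.subnet-id", "Values": [subnet_id]}]
)["NetworkAcls"][0]
association_id = next(
    a["NetworkAclAssociationId"] for a in acl["Associations"] if a["SubnetId"] == subnet_id
)

# Re-associate the subnet with the original/default ACL to restore traffic.
ec2.replace_network_acl_association(
    AssociationId=association_id, NetworkAclId=default_acl_id
)
```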
|
||||
@@ -29,9 +29,9 @@ def calculate_zscores(data):
|
||||
|
||||
|
||||
def identify_outliers(data, threshold):
|
||||
outliers_cpu = data[data["CPU"] > threshold]["Service"].tolist()
|
||||
outliers_memory = data[data["Memory"] > threshold]["Service"].tolist()
|
||||
outliers_network = data[data["Network"] > threshold]["Service"].tolist()
|
||||
outliers_cpu = data[data["CPU"] > float(threshold)]["Service"].tolist()
|
||||
outliers_memory = data[data["Memory"] > float(threshold)]["Service"].tolist()
|
||||
outliers_network = data[data["Network"] > float(threshold)]["Service"].tolist()
|
||||
|
||||
return outliers_cpu, outliers_memory, outliers_network
|
||||
|
||||
@@ -39,13 +39,13 @@ def identify_outliers(data, threshold):
|
||||
def get_services_above_heatmap_threshold(dataframe, cpu_threshold, mem_threshold):
|
||||
# Filter the DataFrame based on CPU_HEATMAP and MEM_HEATMAP thresholds
|
||||
filtered_df = dataframe[
|
||||
((dataframe["CPU"] / dataframe["CPU_LIMITS"]) > cpu_threshold)
|
||||
((dataframe["CPU"] / dataframe["CPU_LIMITS"]) > float(cpu_threshold))
|
||||
]
|
||||
# Get the lists of services
|
||||
cpu_services = filtered_df["service"].tolist()
|
||||
|
||||
filtered_df = dataframe[
|
||||
((dataframe["MEM"] / dataframe["MEM_LIMITS"]) > mem_threshold)
|
||||
((dataframe["MEM"] / dataframe["MEM_LIMITS"]) > float(mem_threshold))
|
||||
]
|
||||
mem_services = filtered_df["service"].tolist()
|
||||
|
||||
|
||||
@@ -2,11 +2,13 @@ from __future__ import annotations
|
||||
|
||||
import datetime
|
||||
import os.path
|
||||
import math
|
||||
from typing import Optional, List, Dict, Any
|
||||
|
||||
import urllib3
|
||||
import logging
|
||||
import urllib3
|
||||
import sys
|
||||
import json
|
||||
|
||||
import yaml
|
||||
from krkn_lib.elastic.krkn_elastic import KrknElastic
|
||||
@@ -25,8 +27,7 @@ def alerts(
|
||||
start_time,
|
||||
end_time,
|
||||
alert_profile,
|
||||
elastic_collect_alerts,
|
||||
elastic_alerts_index,
|
||||
elastic_alerts_index
|
||||
):
|
||||
|
||||
if alert_profile is None or os.path.exists(alert_profile) is False:
|
||||
@@ -46,6 +47,7 @@ def alerts(
|
||||
for alert in profile_yaml:
|
||||
if list(alert.keys()).sort() != ["expr", "description", "severity"].sort():
|
||||
logging.error(f"wrong alert {alert}, skipping")
|
||||
continue
|
||||
|
||||
processed_alert = prom_cli.process_alert(
|
||||
alert,
|
||||
@@ -56,7 +58,6 @@ def alerts(
|
||||
processed_alert[0]
|
||||
and processed_alert[1]
|
||||
and elastic
|
||||
and elastic_collect_alerts
|
||||
):
|
||||
elastic_alert = ElasticAlert(
|
||||
run_uuid=run_uuid,
|
||||
@@ -156,15 +157,16 @@ def metrics(
|
||||
start_time,
|
||||
end_time,
|
||||
metrics_profile,
|
||||
elastic_collect_metrics,
|
||||
elastic_metrics_index,
|
||||
telemetry_json
|
||||
) -> list[dict[str, list[(int, float)] | str]]:
|
||||
metrics_list: list[dict[str, list[(int, float)] | str]] = []
|
||||
|
||||
if metrics_profile is None or os.path.exists(metrics_profile) is False:
|
||||
logging.error(f"{metrics_profile} alert profile does not exist")
|
||||
sys.exit(1)
|
||||
with open(metrics_profile) as profile:
|
||||
profile_yaml = yaml.safe_load(profile)
|
||||
|
||||
if not profile_yaml["metrics"] or not isinstance(profile_yaml["metrics"], list):
|
||||
logging.error(
|
||||
f"{metrics_profile} wrong file format, alert profile must be "
|
||||
@@ -172,30 +174,84 @@ def metrics(
|
||||
f"expr, description, severity"
|
||||
)
|
||||
sys.exit(1)
|
||||
|
||||
elapsed_ceil = math.ceil((end_time - start_time)/ 60 )
|
||||
elapsed_time = str(elapsed_ceil) + "m"
|
||||
metrics_list: list[dict[str, int | float | str]] = []
|
||||
for metric_query in profile_yaml["metrics"]:
|
||||
if (
|
||||
query = metric_query['query']
|
||||
|
||||
# calculate elapsed time
|
||||
if ".elapsed" in metric_query["query"]:
|
||||
query = metric_query['query'].replace(".elapsed", elapsed_time)
|
||||
if "instant" in list(metric_query.keys()) and metric_query['instant']:
|
||||
metrics_result = prom_cli.process_query(
|
||||
query
|
||||
)
|
||||
elif (
|
||||
list(metric_query.keys()).sort()
|
||||
!= ["query", "metricName", "instant"].sort()
|
||||
== ["query", "metricName"].sort()
|
||||
):
|
||||
logging.error(f"wrong alert {metric_query}, skipping")
|
||||
metrics_result = prom_cli.process_prom_query_in_range(
|
||||
metric_query["query"],
|
||||
start_time=datetime.datetime.fromtimestamp(start_time),
|
||||
end_time=datetime.datetime.fromtimestamp(end_time),
|
||||
)
|
||||
|
||||
metric = {"name": metric_query["metricName"], "values": []}
|
||||
metrics_result = prom_cli.process_prom_query_in_range(
|
||||
query,
|
||||
start_time=datetime.datetime.fromtimestamp(start_time),
|
||||
end_time=datetime.datetime.fromtimestamp(end_time), granularity=30
|
||||
)
|
||||
else:
|
||||
logging.info('didnt match keys')
|
||||
continue
|
||||
|
||||
for returned_metric in metrics_result:
|
||||
if "values" in returned_metric:
|
||||
metric = {"query": query, "metricName": metric_query['metricName']}
|
||||
for k,v in returned_metric['metric'].items():
|
||||
metric[k] = v
|
||||
|
||||
if "values" in returned_metric:
|
||||
for value in returned_metric["values"]:
|
||||
try:
|
||||
metric["values"].append((value[0], float(value[1])))
|
||||
metric['timestamp'] = str(datetime.datetime.fromtimestamp(value[0]))
|
||||
metric["value"] = float(value[1])
|
||||
# want double array of the known details and the metrics specific to each call
|
||||
metrics_list.append(metric.copy())
|
||||
except ValueError:
|
||||
pass
|
||||
metrics_list.append(metric)
|
||||
elif "value" in returned_metric:
|
||||
try:
|
||||
value = returned_metric["value"]
|
||||
metric['timestamp'] = str(datetime.datetime.fromtimestamp(value[0]))
|
||||
metric["value"] = float(value[1])
|
||||
|
||||
if elastic_collect_metrics and elastic:
|
||||
# want double array of the known details and the metrics specific to each call
|
||||
metrics_list.append(metric.copy())
|
||||
except ValueError:
|
||||
pass
|
||||
telemetry_json = json.loads(telemetry_json)
|
||||
for scenario in telemetry_json['scenarios']:
|
||||
for k,v in scenario["affected_pods"].items():
|
||||
metric_name = "affected_pods_recovery"
|
||||
metric = {"metricName": metric_name, "type": k}
|
||||
if type(v) is list:
|
||||
for pod in v:
|
||||
for k,v in pod.items():
|
||||
metric[k] = v
|
||||
metric['timestamp'] = str(datetime.datetime.now())
|
||||
print('adding pod' + str(metric))
|
||||
metrics_list.append(metric.copy())
|
||||
for affected_node in scenario["affected_nodes"]:
|
||||
metric_name = "affected_nodes_recovery"
|
||||
metric = {"metricName": metric_name}
|
||||
for k,v in affected_node.items():
|
||||
metric[k] = v
|
||||
metric['timestamp'] = str(datetime.datetime.now())
|
||||
metrics_list.append(metric.copy())
|
||||
if telemetry_json['health_checks']:
|
||||
for health_check in telemetry_json["health_checks"]:
|
||||
metric_name = "health_check_recovery"
|
||||
metric = {"metricName": metric_name}
|
||||
for k,v in health_check.items():
|
||||
metric[k] = v
|
||||
metric['timestamp'] = str(datetime.datetime.now())
|
||||
metrics_list.append(metric.copy())
|
||||
if elastic:
|
||||
result = elastic.upload_metrics_to_elasticsearch(
|
||||
run_uuid=run_uuid, index=elastic_metrics_index, raw_data=metrics_list
|
||||
)
|
||||
|
||||
@@ -56,6 +56,7 @@ class AbstractScenarioPlugin(ABC):
|
||||
scenario_telemetries: list[ScenarioTelemetry] = []
|
||||
failed_scenarios = []
|
||||
wait_duration = krkn_config["tunings"]["wait_duration"]
|
||||
events_backup = krkn_config["telemetry"]["events_backup"]
|
||||
for scenario_config in scenarios_list:
|
||||
if isinstance(scenario_config, list):
|
||||
logging.error(
|
||||
@@ -67,6 +68,7 @@ class AbstractScenarioPlugin(ABC):
|
||||
|
||||
scenario_telemetry = ScenarioTelemetry()
|
||||
scenario_telemetry.scenario = scenario_config
|
||||
scenario_telemetry.scenario_type = self.get_scenario_types()[0]
|
||||
scenario_telemetry.start_timestamp = time.time()
|
||||
parsed_scenario_config = telemetry.set_parameters_base64(
|
||||
scenario_telemetry, scenario_config
|
||||
@@ -99,13 +101,15 @@ class AbstractScenarioPlugin(ABC):
|
||||
int(scenario_telemetry.start_timestamp),
|
||||
int(scenario_telemetry.end_timestamp),
|
||||
)
|
||||
utils.populate_cluster_events(
|
||||
scenario_telemetry,
|
||||
parsed_scenario_config,
|
||||
telemetry.get_lib_kubernetes(),
|
||||
int(scenario_telemetry.start_timestamp),
|
||||
int(scenario_telemetry.end_timestamp),
|
||||
)
|
||||
|
||||
if events_backup:
|
||||
utils.populate_cluster_events(
|
||||
krkn_config,
|
||||
parsed_scenario_config,
|
||||
telemetry.get_lib_kubernetes(),
|
||||
int(scenario_telemetry.start_timestamp),
|
||||
int(scenario_telemetry.end_timestamp),
|
||||
)
|
||||
|
||||
if scenario_telemetry.exit_status != 0:
|
||||
failed_scenarios.append(scenario_config)
|
||||
|
||||
@@ -3,7 +3,7 @@ import time
|
||||
import yaml
|
||||
from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
|
||||
from krkn_lib.utils import get_yaml_item_value
|
||||
from krkn_lib.utils import get_yaml_item_value, get_random_string
|
||||
from jinja2 import Template
|
||||
from krkn import cerberus
|
||||
from krkn.scenario_plugins.abstract_scenario_plugin import AbstractScenarioPlugin
|
||||
@@ -33,17 +33,22 @@ class ApplicationOutageScenarioPlugin(AbstractScenarioPlugin):
|
||||
duration = get_yaml_item_value(scenario_config, "duration", 60)
|
||||
|
||||
start_time = int(time.time())
|
||||
policy_name = f"krkn-deny-{get_random_string(5)}"
|
||||
|
||||
network_policy_template = """---
|
||||
network_policy_template = (
|
||||
"""---
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
name: kraken-deny
|
||||
name: """
|
||||
+ policy_name
|
||||
+ """
|
||||
spec:
|
||||
podSelector:
|
||||
matchLabels: {{ pod_selector }}
|
||||
policyTypes: {{ traffic_type }}
|
||||
"""
|
||||
)
|
||||
t = Template(network_policy_template)
|
||||
rendered_spec = t.render(
|
||||
pod_selector=pod_selector, traffic_type=traffic_type
|
||||
@@ -65,7 +70,7 @@ class ApplicationOutageScenarioPlugin(AbstractScenarioPlugin):
|
||||
# unblock the traffic by deleting the network policy
|
||||
logging.info("Deleting the network policy")
|
||||
lib_telemetry.get_lib_kubernetes().delete_net_policy(
|
||||
"kraken-deny", namespace
|
||||
policy_name, namespace
|
||||
)
|
||||
|
||||
logging.info(
|
||||
|
||||
@@ -1,197 +0,0 @@
|
||||
import logging
|
||||
import os
|
||||
from pathlib import Path
|
||||
import arcaflow
|
||||
import yaml
|
||||
from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
|
||||
from krkn.scenario_plugins.abstract_scenario_plugin import AbstractScenarioPlugin
|
||||
from krkn.scenario_plugins.arcaflow.context_auth import ContextAuth
|
||||
|
||||
|
||||
class ArcaflowScenarioPlugin(AbstractScenarioPlugin):
|
||||
|
||||
def run(
|
||||
self,
|
||||
run_uuid: str,
|
||||
scenario: str,
|
||||
krkn_config: dict[str, any],
|
||||
lib_telemetry: KrknTelemetryOpenshift,
|
||||
scenario_telemetry: ScenarioTelemetry,
|
||||
) -> int:
|
||||
try:
|
||||
engine_args = self.build_args(scenario)
|
||||
status_code = self.run_workflow(
|
||||
engine_args, lib_telemetry.get_lib_kubernetes().get_kubeconfig_path()
|
||||
)
|
||||
return status_code
|
||||
except Exception as e:
|
||||
logging.error("ArcaflowScenarioPlugin exiting due to Exception %s" % e)
|
||||
return 1
|
||||
|
||||
def get_scenario_types(self) -> [str]:
|
||||
return ["hog_scenarios", "arcaflow_scenario"]
|
||||
|
||||
def run_workflow(
|
||||
self, engine_args: arcaflow.EngineArgs, kubeconfig_path: str
|
||||
) -> int:
|
||||
self.set_arca_kubeconfig(engine_args, kubeconfig_path)
|
||||
exit_status = arcaflow.run(engine_args)
|
||||
return exit_status
|
||||
|
||||
def build_args(self, input_file: str) -> arcaflow.EngineArgs:
|
||||
"""sets the kubeconfig parsed by setArcaKubeConfig as an input to the arcaflow workflow"""
|
||||
current_path = Path().resolve()
|
||||
context = f"{current_path}/{Path(input_file).parent}"
|
||||
workflow = f"{context}/workflow.yaml"
|
||||
config = f"{context}/config.yaml"
|
||||
if not os.path.exists(context):
|
||||
raise Exception(
|
||||
"context folder for arcaflow workflow not found: {}".format(context)
|
||||
)
|
||||
if not os.path.exists(input_file):
|
||||
raise Exception(
|
||||
"input file for arcaflow workflow not found: {}".format(input_file)
|
||||
)
|
||||
if not os.path.exists(workflow):
|
||||
raise Exception(
|
||||
"workflow file for arcaflow workflow not found: {}".format(workflow)
|
||||
)
|
||||
if not os.path.exists(config):
|
||||
raise Exception(
|
||||
"configuration file for arcaflow workflow not found: {}".format(config)
|
||||
)
|
||||
|
||||
engine_args = arcaflow.EngineArgs()
|
||||
engine_args.context = context
|
||||
engine_args.config = config
|
||||
engine_args.workflow = workflow
|
||||
engine_args.input = f"{current_path}/{input_file}"
|
||||
return engine_args
|
||||
|
||||
def set_arca_kubeconfig(
|
||||
self, engine_args: arcaflow.EngineArgs, kubeconfig_path: str
|
||||
):
|
||||
|
||||
context_auth = ContextAuth()
|
||||
if not os.path.exists(kubeconfig_path):
|
||||
raise Exception("kubeconfig not found in {}".format(kubeconfig_path))
|
||||
|
||||
with open(kubeconfig_path, "r") as stream:
|
||||
try:
|
||||
kubeconfig = yaml.safe_load(stream)
|
||||
context_auth.fetch_auth_data(kubeconfig)
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"impossible to read kubeconfig file in: {}".format(kubeconfig_path)
|
||||
)
|
||||
raise e
|
||||
|
||||
kubeconfig_str = self.set_kubeconfig_auth(kubeconfig, context_auth)
|
||||
|
||||
with open(engine_args.input, "r") as stream:
|
||||
input_file = yaml.safe_load(stream)
|
||||
if "input_list" in input_file and isinstance(
|
||||
input_file["input_list"], list
|
||||
):
|
||||
for index, _ in enumerate(input_file["input_list"]):
|
||||
if isinstance(input_file["input_list"][index], dict):
|
||||
input_file["input_list"][index]["kubeconfig"] = kubeconfig_str
|
||||
else:
|
||||
input_file["kubeconfig"] = kubeconfig_str
|
||||
stream.close()
|
||||
with open(engine_args.input, "w") as stream:
|
||||
yaml.safe_dump(input_file, stream)
|
||||
|
||||
with open(engine_args.config, "r") as stream:
|
||||
config_file = yaml.safe_load(stream)
|
||||
if config_file["deployers"]["image"]["deployer_name"] == "kubernetes":
|
||||
kube_connection = self.set_kubernetes_deployer_auth(
|
||||
config_file["deployers"]["image"]["connection"], context_auth
|
||||
)
|
||||
config_file["deployers"]["image"]["connection"] = kube_connection
|
||||
with open(engine_args.config, "w") as stream:
|
||||
yaml.safe_dump(config_file, stream, explicit_start=True, width=4096)
|
||||
|
||||
def set_kubernetes_deployer_auth(
|
||||
self, deployer: any, context_auth: ContextAuth
|
||||
) -> any:
|
||||
if context_auth.clusterHost is not None:
|
||||
deployer["host"] = context_auth.clusterHost
|
||||
if context_auth.clientCertificateData is not None:
|
||||
deployer["cert"] = context_auth.clientCertificateData
|
||||
if context_auth.clientKeyData is not None:
|
||||
deployer["key"] = context_auth.clientKeyData
|
||||
if context_auth.clusterCertificateData is not None:
|
||||
deployer["cacert"] = context_auth.clusterCertificateData
|
||||
if context_auth.username is not None:
|
||||
deployer["username"] = context_auth.username
|
||||
if context_auth.password is not None:
|
||||
deployer["password"] = context_auth.password
|
||||
if context_auth.bearerToken is not None:
|
||||
deployer["bearerToken"] = context_auth.bearerToken
|
||||
return deployer
|
||||
|
||||
def set_kubeconfig_auth(self, kubeconfig: any, context_auth: ContextAuth) -> str:
|
||||
"""
|
||||
Builds an arcaflow-compatible kubeconfig representation and returns it as a string.
|
||||
In order to run arcaflow plugins in kubernetes/openshift the kubeconfig must contain client certificate/key
|
||||
and server certificate base64 encoded within the kubeconfig file itself in *-data fields. That is not always the
|
||||
case; in fact the kubeconfig may contain filesystem paths to those files. This function builds an arcaflow-compatible
|
||||
kubeconfig file and returns it as a string that can be safely included in input.yaml
|
||||
"""
|
||||
|
||||
if "current-context" not in kubeconfig.keys():
|
||||
raise Exception(
|
||||
"invalid kubeconfig file, impossible to determine current-context"
|
||||
)
|
||||
user_id = None
|
||||
cluster_id = None
|
||||
user_name = None
|
||||
cluster_name = None
|
||||
current_context = kubeconfig["current-context"]
|
||||
for context in kubeconfig["contexts"]:
|
||||
if context["name"] == current_context:
|
||||
user_name = context["context"]["user"]
|
||||
cluster_name = context["context"]["cluster"]
|
||||
if user_name is None:
|
||||
raise Exception(
|
||||
"user not set for context {} in kubeconfig file".format(current_context)
|
||||
)
|
||||
if cluster_name is None:
|
||||
raise Exception(
|
||||
"cluster not set for context {} in kubeconfig file".format(
|
||||
current_context
|
||||
)
|
||||
)
|
||||
|
||||
for index, user in enumerate(kubeconfig["users"]):
|
||||
if user["name"] == user_name:
|
||||
user_id = index
|
||||
for index, cluster in enumerate(kubeconfig["clusters"]):
|
||||
if cluster["name"] == cluster_name:
|
||||
cluster_id = index
|
||||
|
||||
if cluster_id is None:
|
||||
raise Exception(
|
||||
"no cluster {} found in kubeconfig users".format(cluster_name)
|
||||
)
|
||||
if "client-certificate" in kubeconfig["users"][user_id]["user"]:
|
||||
kubeconfig["users"][user_id]["user"][
|
||||
"client-certificate-data"
|
||||
] = context_auth.clientCertificateDataBase64
|
||||
del kubeconfig["users"][user_id]["user"]["client-certificate"]
|
||||
|
||||
if "client-key" in kubeconfig["users"][user_id]["user"]:
|
||||
kubeconfig["users"][user_id]["user"][
|
||||
"client-key-data"
|
||||
] = context_auth.clientKeyDataBase64
|
||||
del kubeconfig["users"][user_id]["user"]["client-key"]
|
||||
|
||||
if "certificate-authority" in kubeconfig["clusters"][cluster_id]["cluster"]:
|
||||
kubeconfig["clusters"][cluster_id]["cluster"][
|
||||
"certificate-authority-data"
|
||||
] = context_auth.clusterCertificateDataBase64
|
||||
del kubeconfig["clusters"][cluster_id]["cluster"]["certificate-authority"]
|
||||
kubeconfig_str = yaml.dump(kubeconfig)
|
||||
return kubeconfig_str
|
||||
@@ -1,142 +0,0 @@
|
||||
import os
|
||||
import base64
|
||||
|
||||
|
||||
class ContextAuth:
|
||||
clusterCertificate: str = None
|
||||
clusterCertificateData: str = None
|
||||
clusterHost: str = None
|
||||
clientCertificate: str = None
|
||||
clientCertificateData: str = None
|
||||
clientKey: str = None
|
||||
clientKeyData: str = None
|
||||
clusterName: str = None
|
||||
username: str = None
|
||||
password: str = None
|
||||
bearerToken: str = None
|
||||
# TODO: integrate in krkn-lib-kubernetes in the next iteration
|
||||
|
||||
@property
|
||||
def clusterCertificateDataBase64(self):
|
||||
if self.clusterCertificateData is not None:
|
||||
return base64.b64encode(bytes(self.clusterCertificateData, "utf8")).decode(
|
||||
"ascii"
|
||||
)
|
||||
return
|
||||
|
||||
@property
|
||||
def clientCertificateDataBase64(self):
|
||||
if self.clientCertificateData is not None:
|
||||
return base64.b64encode(bytes(self.clientCertificateData, "utf8")).decode(
|
||||
"ascii"
|
||||
)
|
||||
return
|
||||
|
||||
@property
|
||||
def clientKeyDataBase64(self):
|
||||
if self.clientKeyData is not None:
|
||||
return base64.b64encode(bytes(self.clientKeyData, "utf-8")).decode("ascii")
|
||||
return
|
||||
|
||||
def fetch_auth_data(self, kubeconfig: any):
|
||||
context_username = None
|
||||
current_context = kubeconfig["current-context"]
|
||||
if current_context is None:
|
||||
raise Exception("no current-context found in kubeconfig")
|
||||
|
||||
for context in kubeconfig["contexts"]:
|
||||
if context["name"] == current_context:
|
||||
context_username = context["context"]["user"]
|
||||
self.clusterName = context["context"]["cluster"]
|
||||
if context_username is None:
|
||||
raise Exception("user not found for context {0}".format(current_context))
|
||||
if self.clusterName is None:
|
||||
raise Exception("cluster not found for context {0}".format(current_context))
|
||||
cluster_id = None
|
||||
user_id = None
|
||||
for index, user in enumerate(kubeconfig["users"]):
|
||||
if user["name"] == context_username:
|
||||
user_id = index
|
||||
if user_id is None:
|
||||
raise Exception(
|
||||
"user {0} not found in kubeconfig users".format(context_username)
|
||||
)
|
||||
|
||||
for index, cluster in enumerate(kubeconfig["clusters"]):
|
||||
if cluster["name"] == self.clusterName:
|
||||
cluster_id = index
|
||||
|
||||
if cluster_id is None:
|
||||
raise Exception(
|
||||
"no cluster {} found in kubeconfig users".format(self.clusterName)
|
||||
)
|
||||
|
||||
user = kubeconfig["users"][user_id]["user"]
|
||||
cluster = kubeconfig["clusters"][cluster_id]["cluster"]
|
||||
# sets cluster api URL
|
||||
self.clusterHost = cluster["server"]
|
||||
# client certificates
|
||||
|
||||
if "client-key" in user:
|
||||
try:
|
||||
self.clientKey = user["client-key"]
|
||||
self.clientKeyData = self.read_file(user["client-key"])
|
||||
except Exception as e:
|
||||
raise e
|
||||
|
||||
if "client-key-data" in user:
|
||||
try:
|
||||
self.clientKeyData = base64.b64decode(user["client-key-data"]).decode(
|
||||
"utf-8"
|
||||
)
|
||||
except Exception as e:
|
||||
raise Exception("impossible to decode client-key-data")
|
||||
|
||||
if "client-certificate" in user:
|
||||
try:
|
||||
self.clientCertificate = user["client-certificate"]
|
||||
self.clientCertificateData = self.read_file(user["client-certificate"])
|
||||
except Exception as e:
|
||||
raise e
|
||||
|
||||
if "client-certificate-data" in user:
|
||||
try:
|
||||
self.clientCertificateData = base64.b64decode(
|
||||
user["client-certificate-data"]
|
||||
).decode("utf-8")
|
||||
except Exception as e:
|
||||
raise Exception("impossible to decode client-certificate-data")
|
||||
|
||||
# cluster certificate authority
|
||||
|
||||
if "certificate-authority" in cluster:
|
||||
try:
|
||||
self.clusterCertificate = cluster["certificate-authority"]
|
||||
self.clusterCertificateData = self.read_file(
|
||||
cluster["certificate-authority"]
|
||||
)
|
||||
except Exception as e:
|
||||
raise e
|
||||
|
||||
if "certificate-authority-data" in cluster:
|
||||
try:
|
||||
self.clusterCertificateData = base64.b64decode(
|
||||
cluster["certificate-authority-data"]
|
||||
).decode("utf-8")
|
||||
except Exception as e:
|
||||
raise Exception("impossible to decode certificate-authority-data")
|
||||
|
||||
if "username" in user:
|
||||
self.username = user["username"]
|
||||
|
||||
if "password" in user:
|
||||
self.password = user["password"]
|
||||
|
||||
if "token" in user:
|
||||
self.bearerToken = user["token"]
|
||||
|
||||
def read_file(self, filename: str) -> str:
|
||||
if not os.path.exists(filename):
|
||||
raise Exception("file not found {0} ".format(filename))
|
||||
with open(filename, "rb") as file_stream:
|
||||
return file_stream.read().decode("utf-8")
|
||||
@@ -1,19 +0,0 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p
|
||||
a3ViZUNBMB4XDTIzMDMxMzE1NDAxM1oXDTMzMDMxMTE1NDAxM1owFTETMBEGA1UE
|
||||
AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMnz
|
||||
U/gIbJBRGOgNYVKX2fV03ANOwnM4VjquR28QMAdxURqgOFZ6IxYNysHEyxxE9I+I
|
||||
DAm9hi4vQPbOX7FlxUezuzw+ExEfa6RRJ+n+AGJOV1lezCVph6OaJxB1+L1UqaDZ
|
||||
eM3B4cUf/iCc5Y4bs927+CBG3MJL/jmCVPCO+MiSn/l73PXSFNJAYMvRj42zkXqD
|
||||
CVG9CwY2vWgZnnzl01l7jNGtie871AmV2uqKakJrQ2ILhD+8fZk4jE5JBDTCZnqQ
|
||||
pXIc+vERNKLUS8cvjO6Ux8dMv/Z7+xonpXOU59LlpUdHWP9jgCvMTwiOriwqGjJ+
|
||||
pQJWpX9Dm+oxJiVOJzsCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW
|
||||
MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
|
||||
BBQU9pDMtbayJdNM6bp0IG8dcs15qTANBgkqhkiG9w0BAQsFAAOCAQEAtl9TVKPA
|
||||
hTnPODqv0AGTqreS9kLg4WUUjZRaPUkPWmtCoTh2Yf55nRWdHOHeZnCWDSg24x42
|
||||
lpt+13IdqKew1RKTpKCTkicMFi090A01bYu/w39Cm6nOAA5h8zkgSkV5czvQotuV
|
||||
SoN2vB+nbuY28ah5PkdqjMHEZbNwa59cgEke8wB1R1DWFQ/pqflrH2v9ACAuY+5Q
|
||||
i673tA6CXrb1YfaCQnVBzcfvjGS1MqShPKpOLMF+/GccPczNimaBxMnKvYLvf3pN
|
||||
qEUrJC00mAcein8HmxR2Xz8wredbMUUyrQxW29pZJwfGE5GU0olnlsA0lZLbTwio
|
||||
xoolo5y+fsK/dA==
|
||||
-----END CERTIFICATE-----
|
||||
@@ -1,19 +0,0 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIDITCCAgmgAwIBAgIBAjANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p
|
||||
a3ViZUNBMB4XDTIzMDUwMTA4NTc0N1oXDTI2MDUwMTA4NTc0N1owMTEXMBUGA1UE
|
||||
ChMOc3lzdGVtOm1hc3RlcnMxFjAUBgNVBAMTDW1pbmlrdWJlLXVzZXIwggEiMA0G
|
||||
CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC0b7uy9nQYrh7uC5NODve7dFNLAgo5
|
||||
pWRS6Kx13ULA55gOpieZiI5/1jwUBjOz0Hhl5QAdHC1HDNu5wf4MmwIEheuq3kMA
|
||||
mfuvNxW2BnWSDuXyUMlBfqlwg5o6W8ndEWaK33D7wd2WQsSsAnhQPJSjnzWKvWKq
|
||||
+Kbcygc4hdss/ZWN+SXLTahNpHBw0sw8AcJqddNeXs2WI5GdZmbXL4QZI36EaNUm
|
||||
m4xKmKRKYIP9wYkmXOV/D2h1meM44y4lul5v2qvo6I+umJ84q4W1/W1vVmAzyVfL
|
||||
v1TQCUx8cpKMHzw3ma6CTBCtU3Oq9HKHBnf8GyHZicmV7ESzf/phJu4ZAgMBAAGj
|
||||
YDBeMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUH
|
||||
AwIwDAYDVR0TAQH/BAIwADAfBgNVHSMEGDAWgBQU9pDMtbayJdNM6bp0IG8dcs15
|
||||
qTANBgkqhkiG9w0BAQsFAAOCAQEABNzEQQMYUcLsBASHladEjr46avKn7gREfaDl
|
||||
Y5PBvgCPP42q/sW/9iCNY3UpT9TJZWM6s01+0p6I96jYbRQER1NX7O4OgQYHmFw2
|
||||
PF6UOG2vMo54w11OvL7sbr4d+nkE6ItdM9fLDIJ3fEOYJZkSoxhOL/U3jSjIl7Wu
|
||||
KCIlpM/M/gcZ4w2IvcLrWtvswbFNUd+dwQfBGcQTmSQDOLE7MqSvzYAkeNv73GLB
|
||||
ieba7gs/PmoTFsf9nW60iXymDDF4MtODn15kqT/y1uD6coujmiEiIomBfxqAkUCU
|
||||
0ciP/KF5oOEMmMedm7/peQxaRTMdRSk4yu7vbj/BxnTcj039Qg==
|
||||
-----END CERTIFICATE-----
|
||||
@@ -1,27 +0,0 @@
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
MIIEowIBAAKCAQEAtG+7svZ0GK4e7guTTg73u3RTSwIKOaVkUuisdd1CwOeYDqYn
|
||||
mYiOf9Y8FAYzs9B4ZeUAHRwtRwzbucH+DJsCBIXrqt5DAJn7rzcVtgZ1kg7l8lDJ
|
||||
QX6pcIOaOlvJ3RFmit9w+8HdlkLErAJ4UDyUo581ir1iqvim3MoHOIXbLP2Vjfkl
|
||||
y02oTaRwcNLMPAHCanXTXl7NliORnWZm1y+EGSN+hGjVJpuMSpikSmCD/cGJJlzl
|
||||
fw9odZnjOOMuJbpeb9qr6OiPrpifOKuFtf1tb1ZgM8lXy79U0AlMfHKSjB88N5mu
|
||||
gkwQrVNzqvRyhwZ3/Bsh2YnJlexEs3/6YSbuGQIDAQABAoIBAQCdJxPb8zt6o2zc
|
||||
98f8nJy378D7+3LccmjGrVBH98ZELXIKkDy9RGqYfQcmiaBOZKv4U1OeBwSIdXKK
|
||||
f6O9ZuSC/AEeeSbyRysmmFuYhlewNrmgKyyelqsNDBIv8fIHUTh2i9Xj8B4G2XBi
|
||||
QGR5vcnYGLqRdBGTx63Nb0iKuksDCwPAuPA/e0ySz9HdWL1j4bqpVSYsOIXsqTDr
|
||||
CVnxUeSIL0fFQnRm3IASXQD7zdq9eEFX7vESeleZoz8qNcKb4Na/C3N6crScjgH7
|
||||
qyNZ2zNLfy1LT84k8uc1TMX2KcEVEmfdDv5cCnUH2ic12CwXMZ0vgId5LJTaHx4x
|
||||
ytIQIe5hAoGBANB+TsRXP4KzcjZlUUfiAp/pWUM4kVktbsfZa1R2NEuIGJUxPk3P
|
||||
7WS0WX5W75QKRg+UWTubg5kfd0f9fklLgofmliBnY/HrpgdyugJmUZBgzIxmy0k+
|
||||
aCe0biD1gULfyyrKtfe8k5wRFstzhfGszlOf2ebR87sSVNBuF2lEwPTvAoGBAN2M
|
||||
0/XrsodGU4B9Mj86Go2gb2k2WU2izI0cO+tm2S5U5DvKmVEnmjXfPRaOFj2UUQjo
|
||||
cljnDAinbN+O0+Inc35qsEeYdAIepNAPglzcpfTHagja9mhx2idLYTXGhbZLL+Ei
|
||||
TRzMyP27NF+GVVfYU/cA86ns6NboG6spohmnqh13AoGAKPc4aNGv0/GIVnHP56zb
|
||||
0SnbdR7PSFNp+fCZay4Slmi2U9IqKMXbIjdhgjZ4uoDORU9jvReQYuzQ1h9TyfkB
|
||||
O8yt4M4P0D/6DmqXa9NI4XJznn6wIMMXWf3UybsTW913IQBVgsjVxAuDjBQ11Eec
|
||||
/sdg3D6SgkZWzeFjzjZJJ5cCgYBSYVg7fE3hERxhjawOaJuRCBQFSklAngVzfwkk
|
||||
yhR9ruFC/l2uGIy19XFwnprUgP700gIa3qbR3PeV1TUiRcsjOaacqKqSUzSzjODL
|
||||
iNxIvZHHAyxWv+b/b38REOWNWD3QeAG2cMtX1bFux7OaO31VPkxcZhRaPOp05cE5
|
||||
yudtlwKBgDBbR7RLYn03OPm3NDBLLjTybhD8Iu8Oj7UeNCiEWAdZpqIKYnwSxMzQ
|
||||
kdo4aTENA/seEwq+XDV7TwbUIFFJg5gDXIhkcK2c9kiO2bObCAmKpBlQCcrp0a5X
|
||||
NSBk1N/ZG/Qhqns7z8k01KN4LNcdpRoNiYYPgY+p3xbY8+nWhv+q
|
||||
-----END RSA PRIVATE KEY-----
|
||||
@@ -1,98 +0,0 @@
|
||||
import os
|
||||
import unittest
|
||||
|
||||
import yaml
|
||||
|
||||
from .context_auth import ContextAuth
|
||||
|
||||
|
||||
class TestCurrentContext(unittest.TestCase):
|
||||
|
||||
def get_kubeconfig_with_data(self) -> str:
|
||||
"""
|
||||
This function returns a test kubeconfig file as a string.
|
||||
|
||||
:return: a test kubeconfig file in string format (for unit testing purposes)
|
||||
""" # NOQA
|
||||
return """apiVersion: v1
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM5ekNDQWQrZ0F3SUJBZ0lVV01PTVBNMVUrRi9uNXN6TSthYzlMcGZISHB3d0RRWUpLb1pJaHZjTkFRRUwKQlFBd0hqRWNNQm9HQTFVRUF3d1RhM1ZpZFc1MGRTNXNiMk5oYkdSdmJXRnBiakFlRncweU1URXlNRFl4T0RBdwpNRFJhRncwek1URXlNRFF4T0RBd01EUmFNQjR4SERBYUJnTlZCQU1NRTJ0MVluVnVkSFV1Ykc5allXeGtiMjFoCmFXNHdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDNExhcG00SDB0T1NuYTNXVisKdzI4a0tOWWRwaHhYOUtvNjUwVGlOK2c5ZFNQU3VZK0V6T1JVOWVONlgyWUZkMEJmVFNodno4Y25rclAvNysxegpETEoxQ3MwRi9haEV3ZDQxQXN5UGFjbnRiVE80dGRLWm9POUdyODR3YVdBN1hSZmtEc2ZxRGN1YW5UTmVmT1hpCkdGbmdDVzU5Q285M056alB1eEFrakJxdVF6eE5GQkgwRlJPbXJtVFJ4cnVLZXo0aFFuUW1OWEFUNnp0M21udzMKWUtWTzU4b2xlcUxUcjVHNlRtVFQyYTZpVGdtdWY2N0cvaVZlalJGbkw3YkNHWmgzSjlCSTNMcVpqRzE4dWxvbgpaVDdQcGQrQTlnaTJOTm9UZlI2TVB5SndxU1BCL0xZQU5ZNGRoZDVJYlVydDZzbmViTlRZSHV2T0tZTDdNTWRMCmVMSzFBZ01CQUFHakxUQXJNQWtHQTFVZEV3UUNNQUF3SGdZRFZSMFJCQmN3RllJVGEzVmlkVzUwZFM1c2IyTmgKYkdSdmJXRnBiakFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBQTVqUHVpZVlnMExySE1PSkxYY0N4d3EvVzBDNApZeFpncVd3VHF5VHNCZjVKdDlhYTk0SkZTc2dHQWdzUTN3NnA2SlBtL0MyR05MY3U4ZWxjV0E4UXViQWxueXRRCnF1cEh5WnYrZ08wMG83TXdrejZrTUxqQVZ0QllkRzJnZ21FRjViTEk5czBKSEhjUGpHUkl1VHV0Z0tHV1dPWHgKSEg4T0RzaG9wZHRXMktrR2c2aThKaEpYaWVIbzkzTHptM00xRUNGcXAvMEdtNkN1RFphVVA2SGpJMWRrYllLdgpsSHNVZ1U1SmZjSWhNYmJLdUllTzRkc1YvT3FHcm9iNW5vcmRjaExBQmRDTnc1cmU5T1NXZGZ1VVhSK0ViZVhrCjVFM0tFYzA1RGNjcGV2a1NTdlJ4SVQrQzNMOTltWGcxL3B5NEw3VUhvNFFLTXlqWXJXTWlLRlVKV1E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
|
||||
server: https://127.0.0.1:6443
|
||||
name: default
|
||||
contexts:
|
||||
- context:
|
||||
cluster: default
|
||||
namespace: default
|
||||
user: testuser
|
||||
name: default
|
||||
current-context: default
|
||||
kind: Config
|
||||
preferences: {}
|
||||
users:
|
||||
- name: testuser
|
||||
user:
|
||||
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM5ekNDQWQrZ0F3SUJBZ0lVV01PTVBNMVUrRi9uNXN6TSthYzlMcGZISHB3d0RRWUpLb1pJaHZjTkFRRUwKQlFBd0hqRWNNQm9HQTFVRUF3d1RhM1ZpZFc1MGRTNXNiMk5oYkdSdmJXRnBiakFlRncweU1URXlNRFl4T0RBdwpNRFJhRncwek1URXlNRFF4T0RBd01EUmFNQjR4SERBYUJnTlZCQU1NRTJ0MVluVnVkSFV1Ykc5allXeGtiMjFoCmFXNHdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDNExhcG00SDB0T1NuYTNXVisKdzI4a0tOWWRwaHhYOUtvNjUwVGlOK2c5ZFNQU3VZK0V6T1JVOWVONlgyWUZkMEJmVFNodno4Y25rclAvNysxegpETEoxQ3MwRi9haEV3ZDQxQXN5UGFjbnRiVE80dGRLWm9POUdyODR3YVdBN1hSZmtEc2ZxRGN1YW5UTmVmT1hpCkdGbmdDVzU5Q285M056alB1eEFrakJxdVF6eE5GQkgwRlJPbXJtVFJ4cnVLZXo0aFFuUW1OWEFUNnp0M21udzMKWUtWTzU4b2xlcUxUcjVHNlRtVFQyYTZpVGdtdWY2N0cvaVZlalJGbkw3YkNHWmgzSjlCSTNMcVpqRzE4dWxvbgpaVDdQcGQrQTlnaTJOTm9UZlI2TVB5SndxU1BCL0xZQU5ZNGRoZDVJYlVydDZzbmViTlRZSHV2T0tZTDdNTWRMCmVMSzFBZ01CQUFHakxUQXJNQWtHQTFVZEV3UUNNQUF3SGdZRFZSMFJCQmN3RllJVGEzVmlkVzUwZFM1c2IyTmgKYkdSdmJXRnBiakFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBQTVqUHVpZVlnMExySE1PSkxYY0N4d3EvVzBDNApZeFpncVd3VHF5VHNCZjVKdDlhYTk0SkZTc2dHQWdzUTN3NnA2SlBtL0MyR05MY3U4ZWxjV0E4UXViQWxueXRRCnF1cEh5WnYrZ08wMG83TXdrejZrTUxqQVZ0QllkRzJnZ21FRjViTEk5czBKSEhjUGpHUkl1VHV0Z0tHV1dPWHgKSEg4T0RzaG9wZHRXMktrR2c2aThKaEpYaWVIbzkzTHptM00xRUNGcXAvMEdtNkN1RFphVVA2SGpJMWRrYllLdgpsSHNVZ1U1SmZjSWhNYmJLdUllTzRkc1YvT3FHcm9iNW5vcmRjaExBQmRDTnc1cmU5T1NXZGZ1VVhSK0ViZVhrCjVFM0tFYzA1RGNjcGV2a1NTdlJ4SVQrQzNMOTltWGcxL3B5NEw3VUhvNFFLTXlqWXJXTWlLRlVKV1E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
|
||||
client-key-data: LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2QUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktZd2dnU2lBZ0VBQW9JQkFRQzRMYXBtNEgwdE9TbmEKM1dWK3cyOGtLTllkcGh4WDlLbzY1MFRpTitnOWRTUFN1WStFek9SVTllTjZYMllGZDBCZlRTaHZ6OGNua3JQLwo3KzF6RExKMUNzMEYvYWhFd2Q0MUFzeVBhY250YlRPNHRkS1pvTzlHcjg0d2FXQTdYUmZrRHNmcURjdWFuVE5lCmZPWGlHRm5nQ1c1OUNvOTNOempQdXhBa2pCcXVRenhORkJIMEZST21ybVRSeHJ1S2V6NGhRblFtTlhBVDZ6dDMKbW53M1lLVk81OG9sZXFMVHI1RzZUbVRUMmE2aVRnbXVmNjdHL2lWZWpSRm5MN2JDR1poM0o5QkkzTHFaakcxOAp1bG9uWlQ3UHBkK0E5Z2kyTk5vVGZSNk1QeUp3cVNQQi9MWUFOWTRkaGQ1SWJVcnQ2c25lYk5UWUh1dk9LWUw3Ck1NZExlTEsxQWdNQkFBRUNnZ0VBQ28rank4NW5ueVk5L2l6ZjJ3cjkzb2J3OERaTVBjYnIxQURhOUZYY1hWblEKT2c4bDZhbU9Ga2tiU0RNY09JZ0VDdkx6dEtXbmQ5OXpydU5sTEVtNEdmb0trNk5kK01OZEtKRUdoZHE5RjM1Qgpqdi91R1owZTIyRE5ZLzFHNVdDTE5DcWMwQkVHY2RFOTF0YzJuMlppRVBTNWZ6WVJ6L1k4cmJ5K1NqbzJkWE9RCmRHYWRlUFplbi9UbmlHTFlqZWhrbXZNQjJvU0FDbVMycTd2OUNrcmdmR1RZbWJzeGVjSU1QK0JONG9KS3BOZ28KOUpnRWJ5SUxkR1pZS2pQb2lLaHNjMVhmSy8zZStXSmxuYjJBaEE5Y1JMUzhMcDdtcEYySWp4SjNSNE93QTg3WQpNeGZvZWFGdnNuVUFHWUdFWFo4Z3BkWmhQMEoxNWRGdERjajIrcngrQVFLQmdRRDFoSE9nVGdFbERrVEc5bm5TCjE1eXYxRzUxYnJMQU1UaWpzNklEMU1qelhzck0xY2ZvazVaaUlxNVJsQ3dReTlYNDdtV1RhY0lZRGR4TGJEcXEKY0IydjR5Wm1YK1VleGJ3cDU1OWY0V05HdzF5YzQrQjdaNFF5aTRFelN4WmFjbldjMnBzcHJMUFVoOUFXRXVNcApOaW1vcXNiVGNnNGs5QWRxeUIrbWhIWmJRUUtCZ1FEQUNzU09qNXZMU1VtaVpxYWcrOVMySUxZOVNOdDZzS1VyCkprcjdCZEVpN3N2YmU5cldRR2RBb0xkQXNzcU94aENydmtPNkpSSHB1YjlRRjlYdlF4Riszc2ZpZm4yYkQ0ZloKMlVsclA1emF3RlNrNDNLbjdMZzRscURpaVUxVGlqTkJBL3dUcFlmbTB4dW5WeFRWNDZpNVViQW1XRk12TWV0bQozWUZYQmJkK2RRS0JnRGl6Q1B6cFpzeEcrazAwbUxlL2dYajl4ekNwaXZCbHJaM29teTdsVWk4YUloMmg5VlBaCjJhMzZNbVcyb1dLVG9HdW5xcCtibWU1eUxRRGlFcjVQdkJ0bGl2V3ppYmRNbFFMY2Nlcnpveml4WDA4QU5WUnEKZUpZdnIzdklDSGFFM25LRjdiVjNJK1NlSk1ra1BYL0QrV1R4WTQ5clZLYm1FRnh4c1JXRW04ekJBb0dBWEZ3UgpZanJoQTZqUW1DRmtYQ0loa0NJMVkwNEorSHpDUXZsY3NGT0EzSnNhUWduVUdwekl5OFUvdlFiLzhpQ0IzZ2RZCmpVck16YXErdnVkbnhYVnRFYVpWWGJIVitPQkVSdHFBdStyUkprZS9yYm1SNS84cUxsVUxOVWd4ZjA4RkRXeTgKTERxOUhKOUZPbnJnRTJvMU9FTjRRMGpSWU81U041dXFXODd0REEwQ2dZQXpXbk1KSFgrbmlyMjhRRXFyVnJKRAo4ZUEwOHIwWTJRMDhMRlcvMjNIVWQ4WU12VnhTUTdwcUwzaE41RXVJQ2dCbEpGVFI3TndBREo3eDY2M002akFMCm1DNlI4dWxSZStwa08xN2Y0UUs3MnVRanJGZEhESnlXQmdDL0RKSkV6d1dwY0Q4VVNPK3A5bVVIbllLTUJTOEsKTVB1ejYrZ3h0VEtsRU5pZUVacXhxZz09Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K
|
||||
username: testuser
|
||||
password: testpassword
|
||||
token: sha256~fFyEqjf1xxFMO0tbEyGRvWeNOd7QByuEgS4hyEq_A9o
|
||||
""" # NOQA
|
||||
|
||||
def get_kubeconfig_with_paths(self) -> str:
|
||||
"""
|
||||
This function returns a test kubeconfig file as a string.
|
||||
|
||||
:return: a test kubeconfig file in string format (for unit testing purposes)
|
||||
""" # NOQA
|
||||
return """apiVersion: v1
|
||||
clusters:
|
||||
- cluster:
|
||||
certificate-authority: fixtures/ca.crt
|
||||
server: https://127.0.0.1:6443
|
||||
name: default
|
||||
contexts:
|
||||
- context:
|
||||
cluster: default
|
||||
namespace: default
|
||||
user: testuser
|
||||
name: default
|
||||
current-context: default
|
||||
kind: Config
|
||||
preferences: {}
|
||||
users:
|
||||
- name: testuser
|
||||
user:
|
||||
client-certificate: fixtures/client.crt
|
||||
client-key: fixtures/client.key
|
||||
username: testuser
|
||||
password: testpassword
|
||||
token: sha256~fFyEqjf1xxFMO0tbEyGRvWeNOd7QByuEgS4hyEq_A9o
|
||||
""" # NOQA
|
||||
|
||||
def test_current_context(self):
|
||||
cwd = os.getcwd()
|
||||
current_context_data = ContextAuth()
|
||||
data = yaml.safe_load(self.get_kubeconfig_with_data())
|
||||
current_context_data.fetch_auth_data(data)
|
||||
self.assertIsNotNone(current_context_data.clusterCertificateData)
|
||||
self.assertIsNotNone(current_context_data.clientCertificateData)
|
||||
self.assertIsNotNone(current_context_data.clientKeyData)
|
||||
self.assertIsNotNone(current_context_data.username)
|
||||
self.assertIsNotNone(current_context_data.password)
|
||||
self.assertIsNotNone(current_context_data.bearerToken)
|
||||
self.assertIsNotNone(current_context_data.clusterHost)
|
||||
|
||||
current_context_no_data = ContextAuth()
|
||||
data = yaml.safe_load(self.get_kubeconfig_with_paths())
|
||||
current_context_no_data.fetch_auth_data(data)
|
||||
self.assertIsNotNone(current_context_no_data.clusterCertificate)
|
||||
self.assertIsNotNone(current_context_no_data.clusterCertificateData)
|
||||
self.assertIsNotNone(current_context_no_data.clientCertificate)
|
||||
self.assertIsNotNone(current_context_no_data.clientCertificateData)
|
||||
self.assertIsNotNone(current_context_no_data.clientKey)
|
||||
self.assertIsNotNone(current_context_no_data.clientKeyData)
|
||||
self.assertIsNotNone(current_context_no_data.username)
|
||||
self.assertIsNotNone(current_context_no_data.password)
|
||||
self.assertIsNotNone(current_context_no_data.bearerToken)
|
||||
self.assertIsNotNone(current_context_data.clusterHost)
|
||||
@@ -22,9 +22,7 @@ class ContainerScenarioPlugin(AbstractScenarioPlugin):
|
||||
lib_telemetry: KrknTelemetryOpenshift,
|
||||
scenario_telemetry: ScenarioTelemetry,
|
||||
) -> int:
|
||||
start_time = int(time.time())
|
||||
pool = PodsMonitorPool(lib_telemetry.get_lib_kubernetes())
|
||||
wait_duration = krkn_config["tunings"]["wait_duration"]
|
||||
try:
|
||||
with open(scenario, "r") as f:
|
||||
cont_scenario_config = yaml.full_load(f)
|
||||
@@ -45,16 +43,10 @@ class ContainerScenarioPlugin(AbstractScenarioPlugin):
|
||||
)
|
||||
return 1
|
||||
scenario_telemetry.affected_pods = result
|
||||
logging.info("Waiting for the specified duration: %s" % (wait_duration))
|
||||
time.sleep(wait_duration)
|
||||
|
||||
# capture end time
|
||||
end_time = int(time.time())
|
||||
|
||||
# publish cerberus status
|
||||
cerberus.publish_kraken_status(krkn_config, [], start_time, end_time)
|
||||
except (RuntimeError, Exception):
|
||||
logging.error("ContainerScenarioPlugin exiting due to Exception %s" % e)
|
||||
logging.error("ContainerScenarioPlugin exiting due to Exception %s")
|
||||
return 1
|
||||
else:
|
||||
return 0
|
||||
|
||||
0
krkn/scenario_plugins/hogs/__init__.py
Normal file
142
krkn/scenario_plugins/hogs/hogs_scenario_plugin.py
Normal file
@@ -0,0 +1,142 @@
|
||||
import copy
|
||||
import logging
|
||||
import queue
|
||||
import random
|
||||
import re
|
||||
import threading
|
||||
import time
|
||||
|
||||
|
||||
import yaml
|
||||
from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
|
||||
from krkn_lib.models.krkn import HogConfig, HogType
|
||||
from krkn_lib.models.k8s import NodeResources
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
from krkn_lib.utils import get_random_string
|
||||
|
||||
from krkn.scenario_plugins.abstract_scenario_plugin import AbstractScenarioPlugin
|
||||
|
||||
|
||||
class HogsScenarioPlugin(AbstractScenarioPlugin):
|
||||
def run(self, run_uuid: str, scenario: str, krkn_config: dict[str, any], lib_telemetry: KrknTelemetryOpenshift,
|
||||
scenario_telemetry: ScenarioTelemetry) -> int:
|
||||
try:
|
||||
with open(scenario, "r") as f:
|
||||
scenario = yaml.full_load(f)
|
||||
scenario_config = HogConfig.from_yaml_dict(scenario)
|
||||
has_selector = True
|
||||
if not scenario_config.node_selector or not re.match("^.+=.*$", scenario_config.node_selector):
|
||||
if scenario_config.node_selector:
|
||||
logging.warning(f"node selector {scenario_config.node_selector} not in right format (key=value)")
|
||||
node_selector = ""
|
||||
else:
|
||||
node_selector = scenario_config.node_selector
|
||||
|
||||
available_nodes = lib_telemetry.get_lib_kubernetes().list_nodes(node_selector)
|
||||
if len(available_nodes) == 0:
|
||||
raise Exception("no available nodes to schedule workload")
|
||||
|
||||
if not has_selector:
|
||||
# if selector not specified picks a random node between the available
|
||||
available_nodes = [random.choice(available_nodes)]
|
||||
|
||||
if scenario_config.number_of_nodes and len(available_nodes) > scenario_config.number_of_nodes:
|
||||
available_nodes = random.sample(available_nodes, scenario_config.number_of_nodes)
|
||||
|
||||
exception_queue = queue.Queue()
|
||||
self.run_scenario(scenario_config, lib_telemetry.get_lib_kubernetes(), available_nodes, exception_queue)
|
||||
return 0
|
||||
except Exception as e:
|
||||
logging.error(f"scenario exception: {e}")
|
||||
return 1
|
||||
|
||||
def get_scenario_types(self) -> list[str]:
|
||||
return ["hog_scenarios"]
|
||||
|
||||
def run_scenario_worker(self, config: HogConfig,
|
||||
lib_k8s: KrknKubernetes, node: str,
|
||||
exception_queue: queue.Queue):
|
||||
try:
|
||||
if not config.workers:
|
||||
config.workers = lib_k8s.get_node_cpu_count(node)
|
||||
logging.info(f"[{node}] detected {config.workers} cpus for node {node}")
|
||||
|
||||
logging.info(f"[{node}] workers number: {config.workers}")
|
||||
|
||||
# using kubernetes.io/hostname = <node_name> selector to
|
||||
# precisely deploy each workload on each selected node
|
||||
config.node_selector = f"kubernetes.io/hostname={node}"
|
||||
pod_name = f"{config.type.value}-hog-{get_random_string(5)}"
|
||||
node_resources_start = lib_k8s.get_node_resources_info(node)
|
||||
lib_k8s.deploy_hog(pod_name, config)
|
||||
start = time.time()
|
||||
# waiting 3 seconds before starting sample collection
|
||||
time.sleep(3)
|
||||
node_resources_end = lib_k8s.get_node_resources_info(node)
|
||||
|
||||
samples: list[NodeResources] = []
|
||||
avg_node_resources = NodeResources()
|
||||
|
||||
while time.time() - start < config.duration-1:
|
||||
samples.append(lib_k8s.get_node_resources_info(node))
|
||||
|
||||
max_wait = 30
|
||||
wait = 0
|
||||
logging.info(f"[{node}] waiting {max_wait} up to seconds pod: {pod_name} namespace: {config.namespace} to finish")
|
||||
while lib_k8s.is_pod_running(pod_name, config.namespace):
|
||||
if wait >= max_wait:
|
||||
raise Exception(f"[{node}] hog workload pod: {pod_name} namespace: {config.namespace} "
|
||||
f"didn't finish after {max_wait}")
|
||||
time.sleep(1)
|
||||
wait += 1
|
||||
continue
|
||||
|
||||
logging.info(f"[{node}] deleting pod: {pod_name} namespace: {config.namespace}")
|
||||
lib_k8s.delete_pod(pod_name, config.namespace)
|
||||
|
||||
for resource in samples:
|
||||
avg_node_resources.cpu += resource.cpu
|
||||
avg_node_resources.memory += resource.memory
|
||||
avg_node_resources.disk_space += resource.disk_space
|
||||
|
||||
avg_node_resources.cpu = avg_node_resources.cpu/len(samples)
|
||||
avg_node_resources.memory = avg_node_resources.memory / len(samples)
|
||||
avg_node_resources.disk_space = avg_node_resources.disk_space / len(samples)
|
||||
|
||||
if config.type == HogType.cpu:
|
||||
logging.info(f"[{node}] detected cpu consumption: "
|
||||
f"{(avg_node_resources.cpu / (config.workers * 1000000000)) * 100} %")
|
||||
if config.type == HogType.memory:
|
||||
logging.info(f"[{node}] detected memory increase: "
|
||||
f"{avg_node_resources.memory / node_resources_start.memory * 100} %")
|
||||
if config.type == HogType.io:
|
||||
logging.info(f"[{node}] detected disk space allocated: "
|
||||
f"{(avg_node_resources.disk_space - node_resources_end.disk_space) / 1024 / 1024} MB")
|
||||
except Exception as e:
|
||||
exception_queue.put(e)
|
||||
|
||||
def run_scenario(self, config: HogConfig,
|
||||
lib_k8s: KrknKubernetes,
|
||||
available_nodes: list[str],
|
||||
exception_queue: queue.Queue):
|
||||
workers = []
|
||||
logging.info(f"running {config.type.value} hog scenario")
|
||||
logging.info(f"targeting nodes: [{','.join(available_nodes)}]")
|
||||
for node in available_nodes:
|
||||
config_copy = copy.deepcopy(config)
|
||||
worker = threading.Thread(target=self.run_scenario_worker,
|
||||
args=(config_copy, lib_k8s, node, exception_queue))
|
||||
worker.daemon = True
|
||||
worker.start()
|
||||
workers.append(worker)
|
||||
|
||||
for worker in workers:
|
||||
worker.join()
|
||||
|
||||
try:
|
||||
while True:
|
||||
exception = exception_queue.get_nowait()
|
||||
raise exception
|
||||
except queue.Empty:
|
||||
pass
|
||||
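The run_scenario method above fans out one worker thread per target node and collects worker failures through a shared queue.Queue instead of letting exceptions die inside threads. A minimal, self-contained sketch of that pattern (illustrative only; the worker body and names below are placeholders, not the plugin's API):

import queue
import threading


def worker(node: str, errors: queue.Queue):
    try:
        # placeholder for the per-node workload logic
        if node == "bad-node":
            raise RuntimeError(f"workload failed on {node}")
    except Exception as e:
        # never let an exception escape a thread: push it for the caller
        errors.put(e)


def run_all(nodes: list[str]):
    errors: queue.Queue = queue.Queue()
    workers = []
    for node in nodes:
        t = threading.Thread(target=worker, args=(node, errors), daemon=True)
        t.start()
        workers.append(t)
    for t in workers:
        t.join()
    # re-raise the first collected error, if any, once all workers are done
    try:
        raise errors.get_nowait()
    except queue.Empty:
        pass


run_all(["node-a", "node-b"])

Joining every worker before draining the queue mirrors the plugin's behaviour of reporting the first error only after all nodes have been processed.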
@@ -49,8 +49,7 @@ class NativeScenarioPlugin(AbstractScenarioPlugin):
|
||||
return [
|
||||
"pod_disruption_scenarios",
|
||||
"pod_network_scenarios",
|
||||
"vmware_node_scenarios",
|
||||
"ibmcloud_node_scenarios",
|
||||
"ingress_node_scenarios"
|
||||
]
|
||||
|
||||
def start_monitoring(self, pool: PodsMonitorPool, scenarios: list[Any]):
|
||||
|
||||
@@ -97,15 +97,6 @@ class NetworkScenarioConfig:
|
||||
},
|
||||
)
|
||||
|
||||
kraken_config: typing.Optional[str] = field(
|
||||
default="",
|
||||
metadata={
|
||||
"name": "Kraken Config",
|
||||
"description": "Path to the config file of Kraken. "
|
||||
"Set this field if you wish to publish status onto Cerberus",
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class NetworkScenarioSuccessOutput:
|
||||
@@ -265,7 +256,7 @@ def verify_interface(
|
||||
% (interface, node, node_interface_list)
|
||||
)
|
||||
finally:
|
||||
logging.info("Deleteing pod to query interface on node")
|
||||
logging.info("Deleting pod to query interface on node")
|
||||
kube_helper.delete_pod(cli, "fedtools", "default")
|
||||
|
||||
return input_interface_list
|
||||
@@ -555,7 +546,7 @@ def wait_for_job(
|
||||
count += 1
|
||||
job_list.remove(job_name)
|
||||
except Exception:
|
||||
logging.warn("Exception in getting job status")
|
||||
logging.warning("Exception in getting job status")
|
||||
if time.time() > wait_time:
|
||||
raise Exception(
|
||||
"Jobs did not complete within "
|
||||
@@ -594,7 +585,7 @@ def delete_jobs(cli: CoreV1Api, batch_cli: BatchV1Api, job_list: typing.List[str
|
||||
pod_log = pod_log_response.data.decode("utf-8")
|
||||
logging.error(pod_log)
|
||||
except Exception as e:
|
||||
logging.warn("Exception in getting job status: %s" % str(e))
|
||||
logging.warning("Exception in getting job status: %s" % str(e))
|
||||
api_response = kube_helper.delete_job(
|
||||
batch_cli, name=job_name, namespace="default"
|
||||
)
|
||||
@@ -670,7 +661,7 @@ def get_ingress_cmd(
|
||||
)
|
||||
tc_set += ";"
|
||||
|
||||
exec_cmd = "{0} {1} sleep {2};{3} sleep 20;{4}".format(
|
||||
exec_cmd = "sleep 30;{0} {1} sleep {2};{3} sleep 20;{4}".format(
|
||||
tc_set, tc_ls, duration, tc_unset, tc_ls
|
||||
)
|
||||
|
||||
@@ -710,6 +701,7 @@ def network_chaos(
|
||||
pod_module_template = env.get_template("pod_module.j2")
|
||||
cli, batch_cli = kube_helper.setup_kubernetes(cfg.kubeconfig_path)
|
||||
|
||||
logging.info("Starting Ingress Network Chaos")
|
||||
try:
|
||||
node_interface_dict = get_node_interfaces(
|
||||
cfg.node_interface_name,
|
||||
@@ -721,16 +713,6 @@ def network_chaos(
|
||||
except Exception:
|
||||
return "error", NetworkScenarioErrorOutput(format_exc())
|
||||
job_list = []
|
||||
publish = False
|
||||
if cfg.kraken_config:
|
||||
failed_post_scenarios = ""
|
||||
try:
|
||||
with open(cfg.kraken_config, "r") as f:
|
||||
config = yaml.full_load(f)
|
||||
except Exception:
|
||||
logging.error("Error reading Kraken config from %s" % cfg.kraken_config)
|
||||
return "error", NetworkScenarioErrorOutput(format_exc())
|
||||
publish = True
|
||||
|
||||
try:
|
||||
if cfg.execution_type == "parallel":
|
||||
@@ -747,13 +729,7 @@ def network_chaos(
|
||||
)
|
||||
)
|
||||
logging.info("Waiting for parallel job to finish")
|
||||
start_time = int(time.time())
|
||||
wait_for_job(batch_cli, job_list[:], cfg.test_duration + 100)
|
||||
end_time = int(time.time())
|
||||
if publish:
|
||||
cerberus.publish_kraken_status(
|
||||
config, failed_post_scenarios, start_time, end_time
|
||||
)
|
||||
|
||||
elif cfg.execution_type == "serial":
|
||||
create_interfaces = True
|
||||
@@ -773,18 +749,12 @@ def network_chaos(
|
||||
)
|
||||
)
|
||||
logging.info("Waiting for serial job to finish")
|
||||
start_time = int(time.time())
|
||||
wait_for_job(batch_cli, job_list[:], cfg.test_duration + 100)
|
||||
logging.info("Deleting jobs")
|
||||
delete_jobs(cli, batch_cli, job_list[:])
|
||||
job_list = []
|
||||
logging.info("Waiting for wait_duration : %ss" % cfg.wait_duration)
|
||||
time.sleep(cfg.wait_duration)
|
||||
end_time = int(time.time())
|
||||
if publish:
|
||||
cerberus.publish_kraken_status(
|
||||
config, failed_post_scenarios, start_time, end_time
|
||||
)
|
||||
create_interfaces = False
|
||||
else:
|
||||
|
||||
@@ -799,7 +769,7 @@ def network_chaos(
|
||||
execution_type=cfg.execution_type,
|
||||
)
|
||||
except Exception as e:
|
||||
logging.error("Network Chaos exiting due to Exception - %s" % e)
|
||||
logging.error("Ingress Network Chaos exiting due to Exception - %s" % e)
|
||||
return "error", NetworkScenarioErrorOutput(format_exc())
|
||||
finally:
|
||||
delete_virtual_interfaces(cli, node_interface_dict.keys(), pod_module_template)
|
||||
|
||||
@@ -29,13 +29,13 @@ def create_job(batch_cli, body, namespace="default"):
|
||||
api_response = batch_cli.create_namespaced_job(body=body, namespace=namespace)
|
||||
return api_response
|
||||
except ApiException as api:
|
||||
logging.warn(
|
||||
logging.warning(
|
||||
"Exception when calling \
|
||||
BatchV1Api->create_job: %s"
|
||||
% api
|
||||
)
|
||||
if api.status == 409:
|
||||
logging.warn("Job already present")
|
||||
logging.warning("Job already present")
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Exception when calling \
|
||||
@@ -218,12 +218,12 @@ def delete_job(batch_cli, name, namespace="default"):
|
||||
logging.debug("Job deleted. status='%s'" % str(api_response.status))
|
||||
return api_response
|
||||
except ApiException as api:
|
||||
logging.warn(
|
||||
logging.warning(
|
||||
"Exception when calling \
|
||||
BatchV1Api->create_namespaced_job: %s"
|
||||
% api
|
||||
)
|
||||
logging.warn("Job already deleted\n")
|
||||
logging.warning("Job already deleted\n")
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Exception when calling \
|
||||
|
||||
@@ -1,580 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
import time
|
||||
import typing
|
||||
from os import environ
|
||||
from dataclasses import dataclass, field
|
||||
from traceback import format_exc
|
||||
import logging
|
||||
from krkn.scenario_plugins.native.node_scenarios import (
|
||||
kubernetes_functions as kube_helper,
|
||||
)
|
||||
from arcaflow_plugin_sdk import validation, plugin
|
||||
from kubernetes import client, watch
|
||||
from ibm_vpc import VpcV1
|
||||
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
|
||||
import sys
|
||||
|
||||
|
||||
class IbmCloud:
|
||||
def __init__(self):
|
||||
"""
|
||||
Initialize the IBM Cloud client by using the env variables:
|
||||
'IBMC_APIKEY' 'IBMC_URL'
|
||||
"""
|
||||
apiKey = environ.get("IBMC_APIKEY")
|
||||
service_url = environ.get("IBMC_URL")
|
||||
if not apiKey:
|
||||
raise Exception("Environmental variable 'IBMC_APIKEY' is not set")
|
||||
if not service_url:
|
||||
raise Exception("Environmental variable 'IBMC_URL' is not set")
|
||||
try:
|
||||
authenticator = IAMAuthenticator(apiKey)
|
||||
self.service = VpcV1(authenticator=authenticator)
|
||||
|
||||
self.service.set_service_url(service_url)
|
||||
except Exception as e:
|
||||
logging.error("error authenticating" + str(e))
|
||||
sys.exit(1)
|
||||
|
||||
def delete_instance(self, instance_id):
|
||||
"""
|
||||
Deletes the Instance whose name is given by 'instance_id'
|
||||
"""
|
||||
try:
|
||||
self.service.delete_instance(instance_id)
|
||||
logging.info("Deleted Instance -- '{}'".format(instance_id))
|
||||
except Exception as e:
|
||||
logging.info("Instance '{}' could not be deleted. ".format(instance_id))
|
||||
return False
|
||||
|
||||
def reboot_instances(self, instance_id):
|
||||
"""
|
||||
Reboots the Instance whose name is given by 'instance_id'. Returns True if successful, or
|
||||
returns False if the Instance is not powered on
|
||||
"""
|
||||
|
||||
try:
|
||||
self.service.create_instance_action(
|
||||
instance_id,
|
||||
type="reboot",
|
||||
)
|
||||
logging.info("Reset Instance -- '{}'".format(instance_id))
|
||||
return True
|
||||
except Exception as e:
|
||||
logging.info("Instance '{}' could not be rebooted".format(instance_id))
|
||||
return False
|
||||
|
||||
def stop_instances(self, instance_id):
|
||||
"""
|
||||
Stops the Instance whose name is given by 'instance_id'. Returns True if successful, or
|
||||
returns False if the Instance is already stopped
|
||||
"""
|
||||
|
||||
try:
|
||||
self.service.create_instance_action(
|
||||
instance_id,
|
||||
type="stop",
|
||||
)
|
||||
logging.info("Stopped Instance -- '{}'".format(instance_id))
|
||||
return True
|
||||
except Exception as e:
|
||||
logging.info("Instance '{}' could not be stopped".format(instance_id))
|
||||
logging.info("error" + str(e))
|
||||
return False
|
||||
|
||||
def start_instances(self, instance_id):
|
||||
"""
|
||||
Starts the Instance whose name is given by 'instance_id'. Returns True if successful, or
|
||||
returns False if the Instance is already running
|
||||
"""
|
||||
|
||||
try:
|
||||
self.service.create_instance_action(
|
||||
instance_id,
|
||||
type="start",
|
||||
)
|
||||
logging.info("Started Instance -- '{}'".format(instance_id))
|
||||
return True
|
||||
except Exception as e:
|
||||
logging.info("Instance '{}' could not start running".format(instance_id))
|
||||
return False
|
||||
|
||||
def list_instances(self):
|
||||
"""
|
||||
Returns a list of Instances present in the datacenter
|
||||
"""
|
||||
instance_names = []
|
||||
try:
|
||||
instances_result = self.service.list_instances().get_result()
|
||||
instances_list = instances_result["instances"]
|
||||
for vpc in instances_list:
|
||||
instance_names.append({"vpc_name": vpc["name"], "vpc_id": vpc["id"]})
|
||||
starting_count = instances_result["total_count"]
|
||||
while instances_result["total_count"] == instances_result["limit"]:
|
||||
instances_result = self.service.list_instances(
|
||||
start=starting_count
|
||||
).get_result()
|
||||
instances_list = instances_result["instances"]
|
||||
starting_count += instances_result["total_count"]
|
||||
for vpc in instances_list:
|
||||
instance_names.append({"vpc_name": vpc.name, "vpc_id": vpc.id})
|
||||
except Exception as e:
|
||||
logging.error("Error listing out instances: " + str(e))
|
||||
sys.exit(1)
|
||||
return instance_names
|
||||
|
||||
def find_id_in_list(self, name, vpc_list):
|
||||
for vpc in vpc_list:
|
||||
if vpc["vpc_name"] == name:
|
||||
return vpc["vpc_id"]
|
||||
|
||||
def get_instance_status(self, instance_id):
|
||||
"""
|
||||
Returns the status of the Instance whose name is given by 'instance_id'
|
||||
"""
|
||||
|
||||
try:
|
||||
instance = self.service.get_instance(instance_id).get_result()
|
||||
state = instance["status"]
|
||||
return state
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to get node instance status %s. Encountered following "
|
||||
"exception: %s." % (instance_id, e)
|
||||
)
|
||||
return None
|
||||
|
||||
def wait_until_deleted(self, instance_id, timeout):
|
||||
"""
|
||||
Waits until the instance is deleted or until the timeout. Returns True if
|
||||
the instance is successfully deleted, else returns False
|
||||
"""
|
||||
|
||||
time_counter = 0
|
||||
vpc = self.get_instance_status(instance_id)
|
||||
while vpc is not None:
|
||||
vpc = self.get_instance_status(instance_id)
|
||||
logging.info(
|
||||
"Instance %s is still being deleted, sleeping for 5 seconds"
|
||||
% instance_id
|
||||
)
|
||||
time.sleep(5)
|
||||
time_counter += 5
|
||||
if time_counter >= timeout:
|
||||
logging.info(
|
||||
"Instance %s is still not deleted in allotted time" % instance_id
|
||||
)
|
||||
return False
|
||||
return True
|
||||
|
||||
def wait_until_running(self, instance_id, timeout):
|
||||
"""
|
||||
Waits until the Instance switches to running state or until the timeout.
|
||||
Returns True if the Instance switches to running, else returns False
|
||||
"""
|
||||
|
||||
time_counter = 0
|
||||
status = self.get_instance_status(instance_id)
|
||||
while status != "running":
|
||||
status = self.get_instance_status(instance_id)
|
||||
logging.info(
|
||||
"Instance %s is still not running, sleeping for 5 seconds" % instance_id
|
||||
)
|
||||
time.sleep(5)
|
||||
time_counter += 5
|
||||
if time_counter >= timeout:
|
||||
logging.info(
|
||||
"Instance %s is still not ready in allotted time" % instance_id
|
||||
)
|
||||
return False
|
||||
return True
|
||||
|
||||
def wait_until_stopped(self, instance_id, timeout):
|
||||
"""
|
||||
Waits until the Instance switches to stopped state or until the timeout.
|
||||
Returns True if the Instance switches to stopped, else returns False
|
||||
"""
|
||||
|
||||
time_counter = 0
|
||||
status = self.get_instance_status(instance_id)
|
||||
while status != "stopped":
|
||||
status = self.get_instance_status(instance_id)
|
||||
logging.info(
|
||||
"Instance %s is still not stopped, sleeping for 5 seconds" % instance_id
|
||||
)
|
||||
time.sleep(5)
|
||||
time_counter += 5
|
||||
if time_counter >= timeout:
|
||||
logging.info(
|
||||
"Instance %s is still not stopped in allotted time" % instance_id
|
||||
)
|
||||
return False
|
||||
return True
|
||||
|
||||
def wait_until_rebooted(self, instance_id, timeout):
|
||||
"""
|
||||
Waits until the Instance switches to restarting state and then running state or until the timeout.
|
||||
Returns True if the Instance switches back to running, else returns False
|
||||
"""
|
||||
|
||||
time_counter = 0
|
||||
status = self.get_instance_status(instance_id)
|
||||
while status == "starting":
|
||||
status = self.get_instance_status(instance_id)
|
||||
logging.info(
|
||||
"Instance %s is still restarting, sleeping for 5 seconds" % instance_id
|
||||
)
|
||||
time.sleep(5)
|
||||
time_counter += 5
|
||||
if time_counter >= timeout:
|
||||
logging.info(
|
||||
"Instance %s is still restarting after allotted time" % instance_id
|
||||
)
|
||||
return False
|
||||
self.wait_until_running(instance_id, timeout)
|
||||
return True
|
||||
|
||||
|
||||
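All of the wait_until_* helpers in the removed IbmCloud class follow the same poll/sleep/timeout shape. A generic sketch of that pattern, for illustration only (wait_until and the lambda in the usage comment are hypothetical, not part of krkn):

import time
from typing import Callable


def wait_until(predicate: Callable[[], bool], timeout: int, interval: int = 5) -> bool:
    """Poll `predicate` every `interval` seconds until it returns True or `timeout` elapses."""
    elapsed = 0
    while not predicate():
        time.sleep(interval)
        elapsed += interval
        if elapsed >= timeout:
            return False
    return True


# hypothetical usage: wait_until(lambda: get_instance_status(instance_id) == "running", timeout=180)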
@dataclass
|
||||
class Node:
|
||||
name: str
|
||||
|
||||
|
||||
@dataclass
|
||||
class NodeScenarioSuccessOutput:
|
||||
|
||||
nodes: typing.Dict[int, Node] = field(
|
||||
metadata={
|
||||
"name": "Nodes started/stopped/terminated/rebooted",
|
||||
"description": """Map between timestamps and the pods started/stopped/terminated/rebooted.
|
||||
The timestamp is provided in nanoseconds""",
|
||||
}
|
||||
)
|
||||
action: kube_helper.Actions = field(
|
||||
metadata={
|
||||
"name": "The action performed on the node",
|
||||
"description": """The action performed or attempted to be performed on the node. Possible values
|
||||
are : Start, Stop, Terminate, Reboot""",
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class NodeScenarioErrorOutput:
|
||||
|
||||
error: str
|
||||
action: kube_helper.Actions = field(
|
||||
metadata={
|
||||
"name": "The action performed on the node",
|
||||
"description": """The action attempted to be performed on the node. Possible values are : Start
|
||||
Stop, Terminate, Reboot""",
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class NodeScenarioConfig:
|
||||
|
||||
name: typing.Annotated[
|
||||
typing.Optional[str],
|
||||
validation.required_if_not("label_selector"),
|
||||
validation.required_if("skip_openshift_checks"),
|
||||
] = field(
|
||||
default=None,
|
||||
metadata={
|
||||
"name": "Name",
|
||||
"description": "Name(s) for target nodes. Required if label_selector is not set.",
|
||||
},
|
||||
)
|
||||
|
||||
runs: typing.Annotated[typing.Optional[int], validation.min(1)] = field(
|
||||
default=1,
|
||||
metadata={
|
||||
"name": "Number of runs per node",
|
||||
"description": "Number of times to inject each scenario under actions (will perform on same node each time)",
|
||||
},
|
||||
)
|
||||
|
||||
label_selector: typing.Annotated[
|
||||
typing.Optional[str], validation.min(1), validation.required_if_not("name")
|
||||
] = field(
|
||||
default=None,
|
||||
metadata={
|
||||
"name": "Label selector",
|
||||
"description": "Kubernetes label selector for the target nodes. Required if name is not set.\n"
|
||||
"See https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for details.",
|
||||
},
|
||||
)
|
||||
|
||||
timeout: typing.Annotated[typing.Optional[int], validation.min(1)] = field(
|
||||
default=180,
|
||||
metadata={
|
||||
"name": "Timeout",
|
||||
"description": "Timeout to wait for the target pod(s) to be removed in seconds.",
|
||||
},
|
||||
)
|
||||
|
||||
instance_count: typing.Annotated[typing.Optional[int], validation.min(1)] = field(
|
||||
default=1,
|
||||
metadata={
|
||||
"name": "Instance Count",
|
||||
"description": "Number of nodes to perform action/select that match the label selector.",
|
||||
},
|
||||
)
|
||||
|
||||
skip_openshift_checks: typing.Optional[bool] = field(
|
||||
default=False,
|
||||
metadata={
|
||||
"name": "Skip Openshift Checks",
|
||||
"description": "Skip checking the status of the openshift nodes.",
|
||||
},
|
||||
)
|
||||
|
||||
kubeconfig_path: typing.Optional[str] = field(
|
||||
default=None,
|
||||
metadata={
|
||||
"name": "Kubeconfig path",
|
||||
"description": "Path to your Kubeconfig file. Defaults to ~/.kube/config.\n"
|
||||
"See https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/ for "
|
||||
"details.",
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@plugin.step(
|
||||
id="ibmcloud-node-start",
|
||||
name="Start the node",
|
||||
description="Start the node(s) by starting the Ibmcloud Instance on which the node is configured",
|
||||
outputs={"success": NodeScenarioSuccessOutput, "error": NodeScenarioErrorOutput},
|
||||
)
|
||||
def node_start(
|
||||
cfg: NodeScenarioConfig,
|
||||
) -> typing.Tuple[
|
||||
str, typing.Union[NodeScenarioSuccessOutput, NodeScenarioErrorOutput]
|
||||
]:
|
||||
with kube_helper.setup_kubernetes(None) as cli:
|
||||
ibmcloud = IbmCloud()
|
||||
core_v1 = client.CoreV1Api(cli)
|
||||
watch_resource = watch.Watch()
|
||||
node_list = kube_helper.get_node_list(cfg, kube_helper.Actions.START, core_v1)
|
||||
node_name_id_list = ibmcloud.list_instances()
|
||||
nodes_started = {}
|
||||
for name in node_list:
|
||||
try:
|
||||
for _ in range(cfg.runs):
|
||||
logging.info("Starting node_start_scenario injection")
|
||||
logging.info("Starting the node %s " % (name))
|
||||
instance_id = ibmcloud.find_id_in_list(name, node_name_id_list)
|
||||
if instance_id:
|
||||
vm_started = ibmcloud.start_instances(instance_id)
|
||||
if vm_started:
|
||||
ibmcloud.wait_until_running(instance_id, cfg.timeout)
|
||||
if not cfg.skip_openshift_checks:
|
||||
kube_helper.wait_for_ready_status(
|
||||
name, cfg.timeout, watch_resource, core_v1
|
||||
)
|
||||
nodes_started[int(time.time_ns())] = Node(name=name)
|
||||
logging.info(
|
||||
"Node with instance ID: %s is in running state" % name
|
||||
)
|
||||
logging.info(
|
||||
"node_start_scenario has been successfully injected!"
|
||||
)
|
||||
else:
|
||||
logging.error(
|
||||
"Failed to find node that matched instances on ibm cloud in region"
|
||||
)
|
||||
return "error", NodeScenarioErrorOutput(
|
||||
"No matching vpc with node name " + name,
|
||||
kube_helper.Actions.START,
|
||||
)
|
||||
except Exception as e:
|
||||
logging.error("Failed to start node instance. Test Failed")
|
||||
logging.error("node_start_scenario injection failed!")
|
||||
return "error", NodeScenarioErrorOutput(
|
||||
format_exc(), kube_helper.Actions.START
|
||||
)
|
||||
|
||||
return "success", NodeScenarioSuccessOutput(
|
||||
nodes_started, kube_helper.Actions.START
|
||||
)
|
||||
|
||||
|
||||
@plugin.step(
|
||||
id="ibmcloud-node-stop",
|
||||
name="Stop the node",
|
||||
description="Stop the node(s) by starting the Ibmcloud Instance on which the node is configured",
|
||||
outputs={"success": NodeScenarioSuccessOutput, "error": NodeScenarioErrorOutput},
|
||||
)
|
||||
def node_stop(
|
||||
cfg: NodeScenarioConfig,
|
||||
) -> typing.Tuple[
|
||||
str, typing.Union[NodeScenarioSuccessOutput, NodeScenarioErrorOutput]
|
||||
]:
|
||||
with kube_helper.setup_kubernetes(None) as cli:
|
||||
ibmcloud = IbmCloud()
|
||||
core_v1 = client.CoreV1Api(cli)
|
||||
watch_resource = watch.Watch()
|
||||
logging.info("set up done")
|
||||
node_list = kube_helper.get_node_list(cfg, kube_helper.Actions.STOP, core_v1)
|
||||
logging.info("set node list" + str(node_list))
|
||||
node_name_id_list = ibmcloud.list_instances()
|
||||
logging.info("node names" + str(node_name_id_list))
|
||||
nodes_stopped = {}
|
||||
for name in node_list:
|
||||
try:
|
||||
for _ in range(cfg.runs):
|
||||
logging.info("Starting node_stop_scenario injection")
|
||||
logging.info("Stopping the node %s " % (name))
|
||||
instance_id = ibmcloud.find_id_in_list(name, node_name_id_list)
|
||||
if instance_id:
|
||||
vm_stopped = ibmcloud.stop_instances(instance_id)
|
||||
if vm_stopped:
|
||||
ibmcloud.wait_until_stopped(instance_id, cfg.timeout)
|
||||
if not cfg.skip_openshift_checks:
|
||||
kube_helper.wait_for_ready_status(
|
||||
name, cfg.timeout, watch_resource, core_v1
|
||||
)
|
||||
nodes_stopped[int(time.time_ns())] = Node(name=name)
|
||||
logging.info(
|
||||
"Node with instance ID: %s is in stopped state" % name
|
||||
)
|
||||
logging.info(
|
||||
"node_stop_scenario has been successfully injected!"
|
||||
)
|
||||
else:
|
||||
logging.error(
|
||||
"Failed to find node that matched instances on ibm cloud in region"
|
||||
)
|
||||
return "error", NodeScenarioErrorOutput(
|
||||
"No matching vpc with node name " + name,
|
||||
kube_helper.Actions.STOP,
|
||||
)
|
||||
except Exception as e:
|
||||
logging.error("Failed to stop node instance. Test Failed")
|
||||
logging.error("node_stop_scenario injection failed!")
|
||||
return "error", NodeScenarioErrorOutput(
|
||||
format_exc(), kube_helper.Actions.STOP
|
||||
)
|
||||
|
||||
return "success", NodeScenarioSuccessOutput(
|
||||
nodes_stopped, kube_helper.Actions.STOP
|
||||
)
|
||||
|
||||
|
||||
@plugin.step(
|
||||
id="ibmcloud-node-reboot",
|
||||
name="Reboot Ibmcloud Instance",
|
||||
description="Reboot the node(s) by starting the Ibmcloud Instance on which the node is configured",
|
||||
outputs={"success": NodeScenarioSuccessOutput, "error": NodeScenarioErrorOutput},
|
||||
)
|
||||
def node_reboot(
|
||||
cfg: NodeScenarioConfig,
|
||||
) -> typing.Tuple[
|
||||
str, typing.Union[NodeScenarioSuccessOutput, NodeScenarioErrorOutput]
|
||||
]:
|
||||
with kube_helper.setup_kubernetes(None) as cli:
|
||||
ibmcloud = IbmCloud()
|
||||
core_v1 = client.CoreV1Api(cli)
|
||||
watch_resource = watch.Watch()
|
||||
node_list = kube_helper.get_node_list(cfg, kube_helper.Actions.REBOOT, core_v1)
|
||||
node_name_id_list = ibmcloud.list_instances()
|
||||
nodes_rebooted = {}
|
||||
for name in node_list:
|
||||
try:
|
||||
for _ in range(cfg.runs):
|
||||
logging.info("Starting node_reboot_scenario injection")
|
||||
logging.info("Rebooting the node %s " % (name))
|
||||
instance_id = ibmcloud.find_id_in_list(name, node_name_id_list)
|
||||
if instance_id:
|
||||
ibmcloud.reboot_instances(instance_id)
|
||||
ibmcloud.wait_until_rebooted(instance_id, cfg.timeout)
|
||||
if not cfg.skip_openshift_checks:
|
||||
kube_helper.wait_for_unknown_status(
|
||||
name, cfg.timeout, watch_resource, core_v1
|
||||
)
|
||||
kube_helper.wait_for_ready_status(
|
||||
name, cfg.timeout, watch_resource, core_v1
|
||||
)
|
||||
nodes_rebooted[int(time.time_ns())] = Node(name=name)
|
||||
logging.info(
|
||||
"Node with instance ID: %s has rebooted successfully" % name
|
||||
)
|
||||
logging.info(
|
||||
"node_reboot_scenario has been successfully injected!"
|
||||
)
|
||||
else:
|
||||
logging.error(
|
||||
"Failed to find node that matched instances on ibm cloud in region"
|
||||
)
|
||||
return "error", NodeScenarioErrorOutput(
|
||||
"No matching vpc with node name " + name,
|
||||
kube_helper.Actions.REBOOT,
|
||||
)
|
||||
except Exception as e:
|
||||
logging.error("Failed to reboot node instance. Test Failed")
|
||||
logging.error("node_reboot_scenario injection failed!")
|
||||
return "error", NodeScenarioErrorOutput(
|
||||
format_exc(), kube_helper.Actions.REBOOT
|
||||
)
|
||||
|
||||
return "success", NodeScenarioSuccessOutput(
|
||||
nodes_rebooted, kube_helper.Actions.REBOOT
|
||||
)
|
||||
|
||||
|
||||
@plugin.step(
|
||||
id="ibmcloud-node-terminate",
|
||||
name="Reboot Ibmcloud Instance",
|
||||
description="Wait for node to be deleted",
|
||||
outputs={"success": NodeScenarioSuccessOutput, "error": NodeScenarioErrorOutput},
|
||||
)
|
||||
def node_terminate(
|
||||
cfg: NodeScenarioConfig,
|
||||
) -> typing.Tuple[
|
||||
str, typing.Union[NodeScenarioSuccessOutput, NodeScenarioErrorOutput]
|
||||
]:
|
||||
with kube_helper.setup_kubernetes(None) as cli:
|
||||
ibmcloud = IbmCloud()
|
||||
core_v1 = client.CoreV1Api(cli)
|
||||
node_list = kube_helper.get_node_list(
|
||||
cfg, kube_helper.Actions.TERMINATE, core_v1
|
||||
)
|
||||
node_name_id_list = ibmcloud.list_instances()
|
||||
nodes_terminated = {}
|
||||
for name in node_list:
|
||||
try:
|
||||
for _ in range(cfg.runs):
|
||||
logging.info(
|
||||
"Starting node_termination_scenario injection by first stopping the node"
|
||||
)
|
||||
instance_id = ibmcloud.find_id_in_list(name, node_name_id_list)
|
||||
logging.info("Deleting the node with instance ID: %s " % (name))
|
||||
if instance_id:
|
||||
ibmcloud.delete_instance(instance_id)
|
||||
ibmcloud.wait_until_released(name, cfg.timeout)
|
||||
nodes_terminated[int(time.time_ns())] = Node(name=name)
|
||||
logging.info(
|
||||
"Node with instance ID: %s has been released" % name
|
||||
)
|
||||
logging.info(
|
||||
"node_terminate_scenario has been successfully injected!"
|
||||
)
|
||||
else:
|
||||
logging.error(
|
||||
"Failed to find instances that matched the node specifications on ibm cloud in the set region"
|
||||
)
|
||||
return "error", NodeScenarioErrorOutput(
|
||||
"No matching vpc with node name " + name,
|
||||
kube_helper.Actions.TERMINATE,
|
||||
)
|
||||
except Exception as e:
|
||||
logging.error("Failed to terminate node instance. Test Failed")
|
||||
logging.error("node_terminate_scenario injection failed!")
|
||||
return "error", NodeScenarioErrorOutput(
|
||||
format_exc(), kube_helper.Actions.TERMINATE
|
||||
)
|
||||
|
||||
return "success", NodeScenarioSuccessOutput(
|
||||
nodes_terminated, kube_helper.Actions.TERMINATE
|
||||
)
|
||||
@@ -1,179 +0,0 @@
|
||||
from kubernetes import config, client
|
||||
from kubernetes.client.rest import ApiException
|
||||
import logging
|
||||
import random
|
||||
from enum import Enum
|
||||
|
||||
|
||||
class Actions(Enum):
|
||||
"""
|
||||
This enumeration indicates different kinds of node operations
|
||||
"""
|
||||
|
||||
START = "Start"
|
||||
STOP = "Stop"
|
||||
TERMINATE = "Terminate"
|
||||
REBOOT = "Reboot"
|
||||
|
||||
|
||||
def setup_kubernetes(kubeconfig_path):
|
||||
"""
|
||||
Sets up the Kubernetes client
|
||||
"""
|
||||
|
||||
if kubeconfig_path is None:
|
||||
kubeconfig_path = config.KUBE_CONFIG_DEFAULT_LOCATION
|
||||
kubeconfig = config.kube_config.KubeConfigMerger(kubeconfig_path)
|
||||
|
||||
if kubeconfig.config is None:
|
||||
raise Exception(
|
||||
"Invalid kube-config file: %s. " "No configuration found." % kubeconfig_path
|
||||
)
|
||||
loader = config.kube_config.KubeConfigLoader(
|
||||
config_dict=kubeconfig.config,
|
||||
)
|
||||
client_config = client.Configuration()
|
||||
loader.load_and_set(client_config)
|
||||
return client.ApiClient(configuration=client_config)
|
||||
|
||||
|
||||
def list_killable_nodes(core_v1, label_selector=None):
|
||||
"""
|
||||
Returns a list of nodes that can be stopped/reset/released
|
||||
"""
|
||||
|
||||
nodes = []
|
||||
try:
|
||||
if label_selector:
|
||||
ret = core_v1.list_node(pretty=True, label_selector=label_selector)
|
||||
else:
|
||||
ret = core_v1.list_node(pretty=True)
|
||||
except ApiException as e:
|
||||
logging.error("Exception when calling CoreV1Api->list_node: %s\n" % e)
|
||||
raise e
|
||||
for node in ret.items:
|
||||
for cond in node.status.conditions:
|
||||
if str(cond.type) == "Ready" and str(cond.status) == "True":
|
||||
nodes.append(node.metadata.name)
|
||||
return nodes
|
||||
|
||||
|
||||
def list_startable_nodes(core_v1, label_selector=None):
|
||||
"""
|
||||
Returns a list of nodes that can be started
|
||||
"""
|
||||
|
||||
nodes = []
|
||||
try:
|
||||
if label_selector:
|
||||
ret = core_v1.list_node(pretty=True, label_selector=label_selector)
|
||||
else:
|
||||
ret = core_v1.list_node(pretty=True)
|
||||
except ApiException as e:
|
||||
logging.error("Exception when calling CoreV1Api->list_node: %s\n" % e)
|
||||
raise e
|
||||
for node in ret.items:
|
||||
for cond in node.status.conditions:
|
||||
if str(cond.type) == "Ready" and str(cond.status) != "True":
|
||||
nodes.append(node.metadata.name)
|
||||
return nodes
|
||||
|
||||
|
||||
def get_node_list(cfg, action, core_v1):
|
||||
"""
|
||||
Returns a list of nodes to be used in the node scenarios. The list returned is constructed as follows:
|
||||
- If the key 'name' is present in the node scenario config, the value is extracted and split into
|
||||
a list
|
||||
- Each node in the list is fed to the get_node function which checks if the node is killable or
|
||||
fetches the node using the label selector
|
||||
"""
|
||||
|
||||
def get_node(node_name, label_selector, instance_kill_count, action, core_v1):
|
||||
list_nodes_func = (
|
||||
list_startable_nodes if action == Actions.START else list_killable_nodes
|
||||
)
|
||||
if node_name in list_nodes_func(core_v1):
|
||||
return [node_name]
|
||||
elif node_name:
|
||||
logging.info(
|
||||
"Node with provided node_name does not exist or the node might "
|
||||
"be in NotReady state."
|
||||
)
|
||||
nodes = list_nodes_func(core_v1, label_selector)
|
||||
if not nodes:
|
||||
raise Exception("Ready nodes with the provided label selector do not exist")
|
||||
logging.info(
|
||||
"Ready nodes with the label selector %s: %s" % (label_selector, nodes)
|
||||
)
|
||||
number_of_nodes = len(nodes)
|
||||
if instance_kill_count == number_of_nodes:
|
||||
return nodes
|
||||
nodes_to_return = []
|
||||
for i in range(instance_kill_count):
|
||||
node_to_add = nodes[random.randint(0, len(nodes) - 1)]
|
||||
nodes_to_return.append(node_to_add)
|
||||
nodes.remove(node_to_add)
|
||||
return nodes_to_return
|
||||
|
||||
if cfg.name:
|
||||
input_nodes = cfg.name.split(",")
|
||||
else:
|
||||
input_nodes = [""]
|
||||
scenario_nodes = set()
|
||||
|
||||
if cfg.skip_openshift_checks:
|
||||
scenario_nodes = input_nodes
|
||||
else:
|
||||
for node in input_nodes:
|
||||
nodes = get_node(
|
||||
node, cfg.label_selector, cfg.instance_count, action, core_v1
|
||||
)
|
||||
scenario_nodes.update(nodes)
|
||||
|
||||
return list(scenario_nodes)
|
||||
|
||||
|
||||
def watch_node_status(node, status, timeout, watch_resource, core_v1):
|
||||
"""
|
||||
Monitor the status of a node for change
|
||||
"""
|
||||
count = timeout
|
||||
for event in watch_resource.stream(
|
||||
core_v1.list_node,
|
||||
field_selector=f"metadata.name={node}",
|
||||
timeout_seconds=timeout,
|
||||
):
|
||||
conditions = [
|
||||
status
|
||||
for status in event["object"].status.conditions
|
||||
if status.type == "Ready"
|
||||
]
|
||||
if conditions[0].status == status:
|
||||
watch_resource.stop()
|
||||
break
|
||||
else:
|
||||
count -= 1
|
||||
logging.info("Status of node " + node + ": " + str(conditions[0].status))
|
||||
if not count:
|
||||
watch_resource.stop()
|
||||
|
||||
|
||||
def wait_for_ready_status(node, timeout, watch_resource, core_v1):
|
||||
"""
|
||||
Wait until the node status becomes Ready
|
||||
"""
|
||||
watch_node_status(node, "True", timeout, watch_resource, core_v1)
|
||||
|
||||
|
||||
def wait_for_not_ready_status(node, timeout, watch_resource, core_v1):
|
||||
"""
|
||||
Wait until the node status becomes Not Ready
|
||||
"""
|
||||
watch_node_status(node, "False", timeout, watch_resource, core_v1)
|
||||
|
||||
|
||||
def wait_for_unknown_status(node, timeout, watch_resource, core_v1):
|
||||
"""
|
||||
Wait until the node status becomes Unknown
|
||||
"""
|
||||
watch_node_status(node, "Unknown", timeout, watch_resource, core_v1)
|
||||
@@ -12,15 +12,11 @@ from krkn.scenario_plugins.native.pod_network_outage.pod_network_outage_plugin i
|
||||
from krkn.scenario_plugins.native.pod_network_outage.pod_network_outage_plugin import (
|
||||
pod_egress_shaping,
|
||||
)
|
||||
import krkn.scenario_plugins.native.node_scenarios.ibmcloud_plugin as ibmcloud_plugin
|
||||
from krkn.scenario_plugins.native.pod_network_outage.pod_network_outage_plugin import (
|
||||
pod_ingress_shaping,
|
||||
)
|
||||
from arcaflow_plugin_sdk import schema, serialization, jsonschema
|
||||
|
||||
from krkn.scenario_plugins.native.node_scenarios import vmware_plugin
|
||||
|
||||
|
||||
@dataclasses.dataclass
|
||||
class PluginStep:
|
||||
schema: schema.StepSchema
|
||||
@@ -160,14 +156,6 @@ PLUGINS = Plugins(
|
||||
),
|
||||
PluginStep(wait_for_pods, ["error"]),
|
||||
PluginStep(run_python_file, ["error"]),
|
||||
PluginStep(vmware_plugin.node_start, ["error"]),
|
||||
PluginStep(vmware_plugin.node_stop, ["error"]),
|
||||
PluginStep(vmware_plugin.node_reboot, ["error"]),
|
||||
PluginStep(vmware_plugin.node_terminate, ["error"]),
|
||||
PluginStep(ibmcloud_plugin.node_start, ["error"]),
|
||||
PluginStep(ibmcloud_plugin.node_stop, ["error"]),
|
||||
PluginStep(ibmcloud_plugin.node_reboot, ["error"]),
|
||||
PluginStep(ibmcloud_plugin.node_terminate, ["error"]),
|
||||
PluginStep(network_chaos, ["error"]),
|
||||
PluginStep(pod_outage, ["error"]),
|
||||
PluginStep(pod_egress_shaping, ["error"]),
|
||||
|
||||
@@ -27,13 +27,13 @@ def create_job(batch_cli, body, namespace="default"):
|
||||
body=body, namespace=namespace)
|
||||
return api_response
|
||||
except ApiException as api:
|
||||
logging.warn(
|
||||
logging.warning(
|
||||
"Exception when calling \
|
||||
BatchV1Api->create_job: %s"
|
||||
% api
|
||||
)
|
||||
if api.status == 409:
|
||||
logging.warn("Job already present")
|
||||
logging.warning("Job already present")
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Exception when calling \
|
||||
@@ -196,12 +196,12 @@ def delete_job(batch_cli, name, namespace="default"):
|
||||
logging.debug("Job deleted. status='%s'" % str(api_response.status))
|
||||
return api_response
|
||||
except ApiException as api:
|
||||
logging.warn(
|
||||
logging.warning(
|
||||
"Exception when calling \
|
||||
BatchV1Api->create_namespaced_job: %s"
|
||||
% api
|
||||
)
|
||||
logging.warn("Job already deleted\n")
|
||||
logging.warning("Job already deleted\n")
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Exception when calling \
|
||||
|
||||
@@ -102,7 +102,7 @@ def delete_jobs(kubecli: KrknKubernetes, job_list: typing.List[str]):
|
||||
pod_log = pod_log_response.data.decode("utf-8")
|
||||
logging.error(pod_log)
|
||||
except Exception as e:
|
||||
logging.warn("Exception in getting job status: %s" % str(e))
|
||||
logging.warning("Exception in getting job status: %s" % str(e))
|
||||
api_response = kubecli.delete_job(name=job_name, namespace="default")
|
||||
|
||||
|
||||
@@ -137,7 +137,7 @@ def wait_for_job(
|
||||
count += 1
|
||||
job_list.remove(job_name)
|
||||
except Exception:
|
||||
logging.warn("Exception in getting job status")
|
||||
logging.warning("Exception in getting job status")
|
||||
if time.time() > wait_time:
|
||||
raise Exception(
|
||||
"Jobs did not complete within "
|
||||
@@ -229,6 +229,8 @@ def apply_outage_policy(
|
||||
"""
|
||||
|
||||
job_list = []
|
||||
yml_list = []
|
||||
cookie_list = []
|
||||
cookie = random.randint(100, 10000)
|
||||
net_direction = {"egress": "nw_src", "ingress": "nw_dst"}
|
||||
br = "br0"
|
||||
@@ -237,7 +239,7 @@ def apply_outage_policy(
|
||||
br = "br-int"
|
||||
table = 8
|
||||
for node, ips in node_dict.items():
|
||||
while len(check_cookie(node, pod_template, br, cookie, kubecli)) > 2:
|
||||
while len(check_cookie(node, pod_template, br, cookie, kubecli)) > 2 or cookie in cookie_list:
|
||||
cookie = random.randint(100, 10000)
|
||||
exec_cmd = ""
|
||||
for ip in ips:
|
||||
@@ -247,7 +249,8 @@ def apply_outage_policy(
|
||||
exec_cmd = f"{exec_cmd}ovs-ofctl -O OpenFlow13 add-flow {br} cookie={cookie},table={table},priority=65535,udp,{net_direction[direction]}={ip},tp_dst={target_port},actions=drop;"
|
||||
if not ports:
|
||||
exec_cmd = f"{exec_cmd}ovs-ofctl -O OpenFlow13 add-flow {br} cookie={cookie},table={table},priority=65535,ip,{net_direction[direction]}={ip},actions=drop;"
|
||||
exec_cmd = f"{exec_cmd}sleep {duration};ovs-ofctl -O OpenFlow13 del-flows {br} cookie={cookie}/-1"
|
||||
exec_cmd = f"sleep 30;{exec_cmd}sleep {duration};ovs-ofctl -O OpenFlow13 del-flows {br} cookie={cookie}/-1"
|
||||
cookie_list.append(cookie)
|
||||
logging.info("Executing %s on node %s" % (exec_cmd, node))
|
||||
|
||||
job_body = yaml.safe_load(
|
||||
@@ -257,6 +260,8 @@ def apply_outage_policy(
|
||||
cmd=exec_cmd,
|
||||
)
|
||||
)
|
||||
yml_list.append(job_body)
|
||||
for job_body in yml_list:
|
||||
api_response = kubecli.create_job(job_body)
|
||||
if api_response is None:
|
||||
raise Exception("Error creating job")
|
||||
@@ -320,6 +325,7 @@ def apply_ingress_policy(
|
||||
"""
|
||||
|
||||
job_list = []
|
||||
yml_list = []
|
||||
|
||||
create_virtual_interfaces(kubecli, len(ips), node, pod_template)
|
||||
|
||||
@@ -332,12 +338,16 @@ def apply_ingress_policy(
|
||||
job_body = yaml.safe_load(
|
||||
job_template.render(jobname=mod + str(pod_ip), nodename=node, cmd=exec_cmd)
|
||||
)
|
||||
job_list.append(job_body["metadata"]["name"])
|
||||
yml_list.append(job_body)
|
||||
if pod_ip == node:
|
||||
break
|
||||
|
||||
for job_body in yml_list:
|
||||
api_response = kubecli.create_job(job_body)
|
||||
if api_response is None:
|
||||
raise Exception("Error creating job")
|
||||
if pod_ip == node:
|
||||
break
|
||||
|
||||
job_list.append(job_body["metadata"]["name"])
|
||||
return job_list
|
||||
|
||||
|
||||
@@ -396,6 +406,7 @@ def apply_net_policy(
|
||||
"""
|
||||
|
||||
job_list = []
|
||||
yml_list = []
|
||||
|
||||
for pod_ip in set(ips):
|
||||
pod_inf = get_pod_interface(node, pod_ip, pod_template, bridge_name, kubecli)
|
||||
@@ -406,13 +417,18 @@ def apply_net_policy(
|
||||
job_body = yaml.safe_load(
|
||||
job_template.render(jobname=mod + str(pod_ip), nodename=node, cmd=exec_cmd)
|
||||
)
|
||||
job_list.append(job_body["metadata"]["name"])
|
||||
yml_list.append(job_body)
|
||||
|
||||
for job_body in yml_list:
|
||||
api_response = kubecli.create_job(job_body)
|
||||
if api_response is None:
|
||||
raise Exception("Error creating job")
|
||||
|
||||
job_list.append(job_body["metadata"]["name"])
|
||||
return job_list
|
||||
|
||||
|
||||
|
||||
def get_ingress_cmd(
|
||||
execution: str,
|
||||
test_interface: str,
|
||||
@@ -463,7 +479,7 @@ def get_ingress_cmd(
|
||||
tc_set += ";"
|
||||
else:
|
||||
tc_set += " {0} {1} ;".format(param_map[mod], vallst[mod])
|
||||
exec_cmd = "{0} {1} sleep {2};{3}".format(tc_set, tc_ls, duration, tc_unset)
|
||||
exec_cmd = "sleep 30;{0} {1} sleep {2};{3}".format(tc_set, tc_ls, duration, tc_unset)
|
||||
|
||||
return exec_cmd
|
||||
|
||||
@@ -508,7 +524,7 @@ def get_egress_cmd(
|
||||
tc_set += ";"
|
||||
else:
|
||||
tc_set += " {0} {1} ;".format(param_map[mod], vallst[mod])
|
||||
exec_cmd = "{0} {1} sleep {2};{3}".format(tc_set, tc_ls, duration, tc_unset)
|
||||
exec_cmd = "sleep 30;{0} {1} sleep {2};{3}".format(tc_set, tc_ls, duration, tc_unset)
|
||||
|
||||
return exec_cmd
|
||||
|
||||
|
||||
@@ -42,19 +42,13 @@ class NetworkChaosScenarioPlugin(AbstractScenarioPlugin):
|
||||
test_egress = get_yaml_item_value(
|
||||
test_dict, "egress", {"bandwidth": "100mbit"}
|
||||
)
|
||||
|
||||
if test_node:
|
||||
node_name_list = test_node.split(",")
|
||||
nodelst = common_node_functions.get_node_by_name(node_name_list, lib_telemetry.get_lib_kubernetes())
|
||||
else:
|
||||
node_name_list = [test_node]
|
||||
nodelst = []
|
||||
for single_node_name in node_name_list:
|
||||
nodelst.extend(
|
||||
common_node_functions.get_node(
|
||||
single_node_name,
|
||||
test_node_label,
|
||||
test_instance_count,
|
||||
lib_telemetry.get_lib_kubernetes(),
|
||||
)
|
||||
nodelst = common_node_functions.get_node(
|
||||
test_node_label, test_instance_count, lib_telemetry.get_lib_kubernetes()
|
||||
)
|
||||
file_loader = FileSystemLoader(
|
||||
os.path.abspath(os.path.dirname(__file__))
|
||||
@@ -149,7 +143,10 @@ class NetworkChaosScenarioPlugin(AbstractScenarioPlugin):
|
||||
finally:
|
||||
logging.info("Deleting jobs")
|
||||
self.delete_job(joblst[:], lib_telemetry.get_lib_kubernetes())
|
||||
except (RuntimeError, Exception):
|
||||
except (RuntimeError, Exception) as e:
|
||||
logging.error(
|
||||
"NetworkChaosScenarioPlugin exiting due to Exception %s" % e
|
||||
)
|
||||
scenario_telemetry.exit_status = 1
|
||||
return 1
|
||||
else:
|
||||
@@ -246,7 +243,7 @@ class NetworkChaosScenarioPlugin(AbstractScenarioPlugin):
|
||||
tc_set += ";"
|
||||
else:
|
||||
tc_set += " {0} {1} ;".format(param_map[mod], vallst[mod])
|
||||
exec_cmd = "{0} {1} sleep {2};{3} sleep 20;{4}".format(
|
||||
exec_cmd = "sleep 30;{0} {1} sleep {2};{3} sleep 20;{4}".format(
|
||||
tc_set, tc_ls, duration, tc_unset, tc_ls
|
||||
)
|
||||
return exec_cmd
|
||||
|
||||
0
krkn/scenario_plugins/network_chaos_ng/__init__.py
Normal file
41
krkn/scenario_plugins/network_chaos_ng/models.py
Normal file
@@ -0,0 +1,41 @@
from dataclasses import dataclass
from enum import Enum


class NetworkChaosScenarioType(Enum):
    Node = 1
    Pod = 2


@dataclass
class BaseNetworkChaosConfig:
    supported_execution = ["serial", "parallel"]
    id: str
    wait_duration: int
    test_duration: int
    label_selector: str
    instance_count: int
    execution: str
    namespace: str

    def validate(self) -> list[str]:
        errors = []
        if self.execution is None:
            errors.append(f"execution cannot be None, supported values are: {','.join(self.supported_execution)}")
        if self.execution not in self.supported_execution:
            errors.append(f"{self.execution} is not a supported execution mode: {','.join(self.supported_execution)}")
        if self.label_selector is None:
            errors.append("label_selector cannot be None")
        return errors


@dataclass
class NetworkFilterConfig(BaseNetworkChaosConfig):
    ingress: bool
    egress: bool
    interfaces: list[str]
    target: str
    ports: list[int]

    def validate(self) -> list[str]:
        errors = super().validate()
        # further validations go here
        return errors
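As a usage sketch for the new config model (the import path follows the layout added in this diff; all field values are invented for illustration), a NetworkFilterConfig can be built directly and checked with validate() before a module runs:

from krkn.scenario_plugins.network_chaos_ng.models import NetworkFilterConfig

cfg = NetworkFilterConfig(
    id="node_network_filter",
    wait_duration=30,
    test_duration=60,
    label_selector="kubernetes.io/hostname=worker-0",
    instance_count=1,
    execution="parallel",
    namespace="default",
    ingress=True,
    egress=False,
    interfaces=[],  # empty: the module falls back to the node's default interface
    target="worker-0",
    ports=[2049, 53],
)

errors = cfg.validate()
if errors:
    raise ValueError(f"invalid network filter config: {errors}")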
@@ -0,0 +1,58 @@
import abc
import logging
import queue

from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
from krkn.scenario_plugins.network_chaos_ng.models import BaseNetworkChaosConfig, NetworkChaosScenarioType


class AbstractNetworkChaosModule(abc.ABC):
    """
    The abstract class that needs to be implemented by each Network Chaos Scenario
    """

    @abc.abstractmethod
    def run(self, target: str, kubecli: KrknTelemetryOpenshift, error_queue: queue.Queue = None):
        """
        the entrypoint method for the Network Chaos Scenario

        :param target: The resource name that will be targeted by the scenario (Node Name, Pod Name etc.)
        :param kubecli: The `KrknTelemetryOpenshift` needed by the scenario to access the krkn-lib methods
        :param error_queue: A queue that will be used by the plugin to push the errors raised during the execution of parallel modules
        """
        pass

    @abc.abstractmethod
    def get_config(self) -> (NetworkChaosScenarioType, BaseNetworkChaosConfig):
        """
        returns the common subset of settings shared by all the scenarios `BaseNetworkChaosConfig` and the type of Network
        Chaos Scenario that is running (Pod Scenario or Node Scenario)
        """
        pass

    def log_info(self, message: str, parallel: bool = False, node_name: str = ""):
        """
        log helper method for INFO severity to be used in the scenarios
        """
        if parallel:
            logging.info(f"[{node_name}]: {message}")
        else:
            logging.info(message)

    def log_warning(self, message: str, parallel: bool = False, node_name: str = ""):
        """
        log helper method for WARNING severity to be used in the scenarios
        """
        if parallel:
            logging.warning(f"[{node_name}]: {message}")
        else:
            logging.warning(message)

    def log_error(self, message: str, parallel: bool = False, node_name: str = ""):
        """
        log helper method for ERROR severity to be used in the scenarios
        """
        if parallel:
            logging.error(f"[{node_name}]: {message}")
        else:
            logging.error(message)
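A minimal concrete module satisfying this interface could look like the sketch below; it is a toy example, not one of the shipped modules, and only logs the target before returning its config:

import queue

from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift

from krkn.scenario_plugins.network_chaos_ng.models import (
    BaseNetworkChaosConfig,
    NetworkChaosScenarioType,
)
from krkn.scenario_plugins.network_chaos_ng.modules.abstract_network_chaos_module import (
    AbstractNetworkChaosModule,
)


class NoopNetworkChaosModule(AbstractNetworkChaosModule):
    def __init__(self, config: BaseNetworkChaosConfig):
        self.config = config

    def run(self, target: str, kubecli: KrknTelemetryOpenshift, error_queue: queue.Queue = None):
        parallel = error_queue is not None
        try:
            # a real module would inject and later revert the network fault here
            self.log_info("would inject network chaos here", parallel, target)
        except Exception as e:
            if error_queue is None:
                raise
            error_queue.put(str(e))

    def get_config(self) -> (NetworkChaosScenarioType, BaseNetworkChaosConfig):
        return NetworkChaosScenarioType.Node, self.config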
@@ -0,0 +1,136 @@
import os
import queue
import time

import yaml
from jinja2 import Environment, FileSystemLoader

from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
from krkn_lib.utils import get_random_string
from krkn.scenario_plugins.network_chaos_ng.models import (
    BaseNetworkChaosConfig,
    NetworkFilterConfig,
    NetworkChaosScenarioType,
)
from krkn.scenario_plugins.network_chaos_ng.modules.abstract_network_chaos_module import (
    AbstractNetworkChaosModule,
)


class NodeNetworkFilterModule(AbstractNetworkChaosModule):
    config: NetworkFilterConfig

    def run(
        self,
        target: str,
        kubecli: KrknTelemetryOpenshift,
        error_queue: queue.Queue = None,
    ):
        parallel = False
        if error_queue:
            parallel = True
        try:
            file_loader = FileSystemLoader(os.path.abspath(os.path.dirname(__file__)))
            env = Environment(loader=file_loader, autoescape=True)
            pod_name = f"node-filter-{get_random_string(5)}"
            pod_template = env.get_template("templates/network-chaos.j2")
            pod_body = yaml.safe_load(
                pod_template.render(
                    pod_name=pod_name,
                    namespace=self.config.namespace,
                    host_network=True,
                    target=target,
                )
            )
            self.log_info(
                f"creating pod to filter "
                f"ports {','.join([str(port) for port in self.config.ports])}, "
                f"ingress:{str(self.config.ingress)}, "
                f"egress:{str(self.config.egress)}",
                parallel,
                target,
            )
            kubecli.get_lib_kubernetes().create_pod(
                pod_body, self.config.namespace, 300
            )

            if len(self.config.interfaces) == 0:
                interfaces = [
                    self.get_default_interface(pod_name, self.config.namespace, kubecli)
                ]
                self.log_info(f"detected default interface {interfaces[0]}")
            else:
                interfaces = self.config.interfaces

            input_rules, output_rules = self.generate_rules(interfaces)

            for rule in input_rules:
                self.log_info(f"applying iptables INPUT rule: {rule}", parallel, target)
                kubecli.get_lib_kubernetes().exec_cmd_in_pod(
                    [rule], pod_name, self.config.namespace
                )
            for rule in output_rules:
                self.log_info(
                    f"applying iptables OUTPUT rule: {rule}", parallel, target
                )
                kubecli.get_lib_kubernetes().exec_cmd_in_pod(
                    [rule], pod_name, self.config.namespace
                )
            self.log_info(
                f"waiting {self.config.test_duration} seconds before removing the iptables rules"
            )
            time.sleep(self.config.test_duration)
            self.log_info("removing iptables rules")
            for _ in input_rules:
                # always delete the first rule since the rules have been inserted from the top
                kubecli.get_lib_kubernetes().exec_cmd_in_pod(
                    ["iptables -D INPUT 1"], pod_name, self.config.namespace
                )
            for _ in output_rules:
                # always delete the first rule since the rules have been inserted from the top
                kubecli.get_lib_kubernetes().exec_cmd_in_pod(
                    ["iptables -D OUTPUT 1"], pod_name, self.config.namespace
                )
            self.log_info(
                f"deleting network chaos pod {pod_name} from {self.config.namespace}"
            )

            kubecli.get_lib_kubernetes().delete_pod(pod_name, self.config.namespace)

        except Exception as e:
            if error_queue is None:
                raise e
            else:
                error_queue.put(str(e))

    def __init__(self, config: NetworkFilterConfig):
        self.config = config

    def get_config(self) -> (NetworkChaosScenarioType, BaseNetworkChaosConfig):
        return NetworkChaosScenarioType.Node, self.config

    def get_default_interface(
        self, pod_name: str, namespace: str, kubecli: KrknTelemetryOpenshift
    ) -> str:
        cmd = "ip r | grep default | awk '/default/ {print $5}'"
        output = kubecli.get_lib_kubernetes().exec_cmd_in_pod(
            [cmd], pod_name, namespace
        )
        return output.replace("\n", "")

    def generate_rules(self, interfaces: list[str]) -> (list[str], list[str]):
        input_rules = []
        output_rules = []
        for interface in interfaces:
            for port in self.config.ports:
                if self.config.egress:
                    output_rules.append(
                        f"iptables -I OUTPUT 1 -p tcp --dport {port} -m state --state NEW,RELATED,ESTABLISHED -j DROP"
                    )

                if self.config.ingress:
                    input_rules.append(
                        f"iptables -I INPUT 1 -i {interface} -p tcp --dport {port} -m state --state NEW,RELATED,ESTABLISHED -j DROP"
                    )
        return input_rules, output_rules
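
For reference, a minimal standalone sketch of the rule strings that generate_rules produces; the ports, interface and direction flags below are made-up sample values, not taken from any scenario file:

# Standalone sketch: mirrors the rule generation above for a sample config.
ports = [22, 443]
interfaces = ["eth0"]
ingress = True
egress = True

input_rules, output_rules = [], []
for interface in interfaces:
    for port in ports:
        if egress:
            output_rules.append(
                f"iptables -I OUTPUT 1 -p tcp --dport {port} "
                f"-m state --state NEW,RELATED,ESTABLISHED -j DROP"
            )
        if ingress:
            input_rules.append(
                f"iptables -I INPUT 1 -i {interface} -p tcp --dport {port} "
                f"-m state --state NEW,RELATED,ESTABLISHED -j DROP"
            )

for rule in input_rules + output_rules:
    print(rule)
# iptables -I INPUT 1 -i eth0 -p tcp --dport 22 -m state --state NEW,RELATED,ESTABLISHED -j DROP
# ... one INPUT and one OUTPUT rule per port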
@@ -0,0 +1,17 @@
apiVersion: v1
kind: Pod
metadata:
  name: {{pod_name}}
  namespace: {{namespace}}
spec:
{% if host_network %}
  hostNetwork: true
{% endif %}
  nodeSelector:
    kubernetes.io/hostname: {{target}}
  containers:
    - name: fedora
      imagePullPolicy: Always
      image: quay.io/krkn-chaos/krkn-network-chaos:latest
      securityContext:
        privileged: true
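
A quick way to see what the template above renders to; this sketch inlines a trimmed copy of the template and uses placeholder pod, namespace and node values:

# Sketch: render an inline copy of the network-chaos pod template with sample values.
from jinja2 import Environment

template_src = """\
apiVersion: v1
kind: Pod
metadata:
  name: {{pod_name}}
  namespace: {{namespace}}
spec:
{% if host_network %}
  hostNetwork: true
{% endif %}
  nodeSelector:
    kubernetes.io/hostname: {{target}}
"""

env = Environment(autoescape=True)
rendered = env.from_string(template_src).render(
    pod_name="node-filter-abc12",   # placeholder values, not from a real cluster
    namespace="default",
    host_network=True,
    target="worker-0",
)
print(rendered)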
@@ -0,0 +1,24 @@
from krkn.scenario_plugins.network_chaos_ng.models import NetworkFilterConfig
from krkn.scenario_plugins.network_chaos_ng.modules.abstract_network_chaos_module import AbstractNetworkChaosModule
from krkn.scenario_plugins.network_chaos_ng.modules.node_network_filter import NodeNetworkFilterModule


supported_modules = ["node_network_filter"]


class NetworkChaosFactory:

    @staticmethod
    def get_instance(config: dict[str, str]) -> AbstractNetworkChaosModule:
        if config["id"] is None:
            raise Exception("network chaos id cannot be None")
        if config["id"] not in supported_modules:
            raise Exception(f"{config['id']} is not a supported network chaos module")

        if config["id"] == "node_network_filter":
            config = NetworkFilterConfig(**config)
            errors = config.validate()
            if len(errors) > 0:
                raise Exception(f"config validation errors: [{';'.join(errors)}]")
            return NodeNetworkFilterModule(config)
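
For illustration, this is the kind of dict (one entry of the scenario file after yaml.safe_load) that get_instance dispatches on; the values are examples only and the full set of accepted keys is defined by NetworkFilterConfig in models.py, which is outside this hunk:

# Sketch: a sample module config as the factory would receive it.
sample_config = {
    "id": "node_network_filter",          # must be one of supported_modules
    "label_selector": "node-role.kubernetes.io/worker",
    "instance_count": 1,
    "execution": "parallel",
    "wait_duration": 300,
    "test_duration": 120,
    "namespace": "default",
    "ports": [2049],
    "ingress": True,
    "egress": True,
    "interfaces": [],                      # empty list -> default interface is auto-detected
}

# module = NetworkChaosFactory.get_instance(sample_config)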
@@ -0,0 +1,116 @@
import logging
import queue
import random
import threading
import time

import yaml
from krkn_lib.models.telemetry import ScenarioTelemetry
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift

from krkn.scenario_plugins.abstract_scenario_plugin import AbstractScenarioPlugin
from krkn.scenario_plugins.network_chaos_ng.models import (
    NetworkChaosScenarioType,
    BaseNetworkChaosConfig,
)
from krkn.scenario_plugins.network_chaos_ng.modules.abstract_network_chaos_module import (
    AbstractNetworkChaosModule,
)
from krkn.scenario_plugins.network_chaos_ng.network_chaos_factory import (
    NetworkChaosFactory,
)


class NetworkChaosNgScenarioPlugin(AbstractScenarioPlugin):
    def run(
        self,
        run_uuid: str,
        scenario: str,
        krkn_config: dict[str, any],
        lib_telemetry: KrknTelemetryOpenshift,
        scenario_telemetry: ScenarioTelemetry,
    ) -> int:
        try:
            with open(scenario, "r") as file:
                scenario_config = yaml.safe_load(file)
                if not isinstance(scenario_config, list):
                    logging.error(
                        "network chaos scenario config must be a list of objects"
                    )
                    return 1
                for config in scenario_config:
                    network_chaos = NetworkChaosFactory.get_instance(config)
                    network_chaos_config = network_chaos.get_config()
                    logging.info(
                        f"running network_chaos scenario: {network_chaos_config[1].id}"
                    )
                    if network_chaos_config[0] == NetworkChaosScenarioType.Node:
                        targets = lib_telemetry.get_lib_kubernetes().list_nodes(
                            network_chaos_config[1].label_selector
                        )
                    else:
                        targets = lib_telemetry.get_lib_kubernetes().list_pods(
                            network_chaos_config[1].namespace,
                            network_chaos_config[1].label_selector,
                        )
                    if len(targets) == 0:
                        logging.warning(
                            f"no targets found for {network_chaos_config[1].id} "
                            f"network chaos scenario with selector {network_chaos_config[1].label_selector} "
                            f"with target type {network_chaos_config[0]}"
                        )

                    # sample only when more targets are available than requested
                    if network_chaos_config[1].instance_count != 0 and network_chaos_config[1].instance_count < len(targets):
                        targets = random.sample(targets, network_chaos_config[1].instance_count)

                    if network_chaos_config[1].execution == "parallel":
                        self.run_parallel(targets, network_chaos, lib_telemetry)
                    else:
                        self.run_serial(targets, network_chaos, lib_telemetry)
                    if len(scenario_config) > 1:
                        logging.info(f"waiting {network_chaos_config[1].wait_duration} seconds before running the next "
                                     f"Network Chaos NG Module")
                        time.sleep(network_chaos_config[1].wait_duration)
        except Exception as e:
            logging.error(str(e))
            return 1
        return 0

    def run_parallel(
        self,
        targets: list[str],
        module: AbstractNetworkChaosModule,
        lib_telemetry: KrknTelemetryOpenshift,
    ):
        error_queue = queue.Queue()
        threads = []
        errors = []
        for target in targets:
            thread = threading.Thread(
                target=module.run, args=[target, lib_telemetry, error_queue]
            )
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()
        while True:
            try:
                errors.append(error_queue.get_nowait())
            except queue.Empty:
                break
        if len(errors) > 0:
            raise Exception(
                f"module {module.get_config()[1].id} execution failed: [{';'.join(errors)}]"
            )

    def run_serial(
        self,
        targets: list[str],
        module: AbstractNetworkChaosModule,
        lib_telemetry: KrknTelemetryOpenshift,
    ):
        for target in targets:
            module.run(target, lib_telemetry)

    def get_scenario_types(self) -> list[str]:
        return ["network_chaos_ng_scenarios"]
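
The fan-out pattern used by run_parallel above, reduced to a self-contained sketch: workers push failures into a shared queue instead of raising, and the caller drains the queue after joining every thread. Names and targets here are illustrative:

import queue
import threading

def worker(target: str, error_queue: queue.Queue):
    try:
        if target == "bad-node":            # simulate one failing injection
            raise RuntimeError(f"injection failed on {target}")
    except Exception as e:
        error_queue.put(str(e))

errors_q = queue.Queue()
threads = [
    threading.Thread(target=worker, args=[t, errors_q])
    for t in ["node-a", "bad-node", "node-b"]
]
for t in threads:
    t.start()
for t in threads:
    t.join()

errors = []
while True:
    try:
        errors.append(errors_q.get_nowait())
    except queue.Empty:
        break
print(errors)  # ['injection failed on bad-node']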
@@ -4,14 +4,18 @@ import time
import krkn.invoke.command as runcommand
import krkn.scenario_plugins.node_actions.common_node_functions as nodeaction
from krkn_lib.k8s import KrknKubernetes

from krkn_lib.models.k8s import AffectedNode, AffectedNodeStatus

# krkn_lib
class abstract_node_scenarios:
    kubecli: KrknKubernetes
    affected_nodes_status: AffectedNodeStatus
    node_action_kube_check: bool

    def __init__(self, kubecli: KrknKubernetes):
    def __init__(self, kubecli: KrknKubernetes, node_action_kube_check: bool, affected_nodes_status: AffectedNodeStatus):
        self.kubecli = kubecli
        self.affected_nodes_status = affected_nodes_status
        self.node_action_kube_check = node_action_kube_check

    # Node scenario to start the node
    def node_start_scenario(self, instance_kill_count, node, timeout):
@@ -28,6 +32,7 @@ class abstract_node_scenarios:
        logging.info("Waiting for %s seconds before starting the node" % (duration))
        time.sleep(duration)
        self.node_start_scenario(instance_kill_count, node, timeout)
        self.affected_nodes_status.merge_affected_nodes()
        logging.info("node_stop_start_scenario has been successfully injected!")

    def helper_node_stop_start_scenario(self, instance_kill_count, node, timeout):
@@ -36,6 +41,20 @@ class abstract_node_scenarios:
        self.helper_node_start_scenario(instance_kill_count, node, timeout)
        logging.info("helper_node_stop_start_scenario has been successfully injected!")

    # Node scenario to detach and attach the disk
    def node_disk_detach_attach_scenario(self, instance_kill_count, node, timeout, duration):
        logging.info("Starting disk_detach_attach_scenario injection")
        disk_attachment_details = self.get_disk_attachment_info(instance_kill_count, node)
        if disk_attachment_details:
            self.disk_detach_scenario(instance_kill_count, node, timeout)
            logging.info("Waiting for %s seconds before attaching the disk" % (duration))
            time.sleep(duration)
            self.disk_attach_scenario(instance_kill_count, disk_attachment_details, timeout)
            logging.info("node_disk_detach_attach_scenario has been successfully injected!")
        else:
            logging.error("Node %s has only root disk attached" % (node))
            logging.error("node_disk_detach_attach_scenario failed!")

    # Node scenario to terminate the node
    def node_termination_scenario(self, instance_kill_count, node, timeout):
        pass
@@ -47,13 +66,15 @@ class abstract_node_scenarios:
    # Node scenario to stop the kubelet
    def stop_kubelet_scenario(self, instance_kill_count, node, timeout):
        for _ in range(instance_kill_count):
            affected_node = AffectedNode(node)
            try:
                logging.info("Starting stop_kubelet_scenario injection")
                logging.info("Stopping the kubelet of the node %s" % (node))
                runcommand.run(
                    "oc debug node/" + node + " -- chroot /host systemctl stop kubelet"
                )
                nodeaction.wait_for_unknown_status(node, timeout, self.kubecli)
                nodeaction.wait_for_unknown_status(node, timeout, self.kubecli, affected_node)

                logging.info("The kubelet of the node %s has been stopped" % (node))
                logging.info("stop_kubelet_scenario has been successfully injected!")
            except Exception as e:
@@ -63,17 +84,20 @@ class abstract_node_scenarios:
                )
                logging.error("stop_kubelet_scenario injection failed!")
                raise e
            self.add_affected_node(affected_node)

    # Node scenario to stop and start the kubelet
    def stop_start_kubelet_scenario(self, instance_kill_count, node, timeout):
        logging.info("Starting stop_start_kubelet_scenario injection")
        self.stop_kubelet_scenario(instance_kill_count, node, timeout)
        self.node_reboot_scenario(instance_kill_count, node, timeout)
        self.affected_nodes_status.merge_affected_nodes()
        logging.info("stop_start_kubelet_scenario has been successfully injected!")

    # Node scenario to restart the kubelet
    def restart_kubelet_scenario(self, instance_kill_count, node, timeout):
        for _ in range(instance_kill_count):
            affected_node = AffectedNode(node)
            try:
                logging.info("Starting restart_kubelet_scenario injection")
                logging.info("Restarting the kubelet of the node %s" % (node))
@@ -82,8 +106,8 @@ class abstract_node_scenarios:
                    + node
                    + " -- chroot /host systemctl restart kubelet &"
                )
                nodeaction.wait_for_not_ready_status(node, timeout, self.kubecli)
                nodeaction.wait_for_ready_status(node, timeout, self.kubecli)
                nodeaction.wait_for_not_ready_status(node, timeout, self.kubecli, affected_node)
                nodeaction.wait_for_ready_status(node, timeout, self.kubecli, affected_node)
                logging.info("The kubelet of the node %s has been restarted" % (node))
                logging.info("restart_kubelet_scenario has been successfully injected!")
            except Exception as e:
@@ -93,6 +117,7 @@ class abstract_node_scenarios:
                )
                logging.error("restart_kubelet_scenario injection failed!")
                raise e
            self.add_affected_node(affected_node)

    # Node scenario to crash the node
    def node_crash_scenario(self, instance_kill_count, node, timeout):
@@ -116,3 +141,8 @@ class abstract_node_scenarios:
    # Node scenario to check service status on helper node
    def node_service_status(self, node, service, ssh_private_key, timeout):
        pass

    # Node Scenario to block all inbound and outbound traffic to a specific node
    # Currently only configured for azure
    def node_block_scenario(self, instance_kill_count, node, timeout, duration):
        pass
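
A hedged sketch of the new constructor contract introduced in this hunk: every node-actions class now takes the kube-check flag and a shared AffectedNodeStatus collector in addition to the Kubernetes client. The dummy subclass and build helper below are illustrative only; real callers use the cloud-specific classes later in this diff:

from krkn_lib.k8s import KrknKubernetes
from krkn_lib.models.k8s import AffectedNodeStatus

from krkn.scenario_plugins.node_actions.abstract_node_scenarios import (
    abstract_node_scenarios,
)


# Illustrative subclass: inherits the new three-argument constructor unchanged.
class dummy_node_scenarios(abstract_node_scenarios):
    pass


def build(kubecli: KrknKubernetes, affected_nodes_status: AffectedNodeStatus) -> dummy_node_scenarios:
    # node_action_kube_check=True keeps the post-action Kubernetes status checks enabled
    return dummy_node_scenarios(kubecli, True, affected_nodes_status)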
@@ -18,7 +18,7 @@ from krkn.scenario_plugins.node_actions.abstract_node_scenarios import (
|
||||
abstract_node_scenarios,
|
||||
)
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
|
||||
from krkn_lib.models.k8s import AffectedNode, AffectedNodeStatus
|
||||
|
||||
class Alibaba:
|
||||
def __init__(self):
|
||||
@@ -161,8 +161,9 @@ class Alibaba:
|
||||
return None
|
||||
|
||||
# Wait until the node instance is running
|
||||
def wait_until_running(self, instance_id, timeout):
|
||||
def wait_until_running(self, instance_id, timeout, affected_node):
|
||||
time_counter = 0
|
||||
start_time = time.time()
|
||||
status = self.get_vm_status(instance_id)
|
||||
while status != "Running":
|
||||
status = self.get_vm_status(instance_id)
|
||||
@@ -174,11 +175,15 @@ class Alibaba:
|
||||
if time_counter >= timeout:
|
||||
logging.info("ECS %s is still not ready in allotted time" % instance_id)
|
||||
return False
|
||||
end_time = time.time()
|
||||
if affected_node:
|
||||
affected_node.set_affected_node_status("running", end_time - start_time)
|
||||
return True
|
||||
|
||||
# Wait until the node instance is stopped
|
||||
def wait_until_stopped(self, instance_id, timeout):
|
||||
def wait_until_stopped(self, instance_id, timeout, affected_node):
|
||||
time_counter = 0
|
||||
start_time = time.time()
|
||||
status = self.get_vm_status(instance_id)
|
||||
while status != "Stopped":
|
||||
status = self.get_vm_status(instance_id)
|
||||
@@ -192,10 +197,14 @@ class Alibaba:
|
||||
"Vm %s is still not stopped in allotted time" % instance_id
|
||||
)
|
||||
return False
|
||||
end_time = time.time()
|
||||
if affected_node:
|
||||
affected_node.set_affected_node_status("stopped", end_time - start_time)
|
||||
return True
|
||||
|
||||
# Wait until the node instance is terminated
|
||||
def wait_until_released(self, instance_id, timeout):
|
||||
def wait_until_released(self, instance_id, timeout, affected_node):
|
||||
start_time = time.time()
|
||||
statuses = self.get_vm_status(instance_id)
|
||||
time_counter = 0
|
||||
while statuses and statuses != "Released":
|
||||
@@ -210,26 +219,35 @@ class Alibaba:
|
||||
return False
|
||||
|
||||
logging.info("ECS %s is released" % instance_id)
|
||||
end_time = time.time()
|
||||
if affected_node:
|
||||
affected_node.set_affected_node_status("terminated", end_time - start_time)
|
||||
return True
|
||||
|
||||
|
||||
# krkn_lib
|
||||
class alibaba_node_scenarios(abstract_node_scenarios):
|
||||
def __init__(self, kubecli: KrknKubernetes):
|
||||
def __init__(self, kubecli: KrknKubernetes, node_action_kube_check: bool, affected_nodes_status: AffectedNodeStatus):
|
||||
super().__init__(kubecli, node_action_kube_check, affected_nodes_status)
|
||||
self.alibaba = Alibaba()
|
||||
self.node_action_kube_check = node_action_kube_check
|
||||
|
||||
|
||||
# Node scenario to start the node
|
||||
def node_start_scenario(self, instance_kill_count, node, timeout):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
logging.info("Starting node_start_scenario injection")
|
||||
vm_id = self.alibaba.get_instance_id(node)
|
||||
affected_node.node_id = vm_id
|
||||
logging.info(
|
||||
"Starting the node %s with instance ID: %s " % (node, vm_id)
|
||||
)
|
||||
self.alibaba.start_instances(vm_id)
|
||||
self.alibaba.wait_until_running(vm_id, timeout)
|
||||
nodeaction.wait_for_ready_status(node, timeout, self.kubecli)
|
||||
self.alibaba.wait_until_running(vm_id, timeout, affected_node)
|
||||
if self.node_action_kube_check:
|
||||
nodeaction.wait_for_ready_status(node, timeout, self.kubecli, affected_node)
|
||||
logging.info("Node with instance ID: %s is in running state" % node)
|
||||
logging.info("node_start_scenario has been successfully injected!")
|
||||
except Exception as e:
|
||||
@@ -239,20 +257,24 @@ class alibaba_node_scenarios(abstract_node_scenarios):
|
||||
)
|
||||
logging.error("node_start_scenario injection failed!")
|
||||
raise e
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
# Node scenario to stop the node
|
||||
def node_stop_scenario(self, instance_kill_count, node, timeout):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
logging.info("Starting node_stop_scenario injection")
|
||||
vm_id = self.alibaba.get_instance_id(node)
|
||||
affected_node.node_id = vm_id
|
||||
logging.info(
|
||||
"Stopping the node %s with instance ID: %s " % (node, vm_id)
|
||||
)
|
||||
self.alibaba.stop_instances(vm_id)
|
||||
self.alibaba.wait_until_stopped(vm_id, timeout)
|
||||
self.alibaba.wait_until_stopped(vm_id, timeout, affected_node)
|
||||
logging.info("Node with instance ID: %s is in stopped state" % vm_id)
|
||||
nodeaction.wait_for_unknown_status(node, timeout, self.kubecli)
|
||||
if self.node_action_kube_check:
|
||||
nodeaction.wait_for_unknown_status(node, timeout, self.kubecli, affected_node)
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to stop node instance. Encountered following exception: %s. "
|
||||
@@ -260,23 +282,26 @@ class alibaba_node_scenarios(abstract_node_scenarios):
|
||||
)
|
||||
logging.error("node_stop_scenario injection failed!")
|
||||
raise e
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
# Might need to stop and then release the instance
|
||||
# Node scenario to terminate the node
|
||||
def node_termination_scenario(self, instance_kill_count, node, timeout):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
logging.info(
|
||||
"Starting node_termination_scenario injection by first stopping instance"
|
||||
)
|
||||
vm_id = self.alibaba.get_instance_id(node)
|
||||
affected_node.node_id = vm_id
|
||||
self.alibaba.stop_instances(vm_id)
|
||||
self.alibaba.wait_until_stopped(vm_id, timeout)
|
||||
self.alibaba.wait_until_stopped(vm_id, timeout, affected_node)
|
||||
logging.info(
|
||||
"Releasing the node %s with instance ID: %s " % (node, vm_id)
|
||||
)
|
||||
self.alibaba.release_instance(vm_id)
|
||||
self.alibaba.wait_until_released(vm_id, timeout)
|
||||
self.alibaba.wait_until_released(vm_id, timeout, affected_node)
|
||||
logging.info("Node with instance ID: %s has been released" % node)
|
||||
logging.info(
|
||||
"node_termination_scenario has been successfully injected!"
|
||||
@@ -288,17 +313,21 @@ class alibaba_node_scenarios(abstract_node_scenarios):
|
||||
)
|
||||
logging.error("node_termination_scenario injection failed!")
|
||||
raise e
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
# Node scenario to reboot the node
|
||||
def node_reboot_scenario(self, instance_kill_count, node, timeout):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
logging.info("Starting node_reboot_scenario injection")
|
||||
instance_id = self.alibaba.get_instance_id(node)
|
||||
affected_node.node_id = instance_id
|
||||
logging.info("Rebooting the node with instance ID: %s " % (instance_id))
|
||||
self.alibaba.reboot_instances(instance_id)
|
||||
nodeaction.wait_for_unknown_status(node, timeout, self.kubecli)
|
||||
nodeaction.wait_for_ready_status(node, timeout, self.kubecli)
|
||||
if self.node_action_kube_check:
|
||||
nodeaction.wait_for_unknown_status(node, timeout, self.kubecli, affected_node)
|
||||
nodeaction.wait_for_ready_status(node, timeout, self.kubecli, affected_node)
|
||||
logging.info(
|
||||
"Node with instance ID: %s has been rebooted" % (instance_id)
|
||||
)
|
||||
@@ -310,3 +339,4 @@ class alibaba_node_scenarios(abstract_node_scenarios):
|
||||
)
|
||||
logging.error("node_reboot_scenario injection failed!")
|
||||
raise e
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
@@ -7,12 +7,13 @@ from krkn.scenario_plugins.node_actions.abstract_node_scenarios import (
|
||||
abstract_node_scenarios,
|
||||
)
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
|
||||
from krkn_lib.models.k8s import AffectedNode, AffectedNodeStatus
|
||||
|
||||
class AWS:
|
||||
def __init__(self):
|
||||
self.boto_client = boto3.client("ec2")
|
||||
self.boto_instance = boto3.resource("ec2").Instance("id")
|
||||
self.boto_resource = boto3.resource("ec2")
|
||||
self.boto_instance = self.boto_resource.Instance("id")
|
||||
|
||||
# Get the instance ID of the node
|
||||
def get_instance_id(self, node):
|
||||
@@ -76,9 +77,13 @@ class AWS:
|
||||
# until a successful state is reached. An error is returned after 40 failed checks
|
||||
# Setting timeout for consistency with other cloud functions
|
||||
# Wait until the node instance is running
|
||||
def wait_until_running(self, instance_id, timeout=600):
|
||||
def wait_until_running(self, instance_id, timeout=600, affected_node=None):
|
||||
try:
|
||||
start_time = time.time()
|
||||
self.boto_instance.wait_until_running(InstanceIds=[instance_id])
|
||||
end_time = time.time()
|
||||
if affected_node:
|
||||
affected_node.set_affected_node_status("running", end_time - start_time)
|
||||
return True
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
@@ -88,9 +93,13 @@ class AWS:
|
||||
return False
|
||||
|
||||
# Wait until the node instance is stopped
|
||||
def wait_until_stopped(self, instance_id, timeout=600):
|
||||
def wait_until_stopped(self, instance_id, timeout=600, affected_node=None):
|
||||
try:
|
||||
start_time = time.time()
|
||||
self.boto_instance.wait_until_stopped(InstanceIds=[instance_id])
|
||||
end_time = time.time()
|
||||
if affected_node:
|
||||
affected_node.set_affected_node_status("stopped", end_time - start_time)
|
||||
return True
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
@@ -100,9 +109,13 @@ class AWS:
|
||||
return False
|
||||
|
||||
# Wait until the node instance is terminated
|
||||
def wait_until_terminated(self, instance_id, timeout=600):
|
||||
def wait_until_terminated(self, instance_id, timeout=600, affected_node=None):
|
||||
try:
|
||||
start_time = time.time()
|
||||
self.boto_instance.wait_until_terminated(InstanceIds=[instance_id])
|
||||
end_time = time.time()
|
||||
if affected_node:
|
||||
affected_node.set_affected_node_status("terminated", end_time - start_time)
|
||||
return True
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
@@ -179,25 +192,95 @@ class AWS:
|
||||
|
||||
raise RuntimeError()
|
||||
|
||||
# Detach volume
|
||||
def detach_volumes(self, volumes_ids: list):
|
||||
for volume in volumes_ids:
|
||||
try:
|
||||
self.boto_client.detach_volume(VolumeId=volume, Force=True)
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Detaching volume %s failed with exception: %s"
|
||||
% (volume, e)
|
||||
)
|
||||
|
||||
# Attach volume
|
||||
def attach_volume(self, attachment: dict):
|
||||
try:
|
||||
if self.get_volume_state(attachment["VolumeId"]) == "in-use":
|
||||
logging.info(
|
||||
"Volume %s is already in use." % attachment["VolumeId"]
|
||||
)
|
||||
return
|
||||
logging.info(
|
||||
"Attaching the %s volumes to instance %s."
|
||||
% (attachment["VolumeId"], attachment["InstanceId"])
|
||||
)
|
||||
self.boto_client.attach_volume(
|
||||
InstanceId=attachment["InstanceId"],
|
||||
Device=attachment["Device"],
|
||||
VolumeId=attachment["VolumeId"]
|
||||
)
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed attaching disk %s to the %s instance. "
|
||||
"Encountered following exception: %s"
|
||||
% (attachment['VolumeId'], attachment['InstanceId'], e)
|
||||
)
|
||||
raise RuntimeError()
|
||||
|
||||
# Get IDs of node volumes
|
||||
def get_volumes_ids(self, instance_id: list):
|
||||
response = self.boto_client.describe_instances(InstanceIds=instance_id)
|
||||
instance_attachment_details = response["Reservations"][0]["Instances"][0]["BlockDeviceMappings"]
|
||||
root_volume_device_name = self.get_root_volume_id(instance_id)
|
||||
volume_ids = []
|
||||
for device in instance_attachment_details:
|
||||
if device["DeviceName"] != root_volume_device_name:
|
||||
volume_id = device["Ebs"]["VolumeId"]
|
||||
volume_ids.append(volume_id)
|
||||
return volume_ids
|
||||
|
||||
# Get volumes attachment details
|
||||
def get_volume_attachment_details(self, volume_ids: list):
|
||||
response = self.boto_client.describe_volumes(VolumeIds=volume_ids)
|
||||
volumes_details = response["Volumes"]
|
||||
return volumes_details
|
||||
|
||||
# Get root volume
|
||||
def get_root_volume_id(self, instance_id):
|
||||
instance_id = instance_id[0]
|
||||
instance = self.boto_resource.Instance(instance_id)
|
||||
root_volume_id = instance.root_device_name
|
||||
return root_volume_id
|
||||
|
||||
# Get volume state
|
||||
def get_volume_state(self, volume_id: str):
|
||||
volume = self.boto_resource.Volume(volume_id)
|
||||
state = volume.state
|
||||
return state
|
||||
|
||||
# krkn_lib
|
||||
class aws_node_scenarios(abstract_node_scenarios):
|
||||
def __init__(self, kubecli: KrknKubernetes):
|
||||
super().__init__(kubecli)
|
||||
def __init__(self, kubecli: KrknKubernetes, node_action_kube_check: bool, affected_nodes_status: AffectedNodeStatus):
|
||||
super().__init__(kubecli, node_action_kube_check, affected_nodes_status)
|
||||
self.aws = AWS()
|
||||
self.node_action_kube_check = node_action_kube_check
|
||||
|
||||
# Node scenario to start the node
|
||||
def node_start_scenario(self, instance_kill_count, node, timeout):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
logging.info("Starting node_start_scenario injection")
|
||||
instance_id = self.aws.get_instance_id(node)
|
||||
affected_node.node_id = instance_id
|
||||
logging.info(
|
||||
"Starting the node %s with instance ID: %s " % (node, instance_id)
|
||||
)
|
||||
self.aws.start_instances(instance_id)
|
||||
self.aws.wait_until_running(instance_id)
|
||||
nodeaction.wait_for_ready_status(node, timeout, self.kubecli)
|
||||
self.aws.wait_until_running(instance_id, affected_node=affected_node)
|
||||
if self.node_action_kube_check:
|
||||
nodeaction.wait_for_ready_status(node, timeout, self.kubecli, affected_node)
|
||||
logging.info(
|
||||
"Node with instance ID: %s is in running state" % (instance_id)
|
||||
)
|
||||
@@ -210,22 +293,26 @@ class aws_node_scenarios(abstract_node_scenarios):
|
||||
logging.error("node_start_scenario injection failed!")
|
||||
|
||||
raise RuntimeError()
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
# Node scenario to stop the node
|
||||
def node_stop_scenario(self, instance_kill_count, node, timeout):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
logging.info("Starting node_stop_scenario injection")
|
||||
instance_id = self.aws.get_instance_id(node)
|
||||
affected_node.node_id = instance_id
|
||||
logging.info(
|
||||
"Stopping the node %s with instance ID: %s " % (node, instance_id)
|
||||
)
|
||||
self.aws.stop_instances(instance_id)
|
||||
self.aws.wait_until_stopped(instance_id)
|
||||
self.aws.wait_until_stopped(instance_id, affected_node=affected_node)
|
||||
logging.info(
|
||||
"Node with instance ID: %s is in stopped state" % (instance_id)
|
||||
)
|
||||
nodeaction.wait_for_unknown_status(node, timeout, self.kubecli)
|
||||
if self.node_action_kube_check:
|
||||
nodeaction.wait_for_unknown_status(node, timeout, self.kubecli, affected_node=affected_node)
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to stop node instance. Encountered following exception: %s. "
|
||||
@@ -234,19 +321,22 @@ class aws_node_scenarios(abstract_node_scenarios):
|
||||
logging.error("node_stop_scenario injection failed!")
|
||||
|
||||
raise RuntimeError()
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
# Node scenario to terminate the node
|
||||
def node_termination_scenario(self, instance_kill_count, node, timeout):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
logging.info("Starting node_termination_scenario injection")
|
||||
instance_id = self.aws.get_instance_id(node)
|
||||
affected_node.node_id = instance_id
|
||||
logging.info(
|
||||
"Terminating the node %s with instance ID: %s "
|
||||
% (node, instance_id)
|
||||
)
|
||||
self.aws.terminate_instances(instance_id)
|
||||
self.aws.wait_until_terminated(instance_id)
|
||||
self.aws.wait_until_terminated(instance_id, affected_node=affected_node)
|
||||
for _ in range(timeout):
|
||||
if node not in self.kubecli.list_nodes():
|
||||
break
|
||||
@@ -265,19 +355,23 @@ class aws_node_scenarios(abstract_node_scenarios):
|
||||
logging.error("node_termination_scenario injection failed!")
|
||||
|
||||
raise RuntimeError()
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
# Node scenario to reboot the node
|
||||
def node_reboot_scenario(self, instance_kill_count, node, timeout):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
logging.info("Starting node_reboot_scenario injection" + str(node))
|
||||
instance_id = self.aws.get_instance_id(node)
|
||||
affected_node.node_id = instance_id
|
||||
logging.info(
|
||||
"Rebooting the node %s with instance ID: %s " % (node, instance_id)
|
||||
)
|
||||
self.aws.reboot_instances(instance_id)
|
||||
nodeaction.wait_for_unknown_status(node, timeout, self.kubecli)
|
||||
nodeaction.wait_for_ready_status(node, timeout, self.kubecli)
|
||||
if self.node_action_kube_check:
|
||||
nodeaction.wait_for_unknown_status(node, timeout, self.kubecli, affected_node)
|
||||
nodeaction.wait_for_ready_status(node, timeout, self.kubecli, affected_node)
|
||||
logging.info(
|
||||
"Node with instance ID: %s has been rebooted" % (instance_id)
|
||||
)
|
||||
@@ -290,3 +384,50 @@ class aws_node_scenarios(abstract_node_scenarios):
|
||||
logging.error("node_reboot_scenario injection failed!")
|
||||
|
||||
raise RuntimeError()
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
# Get volume attachment info
|
||||
def get_disk_attachment_info(self, instance_kill_count, node):
|
||||
for _ in range(instance_kill_count):
|
||||
try:
|
||||
logging.info("Obtaining disk attachment information")
|
||||
instance_id = (self.aws.get_instance_id(node)).split()
|
||||
volumes_ids = self.aws.get_volumes_ids(instance_id)
|
||||
if volumes_ids:
|
||||
vol_attachment_details = self.aws.get_volume_attachment_details(
|
||||
volumes_ids
|
||||
)
|
||||
return vol_attachment_details
|
||||
return
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to obtain disk attachment information of %s node. "
|
||||
"Encounteres following exception: %s." % (node, e)
|
||||
)
|
||||
raise RuntimeError()
|
||||
|
||||
# Node scenario to detach the volume
|
||||
def disk_detach_scenario(self, instance_kill_count, node, timeout):
|
||||
for _ in range(instance_kill_count):
|
||||
try:
|
||||
logging.info("Starting disk_detach_scenario injection")
|
||||
instance_id = (self.aws.get_instance_id(node)).split()
|
||||
volumes_ids = self.aws.get_volumes_ids(instance_id)
|
||||
logging.info(
|
||||
"Detaching the %s volumes from instance %s "
|
||||
% (volumes_ids, node)
|
||||
)
|
||||
self.aws.detach_volumes(volumes_ids)
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to detach disk from %s node. Encountered following"
|
||||
"exception: %s." % (node, e)
|
||||
)
|
||||
logging.debug("")
|
||||
raise RuntimeError()
|
||||
|
||||
# Node scenario to attach the volume
|
||||
def disk_attach_scenario(self, instance_kill_count, attachment_details, timeout):
|
||||
for _ in range(instance_kill_count):
|
||||
for attachment in attachment_details:
|
||||
self.aws.attach_volume(attachment["Attachments"][0])
|
||||
|
||||
@@ -6,32 +6,53 @@ from krkn.scenario_plugins.node_actions.abstract_node_scenarios import (
|
||||
abstract_node_scenarios,
|
||||
)
|
||||
from azure.mgmt.compute import ComputeManagementClient
|
||||
from azure.mgmt.network import NetworkManagementClient
|
||||
from azure.mgmt.network.models import SecurityRule, Subnet
|
||||
|
||||
from azure.identity import DefaultAzureCredential
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
|
||||
from krkn_lib.models.k8s import AffectedNode, AffectedNodeStatus
|
||||
|
||||
class Azure:
|
||||
def __init__(self):
|
||||
logging.info("azure " + str(self))
|
||||
# Acquire a credential object using CLI-based authentication.
|
||||
credentials = DefaultAzureCredential()
|
||||
logging.info("credential " + str(credentials))
|
||||
# az_account = runcommand.invoke("az account list -o yaml")
|
||||
# az_account_yaml = yaml.safe_load(az_account, Loader=yaml.FullLoader)
|
||||
logger = logging.getLogger("azure")
|
||||
logger.setLevel(logging.WARNING)
|
||||
subscription_id = os.getenv("AZURE_SUBSCRIPTION_ID")
|
||||
self.compute_client = ComputeManagementClient(credentials, subscription_id)
|
||||
self.compute_client = ComputeManagementClient(credentials, subscription_id, logging=logger)
self.network_client = NetworkManagementClient(credentials, subscription_id, logging=logger)
|
||||
|
||||
# Get the instance ID of the node
|
||||
def get_instance_id(self, node_name):
|
||||
vm_list = self.compute_client.virtual_machines.list_all()
|
||||
for vm in vm_list:
|
||||
array = vm.id.split("/")
|
||||
resource_group = array[4]
|
||||
vm_name = array[-1]
|
||||
if node_name == vm_name:
|
||||
return vm_name, resource_group
|
||||
if node_name == vm.name:
|
||||
resource_group = array[4]
|
||||
return vm.name, resource_group
|
||||
logging.error("Couldn't find vm with name " + str(node_name))
|
||||
|
||||
# Get the instance ID of the node
|
||||
def get_network_interface(self, node_name, resource_group):
|
||||
|
||||
vm = self.compute_client.virtual_machines.get(resource_group, node_name,expand='instanceView')
|
||||
|
||||
for nic in vm.network_profile.network_interfaces:
|
||||
nic_name = nic.id.split("/")[-1]
|
||||
nic = self.network_client.network_interfaces.get(resource_group, nic_name)
|
||||
location = nic.location
|
||||
subnet_list = nic.ip_configurations[0].subnet.id.split('/')
|
||||
subnet = subnet_list[-1]
|
||||
virtual_network = subnet_list[-3]
|
||||
network_resource_group = subnet_list[-7]
|
||||
|
||||
private_ip = nic.ip_configurations[0].private_ip_address
|
||||
return subnet, virtual_network, private_ip, network_resource_group, location
|
||||
|
||||
# Start the node instance
|
||||
def start_instances(self, group_name, vm_name):
|
||||
try:
|
||||
@@ -90,8 +111,9 @@ class Azure:
|
||||
return status
|
||||
|
||||
# Wait until the node instance is running
|
||||
def wait_until_running(self, resource_group, vm_name, timeout):
|
||||
def wait_until_running(self, resource_group, vm_name, timeout, affected_node):
|
||||
time_counter = 0
|
||||
start_time = time.time()
|
||||
status = self.get_vm_status(resource_group, vm_name)
|
||||
while status and status.code != "PowerState/running":
|
||||
status = self.get_vm_status(resource_group, vm_name)
|
||||
@@ -101,11 +123,15 @@ class Azure:
|
||||
if time_counter >= timeout:
|
||||
logging.info("Vm %s is still not ready in allotted time" % vm_name)
|
||||
return False
|
||||
end_time = time.time()
|
||||
if affected_node:
|
||||
affected_node.set_affected_node_status("running", end_time - start_time)
|
||||
return True
|
||||
|
||||
# Wait until the node instance is stopped
|
||||
def wait_until_stopped(self, resource_group, vm_name, timeout):
|
||||
def wait_until_stopped(self, resource_group, vm_name, timeout, affected_node):
|
||||
time_counter = 0
|
||||
start_time = time.time()
|
||||
status = self.get_vm_status(resource_group, vm_name)
|
||||
while status and status.code != "PowerState/stopped":
|
||||
status = self.get_vm_status(resource_group, vm_name)
|
||||
@@ -115,10 +141,14 @@ class Azure:
|
||||
if time_counter >= timeout:
|
||||
logging.info("Vm %s is still not stopped in allotted time" % vm_name)
|
||||
return False
|
||||
end_time = time.time()
|
||||
if affected_node:
|
||||
affected_node.set_affected_node_status("stopped", end_time - start_time)
|
||||
return True
|
||||
|
||||
# Wait until the node instance is terminated
|
||||
def wait_until_terminated(self, resource_group, vm_name, timeout):
|
||||
def wait_until_terminated(self, resource_group, vm_name, timeout, affected_node):
|
||||
start_time = time.time()
|
||||
statuses = self.compute_client.virtual_machines.instance_view(
|
||||
resource_group, vm_name
|
||||
).statuses[0]
|
||||
@@ -137,29 +167,72 @@ class Azure:
|
||||
return False
|
||||
except Exception:
|
||||
logging.info("Vm %s is terminated" % vm_name)
|
||||
end_time = time.time()
|
||||
if affected_node:
|
||||
affected_node.set_affected_node_status("terminated", end_time - start_time)
|
||||
return True
|
||||
|
||||
|
||||
def create_security_group(self, resource_group, name, region, ip_address):
|
||||
|
||||
inbound_rule = SecurityRule(name="denyInbound", source_address_prefix="0.0.0.0/0", source_port_range="*", destination_address_prefix=ip_address, destination_port_range="*", priority=100, protocol="*",
|
||||
access="Deny", direction="Inbound")
|
||||
outbound_rule = SecurityRule(name="denyOutbound", source_port_range="*", source_address_prefix=ip_address,destination_address_prefix="0.0.0.0/0", destination_port_range="*", priority=100, protocol="*",
|
||||
access="Deny", direction="Outbound")
|
||||
|
||||
# create network resource group with deny in and out rules
|
||||
nsg = self.network_client.network_security_groups.begin_create_or_update(resource_group, name, parameters={"location": region, "security_rules": [inbound_rule,outbound_rule]})
|
||||
return nsg.result().id
|
||||
|
||||
def delete_security_group(self, resource_group, name):
|
||||
|
||||
# find and delete network security group
|
||||
nsg = self.network_client.network_security_groups.begin_delete(resource_group,name)
|
||||
if nsg.result() is not None:
|
||||
print(nsg.result().as_dict())
|
||||
|
||||
def update_subnet(self, network_group_id, resource_group, subnet_name, vnet_name):
|
||||
|
||||
subnet = self.network_client.subnets.get(
|
||||
resource_group_name=resource_group,
|
||||
virtual_network_name=vnet_name,
|
||||
subnet_name=subnet_name)
|
||||
old_network_group = subnet.network_security_group.id
|
||||
subnet.network_security_group.id = network_group_id
|
||||
# update subnet
|
||||
|
||||
self.network_client.subnets.begin_create_or_update(
|
||||
resource_group_name=resource_group,
|
||||
virtual_network_name=vnet_name,
|
||||
subnet_name=subnet_name,
|
||||
subnet_parameters=subnet)
|
||||
return old_network_group
|
||||
|
||||
# krkn_lib
|
||||
class azure_node_scenarios(abstract_node_scenarios):
|
||||
def __init__(self, kubecli: KrknKubernetes):
|
||||
super().__init__(kubecli)
|
||||
def __init__(self, kubecli: KrknKubernetes, node_action_kube_check: bool, affected_nodes_status: AffectedNodeStatus):
|
||||
super().__init__(kubecli, node_action_kube_check, affected_nodes_status)
|
||||
logging.info("init in azure")
|
||||
self.azure = Azure()
|
||||
self.node_action_kube_check = node_action_kube_check
|
||||
|
||||
|
||||
# Node scenario to start the node
|
||||
def node_start_scenario(self, instance_kill_count, node, timeout):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
logging.info("Starting node_start_scenario injection")
|
||||
vm_name, resource_group = self.azure.get_instance_id(node)
|
||||
affected_node.node_id = vm_name
|
||||
logging.info(
|
||||
"Starting the node %s with instance ID: %s "
|
||||
% (vm_name, resource_group)
|
||||
)
|
||||
self.azure.start_instances(resource_group, vm_name)
|
||||
self.azure.wait_until_running(resource_group, vm_name, timeout)
|
||||
nodeaction.wait_for_ready_status(vm_name, timeout, self.kubecli)
|
||||
self.azure.wait_until_running(resource_group, vm_name, timeout, affected_node=affected_node)
|
||||
if self.node_action_kube_check:
|
||||
nodeaction.wait_for_ready_status(vm_name, timeout, self.kubecli, affected_node)
|
||||
logging.info("Node with instance ID: %s is in running state" % node)
|
||||
logging.info("node_start_scenario has been successfully injected!")
|
||||
except Exception as e:
|
||||
@@ -170,21 +243,25 @@ class azure_node_scenarios(abstract_node_scenarios):
|
||||
logging.error("node_start_scenario injection failed!")
|
||||
|
||||
raise RuntimeError()
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
# Node scenario to stop the node
|
||||
def node_stop_scenario(self, instance_kill_count, node, timeout):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
logging.info("Starting node_stop_scenario injection")
|
||||
vm_name, resource_group = self.azure.get_instance_id(node)
|
||||
affected_node.node_id = vm_name
|
||||
logging.info(
|
||||
"Stopping the node %s with instance ID: %s "
|
||||
% (vm_name, resource_group)
|
||||
)
|
||||
self.azure.stop_instances(resource_group, vm_name)
|
||||
self.azure.wait_until_stopped(resource_group, vm_name, timeout)
|
||||
self.azure.wait_until_stopped(resource_group, vm_name, timeout, affected_node=affected_node)
|
||||
logging.info("Node with instance ID: %s is in stopped state" % vm_name)
|
||||
nodeaction.wait_for_unknown_status(vm_name, timeout, self.kubecli)
|
||||
if self.node_action_kube_check:
|
||||
nodeaction.wait_for_unknown_status(vm_name, timeout, self.kubecli, affected_node)
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to stop node instance. Encountered following exception: %s. "
|
||||
@@ -193,19 +270,22 @@ class azure_node_scenarios(abstract_node_scenarios):
|
||||
logging.error("node_stop_scenario injection failed!")
|
||||
|
||||
raise RuntimeError()
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
# Node scenario to terminate the node
|
||||
def node_termination_scenario(self, instance_kill_count, node, timeout):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
logging.info("Starting node_termination_scenario injection")
|
||||
vm_name, resource_group = self.azure.get_instance_id(node)
|
||||
affected_node.node_id = vm_name
|
||||
logging.info(
|
||||
"Terminating the node %s with instance ID: %s "
|
||||
% (vm_name, resource_group)
|
||||
)
|
||||
self.azure.terminate_instances(resource_group, vm_name)
|
||||
self.azure.wait_until_terminated(resource_group, vm_name, timeout)
|
||||
self.azure.wait_until_terminated(resource_group, vm_name, timeout, affected_node)
|
||||
for _ in range(timeout):
|
||||
if vm_name not in self.kubecli.list_nodes():
|
||||
break
|
||||
@@ -224,20 +304,26 @@ class azure_node_scenarios(abstract_node_scenarios):
|
||||
logging.error("node_termination_scenario injection failed!")
|
||||
|
||||
raise RuntimeError()
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
|
||||
# Node scenario to reboot the node
|
||||
def node_reboot_scenario(self, instance_kill_count, node, timeout):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
logging.info("Starting node_reboot_scenario injection")
|
||||
vm_name, resource_group = self.azure.get_instance_id(node)
|
||||
affected_node.node_id = vm_name
|
||||
logging.info(
|
||||
"Rebooting the node %s with instance ID: %s "
|
||||
% (vm_name, resource_group)
|
||||
)
|
||||
|
||||
self.azure.reboot_instances(resource_group, vm_name)
|
||||
nodeaction.wait_for_unknown_status(vm_name, timeout, self.kubecli)
|
||||
nodeaction.wait_for_ready_status(vm_name, timeout, self.kubecli)
|
||||
if self.node_action_kube_check:
|
||||
nodeaction.wait_for_ready_status(vm_name, timeout, self.kubecli, affected_node)
|
||||
|
||||
logging.info("Node with instance ID: %s has been rebooted" % (vm_name))
|
||||
logging.info("node_reboot_scenario has been successfully injected!")
|
||||
except Exception as e:
|
||||
@@ -248,3 +334,40 @@ class azure_node_scenarios(abstract_node_scenarios):
|
||||
logging.error("node_reboot_scenario injection failed!")
|
||||
|
||||
raise RuntimeError()
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
# Node scenario to block traffic to the node
|
||||
def node_block_scenario(self, instance_kill_count, node, timeout, duration):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
logging.info("Starting node_block_scenario injection")
|
||||
vm_name, resource_group = self.azure.get_instance_id(node)
|
||||
|
||||
subnet, virtual_network, private_ip, network_resource_group, location = self.azure.get_network_interface(vm_name, resource_group)
|
||||
affected_node.node_id = vm_name
|
||||
|
||||
logging.info(
|
||||
"block the node %s with instance ID: %s "
|
||||
% (vm_name, network_resource_group)
|
||||
)
|
||||
network_group_id = self.azure.create_security_group(network_resource_group, "chaos", location, private_ip)
|
||||
old_network_group= self.azure.update_subnet(network_group_id, network_resource_group, subnet, virtual_network)
|
||||
logging.info("Node with instance ID: %s has been blocked" % (vm_name))
|
||||
logging.info("Waiting for %s seconds before resetting the subnet" % (duration))
|
||||
time.sleep(duration)
|
||||
|
||||
# replace old network security group
|
||||
self.azure.update_subnet(old_network_group, network_resource_group, subnet, virtual_network)
|
||||
self.azure.delete_security_group(network_resource_group, "chaos")
|
||||
|
||||
logging.info("node_block_scenario has been successfully injected!")
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to block node instance. Encountered following exception:"
|
||||
" %s. Test Failed" % (e)
|
||||
)
|
||||
logging.error("node_block_scenario injection failed!")
|
||||
|
||||
raise RuntimeError()
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
@@ -9,7 +9,7 @@ import pyipmi.interfaces
|
||||
import time
|
||||
import traceback
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
|
||||
from krkn_lib.models.k8s import AffectedNode, AffectedNodeStatus
|
||||
|
||||
class BM:
|
||||
def __init__(self, bm_info, user, passwd):
|
||||
@@ -109,40 +109,52 @@ class BM:
|
||||
self.get_ipmi_connection(bmc_addr, node_name).chassis_control_power_cycle()
|
||||
|
||||
# Wait until the node instance is running
|
||||
def wait_until_running(self, bmc_addr, node_name):
|
||||
def wait_until_running(self, bmc_addr, node_name, affected_node):
|
||||
start_time = time.time()
|
||||
while (
|
||||
not self.get_ipmi_connection(bmc_addr, node_name)
|
||||
.get_chassis_status()
|
||||
.power_on
|
||||
):
|
||||
time.sleep(1)
|
||||
end_time = time.time()
|
||||
if affected_node:
|
||||
affected_node.set_affected_node_status("running", end_time - start_time)
|
||||
|
||||
# Wait until the node instance is stopped
|
||||
def wait_until_stopped(self, bmc_addr, node_name):
|
||||
def wait_until_stopped(self, bmc_addr, node_name, affected_node):
|
||||
start_time = time.time()
|
||||
while (
|
||||
self.get_ipmi_connection(bmc_addr, node_name).get_chassis_status().power_on
|
||||
):
|
||||
time.sleep(1)
|
||||
end_time = time.time()
|
||||
if affected_node:
|
||||
affected_node.set_affected_node_status("stopped", end_time - start_time)
|
||||
|
||||
|
||||
# krkn_lib
|
||||
class bm_node_scenarios(abstract_node_scenarios):
|
||||
def __init__(self, bm_info, user, passwd, kubecli: KrknKubernetes):
|
||||
super().__init__(kubecli)
|
||||
def __init__(self, bm_info, user, passwd, kubecli: KrknKubernetes, node_action_kube_check: bool, affected_nodes_status: AffectedNodeStatus):
|
||||
super().__init__(kubecli, node_action_kube_check, affected_nodes_status)
|
||||
self.bm = BM(bm_info, user, passwd)
|
||||
self.node_action_kube_check = node_action_kube_check
|
||||
|
||||
# Node scenario to start the node
|
||||
def node_start_scenario(self, instance_kill_count, node, timeout):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
logging.info("Starting node_start_scenario injection")
|
||||
bmc_addr = self.bm.get_bmc_addr(node)
|
||||
affected_node.node_id = bmc_addr
|
||||
logging.info(
|
||||
"Starting the node %s with bmc address: %s " % (node, bmc_addr)
|
||||
)
|
||||
self.bm.start_instances(bmc_addr, node)
|
||||
self.bm.wait_until_running(bmc_addr, node)
|
||||
nodeaction.wait_for_ready_status(node, timeout, self.kubecli)
|
||||
self.bm.wait_until_running(bmc_addr, node, affected_node)
|
||||
if self.node_action_kube_check:
|
||||
nodeaction.wait_for_ready_status(node, timeout, self.kubecli, affected_node)
|
||||
logging.info(
|
||||
"Node with bmc address: %s is in running state" % (bmc_addr)
|
||||
)
|
||||
@@ -155,22 +167,26 @@ class bm_node_scenarios(abstract_node_scenarios):
|
||||
)
|
||||
logging.error("node_start_scenario injection failed!")
|
||||
raise e
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
# Node scenario to stop the node
|
||||
def node_stop_scenario(self, instance_kill_count, node, timeout):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
logging.info("Starting node_stop_scenario injection")
|
||||
bmc_addr = self.bm.get_bmc_addr(node)
|
||||
affected_node.node_id = bmc_addr
|
||||
logging.info(
|
||||
"Stopping the node %s with bmc address: %s " % (node, bmc_addr)
|
||||
)
|
||||
self.bm.stop_instances(bmc_addr, node)
|
||||
self.bm.wait_until_stopped(bmc_addr, node)
|
||||
self.bm.wait_until_stopped(bmc_addr, node, affected_node)
|
||||
logging.info(
|
||||
"Node with bmc address: %s is in stopped state" % (bmc_addr)
|
||||
)
|
||||
nodeaction.wait_for_unknown_status(node, timeout, self.kubecli)
|
||||
if self.node_action_kube_check:
|
||||
nodeaction.wait_for_unknown_status(node, timeout, self.kubecli, affected_node)
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to stop node instance. Encountered following exception: %s. "
|
||||
@@ -179,6 +195,7 @@ class bm_node_scenarios(abstract_node_scenarios):
|
||||
)
|
||||
logging.error("node_stop_scenario injection failed!")
|
||||
raise e
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
# Node scenario to terminate the node
|
||||
def node_termination_scenario(self, instance_kill_count, node, timeout):
|
||||
@@ -187,6 +204,7 @@ class bm_node_scenarios(abstract_node_scenarios):
|
||||
# Node scenario to reboot the node
|
||||
def node_reboot_scenario(self, instance_kill_count, node, timeout):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
logging.info("Starting node_reboot_scenario injection")
|
||||
bmc_addr = self.bm.get_bmc_addr(node)
|
||||
@@ -195,8 +213,9 @@ class bm_node_scenarios(abstract_node_scenarios):
|
||||
"Rebooting the node %s with bmc address: %s " % (node, bmc_addr)
|
||||
)
|
||||
self.bm.reboot_instances(bmc_addr, node)
|
||||
nodeaction.wait_for_unknown_status(node, timeout, self.kubecli)
|
||||
nodeaction.wait_for_ready_status(node, timeout, self.kubecli)
|
||||
if self.node_action_kube_check:
|
||||
nodeaction.wait_for_unknown_status(node, timeout, self.kubecli, affected_node)
|
||||
nodeaction.wait_for_ready_status(node, timeout, self.kubecli, affected_node)
|
||||
logging.info("Node with bmc address: %s has been rebooted" % (bmc_addr))
|
||||
logging.info("node_reboot_scenario has been successfuly injected!")
|
||||
except Exception as e:
|
||||
@@ -208,3 +227,4 @@ class bm_node_scenarios(abstract_node_scenarios):
|
||||
traceback.print_exc()
|
||||
logging.error("node_reboot_scenario injection failed!")
|
||||
raise e
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
@@ -1,26 +1,39 @@
|
||||
import datetime
|
||||
import time
|
||||
import random
|
||||
import logging
|
||||
import paramiko
|
||||
from krkn_lib.models.k8s import AffectedNode
|
||||
import krkn.invoke.command as runcommand
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
from krkn_lib.models.k8s import AffectedNode, AffectedNodeStatus
|
||||
from krkn_lib.models.k8s import AffectedNode
|
||||
|
||||
node_general = False
|
||||
|
||||
|
||||
def get_node_by_name(node_name_list, kubecli: KrknKubernetes):
|
||||
killable_nodes = kubecli.list_killable_nodes()
|
||||
for node_name in node_name_list:
|
||||
if node_name not in killable_nodes:
|
||||
logging.info(
|
||||
f"Node with provided ${node_name} does not exist or the node might "
|
||||
"be in NotReady state."
|
||||
)
|
||||
return
|
||||
return node_name_list
|
||||
|
||||
|
||||
# Pick a random node with specified label selector
|
||||
def get_node(node_name, label_selector, instance_kill_count, kubecli: KrknKubernetes):
|
||||
if node_name in kubecli.list_killable_nodes():
|
||||
return [node_name]
|
||||
elif node_name:
|
||||
logging.info(
|
||||
"Node with provided node_name does not exist or the node might "
|
||||
"be in NotReady state."
|
||||
)
|
||||
nodes = kubecli.list_killable_nodes(label_selector)
|
||||
def get_node(label_selector, instance_kill_count, kubecli: KrknKubernetes):
|
||||
|
||||
label_selector_list = label_selector.split(",")
|
||||
nodes = []
|
||||
for label_selector in label_selector_list:
|
||||
nodes.extend(kubecli.list_killable_nodes(label_selector))
|
||||
if not nodes:
|
||||
raise Exception("Ready nodes with the provided label selector do not exist")
|
||||
logging.info("Ready nodes with the label selector %s: %s" % (label_selector, nodes))
|
||||
logging.info("Ready nodes with the label selector %s: %s" % (label_selector_list, nodes))
|
||||
number_of_nodes = len(nodes)
|
||||
if instance_kill_count == number_of_nodes:
|
||||
return nodes
|
||||
@@ -31,26 +44,25 @@ def get_node(node_name, label_selector, instance_kill_count, kubecli: KrknKubern
|
||||
nodes.remove(node_to_add)
|
||||
return nodes_to_return
|
||||
|
||||
|
||||
# krkn_lib
|
||||
# Wait until the node status becomes Ready
|
||||
def wait_for_ready_status(node, timeout, kubecli: KrknKubernetes):
|
||||
resource_version = kubecli.get_node_resource_version(node)
|
||||
kubecli.watch_node_status(node, "True", timeout, resource_version)
|
||||
|
||||
def wait_for_ready_status(node, timeout, kubecli: KrknKubernetes, affected_node: AffectedNode = None):
|
||||
affected_node = kubecli.watch_node_status(node, "True", timeout, affected_node)
|
||||
return affected_node
|
||||
|
||||
|
||||
# krkn_lib
|
||||
# Wait until the node status becomes Not Ready
|
||||
def wait_for_not_ready_status(node, timeout, kubecli: KrknKubernetes):
|
||||
resource_version = kubecli.get_node_resource_version(node)
|
||||
kubecli.watch_node_status(node, "False", timeout, resource_version)
|
||||
|
||||
def wait_for_not_ready_status(node, timeout, kubecli: KrknKubernetes, affected_node: AffectedNode = None):
|
||||
affected_node = kubecli.watch_node_status(node, "False", timeout, affected_node)
|
||||
return affected_node
|
||||
|
||||
|
||||
# krkn_lib
|
||||
# Wait until the node status becomes Unknown
|
||||
def wait_for_unknown_status(node, timeout, kubecli: KrknKubernetes):
|
||||
resource_version = kubecli.get_node_resource_version(node)
|
||||
kubecli.watch_node_status(node, "Unknown", timeout, resource_version)
|
||||
def wait_for_unknown_status(node, timeout, kubecli: KrknKubernetes, affected_node: AffectedNode = None):
|
||||
affected_node = kubecli.watch_node_status(node, "Unknown", timeout, affected_node)
|
||||
return affected_node
|
||||
|
||||
|
||||
# Get the ip of the cluster node
|
||||
|
||||
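The reworked common_node_functions drops node_name handling from get_node (node names are now resolved separately by get_node_by_name), lets the label selector be a comma-separated list, and has the wait_for_* helpers return an AffectedNode carrying the measured durations. A short usage sketch under those assumptions; the KrknKubernetes constructor call and the label values are illustrative, not taken from this diff:

```python
# Usage sketch for the new helpers, assuming a configured KrknKubernetes client.
from krkn_lib.k8s import KrknKubernetes
import krkn.scenario_plugins.node_actions.common_node_functions as nodeaction

kubecli = KrknKubernetes(kubeconfig_path="~/.kube/config")  # assumed constructor usage

# Comma-separated selectors are split and the killable nodes of each selector are merged
nodes = nodeaction.get_node(
    "node-role.kubernetes.io/worker=,chaos=true",  # hypothetical labels
    instance_kill_count=2,
    kubecli=kubecli,
)

# The wait helpers now return an AffectedNode with the readiness timing recorded on it
for node in nodes:
    affected_node = nodeaction.wait_for_ready_status(node, timeout=300, kubecli=kubecli)
```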
@@ -5,7 +5,7 @@ from krkn.scenario_plugins.node_actions.abstract_node_scenarios import (
|
||||
import logging
|
||||
import docker
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
|
||||
from krkn_lib.models.k8s import AffectedNode, AffectedNodeStatus
|
||||
|
||||
class Docker:
|
||||
def __init__(self):
|
||||
@@ -38,21 +38,25 @@ class Docker:
|
||||
|
||||
|
||||
class docker_node_scenarios(abstract_node_scenarios):
|
||||
def __init__(self, kubecli: KrknKubernetes):
|
||||
super().__init__(kubecli)
|
||||
def __init__(self, kubecli: KrknKubernetes, node_action_kube_check: bool, affected_nodes_status: AffectedNodeStatus):
|
||||
super().__init__(kubecli, node_action_kube_check, affected_nodes_status)
|
||||
self.docker = Docker()
|
||||
self.node_action_kube_check = node_action_kube_check
|
||||
|
||||
# Node scenario to start the node
|
||||
def node_start_scenario(self, instance_kill_count, node, timeout):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
logging.info("Starting node_start_scenario injection")
|
||||
container_id = self.docker.get_container_id(node)
|
||||
affected_node.node_id = container_id
|
||||
logging.info(
|
||||
"Starting the node %s with container ID: %s " % (node, container_id)
|
||||
)
|
||||
self.docker.start_instances(node)
|
||||
nodeaction.wait_for_ready_status(node, timeout, self.kubecli)
|
||||
if self.node_action_kube_check:
|
||||
nodeaction.wait_for_ready_status(node, timeout, self.kubecli, affected_node)
|
||||
logging.info(
|
||||
"Node with container ID: %s is in running state" % (container_id)
|
||||
)
|
||||
@@ -64,13 +68,16 @@ class docker_node_scenarios(abstract_node_scenarios):
|
||||
)
|
||||
logging.error("node_start_scenario injection failed!")
|
||||
raise e
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
# Node scenario to stop the node
|
||||
def node_stop_scenario(self, instance_kill_count, node, timeout):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
logging.info("Starting node_stop_scenario injection")
|
||||
container_id = self.docker.get_container_id(node)
|
||||
affected_node.node_id = container_id
|
||||
logging.info(
|
||||
"Stopping the node %s with container ID: %s " % (node, container_id)
|
||||
)
|
||||
@@ -78,7 +85,8 @@ class docker_node_scenarios(abstract_node_scenarios):
|
||||
logging.info(
|
||||
"Node with container ID: %s is in stopped state" % (container_id)
|
||||
)
|
||||
nodeaction.wait_for_unknown_status(node, timeout, self.kubecli)
|
||||
if self.node_action_kube_check:
|
||||
nodeaction.wait_for_unknown_status(node, timeout, self.kubecli, affected_node)
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to stop node instance. Encountered following exception: %s. "
|
||||
@@ -86,6 +94,7 @@ class docker_node_scenarios(abstract_node_scenarios):
|
||||
)
|
||||
logging.error("node_stop_scenario injection failed!")
|
||||
raise e
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
# Node scenario to terminate the node
|
||||
def node_termination_scenario(self, instance_kill_count, node, timeout):
|
||||
@@ -113,6 +122,7 @@ class docker_node_scenarios(abstract_node_scenarios):
|
||||
# Node scenario to reboot the node
|
||||
def node_reboot_scenario(self, instance_kill_count, node, timeout):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
logging.info("Starting node_reboot_scenario injection")
|
||||
container_id = self.docker.get_container_id(node)
|
||||
@@ -121,8 +131,9 @@ class docker_node_scenarios(abstract_node_scenarios):
|
||||
% (node, container_id)
|
||||
)
|
||||
self.docker.reboot_instances(node)
|
||||
nodeaction.wait_for_unknown_status(node, timeout, self.kubecli)
|
||||
nodeaction.wait_for_ready_status(node, timeout, self.kubecli)
|
||||
if self.node_action_kube_check:
|
||||
nodeaction.wait_for_unknown_status(node, timeout, self.kubecli, affected_node)
|
||||
nodeaction.wait_for_ready_status(node, timeout, self.kubecli, affected_node)
|
||||
logging.info(
|
||||
"Node with container ID: %s has been rebooted" % (container_id)
|
||||
)
|
||||
@@ -134,3 +145,4 @@ class docker_node_scenarios(abstract_node_scenarios):
|
||||
)
|
||||
logging.error("node_reboot_scenario injection failed!")
|
||||
raise e
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
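The Docker (container-backed node) provider gets the same constructor change as the others. A hedged sketch of how the plugin would now wire it up and drive a reboot; the kubeconfig path and node name are placeholders, and the import path is inferred from the module shown above:

```python
# Sketch only: wiring docker_node_scenarios with the new constructor arguments.
from krkn_lib.k8s import KrknKubernetes
from krkn_lib.models.k8s import AffectedNodeStatus
from krkn.scenario_plugins.node_actions.docker_node_scenarios import docker_node_scenarios

kubecli = KrknKubernetes(kubeconfig_path="~/.kube/config")  # assumed constructor usage
affected_nodes_status = AffectedNodeStatus()

scenario = docker_node_scenarios(
    kubecli,
    node_action_kube_check=True,      # gate the Kubernetes readiness checks
    affected_nodes_status=affected_nodes_status,
)

# One reboot per node, waiting up to 120s for the container/node to recover
scenario.node_reboot_scenario(instance_kill_count=1, node="kind-worker", timeout=120)

# Per-iteration recovery data is collected here and later merged into telemetry
print(affected_nodes_status.affected_nodes)
```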
@@ -1,66 +1,78 @@
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import logging
|
||||
import json
|
||||
import google.auth
|
||||
import krkn.scenario_plugins.node_actions.common_node_functions as nodeaction
|
||||
from krkn.scenario_plugins.node_actions.abstract_node_scenarios import (
|
||||
abstract_node_scenarios,
|
||||
)
|
||||
from googleapiclient import discovery
|
||||
from oauth2client.client import GoogleCredentials
|
||||
from google.cloud import compute_v1
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
|
||||
from krkn_lib.models.k8s import AffectedNode, AffectedNodeStatus
|
||||
|
||||
class GCP:
|
||||
def __init__(self):
|
||||
try:
|
||||
gapp_creds = os.getenv("GOOGLE_APPLICATION_CREDENTIALS")
|
||||
with open(gapp_creds, "r") as f:
|
||||
f_str = f.read()
|
||||
self.project = json.loads(f_str)["project_id"]
|
||||
# self.project = runcommand.invoke("gcloud config get-value project").split("/n")[0].strip()
|
||||
logging.info("project " + str(self.project) + "!")
|
||||
credentials = GoogleCredentials.get_application_default()
|
||||
self.client = discovery.build(
|
||||
"compute", "v1", credentials=credentials, cache_discovery=False
|
||||
)
|
||||
|
||||
_, self.project_id = google.auth.default()
|
||||
self.instance_client = compute_v1.InstancesClient()
|
||||
except Exception as e:
|
||||
logging.error("Error on setting up GCP connection: " + str(e))
|
||||
|
||||
raise e
|
||||
|
||||
# Get the instance ID of the node
|
||||
def get_instance_id(self, node):
|
||||
zone_request = self.client.zones().list(project=self.project)
|
||||
while zone_request is not None:
|
||||
zone_response = zone_request.execute()
|
||||
for zone in zone_response["items"]:
|
||||
instances_request = self.client.instances().list(
|
||||
project=self.project, zone=zone["name"]
|
||||
)
|
||||
while instances_request is not None:
|
||||
instance_response = instances_request.execute()
|
||||
if "items" in instance_response.keys():
|
||||
for instance in instance_response["items"]:
|
||||
if instance["name"] in node:
|
||||
return instance["name"], zone["name"]
|
||||
instances_request = self.client.zones().list_next(
|
||||
previous_request=instances_request,
|
||||
previous_response=instance_response,
|
||||
)
|
||||
zone_request = self.client.zones().list_next(
|
||||
previous_request=zone_request, previous_response=zone_response
|
||||
# Get the instance of the node
|
||||
def get_node_instance(self, node):
|
||||
try:
|
||||
request = compute_v1.AggregatedListInstancesRequest(
|
||||
project = self.project_id
|
||||
)
|
||||
logging.info("no instances ")
|
||||
agg_list = self.instance_client.aggregated_list(request=request)
|
||||
for _, response in agg_list:
|
||||
if response.instances:
|
||||
for instance in response.instances:
|
||||
if instance.name in node:
|
||||
return instance
|
||||
logging.info("no instances ")
|
||||
except Exception as e:
|
||||
logging.error("Error getting the instance of the node: " + str(e))
|
||||
|
||||
raise e
|
||||
|
||||
# Get the instance name
|
||||
def get_instance_name(self, instance):
|
||||
if instance.name:
|
||||
return instance.name
|
||||
|
||||
# Get the instance zone
|
||||
def get_instance_zone(self, instance):
|
||||
if instance.zone:
|
||||
return instance.zone.split("/")[-1]
|
||||
|
||||
# Get the instance zone of the node
|
||||
def get_node_instance_zone(self, node):
|
||||
instance = self.get_node_instance(node)
|
||||
if instance:
|
||||
return self.get_instance_zone(instance)
|
||||
|
||||
# Get the instance name of the node
|
||||
def get_node_instance_name(self, node):
|
||||
instance = self.get_node_instance(node)
|
||||
if instance:
|
||||
return self.get_instance_name(instance)
|
||||
|
||||
# Get the instance name of the node
|
||||
def get_instance_id(self, node):
|
||||
return self.get_node_instance_name(node)
|
||||
|
||||
# Start the node instance
|
||||
def start_instances(self, zone, instance_id):
|
||||
def start_instances(self, instance_id):
|
||||
try:
|
||||
self.client.instances().start(
|
||||
project=self.project, zone=zone, instance=instance_id
|
||||
).execute()
|
||||
logging.info("vm name " + str(instance_id) + " started")
|
||||
request = compute_v1.StartInstanceRequest(
|
||||
instance=instance_id,
|
||||
project=self.project_id,
|
||||
zone=self.get_node_instance_zone(instance_id),
|
||||
)
|
||||
self.instance_client.start(request=request)
|
||||
logging.info("Instance: " + str(instance_id) + " started")
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to start node instance %s. Encountered following "
|
||||
@@ -70,12 +82,15 @@ class GCP:
|
||||
raise RuntimeError()
|
||||
|
||||
# Stop the node instance
|
||||
def stop_instances(self, zone, instance_id):
|
||||
def stop_instances(self, instance_id):
|
||||
try:
|
||||
self.client.instances().stop(
|
||||
project=self.project, zone=zone, instance=instance_id
|
||||
).execute()
|
||||
logging.info("vm name " + str(instance_id) + " stopped")
|
||||
request = compute_v1.StopInstanceRequest(
|
||||
instance=instance_id,
|
||||
project=self.project_id,
|
||||
zone=self.get_node_instance_zone(instance_id),
|
||||
)
|
||||
self.instance_client.stop(request=request)
|
||||
logging.info("Instance: " + str(instance_id) + " stopped")
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to stop node instance %s. Encountered following "
|
||||
@@ -84,13 +99,16 @@ class GCP:
|
||||
|
||||
raise RuntimeError()
|
||||
|
||||
# Start the node instance
|
||||
def suspend_instances(self, zone, instance_id):
|
||||
# Suspend the node instance
|
||||
def suspend_instances(self, instance_id):
|
||||
try:
|
||||
self.client.instances().suspend(
|
||||
project=self.project, zone=zone, instance=instance_id
|
||||
).execute()
|
||||
logging.info("vm name " + str(instance_id) + " suspended")
|
||||
request = compute_v1.SuspendInstanceRequest(
|
||||
instance=instance_id,
|
||||
project=self.project_id,
|
||||
zone=self.get_node_instance_zone(instance_id),
|
||||
)
|
||||
self.instance_client.suspend(request=request)
|
||||
logging.info("Instance: " + str(instance_id) + " suspended")
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to suspend node instance %s. Encountered following "
|
||||
@@ -100,49 +118,65 @@ class GCP:
|
||||
raise RuntimeError()
|
||||
|
||||
# Terminate the node instance
|
||||
def terminate_instances(self, zone, instance_id):
|
||||
def terminate_instances(self, instance_id):
|
||||
try:
|
||||
self.client.instances().delete(
|
||||
project=self.project, zone=zone, instance=instance_id
|
||||
).execute()
|
||||
logging.info("vm name " + str(instance_id) + " terminated")
|
||||
request = compute_v1.DeleteInstanceRequest(
|
||||
instance=instance_id,
|
||||
project=self.project_id,
|
||||
zone=self.get_node_instance_zone(instance_id),
|
||||
)
|
||||
self.instance_client.delete(request=request)
|
||||
logging.info("Instance: " + str(instance_id) + " terminated")
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to start node instance %s. Encountered following "
|
||||
"Failed to terminate node instance %s. Encountered following "
|
||||
"exception: %s." % (instance_id, e)
|
||||
)
|
||||
|
||||
raise RuntimeError()
|
||||
|
||||
# Reboot the node instance
|
||||
def reboot_instances(self, zone, instance_id):
|
||||
def reboot_instances(self, instance_id):
|
||||
try:
|
||||
self.client.instances().reset(
|
||||
project=self.project, zone=zone, instance=instance_id
|
||||
).execute()
|
||||
logging.info("vm name " + str(instance_id) + " rebooted")
|
||||
request = compute_v1.ResetInstanceRequest(
|
||||
instance=instance_id,
|
||||
project=self.project_id,
|
||||
zone=self.get_node_instance_zone(instance_id),
|
||||
)
|
||||
self.instance_client.reset(request=request)
|
||||
logging.info("Instance: " + str(instance_id) + " rebooted")
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to start node instance %s. Encountered following "
|
||||
"Failed to reboot node instance %s. Encountered following "
|
||||
"exception: %s." % (instance_id, e)
|
||||
)
|
||||
|
||||
raise RuntimeError()
|
||||
|
||||
# Get instance status
|
||||
def get_instance_status(self, zone, instance_id, expected_status, timeout):
|
||||
# statuses: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING,
|
||||
def get_instance_status(self, instance_id, expected_status, timeout):
|
||||
# states: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDING, SUSPENDED, REPAIRING,
|
||||
# and TERMINATED.
|
||||
i = 0
|
||||
sleeper = 5
|
||||
while i <= timeout:
|
||||
instStatus = (
|
||||
self.client.instances()
|
||||
.get(project=self.project, zone=zone, instance=instance_id)
|
||||
.execute()
|
||||
)
|
||||
logging.info("Status of vm " + str(instStatus["status"]))
|
||||
if instStatus["status"] == expected_status:
|
||||
try:
|
||||
request = compute_v1.GetInstanceRequest(
|
||||
instance=instance_id,
|
||||
project=self.project_id,
|
||||
zone=self.get_node_instance_zone(instance_id),
|
||||
)
|
||||
instance_status = self.instance_client.get(request=request).status
|
||||
logging.info("Status of instance " + str(instance_id) + ": " + instance_status)
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to get status of instance %s. Encountered following "
|
||||
"exception: %s." % (instance_id, e)
|
||||
)
|
||||
raise RuntimeError()
|
||||
|
||||
if instance_status == expected_status:
|
||||
logging.info('status matches, end' + str(expected_status) + str(instance_status))
|
||||
return True
|
||||
time.sleep(sleeper)
|
||||
i += sleeper
|
||||
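The new get_instance_status drops the zone argument and polls through google-cloud-compute's typed client instead of the discovery-based API. A condensed sketch of that polling loop; the project, zone and instance values are placeholders, and the client calls are the same GetInstanceRequest/InstancesClient calls used above:

```python
# Condensed sketch of the compute_v1 status polling shown above.
# project_id, zone and instance_name are placeholders.
import time

from google.cloud import compute_v1


def wait_for_status(project_id, zone, instance_name, expected_status, timeout, poll=5):
    client = compute_v1.InstancesClient()
    waited = 0
    while waited <= timeout:
        request = compute_v1.GetInstanceRequest(
            project=project_id, zone=zone, instance=instance_name
        )
        # Instance.status is a string such as RUNNING, TERMINATED or SUSPENDED
        status = client.get(request=request).status
        if status == expected_status:
            return True
        time.sleep(poll)
        waited += poll
    return False
```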
@@ -153,53 +187,61 @@ class GCP:
|
||||
return False
|
||||
|
||||
# Wait until the node instance is suspended
|
||||
def wait_until_suspended(self, zone, instance_id, timeout):
|
||||
return self.get_instance_status(zone, instance_id, "SUSPENDED", timeout)
|
||||
def wait_until_suspended(self, instance_id, timeout):
|
||||
return self.get_instance_status(instance_id, "SUSPENDED", timeout)
|
||||
|
||||
# Wait until the node instance is running
|
||||
def wait_until_running(self, zone, instance_id, timeout):
|
||||
return self.get_instance_status(zone, instance_id, "RUNNING", timeout)
|
||||
def wait_until_running(self, instance_id, timeout, affected_node):
|
||||
start_time = time.time()
|
||||
instance_status = self.get_instance_status(instance_id, "RUNNING", timeout)
|
||||
end_time = time.time()
|
||||
if affected_node:
|
||||
affected_node.set_affected_node_status("running", end_time - start_time)
|
||||
return instance_status
|
||||
|
||||
# Wait until the node instance is stopped
|
||||
def wait_until_stopped(self, zone, instance_id, timeout):
|
||||
return self.get_instance_status(zone, instance_id, "TERMINATED", timeout)
|
||||
def wait_until_stopped(self, instance_id, timeout, affected_node):
|
||||
# In GCP, the next state after STOPPING is TERMINATED
|
||||
start_time = time.time()
|
||||
instance_status = self.get_instance_status(instance_id, "TERMINATED", timeout)
|
||||
end_time = time.time()
|
||||
if affected_node:
|
||||
affected_node.set_affected_node_status("stopped", end_time - start_time)
|
||||
return instance_status
|
||||
|
||||
# Wait until the node instance is terminated
|
||||
def wait_until_terminated(self, zone, instance_id, timeout):
|
||||
try:
|
||||
i = 0
|
||||
sleeper = 5
|
||||
while i <= timeout:
|
||||
instStatus = (
|
||||
self.client.instances()
|
||||
.get(project=self.project, zone=zone, instance=instance_id)
|
||||
.execute()
|
||||
)
|
||||
logging.info("Status of vm " + str(instStatus["status"]))
|
||||
time.sleep(sleeper)
|
||||
except Exception as e:
|
||||
logging.info("here " + str(e))
|
||||
return True
|
||||
def wait_until_terminated(self, instance_id, timeout, affected_node):
|
||||
start_time = time.time()
|
||||
instance_status = self.get_instance_status(instance_id, "TERMINATED", timeout)
|
||||
end_time = time.time()
|
||||
if affected_node:
|
||||
affected_node.set_affected_node_status("terminated", end_time - start_time)
|
||||
return instance_status
|
||||
|
||||
|
||||
# krkn_lib
|
||||
class gcp_node_scenarios(abstract_node_scenarios):
|
||||
def __init__(self, kubecli: KrknKubernetes):
|
||||
super().__init__(kubecli)
|
||||
def __init__(self, kubecli: KrknKubernetes, node_action_kube_check: bool, affected_nodes_status: AffectedNodeStatus):
|
||||
super().__init__(kubecli, node_action_kube_check, affected_nodes_status)
|
||||
self.gcp = GCP()
|
||||
self.node_action_kube_check = node_action_kube_check
|
||||
|
||||
# Node scenario to start the node
|
||||
def node_start_scenario(self, instance_kill_count, node, timeout):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
logging.info("Starting node_start_scenario injection")
|
||||
instance_id, zone = self.gcp.get_instance_id(node)
|
||||
instance = self.gcp.get_node_instance(node)
|
||||
instance_id = self.gcp.get_instance_name(instance)
|
||||
affected_node.node_id = instance_id
|
||||
logging.info(
|
||||
"Starting the node %s with instance ID: %s " % (node, instance_id)
|
||||
)
|
||||
self.gcp.start_instances(zone, instance_id)
|
||||
self.gcp.wait_until_running(zone, instance_id, timeout)
|
||||
nodeaction.wait_for_ready_status(node, timeout, self.kubecli)
|
||||
self.gcp.start_instances(instance_id)
|
||||
self.gcp.wait_until_running(instance_id, timeout, affected_node)
|
||||
if self.node_action_kube_check:
|
||||
nodeaction.wait_for_ready_status(node, timeout, self.kubecli, affected_node)
|
||||
logging.info(
|
||||
"Node with instance ID: %s is in running state" % instance_id
|
||||
)
|
||||
@@ -212,23 +254,27 @@ class gcp_node_scenarios(abstract_node_scenarios):
|
||||
logging.error("node_start_scenario injection failed!")
|
||||
|
||||
raise RuntimeError()
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
# Node scenario to stop the node
|
||||
def node_stop_scenario(self, instance_kill_count, node, timeout):
|
||||
logging.info("stop scenario")
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
logging.info("Starting node_stop_scenario injection")
|
||||
instance_id, zone = self.gcp.get_instance_id(node)
|
||||
instance = self.gcp.get_node_instance(node)
|
||||
instance_id = self.gcp.get_instance_name(instance)
|
||||
affected_node.node_id = instance_id
|
||||
logging.info(
|
||||
"Stopping the node %s with instance ID: %s " % (node, instance_id)
|
||||
)
|
||||
self.gcp.stop_instances(zone, instance_id)
|
||||
self.gcp.wait_until_stopped(zone, instance_id, timeout)
|
||||
self.gcp.stop_instances(instance_id)
|
||||
self.gcp.wait_until_stopped(instance_id, timeout, affected_node=affected_node)
|
||||
logging.info(
|
||||
"Node with instance ID: %s is in stopped state" % instance_id
|
||||
)
|
||||
nodeaction.wait_for_unknown_status(node, timeout, self.kubecli)
|
||||
if self.node_action_kube_check:
|
||||
nodeaction.wait_for_unknown_status(node, timeout, self.kubecli, affected_node)
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to stop node instance. Encountered following exception: %s. "
|
||||
@@ -237,19 +283,23 @@ class gcp_node_scenarios(abstract_node_scenarios):
|
||||
logging.error("node_stop_scenario injection failed!")
|
||||
|
||||
raise RuntimeError()
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
# Node scenario to terminate the node
|
||||
def node_termination_scenario(self, instance_kill_count, node, timeout):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
logging.info("Starting node_termination_scenario injection")
|
||||
instance_id, zone = self.gcp.get_instance_id(node)
|
||||
instance = self.gcp.get_node_instance(node)
|
||||
instance_id = self.gcp.get_instance_name(instance)
|
||||
affected_node.node_id = instance_id
|
||||
logging.info(
|
||||
"Terminating the node %s with instance ID: %s "
|
||||
% (node, instance_id)
|
||||
)
|
||||
self.gcp.terminate_instances(zone, instance_id)
|
||||
self.gcp.wait_until_terminated(zone, instance_id, timeout)
|
||||
self.gcp.terminate_instances(instance_id)
|
||||
self.gcp.wait_until_terminated(instance_id, timeout, affected_node=affected_node)
|
||||
for _ in range(timeout):
|
||||
if node not in self.kubecli.list_nodes():
|
||||
break
|
||||
@@ -267,20 +317,27 @@ class gcp_node_scenarios(abstract_node_scenarios):
|
||||
)
|
||||
logging.error("node_termination_scenario injection failed!")
|
||||
|
||||
|
||||
raise e
|
||||
raise RuntimeError()
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
# Node scenario to reboot the node
|
||||
def node_reboot_scenario(self, instance_kill_count, node, timeout):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
logging.info("Starting node_reboot_scenario injection")
|
||||
instance_id, zone = self.gcp.get_instance_id(node)
|
||||
instance = self.gcp.get_node_instance(node)
|
||||
instance_id = self.gcp.get_instance_name(instance)
|
||||
affected_node.node_id = instance_id
|
||||
logging.info(
|
||||
"Rebooting the node %s with instance ID: %s " % (node, instance_id)
|
||||
)
|
||||
self.gcp.reboot_instances(zone, instance_id)
|
||||
nodeaction.wait_for_ready_status(node, timeout, self.kubecli)
|
||||
self.gcp.reboot_instances(instance_id)
|
||||
if self.node_action_kube_check:
|
||||
nodeaction.wait_for_unknown_status(node, timeout, self.kubecli, affected_node)
|
||||
self.gcp.wait_until_running(instance_id, timeout, affected_node)
|
||||
if self.node_action_kube_check:
|
||||
nodeaction.wait_for_ready_status(node, timeout, self.kubecli, affected_node)
|
||||
logging.info(
|
||||
"Node with instance ID: %s has been rebooted" % instance_id
|
||||
)
|
||||
@@ -293,3 +350,4 @@ class gcp_node_scenarios(abstract_node_scenarios):
|
||||
logging.error("node_reboot_scenario injection failed!")
|
||||
|
||||
raise RuntimeError()
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
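Because the zone is no longer passed in from the scenario config, the GCP class above resolves it by scanning an aggregated instance list and matching each instance name against the Kubernetes node name. A trimmed-down sketch of that lookup; the project id and node name are placeholders:

```python
# Trimmed-down sketch of the aggregated-list lookup used by GCP.get_node_instance.
# project_id and node_name are placeholders.
from google.cloud import compute_v1


def find_instance(project_id, node_name):
    client = compute_v1.InstancesClient()
    request = compute_v1.AggregatedListInstancesRequest(project=project_id)
    # aggregated_list yields (zone_scope, InstancesScopedList) pairs across all zones
    for _, scoped_list in client.aggregated_list(request=request):
        if scoped_list.instances:
            for instance in scoped_list.instances:
                if instance.name in node_name:
                    # instance.zone is a URL; the short zone name is its last path segment
                    return instance.name, instance.zone.split("/")[-1]
    return None, None
```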
@@ -3,7 +3,7 @@ from krkn.scenario_plugins.node_actions.abstract_node_scenarios import (
    abstract_node_scenarios,
)
from krkn_lib.k8s import KrknKubernetes

from krkn_lib.models.k8s import AffectedNodeStatus

class GENERAL:
    def __init__(self):
@@ -12,9 +12,10 @@ class GENERAL:

# krkn_lib
class general_node_scenarios(abstract_node_scenarios):
    def __init__(self, kubecli: KrknKubernetes):
        super().__init__(kubecli)
    def __init__(self, kubecli: KrknKubernetes, node_action_kube_check: bool, affected_nodes_status: AffectedNodeStatus):
        super().__init__(kubecli, node_action_kube_check, affected_nodes_status)
        self.general = GENERAL()
        self.node_action_kube_check = node_action_kube_check

    # Node scenario to start the node
    def node_start_scenario(self, instance_kill_count, node, timeout):
krkn/scenario_plugins/node_actions/ibmcloud_node_scenarios.py (new file, 370 lines)
@@ -0,0 +1,370 @@
|
||||
#!/usr/bin/env python
|
||||
import time
|
||||
import typing
|
||||
from os import environ
|
||||
from dataclasses import dataclass, field
|
||||
from traceback import format_exc
|
||||
import logging
|
||||
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
import krkn.scenario_plugins.node_actions.common_node_functions as nodeaction
|
||||
from krkn.scenario_plugins.node_actions.abstract_node_scenarios import (
|
||||
abstract_node_scenarios,
|
||||
)
|
||||
from kubernetes import client, watch
|
||||
from ibm_vpc import VpcV1
|
||||
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
|
||||
import sys
|
||||
|
||||
from krkn_lib.models.k8s import AffectedNodeStatus, AffectedNode
|
||||
|
||||
|
||||
class IbmCloud:
|
||||
def __init__(self):
|
||||
"""
|
||||
Initialize the ibm cloud client by using the the env variables:
|
||||
'IBMC_APIKEY' 'IBMC_URL'
|
||||
"""
|
||||
apiKey = environ.get("IBMC_APIKEY")
|
||||
service_url = environ.get("IBMC_URL")
|
||||
if not apiKey:
|
||||
raise Exception("Environmental variable 'IBMC_APIKEY' is not set")
|
||||
if not service_url:
|
||||
raise Exception("Environmental variable 'IBMC_URL' is not set")
|
||||
try:
|
||||
authenticator = IAMAuthenticator(apiKey)
|
||||
self.service = VpcV1(authenticator=authenticator)
|
||||
|
||||
self.service.set_service_url(service_url)
|
||||
except Exception as e:
|
||||
logging.error("error authenticating" + str(e))
|
||||
|
||||
|
||||
# Get the instance ID of the node
|
||||
def get_instance_id(self, node_name):
|
||||
node_list = self.list_instances()
|
||||
for node in node_list:
|
||||
if node_name == node["vpc_name"]:
|
||||
return node["vpc_id"]
|
||||
logging.error("Couldn't find node with name " + str(node_name) + ", you could try another region")
|
||||
sys.exit(1)
|
||||
|
||||
def delete_instance(self, instance_id):
|
||||
"""
|
||||
Deletes the Instance whose name is given by 'instance_id'
|
||||
"""
|
||||
try:
|
||||
self.service.delete_instance(instance_id)
|
||||
logging.info("Deleted Instance -- '{}'".format(instance_id))
|
||||
except Exception as e:
|
||||
logging.info("Instance '{}' could not be deleted. ".format(instance_id))
|
||||
return False
|
||||
|
||||
def reboot_instances(self, instance_id):
|
||||
"""
|
||||
Reboots the Instance whose name is given by 'instance_id'. Returns True if successful, or
|
||||
returns False if the Instance is not powered on
|
||||
"""
|
||||
|
||||
try:
|
||||
self.service.create_instance_action(
|
||||
instance_id,
|
||||
type="reboot",
|
||||
)
|
||||
logging.info("Reset Instance -- '{}'".format(instance_id))
|
||||
return True
|
||||
except Exception as e:
|
||||
logging.info("Instance '{}' could not be rebooted".format(instance_id))
|
||||
return False
|
||||
|
||||
def stop_instances(self, instance_id):
|
||||
"""
|
||||
Stops the Instance whose name is given by 'instance_id'. Returns True if successful, or
|
||||
returns False if the Instance is already stopped
|
||||
"""
|
||||
|
||||
try:
|
||||
self.service.create_instance_action(
|
||||
instance_id,
|
||||
type="stop",
|
||||
)
|
||||
logging.info("Stopped Instance -- '{}'".format(instance_id))
|
||||
return True
|
||||
except Exception as e:
|
||||
logging.info("Instance '{}' could not be stopped".format(instance_id))
|
||||
logging.info("error" + str(e))
|
||||
return False
|
||||
|
||||
def start_instances(self, instance_id):
|
||||
"""
|
||||
Stops the Instance whose name is given by 'instance_id'. Returns True if successful, or
|
||||
returns False if the Instance is already running
|
||||
"""
|
||||
|
||||
try:
|
||||
self.service.create_instance_action(
|
||||
instance_id,
|
||||
type="start",
|
||||
)
|
||||
logging.info("Started Instance -- '{}'".format(instance_id))
|
||||
return True
|
||||
except Exception as e:
|
||||
logging.info("Instance '{}' could not start running".format(instance_id))
|
||||
return False
|
||||
|
||||
def list_instances(self):
|
||||
"""
|
||||
Returns a list of Instances present in the datacenter
|
||||
"""
|
||||
instance_names = []
|
||||
try:
|
||||
instances_result = self.service.list_instances().get_result()
|
||||
instances_list = instances_result["instances"]
|
||||
for vpc in instances_list:
|
||||
instance_names.append({"vpc_name": vpc["name"], "vpc_id": vpc["id"]})
|
||||
starting_count = instances_result["total_count"]
|
||||
while instances_result["total_count"] == instances_result["limit"]:
|
||||
instances_result = self.service.list_instances(
|
||||
start=starting_count
|
||||
).get_result()
|
||||
instances_list = instances_result["instances"]
|
||||
starting_count += instances_result["total_count"]
|
||||
for vpc in instances_list:
|
||||
instance_names.append({"vpc_name": vpc.name, "vpc_id": vpc.id})
|
||||
except Exception as e:
|
||||
logging.error("Error listing out instances: " + str(e))
|
||||
sys.exit(1)
|
||||
return instance_names
|
||||
|
||||
def find_id_in_list(self, name, vpc_list):
|
||||
for vpc in vpc_list:
|
||||
if vpc["vpc_name"] == name:
|
||||
return vpc["vpc_id"]
|
||||
|
||||
def get_instance_status(self, instance_id):
|
||||
"""
|
||||
Returns the status of the Instance whose name is given by 'instance_id'
|
||||
"""
|
||||
|
||||
try:
|
||||
instance = self.service.get_instance(instance_id).get_result()
|
||||
state = instance["status"]
|
||||
return state
|
||||
except Exception as e:
|
||||
logging.error(
|
||||
"Failed to get node instance status %s. Encountered following "
|
||||
"exception: %s." % (instance_id, e)
|
||||
)
|
||||
return None
|
||||
|
||||
def wait_until_deleted(self, instance_id, timeout, affected_node=None):
|
||||
"""
|
||||
Waits until the instance is deleted or until the timeout. Returns True if
|
||||
the instance is successfully deleted, else returns False
|
||||
"""
|
||||
start_time = time.time()
|
||||
time_counter = 0
|
||||
vpc = self.get_instance_status(instance_id)
|
||||
while vpc is not None:
|
||||
vpc = self.get_instance_status(instance_id)
|
||||
logging.info(
|
||||
"Instance %s is still being deleted, sleeping for 5 seconds"
|
||||
% instance_id
|
||||
)
|
||||
time.sleep(5)
|
||||
time_counter += 5
|
||||
if time_counter >= timeout:
|
||||
logging.info(
|
||||
"Instance %s is still not deleted in allotted time" % instance_id
|
||||
)
|
||||
return False
|
||||
end_time = time.time()
|
||||
if affected_node:
|
||||
affected_node.set_affected_node_status("terminated", end_time - start_time)
|
||||
return True
|
||||
|
||||
def wait_until_running(self, instance_id, timeout, affected_node=None):
|
||||
"""
|
||||
Waits until the Instance switches to running state or until the timeout.
|
||||
Returns True if the Instance switches to running, else returns False
|
||||
"""
|
||||
start_time = time.time()
|
||||
time_counter = 0
|
||||
status = self.get_instance_status(instance_id)
|
||||
while status != "running":
|
||||
status = self.get_instance_status(instance_id)
|
||||
logging.info(
|
||||
"Instance %s is still not running, sleeping for 5 seconds" % instance_id
|
||||
)
|
||||
time.sleep(5)
|
||||
time_counter += 5
|
||||
if time_counter >= timeout:
|
||||
logging.info(
|
||||
"Instance %s is still not ready in allotted time" % instance_id
|
||||
)
|
||||
return False
|
||||
end_time = time.time()
|
||||
if affected_node:
|
||||
affected_node.set_affected_node_status("running", end_time - start_time)
|
||||
return True
|
||||
|
||||
def wait_until_stopped(self, instance_id, timeout, affected_node):
|
||||
"""
|
||||
Waits until the Instance switches to stopped state or until the timeout.
|
||||
Returns True if the Instance switches to stopped, else returns False
|
||||
"""
|
||||
start_time = time.time()
|
||||
time_counter = 0
|
||||
status = self.get_instance_status(instance_id)
|
||||
while status != "stopped":
|
||||
status = self.get_instance_status(instance_id)
|
||||
logging.info(
|
||||
"Instance %s is still not stopped, sleeping for 5 seconds" % instance_id
|
||||
)
|
||||
time.sleep(5)
|
||||
time_counter += 5
|
||||
if time_counter >= timeout:
|
||||
logging.info(
|
||||
"Instance %s is still not stopped in allotted time" % instance_id
|
||||
)
|
||||
return False
|
||||
end_time = time.time()
|
||||
if affected_node:
|
||||
affected_node.set_affected_node_status("stopped", end_time - start_time)
|
||||
return True
|
||||
|
||||
|
||||
def wait_until_rebooted(self, instance_id, timeout, affected_node):
|
||||
"""
|
||||
Waits until the Instance switches to restarting state and then running state or until the timeout.
|
||||
Returns True if the Instance switches back to running, else returns False
|
||||
"""
|
||||
|
||||
time_counter = 0
|
||||
status = self.get_instance_status(instance_id)
|
||||
while status == "starting":
|
||||
status = self.get_instance_status(instance_id)
|
||||
logging.info(
|
||||
"Instance %s is still restarting, sleeping for 5 seconds" % instance_id
|
||||
)
|
||||
time.sleep(5)
|
||||
time_counter += 5
|
||||
if time_counter >= timeout:
|
||||
logging.info(
|
||||
"Instance %s is still restarting after allotted time" % instance_id
|
||||
)
|
||||
return False
|
||||
self.wait_until_running(instance_id, timeout, affected_node)
|
||||
return True
|
||||
|
||||
|
||||
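The IbmCloud wrapper above drives all three power actions through a single VPC API call, create_instance_action, and treats failures as a False return rather than an exception. A minimal sketch of that call path, assuming the same IBMC_APIKEY and IBMC_URL environment variables used in IbmCloud.__init__; the instance id is a placeholder:

```python
# Minimal sketch of the ibm_vpc call path used above.
# Assumes IBMC_APIKEY and IBMC_URL are exported, as in IbmCloud.__init__.
from os import environ

from ibm_vpc import VpcV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

authenticator = IAMAuthenticator(environ["IBMC_APIKEY"])
service = VpcV1(authenticator=authenticator)
service.set_service_url(environ["IBMC_URL"])

instance_id = "0717_exampleid"  # placeholder VPC instance id

# "start", "stop" and "reboot" all go through the same action endpoint
service.create_instance_action(instance_id, type="reboot")

# Status polling then reads the instance resource until it reports the target state
status = service.get_instance(instance_id).get_result()["status"]
print(status)
```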
@dataclass
|
||||
class ibm_node_scenarios(abstract_node_scenarios):
|
||||
def __init__(self, kubecli: KrknKubernetes, node_action_kube_check: bool, affected_nodes_status: AffectedNodeStatus):
|
||||
super().__init__(kubecli, node_action_kube_check, affected_nodes_status)
|
||||
self.ibmcloud = IbmCloud()
|
||||
self.node_action_kube_check = node_action_kube_check
|
||||
|
||||
def node_start_scenario(self, instance_kill_count, node, timeout):
|
||||
try:
|
||||
instance_id = self.ibmcloud.get_instance_id( node)
|
||||
affected_node = AffectedNode(node, node_id=instance_id)
|
||||
for _ in range(instance_kill_count):
|
||||
logging.info("Starting node_start_scenario injection")
|
||||
logging.info("Starting the node %s " % (node))
|
||||
|
||||
if instance_id:
|
||||
vm_started = self.ibmcloud.start_instances(instance_id)
|
||||
if vm_started:
|
||||
self.ibmcloud.wait_until_running(instance_id, timeout, affected_node)
|
||||
if self.node_action_kube_check:
|
||||
nodeaction.wait_for_ready_status(
|
||||
node, timeout, self.kubecli, affected_node
|
||||
)
|
||||
logging.info(
|
||||
"Node with instance ID: %s is in running state" % node
|
||||
)
|
||||
logging.info(
|
||||
"node_start_scenario has been successfully injected!"
|
||||
)
|
||||
else:
|
||||
logging.error(
|
||||
"Failed to find node that matched instances on ibm cloud in region"
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logging.error("Failed to start node instance. Test Failed")
|
||||
logging.error("node_start_scenario injection failed!")
|
||||
self.affected_nodes_status.affected_nodes.append(affected_node)
|
||||
|
||||
|
||||
def node_stop_scenario(self, instance_kill_count, node, timeout):
|
||||
try:
|
||||
instance_id = self.ibmcloud.get_instance_id(node)
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node, instance_id)
|
||||
logging.info("Starting node_stop_scenario injection")
|
||||
logging.info("Stopping the node %s " % (node))
|
||||
vm_stopped = self.ibmcloud.stop_instances(instance_id)
|
||||
if vm_stopped:
|
||||
self.ibmcloud.wait_until_stopped(instance_id, timeout, affected_node)
|
||||
logging.info(
|
||||
"Node with instance ID: %s is in stopped state" % node
|
||||
)
|
||||
logging.info(
|
||||
"node_stop_scenario has been successfully injected!"
|
||||
)
|
||||
except Exception as e:
|
||||
logging.error("Failed to stop node instance. Test Failed")
|
||||
logging.error("node_stop_scenario injection failed!")
|
||||
|
||||
|
||||
def node_reboot_scenario(self, instance_kill_count, node, timeout):
|
||||
try:
|
||||
instance_id = self.ibmcloud.get_instance_id(node)
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node, node_id=instance_id)
|
||||
logging.info("Starting node_reboot_scenario injection")
|
||||
logging.info("Rebooting the node %s " % (node))
|
||||
self.ibmcloud.reboot_instances(instance_id)
|
||||
self.ibmcloud.wait_until_rebooted(instance_id, timeout)
|
||||
if self.node_action_kube_check:
|
||||
nodeaction.wait_for_unknown_status(
|
||||
node, timeout, affected_node
|
||||
)
|
||||
nodeaction.wait_for_ready_status(
|
||||
node, timeout, affected_node
|
||||
)
|
||||
logging.info(
|
||||
"Node with instance ID: %s has rebooted successfully" % node
|
||||
)
|
||||
logging.info(
|
||||
"node_reboot_scenario has been successfully injected!"
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logging.error("Failed to reboot node instance. Test Failed")
|
||||
logging.error("node_reboot_scenario injection failed!")
|
||||
|
||||
|
||||
def node_terminate_scenario(self, instance_kill_count, node, timeout):
|
||||
try:
|
||||
instance_id = self.ibmcloud.get_instance_id(node)
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node, node_id=instance_id)
|
||||
logging.info(
|
||||
"Starting node_termination_scenario injection by first stopping the node"
|
||||
)
|
||||
logging.info("Deleting the node with instance ID: %s " % (node))
|
||||
self.ibmcloud.delete_instance(instance_id)
|
||||
self.ibmcloud.wait_until_deleted(node, timeout, affected_node)
|
||||
logging.info(
|
||||
"Node with instance ID: %s has been released" % node
|
||||
)
|
||||
logging.info(
|
||||
"node_terminate_scenario has been successfully injected!"
|
||||
)
|
||||
except Exception as e:
|
||||
logging.error("Failed to terminate node instance. Test Failed")
|
||||
logging.error("node_terminate_scenario injection failed!")
|
||||
|
||||
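The wait_until_* helpers in this new IBM Cloud module, like their GCP and OpenStack counterparts above, all share one shape: poll the provider status every 5 seconds until the target state or the timeout, then record the elapsed time on the AffectedNode. The generic shape, with get_status standing in for the provider-specific call:

```python
# Generic shape of the wait_until_* helpers: poll every few seconds until the
# target status or the timeout, then record the elapsed time on the AffectedNode.
# get_status is a placeholder for the provider-specific status lookup.
import time


def wait_until(get_status, target, timeout, affected_node=None, label="running", poll=5):
    start_time = time.time()
    waited = 0
    while get_status() != target:
        time.sleep(poll)
        waited += poll
        if waited >= timeout:
            return False
    if affected_node:
        # Same call the diff uses to attach the measured duration to telemetry
        affected_node.set_affected_node_status(label, time.time() - start_time)
    return True
```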
@@ -1,9 +1,12 @@
|
||||
import logging
|
||||
import time
|
||||
from multiprocessing.pool import ThreadPool
|
||||
from itertools import repeat
|
||||
|
||||
import yaml
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
from krkn_lib.models.telemetry import ScenarioTelemetry
|
||||
from krkn_lib.models.k8s import AffectedNodeStatus
|
||||
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
|
||||
from krkn_lib.utils import get_yaml_item_value, log_exception
|
||||
|
||||
@@ -19,7 +22,8 @@ from krkn.scenario_plugins.node_actions.gcp_node_scenarios import gcp_node_scena
|
||||
from krkn.scenario_plugins.node_actions.general_cloud_node_scenarios import (
|
||||
general_node_scenarios,
|
||||
)
|
||||
|
||||
from krkn.scenario_plugins.node_actions.vmware_node_scenarios import vmware_node_scenarios
|
||||
from krkn.scenario_plugins.node_actions.ibmcloud_node_scenarios import ibm_node_scenarios
|
||||
node_general = False
|
||||
|
||||
|
||||
@@ -47,48 +51,51 @@ class NodeActionsScenarioPlugin(AbstractScenarioPlugin):
|
||||
node_scenario,
|
||||
node_scenario_object,
|
||||
lib_telemetry.get_lib_kubernetes(),
|
||||
scenario_telemetry,
|
||||
)
|
||||
end_time = int(time.time())
|
||||
cerberus.get_status(krkn_config, start_time, end_time)
|
||||
except (RuntimeError, Exception) as e:
|
||||
logging.error("Node Actions exiting due to Exception %s" % e)
|
||||
return 1
|
||||
else:
|
||||
return 0
|
||||
return 0
|
||||
|
||||
def get_node_scenario_object(self, node_scenario, kubecli: KrknKubernetes):
|
||||
affected_nodes_status = AffectedNodeStatus()
|
||||
|
||||
node_action_kube_check = get_yaml_item_value(node_scenario,"kube_check",True)
|
||||
if (
|
||||
"cloud_type" not in node_scenario.keys()
|
||||
or node_scenario["cloud_type"] == "generic"
|
||||
):
|
||||
global node_general
|
||||
node_general = True
|
||||
return general_node_scenarios(kubecli)
|
||||
if node_scenario["cloud_type"] == "aws":
|
||||
return aws_node_scenarios(kubecli)
|
||||
elif node_scenario["cloud_type"] == "gcp":
|
||||
return gcp_node_scenarios(kubecli)
|
||||
elif node_scenario["cloud_type"] == "openstack":
|
||||
return general_node_scenarios(kubecli, node_action_kube_check, affected_nodes_status)
|
||||
if node_scenario["cloud_type"].lower() == "aws":
|
||||
return aws_node_scenarios(kubecli, node_action_kube_check, affected_nodes_status)
|
||||
elif node_scenario["cloud_type"].lower() == "gcp":
|
||||
return gcp_node_scenarios(kubecli, node_action_kube_check, affected_nodes_status)
|
||||
elif node_scenario["cloud_type"].lower() == "openstack":
|
||||
from krkn.scenario_plugins.node_actions.openstack_node_scenarios import (
|
||||
openstack_node_scenarios,
|
||||
)
|
||||
|
||||
return openstack_node_scenarios(kubecli)
|
||||
return openstack_node_scenarios(kubecli, node_action_kube_check, affected_nodes_status)
|
||||
elif (
|
||||
node_scenario["cloud_type"] == "azure"
|
||||
or node_scenario["cloud_type"] == "az"
|
||||
node_scenario["cloud_type"].lower() == "azure"
|
||||
or node_scenario["cloud_type"].lower() == "az"
|
||||
):
|
||||
return azure_node_scenarios(kubecli)
|
||||
return azure_node_scenarios(kubecli, node_action_kube_check, affected_nodes_status)
|
||||
elif (
|
||||
node_scenario["cloud_type"] == "alibaba"
|
||||
or node_scenario["cloud_type"] == "alicloud"
|
||||
node_scenario["cloud_type"].lower() == "alibaba"
|
||||
or node_scenario["cloud_type"].lower() == "alicloud"
|
||||
):
|
||||
from krkn.scenario_plugins.node_actions.alibaba_node_scenarios import (
|
||||
alibaba_node_scenarios,
|
||||
)
|
||||
|
||||
return alibaba_node_scenarios(kubecli)
|
||||
elif node_scenario["cloud_type"] == "bm":
|
||||
return alibaba_node_scenarios(kubecli, node_action_kube_check, affected_nodes_status)
|
||||
elif node_scenario["cloud_type"].lower() == "bm":
|
||||
from krkn.scenario_plugins.node_actions.bm_node_scenarios import (
|
||||
bm_node_scenarios,
|
||||
)
|
||||
@@ -98,9 +105,22 @@ class NodeActionsScenarioPlugin(AbstractScenarioPlugin):
|
||||
node_scenario.get("bmc_user", None),
|
||||
node_scenario.get("bmc_password", None),
|
||||
kubecli,
|
||||
node_action_kube_check,
|
||||
affected_nodes_status
|
||||
)
|
||||
elif node_scenario["cloud_type"] == "docker":
|
||||
return docker_node_scenarios(kubecli)
|
||||
elif node_scenario["cloud_type"].lower() == "docker":
|
||||
return docker_node_scenarios(kubecli,node_action_kube_check,
|
||||
affected_nodes_status)
|
||||
elif (
|
||||
node_scenario["cloud_type"].lower() == "vsphere"
|
||||
or node_scenario["cloud_type"].lower() == "vmware"
|
||||
):
|
||||
return vmware_node_scenarios(kubecli, node_action_kube_check,affected_nodes_status)
|
||||
elif (
|
||||
node_scenario["cloud_type"].lower() == "ibm"
|
||||
or node_scenario["cloud_type"].lower() == "ibmcloud"
|
||||
):
|
||||
return ibm_node_scenarios(kubecli, node_action_kube_check, affected_nodes_status)
|
||||
else:
|
||||
logging.error(
|
||||
"Cloud type "
|
||||
@@ -118,102 +138,137 @@ class NodeActionsScenarioPlugin(AbstractScenarioPlugin):
|
||||
)
|
||||
|
||||
def inject_node_scenario(
|
||||
self, action, node_scenario, node_scenario_object, kubecli: KrknKubernetes
|
||||
self, action, node_scenario, node_scenario_object, kubecli: KrknKubernetes, scenario_telemetry: ScenarioTelemetry
|
||||
):
|
||||
generic_cloud_scenarios = ("stop_kubelet_scenario", "node_crash_scenario")
|
||||
# Get the node scenario configurations
|
||||
run_kill_count = get_yaml_item_value(node_scenario, "runs", 1)
|
||||
|
||||
# Get the node scenario configurations for setting nodes
|
||||
|
||||
instance_kill_count = get_yaml_item_value(node_scenario, "instance_count", 1)
|
||||
node_name = get_yaml_item_value(node_scenario, "node_name", "")
|
||||
label_selector = get_yaml_item_value(node_scenario, "label_selector", "")
|
||||
if action == "node_stop_start_scenario":
|
||||
duration = get_yaml_item_value(node_scenario, "duration", 120)
|
||||
parallel_nodes = get_yaml_item_value(node_scenario, "parallel", False)
|
||||
|
||||
# Get the node to apply the scenario
|
||||
if node_name:
|
||||
node_name_list = node_name.split(",")
|
||||
nodes = common_node_functions.get_node_by_name(node_name_list, kubecli)
|
||||
else:
|
||||
nodes = common_node_functions.get_node(
|
||||
label_selector, instance_kill_count, kubecli
|
||||
)
|
||||
|
||||
# GCP api doesn't support multiprocessing calls, will only actually run 1
|
||||
if parallel_nodes:
|
||||
self.multiprocess_nodes(nodes, node_scenario_object, action, node_scenario)
|
||||
else:
|
||||
for single_node in nodes:
|
||||
self.run_node(single_node, node_scenario_object, action, node_scenario)
|
||||
affected_nodes_status = node_scenario_object.affected_nodes_status
|
||||
scenario_telemetry.affected_nodes.extend(affected_nodes_status.affected_nodes)
|
||||
|
||||
def multiprocess_nodes(self, nodes, node_scenario_object, action, node_scenario):
|
||||
try:
|
||||
# pool object with number of element
|
||||
pool = ThreadPool(processes=len(nodes))
|
||||
|
||||
pool.starmap(self.run_node,zip(nodes, repeat(node_scenario_object), repeat(action), repeat(node_scenario)))
|
||||
|
||||
pool.close()
|
||||
except Exception as e:
|
||||
logging.info("Error on pool multiprocessing: " + str(e))
|
||||
|
||||
|
||||
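multiprocess_nodes fans a single action out to several nodes with a thread pool; starmap plus itertools.repeat is what lets the per-node argument vary while the scenario object, action and config stay fixed. The same idea in isolation, with a stand-in worker in place of run_node:

```python
# Isolated illustration of the ThreadPool/starmap fan-out used by multiprocess_nodes.
# do_action stands in for run_node; the other arguments are repeated once per node.
from itertools import repeat
from multiprocessing.pool import ThreadPool


def do_action(node, action, timeout):
    print(f"running {action} on {node} (timeout={timeout}s)")


nodes = ["worker-0", "worker-1", "worker-2"]  # placeholder node names
pool = ThreadPool(processes=len(nodes))
pool.starmap(do_action, zip(nodes, repeat("node_reboot_scenario"), repeat(120)))
pool.close()
pool.join()
```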
def run_node(self, single_node, node_scenario_object, action, node_scenario):
|
||||
# Get the scenario specifics for running action nodes
|
||||
run_kill_count = get_yaml_item_value(node_scenario, "runs", 1)
|
||||
duration = get_yaml_item_value(node_scenario, "duration", 120)
|
||||
|
||||
timeout = get_yaml_item_value(node_scenario, "timeout", 120)
|
||||
service = get_yaml_item_value(node_scenario, "service", "")
|
||||
ssh_private_key = get_yaml_item_value(
|
||||
node_scenario, "ssh_private_key", "~/.ssh/id_rsa"
|
||||
)
|
||||
# Get the node to apply the scenario
|
||||
if node_name:
|
||||
node_name_list = node_name.split(",")
|
||||
else:
|
||||
node_name_list = [node_name]
|
||||
for single_node_name in node_name_list:
|
||||
nodes = common_node_functions.get_node(
|
||||
single_node_name, label_selector, instance_kill_count, kubecli
|
||||
generic_cloud_scenarios = ("stop_kubelet_scenario", "node_crash_scenario")
|
||||
|
||||
if node_general and action not in generic_cloud_scenarios:
|
||||
logging.info(
|
||||
"Scenario: "
|
||||
+ action
|
||||
+ " is not set up for generic cloud type, skipping action"
|
||||
)
|
||||
for single_node in nodes:
|
||||
if node_general and action not in generic_cloud_scenarios:
|
||||
logging.info(
|
||||
"Scenario: "
|
||||
+ action
|
||||
+ " is not set up for generic cloud type, skipping action"
|
||||
else:
|
||||
if action == "node_start_scenario":
|
||||
node_scenario_object.node_start_scenario(
|
||||
run_kill_count, single_node, timeout
|
||||
)
|
||||
elif action == "node_stop_scenario":
|
||||
node_scenario_object.node_stop_scenario(
|
||||
run_kill_count, single_node, timeout
|
||||
)
|
||||
elif action == "node_stop_start_scenario":
|
||||
node_scenario_object.node_stop_start_scenario(
|
||||
run_kill_count, single_node, timeout, duration
|
||||
)
|
||||
elif action == "node_termination_scenario":
|
||||
node_scenario_object.node_termination_scenario(
|
||||
run_kill_count, single_node, timeout
|
||||
)
|
||||
elif action == "node_reboot_scenario":
|
||||
node_scenario_object.node_reboot_scenario(
|
||||
run_kill_count, single_node, timeout
|
||||
)
|
||||
elif action == "node_disk_detach_attach_scenario":
|
||||
node_scenario_object.node_disk_detach_attach_scenario(
|
||||
run_kill_count, single_node, timeout, duration)
|
||||
elif action == "stop_start_kubelet_scenario":
|
||||
node_scenario_object.stop_start_kubelet_scenario(
|
||||
run_kill_count, single_node, timeout
|
||||
)
|
||||
elif action == "restart_kubelet_scenario":
|
||||
node_scenario_object.restart_kubelet_scenario(
|
||||
run_kill_count, single_node, timeout
|
||||
)
|
||||
elif action == "stop_kubelet_scenario":
|
||||
node_scenario_object.stop_kubelet_scenario(
|
||||
run_kill_count, single_node, timeout
|
||||
)
|
||||
elif action == "node_crash_scenario":
|
||||
node_scenario_object.node_crash_scenario(
|
||||
run_kill_count, single_node, timeout
|
||||
)
|
||||
elif action == "stop_start_helper_node_scenario":
|
||||
if node_scenario["cloud_type"] != "openstack":
|
||||
logging.error(
|
||||
"Scenario: " + action + " is not supported for "
|
||||
"cloud type "
|
||||
+ node_scenario["cloud_type"]
|
||||
+ ", skipping action"
|
||||
)
|
||||
else:
|
||||
if action == "node_start_scenario":
|
||||
node_scenario_object.node_start_scenario(
|
||||
run_kill_count, single_node, timeout
|
||||
)
|
||||
elif action == "node_stop_scenario":
|
||||
node_scenario_object.node_stop_scenario(
|
||||
run_kill_count, single_node, timeout
|
||||
)
|
||||
elif action == "node_stop_start_scenario":
|
||||
node_scenario_object.node_stop_start_scenario(
|
||||
run_kill_count, single_node, timeout, duration
|
||||
)
|
||||
elif action == "node_termination_scenario":
|
||||
node_scenario_object.node_termination_scenario(
|
||||
run_kill_count, single_node, timeout
|
||||
)
|
||||
elif action == "node_reboot_scenario":
|
||||
node_scenario_object.node_reboot_scenario(
|
||||
run_kill_count, single_node, timeout
|
||||
)
|
||||
elif action == "stop_start_kubelet_scenario":
|
||||
node_scenario_object.stop_start_kubelet_scenario(
|
||||
run_kill_count, single_node, timeout
|
||||
)
|
||||
elif action == "restart_kubelet_scenario":
|
||||
node_scenario_object.restart_kubelet_scenario(
|
||||
run_kill_count, single_node, timeout
|
||||
)
|
||||
elif action == "stop_kubelet_scenario":
|
||||
node_scenario_object.stop_kubelet_scenario(
|
||||
run_kill_count, single_node, timeout
|
||||
)
|
||||
elif action == "node_crash_scenario":
|
||||
node_scenario_object.node_crash_scenario(
|
||||
run_kill_count, single_node, timeout
|
||||
)
|
||||
elif action == "stop_start_helper_node_scenario":
|
||||
if node_scenario["cloud_type"] != "openstack":
|
||||
logging.error(
|
||||
"Scenario: " + action + " is not supported for "
|
||||
"cloud type "
|
||||
+ node_scenario["cloud_type"]
|
||||
+ ", skipping action"
|
||||
)
|
||||
else:
|
||||
if not node_scenario["helper_node_ip"]:
|
||||
logging.error("Helper node IP address is not provided")
|
||||
raise Exception(
|
||||
"Helper node IP address is not provided"
|
||||
)
|
||||
node_scenario_object.helper_node_stop_start_scenario(
|
||||
run_kill_count, node_scenario["helper_node_ip"], timeout
|
||||
)
|
||||
node_scenario_object.helper_node_service_status(
|
||||
node_scenario["helper_node_ip"],
|
||||
service,
|
||||
ssh_private_key,
|
||||
timeout,
|
||||
)
|
||||
else:
|
||||
logging.info(
|
||||
"There is no node action that matches %s, skipping scenario"
|
||||
% action
|
||||
if not node_scenario["helper_node_ip"]:
|
||||
logging.error("Helper node IP address is not provided")
|
||||
raise Exception(
|
||||
"Helper node IP address is not provided"
|
||||
)
|
||||
node_scenario_object.helper_node_stop_start_scenario(
|
||||
run_kill_count, node_scenario["helper_node_ip"], timeout
|
||||
)
|
||||
node_scenario_object.helper_node_service_status(
|
||||
node_scenario["helper_node_ip"],
|
||||
service,
|
||||
ssh_private_key,
|
||||
timeout,
|
||||
)
|
||||
elif action == "node_block_scenario":
|
||||
node_scenario_object.node_block_scenario(
|
||||
run_kill_count, single_node, timeout, duration
|
||||
)
|
||||
else:
|
||||
logging.info(
|
||||
"There is no node action that matches %s, skipping scenario"
|
||||
% action
|
||||
)
|
||||
|
||||
|
||||
def get_scenario_types(self) -> list[str]:
|
||||
return ["node_scenarios"]
|
||||
|
||||
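For reference, the plugin reads each per-scenario setting with get_yaml_item_value, so a node_scenarios entry that exercises the new options would carry roughly the following keys once parsed. This is a sketch of the parsed dict, not a copy of a shipped config: the label value is a placeholder and the action-list key is assumed from the surrounding plugin code.

```python
# Parsed form of a node_scenarios config entry exercising the new options.
# Defaults shown in comments mirror the get_yaml_item_value calls above.
node_scenario = {
    "actions": ["node_stop_start_scenario"],       # action list key assumed
    "node_name": "",                               # empty: fall back to label_selector
    "label_selector": "node-role.kubernetes.io/worker=",  # placeholder label
    "instance_count": 2,                           # default 1
    "runs": 1,                                     # default 1
    "timeout": 360,                                # default 120
    "duration": 120,                               # default 120
    "cloud_type": "gcp",                           # matched case-insensitively since this change
    "parallel": True,                              # fan out over nodes via ThreadPool (default False)
    "kube_check": True,                            # run the wait_for_*_status checks (default True)
}
```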
@@ -7,15 +7,14 @@ from krkn.scenario_plugins.node_actions.abstract_node_scenarios import (
|
||||
abstract_node_scenarios,
|
||||
)
|
||||
from krkn_lib.k8s import KrknKubernetes
|
||||
|
||||
from krkn_lib.models.k8s import AffectedNode, AffectedNodeStatus
|
||||
|
||||
class OPENSTACKCLOUD:
|
||||
def __init__(self):
|
||||
self.Wait = 30
|
||||
|
||||
# Get the instance ID of the node
|
||||
def get_instance_id(self, node):
|
||||
openstack_node_ip = nodeaction.get_node_ip(node)
|
||||
def get_instance_id(self, openstack_node_ip):
|
||||
openstack_node_name = self.get_openstack_nodename(openstack_node_ip)
|
||||
return openstack_node_name
|
||||
|
||||
@@ -56,12 +55,22 @@ class OPENSTACKCLOUD:
|
||||
raise RuntimeError()
|
||||
|
||||
# Wait until the node instance is running
|
||||
def wait_until_running(self, node, timeout):
|
||||
return self.get_instance_status(node, "ACTIVE", timeout)
|
||||
def wait_until_running(self, node, timeout, affected_node):
|
||||
start_time = time.time()
|
||||
instance_status= self.get_instance_status(node, "ACTIVE", timeout)
|
||||
end_time = time.time()
|
||||
if affected_node:
|
||||
affected_node.set_affected_node_status("running", end_time - start_time)
|
||||
return instance_status
|
||||
|
||||
# Wait until the node instance is stopped
|
||||
def wait_until_stopped(self, node, timeout):
|
||||
return self.get_instance_status(node, "SHUTOFF", timeout)
|
||||
def wait_until_stopped(self, node, timeout, affected_node):
|
||||
start_time = time.time()
|
||||
instance_status = self.get_instance_status(node, "SHUTOFF", timeout)
|
||||
end_time = time.time()
|
||||
if affected_node:
|
||||
affected_node.set_affected_node_status("stopped", end_time - start_time)
|
||||
return instance_status
|
||||
|
||||
# Get instance status
|
||||
def get_instance_status(self, node, expected_status, timeout):
|
||||
@@ -107,19 +116,24 @@ class OPENSTACKCLOUD:
|
||||
|
||||
# krkn_lib
|
||||
class openstack_node_scenarios(abstract_node_scenarios):
|
||||
def __init__(self, kubecli: KrknKubernetes):
|
||||
def __init__(self, kubecli: KrknKubernetes,node_action_kube_check: bool, affected_nodes_status: AffectedNodeStatus ):
|
||||
super().__init__(kubecli, node_action_kube_check, affected_nodes_status)
|
||||
self.openstackcloud = OPENSTACKCLOUD()
|
||||
|
||||
self.node_action_kube_check = node_action_kube_check
|
||||
|
||||
# Node scenario to start the node
|
||||
def node_start_scenario(self, instance_kill_count, node, timeout):
|
||||
for _ in range(instance_kill_count):
|
||||
affected_node = AffectedNode(node)
|
||||
try:
|
||||
logging.info("Starting node_start_scenario injection")
|
||||
logging.info("Starting the node %s" % (node))
|
||||
openstack_node_name = self.openstackcloud.get_instance_id(node)
|
||||
openstack_node_ip = self.kubecli.get_node_ip(node)
|
||||
openstack_node_name = self.openstackcloud.get_instance_id(openstack_node_ip)
|
||||
self.openstackcloud.start_instances(openstack_node_name)
|
||||
self.openstackcloud.wait_until_running(openstack_node_name, timeout)
|
||||
nodeaction.wait_for_ready_status(node, timeout, self.kubecli)
|
||||
self.openstackcloud.wait_until_running(openstack_node_name, timeout, affected_node)
|
||||
if self.node_action_kube_check:
|
||||
nodeaction.wait_for_ready_status(node, timeout, self.kubecli, affected_node)
|
||||
logging.info("Node with instance ID: %s is in running state" % (node))
|
||||
logging.info("node_start_scenario has been successfully injected!")
|
||||
except Exception as e:
|
||||
@@ -130,18 +144,22 @@ class openstack_node_scenarios(abstract_node_scenarios):
                 logging.error("node_start_scenario injection failed!")

                 raise RuntimeError()
+            self.affected_nodes_status.affected_nodes.append(affected_node)

     # Node scenario to stop the node
     def node_stop_scenario(self, instance_kill_count, node, timeout):
         for _ in range(instance_kill_count):
+            affected_node = AffectedNode(node)
             try:
                 logging.info("Starting node_stop_scenario injection")
                 logging.info("Stopping the node %s " % (node))
-                openstack_node_name = self.openstackcloud.get_instance_id(node)
+                openstack_node_ip = self.kubecli.get_node_ip(node)
+                openstack_node_name = self.openstackcloud.get_instance_id(openstack_node_ip)
                 self.openstackcloud.stop_instances(openstack_node_name)
-                self.openstackcloud.wait_until_stopped(openstack_node_name, timeout)
+                self.openstackcloud.wait_until_stopped(openstack_node_name, timeout, affected_node)
                 logging.info("Node with instance name: %s is in stopped state" % (node))
-                nodeaction.wait_for_ready_status(node, timeout, self.kubecli)
+                if self.node_action_kube_check:
+                    nodeaction.wait_for_not_ready_status(node, timeout, self.kubecli, affected_node)
             except Exception as e:
                 logging.error(
                     "Failed to stop node instance. Encountered following exception: %s. "
@@ -150,17 +168,21 @@ class openstack_node_scenarios(abstract_node_scenarios):
                 logging.error("node_stop_scenario injection failed!")

                 raise RuntimeError()
+            self.affected_nodes_status.affected_nodes.append(affected_node)

     # Node scenario to reboot the node
     def node_reboot_scenario(self, instance_kill_count, node, timeout):
         for _ in range(instance_kill_count):
+            affected_node = AffectedNode(node)
             try:
                 logging.info("Starting node_reboot_scenario injection")
                 logging.info("Rebooting the node %s" % (node))
-                openstack_node_name = self.openstackcloud.get_instance_id(node)
+                openstack_node_ip = self.kubecli.get_node_ip(node)
+                openstack_node_name = self.openstackcloud.get_instance_id(openstack_node_ip)
                 self.openstackcloud.reboot_instances(openstack_node_name)
-                nodeaction.wait_for_unknown_status(node, timeout, self.kubecli)
-                nodeaction.wait_for_ready_status(node, timeout, self.kubecli)
+                if self.node_action_kube_check:
+                    nodeaction.wait_for_unknown_status(node, timeout, self.kubecli, affected_node)
+                    nodeaction.wait_for_ready_status(node, timeout, self.kubecli, affected_node)
                 logging.info("Node with instance name: %s has been rebooted" % (node))
                 logging.info("node_reboot_scenario has been successfuly injected!")
             except Exception as e:
@@ -171,10 +193,12 @@ class openstack_node_scenarios(abstract_node_scenarios):
                 logging.error("node_reboot_scenario injection failed!")

                 raise RuntimeError()
+            self.affected_nodes_status.affected_nodes.append(affected_node)

     # Node scenario to start the node
     def helper_node_start_scenario(self, instance_kill_count, node_ip, timeout):
         for _ in range(instance_kill_count):
+            affected_node = AffectedNode(node_ip)
             try:
                 logging.info("Starting helper_node_start_scenario injection")
                 openstack_node_name = self.openstackcloud.get_openstack_nodename(
@@ -182,7 +206,7 @@ class openstack_node_scenarios(abstract_node_scenarios):
                 )
                 logging.info("Starting the helper node %s" % (openstack_node_name))
                 self.openstackcloud.start_instances(openstack_node_name)
-                self.openstackcloud.wait_until_running(openstack_node_name, timeout)
+                self.openstackcloud.wait_until_running(openstack_node_name, timeout, affected_node)
                 logging.info("Helper node with IP: %s is in running state" % (node_ip))
                 logging.info("node_start_scenario has been successfully injected!")
             except Exception as e:
@@ -193,10 +217,12 @@ class openstack_node_scenarios(abstract_node_scenarios):
                 logging.error("helper_node_start_scenario injection failed!")

                 raise RuntimeError()
+            self.affected_nodes_status.affected_nodes.append(affected_node)

     # Node scenario to stop the node
     def helper_node_stop_scenario(self, instance_kill_count, node_ip, timeout):
         for _ in range(instance_kill_count):
+            affected_node = AffectedNode(node_ip)
             try:
                 logging.info("Starting helper_node_stop_scenario injection")
                 openstack_node_name = self.openstackcloud.get_openstack_nodename(
@@ -204,7 +230,7 @@ class openstack_node_scenarios(abstract_node_scenarios):
                 )
                 logging.info("Stopping the helper node %s " % (openstack_node_name))
                 self.openstackcloud.stop_instances(openstack_node_name)
-                self.openstackcloud.wait_until_stopped(openstack_node_name, timeout)
+                self.openstackcloud.wait_until_stopped(openstack_node_name, timeout, affected_node)
                 logging.info("Helper node with IP: %s is in stopped state" % (node_ip))
             except Exception as e:
                 logging.error(
@@ -214,6 +240,7 @@ class openstack_node_scenarios(abstract_node_scenarios):
                 logging.error("helper_node_stop_scenario injection failed!")

                 raise RuntimeError()
+            self.affected_nodes_status.affected_nodes.append(affected_node)

     def helper_node_service_status(self, node_ip, service, ssh_private_key, timeout):
         try:
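Continuing the hypothetical wiring sketch above: each scenario iteration creates an AffectedNode, the wait helpers stamp recovery timings onto it, and the object is appended to the shared affected_nodes_status. A hedged sketch of how a caller might read the collected entries back after a run; only the affected_nodes attribute appears in this diff, everything else here is illustrative.

# Illustrative only: inject one reboot, then inspect what the run collected.
scenarios.node_reboot_scenario(instance_kill_count=1, node="worker-0", timeout=300)

for affected_node in affected_nodes_status.affected_nodes:
    # The diff shows the scenarios appending AffectedNode objects to this list;
    # the exact fields carrying the recorded durations are not visible here.
    print(affected_node)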
Some files were not shown because too many files have changed in this diff.