Mirror of https://github.com/krkn-chaos/krkn.git (synced 2026-02-26 07:43:57 +00:00)
Compare commits: feature/co...cncf_incub (2 commits)

- 99b14c5652
- e377faa0e3
.github/PULL_REQUEST_TEMPLATE.md (vendored, 40 changed lines)
@@ -1,47 +1,27 @@
# Type of change
## Type of change

- [ ] Refactor
- [ ] New feature
- [ ] Bug fix
- [ ] Optimization

# Description
<-- Provide a brief description of the changes made in this PR. -->
## Description
<!-- Provide a brief description of the changes made in this PR. -->

## Related Tickets & Documents
If no related issue, please create one and start the converasation on wants of

- Related Issue #:
- Closes #:
- Related Issue #
- Closes #

# Documentation
## Documentation
- [ ] **Is documentation needed for this update?**

If checked, a documentation PR must be created and merged in the [website repository](https://github.com/krkn-chaos/website/).

## Related Documentation PR (if applicable)
<-- Add the link to the corresponding documentation PR in the website repository -->
<!-- Add the link to the corresponding documentation PR in the website repository -->

# Checklist before requesting a review
[ ] Ensure the changes and proposed solution have been discussed in the relevant issue and have received acknowledgment from the community or maintainers. See [contributing guidelines](https://krkn-chaos.dev/docs/contribution-guidelines/)
See [testing your changes](https://krkn-chaos.dev/docs/developers-guide/testing-changes/) and run on any Kubernetes or OpenShift cluster to validate your changes
- [ ] I have performed a self-review of my code by running krkn and specific scenario
- [ ] If it is a core feature, I have added thorough unit tests with above 80% coverage
## Checklist before requesting a review

*REQUIRED*:
Description of combination of tests performed and output of run

```bash
python run_kraken.py
...
<---insert test results output--->
```

OR


```bash
python -m coverage run -a -m unittest discover -s tests -v
...
<---insert test results output--->
```
- [ ] I have performed a self-review of my code.
- [ ] If it is a core feature, I have added thorough tests.
.github/workflows/stale.yml (vendored, 52 changed lines)
@@ -1,52 +0,0 @@
name: Manage Stale Issues and Pull Requests

on:
  schedule:
    # Run daily at 1:00 AM UTC
    - cron: '0 1 * * *'
  workflow_dispatch:

permissions:
  issues: write
  pull-requests: write

jobs:
  stale:
    name: Mark and Close Stale Issues and PRs
    runs-on: ubuntu-latest
    steps:
      - name: Mark and close stale issues and PRs
        uses: actions/stale@v9
        with:
          days-before-issue-stale: 60
          days-before-issue-close: 14
          stale-issue-label: 'stale'
          stale-issue-message: |
            This issue has been automatically marked as stale because it has not had any activity in the last 60 days.
            It will be closed in 14 days if no further activity occurs.
            If this issue is still relevant, please leave a comment or remove the stale label.
            Thank you for your contributions to krkn!
          close-issue-message: |
            This issue has been automatically closed due to inactivity.
            If you believe this issue is still relevant, please feel free to reopen it or create a new issue with updated information.
            Thank you for your understanding!
          close-issue-reason: 'not_planned'

          days-before-pr-stale: 90
          days-before-pr-close: 14
          stale-pr-label: 'stale'
          stale-pr-message: |
            This pull request has been automatically marked as stale because it has not had any activity in the last 90 days.
            It will be closed in 14 days if no further activity occurs.
            If this PR is still relevant, please rebase it, address any pending reviews, or leave a comment.
            Thank you for your contributions to krkn!
          close-pr-message: |
            This pull request has been automatically closed due to inactivity.
            If you believe this PR is still relevant, please feel free to reopen it or create a new pull request with updated changes.
            Thank you for your understanding!

          # Exempt labels
          exempt-issue-labels: 'bug,enhancement,good first issue'
          exempt-pr-labels: 'pending discussions,hold'

          remove-stale-when-updated: true
.github/workflows/tests.yml (vendored, 59 changed lines)
@@ -32,14 +32,13 @@ jobs:
      - name: Install Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.11'
          python-version: '3.9'
          architecture: 'x64'
      - name: Install environment
        run: |
          sudo apt-get install build-essential python3-dev
          pip install --upgrade pip
          pip install -r requirements.txt
          pip install coverage

      - name: Deploy test workloads
        run: |
@@ -68,39 +67,38 @@ jobs:
          kubectl wait --for=condition=ready pod -l scenario=time-skew --timeout=300s
          kubectl apply -f CI/templates/service_hijacking.yaml
          kubectl wait --for=condition=ready pod -l "app.kubernetes.io/name=proxy" --timeout=300s
          kubectl apply -f CI/legacy/scenarios/volume_scenario.yaml
          kubectl wait --for=condition=ready pod kraken-test-pod -n kraken --timeout=300s
      - name: Get Kind nodes
        run: |
          kubectl get nodes --show-labels=true
      # Pull request only steps
      - name: Run unit tests
        if: github.event_name == 'pull_request'
        run: python -m coverage run -a -m unittest discover -s tests -v

      - name: Setup Functional Tests
      - name: Setup Pull Request Functional Tests
        if: |
          github.event_name == 'pull_request'
        run: |
          yq -i '.kraken.port="8081"' CI/config/common_test_config.yaml
          yq -i '.kraken.signal_address="0.0.0.0"' CI/config/common_test_config.yaml
          yq -i '.kraken.performance_monitoring="localhost:9090"' CI/config/common_test_config.yaml
          yq -i '.elastic.elastic_port=9200' CI/config/common_test_config.yaml
          yq -i '.elastic.elastic_url="https://localhost"' CI/config/common_test_config.yaml
          yq -i '.elastic.enable_elastic=False' CI/config/common_test_config.yaml
          yq -i '.elastic.password="${{env.ELASTIC_PASSWORD}}"' CI/config/common_test_config.yaml
          yq -i '.performance_monitoring.prometheus_url="http://localhost:9090"' CI/config/common_test_config.yaml
          echo "test_service_hijacking" > ./CI/tests/functional_tests
          echo "test_app_outages" >> ./CI/tests/functional_tests
          echo "test_container" >> ./CI/tests/functional_tests
          echo "test_cpu_hog" >> ./CI/tests/functional_tests
          echo "test_customapp_pod" >> ./CI/tests/functional_tests
          echo "test_io_hog" >> ./CI/tests/functional_tests
          echo "test_memory_hog" >> ./CI/tests/functional_tests
          echo "test_namespace" >> ./CI/tests/functional_tests
          echo "test_net_chaos" >> ./CI/tests/functional_tests
          echo "test_node" >> ./CI/tests/functional_tests
          echo "test_container" >> ./CI/tests/functional_tests
          echo "test_pod" >> ./CI/tests/functional_tests
          echo "test_pod_error" >> ./CI/tests/functional_tests
          echo "test_service_hijacking" >> ./CI/tests/functional_tests
          echo "test_customapp_pod" >> ./CI/tests/functional_tests
          echo "test_namespace" >> ./CI/tests/functional_tests
          echo "test_net_chaos" >> ./CI/tests/functional_tests
          echo "test_time" >> ./CI/tests/functional_tests
          echo "test_cpu_hog" >> ./CI/tests/functional_tests
          echo "test_memory_hog" >> ./CI/tests/functional_tests
          echo "test_io_hog" >> ./CI/tests/functional_tests
          echo "test_pod_network_filter" >> ./CI/tests/functional_tests
          echo "test_pod_server" >> ./CI/tests/functional_tests
          echo "test_time" >> ./CI/tests/functional_tests
          # echo "test_pvc" >> ./CI/tests/functional_tests

      # Push on main only steps + all other functional to collect coverage
      # for the badge
@@ -114,9 +112,30 @@ jobs:
      - name: Setup Post Merge Request Functional Tests
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        run: |
          yq -i '.kraken.port="8081"' CI/config/common_test_config.yaml
          yq -i '.kraken.signal_address="0.0.0.0"' CI/config/common_test_config.yaml
          yq -i '.kraken.performance_monitoring="localhost:9090"' CI/config/common_test_config.yaml
          yq -i '.elastic.enable_elastic=False' CI/config/common_test_config.yaml
          yq -i '.elastic.password="${{env.ELASTIC_PASSWORD}}"' CI/config/common_test_config.yaml
          yq -i '.elastic.elastic_port=9200' CI/config/common_test_config.yaml
          yq -i '.elastic.elastic_url="https://localhost"' CI/config/common_test_config.yaml
          yq -i '.performance_monitoring.prometheus_url="http://localhost:9090"' CI/config/common_test_config.yaml
          yq -i '.telemetry.username="${{secrets.TELEMETRY_USERNAME}}"' CI/config/common_test_config.yaml
          yq -i '.telemetry.password="${{secrets.TELEMETRY_PASSWORD}}"' CI/config/common_test_config.yaml
          echo "test_telemetry" >> ./CI/tests/functional_tests
          echo "test_telemetry" > ./CI/tests/functional_tests
          echo "test_service_hijacking" >> ./CI/tests/functional_tests
          echo "test_app_outages" >> ./CI/tests/functional_tests
          echo "test_container" >> ./CI/tests/functional_tests
          echo "test_pod" >> ./CI/tests/functional_tests
          echo "test_customapp_pod" >> ./CI/tests/functional_tests
          echo "test_namespace" >> ./CI/tests/functional_tests
          echo "test_net_chaos" >> ./CI/tests/functional_tests
          echo "test_time" >> ./CI/tests/functional_tests
          echo "test_cpu_hog" >> ./CI/tests/functional_tests
          echo "test_memory_hog" >> ./CI/tests/functional_tests
          echo "test_io_hog" >> ./CI/tests/functional_tests
          echo "test_pod_network_filter" >> ./CI/tests/functional_tests

      # Final common steps
      - name: Run Functional tests
        env:
@@ -182,7 +201,7 @@ jobs:
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.11'
          python-version: 3.9
      - name: Copy badge on GitHub Page Repo
        env:
          COLOR: yellow
@@ -2,10 +2,6 @@ kraken:
  distribution: kubernetes # Distribution can be kubernetes or openshift.
  kubeconfig_path: ~/.kube/config # Path to kubeconfig.
  exit_on_failure: False # Exit when a post action scenario fails.
  publish_kraken_status: True # Can be accessed at http://0.0.0.0:8081
  signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer docs/signal.md for more details
  signal_address: 0.0.0.0 # Signal listening address
  port: 8081 # Signal port
  auto_rollback: True # Enable auto rollback for scenarios.
  rollback_versions_directory: /tmp/kraken-rollback # Directory to store rollback version files.
  chaos_scenarios: # List of policies/chaos scenarios to load.
@@ -45,31 +45,6 @@ metadata:
  name: kraken-test-pod
  namespace: kraken
spec:
  securityContext:
    fsGroup: 1001
  # initContainer to fix permissions on the mounted volume
  initContainers:
    - name: fix-permissions
      image: 'quay.io/centos7/httpd-24-centos7:centos7'
      command:
        - sh
        - -c
        - |
          echo "Setting up permissions for /home/kraken..."
          # Create the directory if it doesn't exist
          mkdir -p /home/kraken
          # Set ownership to user 1001 and group 1001
          chown -R 1001:1001 /home/kraken
          # Set permissions to allow read/write
          chmod -R 755 /home/kraken
          rm -rf /home/kraken/*
          echo "Permissions fixed. Current state:"
          ls -la /home/kraken
      volumeMounts:
        - mountPath: "/home/kraken"
          name: kraken-test-pv
      securityContext:
        runAsUser: 0 # Run as root to fix permissions
  volumes:
    - name: kraken-test-pv
      persistentVolumeClaim:
@@ -77,13 +52,8 @@ spec:
  containers:
    - name: kraken-test-container
      image: 'quay.io/centos7/httpd-24-centos7:centos7'
      securityContext:
        runAsUser: 1001
        runAsNonRoot: true
        allowPrivilegeEscalation: false
        capabilities:
          drop:
            - ALL
      volumeMounts:
        - mountPath: "/home/kraken"
        - mountPath: "/home/krake-dir/"
          name: kraken-test-pv
      securityContext:
        privileged: true
@@ -16,10 +16,8 @@ function functional_test_container_crash {
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/container_config.yaml

  python3 -m coverage run -a run_kraken.py -c CI/config/container_config.yaml -d True
  python3 -m coverage run -a run_kraken.py -c CI/config/container_config.yaml
  echo "Container scenario test: Success"

  kubectl get pods -n kube-system -l component=etcd
}

functional_test_container_crash
@@ -11,7 +11,7 @@ function functional_test_customapp_pod_node_selector {
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/customapp_pod_config.yaml

  python3 -m coverage run -a run_kraken.py -c CI/config/customapp_pod_config.yaml -d True
  python3 -m coverage run -a run_kraken.py -c CI/config/customapp_pod_config.yaml
  echo "Pod disruption with node_label_selector test: Success"
}
@@ -1,18 +0,0 @@
uset -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT

function functional_test_node_stop_start {
  export scenario_type="node_scenarios"
  export scenario_file="scenarios/kind/node_scenarios_example.yml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/node_config.yaml
  cat CI/config/node_config.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/node_config.yaml
  echo "Node Stop/Start scenario test: Success"
}

functional_test_node_stop_start
@@ -10,11 +10,9 @@ function functional_test_pod_crash {
  export scenario_file="scenarios/kind/pod_etcd.yml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/pod_config.yaml

  cat CI/config/pod_config.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/pod_config.yaml
  echo "Pod disruption scenario test: Success"
  date
  kubectl get pods -n kube-system -l component=etcd -o yaml
}

functional_test_pod_crash
@@ -1,28 +0,0 @@

source CI/tests/common.sh

trap error ERR
trap finish EXIT

function functional_test_pod_error {
  export scenario_type="pod_disruption_scenarios"
  export scenario_file="scenarios/kind/pod_etcd.yml"
  export post_config=""
  yq -i '.[0].config.kill=5' scenarios/kind/pod_etcd.yml
  envsubst < CI/config/common_test_config.yaml > CI/config/pod_config.yaml
  cat CI/config/pod_config.yaml

  cat scenarios/kind/pod_etcd.yml
  python3 -m coverage run -a run_kraken.py -c CI/config/pod_config.yaml

  ret=$?
  echo "\n\nret $ret"
  if [[ $ret -ge 1 ]]; then
    echo "Pod disruption error scenario test: Success"
  else
    echo "Pod disruption error scenario test: Failure"
    exit 1
  fi
}

functional_test_pod_error
@@ -1,35 +0,0 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT

function functional_test_pod_server {
  export scenario_type="pod_disruption_scenarios"
  export scenario_file="scenarios/kind/pod_etcd.yml"
  export post_config=""

  envsubst < CI/config/common_test_config.yaml > CI/config/pod_config.yaml
  yq -i '.[0].config.kill=1' scenarios/kind/pod_etcd.yml

  yq -i '.tunings.daemon_mode=True' CI/config/pod_config.yaml
  cat CI/config/pod_config.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/pod_config.yaml &
  sleep 15
  curl -X POST http:/0.0.0.0:8081/STOP

  wait

  yq -i '.kraken.signal_state="PAUSE"' CI/config/pod_config.yaml
  yq -i '.tunings.daemon_mode=False' CI/config/pod_config.yaml
  cat CI/config/pod_config.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/pod_config.yaml &
  sleep 5
  curl -X POST http:/0.0.0.0:8081/RUN
  wait

  echo "Pod disruption with server scenario test: Success"
}

functional_test_pod_server
@@ -1,18 +0,0 @@
set -xeEo pipefail

source CI/tests/common.sh

trap error ERR
trap finish EXIT

function functional_test_pvc_fill {
  export scenario_type="pvc_scenarios"
  export scenario_file="scenarios/kind/pvc_scenario.yaml"
  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/pvc_config.yaml
  cat CI/config/pvc_config.yaml
  python3 -m coverage run -a run_kraken.py -c CI/config/pvc_config.yaml --debug True
  echo "PVC Fill scenario test: Success"
}

functional_test_pvc_fill
@@ -18,8 +18,9 @@ function functional_test_telemetry {
  yq -i '.performance_monitoring.prometheus_url="http://localhost:9090"' CI/config/common_test_config.yaml
  yq -i '.telemetry.run_tag=env(RUN_TAG)' CI/config/common_test_config.yaml

  export scenario_type="pod_disruption_scenarios"
  export scenario_file="scenarios/kind/pod_etcd.yml"
  export scenario_type="hog_scenarios"

  export scenario_file="scenarios/kube/cpu-hog.yml"

  export post_config=""
  envsubst < CI/config/common_test_config.yaml > CI/config/telemetry.yaml
CLAUDE.md (273 changed lines)
@@ -1,273 +0,0 @@
# CLAUDE.md - Krkn Chaos Engineering Framework

## Project Overview

Krkn (Kraken) is a chaos engineering tool for Kubernetes/OpenShift clusters. It injects deliberate failures to validate cluster resilience. Plugin-based architecture with multi-cloud support (AWS, Azure, GCP, IBM Cloud, VMware, Alibaba, OpenStack).

## Repository Structure

```
krkn/
├── krkn/
│   ├── scenario_plugins/  # Chaos scenario plugins (pod, node, network, hogs, etc.)
│   ├── utils/             # Utility functions
│   ├── rollback/          # Rollback management
│   ├── prometheus/        # Prometheus integration
│   └── cerberus/          # Health monitoring
├── tests/                 # Unit tests (unittest framework)
├── scenarios/             # Example scenario configs (openshift/, kube/, kind/)
├── config/                # Configuration files
└── CI/                    # CI/CD test scripts
```

## Quick Start

```bash
# Setup (ALWAYS use virtual environment)
python3 -m venv venv
source venv/bin/activate
pip install -r requirements.txt

# Run Krkn
python run_kraken.py --config config/config.yaml

# Note: Scenarios are specified in config.yaml under kraken.chaos_scenarios
# There is no --scenario flag; edit config/config.yaml to select scenarios

# Run tests
python -m unittest discover -s tests -v
python -m coverage run -a -m unittest discover -s tests -v
```

## Critical Requirements

### Python Environment
- **Python 3.9+** required
- **NEVER install packages globally** - always use virtual environment
- **CRITICAL**: `docker` must be <7.0 and `requests` must be <2.32 (Unix socket compatibility)

### Key Dependencies
- **krkn-lib** (5.1.13): Core library for Kubernetes/OpenShift operations
- **kubernetes** (34.1.0): Kubernetes Python client
- **docker** (<7.0), **requests** (<2.32): DO NOT upgrade without verifying compatibility
- Cloud SDKs: boto3 (AWS), azure-mgmt-* (Azure), google-cloud-compute (GCP), ibm_vpc (IBM), pyVmomi (VMware)
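Expressed as a `requirements.txt` excerpt, those pins look like this (an illustrative sketch reflecting only the versions listed above, not the project's full dependency file):

```
# requirements.txt (illustrative excerpt; the real file pins many more packages)
krkn-lib==5.1.13
kubernetes==34.1.0
docker<7.0      # Unix socket compatibility - do not raise without verifying
requests<2.32   # Unix socket compatibility - do not raise without verifying
```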
## Plugin Architecture (CRITICAL)

**Strictly enforced naming conventions:**

### Naming Rules
- **Module files**: Must end with `_scenario_plugin.py` and use snake_case
  - Example: `pod_disruption_scenario_plugin.py`
- **Class names**: Must be CamelCase and end with `ScenarioPlugin`
  - Example: `PodDisruptionScenarioPlugin`
  - Must match module filename (snake_case ↔ CamelCase)
- **Directory structure**: Plugin dirs CANNOT contain "scenario" or "plugin"
  - Location: `krkn/scenario_plugins/<plugin_name>/`
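The snake_case ↔ CamelCase pairing can be checked mechanically; a small sketch of the mapping (illustrative only, not the factory's actual code):

```python
# Illustrative only: derives the class name the factory would expect
# from a plugin module filename. The real check lives in krkn's plugin factory.
def expected_class_name(module_file: str) -> str:
    stem = module_file.removesuffix(".py")  # needs Python 3.9+
    return "".join(part.capitalize() for part in stem.split("_"))

# "pod_disruption_scenario_plugin.py" -> "PodDisruptionScenarioPlugin"
assert expected_class_name("pod_disruption_scenario_plugin.py") == "PodDisruptionScenarioPlugin"
```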
### Plugin Implementation
Every plugin MUST:
1. Extend `AbstractScenarioPlugin`
2. Implement `run()` method
3. Implement `get_scenario_types()` method

```python
from krkn.scenario_plugins import AbstractScenarioPlugin

class PodDisruptionScenarioPlugin(AbstractScenarioPlugin):
    def run(self, config, scenarios_list, kubeconfig_path, wait_duration):
        pass

    def get_scenario_types(self):
        return ["pod_scenarios", "pod_outage"]
```

### Creating a New Plugin
1. Create directory: `krkn/scenario_plugins/<plugin_name>/`
2. Create module: `<plugin_name>_scenario_plugin.py`
3. Create class: `<PluginName>ScenarioPlugin` extending `AbstractScenarioPlugin`
4. Implement `run()` and `get_scenario_types()`
5. Create unit test: `tests/test_<plugin_name>_scenario_plugin.py`
6. Add example scenario: `scenarios/<platform>/<scenario>.yaml`

**DO NOT**: Violate naming conventions (factory will reject), include "scenario"/"plugin" in directory names, create plugins without tests.

## Testing

### Unit Tests
```bash
# Run all tests
python -m unittest discover -s tests -v

# Specific test
python -m unittest tests.test_pod_disruption_scenario_plugin

# With coverage
python -m coverage run -a -m unittest discover -s tests -v
python -m coverage html
```

**Test requirements:**
- Naming: `test_<module>_scenario_plugin.py`
- Mock external dependencies (Kubernetes API, cloud providers)
- Test success, failure, and edge cases
- Keep tests isolated and independent

### Functional Tests
Located in `CI/tests/`. Can be run locally on a kind cluster with Prometheus and Elasticsearch set up.

**Setup for local testing:**
1. Deploy Prometheus and Elasticsearch on your kind cluster:
   - Prometheus setup: https://krkn-chaos.dev/docs/developers-guide/testing-changes/#prometheus
   - Elasticsearch setup: https://krkn-chaos.dev/docs/developers-guide/testing-changes/#elasticsearch

2. Or disable monitoring features in `config/config.yaml`:
   ```yaml
   performance_monitoring:
     enable_alerts: False
     enable_metrics: False
     check_critical_alerts: False
   ```

**Note:** Functional tests run automatically in CI with full monitoring enabled.

## Cloud Provider Implementations

Node chaos scenarios are cloud-specific. Each in `krkn/scenario_plugins/node_actions/<provider>_node_scenarios.py`:
- AWS, Azure, GCP, IBM Cloud, VMware, Alibaba, OpenStack, Bare Metal

Implement: stop, start, reboot, terminate instances.

**When modifying**: Maintain consistency with other providers, handle API errors, add logging, update tests.

### Adding Cloud Provider Support
1. Create: `krkn/scenario_plugins/node_actions/<provider>_node_scenarios.py`
2. Extend: `abstract_node_scenarios.AbstractNodeScenarios`
3. Implement: `stop_instances`, `start_instances`, `reboot_instances`, `terminate_instances`
4. Add SDK to `requirements.txt`
5. Create unit test with mocked SDK
6. Add example scenario: `scenarios/openshift/<provider>_node_scenarios.yml`
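A minimal sketch of steps 1-3 for a hypothetical provider (names and signatures are assumptions for illustration; check `abstract_node_scenarios.py` for the actual base class, which the code diffs later on this page show as the snake_case class `abstract_node_scenarios` with `node_*_scenario` methods):

```python
# krkn/scenario_plugins/node_actions/example_node_scenarios.py (hypothetical)
import logging

# Assumed import path; the base class name in the repo may differ from the
# docs above (the diffs below use the snake_case class abstract_node_scenarios).
from krkn.scenario_plugins.node_actions.abstract_node_scenarios import (
    abstract_node_scenarios,
)


class example_node_scenarios(abstract_node_scenarios):
    def node_start_scenario(self, instance_kill_count, node, timeout):
        for _ in range(instance_kill_count):
            logging.info("Starting node %s via the provider SDK", node)
            # provider_sdk.start_instance(...) would go here, followed by a
            # wait until the instance reports running within `timeout`

    def node_stop_scenario(self, instance_kill_count, node, timeout):
        for _ in range(instance_kill_count):
            logging.info("Stopping node %s via the provider SDK", node)

    def node_reboot_scenario(self, instance_kill_count, node, timeout):
        for _ in range(instance_kill_count):
            logging.info("Rebooting node %s via the provider SDK", node)

    def node_termination_scenario(self, instance_kill_count, node, timeout):
        for _ in range(instance_kill_count):
            logging.info("Terminating node %s via the provider SDK", node)
```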
## Configuration

**Main config**: `config/config.yaml`
- `kraken`: Core settings
- `cerberus`: Health monitoring
- `performance_monitoring`: Prometheus
- `elastic`: Elasticsearch telemetry

**Scenario configs**: `scenarios/` directory
```yaml
- config:
    scenario_type: <type>  # Must match plugin's get_scenario_types()
```

## Code Style

- **Import order**: Standard library, third-party, local imports
- **Naming**: snake_case (functions/variables), CamelCase (classes)
- **Logging**: Use Python's `logging` module
- **Error handling**: Return appropriate exit codes
- **Docstrings**: Required for public functions/classes

## Exit Codes

Krkn uses specific exit codes to communicate execution status:

- `0`: Success - all scenarios passed, no critical alerts
- `1`: Scenario failure - one or more scenarios failed
- `2`: Critical alerts fired during execution
- `3+`: Health check failure (Cerberus monitoring detected issues)

**When implementing scenarios:**
- Return `0` on success
- Return `1` on scenario-specific failures
- Propagate health check failures appropriately
- Log exit code reasons clearly
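A hedged sketch of how a runner might map those outcomes onto the documented codes (illustrative only; krkn's real exit handling lives elsewhere and differs in detail):

```python
import logging
import sys

# Documented krkn exit codes (see the list above)
EXIT_SUCCESS = 0           # all scenarios passed, no critical alerts
EXIT_SCENARIO_FAILURE = 1  # one or more scenarios failed
EXIT_CRITICAL_ALERTS = 2   # critical alerts fired during execution


def finish_run(failed_scenarios: list, critical_alerts_fired: bool) -> None:
    """Log the reason and exit with the matching documented code."""
    if failed_scenarios:
        logging.error("exiting 1: scenarios failed: %s", failed_scenarios)
        sys.exit(EXIT_SCENARIO_FAILURE)
    if critical_alerts_fired:
        logging.error("exiting 2: critical alerts fired during the run")
        sys.exit(EXIT_CRITICAL_ALERTS)
    logging.info("exiting 0: all scenarios passed")
    sys.exit(EXIT_SUCCESS)
```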
## Container Support

Krkn can run inside a container. See `containers/` directory.

**Building custom image:**
```bash
cd containers
./compile_dockerfile.sh  # Generates Dockerfile from template
docker build -t krkn:latest .
```

**Running containerized:**
```bash
docker run -v ~/.kube:/root/.kube:Z \
  -v $(pwd)/config:/config:Z \
  -v $(pwd)/scenarios:/scenarios:Z \
  krkn:latest
```

## Git Workflow

- **NEVER commit directly to main**
- **NEVER use `--force` without approval**
- **ALWAYS create feature branches**: `git checkout -b feature/description`
- **ALWAYS run tests before pushing**

**Conventional commits**: `feat:`, `fix:`, `test:`, `docs:`, `refactor:`

```bash
git checkout main && git pull origin main
git checkout -b feature/your-feature-name
# Make changes, write tests
python -m unittest discover -s tests -v
git add <specific-files>
git commit -m "feat: description"
git push -u origin feature/your-feature-name
```

## Environment Variables

- `KUBECONFIG`: Path to kubeconfig
- `AWS_*`, `AZURE_*`, `GOOGLE_APPLICATION_CREDENTIALS`: Cloud credentials
- `PROMETHEUS_URL`, `ELASTIC_URL`, `ELASTIC_PASSWORD`: Monitoring config

**NEVER commit credentials or API keys.**

## Common Pitfalls

1. Missing virtual environment - always activate venv
2. Running functional tests without cluster setup
3. Ignoring exit codes
4. Modifying krkn-lib directly (it's a separate package)
5. Upgrading docker/requests beyond version constraints

## Before Writing Code

1. Check for existing implementations
2. Review existing plugins as examples
3. Maintain consistency with cloud provider patterns
4. Plan rollback logic
5. Write tests alongside code
6. Update documentation

## When Adding Dependencies

1. Check if functionality exists in krkn-lib or current dependencies
2. Verify compatibility with existing versions
3. Pin specific versions in `requirements.txt`
4. Check for security vulnerabilities
5. Test thoroughly for conflicts

## Common Development Tasks

### Modifying Existing Plugin
1. Read plugin code and corresponding test
2. Make changes
3. Update/add unit tests
4. Run: `python -m unittest tests.test_<plugin>_scenario_plugin`

### Writing Unit Tests
1. Create: `tests/test_<module>_scenario_plugin.py`
2. Import `unittest` and plugin class
3. Mock external dependencies
4. Test success, failure, and edge cases
5. Run: `python -m unittest tests.test_<module>_scenario_plugin`
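For steps 3-4, a minimal sketch of such a test with the Kubernetes client mocked out (plugin and method names here are hypothetical placeholders, not the repo's actual classes):

```python
# tests/test_example_scenario_plugin.py (hypothetical names throughout)
import unittest
from unittest.mock import MagicMock


class ExampleScenarioPlugin:
    """Stand-in for a class that would extend AbstractScenarioPlugin."""

    def run(self, kubecli) -> int:
        # Succeed only if every targeted pod recovered.
        return 0 if not kubecli.list_unrecovered_pods() else 1


class TestExampleScenarioPlugin(unittest.TestCase):
    def test_run_success(self):
        kubecli = MagicMock()  # mocked Kubernetes API client
        kubecli.list_unrecovered_pods.return_value = []
        self.assertEqual(ExampleScenarioPlugin().run(kubecli), 0)

    def test_run_failure(self):
        kubecli = MagicMock()
        kubecli.list_unrecovered_pods.return_value = ["etcd-0"]
        self.assertEqual(ExampleScenarioPlugin().run(kubecli), 1)


if __name__ == "__main__":
    unittest.main()
```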
INCUBATION_CHECKLIST.md (new file, 270 lines)
@@ -0,0 +1,270 @@
# Review Project Moving Level Evaluation
[x] I have reviewed the TOC's [moving level readiness triage guide](https://github.com/cncf/toc/blob/main/operations/dd-toc-guide.md#initial-triageevaluation-prior-to-assignment), ensured the criteria for my project are met before opening this issue, and understand that unmet criteria will result in the project's application being closed.

# Krkn Incubation Application
v1.6
This template provides the project with a framework to inform the TOC of their conformance to the Incubation Level Criteria.

Project Repo(s): https://github.com/krkn-chaos/krkn
Project Site: https://www.krkn-chaos.dev
Sub-Projects:
- https://github.com/krkn-chaos/krknctl
- https://github.com/krkn-chaos/krkn-hub
Communication: [Slack](https://kubernetes.slack.com/archives/C05SFMHRWK1)

Project points of contacts:
- [Naga Ravi Elluri](mailto:nelluri@redhat.com)
- [Paige Patton](mailto:ppatton@redhat.com)
- [Tullio Sebastiani](mailto:tsebasti@redhat.com)

- [ ] (Post Incubation only) [Book a meeting with CNCF staff](http://project-meetings.cncf.io) to understand project benefits and event resources.

## Incubation Criteria Summary for Krkn

### Application Level Assertion

- [x] This project is currently Sandbox, accepted on 2023/12/19, and applying to Incubation.
- [x] This project is applying to join the CNCF at the Incubation level.

### Adoption Assertion

_The project has been adopted by the following organizations in a testing and integration or production capacity:_

| Organization | Since | Website | Use-Case |
|:-|:-|:-|:-|
| MarketAxess | 2024 | https://www.marketaxess.com/ | Kraken enables us to achieve our goal of increasing the reliability of our cloud products on Kubernetes. The tool allows us to automatically run various chaos scenarios, identify resilience and performance bottlenecks, and seamlessly restore the system to its original state once scenarios finish. These chaos scenarios include pod disruptions, node (EC2) outages, simulating availability zone (AZ) outages, and filling up storage spaces like EBS and EFS. The community is highly responsive to requests and works on expanding the tool's capabilities. MarketAxess actively contributes to the project, adding features such as the ability to leverage existing network ACLs and proposing several feature improvements to enhance test coverage. |
| Red Hat Openshift | 2020 | https://www.redhat.com/ | Kraken is a highly reliable chaos testing tool used to ensure the quality and resiliency of Red Hat Openshift. The engineering team runs all the test scenarios under Kraken on different cloud platforms on both self-managed and cloud services environments prior to the release of a new version of the product. The team also contributes to the Kraken project consistently which helps the test scenarios to keep up with the new features introduced to the product. Inclusion of this test coverage has contributed to gaining the trust of new and existing customers of the product. |
| IBM | 2023 | https://www.ibm.com/ | While working on AI for Chaos Testing at IBM Research, we closely collaborated with the Kraken (Krkn) team to advance intelligent chaos engineering. Our contributions included developing AI-enabled chaos injection strategies and integrating reinforcement learning (RL)-based fault search techniques into the Krkn tool, enabling it to identify and explore system vulnerabilities more efficiently. Kraken stands out as one of the most user-friendly and effective tools for chaos engineering, and the Kraken team’s deep technical involvement played a crucial role in the success of this collaboration—helping bridge cutting-edge AI research with practical, real-world system reliability testing. |

## Application Process Principles

### Suggested

N/A

### Required

- [ ] **Engage with the domain specific TAG(s) to increase awareness through a presentation or completing a General Technical Review.**
  - This was completed and occurred on DD-MMM-YYYY, and can be discovered at $LINK.

<!-- (Project assertion goes here) -->

- [x] **All project metadata and resources are [vendor-neutral](https://contribute.cncf.io/maintainers/community/vendor-neutrality/).**

<!-- (Project assertion goes here) -->

- [ ] **Review and acknowledgement of expectations for [Sandbox](https://sandbox.cncf.io) projects and requirements for moving forward through the CNCF Maturity levels.**
  - Met during Project's application on DD-MMM-YYYY.

<!-- (Project assertion goes here) -->

- [ ] **Due Diligence Review.**

Completion of this due diligence document, resolution of concerns raised, and presented for public comment satisfies the Due Diligence Review criteria.

- [ ] **Additional documentation as appropriate for project type, e.g.: installation documentation, end user documentation, reference implementation and/or code samples.**

<!-- (Project assertion goes here) -->

## Governance and Maintainers

Note: this section may be augmented by the completion of a Governance Review from the Project Reviews subproject.

### Suggested

- [ ] **Governance has continuously been iterated upon by the project as a result of their experience applying it, with the governance history demonstrating evolution of maturity alongside the project's maturity evolution.**

<!-- (Project assertion goes here) -->

- [ ] **Clear and discoverable project governance documentation.**

<!-- (Project assertion goes here) -->

- [ ] **Governance is up to date with actual project activities, including any meetings, elections, leadership, or approval processes.**

<!-- (Project assertion goes here) -->

- [ ] **Governance clearly documents [vendor-neutrality](https://contribute.cncf.io/maintainers/community/vendor-neutrality/) of project direction.**

<!-- (Project assertion goes here) -->

- [ ] **Document how the project makes decisions on leadership, contribution acceptance, requests to the CNCF, and changes to governance or project goals.**

<!-- (Project assertion goes here) -->

- [ ] **Document how role, function-based members, or sub-teams are assigned, onboarded, and removed for specific teams (example: Security Response Committee).**

<!-- (Project assertion goes here) -->

- [ ] **Document a complete maintainer lifecycle process (including roles, onboarding, offboarding, and emeritus status).**

<!-- (Project assertion goes here) -->

- [ ] **Demonstrate usage of the maintainer lifecycle with outcomes, either through the addition or replacement of maintainers as project events have required.**

<!-- (Project assertion goes here) -->

- [ ] **If the project has subprojects: subproject leadership, contribution, maturity status documented, including add/remove process.**

<!-- (Project assertion goes here) -->

### Required

- [x] **Document complete list of current maintainers, including names, contact information, domain of responsibility, and affiliation.**

<!-- (Project assertion goes here) -->

- [x] **A number of active maintainers which is appropriate to the size and scope of the project.**

<!-- (Project assertion goes here) -->

- [x] **Code and Doc ownership in Github and elsewhere matches documented governance roles.**

<!-- (Project assertion goes here) -->

- [x] **Document adoption and adherence to the CNCF Code of Conduct or the project's CoC which is based off the CNCF CoC and not in conflict with it.**

<!-- (Project assertion goes here) -->

- [x] **CNCF Code of Conduct is cross-linked from other governance documents.**

<!-- (Project assertion goes here) -->

- [x] **All subprojects, if any, are listed.**

<!-- (Project assertion goes here) -->

## Contributors and Community

Note: this section may be augmented by the completion of a Governance Review from the Project Reviews subproject.

### Suggested

- [ ] **Contributor ladder with multiple roles for contributors.**

<!-- (Project assertion goes here) -->

### Required

- [x] **Clearly defined and discoverable process to submit issues or changes.**

<!-- (Project assertion goes here) -->

- [x] **Project must have, and document, at least one public communications channel for users and/or contributors.**

<!-- (Project assertion goes here) -->

- [ ] **List and document all project communication channels, including subprojects (mail list/slack/etc.). List any non-public communications channels and what their special purpose is.**

<!-- (Project assertion goes here) -->

- [x] **Up-to-date public meeting schedulers and/or integration with CNCF calendar.**

<!-- (Project assertion goes here) -->

- [ ] **Documentation of how to contribute, with increasing detail as the project matures.**

<!-- (Project assertion goes here) -->

- [x] **Demonstrate contributor activity and recruitment.**

<!-- (Project assertion goes here) -->

## Engineering Principles

### Suggested

- [ ] **Roadmap change process is documented.**

<!-- (Project assertion goes here) -->

- [ ] **History of regular, quality releases.**

<!-- (Project assertion goes here) -->

### Required

- [ ] **Document project goals and objectives that illustrate the project’s differentiation in the Cloud Native landscape as well as outlines how this project fulfills an outstanding need and/or solves a problem differently. _This can also be satisfied by completing a General Technical Review._**
  - _If applicable_ a General Technical Review was completed/updated on DD-MMM-YYYY, and can be discovered at $LINK.

<!-- (Project assertion goes here) -->

- [ ] **Document what the project does, and why it does it - including viable cloud native use cases. This can also be satisfied by completing a General Technical Review.**

<!-- (Project assertion goes here) -->

- [ ] **Document and maintain a public roadmap or other forward looking planning document or tracking mechanism.**

<!-- (Project assertion goes here) -->

- [ ] **Document overview of project architecture and software design that demonstrates viable cloud native use cases, as part of the project's documentation. _This can also be satisfied by completing a General Technical Review and capturing the output in the project's documentation._**
  - _If applicable_ a General Technical Review was completed/updated on DD-MMM-YYYY, and can be discovered at $LINK.

<!-- (Project assertion goes here) -->

- [x] **Document the project's release process.**

<!-- (Project assertion goes here) -->

## Security

### Suggested

N/A

### Required

Note: this section may be augmented by a joint-assessment performed by TAG Security and Compliance.

- [x] **Clearly defined and discoverable process to report security issues.**

<!-- (Project assertion goes here) -->

- [x] **Enforcing Access Control Rules to secure the code base against attacks (Example: two factor authentication enforcement, and/or use of ACL tools.)**

<!-- (Project assertion goes here) -->

- [x] **Document assignment of security response roles and how reports are handled.**

<!-- (Project assertion goes here) -->

- [x] **Document [Security Self-Assessment](https://tag-security.cncf.io/community/assessments/guide/self-assessment/).**

<!-- (Project assertion goes here) -->

- [ ] **Achieve the Open Source Security Foundation (OpenSSF) Best Practices passing badge.**

<!-- (Project assertion goes here) -->

## Ecosystem

### Suggested

N/A

### Required

- [ ] **Publicly documented list of adopters, which may indicate their adoption level (dev/trialing, prod, etc.)**

<!-- (Project assertion goes here) -->

- [ ] **Used in appropriate capacity by at least 3 independent + indirect/direct adopters, (these are not required to be in the publicly documented list of adopters)**

<!-- (Project assertion goes here) -->

The project provided the TOC with a list of adopters for verification of use of the project at the level expected, i.e. production use for graduation, dev/test for incubation.

- [ ] **TOC verification of adopters.**

<!-- (Project assertion goes here) -->

Refer to the Adoption portion of this document.

- [ ] **Clearly documented integrations and/or compatibility with other CNCF projects as well as non-CNCF projects.**

<!-- (Project assertion goes here) -->

## Additional Information

<!-- Provide any additional information you feel is relevant for the TOC in conducting due diligence on this project. -->
@@ -56,7 +56,7 @@ kraken:
cerberus:
  cerberus_enabled: False # Enable it when cerberus is previously installed
  cerberus_url: # When cerberus_enabled is set to True, provide the url where cerberus publishes go/no-go signal
  check_application_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
  check_applicaton_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run

performance_monitoring:
  prometheus_url: '' # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
@@ -13,7 +13,7 @@ kraken:
cerberus:
  cerberus_enabled: False # Enable it when cerberus is previously installed
  cerberus_url: # When cerberus_enabled is set to True, provide the url where cerberus publishes go/no-go signal
  check_application_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
  check_applicaton_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run

performance_monitoring:
  prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
@@ -14,7 +14,7 @@ kraken:
cerberus:
  cerberus_enabled: False # Enable it when cerberus is previously installed
  cerberus_url: # When cerberus_enabled is set to True, provide the url where cerberus publishes go/no-go signal
  check_application_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
  check_applicaton_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run

performance_monitoring:
  prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
@@ -35,7 +35,7 @@ kraken:
cerberus:
  cerberus_enabled: True # Enable it when cerberus is previously installed
  cerberus_url: http://0.0.0.0:8080 # When cerberus_enabled is set to True, provide the url where cerberus publishes go/no-go signal
  check_application_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
  check_applicaton_routes: False # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run

performance_monitoring:
  deploy_dashboards: True # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift
@@ -41,7 +41,7 @@ ENV KUBECONFIG /home/krkn/.kube/config

# This overwrites any existing configuration in /etc/yum.repos.d/kubernetes.repo
RUN dnf update && dnf install -y --setopt=install_weak_deps=False \
    git python3.11 jq yq gettext wget which ipmitool openssh-server &&\
    git python39 jq yq gettext wget which ipmitool openssh-server &&\
    dnf clean all

# copy oc client binary from oc-build image
@@ -63,15 +63,15 @@ RUN if [ -n "$PR_NUMBER" ]; then git fetch origin pull/${PR_NUMBER}/head:pr-${PR
# if it is a TAG trigger checkout the tag
RUN if [ -n "$TAG" ]; then git checkout "$TAG";fi

RUN python3.11 -m ensurepip --upgrade --default-pip
RUN python3.11 -m pip install --upgrade pip setuptools==78.1.1
RUN python3.9 -m ensurepip --upgrade --default-pip
RUN python3.9 -m pip install --upgrade pip setuptools==78.1.1

# removes the the vulnerable versions of setuptools and pip
RUN rm -rf "$(pip cache dir)"
RUN rm -rf /tmp/*
RUN rm -rf /usr/local/lib/python3.11/ensurepip/_bundled
RUN pip3.11 install -r requirements.txt
RUN pip3.11 install jsonschema
RUN rm -rf /usr/local/lib/python3.9/ensurepip/_bundled
RUN pip3.9 install -r requirements.txt
RUN pip3.9 install jsonschema

LABEL krknctl.title.global="Krkn Base Image"
LABEL krknctl.description.global="This is the krkn base image."
@@ -14,7 +14,7 @@ def get_status(config, start_time, end_time):
    if config["cerberus"]["cerberus_enabled"]:
        cerberus_url = config["cerberus"]["cerberus_url"]
        check_application_routes = \
            config["cerberus"]["check_application_routes"]
            config["cerberus"]["check_applicaton_routes"]
        if not cerberus_url:
            logging.error(
                "url where Cerberus publishes True/False signal "
@@ -15,7 +15,7 @@ def invoke(command, timeout=None):


# Invokes a given command and returns the stdout
def invoke_no_exit(command, timeout=15):
def invoke_no_exit(command, timeout=None):
    output = ""
    try:
        output = subprocess.check_output(command, shell=True, universal_newlines=True, timeout=timeout, stderr=subprocess.DEVNULL)
@@ -146,7 +146,7 @@ class AbstractScenarioPlugin(ABC):
            if scenario_telemetry.exit_status != 0:
                failed_scenarios.append(scenario_config)
            scenario_telemetries.append(scenario_telemetry)
            logging.info(f"waiting {wait_duration} before running the next scenario")
            logging.info(f"wating {wait_duration} before running the next scenario")
            time.sleep(wait_duration)
        return failed_scenarios, scenario_telemetries
@@ -1,7 +1,6 @@
import logging
import random
import time
import traceback
from asyncio import Future
import yaml
from krkn_lib.k8s import KrknKubernetes
@@ -42,7 +41,6 @@ class ContainerScenarioPlugin(AbstractScenarioPlugin):
                logging.info("ContainerScenarioPlugin failed with unrecovered containers")
                return 1
        except (RuntimeError, Exception) as e:
            logging.error("Stack trace:\n%s", traceback.format_exc())
            logging.error("ContainerScenarioPlugin exiting due to Exception %s" % e)
            return 1
        else:
@@ -52,6 +50,7 @@ class ContainerScenarioPlugin(AbstractScenarioPlugin):
        return ["container_scenarios"]

    def start_monitoring(self, kill_scenario: dict, lib_telemetry: KrknTelemetryOpenshift) -> Future:

        namespace_pattern = f"^{kill_scenario['namespace']}$"
        label_selector = kill_scenario["label_selector"]
        recovery_time = kill_scenario["expected_recovery_time"]
@@ -233,5 +232,4 @@ class ContainerScenarioPlugin(AbstractScenarioPlugin):
            timer += 5
            logging.info("Waiting 5 seconds for containers to become ready")
            time.sleep(5)

        return killed_container_list
@@ -53,7 +53,7 @@ class HogsScenarioPlugin(AbstractScenarioPlugin):
            raise Exception("no available nodes to schedule workload")

        if not has_selector:
            available_nodes = [available_nodes[random.randint(0, len(available_nodes) - 1)]]
            available_nodes = [available_nodes[random.randint(0, len(available_nodes))]]

        if scenario_config.number_of_nodes and len(available_nodes) > scenario_config.number_of_nodes:
            available_nodes = random.sample(available_nodes, scenario_config.number_of_nodes)
@@ -55,8 +55,7 @@ class KubevirtVmOutageScenarioPlugin(AbstractScenarioPlugin):
                pods_status.merge(single_pods_status)

            scenario_telemetry.affected_pods = pods_status
            if len(scenario_telemetry.affected_pods.unrecovered) > 0:
                return 1

            return 0
        except Exception as e:
            logging.error(f"KubeVirt VM Outage scenario failed: {e}")
@@ -27,7 +27,7 @@ def get_status(config, start_time, end_time):
    application_routes_status = True
    if config["cerberus"]["cerberus_enabled"]:
        cerberus_url = config["cerberus"]["cerberus_url"]
        check_application_routes = config["cerberus"]["check_application_routes"]
        check_application_routes = config["cerberus"]["check_applicaton_routes"]
        if not cerberus_url:
            logging.error("url where Cerberus publishes True/False signal is not provided.")
            sys.exit(1)
@@ -27,7 +27,7 @@ def get_status(config, start_time, end_time):
    application_routes_status = True
    if config["cerberus"]["cerberus_enabled"]:
        cerberus_url = config["cerberus"]["cerberus_url"]
        check_application_routes = config["cerberus"]["check_application_routes"]
        check_application_routes = config["cerberus"]["check_applicaton_routes"]
        if not cerberus_url:
            logging.error(
                "url where Cerberus publishes True/False signal is not provided.")
@@ -18,20 +18,20 @@ class abstract_node_scenarios:
        self.node_action_kube_check = node_action_kube_check

    # Node scenario to start the node
    def node_start_scenario(self, instance_kill_count, node, timeout, poll_interval):
    def node_start_scenario(self, instance_kill_count, node, timeout):
        pass

    # Node scenario to stop the node
    def node_stop_scenario(self, instance_kill_count, node, timeout, poll_interval):
    def node_stop_scenario(self, instance_kill_count, node, timeout):
        pass

    # Node scenario to stop and then start the node
    def node_stop_start_scenario(self, instance_kill_count, node, timeout, duration, poll_interval):
    def node_stop_start_scenario(self, instance_kill_count, node, timeout, duration):
        logging.info("Starting node_stop_start_scenario injection")
        self.node_stop_scenario(instance_kill_count, node, timeout, poll_interval)
        self.node_stop_scenario(instance_kill_count, node, timeout)
        logging.info("Waiting for %s seconds before starting the node" % (duration))
        time.sleep(duration)
        self.node_start_scenario(instance_kill_count, node, timeout, poll_interval)
        self.node_start_scenario(instance_kill_count, node, timeout)
        self.affected_nodes_status.merge_affected_nodes()
        logging.info("node_stop_start_scenario has been successfully injected!")
@@ -56,7 +56,7 @@ class abstract_node_scenarios:
        logging.error("node_disk_detach_attach_scenario failed!")

    # Node scenario to terminate the node
    def node_termination_scenario(self, instance_kill_count, node, timeout, poll_interval):
    def node_termination_scenario(self, instance_kill_count, node, timeout):
        pass

    # Node scenario to reboot the node
@@ -76,7 +76,7 @@ class abstract_node_scenarios:
                nodeaction.wait_for_unknown_status(node, timeout, self.kubecli, affected_node)

                logging.info("The kubelet of the node %s has been stopped" % (node))
                logging.info("stop_kubelet_scenario has been successfully injected!")
                logging.info("stop_kubelet_scenario has been successfuly injected!")
            except Exception as e:
                logging.error(
                    "Failed to stop the kubelet of the node. Encountered following "
@@ -108,7 +108,7 @@ class abstract_node_scenarios:
                )
                nodeaction.wait_for_ready_status(node, timeout, self.kubecli,affected_node)
                logging.info("The kubelet of the node %s has been restarted" % (node))
                logging.info("restart_kubelet_scenario has been successfully injected!")
                logging.info("restart_kubelet_scenario has been successfuly injected!")
            except Exception as e:
                logging.error(
                    "Failed to restart the kubelet of the node. Encountered following "
@@ -128,7 +128,7 @@ class abstract_node_scenarios:
                "oc debug node/" + node + " -- chroot /host "
                "dd if=/dev/urandom of=/proc/sysrq-trigger"
            )
            logging.info("node_crash_scenario has been successfully injected!")
            logging.info("node_crash_scenario has been successfuly injected!")
        except Exception as e:
            logging.error(
                "Failed to crash the node. Encountered following exception: %s. "
@@ -234,7 +234,7 @@ class alibaba_node_scenarios(abstract_node_scenarios):


    # Node scenario to start the node
    def node_start_scenario(self, instance_kill_count, node, timeout, poll_interval):
    def node_start_scenario(self, instance_kill_count, node, timeout):
        for _ in range(instance_kill_count):
            affected_node = AffectedNode(node)
            try:
@@ -260,7 +260,7 @@ class alibaba_node_scenarios(abstract_node_scenarios):
            self.affected_nodes_status.affected_nodes.append(affected_node)

    # Node scenario to stop the node
    def node_stop_scenario(self, instance_kill_count, node, timeout, poll_interval):
    def node_stop_scenario(self, instance_kill_count, node, timeout):
        for _ in range(instance_kill_count):
            affected_node = AffectedNode(node)
            try:
@@ -286,7 +286,7 @@ class alibaba_node_scenarios(abstract_node_scenarios):

    # Might need to stop and then release the instance
    # Node scenario to terminate the node
    def node_termination_scenario(self, instance_kill_count, node, timeout, poll_interval):
    def node_termination_scenario(self, instance_kill_count, node, timeout):
        for _ in range(instance_kill_count):
            affected_node = AffectedNode(node)
            try:
@@ -77,21 +77,10 @@ class AWS:
     # until a successful state is reached. An error is returned after 40 failed checks
-    # Setting timeout for consistency with other cloud functions
     # Wait until the node instance is running
-    def wait_until_running(self, instance_id, timeout=600, affected_node=None, poll_interval=15):
+    def wait_until_running(self, instance_id, timeout=600, affected_node=None):
         try:
             start_time = time.time()
-            if timeout > 0:
-                max_attempts = max(1, int(timeout / poll_interval))
-            else:
-                max_attempts = 40
-
-            self.boto_instance.wait_until_running(
-                InstanceIds=[instance_id],
-                WaiterConfig={
-                    'Delay': poll_interval,
-                    'MaxAttempts': max_attempts
-                }
-            )
+            self.boto_instance.wait_until_running(InstanceIds=[instance_id])
             end_time = time.time()
             if affected_node:
                 affected_node.set_affected_node_status("running", end_time - start_time)
@@ -104,21 +93,10 @@ class AWS:
             return False
 
     # Wait until the node instance is stopped
-    def wait_until_stopped(self, instance_id, timeout=600, affected_node= None, poll_interval=15):
+    def wait_until_stopped(self, instance_id, timeout=600, affected_node= None):
         try:
             start_time = time.time()
-            if timeout > 0:
-                max_attempts = max(1, int(timeout / poll_interval))
-            else:
-                max_attempts = 40
-
-            self.boto_instance.wait_until_stopped(
-                InstanceIds=[instance_id],
-                WaiterConfig={
-                    'Delay': poll_interval,
-                    'MaxAttempts': max_attempts
-                }
-            )
+            self.boto_instance.wait_until_stopped(InstanceIds=[instance_id])
             end_time = time.time()
             if affected_node:
                 affected_node.set_affected_node_status("stopped", end_time - start_time)
@@ -131,21 +109,10 @@ class AWS:
             return False
 
     # Wait until the node instance is terminated
-    def wait_until_terminated(self, instance_id, timeout=600, affected_node= None, poll_interval=15):
+    def wait_until_terminated(self, instance_id, timeout=600, affected_node= None):
         try:
             start_time = time.time()
-            if timeout > 0:
-                max_attempts = max(1, int(timeout / poll_interval))
-            else:
-                max_attempts = 40
-
-            self.boto_instance.wait_until_terminated(
-                InstanceIds=[instance_id],
-                WaiterConfig={
-                    'Delay': poll_interval,
-                    'MaxAttempts': max_attempts
-                }
-            )
+            self.boto_instance.wait_until_terminated(InstanceIds=[instance_id])
             end_time = time.time()
             if affected_node:
                 affected_node.set_affected_node_status("terminated", end_time - start_time)
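For readers unfamiliar with the boto3 waiter contract these hunks revolve around: the total wait is roughly `Delay * MaxAttempts` seconds, and the default is 40 attempts at 15-second intervals. A minimal standalone sketch of the same pattern with the stock boto3 client API (the instance ID and the `timeout`/`poll_interval` values are illustrative):

```python
import boto3

ec2 = boto3.client("ec2")
timeout, poll_interval = 600, 15  # illustrative values

# Total wait time is roughly Delay * MaxAttempts seconds.
waiter = ec2.get_waiter("instance_running")
waiter.wait(
    InstanceIds=["i-0123456789abcdef0"],  # hypothetical instance ID
    WaiterConfig={
        "Delay": poll_interval,
        "MaxAttempts": max(1, int(timeout / poll_interval)),
    },
)
```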
@@ -300,7 +267,7 @@ class aws_node_scenarios(abstract_node_scenarios):
         self.node_action_kube_check = node_action_kube_check
 
     # Node scenario to start the node
-    def node_start_scenario(self, instance_kill_count, node, timeout, poll_interval):
+    def node_start_scenario(self, instance_kill_count, node, timeout):
         for _ in range(instance_kill_count):
             affected_node = AffectedNode(node)
             try:
@@ -311,7 +278,7 @@ class aws_node_scenarios(abstract_node_scenarios):
                     "Starting the node %s with instance ID: %s " % (node, instance_id)
                 )
                 self.aws.start_instances(instance_id)
-                self.aws.wait_until_running(instance_id, timeout=timeout, affected_node=affected_node, poll_interval=poll_interval)
+                self.aws.wait_until_running(instance_id, affected_node=affected_node)
                 if self.node_action_kube_check:
                     nodeaction.wait_for_ready_status(node, timeout, self.kubecli, affected_node)
                 logging.info(
@@ -329,7 +296,7 @@ class aws_node_scenarios(abstract_node_scenarios):
         self.affected_nodes_status.affected_nodes.append(affected_node)
 
     # Node scenario to stop the node
-    def node_stop_scenario(self, instance_kill_count, node, timeout, poll_interval):
+    def node_stop_scenario(self, instance_kill_count, node, timeout):
         for _ in range(instance_kill_count):
             affected_node = AffectedNode(node)
             try:
@@ -340,7 +307,7 @@ class aws_node_scenarios(abstract_node_scenarios):
                     "Stopping the node %s with instance ID: %s " % (node, instance_id)
                 )
                 self.aws.stop_instances(instance_id)
-                self.aws.wait_until_stopped(instance_id, timeout=timeout, affected_node=affected_node, poll_interval=poll_interval)
+                self.aws.wait_until_stopped(instance_id, affected_node=affected_node)
                 logging.info(
                     "Node with instance ID: %s is in stopped state" % (instance_id)
                 )
@@ -357,7 +324,7 @@ class aws_node_scenarios(abstract_node_scenarios):
         self.affected_nodes_status.affected_nodes.append(affected_node)
 
     # Node scenario to terminate the node
-    def node_termination_scenario(self, instance_kill_count, node, timeout, poll_interval):
+    def node_termination_scenario(self, instance_kill_count, node, timeout):
         for _ in range(instance_kill_count):
             affected_node = AffectedNode(node)
             try:
@@ -369,7 +336,7 @@ class aws_node_scenarios(abstract_node_scenarios):
                     % (node, instance_id)
                 )
                 self.aws.terminate_instances(instance_id)
-                self.aws.wait_until_terminated(instance_id, timeout=timeout, affected_node=affected_node, poll_interval=poll_interval)
+                self.aws.wait_until_terminated(instance_id, affected_node=affected_node)
                 for _ in range(timeout):
                     if node not in self.kubecli.list_nodes():
                         break
@@ -379,7 +346,7 @@ class aws_node_scenarios(abstract_node_scenarios):
                 logging.info(
                     "Node with instance ID: %s has been terminated" % (instance_id)
                 )
-                logging.info("node_termination_scenario has been successfully injected!")
+                logging.info("node_termination_scenario has been successfuly injected!")
             except Exception as e:
                 logging.error(
                     "Failed to terminate node instance. Encountered following exception:"
@@ -408,7 +375,7 @@ class aws_node_scenarios(abstract_node_scenarios):
                 logging.info(
                     "Node with instance ID: %s has been rebooted" % (instance_id)
                 )
-                logging.info("node_reboot_scenario has been successfully injected!")
+                logging.info("node_reboot_scenario has been successfuly injected!")
             except Exception as e:
                 logging.error(
                     "Failed to reboot node instance. Encountered following exception:"

@@ -18,6 +18,8 @@ class Azure:
         logging.info("azure " + str(self))
         # Acquire a credential object using CLI-based authentication.
         credentials = DefaultAzureCredential()
+        # az_account = runcommand.invoke("az account list -o yaml")
+        # az_account_yaml = yaml.safe_load(az_account, Loader=yaml.FullLoader)
         logger = logging.getLogger("azure")
         logger.setLevel(logging.WARNING)
         subscription_id = os.getenv("AZURE_SUBSCRIPTION_ID")
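As background for the hunk above, `DefaultAzureCredential` chains environment-variable, managed-identity, and Azure CLI authentication, and the subscription ID is read from the environment. A minimal sketch of that pattern (the compute client and the listing call are a generic illustration, not krkn's exact wiring):

```python
import os
from azure.identity import DefaultAzureCredential
from azure.mgmt.compute import ComputeManagementClient

# DefaultAzureCredential tries environment variables, managed identity,
# and the Azure CLI login, in that order.
credentials = DefaultAzureCredential()
subscription_id = os.getenv("AZURE_SUBSCRIPTION_ID")

compute_client = ComputeManagementClient(credentials, subscription_id)
for vm in compute_client.virtual_machines.list_all():
    print(vm.name)
```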
@@ -216,7 +218,7 @@ class azure_node_scenarios(abstract_node_scenarios):
 
 
     # Node scenario to start the node
-    def node_start_scenario(self, instance_kill_count, node, timeout, poll_interval):
+    def node_start_scenario(self, instance_kill_count, node, timeout):
         for _ in range(instance_kill_count):
             affected_node = AffectedNode(node)
             try:
@@ -244,7 +246,7 @@ class azure_node_scenarios(abstract_node_scenarios):
         self.affected_nodes_status.affected_nodes.append(affected_node)
 
     # Node scenario to stop the node
-    def node_stop_scenario(self, instance_kill_count, node, timeout, poll_interval):
+    def node_stop_scenario(self, instance_kill_count, node, timeout):
         for _ in range(instance_kill_count):
             affected_node = AffectedNode(node)
             try:
@@ -271,7 +273,7 @@ class azure_node_scenarios(abstract_node_scenarios):
         self.affected_nodes_status.affected_nodes.append(affected_node)
 
     # Node scenario to terminate the node
-    def node_termination_scenario(self, instance_kill_count, node, timeout, poll_interval):
+    def node_termination_scenario(self, instance_kill_count, node, timeout):
         for _ in range(instance_kill_count):
             affected_node = AffectedNode(node)
             try:

@@ -153,7 +153,7 @@ class bm_node_scenarios(abstract_node_scenarios):
         self.node_action_kube_check = node_action_kube_check
 
     # Node scenario to start the node
-    def node_start_scenario(self, instance_kill_count, node, timeout, poll_interval):
+    def node_start_scenario(self, instance_kill_count, node, timeout):
         for _ in range(instance_kill_count):
             affected_node = AffectedNode(node)
             try:
@@ -182,7 +182,7 @@ class bm_node_scenarios(abstract_node_scenarios):
         self.affected_nodes_status.affected_nodes.append(affected_node)
 
     # Node scenario to stop the node
-    def node_stop_scenario(self, instance_kill_count, node, timeout, poll_interval):
+    def node_stop_scenario(self, instance_kill_count, node, timeout):
         for _ in range(instance_kill_count):
             affected_node = AffectedNode(node)
             try:
@@ -210,7 +210,7 @@ class bm_node_scenarios(abstract_node_scenarios):
         self.affected_nodes_status.affected_nodes.append(affected_node)
 
     # Node scenario to terminate the node
-    def node_termination_scenario(self, instance_kill_count, node, timeout, poll_interval):
+    def node_termination_scenario(self, instance_kill_count, node, timeout):
         logging.info("Node termination scenario is not supported on baremetal")
 
     # Node scenario to reboot the node
@@ -229,7 +229,7 @@ class bm_node_scenarios(abstract_node_scenarios):
                 nodeaction.wait_for_unknown_status(node, timeout, self.kubecli, affected_node)
                 nodeaction.wait_for_ready_status(node, timeout, self.kubecli, affected_node)
                 logging.info("Node with bmc address: %s has been rebooted" % (bmc_addr))
-                logging.info("node_reboot_scenario has been successfully injected!")
+                logging.info("node_reboot_scenario has been successfuly injected!")
             except Exception as e:
                 logging.error(
                     "Failed to reboot node instance. Encountered following exception:"

@@ -2,176 +2,49 @@ import krkn.scenario_plugins.node_actions.common_node_functions as nodeaction
 from krkn.scenario_plugins.node_actions.abstract_node_scenarios import (
     abstract_node_scenarios,
 )
-import os
-import platform
 import logging
 import docker
 from krkn_lib.k8s import KrknKubernetes
 from krkn_lib.models.k8s import AffectedNode, AffectedNodeStatus
 
 class Docker:
-    """
-    Container runtime client wrapper supporting both Docker and Podman.
-
-    This class automatically detects and connects to either Docker or Podman
-    container runtimes using the Docker-compatible API. It tries multiple
-    connection methods in order of preference:
-
-    1. Docker Unix socket (unix:///var/run/docker.sock)
-    2. Platform-specific Podman sockets:
-       - macOS: ~/.local/share/containers/podman/machine/podman.sock
-       - Linux rootful: unix:///run/podman/podman.sock
-       - Linux rootless: unix:///run/user/<uid>/podman/podman.sock
-    3. Environment variables (DOCKER_HOST or CONTAINER_HOST)
-
-    The runtime type (docker/podman) is auto-detected and logged for debugging.
-    Supports Kind clusters running on Podman.
-
-    Assisted By: Claude Code
-    """
     def __init__(self):
-        self.client = None
-        self.runtime = 'unknown'
-
-        # Try multiple connection methods in order of preference
-        # Supports both Docker and Podman
-        connection_methods = [
-            ('unix:///var/run/docker.sock', 'Docker Unix socket'),
-        ]
-
-        # Add platform-specific Podman sockets
-        if platform.system() == 'Darwin':  # macOS
-            # On macOS, Podman uses podman-machine with socket typically at:
-            # ~/.local/share/containers/podman/machine/podman.sock
-            # This is often symlinked to /var/run/docker.sock
-            podman_machine_sock = os.path.expanduser('~/.local/share/containers/podman/machine/podman.sock')
-            if os.path.exists(podman_machine_sock):
-                connection_methods.append((f'unix://{podman_machine_sock}', 'Podman machine socket (macOS)'))
-        else:  # Linux
-            connection_methods.extend([
-                ('unix:///run/podman/podman.sock', 'Podman Unix socket (rootful)'),
-                ('unix:///run/user/{uid}/podman/podman.sock', 'Podman Unix socket (rootless)'),
-            ])
-
-        # Always try from_env as last resort
-        connection_methods.append(('from_env', 'Environment variables (DOCKER_HOST/CONTAINER_HOST)'))
-
-        for method, description in connection_methods:
-            try:
-                # Handle rootless Podman socket path with {uid} placeholder
-                if '{uid}' in method:
-                    uid = os.getuid()
-                    method = method.format(uid=uid)
-                    logging.info(f'Attempting to connect using {description}: {method}')
-
-                if method == 'from_env':
-                    logging.info(f'Attempting to connect using {description}')
-                    self.client = docker.from_env()
-                else:
-                    logging.info(f'Attempting to connect using {description}: {method}')
-                    self.client = docker.DockerClient(base_url=method)
-
-                # Test the connection
-                self.client.ping()
-
-                # Detect runtime type
-                try:
-                    version_info = self.client.version()
-                    version_str = version_info.get('Version', '')
-                    if 'podman' in version_str.lower():
-                        self.runtime = 'podman'
-                    else:
-                        self.runtime = 'docker'
-                    logging.debug(f'Runtime version info: {version_str}')
-                except Exception as version_err:
-                    logging.warning(f'Could not detect runtime version: {version_err}')
-                    self.runtime = 'unknown'
-
-                logging.info(f'Successfully connected to {self.runtime} using {description}')
-
-                # Log available containers for debugging
-                try:
-                    containers = self.client.containers.list(all=True)
-                    logging.info(f'Found {len(containers)} total containers')
-                    for container in containers[:5]:  # Log first 5
-                        logging.debug(f'  Container: {container.name} ({container.status})')
-                except Exception as list_err:
-                    logging.warning(f'Could not list containers: {list_err}')
-
-                break
-
-            except Exception as e:
-                logging.warning(f'Failed to connect using {description}: {e}')
-                continue
-
-        if self.client is None:
-            error_msg = 'Failed to initialize container runtime client (Docker/Podman) with any connection method'
-            logging.error(error_msg)
-            logging.error('Attempted connection methods:')
-            for method, desc in connection_methods:
-                logging.error(f'  - {desc}: {method}')
-            raise RuntimeError(error_msg)
-
-        logging.info(f'Container runtime client initialized successfully: {self.runtime}')
+        self.client = docker.from_env()
 
     def get_container_id(self, node_name):
-        """Get the container ID for a given node name."""
         container = self.client.containers.get(node_name)
-        logging.info(f'Found {self.runtime} container for node {node_name}: {container.id}')
         return container.id
 
     # Start the node instance
     def start_instances(self, node_name):
-        """Start a container instance (works with both Docker and Podman)."""
-        logging.info(f'Starting {self.runtime} container for node: {node_name}')
         container = self.client.containers.get(node_name)
         container.start()
-        logging.info(f'Container {container.id} started successfully')
 
     # Stop the node instance
     def stop_instances(self, node_name):
-        """Stop a container instance (works with both Docker and Podman)."""
-        logging.info(f'Stopping {self.runtime} container for node: {node_name}')
         container = self.client.containers.get(node_name)
         container.stop()
-        logging.info(f'Container {container.id} stopped successfully')
 
     # Reboot the node instance
     def reboot_instances(self, node_name):
-        """Restart a container instance (works with both Docker and Podman)."""
-        logging.info(f'Restarting {self.runtime} container for node: {node_name}')
         container = self.client.containers.get(node_name)
         container.restart()
-        logging.info(f'Container {container.id} restarted successfully')
 
     # Terminate the node instance
     def terminate_instances(self, node_name):
-        """Stop and remove a container instance (works with both Docker and Podman)."""
-        logging.info(f'Terminating {self.runtime} container for node: {node_name}')
         container = self.client.containers.get(node_name)
         container.stop()
         container.remove()
-        logging.info(f'Container {container.id} terminated and removed successfully')
 
 
 class docker_node_scenarios(abstract_node_scenarios):
-    """
-    Node chaos scenarios for containerized Kubernetes nodes.
-
-    Supports both Docker and Podman container runtimes. This class provides
-    methods to inject chaos into Kubernetes nodes running as containers
-    (e.g., Kind clusters, Podman-based clusters).
-    """
     def __init__(self, kubecli: KrknKubernetes, node_action_kube_check: bool, affected_nodes_status: AffectedNodeStatus):
-        logging.info('Initializing docker_node_scenarios (supports Docker and Podman)')
         super().__init__(kubecli, node_action_kube_check, affected_nodes_status)
         self.docker = Docker()
         self.node_action_kube_check = node_action_kube_check
-        logging.info(f'Node scenarios initialized successfully using {self.docker.runtime} runtime')
 
     # Node scenario to start the node
-    def node_start_scenario(self, instance_kill_count, node, timeout, poll_interval):
+    def node_start_scenario(self, instance_kill_count, node, timeout):
         for _ in range(instance_kill_count):
             affected_node = AffectedNode(node)
             try:
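The removed `__init__` above boils down to one docker-py idiom: point `DockerClient` at a candidate socket, `ping()` it, and inspect `version()` to tell Podman from Docker. A condensed, hedged sketch of that pattern (the socket paths are common defaults, not guaranteed on every host):

```python
import os
import docker

# Candidate Docker-compatible endpoints; rootless Podman lives under the
# user's runtime dir. These are common defaults, not guaranteed paths.
candidates = [
    "unix:///var/run/docker.sock",
    "unix:///run/podman/podman.sock",
    f"unix:///run/user/{os.getuid()}/podman/podman.sock",
]

client = None
for base_url in candidates:
    try:
        client = docker.DockerClient(base_url=base_url)
        client.ping()  # raises if the daemon is not reachable
        break
    except Exception:
        client = None

if client is None:
    client = docker.from_env()  # fall back to DOCKER_HOST and friends

# Podman reports itself in the version string of its compat API.
version = client.version().get("Version", "")
runtime = "podman" if "podman" in version.lower() else "docker"
print(runtime)
```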
@@ -198,7 +71,7 @@ class docker_node_scenarios(abstract_node_scenarios):
         self.affected_nodes_status.affected_nodes.append(affected_node)
 
     # Node scenario to stop the node
-    def node_stop_scenario(self, instance_kill_count, node, timeout, poll_interval):
+    def node_stop_scenario(self, instance_kill_count, node, timeout):
         for _ in range(instance_kill_count):
             affected_node = AffectedNode(node)
             try:
@@ -224,7 +97,7 @@ class docker_node_scenarios(abstract_node_scenarios):
         self.affected_nodes_status.affected_nodes.append(affected_node)
 
     # Node scenario to terminate the node
-    def node_termination_scenario(self, instance_kill_count, node, timeout, poll_interval):
+    def node_termination_scenario(self, instance_kill_count, node, timeout):
         for _ in range(instance_kill_count):
             try:
                 logging.info("Starting node_termination_scenario injection")
@@ -237,7 +110,7 @@ class docker_node_scenarios(abstract_node_scenarios):
                 logging.info(
                     "Node with container ID: %s has been terminated" % (container_id)
                 )
-                logging.info("node_termination_scenario has been successfully injected!")
+                logging.info("node_termination_scenario has been successfuly injected!")
             except Exception as e:
                 logging.error(
                     "Failed to terminate node instance. Encountered following exception:"
@@ -264,7 +137,7 @@ class docker_node_scenarios(abstract_node_scenarios):
                 logging.info(
                     "Node with container ID: %s has been rebooted" % (container_id)
                 )
-                logging.info("node_reboot_scenario has been successfully injected!")
+                logging.info("node_reboot_scenario has been successfuly injected!")
             except Exception as e:
                 logging.error(
                     "Failed to reboot node instance. Encountered following exception:"

@@ -227,7 +227,7 @@ class gcp_node_scenarios(abstract_node_scenarios):
         self.node_action_kube_check = node_action_kube_check
 
     # Node scenario to start the node
-    def node_start_scenario(self, instance_kill_count, node, timeout, poll_interval):
+    def node_start_scenario(self, instance_kill_count, node, timeout):
         for _ in range(instance_kill_count):
             affected_node = AffectedNode(node)
             try:
@@ -257,7 +257,7 @@ class gcp_node_scenarios(abstract_node_scenarios):
         self.affected_nodes_status.affected_nodes.append(affected_node)
 
     # Node scenario to stop the node
-    def node_stop_scenario(self, instance_kill_count, node, timeout, poll_interval):
+    def node_stop_scenario(self, instance_kill_count, node, timeout):
         for _ in range(instance_kill_count):
             affected_node = AffectedNode(node)
             try:
@@ -286,7 +286,7 @@ class gcp_node_scenarios(abstract_node_scenarios):
         self.affected_nodes_status.affected_nodes.append(affected_node)
 
     # Node scenario to terminate the node
-    def node_termination_scenario(self, instance_kill_count, node, timeout, poll_interval):
+    def node_termination_scenario(self, instance_kill_count, node, timeout):
         for _ in range(instance_kill_count):
             affected_node = AffectedNode(node)
             try:
@@ -309,7 +309,7 @@ class gcp_node_scenarios(abstract_node_scenarios):
                 logging.info(
                     "Node with instance ID: %s has been terminated" % instance_id
                 )
-                logging.info("node_termination_scenario has been successfully injected!")
+                logging.info("node_termination_scenario has been successfuly injected!")
             except Exception as e:
                 logging.error(
                     "Failed to terminate node instance. Encountered following exception:"
@@ -341,7 +341,7 @@ class gcp_node_scenarios(abstract_node_scenarios):
                 logging.info(
                     "Node with instance ID: %s has been rebooted" % instance_id
                 )
-                logging.info("node_reboot_scenario has been successfully injected!")
+                logging.info("node_reboot_scenario has been successfuly injected!")
             except Exception as e:
                 logging.error(
                     "Failed to reboot node instance. Encountered following exception:"

@@ -18,21 +18,21 @@ class general_node_scenarios(abstract_node_scenarios):
         self.node_action_kube_check = node_action_kube_check
 
     # Node scenario to start the node
-    def node_start_scenario(self, instance_kill_count, node, timeout, poll_interval):
+    def node_start_scenario(self, instance_kill_count, node, timeout):
         logging.info(
             "Node start is not set up yet for this cloud type, "
             "no action is going to be taken"
         )
 
     # Node scenario to stop the node
-    def node_stop_scenario(self, instance_kill_count, node, timeout, poll_interval):
+    def node_stop_scenario(self, instance_kill_count, node, timeout):
         logging.info(
             "Node stop is not set up yet for this cloud type,"
             " no action is going to be taken"
         )
 
     # Node scenario to terminate the node
-    def node_termination_scenario(self, instance_kill_count, node, timeout, poll_interval):
+    def node_termination_scenario(self, instance_kill_count, node, timeout):
         logging.info(
             "Node termination is not set up yet for this cloud type, "
             "no action is going to be taken"

@@ -284,7 +284,7 @@ class ibm_node_scenarios(abstract_node_scenarios):
 
         self.node_action_kube_check = node_action_kube_check
 
-    def node_start_scenario(self, instance_kill_count, node, timeout, poll_interval):
+    def node_start_scenario(self, instance_kill_count, node, timeout):
         try:
             instance_id = self.ibmcloud.get_instance_id( node)
             affected_node = AffectedNode(node, node_id=instance_id)
@@ -317,7 +317,7 @@ class ibm_node_scenarios(abstract_node_scenarios):
         self.affected_nodes_status.affected_nodes.append(affected_node)
 
 
-    def node_stop_scenario(self, instance_kill_count, node, timeout, poll_interval):
+    def node_stop_scenario(self, instance_kill_count, node, timeout):
         try:
             instance_id = self.ibmcloud.get_instance_id(node)
             for _ in range(instance_kill_count):
@@ -327,20 +327,14 @@ class ibm_node_scenarios(abstract_node_scenarios):
                 vm_stopped = self.ibmcloud.stop_instances(instance_id)
-                if vm_stopped:
-                    self.ibmcloud.wait_until_stopped(instance_id, timeout, affected_node)
-                    logging.info(
-                        "Node with instance ID: %s is in stopped state" % node
-                    )
-                    logging.info(
-                        "node_stop_scenario has been successfully injected!"
-                    )
-                else:
-                    logging.error(
-                        "Failed to stop node instance %s. Stop command failed." % instance_id
-                    )
-                    raise Exception("Stop command failed for instance %s" % instance_id)
                 self.affected_nodes_status.affected_nodes.append(affected_node)
+                logging.info(
+                    "Node with instance ID: %s is in stopped state" % node
+                )
+                logging.info(
+                    "node_stop_scenario has been successfully injected!"
+                )
         except Exception as e:
-            logging.error("Failed to stop node instance. Test Failed: %s" % str(e))
+            logging.error("Failed to stop node instance. Test Failed")
             logging.error("node_stop_scenario injection failed!")

@@ -351,35 +345,28 @@ class ibm_node_scenarios(abstract_node_scenarios):
                 affected_node = AffectedNode(node, node_id=instance_id)
                 logging.info("Starting node_reboot_scenario injection")
                 logging.info("Rebooting the node %s " % (node))
-                vm_rebooted = self.ibmcloud.reboot_instances(instance_id)
-                if vm_rebooted:
-                    self.ibmcloud.wait_until_rebooted(instance_id, timeout, affected_node)
-                    if self.node_action_kube_check:
-                        nodeaction.wait_for_unknown_status(
-                            node, timeout, self.kubecli, affected_node
-                        )
-                        nodeaction.wait_for_ready_status(
-                            node, timeout, self.kubecli, affected_node
-                        )
-                    logging.info(
-                        "Node with instance ID: %s has rebooted successfully" % node
-                    )
-                    logging.info(
-                        "node_reboot_scenario has been successfully injected!"
-                    )
-                else:
-                    logging.error(
-                        "Failed to reboot node instance %s. Reboot command failed." % instance_id
-                    )
-                    raise Exception("Reboot command failed for instance %s" % instance_id)
+                self.ibmcloud.reboot_instances(instance_id)
+                self.ibmcloud.wait_until_rebooted(instance_id, timeout, affected_node)
+                if self.node_action_kube_check:
+                    nodeaction.wait_for_unknown_status(
+                        node, timeout, affected_node
+                    )
+                    nodeaction.wait_for_ready_status(
+                        node, timeout, affected_node
+                    )
                 self.affected_nodes_status.affected_nodes.append(affected_node)
+                logging.info(
+                    "Node with instance ID: %s has rebooted successfully" % node
+                )
+                logging.info(
+                    "node_reboot_scenario has been successfully injected!"
+                )
 
         except Exception as e:
-            logging.error("Failed to reboot node instance. Test Failed: %s" % str(e))
+            logging.error("Failed to reboot node instance. Test Failed")
             logging.error("node_reboot_scenario injection failed!")

-    def node_terminate_scenario(self, instance_kill_count, node, timeout, poll_interval):
+    def node_terminate_scenario(self, instance_kill_count, node, timeout):
         try:
             instance_id = self.ibmcloud.get_instance_id(node)
             for _ in range(instance_kill_count):
@@ -396,8 +383,7 @@ class ibm_node_scenarios(abstract_node_scenarios):
                 logging.info(
                     "node_terminate_scenario has been successfully injected!"
                 )
                 self.affected_nodes_status.affected_nodes.append(affected_node)
         except Exception as e:
-            logging.error("Failed to terminate node instance. Test Failed: %s" % str(e))
+            logging.error("Failed to terminate node instance. Test Failed")
             logging.error("node_terminate_scenario injection failed!")

@@ -298,7 +298,7 @@ class ibmcloud_power_node_scenarios(abstract_node_scenarios):
 
         self.node_action_kube_check = node_action_kube_check
 
-    def node_start_scenario(self, instance_kill_count, node, timeout, poll_interval):
+    def node_start_scenario(self, instance_kill_count, node, timeout):
         try:
             instance_id = self.ibmcloud_power.get_instance_id( node)
             affected_node = AffectedNode(node, node_id=instance_id)
@@ -331,7 +331,7 @@ class ibmcloud_power_node_scenarios(abstract_node_scenarios):
         self.affected_nodes_status.affected_nodes.append(affected_node)
 
 
-    def node_stop_scenario(self, instance_kill_count, node, timeout, poll_interval):
+    def node_stop_scenario(self, instance_kill_count, node, timeout):
         try:
             instance_id = self.ibmcloud_power.get_instance_id(node)
             for _ in range(instance_kill_count):
@@ -380,7 +380,7 @@ class ibmcloud_power_node_scenarios(abstract_node_scenarios):
         logging.error("node_reboot_scenario injection failed!")
 
 
-    def node_terminate_scenario(self, instance_kill_count, node, timeout, poll_interval):
+    def node_terminate_scenario(self, instance_kill_count, node, timeout):
         try:
             instance_id = self.ibmcloud_power.get_instance_id(node)
             for _ in range(instance_kill_count):

@@ -236,7 +236,7 @@ class NodeActionsScenarioPlugin(AbstractScenarioPlugin):
         # Get the scenario specifics for running action nodes
         run_kill_count = get_yaml_item_value(node_scenario, "runs", 1)
         duration = get_yaml_item_value(node_scenario, "duration", 120)
-        poll_interval = get_yaml_item_value(node_scenario, "poll_interval", 15)
 
         timeout = get_yaml_item_value(node_scenario, "timeout", 120)
         service = get_yaml_item_value(node_scenario, "service", "")
         soft_reboot = get_yaml_item_value(node_scenario, "soft_reboot", False)
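For orientation, `get_yaml_item_value` is a default-on-missing lookup over the parsed scenario YAML. A hedged sketch of the behavior the hunk above relies on (the helper body is an illustration, not krkn's exact implementation):

```python
def get_yaml_item_value(cont, item, default):
    # Illustrative only: return the configured value, or the default
    # when the key is absent from the scenario YAML.
    value = cont.get(item)
    return default if value is None else value

node_scenario = {"runs": 2, "timeout": 180}  # hypothetical parsed YAML
assert get_yaml_item_value(node_scenario, "runs", 1) == 2
assert get_yaml_item_value(node_scenario, "duration", 120) == 120
```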
@@ -254,19 +254,19 @@ class NodeActionsScenarioPlugin(AbstractScenarioPlugin):
         else:
             if action == "node_start_scenario":
                 node_scenario_object.node_start_scenario(
-                    run_kill_count, single_node, timeout, poll_interval
+                    run_kill_count, single_node, timeout
                 )
             elif action == "node_stop_scenario":
                 node_scenario_object.node_stop_scenario(
-                    run_kill_count, single_node, timeout, poll_interval
+                    run_kill_count, single_node, timeout
                 )
             elif action == "node_stop_start_scenario":
                 node_scenario_object.node_stop_start_scenario(
-                    run_kill_count, single_node, timeout, duration, poll_interval
+                    run_kill_count, single_node, timeout, duration
                 )
             elif action == "node_termination_scenario":
                 node_scenario_object.node_termination_scenario(
-                    run_kill_count, single_node, timeout, poll_interval
+                    run_kill_count, single_node, timeout
                 )
             elif action == "node_reboot_scenario":
                 node_scenario_object.node_reboot_scenario(

@@ -122,7 +122,7 @@ class openstack_node_scenarios(abstract_node_scenarios):
         self.node_action_kube_check = node_action_kube_check
 
     # Node scenario to start the node
-    def node_start_scenario(self, instance_kill_count, node, timeout, poll_interval):
+    def node_start_scenario(self, instance_kill_count, node, timeout):
         for _ in range(instance_kill_count):
             affected_node = AffectedNode(node)
             try:
@@ -147,7 +147,7 @@ class openstack_node_scenarios(abstract_node_scenarios):
         self.affected_nodes_status.affected_nodes.append(affected_node)
 
     # Node scenario to stop the node
-    def node_stop_scenario(self, instance_kill_count, node, timeout, poll_interval):
+    def node_stop_scenario(self, instance_kill_count, node, timeout):
         for _ in range(instance_kill_count):
             affected_node = AffectedNode(node)
             try:
@@ -184,7 +184,7 @@ class openstack_node_scenarios(abstract_node_scenarios):
                 nodeaction.wait_for_unknown_status(node, timeout, self.kubecli, affected_node)
                 nodeaction.wait_for_ready_status(node, timeout, self.kubecli, affected_node)
                 logging.info("Node with instance name: %s has been rebooted" % (node))
-                logging.info("node_reboot_scenario has been successfully injected!")
+                logging.info("node_reboot_scenario has been successfuly injected!")
             except Exception as e:
                 logging.error(
                     "Failed to reboot node instance. Encountered following exception:"
@@ -249,7 +249,7 @@ class openstack_node_scenarios(abstract_node_scenarios):
                 node_ip.strip(), service, ssh_private_key, timeout
             )
             logging.info("Service status checked on %s" % (node_ip))
-            logging.info("Check service status is successfully injected!")
+            logging.info("Check service status is successfuly injected!")
         except Exception as e:
             logging.error(
                 "Failed to check service status. Encountered following exception:"

@@ -389,7 +389,7 @@ class vmware_node_scenarios(abstract_node_scenarios):
         self.vsphere = vSphere()
         self.node_action_kube_check = node_action_kube_check
 
-    def node_start_scenario(self, instance_kill_count, node, timeout, poll_interval):
+    def node_start_scenario(self, instance_kill_count, node, timeout):
         try:
             for _ in range(instance_kill_count):
                 affected_node = AffectedNode(node)
@@ -409,7 +409,7 @@ class vmware_node_scenarios(abstract_node_scenarios):
                 f"node_start_scenario injection failed! " f"Error was: {str(e)}"
             )
 
-    def node_stop_scenario(self, instance_kill_count, node, timeout, poll_interval):
+    def node_stop_scenario(self, instance_kill_count, node, timeout):
         try:
             for _ in range(instance_kill_count):
                 affected_node = AffectedNode(node)
@@ -456,7 +456,7 @@ class vmware_node_scenarios(abstract_node_scenarios):
             )
 
 
-    def node_terminate_scenario(self, instance_kill_count, node, timeout, poll_interval):
+    def node_terminate_scenario(self, instance_kill_count, node, timeout):
         try:
             for _ in range(instance_kill_count):
                 affected_node = AffectedNode(node)

@@ -2,7 +2,7 @@ import logging
 import random
 import time
 from asyncio import Future
-import traceback
 
 import yaml
 from krkn_lib.k8s import KrknKubernetes
 from krkn_lib.k8s.pod_monitor import select_and_monitor_by_namespace_pattern_and_label, \
@@ -11,7 +11,6 @@ from krkn_lib.k8s.pod_monitor import select_and_monitor_by_namespace_pattern_and
 from krkn.scenario_plugins.pod_disruption.models.models import InputParams
 from krkn_lib.models.telemetry import ScenarioTelemetry
 from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
-from krkn_lib.models.pod_monitor.models import PodsSnapshot
 from datetime import datetime
 from dataclasses import dataclass

@@ -41,27 +40,10 @@ class PodDisruptionScenarioPlugin(AbstractScenarioPlugin):
                 kill_scenario_config,
                 lib_telemetry
             )
-            ret = self.killing_pods(
+            self.killing_pods(
                 kill_scenario_config, lib_telemetry.get_lib_kubernetes()
             )
-            # returning 2 if configuration issue and exiting immediately
-            if ret > 1:
-                # Cancel the monitoring future since killing_pods already failed
-                logging.info("Cancelling pod monitoring future")
-                future_snapshot.cancel()
-                # Wait for the future to finish (monitoring will stop when stop_event is set)
-                while not future_snapshot.done():
-                    logging.info("waiting for future to finish")
-                    time.sleep(1)
-                logging.info("future snapshot cancelled and finished")
-                # Get the snapshot result (even if cancelled, it will have partial data)
-                snapshot = future_snapshot.result()
-                result = snapshot.get_pods_status()
-                scenario_telemetry.affected_pods = result
-
-                logging.error("PodDisruptionScenarioPlugin failed during setup" + str(result))
-                return 1
-
             snapshot = future_snapshot.result()
             result = snapshot.get_pods_status()
             scenario_telemetry.affected_pods = result
@@ -69,12 +51,7 @@ class PodDisruptionScenarioPlugin(AbstractScenarioPlugin):
                 logging.info("PodDisruptionScenarioPlugin failed with unrecovered pods")
                 return 1
 
-            if ret > 0:
-                logging.info("PodDisruptionScenarioPlugin failed")
-                return 1
-
         except (RuntimeError, Exception) as e:
-            logging.error("Stack trace:\n%s", traceback.format_exc())
             logging.error("PodDisruptionScenariosPlugin exiting due to Exception %s" % e)
             return 1
         else:
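The removed branch above follows the usual future lifecycle: `cancel()`, poll `done()`, then read the result. A standalone sketch of that generic pattern with `concurrent.futures` (krkn's monitoring future actually comes from krkn_lib's pod_monitor; this only illustrates the shape):

```python
import time
from concurrent.futures import ThreadPoolExecutor

def monitor():
    # Stand-in for the pod monitoring task.
    time.sleep(2)
    return {"pods": "snapshot"}

with ThreadPoolExecutor() as pool:
    future_snapshot = pool.submit(monitor)
    future_snapshot.cancel()           # no-op if the task already started
    while not future_snapshot.done():  # wait for it to finish either way
        time.sleep(0.1)
    if not future_snapshot.cancelled():
        print(future_snapshot.result())
```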
@@ -151,7 +128,7 @@ class PodDisruptionScenarioPlugin(AbstractScenarioPlugin):
             field_selector=combined_field_selector
         )
 
-    def get_pods(self, name_pattern, label_selector, namespace, kubecli: KrknKubernetes, field_selector: str = None, node_label_selector: str = None, node_names: list = None):
+    def get_pods(self, name_pattern, label_selector, namespace, kubecli: KrknKubernetes, field_selector: str = None, node_label_selector: str = None, node_names: list = None, quiet: bool = False):
         if label_selector and name_pattern:
             logging.error('Only, one of name pattern or label pattern can be specified')
             return []
@@ -162,7 +139,8 @@ class PodDisruptionScenarioPlugin(AbstractScenarioPlugin):
 
         # If specific node names are provided, make multiple calls with field selector
         if node_names:
-            logging.debug(f"Targeting pods on {len(node_names)} specific nodes")
+            if not quiet:
+                logging.info(f"Targeting pods on {len(node_names)} specific nodes")
             all_pods = []
             for node_name in node_names:
                 pods = self._select_pods_with_field_selector(
@@ -172,7 +150,8 @@ class PodDisruptionScenarioPlugin(AbstractScenarioPlugin):
                 if pods:
                     all_pods.extend(pods)
 
-            logging.debug(f"Found {len(all_pods)} target pods across {len(node_names)} nodes")
+            if not quiet:
+                logging.info(f"Found {len(all_pods)} target pods across {len(node_names)} nodes")
             return all_pods
 
         # Node label selector approach - use field selectors
@@ -180,10 +159,11 @@ class PodDisruptionScenarioPlugin(AbstractScenarioPlugin):
         # Get nodes matching the label selector first
         nodes_with_label = kubecli.list_nodes(label_selector=node_label_selector)
         if not nodes_with_label:
-            logging.debug(f"No nodes found with label selector: {node_label_selector}")
+            logging.info(f"No nodes found with label selector: {node_label_selector}")
             return []
 
-        logging.debug(f"Targeting pods on {len(nodes_with_label)} nodes with label: {node_label_selector}")
+        if not quiet:
+            logging.info(f"Targeting pods on {len(nodes_with_label)} nodes with label: {node_label_selector}")
         # Use field selector for each node
         all_pods = []
         for node_name in nodes_with_label:
@@ -194,7 +174,8 @@ class PodDisruptionScenarioPlugin(AbstractScenarioPlugin):
                 if pods:
                     all_pods.extend(pods)
 
-        logging.debug(f"Found {len(all_pods)} target pods across {len(nodes_with_label)} nodes")
+        if not quiet:
+            logging.info(f"Found {len(all_pods)} target pods across {len(nodes_with_label)} nodes")
         return all_pods
 
         # Standard pod selection (no node targeting)
@@ -204,40 +185,37 @@ class PodDisruptionScenarioPlugin(AbstractScenarioPlugin):
 
     def killing_pods(self, config: InputParams, kubecli: KrknKubernetes):
         # region Select target pods
-        try:
-            namespace = config.namespace_pattern
-            if not namespace:
-                logging.error('Namespace pattern must be specified')
-
-            pods = self.get_pods(config.name_pattern,config.label_selector,config.namespace_pattern, kubecli, field_selector="status.phase=Running", node_label_selector=config.node_label_selector, node_names=config.node_names)
-            exclude_pods = set()
-            if config.exclude_label:
-                _exclude_pods = self.get_pods("",config.exclude_label,config.namespace_pattern, kubecli, field_selector="status.phase=Running", node_label_selector=config.node_label_selector, node_names=config.node_names)
-                for pod in _exclude_pods:
-                    exclude_pods.add(pod[0])
-
-            pods_count = len(pods)
-            if len(pods) < config.kill:
-                logging.error("Not enough pods match the criteria, expected {} but found only {} pods".format(
-                    config.kill, len(pods)))
-                return 1
-
-            random.shuffle(pods)
-            for i in range(config.kill):
-                pod = pods[i]
-                logging.info(pod)
-                if pod[0] in exclude_pods:
-                    logging.info(f"Excluding {pod[0]} from chaos")
-                else:
-                    logging.info(f'Deleting pod {pod[0]}')
-                    kubecli.delete_pod(pod[0], pod[1])
-
-            return_val = self.wait_for_pods(config.label_selector,config.name_pattern,config.namespace_pattern, pods_count, config.duration, config.timeout, kubecli, config.node_label_selector, config.node_names)
-        except Exception as e:
-            raise(e)
-
-        return return_val
+        namespace = config.namespace_pattern
+        if not namespace:
+            logging.error('Namespace pattern must be specified')
+
+        pods = self.get_pods(config.name_pattern,config.label_selector,config.namespace_pattern, kubecli, field_selector="status.phase=Running", node_label_selector=config.node_label_selector, node_names=config.node_names)
+        exclude_pods = set()
+        if config.exclude_label:
+            _exclude_pods = self.get_pods("",config.exclude_label,config.namespace_pattern, kubecli, field_selector="status.phase=Running", node_label_selector=config.node_label_selector, node_names=config.node_names)
+            for pod in _exclude_pods:
+                exclude_pods.add(pod[0])
+
+        pods_count = len(pods)
+        if len(pods) < config.kill:
+            logging.error("Not enough pods match the criteria, expected {} but found only {} pods".format(
+                config.kill, len(pods)))
+            return 1
+
+        random.shuffle(pods)
+        for i in range(config.kill):
+            pod = pods[i]
+            logging.info(pod)
+            if pod[0] in exclude_pods:
+                logging.info(f"Excluding {pod[0]} from chaos")
+            else:
+                logging.info(f'Deleting pod {pod[0]}')
+                kubecli.delete_pod(pod[0], pod[1])
+
+        self.wait_for_pods(config.label_selector,config.name_pattern,config.namespace_pattern, pods_count, config.duration, config.timeout, kubecli, config.node_label_selector, config.node_names)
+        return 0
 
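The selection-and-kill logic above reduces to Kubernetes list-and-delete primitives. A hedged sketch using the official Python client rather than krkn's `KrknKubernetes` wrapper (the namespace and label selector are placeholders):

```python
import random
from kubernetes import client, config

config.load_kube_config()
v1 = client.CoreV1Api()

# List running pods matching a label, then delete a random victim.
pods = v1.list_namespaced_pod(
    "default",
    label_selector="app=example",            # placeholder selector
    field_selector="status.phase=Running",
).items
if pods:
    victim = random.choice(pods)
    v1.delete_namespaced_pod(victim.metadata.name, victim.metadata.namespace)
```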
     def wait_for_pods(
         self, label_selector, pod_name, namespace, pod_count, duration, wait_timeout, kubecli: KrknKubernetes, node_label_selector, node_names
@@ -246,10 +224,10 @@ class PodDisruptionScenarioPlugin(AbstractScenarioPlugin):
         start_time = datetime.now()
 
         while not timeout:
-            pods = self.get_pods(name_pattern=pod_name, label_selector=label_selector,namespace=namespace, field_selector="status.phase=Running", kubecli=kubecli, node_label_selector=node_label_selector, node_names=node_names)
+            pods = self.get_pods(name_pattern=pod_name, label_selector=label_selector,namespace=namespace, field_selector="status.phase=Running", kubecli=kubecli, node_label_selector=node_label_selector, node_names=node_names, quiet=True)
             if pod_count == len(pods):
-                return 0
+                return
 
             time.sleep(duration)
 
             now_time = datetime.now()
@@ -258,5 +236,4 @@ class PodDisruptionScenarioPlugin(AbstractScenarioPlugin):
             if time_diff.seconds > wait_timeout:
                 logging.error("timeout while waiting for pods to come up")
                 return 1
 
-        return 0
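The loop above follows a common poll-with-deadline shape; a standalone sketch of the same pattern (all names are illustrative):

```python
import time
from datetime import datetime

def wait_until(predicate, wait_timeout, poll_seconds=5):
    # Illustrative poll-with-deadline loop mirroring wait_for_pods above.
    start_time = datetime.now()
    while True:
        if predicate():
            return 0
        if (datetime.now() - start_time).seconds > wait_timeout:
            return 1  # timed out
        time.sleep(poll_seconds)
```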
@@ -209,7 +209,7 @@ class ServiceDisruptionScenarioPlugin(AbstractScenarioPlugin):
         try:
             statefulsets = kubecli.get_all_statefulset(namespace)
             for statefulset in statefulsets:
-                logging.info("Deleting statefulset" + statefulset)
+                logging.info("Deleting statefulsets" + statefulsets)
                 kubecli.delete_statefulset(statefulset, namespace)
         except Exception as e:
             logging.error(

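One detail worth flagging in the hunk above: the added line concatenates the list `statefulsets` to a string inside the loop, which raises `TypeError` before any deletion is logged, whereas the removed line logged the single `statefulset` item. A tiny illustration:

```python
statefulsets = ["ss-a", "ss-b"]
for statefulset in statefulsets:
    print("Deleting statefulset" + statefulset)      # fine: str + str
    # print("Deleting statefulsets" + statefulsets)  # TypeError: str + list
```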
@@ -43,7 +43,7 @@ class TimeActionsScenarioPlugin(AbstractScenarioPlugin):
             cerberus.publish_kraken_status(
                 krkn_config, not_reset, start_time, end_time
             )
-        except (RuntimeError, Exception) as e:
+        except (RuntimeError, Exception):
             logging.error(
                 f"TimeActionsScenarioPlugin scenario {scenario} failed with exception: {e}"
             )

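Note on the hunk above: the added `except (RuntimeError, Exception):` drops the `as e` binding while the log line still interpolates `{e}`, so the handler itself raises `NameError` at logging time. A tiny sketch of the pitfall (function name is hypothetical):

```python
import logging

def risky():
    raise RuntimeError("boom")

try:
    risky()
except (RuntimeError, Exception):  # no `as e` binding here
    # logging.error(f"failed with exception: {e}")  # NameError: e is undefined
    logging.error("failed")  # safe: bind the exception with `as e` before using it
```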
@@ -140,7 +140,7 @@ class ZoneOutageScenarioPlugin(AbstractScenarioPlugin):
                     network_association_ids[0], acl_id
                 )
 
-                # capture the original_acl_id, created_acl_id and
+                # capture the orginal_acl_id, created_acl_id and
                 # new association_id to use during the recovery
                 ids[new_association_id] = original_acl_id

@@ -156,7 +156,7 @@ class ZoneOutageScenarioPlugin(AbstractScenarioPlugin):
                     new_association_id, original_acl_id
                 )
                 logging.info(
-                    "Waiting for 60 seconds to make sure " "the changes are in place"
+                    "Wating for 60 seconds to make sure " "the changes are in place"
                 )
                 time.sleep(60)

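For context on the recovery bookkeeping above: swapping a subnet's network ACL in EC2 is done by replacing the association and keeping the returned association ID for the restore step. A minimal boto3 sketch (all IDs are placeholders):

```python
import boto3

ec2 = boto3.client("ec2")

# Point the subnet's association at a (placeholder) deny-all ACL...
resp = ec2.replace_network_acl_association(
    AssociationId="aclassoc-0123456789abcdef0",
    NetworkAclId="acl-0123456789abcdef0",
)
new_association_id = resp["NewAssociationId"]

# ...and later restore the original ACL using the new association ID.
ec2.replace_network_acl_association(
    AssociationId=new_association_id,
    NetworkAclId="acl-0fedcba9876543210",
)
```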
@@ -1,17 +1,10 @@
-import json
-import tempfile
 import unittest
-from pathlib import Path
-from unittest.mock import Mock, patch
 
 from krkn.scenario_plugins.abstract_scenario_plugin import AbstractScenarioPlugin
 from krkn.scenario_plugins.scenario_plugin_factory import ScenarioPluginFactory
-from krkn.scenario_plugins.native.plugins import PluginStep, Plugins, PLUGINS
 from krkn.tests.test_classes.correct_scenario_plugin import (
     CorrectScenarioPlugin,
 )
-import yaml
 
 
 class TestPluginFactory(unittest.TestCase):
@@ -115,437 +108,3 @@ class TestPluginFactory(unittest.TestCase):
         self.assertEqual(
             message, "scenario plugin folder cannot contain `scenario` or `plugin` word"
         )
-
-
-class TestPluginStep(unittest.TestCase):
-    """Test cases for PluginStep class"""
-
-    def setUp(self):
-        """Set up test fixtures"""
-        # Create a mock schema
-        self.mock_schema = Mock()
-        self.mock_schema.id = "test_step"
-
-        # Create mock output
-        mock_output = Mock()
-        mock_output.serialize = Mock(return_value={"status": "success", "message": "test"})
-        self.mock_schema.outputs = {
-            "success": mock_output,
-            "error": mock_output
-        }
-
-        self.plugin_step = PluginStep(
-            schema=self.mock_schema,
-            error_output_ids=["error"]
-        )
-
-    def test_render_output(self):
-        """Test render_output method"""
-        output_id = "success"
-        output_data = {"status": "success", "message": "test output"}
-
-        result = self.plugin_step.render_output(output_id, output_data)
-
-        # Verify it returns a JSON string
-        self.assertIsInstance(result, str)
-
-        # Verify it can be parsed as JSON
-        parsed = json.loads(result)
-        self.assertEqual(parsed["output_id"], output_id)
-        self.assertIn("output_data", parsed)
-
-
-class TestPlugins(unittest.TestCase):
-    """Test cases for Plugins class"""
-
-    def setUp(self):
-        """Set up test fixtures"""
-        # Create mock steps with proper id attribute
-        self.mock_step1 = Mock()
-        self.mock_step1.id = "step1"
-
-        self.mock_step2 = Mock()
-        self.mock_step2.id = "step2"
-
-        self.plugin_step1 = PluginStep(schema=self.mock_step1, error_output_ids=["error"])
-        self.plugin_step2 = PluginStep(schema=self.mock_step2, error_output_ids=["error"])
-
-    def test_init_with_valid_steps(self):
-        """Test Plugins initialization with valid steps"""
-        plugins = Plugins([self.plugin_step1, self.plugin_step2])
-
-        self.assertEqual(len(plugins.steps_by_id), 2)
-        self.assertIn("step1", plugins.steps_by_id)
-        self.assertIn("step2", plugins.steps_by_id)
-
-    def test_init_with_duplicate_step_ids(self):
-        """Test Plugins initialization with duplicate step IDs raises exception"""
-        # Create two steps with the same ID
-        duplicate_step = PluginStep(schema=self.mock_step1, error_output_ids=["error"])
-
-        with self.assertRaises(Exception) as context:
-            Plugins([self.plugin_step1, duplicate_step])
-
-        self.assertIn("Duplicate step ID", str(context.exception))
-
-    def test_unserialize_scenario(self):
-        """Test unserialize_scenario method"""
-        # Create a temporary YAML file
-        test_data = [
-            {"id": "test_step", "config": {"param": "value"}}
-        ]
-
-        with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as f:
-            yaml.dump(test_data, f)
-            temp_file = f.name
-
-        try:
-            plugins = Plugins([self.plugin_step1])
-            result = plugins.unserialize_scenario(temp_file)
-
-            self.assertIsInstance(result, list)
-        finally:
-            Path(temp_file).unlink()
-
-    def test_run_with_invalid_scenario_not_list(self):
-        """Test run method with scenario that is not a list"""
-        # Create a temporary YAML file with dict instead of list
-        test_data = {"id": "test_step", "config": {"param": "value"}}
-
-        with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as f:
-            yaml.dump(test_data, f)
-            temp_file = f.name
-
-        try:
-            plugins = Plugins([self.plugin_step1])
-
-            with self.assertRaises(Exception) as context:
-                plugins.run(temp_file, "/path/to/kubeconfig", "/path/to/kraken_config", "test-uuid")
-
-            self.assertIn("expected list", str(context.exception))
-        finally:
-            Path(temp_file).unlink()
-
-    def test_run_with_invalid_entry_not_dict(self):
-        """Test run method with entry that is not a dict"""
-        # Create a temporary YAML file with list of strings instead of dicts
-        test_data = ["invalid", "entries"]
-
-        with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as f:
-            yaml.dump(test_data, f)
-            temp_file = f.name
-
-        try:
-            plugins = Plugins([self.plugin_step1])
-
-            with self.assertRaises(Exception) as context:
-                plugins.run(temp_file, "/path/to/kubeconfig", "/path/to/kraken_config", "test-uuid")
-
-            self.assertIn("expected a list of dict's", str(context.exception))
-        finally:
-            Path(temp_file).unlink()
-
-    def test_run_with_missing_id_field(self):
-        """Test run method with missing 'id' field"""
-        # Create a temporary YAML file with missing id
-        test_data = [
-            {"config": {"param": "value"}}
-        ]
-
-        with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as f:
-            yaml.dump(test_data, f)
-            temp_file = f.name
-
-        try:
-            plugins = Plugins([self.plugin_step1])
-
-            with self.assertRaises(Exception) as context:
-                plugins.run(temp_file, "/path/to/kubeconfig", "/path/to/kraken_config", "test-uuid")
-
-            self.assertIn("missing 'id' field", str(context.exception))
-        finally:
-            Path(temp_file).unlink()
-
-    def test_run_with_missing_config_field(self):
-        """Test run method with missing 'config' field"""
-        # Create a temporary YAML file with missing config
-        test_data = [
-            {"id": "step1"}
-        ]
-
-        with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as f:
-            yaml.dump(test_data, f)
-            temp_file = f.name
-
-        try:
-            plugins = Plugins([self.plugin_step1])
-
-            with self.assertRaises(Exception) as context:
-                plugins.run(temp_file, "/path/to/kubeconfig", "/path/to/kraken_config", "test-uuid")
-
-            self.assertIn("missing 'config' field", str(context.exception))
-        finally:
-            Path(temp_file).unlink()
-
-    def test_run_with_invalid_step_id(self):
-        """Test run method with invalid step ID"""
-        # Create a proper mock schema with string ID
-        mock_schema = Mock()
-        mock_schema.id = "valid_step"
-        plugin_step = PluginStep(schema=mock_schema, error_output_ids=["error"])
-
-        # Create a temporary YAML file with unknown step ID
-        test_data = [
-            {"id": "unknown_step", "config": {"param": "value"}}
-        ]
-
-        with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as f:
-            yaml.dump(test_data, f)
-            temp_file = f.name
-
-        try:
-            plugins = Plugins([plugin_step])
-
-            with self.assertRaises(Exception) as context:
-                plugins.run(temp_file, "/path/to/kubeconfig", "/path/to/kraken_config", "test-uuid")
-
-            self.assertIn("Invalid step", str(context.exception))
-            self.assertIn("expected one of", str(context.exception))
-        finally:
-            Path(temp_file).unlink()
-
-    @patch('krkn.scenario_plugins.native.plugins.logging')
-    def test_run_with_valid_scenario(self, mock_logging):
-        """Test run method with valid scenario"""
-        # Create mock schema with all necessary attributes
-        mock_schema = Mock()
-        mock_schema.id = "test_step"
-
-        # Mock input schema
-        mock_input = Mock()
-        mock_input.properties = {}
-        mock_input.unserialize = Mock(return_value=Mock(spec=[]))
-        mock_schema.input = mock_input
-
-        # Mock output
-        mock_output = Mock()
-        mock_output.serialize = Mock(return_value={"status": "success"})
-        mock_schema.outputs = {"success": mock_output}
-
-        # Mock schema call
-        mock_schema.return_value = ("success", {"status": "success"})
-
-        plugin_step = PluginStep(schema=mock_schema, error_output_ids=["error"])
-
-        # Create a temporary YAML file
-        test_data = [
-            {"id": "test_step", "config": {"param": "value"}}
-        ]
-
-        with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as f:
-            yaml.dump(test_data, f)
-            temp_file = f.name
-
-        try:
-            plugins = Plugins([plugin_step])
-            plugins.run(temp_file, "/path/to/kubeconfig", "/path/to/kraken_config", "test-uuid")
-
-            # Verify schema was called
-            mock_schema.assert_called_once()
-        finally:
-            Path(temp_file).unlink()
-
-    @patch('krkn.scenario_plugins.native.plugins.logging')
-    def test_run_with_error_output(self, mock_logging):
-        """Test run method when step returns error output"""
-        # Create mock schema with error output
-        mock_schema = Mock()
-        mock_schema.id = "test_step"
-
-        # Mock input schema
-        mock_input = Mock()
-        mock_input.properties = {}
-        mock_input.unserialize = Mock(return_value=Mock(spec=[]))
-        mock_schema.input = mock_input
-
-        # Mock output
-        mock_output = Mock()
-        mock_output.serialize = Mock(return_value={"error": "test error"})
-        mock_schema.outputs = {"error": mock_output}
-
-        # Mock schema call to return error
-        mock_schema.return_value = ("error", {"error": "test error"})
-
-        plugin_step = PluginStep(schema=mock_schema, error_output_ids=["error"])
-
-        # Create a temporary YAML file
-        test_data = [
-            {"id": "test_step", "config": {"param": "value"}}
-        ]
-
-        with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as f:
-            yaml.dump(test_data, f)
-            temp_file = f.name
-
-        try:
-            plugins = Plugins([plugin_step])
-
-            with self.assertRaises(Exception) as context:
-                plugins.run(temp_file, "/path/to/kubeconfig", "/path/to/kraken_config", "test-uuid")
-
-            self.assertIn("failed", str(context.exception))
-        finally:
-            Path(temp_file).unlink()
-
-    @patch('krkn.scenario_plugins.native.plugins.logging')
-    def test_run_with_kubeconfig_path_injection(self, mock_logging):
-        """Test run method injects kubeconfig_path when property exists"""
-        # Create mock schema with kubeconfig_path in input properties
-        mock_schema = Mock()
-        mock_schema.id = "test_step"
-
-        # Mock input schema with kubeconfig_path property
-        mock_input_instance = Mock()
-        mock_input = Mock()
-        mock_input.properties = {"kubeconfig_path": Mock()}
-        mock_input.unserialize = Mock(return_value=mock_input_instance)
-        mock_schema.input = mock_input
-
-        # Mock output
-        mock_output = Mock()
-        mock_output.serialize = Mock(return_value={"status": "success"})
-        mock_schema.outputs = {"success": mock_output}
-
-        # Mock schema call
-        mock_schema.return_value = ("success", {"status": "success"})
-
-        plugin_step = PluginStep(schema=mock_schema, error_output_ids=["error"])
-
-        # Create a temporary YAML file
-        test_data = [
-            {"id": "test_step", "config": {"param": "value"}}
-        ]
-
-        with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as f:
-            yaml.dump(test_data, f)
-            temp_file = f.name
-
-        try:
-            plugins = Plugins([plugin_step])
-            plugins.run(temp_file, "/custom/kubeconfig", "/path/to/kraken_config", "test-uuid")
-
-            # Verify kubeconfig_path was set
-            self.assertEqual(mock_input_instance.kubeconfig_path, "/custom/kubeconfig")
-        finally:
-            Path(temp_file).unlink()
-
-    @patch('krkn.scenario_plugins.native.plugins.logging')
-    def test_run_with_kraken_config_injection(self, mock_logging):
-        """Test run method injects kraken_config when property exists"""
-        # Create mock schema with kraken_config in input properties
-        mock_schema = Mock()
-        mock_schema.id = "test_step"
-
-        # Mock input schema with kraken_config property
-        mock_input_instance = Mock()
-        mock_input = Mock()
-        mock_input.properties = {"kraken_config": Mock()}
-        mock_input.unserialize = Mock(return_value=mock_input_instance)
-        mock_schema.input = mock_input
-
-        # Mock output
-        mock_output = Mock()
-        mock_output.serialize = Mock(return_value={"status": "success"})
-        mock_schema.outputs = {"success": mock_output}
-
-        # Mock schema call
-        mock_schema.return_value = ("success", {"status": "success"})
-
-        plugin_step = PluginStep(schema=mock_schema, error_output_ids=["error"])
-
-        # Create a temporary YAML file
-        test_data = [
-            {"id": "test_step", "config": {"param": "value"}}
-        ]
-
-        with tempfile.NamedTemporaryFile(mode='w', suffix='.yaml', delete=False) as f:
-            yaml.dump(test_data, f)
-            temp_file = f.name
-
-        try:
-            plugins = Plugins([plugin_step])
-            plugins.run(temp_file, "/path/to/kubeconfig", "/custom/kraken.yaml", "test-uuid")
-
-            # Verify kraken_config was set
-            self.assertEqual(mock_input_instance.kraken_config, "/custom/kraken.yaml")
-        finally:
-            Path(temp_file).unlink()
-
-    def test_json_schema(self):
-        """Test json_schema method"""
-        # Create mock schema with jsonschema support
|
||||
mock_schema = Mock()
|
||||
mock_schema.id = "test_step"
|
||||
|
||||
plugin_step = PluginStep(schema=mock_schema, error_output_ids=["error"])
|
||||
|
||||
with patch('krkn.scenario_plugins.native.plugins.jsonschema') as mock_jsonschema:
|
||||
# Mock the step_input function
|
||||
mock_jsonschema.step_input.return_value = {
|
||||
"$id": "http://example.com",
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"title": "Test Schema",
|
||||
"description": "Test description",
|
||||
"type": "object",
|
||||
"properties": {"param": {"type": "string"}}
|
||||
}
|
||||
|
||||
plugins = Plugins([plugin_step])
|
||||
result = plugins.json_schema()
|
||||
|
||||
# Verify it returns a JSON string
|
||||
self.assertIsInstance(result, str)
|
||||
|
||||
# Parse and verify structure
|
||||
parsed = json.loads(result)
|
||||
self.assertEqual(parsed["$id"], "https://github.com/redhat-chaos/krkn/")
|
||||
self.assertEqual(parsed["type"], "array")
|
||||
self.assertEqual(parsed["minContains"], 1)
|
||||
self.assertIn("items", parsed)
|
||||
self.assertIn("oneOf", parsed["items"])
|
||||
|
||||
# Verify step is included
|
||||
self.assertEqual(len(parsed["items"]["oneOf"]), 1)
|
||||
step_schema = parsed["items"]["oneOf"][0]
|
||||
self.assertEqual(step_schema["properties"]["id"]["const"], "test_step")
|
||||
|
||||
|
||||
class TestPLUGINSConstant(unittest.TestCase):
|
||||
"""Test cases for the PLUGINS constant"""
|
||||
|
||||
def test_plugins_initialized(self):
|
||||
"""Test that PLUGINS constant is properly initialized"""
|
||||
self.assertIsInstance(PLUGINS, Plugins)
|
||||
|
||||
# Verify all expected steps are registered
|
||||
expected_steps = [
|
||||
"run_python",
|
||||
"network_chaos",
|
||||
"pod_network_outage",
|
||||
"pod_egress_shaping",
|
||||
"pod_ingress_shaping"
|
||||
]
|
||||
|
||||
for step_id in expected_steps:
|
||||
self.assertIn(step_id, PLUGINS.steps_by_id)
|
||||
|
||||
# Ensure the registered id matches the decorator and no legacy alias is present
|
||||
self.assertEqual(
|
||||
PLUGINS.steps_by_id["pod_network_outage"].schema.id,
|
||||
"pod_network_outage",
|
||||
)
|
||||
self.assertNotIn("pod_outage", PLUGINS.steps_by_id)
|
||||
|
||||
def test_plugins_step_count(self):
|
||||
"""Test that PLUGINS has the expected number of steps"""
|
||||
self.assertEqual(len(PLUGINS.steps_by_id), 5)
|
||||
|
||||
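For readers skimming the diff, the shape that `test_json_schema` asserts is easier to see as a document than as a chain of assertions. A hand-written sketch of that wrapper (illustrative only, not output captured from `Plugins.json_schema()`):

```python
import json

# Hypothetical wrapper: a scenario file is an array with at least one step,
# and each element must match exactly one registered step schema ("oneOf").
wrapper = {
    "$id": "https://github.com/redhat-chaos/krkn/",
    "type": "array",
    "minContains": 1,
    "items": {
        "oneOf": [
            {"properties": {"id": {"const": "test_step"}}},
        ]
    },
}
print(json.dumps(wrapper, indent=2))
```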
@@ -1,71 +0,0 @@
import logging
import threading
from datetime import datetime, timezone
from krkn.utils.ErrorLog import ErrorLog


class ErrorCollectionHandler(logging.Handler):
    """
    Custom logging handler that captures ERROR and CRITICAL level logs
    in structured format for telemetry collection.

    Stores logs in memory as ErrorLog objects for later retrieval.
    Thread-safe for concurrent logging operations.
    """

    def __init__(self, level=logging.ERROR):
        """
        Initialize the error collection handler.

        Args:
            level: Minimum log level to capture (default: ERROR)
        """
        super().__init__(level)
        self.error_logs: list[ErrorLog] = []
        self._lock = threading.Lock()

    def emit(self, record: logging.LogRecord):
        """
        Capture ERROR and CRITICAL logs and store as ErrorLog objects.

        Args:
            record: LogRecord from Python logging framework
        """
        try:
            # Only capture ERROR (40) and CRITICAL (50) levels
            if record.levelno < logging.ERROR:
                return

            # Format timestamp as ISO 8601 UTC
            timestamp = datetime.fromtimestamp(
                record.created, tz=timezone.utc
            ).strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + "Z"

            # Create ErrorLog object
            error_log = ErrorLog(
                timestamp=timestamp,
                message=record.getMessage()
            )

            # Thread-safe append
            with self._lock:
                self.error_logs.append(error_log)

        except Exception:
            # Handler should never raise exceptions (logging best practice)
            self.handleError(record)

    def get_error_logs(self) -> list[dict]:
        """
        Retrieve all collected error logs as list of dictionaries.

        Returns:
            List of error log dictionaries with timestamp and message
        """
        with self._lock:
            return [log.to_dict() for log in self.error_logs]

    def clear(self):
        """Clear all collected error logs (useful for testing)"""
        with self._lock:
            self.error_logs.clear()
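The handler above (removed in this compare) is self-contained, so wiring it up takes only a few lines. A minimal usage sketch, assuming `ErrorCollectionHandler` is importable from `krkn.utils` as the package `__init__` further down shows:

```python
import logging

from krkn.utils import ErrorCollectionHandler

# Register the collector next to a normal stream handler; it silently
# records ERROR/CRITICAL records while lower levels pass through untouched.
collector = ErrorCollectionHandler(level=logging.ERROR)
logging.basicConfig(level=logging.INFO,
                    handlers=[logging.StreamHandler(), collector])

logging.info("not captured")
logging.error("node scenario failed")  # stored with an ISO 8601 UTC timestamp

print(collector.get_error_logs())
# -> [{'timestamp': '...Z', 'message': 'node scenario failed'}]
collector.clear()
```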
@@ -1,18 +0,0 @@
from dataclasses import dataclass, asdict


@dataclass
class ErrorLog:
    """
    Represents a single error log entry for telemetry collection.

    Attributes:
        timestamp: ISO 8601 formatted timestamp (UTC)
        message: Full error message text
    """
    timestamp: str
    message: str

    def to_dict(self) -> dict:
        """Convert to dictionary for JSON serialization"""
        return asdict(self)
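Because `ErrorLog` is a plain dataclass, serialization is just `dataclasses.asdict` under the hood; a quick round-trip sketch (values made up):

```python
import json

log = ErrorLog(timestamp="2025-01-01T00:00:00.000Z", message="boom")
assert log.to_dict() == {"timestamp": "2025-01-01T00:00:00.000Z",
                         "message": "boom"}
print(json.dumps(log.to_dict()))  # ready to embed in a telemetry payload
```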
@@ -20,7 +20,6 @@ class VirtChecker:
        self.namespace = get_yaml_item_value(kubevirt_check_config, "namespace", "")
        self.vm_list = []
        self.threads = []
        self.iteration_lock = threading.Lock()  # Lock to protect current_iterations
        self.threads_limit = threads_limit
        # setting to 0 in case no variables are set, so no threads later get made
        self.batch_size = 0
@@ -58,6 +57,7 @@ class VirtChecker:
            elif len(node_name_list) == 0:
                # If node_name_list is blank, add all vms
                self.vm_list.append(VirtCheck({'vm_name': vmi_name, 'ip_address': ip_address, 'namespace': namespace, 'node_name': node_name, "new_ip_address": ""}))

        self.batch_size = math.ceil(len(self.vm_list) / self.threads_limit)

    def check_disconnected_access(self, ip_address: str, worker_name: str = '', vmi_name: str = ''):
@@ -121,7 +121,8 @@ class VirtChecker:
        for thread in self.threads:
            thread.join()

    def batch_list(self, queue: queue.SimpleQueue = None):
    def batch_list(self, queue: queue.Queue = None):
        logging.info("batch size" + str(self.batch_size))
        if self.batch_size > 0:
            # Provided prints to easily visualize how the threads are processed.
            for i in range(0, len(self.vm_list), self.batch_size):
@@ -134,23 +135,13 @@ class VirtChecker:
            self.threads.append(t)
            t.start()

    def increment_iterations(self):
        """Thread-safe method to increment current_iterations"""
        with self.iteration_lock:
            self.current_iterations += 1

    def run_virt_check(self, vm_list_batch, virt_check_telemetry_queue: queue.SimpleQueue):

    def run_virt_check(self, vm_list_batch, virt_check_telemetry_queue: queue.Queue):

        virt_check_telemetry = []
        virt_check_tracker = {}
        while True:
            # Thread-safe read of current_iterations
            with self.iteration_lock:
                current = self.current_iterations
            if current >= self.iterations:
                break
        while self.current_iterations < self.iterations:
            for vm in vm_list_batch:
                start_time = datetime.now()
                try:
                    if not self.disconnected:
                        vm_status = self.get_vm_access(vm.vm_name, vm.namespace)
@@ -166,9 +157,8 @@ class VirtChecker:
                    if new_node_name and vm.node_name != new_node_name:
                        vm.node_name = new_node_name
                except Exception:
                    logging.info('Exception in get vm status')
                    vm_status = False


                if vm.vm_name not in virt_check_tracker:
                    start_timestamp = datetime.now()
                    virt_check_tracker[vm.vm_name] = {
@@ -181,7 +171,6 @@ class VirtChecker:
                        "new_ip_address": vm.new_ip_address
                    }
                else:

                    if vm_status != virt_check_tracker[vm.vm_name]["status"]:
                        end_timestamp = datetime.now()
                        start_timestamp = virt_check_tracker[vm.vm_name]["start_timestamp"]
@@ -210,11 +199,9 @@ class VirtChecker:
                virt_check_telemetry.append(VirtCheck(virt_check_tracker[vm]))
            else:
                virt_check_telemetry.append(VirtCheck(virt_check_tracker[vm]))
        try:
            virt_check_telemetry_queue.put(virt_check_telemetry)
        except Exception as e:
            logging.error('Put queue error ' + str(e))
    def run_post_virt_check(self, vm_list_batch, virt_check_telemetry, post_virt_check_queue: queue.SimpleQueue):
        virt_check_telemetry_queue.put(virt_check_telemetry)

    def run_post_virt_check(self, vm_list_batch, virt_check_telemetry, post_virt_check_queue: queue.Queue):

        virt_check_telemetry = []
        virt_check_tracker = {}
@@ -253,7 +240,7 @@ class VirtChecker:

    def gather_post_virt_checks(self, kubevirt_check_telem):

        post_kubevirt_check_queue = queue.SimpleQueue()
        post_kubevirt_check_queue = queue.Queue()
        post_threads = []

        if self.batch_size > 0:
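The `math.ceil` in the constructor hunk above is what bounds the thread count: with `threads_limit` workers, ceiling division picks a batch size such that slicing the VM list (as `batch_list` does) never yields more than `threads_limit` batches and never drops the remainder. A standalone sketch of that arithmetic, with illustrative names rather than the class's API:

```python
import math

def batch(items: list, threads_limit: int) -> list[list]:
    if not items:
        return []
    # Ceiling division: at most `threads_limit` slices, with the final,
    # possibly shorter slice carrying the remainder.
    size = math.ceil(len(items) / threads_limit)
    return [items[i:i + size] for i in range(0, len(items), size)]

print(batch(list(range(7)), 3))  # [[0, 1, 2], [3, 4, 5], [6]]
```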
@@ -1,4 +1,2 @@
from .TeeLogHandler import TeeLogHandler
from .ErrorLog import ErrorLog
from .ErrorCollectionHandler import ErrorCollectionHandler
from .functions import *
@@ -6,16 +6,17 @@ azure-identity==1.16.1
azure-keyvault==4.2.0
azure-mgmt-compute==30.5.0
azure-mgmt-network==27.0.0
itsdangerous==2.0.1
coverage==7.6.12
datetime==5.4
docker>=6.0,<7.0 # docker 7.0+ has breaking changes with Unix sockets
docker==7.0.0
gitpython==3.1.41
google-auth==2.37.0
google-cloud-compute==1.22.0
ibm_cloud_sdk_core==3.18.0
ibm_vpc==0.20.0
jinja2==3.1.6
krkn-lib==6.0.1
krkn-lib==5.1.12
lxml==5.1.0
kubernetes==34.1.0
numpy==1.26.4
@@ -27,13 +28,13 @@ pyfiglet==1.0.2
pytest==8.0.0
python-ipmi==0.5.4
python-openstackclient==6.5.0
requests<2.32 # requests 2.32+ breaks Unix socket support (http+docker scheme)
requests-unixsocket>=0.4.0 # Required for Docker Unix socket support
requests==2.32.4
service_identity==24.1.0
PyYAML==6.0.1
setuptools==78.1.1
wheel>=0.44.0
zope.interface==6.1
werkzeug==3.0.6
wheel==0.42.0
zope.interface==5.4.0

git+https://github.com/vmware/vsphere-automation-sdk-python.git@v8.0.0.0
cryptography>=42.0.4 # not directly required, pinned by Snyk to avoid a vulnerability
@@ -27,7 +27,7 @@ from krkn_lib.models.telemetry import ChaosRunTelemetry
from krkn_lib.utils import SafeLogger
from krkn_lib.utils.functions import get_yaml_item_value, get_junit_test_case

from krkn.utils import TeeLogHandler, ErrorCollectionHandler
from krkn.utils import TeeLogHandler
from krkn.utils.HealthChecker import HealthChecker
from krkn.utils.VirtChecker import VirtChecker
from krkn.scenario_plugins.scenario_plugin_factory import (
@@ -141,7 +141,7 @@ def main(options, command: Optional[str]) -> int:
        logging.error(
            "Cannot read the kubeconfig file at %s, please check" % kubeconfig_path
        )
        return -1
        return 1
    logging.info("Initializing client to talk to the Kubernetes cluster")

    # Generate uuid for the run
@@ -184,10 +184,10 @@ def main(options, command: Optional[str]) -> int:
    # Set up kraken url to track signal
    if not 0 <= int(port) <= 65535:
        logging.error("%s isn't a valid port number, please check" % (port))
        return -1
        return 1
    if not signal_address:
        logging.error("Please set the signal address in the config")
        return -1
        return 1
    address = (signal_address, port)

    # If publish_running_status is False this should keep us going
@@ -220,7 +220,7 @@ def main(options, command: Optional[str]) -> int:
                "invalid distribution selected, running openshift scenarios against kubernetes cluster."
                "Please set 'kubernetes' in config.yaml krkn.platform and try again"
            )
            return -1
            return 1
        if cv != "":
            logging.info(cv)
        else:
@@ -326,7 +326,7 @@ def main(options, command: Optional[str]) -> int:
                                              args=(health_check_config, health_check_telemetry_queue))
        health_check_worker.start()

        kubevirt_check_telemetry_queue = queue.SimpleQueue()
        kubevirt_check_telemetry_queue = queue.Queue()
        kubevirt_checker = VirtChecker(kubevirt_check_config, iterations=iterations, krkn_lib=kubecli)
        kubevirt_checker.batch_list(kubevirt_check_telemetry_queue)

@@ -361,7 +361,7 @@ def main(options, command: Optional[str]) -> int:
                    logging.error(
                        f"impossible to find scenario {scenario_type}, plugin not found. Exiting"
                    )
                    sys.exit(-1)
                    sys.exit(1)

                failed_post_scenarios, scenario_telemetries = (
                    scenario_plugin.run_scenarios(
@@ -393,7 +393,8 @@ def main(options, command: Optional[str]) -> int:

            iteration += 1
            health_checker.current_iterations += 1
            kubevirt_checker.increment_iterations()
            kubevirt_checker.current_iterations += 1

        # telemetry
        # in order to print decoded telemetry data even if telemetry collection
        # is disabled, it's necessary to serialize the ChaosRunTelemetry object
@@ -410,7 +411,6 @@ def main(options, command: Optional[str]) -> int:
        while not kubevirt_check_telemetry_queue.empty():
            kubevirt_check_telem.extend(kubevirt_check_telemetry_queue.get_nowait())
        chaos_telemetry.virt_checks = kubevirt_check_telem

        post_kubevirt_check = kubevirt_checker.gather_post_virt_checks(kubevirt_check_telem)
        chaos_telemetry.post_virt_checks = post_kubevirt_check
        # if platform is openshift will be collected
@@ -425,22 +425,16 @@ def main(options, command: Optional[str]) -> int:
            logging.info("collecting Kubernetes cluster metadata....")
            telemetry_k8s.collect_cluster_metadata(chaos_telemetry)

        # Collect error logs from handler
        error_logs = error_collection_handler.get_error_logs()
        if error_logs:
            logging.info(f"Collected {len(error_logs)} error logs for telemetry")
            chaos_telemetry.error_logs = error_logs
        else:
            logging.info("No error logs collected during chaos run")
            chaos_telemetry.error_logs = []

        telemetry_json = chaos_telemetry.to_json()
        decoded_chaos_run_telemetry = ChaosRunTelemetry(json.loads(telemetry_json))
        chaos_output.telemetry = decoded_chaos_run_telemetry
        logging.info(f"Chaos data:\n{chaos_output.to_json()}")
        if enable_elastic:
            elastic_telemetry = ElasticChaosRunTelemetry(
                chaos_run_telemetry=decoded_chaos_run_telemetry
            )
            result = elastic_search.push_telemetry(
                decoded_chaos_run_telemetry, elastic_telemetry_index
                elastic_telemetry, elastic_telemetry_index
            )
            if result == -1:
                safe_logger.error(
@@ -528,7 +522,7 @@ def main(options, command: Optional[str]) -> int:

            else:
                logging.error("Alert profile is not defined")
                return -1
                return 1
                # sys.exit(1)
        if enable_metrics:
            logging.info(f'Capturing metrics using file {metrics_profile}')
@@ -543,20 +537,17 @@ def main(options, command: Optional[str]) -> int:
            telemetry_json
        )

        # want to exit with 1 first to show failure of scenario
        # even if alerts failing
        if failed_post_scenarios:
            logging.error(
                "Post scenarios are still failing at the end of all iterations"
            )
            # sys.exit(1)
            return 1

        if post_critical_alerts > 0:
            logging.error("Critical alerts are firing, please check; exiting")
            # sys.exit(2)
            return 2

        if failed_post_scenarios:
            logging.error(
                "Post scenarios are still failing at the end of all iterations"
            )
            # sys.exit(2)
            return 2
        if health_checker.ret_value != 0:
            logging.error("Health check failed for the applications, Please check; exiting")
            return health_checker.ret_value
@@ -572,7 +563,7 @@ def main(options, command: Optional[str]) -> int:
    else:
        logging.error("Cannot find a config at %s, please check" % (cfg))
        # sys.exit(1)
        return -1
        return 2

    return 0

@@ -652,13 +643,10 @@ if __name__ == "__main__":
    # If no command or regular execution, continue with existing logic
    report_file = options.output
    tee_handler = TeeLogHandler()
    error_collection_handler = ErrorCollectionHandler(level=logging.ERROR)

    handlers = [
        logging.FileHandler(report_file, mode="w"),
        logging.StreamHandler(),
        tee_handler,
        error_collection_handler,
    ]

    logging.basicConfig(
@@ -744,4 +732,4 @@ if __name__ == "__main__":
        with open(junit_testcase_file_path, "w") as stream:
            stream.write(junit_testcase_xml)

    sys.exit(retval)
    sys.exit(retval)
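The telemetry hunks above all rely on the same producer/consumer pattern: worker threads `put()` result batches onto a queue, the main loop joins them, then drains with `empty()`/`get_nowait()`. A minimal standalone sketch of that pattern (names are illustrative):

```python
import queue
import threading

q: queue.Queue = queue.Queue()

def worker(batch_id: int) -> None:
    # Each worker contributes one list of results, as run_virt_check does.
    q.put([f"vm-{batch_id} check"])

threads = [threading.Thread(target=worker, args=(i,)) for i in range(3)]
for t in threads:
    t.start()
for t in threads:
    t.join()

collected: list = []
# Draining non-blocking is safe here: every producer has been joined,
# so empty() cannot race against a late put().
while not q.empty():
    collected.extend(q.get_nowait())
print(sorted(collected))
```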
@@ -1,18 +1,16 @@
node_scenarios:
  - actions:  # node chaos scenarios to be injected
      - node_stop_start_scenario
    # node_name: kind-control-plane  # node on which scenario has to be injected; can set multiple names separated by comma
    label_selector: kubernetes.io/hostname=kind-worker  # when node_name is not specified, a node with matching label_selector is selected for node chaos scenario injection
    node_name: kind-worker  # node on which scenario has to be injected; can set multiple names separated by comma
    # label_selector: node-role.kubernetes.io/worker  # when node_name is not specified, a node with matching label_selector is selected for node chaos scenario injection
    instance_count: 1  # Number of nodes to perform action/select that match the label selector
    runs: 1  # number of times to inject each scenario under actions (will perform on same node each time)
    timeout: 120  # duration to wait for completion of node scenario injection
    cloud_type: docker  # cloud type on which Kubernetes/OpenShift runs
    duration: 10
  - actions:
      - node_reboot_scenario
    node_name: kind-control-plane
    # label_selector: kubernetes.io/hostname=kind-worker
    node_name: kind-worker
    # label_selector: node-role.kubernetes.io/infra
    instance_count: 1
    timeout: 120
    cloud_type: docker
    kube_check: false
@@ -3,4 +3,3 @@
    namespace_pattern: "kube-system"
    label_selector: "component=etcd"
    krkn_pod_recovery_time: 120
    kill: 1
@@ -1,7 +0,0 @@
pvc_scenario:
  pvc_name: kraken-test-pvc  # Name of the target PVC
  pod_name: kraken-test-pod  # Name of the pod where the PVC is mounted, it will be ignored if the pvc_name is defined
  namespace: kraken  # Namespace where the PVC is
  fill_percentage: 98  # Target percentage to fill up the cluster, value must be higher than current percentage, valid values are between 0 and 99
  duration: 10  # Duration in seconds for the fault
  block_size: 102400  # used only by dd if fallocate not present in the container
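The `fill_percentage` knob in the deleted config above implies some back-of-the-envelope arithmetic: the scenario has to write the gap between current usage and the target, and the dd fallback does so in `block_size` chunks. A rough illustration with made-up numbers:

```python
# Made-up example: a 10 GiB volume currently at 40% usage, target 98%.
capacity = 10 * 1024**3
current = int(0.40 * capacity)
target = int(0.98 * capacity)

to_write = target - current
block_size = 102400                   # the dd fallback size from the config
blocks = -(-to_write // block_size)   # ceiling division for the dd count
print(f"write {blocks} blocks of {block_size} bytes ({to_write} bytes total)")
```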
@@ -10,7 +10,6 @@ node_scenarios:
    cloud_type: aws  # cloud type on which Kubernetes/OpenShift runs
    parallel: true  # Run action on label or node name in parallel or sequential, defaults to sequential
    kube_check: true  # Run the kubernetes api calls to see if the node gets to a certain state during the node scenario
    poll_interval: 15  # Time interval (in seconds) to periodically check the node's status
  - actions:
      - node_reboot_scenario
    node_name:
64
tests/ingress_network/test_ingress_network_plugin.py
Normal file
@@ -0,0 +1,64 @@
import unittest
import logging
from arcaflow_plugin_sdk import plugin

from krkn.scenario_plugins.native.network import ingress_shaping


class NetworkScenariosTest(unittest.TestCase):

    def test_serialization(self):
        plugin.test_object_serialization(
            ingress_shaping.NetworkScenarioConfig(
                node_interface_name={"foo": ["bar"]},
                network_params={
                    "latency": "50ms",
                    "loss": "0.02",
                    "bandwidth": "100mbit",
                },
            ),
            self.fail,
        )
        plugin.test_object_serialization(
            ingress_shaping.NetworkScenarioSuccessOutput(
                filter_direction="ingress",
                test_interfaces={"foo": ["bar"]},
                network_parameters={
                    "latency": "50ms",
                    "loss": "0.02",
                    "bandwidth": "100mbit",
                },
                execution_type="parallel",
            ),
            self.fail,
        )
        plugin.test_object_serialization(
            ingress_shaping.NetworkScenarioErrorOutput(
                error="Hello World",
            ),
            self.fail,
        )

    def test_network_chaos(self):
        output_id, output_data = ingress_shaping.network_chaos(
            params=ingress_shaping.NetworkScenarioConfig(
                label_selector="node-role.kubernetes.io/control-plane",
                instance_count=1,
                network_params={
                    "latency": "50ms",
                    "loss": "0.02",
                    "bandwidth": "100mbit",
                },
            ),
            run_id="network-shaping-test",
        )
        if output_id == "error":
            logging.error(output_data.error)
            self.fail(
                "The network chaos scenario did not complete successfully "
                "because an error/exception occurred"
            )


if __name__ == "__main__":
    unittest.main()
215
tests/kubevirt_vm_outage/test_kubevirt_vm_outage.py
Normal file
@@ -0,0 +1,215 @@
import unittest
import time
from unittest.mock import MagicMock, patch

import yaml
from krkn_lib.k8s import KrknKubernetes
from krkn_lib.models.telemetry import ScenarioTelemetry
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift

from krkn.scenario_plugins.kubevirt_vm_outage.kubevirt_vm_outage_scenario_plugin import KubevirtVmOutageScenarioPlugin


class TestKubevirtVmOutageScenarioPlugin(unittest.TestCase):

    def setUp(self):
        """
        Set up test fixtures for KubevirtVmOutageScenarioPlugin
        """
        self.plugin = KubevirtVmOutageScenarioPlugin()

        # Create mock k8s client
        self.k8s_client = MagicMock()
        self.custom_object_client = MagicMock()
        self.k8s_client.custom_object_client = self.custom_object_client
        self.plugin.k8s_client = self.k8s_client

        # Mock methods needed for KubeVirt operations
        self.k8s_client.list_custom_resource_definition = MagicMock()

        # Mock custom resource definition list with KubeVirt CRDs
        crd_list = MagicMock()
        crd_item = MagicMock()
        crd_item.spec = MagicMock()
        crd_item.spec.group = "kubevirt.io"
        crd_list.items = [crd_item]
        self.k8s_client.list_custom_resource_definition.return_value = crd_list

        # Mock VMI data
        self.mock_vmi = {
            "metadata": {
                "name": "test-vm",
                "namespace": "default"
            },
            "status": {
                "phase": "Running"
            }
        }

        # Create test config
        self.config = {
            "scenarios": [
                {
                    "name": "kubevirt outage test",
                    "scenario": "kubevirt_vm_outage",
                    "parameters": {
                        "vm_name": "test-vm",
                        "namespace": "default",
                        "duration": 0
                    }
                }
            ]
        }

        # Create a temporary config file
        import tempfile, os
        temp_dir = tempfile.gettempdir()
        self.scenario_file = os.path.join(temp_dir, "test_kubevirt_scenario.yaml")
        with open(self.scenario_file, "w") as f:
            yaml.dump(self.config, f)

        # Mock dependencies
        self.telemetry = MagicMock(spec=KrknTelemetryOpenshift)
        self.scenario_telemetry = MagicMock(spec=ScenarioTelemetry)
        self.telemetry.get_lib_kubernetes.return_value = self.k8s_client

    def test_successful_injection_and_recovery(self):
        """
        Test successful deletion and recovery of a VMI
        """
        # Mock get_vmi to return our mock VMI
        with patch.object(self.plugin, 'get_vmi', return_value=self.mock_vmi):
            # Mock inject and recover to simulate success
            with patch.object(self.plugin, 'inject', return_value=0) as mock_inject:
                with patch.object(self.plugin, 'recover', return_value=0) as mock_recover:
                    with patch("builtins.open", unittest.mock.mock_open(read_data=yaml.dump(self.config))):
                        result = self.plugin.run("test-uuid", self.scenario_file, {}, self.telemetry, self.scenario_telemetry)

                        self.assertEqual(result, 0)
                        mock_inject.assert_called_once_with("test-vm", "default", False)
                        mock_recover.assert_called_once_with("test-vm", "default", False)

    def test_injection_failure(self):
        """
        Test failure during VMI deletion
        """
        # Mock get_vmi to return our mock VMI
        with patch.object(self.plugin, 'get_vmi', return_value=self.mock_vmi):
            # Mock inject to simulate failure
            with patch.object(self.plugin, 'inject', return_value=1) as mock_inject:
                with patch.object(self.plugin, 'recover', return_value=0) as mock_recover:
                    with patch("builtins.open", unittest.mock.mock_open(read_data=yaml.dump(self.config))):
                        result = self.plugin.run("test-uuid", self.scenario_file, {}, self.telemetry, self.scenario_telemetry)

                        self.assertEqual(result, 1)
                        mock_inject.assert_called_once_with("test-vm", "default", False)
                        mock_recover.assert_not_called()

    def test_disable_auto_restart(self):
        """
        Test VM auto-restart can be disabled
        """
        # Configure test with disable_auto_restart=True
        self.config["scenarios"][0]["parameters"]["disable_auto_restart"] = True

        # Mock VM object for patching
        mock_vm = {
            "metadata": {"name": "test-vm", "namespace": "default"},
            "spec": {}
        }

        # Mock get_vmi to return our mock VMI
        with patch.object(self.plugin, 'get_vmi', return_value=self.mock_vmi):
            # Mock VM patch operation
            with patch.object(self.plugin, 'patch_vm_spec') as mock_patch_vm:
                mock_patch_vm.return_value = True
                # Mock inject and recover
                with patch.object(self.plugin, 'inject', return_value=0) as mock_inject:
                    with patch.object(self.plugin, 'recover', return_value=0) as mock_recover:
                        with patch("builtins.open", unittest.mock.mock_open(read_data=yaml.dump(self.config))):
                            result = self.plugin.run("test-uuid", self.scenario_file, {}, self.telemetry, self.scenario_telemetry)

                            self.assertEqual(result, 0)
                            # Should call patch_vm_spec to disable auto-restart
                            mock_patch_vm.assert_any_call("test-vm", "default", False)
                            # Should call patch_vm_spec to re-enable auto-restart during recovery
                            mock_patch_vm.assert_any_call("test-vm", "default", True)
                            mock_inject.assert_called_once_with("test-vm", "default", True)
                            mock_recover.assert_called_once_with("test-vm", "default", True)

    def test_recovery_when_vmi_does_not_exist(self):
        """
        Test recovery logic when VMI does not exist after deletion
        """
        # Store the original VMI in the plugin for recovery
        self.plugin.original_vmi = self.mock_vmi.copy()

        # Create a cleaned vmi_dict as the plugin would
        vmi_dict = self.mock_vmi.copy()

        # Set up running VMI data for after recovery
        running_vmi = {
            "metadata": {"name": "test-vm", "namespace": "default"},
            "status": {"phase": "Running"}
        }

        # Set up time.time to immediately exceed the timeout for auto-recovery
        with patch('time.time', side_effect=[0, 301, 301, 301, 301, 310, 320]):
            # Mock get_vmi to always return None (not auto-recovered)
            with patch.object(self.plugin, 'get_vmi', side_effect=[None, None, running_vmi]):
                # Mock the custom object API to return success
                self.custom_object_client.create_namespaced_custom_object = MagicMock(return_value=running_vmi)

                # Run recovery with mocked time.sleep
                with patch('time.sleep'):
                    result = self.plugin.recover("test-vm", "default", False)

                self.assertEqual(result, 0)
                # Verify create was called with the right arguments for our API version and kind
                self.custom_object_client.create_namespaced_custom_object.assert_called_once_with(
                    group="kubevirt.io",
                    version="v1",
                    namespace="default",
                    plural="virtualmachineinstances",
                    body=vmi_dict
                )

    def test_validation_failure(self):
        """
        Test validation failure when KubeVirt is not installed
        """
        # Mock empty CRD list (no KubeVirt CRDs)
        empty_crd_list = MagicMock()
        empty_crd_list.items = []
        self.k8s_client.list_custom_resource_definition.return_value = empty_crd_list

        with patch("builtins.open", unittest.mock.mock_open(read_data=yaml.dump(self.config))):
            result = self.plugin.run("test-uuid", self.scenario_file, {}, self.telemetry, self.scenario_telemetry)

            self.assertEqual(result, 1)

    def test_delete_vmi_timeout(self):
        """
        Test timeout during VMI deletion
        """
        # Mock successful delete operation
        self.custom_object_client.delete_namespaced_custom_object = MagicMock(return_value={})

        # Mock that get_vmi always returns VMI (never gets deleted)
        with patch.object(self.plugin, 'get_vmi', return_value=self.mock_vmi):
            # Simulate timeout by making time.time return values that exceed the timeout
            with patch('time.sleep'), patch('time.time', side_effect=[0, 10, 20, 130, 130, 130, 130, 140]):
                result = self.plugin.inject("test-vm", "default", False)

                self.assertEqual(result, 1)
                self.custom_object_client.delete_namespaced_custom_object.assert_called_once_with(
                    group="kubevirt.io",
                    version="v1",
                    namespace="default",
                    plural="virtualmachineinstances",
                    name="test-vm"
                )


if __name__ == "__main__":
    unittest.main()
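The timeout tests in the file above lean on a clock-stubbing trick worth calling out: feeding a sequence into `patch('time.time', side_effect=[...])` fast-forwards a polling loop past its deadline without any real waiting, while `patch('time.sleep')` turns the sleeps into no-ops. A minimal sketch of the technique (`wait_until` is a stand-in, not the plugin's real loop):

```python
import time
from unittest.mock import patch

def wait_until(predicate, timeout: int = 120) -> bool:
    start = time.time()                   # first stubbed reading: 0
    while time.time() - start < timeout:  # second reading: 130 -> loop exits
        if predicate():
            return True
        time.sleep(5)
    return False

with patch("time.sleep"), patch("time.time", side_effect=[0, 130]):
    assert wait_until(lambda: False) is False  # "times out" instantly
```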
@@ -1,415 +0,0 @@
"""
Test suite for AbstractNode Scenarios

Usage:
    python -m coverage run -a -m unittest tests/test_abstract_node_scenarios.py

Assisted By: Claude Code
"""

import unittest
from unittest.mock import Mock, patch
from krkn.scenario_plugins.node_actions.abstract_node_scenarios import abstract_node_scenarios
from krkn_lib.k8s import KrknKubernetes
from krkn_lib.models.k8s import AffectedNode, AffectedNodeStatus


class TestAbstractNodeScenarios(unittest.TestCase):
    """Test suite for abstract_node_scenarios class"""

    def setUp(self):
        """Set up test fixtures before each test method"""
        self.mock_kubecli = Mock(spec=KrknKubernetes)
        self.mock_affected_nodes_status = Mock(spec=AffectedNodeStatus)
        self.mock_affected_nodes_status.affected_nodes = []
        self.node_action_kube_check = True

        self.scenarios = abstract_node_scenarios(
            kubecli=self.mock_kubecli,
            node_action_kube_check=self.node_action_kube_check,
            affected_nodes_status=self.mock_affected_nodes_status
        )

    def test_init(self):
        """Test initialization of abstract_node_scenarios"""
        self.assertEqual(self.scenarios.kubecli, self.mock_kubecli)
        self.assertEqual(self.scenarios.affected_nodes_status, self.mock_affected_nodes_status)
        self.assertTrue(self.scenarios.node_action_kube_check)

    @patch('time.sleep')
    @patch('logging.info')
    def test_node_stop_start_scenario(self, mock_logging, mock_sleep):
        """Test node_stop_start_scenario calls stop and start in sequence"""
        # Arrange
        instance_kill_count = 1
        node = "test-node"
        timeout = 300
        duration = 60
        poll_interval = 10

        self.scenarios.node_stop_scenario = Mock()
        self.scenarios.node_start_scenario = Mock()

        # Act
        self.scenarios.node_stop_start_scenario(
            instance_kill_count, node, timeout, duration, poll_interval
        )

        # Assert
        self.scenarios.node_stop_scenario.assert_called_once_with(
            instance_kill_count, node, timeout, poll_interval
        )
        mock_sleep.assert_called_once_with(duration)
        self.scenarios.node_start_scenario.assert_called_once_with(
            instance_kill_count, node, timeout, poll_interval
        )
        self.mock_affected_nodes_status.merge_affected_nodes.assert_called_once()

    @patch('logging.info')
    def test_helper_node_stop_start_scenario(self, mock_logging):
        """Test helper_node_stop_start_scenario calls helper stop and start"""
        # Arrange
        instance_kill_count = 1
        node = "helper-node"
        timeout = 300

        self.scenarios.helper_node_stop_scenario = Mock()
        self.scenarios.helper_node_start_scenario = Mock()

        # Act
        self.scenarios.helper_node_stop_start_scenario(instance_kill_count, node, timeout)

        # Assert
        self.scenarios.helper_node_stop_scenario.assert_called_once_with(
            instance_kill_count, node, timeout
        )
        self.scenarios.helper_node_start_scenario.assert_called_once_with(
            instance_kill_count, node, timeout
        )

    @patch('time.sleep')
    @patch('logging.info')
    def test_node_disk_detach_attach_scenario_success(self, mock_logging, mock_sleep):
        """Test disk detach/attach scenario with valid disk attachment"""
        # Arrange
        instance_kill_count = 1
        node = "test-node"
        timeout = 300
        duration = 60
        disk_details = {"disk_id": "disk-123", "device": "/dev/sdb"}

        self.scenarios.get_disk_attachment_info = Mock(return_value=disk_details)
        self.scenarios.disk_detach_scenario = Mock()
        self.scenarios.disk_attach_scenario = Mock()

        # Act
        self.scenarios.node_disk_detach_attach_scenario(
            instance_kill_count, node, timeout, duration
        )

        # Assert
        self.scenarios.get_disk_attachment_info.assert_called_once_with(
            instance_kill_count, node
        )
        self.scenarios.disk_detach_scenario.assert_called_once_with(
            instance_kill_count, node, timeout
        )
        mock_sleep.assert_called_once_with(duration)
        self.scenarios.disk_attach_scenario.assert_called_once_with(
            instance_kill_count, disk_details, timeout
        )

    @patch('logging.error')
    @patch('logging.info')
    def test_node_disk_detach_attach_scenario_no_disk(self, mock_info, mock_error):
        """Test disk detach/attach scenario when only root disk exists"""
        # Arrange
        instance_kill_count = 1
        node = "test-node"
        timeout = 300
        duration = 60

        self.scenarios.get_disk_attachment_info = Mock(return_value=None)
        self.scenarios.disk_detach_scenario = Mock()
        self.scenarios.disk_attach_scenario = Mock()

        # Act
        self.scenarios.node_disk_detach_attach_scenario(
            instance_kill_count, node, timeout, duration
        )

        # Assert
        self.scenarios.disk_detach_scenario.assert_not_called()
        self.scenarios.disk_attach_scenario.assert_not_called()
        mock_error.assert_any_call("Node %s has only root disk attached" % node)

    @patch('krkn.scenario_plugins.node_actions.abstract_node_scenarios.nodeaction.wait_for_unknown_status')
    @patch('krkn.scenario_plugins.node_actions.abstract_node_scenarios.runcommand.run')
    @patch('logging.info')
    def test_stop_kubelet_scenario_success(self, mock_logging, mock_run, mock_wait):
        """Test successful kubelet stop scenario"""
        # Arrange
        instance_kill_count = 2
        node = "test-node"
        timeout = 300
        mock_affected_node = Mock(spec=AffectedNode)
        mock_wait.return_value = None

        # Act
        with patch('krkn.scenario_plugins.node_actions.abstract_node_scenarios.AffectedNode') as mock_affected_node_class:
            mock_affected_node_class.return_value = mock_affected_node
            self.scenarios.stop_kubelet_scenario(instance_kill_count, node, timeout)

        # Assert
        self.assertEqual(mock_run.call_count, 2)
        expected_command = "oc debug node/" + node + " -- chroot /host systemctl stop kubelet"
        mock_run.assert_called_with(expected_command)
        self.assertEqual(mock_wait.call_count, 2)
        self.assertEqual(len(self.mock_affected_nodes_status.affected_nodes), 2)

    @patch('krkn.scenario_plugins.node_actions.abstract_node_scenarios.nodeaction.wait_for_unknown_status')
    @patch('krkn.scenario_plugins.node_actions.abstract_node_scenarios.runcommand.run')
    @patch('logging.error')
    @patch('logging.info')
    def test_stop_kubelet_scenario_failure(self, mock_info, mock_error, mock_run, mock_wait):
        """Test kubelet stop scenario when command fails"""
        # Arrange
        instance_kill_count = 1
        node = "test-node"
        timeout = 300
        error_msg = "Command failed"
        mock_run.side_effect = Exception(error_msg)

        # Act & Assert
        with self.assertRaises(Exception):
            with patch('krkn.scenario_plugins.node_actions.abstract_node_scenarios.AffectedNode'):
                self.scenarios.stop_kubelet_scenario(instance_kill_count, node, timeout)

        mock_error.assert_any_call(
            "Failed to stop the kubelet of the node. Encountered following "
            "exception: %s. Test Failed" % error_msg
        )

    @patch('logging.info')
    def test_stop_start_kubelet_scenario(self, mock_logging):
        """Test stop/start kubelet scenario"""
        # Arrange
        instance_kill_count = 1
        node = "test-node"
        timeout = 300

        self.scenarios.stop_kubelet_scenario = Mock()
        self.scenarios.node_reboot_scenario = Mock()

        # Act
        self.scenarios.stop_start_kubelet_scenario(instance_kill_count, node, timeout)

        # Assert
        self.scenarios.stop_kubelet_scenario.assert_called_once_with(
            instance_kill_count, node, timeout
        )
        self.scenarios.node_reboot_scenario.assert_called_once_with(
            instance_kill_count, node, timeout
        )
        self.mock_affected_nodes_status.merge_affected_nodes.assert_called_once()

    @patch('krkn.scenario_plugins.node_actions.abstract_node_scenarios.nodeaction.wait_for_ready_status')
    @patch('krkn.scenario_plugins.node_actions.abstract_node_scenarios.runcommand.run')
    @patch('logging.info')
    def test_restart_kubelet_scenario_success(self, mock_logging, mock_run, mock_wait):
        """Test successful kubelet restart scenario"""
        # Arrange
        instance_kill_count = 2
        node = "test-node"
        timeout = 300
        mock_affected_node = Mock(spec=AffectedNode)
        mock_wait.return_value = None

        # Act
        with patch('krkn.scenario_plugins.node_actions.abstract_node_scenarios.AffectedNode') as mock_affected_node_class:
            mock_affected_node_class.return_value = mock_affected_node
            self.scenarios.restart_kubelet_scenario(instance_kill_count, node, timeout)

        # Assert
        self.assertEqual(mock_run.call_count, 2)
        expected_command = "oc debug node/" + node + " -- chroot /host systemctl restart kubelet &"
        mock_run.assert_called_with(expected_command)
        self.assertEqual(mock_wait.call_count, 2)
        self.assertEqual(len(self.mock_affected_nodes_status.affected_nodes), 2)

    @patch('krkn.scenario_plugins.node_actions.abstract_node_scenarios.nodeaction.wait_for_ready_status')
    @patch('krkn.scenario_plugins.node_actions.abstract_node_scenarios.runcommand.run')
    @patch('logging.error')
    @patch('logging.info')
    def test_restart_kubelet_scenario_failure(self, mock_info, mock_error, mock_run, mock_wait):
        """Test kubelet restart scenario when command fails"""
        # Arrange
        instance_kill_count = 1
        node = "test-node"
        timeout = 300
        error_msg = "Restart failed"
        mock_run.side_effect = Exception(error_msg)

        # Act & Assert
        with self.assertRaises(Exception):
            with patch('krkn.scenario_plugins.node_actions.abstract_node_scenarios.AffectedNode'):
                self.scenarios.restart_kubelet_scenario(instance_kill_count, node, timeout)

        mock_error.assert_any_call(
            "Failed to restart the kubelet of the node. Encountered following "
            "exception: %s. Test Failed" % error_msg
        )

    @patch('krkn.scenario_plugins.node_actions.abstract_node_scenarios.runcommand.run')
    @patch('logging.info')
    def test_node_crash_scenario_success(self, mock_logging, mock_run):
        """Test successful node crash scenario"""
        # Arrange
        instance_kill_count = 2
        node = "test-node"
        timeout = 300

        # Act
        result = self.scenarios.node_crash_scenario(instance_kill_count, node, timeout)

        # Assert
        self.assertEqual(mock_run.call_count, 2)
        expected_command = (
            "oc debug node/" + node + " -- chroot /host "
            "dd if=/dev/urandom of=/proc/sysrq-trigger"
        )
        mock_run.assert_called_with(expected_command)
        self.assertIsNone(result)

    @patch('krkn.scenario_plugins.node_actions.abstract_node_scenarios.runcommand.run')
    @patch('logging.error')
    @patch('logging.info')
    def test_node_crash_scenario_failure(self, mock_info, mock_error, mock_run):
        """Test node crash scenario when command fails"""
        # Arrange
        instance_kill_count = 1
        node = "test-node"
        timeout = 300
        error_msg = "Crash command failed"
        mock_run.side_effect = Exception(error_msg)

        # Act
        result = self.scenarios.node_crash_scenario(instance_kill_count, node, timeout)

        # Assert
        self.assertEqual(result, 1)
        mock_error.assert_any_call(
            "Failed to crash the node. Encountered following exception: %s. "
            "Test Failed" % error_msg
        )

    def test_node_start_scenario_not_implemented(self):
        """Test that node_start_scenario returns None (not implemented)"""
        result = self.scenarios.node_start_scenario(1, "test-node", 300, 10)
        self.assertIsNone(result)

    def test_node_stop_scenario_not_implemented(self):
        """Test that node_stop_scenario returns None (not implemented)"""
        result = self.scenarios.node_stop_scenario(1, "test-node", 300, 10)
        self.assertIsNone(result)

    def test_node_termination_scenario_not_implemented(self):
        """Test that node_termination_scenario returns None (not implemented)"""
        result = self.scenarios.node_termination_scenario(1, "test-node", 300, 10)
        self.assertIsNone(result)

    def test_node_reboot_scenario_not_implemented(self):
        """Test that node_reboot_scenario returns None (not implemented)"""
        result = self.scenarios.node_reboot_scenario(1, "test-node", 300)
        self.assertIsNone(result)

    def test_node_service_status_not_implemented(self):
        """Test that node_service_status returns None (not implemented)"""
        result = self.scenarios.node_service_status("test-node", "service", "key", 300)
        self.assertIsNone(result)

    def test_node_block_scenario_not_implemented(self):
        """Test that node_block_scenario returns None (not implemented)"""
        result = self.scenarios.node_block_scenario(1, "test-node", 300, 60)
        self.assertIsNone(result)


class TestAbstractNodeScenariosIntegration(unittest.TestCase):
    """Integration tests for abstract_node_scenarios workflows"""

    def setUp(self):
        """Set up test fixtures before each test method"""
        self.mock_kubecli = Mock(spec=KrknKubernetes)
        self.mock_affected_nodes_status = Mock(spec=AffectedNodeStatus)
        self.mock_affected_nodes_status.affected_nodes = []

        self.scenarios = abstract_node_scenarios(
            kubecli=self.mock_kubecli,
            node_action_kube_check=True,
            affected_nodes_status=self.mock_affected_nodes_status
        )

    @patch('time.sleep')
    @patch('krkn.scenario_plugins.node_actions.abstract_node_scenarios.nodeaction.wait_for_unknown_status')
    @patch('krkn.scenario_plugins.node_actions.abstract_node_scenarios.runcommand.run')
    def test_complete_stop_start_kubelet_workflow(self, mock_run, mock_wait, mock_sleep):
        """Test complete workflow of stop/start kubelet scenario"""
        # Arrange
        instance_kill_count = 1
        node = "test-node"
        timeout = 300

        self.scenarios.node_reboot_scenario = Mock()

        # Act
        with patch('krkn.scenario_plugins.node_actions.abstract_node_scenarios.AffectedNode'):
            self.scenarios.stop_start_kubelet_scenario(instance_kill_count, node, timeout)

        # Assert - verify stop kubelet was called
        expected_stop_command = "oc debug node/" + node + " -- chroot /host systemctl stop kubelet"
        mock_run.assert_any_call(expected_stop_command)

        # Verify reboot was called
        self.scenarios.node_reboot_scenario.assert_called_once_with(
            instance_kill_count, node, timeout
        )

        # Verify merge was called
        self.mock_affected_nodes_status.merge_affected_nodes.assert_called_once()

    @patch('time.sleep')
    def test_node_stop_start_scenario_workflow(self, mock_sleep):
        """Test complete workflow of node stop/start scenario"""
        # Arrange
        instance_kill_count = 1
        node = "test-node"
        timeout = 300
        duration = 60
        poll_interval = 10

        self.scenarios.node_stop_scenario = Mock()
        self.scenarios.node_start_scenario = Mock()

        # Act
        self.scenarios.node_stop_start_scenario(
            instance_kill_count, node, timeout, duration, poll_interval
        )

        # Assert - verify order of operations
        call_order = []

        # Verify stop was called first
        self.scenarios.node_stop_scenario.assert_called_once()

        # Verify sleep was called
        mock_sleep.assert_called_once_with(duration)

        # Verify start was called after sleep
        self.scenarios.node_start_scenario.assert_called_once()

        # Verify merge was called
        self.mock_affected_nodes_status.merge_affected_nodes.assert_called_once()


if __name__ == '__main__':
    unittest.main()
@@ -1,680 +0,0 @@
#!/usr/bin/env python3

"""
Test suite for alibaba_node_scenarios class

Usage:
    python -m coverage run -a -m unittest tests/test_alibaba_node_scenarios.py -v

Assisted By: Claude Code
"""

import unittest
from unittest.mock import MagicMock, Mock, patch, PropertyMock, call
import logging
import json

from krkn_lib.k8s import KrknKubernetes
from krkn_lib.models.k8s import AffectedNode, AffectedNodeStatus

from krkn.scenario_plugins.node_actions.alibaba_node_scenarios import Alibaba, alibaba_node_scenarios


class TestAlibaba(unittest.TestCase):
    """Test suite for Alibaba class"""

    def setUp(self):
        """Set up test fixtures"""
        # Mock environment variables
        self.env_patcher = patch.dict('os.environ', {
            'ALIBABA_ID': 'test-access-key',
            'ALIBABA_SECRET': 'test-secret-key',
            'ALIBABA_REGION_ID': 'cn-hangzhou'
        })
        self.env_patcher.start()

    def tearDown(self):
        """Clean up after tests"""
        self.env_patcher.stop()

    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.AcsClient')
    def test_alibaba_init_success(self, mock_acs_client, mock_logging):
        """Test Alibaba class initialization"""
        mock_client = Mock()
        mock_acs_client.return_value = mock_client

        alibaba = Alibaba()

        mock_acs_client.assert_called_once_with('test-access-key', 'test-secret-key', 'cn-hangzhou')
        self.assertEqual(alibaba.compute_client, mock_client)

    @patch('logging.error')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.AcsClient')
    def test_alibaba_init_failure(self, mock_acs_client, mock_logging):
        """Test Alibaba initialization handles errors"""
        mock_acs_client.side_effect = Exception("Credential error")

        alibaba = Alibaba()

        mock_logging.assert_called()
        self.assertIn("Initializing alibaba", str(mock_logging.call_args))

    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.AcsClient')
    def test_send_request_success(self, mock_acs_client):
        """Test _send_request successfully sends request"""
        alibaba = Alibaba()

        mock_request = Mock()
        mock_response = {'Instances': {'Instance': []}}
        alibaba.compute_client.do_action.return_value = json.dumps(mock_response).encode('utf-8')

        result = alibaba._send_request(mock_request)

        mock_request.set_accept_format.assert_called_once_with('json')
        alibaba.compute_client.do_action.assert_called_once_with(mock_request)
        self.assertEqual(result, mock_response)

    @patch('logging.error')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.AcsClient')
    def test_send_request_failure(self, mock_acs_client, mock_logging):
        """Test _send_request handles errors"""
        alibaba = Alibaba()

        mock_request = Mock()
        alibaba.compute_client.do_action.side_effect = Exception("API error")

        # The actual code has a bug in the format string (%S instead of %s),
        # so we expect this to raise a ValueError
        with self.assertRaises(ValueError):
            alibaba._send_request(mock_request)

    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.AcsClient')
    def test_list_instances_success(self, mock_acs_client):
        """Test list_instances returns instance list"""
        alibaba = Alibaba()

        mock_instances = [
            {'InstanceId': 'i-123', 'InstanceName': 'node1'},
            {'InstanceId': 'i-456', 'InstanceName': 'node2'}
        ]
        mock_response = {'Instances': {'Instance': mock_instances}}
        alibaba.compute_client.do_action.return_value = json.dumps(mock_response).encode('utf-8')

        result = alibaba.list_instances()

        self.assertEqual(result, mock_instances)

    @patch('logging.error')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.AcsClient')
    def test_list_instances_no_instances_key(self, mock_acs_client, mock_logging):
        """Test list_instances handles missing Instances key"""
        alibaba = Alibaba()

        mock_response = {'SomeOtherKey': 'value'}
        alibaba.compute_client.do_action.return_value = json.dumps(mock_response).encode('utf-8')

        with self.assertRaises(RuntimeError):
            alibaba.list_instances()

        mock_logging.assert_called()

    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.AcsClient')
    def test_list_instances_none_response(self, mock_acs_client):
        """Test list_instances handles None response"""
        alibaba = Alibaba()
        alibaba._send_request = Mock(return_value=None)

        result = alibaba.list_instances()

        self.assertEqual(result, [])

    @patch('logging.error')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.AcsClient')
    def test_list_instances_exception(self, mock_acs_client, mock_logging):
        """Test list_instances handles exceptions"""
        alibaba = Alibaba()
        alibaba._send_request = Mock(side_effect=Exception("Network error"))

        with self.assertRaises(Exception):
            alibaba.list_instances()

        mock_logging.assert_called()

    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.AcsClient')
    def test_get_instance_id_found(self, mock_acs_client):
        """Test get_instance_id when instance is found"""
        alibaba = Alibaba()

        mock_instances = [
            {'InstanceId': 'i-123', 'InstanceName': 'test-node'},
            {'InstanceId': 'i-456', 'InstanceName': 'other-node'}
        ]
        alibaba.list_instances = Mock(return_value=mock_instances)

        result = alibaba.get_instance_id('test-node')

        self.assertEqual(result, 'i-123')

    @patch('logging.error')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.AcsClient')
    def test_get_instance_id_not_found(self, mock_acs_client, mock_logging):
        """Test get_instance_id when instance is not found"""
        alibaba = Alibaba()

        alibaba.list_instances = Mock(return_value=[])

        with self.assertRaises(RuntimeError):
            alibaba.get_instance_id('nonexistent-node')

        mock_logging.assert_called()
        self.assertIn("Couldn't find vm", str(mock_logging.call_args))

    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.AcsClient')
    def test_start_instances_success(self, mock_acs_client, mock_logging):
        """Test start_instances successfully starts instance"""
        alibaba = Alibaba()
        alibaba._send_request = Mock(return_value={'RequestId': 'req-123'})

        alibaba.start_instances('i-123')

        alibaba._send_request.assert_called_once()
        mock_logging.assert_called()
        call_str = str(mock_logging.call_args_list)
        self.assertTrue('started' in call_str or 'submit successfully' in call_str)

    @patch('logging.error')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.AcsClient')
    def test_start_instances_failure(self, mock_acs_client, mock_logging):
        """Test start_instances handles failure"""
        alibaba = Alibaba()
        alibaba._send_request = Mock(side_effect=Exception("Start failed"))

        with self.assertRaises(Exception):
            alibaba.start_instances('i-123')

        mock_logging.assert_called()
        self.assertIn("Failed to start", str(mock_logging.call_args))

    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.AcsClient')
    def test_stop_instances_success(self, mock_acs_client, mock_logging):
        """Test stop_instances successfully stops instance"""
        alibaba = Alibaba()
        alibaba._send_request = Mock(return_value={'RequestId': 'req-123'})

        alibaba.stop_instances('i-123', force_stop=True)

        alibaba._send_request.assert_called_once()
        mock_logging.assert_called()

    @patch('logging.error')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.AcsClient')
    def test_stop_instances_failure(self, mock_acs_client, mock_logging):
        """Test stop_instances handles failure"""
        alibaba = Alibaba()
        alibaba._send_request = Mock(side_effect=Exception("Stop failed"))

        with self.assertRaises(Exception):
            alibaba.stop_instances('i-123')

        mock_logging.assert_called()
        self.assertIn("Failed to stop", str(mock_logging.call_args))

    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.AcsClient')
    def test_release_instance_success(self, mock_acs_client, mock_logging):
        """Test release_instance successfully releases instance"""
        alibaba = Alibaba()
        alibaba._send_request = Mock(return_value={'RequestId': 'req-123'})
|
||||
|
||||
alibaba.release_instance('i-123', force_release=True)
|
||||
|
||||
alibaba._send_request.assert_called_once()
|
||||
mock_logging.assert_called()
|
||||
self.assertIn("released", str(mock_logging.call_args))
|
||||
|
||||
@patch('logging.error')
|
||||
@patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.AcsClient')
|
||||
def test_release_instance_failure(self, mock_acs_client, mock_logging):
|
||||
"""Test release_instance handles failure"""
|
||||
alibaba = Alibaba()
|
||||
alibaba._send_request = Mock(side_effect=Exception("Release failed"))
|
||||
|
||||
with self.assertRaises(Exception):
|
||||
alibaba.release_instance('i-123')
|
||||
|
||||
mock_logging.assert_called()
|
||||
self.assertIn("Failed to terminate", str(mock_logging.call_args))
|
||||
|
||||
@patch('logging.info')
|
||||
@patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.AcsClient')
|
||||
def test_reboot_instances_success(self, mock_acs_client, mock_logging):
|
||||
"""Test reboot_instances successfully reboots instance"""
|
||||
alibaba = Alibaba()
|
||||
alibaba._send_request = Mock(return_value={'RequestId': 'req-123'})
|
||||
|
||||
alibaba.reboot_instances('i-123', force_reboot=True)
|
||||
|
||||
alibaba._send_request.assert_called_once()
|
||||
mock_logging.assert_called()
|
||||
self.assertIn("rebooted", str(mock_logging.call_args))
|
||||
|
||||
@patch('logging.error')
|
||||
@patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.AcsClient')
|
||||
def test_reboot_instances_failure(self, mock_acs_client, mock_logging):
|
||||
"""Test reboot_instances handles failure"""
|
||||
alibaba = Alibaba()
|
||||
alibaba._send_request = Mock(side_effect=Exception("Reboot failed"))
|
||||
|
||||
with self.assertRaises(Exception):
|
||||
alibaba.reboot_instances('i-123')
|
||||
|
||||
mock_logging.assert_called()
|
||||
self.assertIn("Failed to reboot", str(mock_logging.call_args))
|
||||
|
||||
@patch('logging.info')
|
||||
@patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.AcsClient')
|
||||
def test_get_vm_status_success(self, mock_acs_client, mock_logging):
|
||||
"""Test get_vm_status returns instance status"""
|
||||
alibaba = Alibaba()
|
||||
|
||||
mock_response = {
|
||||
'Instances': {
|
||||
'Instance': [{'Status': 'Running'}]
|
||||
}
|
||||
}
|
||||
alibaba._send_request = Mock(return_value=mock_response)
|
||||
|
||||
result = alibaba.get_vm_status('i-123')
|
||||
|
||||
self.assertEqual(result, 'Running')
|
||||
|
||||
@patch('logging.info')
|
||||
@patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.AcsClient')
|
||||
def test_get_vm_status_no_instances(self, mock_acs_client, mock_logging):
|
||||
"""Test get_vm_status when no instances found"""
|
||||
alibaba = Alibaba()
|
||||
|
||||
mock_response = {
|
||||
'Instances': {
|
||||
'Instance': []
|
||||
}
|
||||
}
|
||||
alibaba._send_request = Mock(return_value=mock_response)
|
||||
|
||||
result = alibaba.get_vm_status('i-123')
|
||||
|
||||
self.assertIsNone(result)
|
||||
|
||||
@patch('logging.info')
|
||||
@patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.AcsClient')
|
||||
def test_get_vm_status_none_response(self, mock_acs_client, mock_logging):
|
||||
"""Test get_vm_status with None response"""
|
||||
alibaba = Alibaba()
|
||||
alibaba._send_request = Mock(return_value=None)
|
||||
|
||||
result = alibaba.get_vm_status('i-123')
|
||||
|
||||
self.assertEqual(result, 'Unknown')
|
||||
|
||||
@patch('logging.error')
|
||||
@patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.AcsClient')
|
||||
def test_get_vm_status_exception(self, mock_acs_client, mock_logging):
|
||||
"""Test get_vm_status handles exceptions"""
|
||||
alibaba = Alibaba()
|
||||
alibaba._send_request = Mock(side_effect=Exception("API error"))
|
||||
|
||||
result = alibaba.get_vm_status('i-123')
|
||||
|
||||
self.assertIsNone(result)
|
||||
mock_logging.assert_called()
|
||||
|
||||
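    # The wait_until_* tests below feed get_vm_status a side_effect sequence to
    # simulate the VM moving through states on successive poll iterations;
    # time.sleep is patched so the polling loop runs instantly.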
    @patch('time.sleep')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.AcsClient')
    def test_wait_until_running_success(self, mock_acs_client, mock_logging, mock_sleep):
        """Test wait_until_running waits for instance to be running"""
        alibaba = Alibaba()

        alibaba.get_vm_status = Mock(side_effect=['Starting', 'Running'])
        mock_affected_node = Mock(spec=AffectedNode)

        result = alibaba.wait_until_running('i-123', 300, mock_affected_node)

        self.assertTrue(result)
        mock_affected_node.set_affected_node_status.assert_called_once()
        args = mock_affected_node.set_affected_node_status.call_args[0]
        self.assertEqual(args[0], 'running')

    @patch('time.sleep')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.AcsClient')
    def test_wait_until_running_timeout(self, mock_acs_client, mock_logging, mock_sleep):
        """Test wait_until_running returns False on timeout"""
        alibaba = Alibaba()

        alibaba.get_vm_status = Mock(return_value='Starting')

        result = alibaba.wait_until_running('i-123', 10, None)

        self.assertFalse(result)

    @patch('time.sleep')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.AcsClient')
    def test_wait_until_stopped_success(self, mock_acs_client, mock_logging, mock_sleep):
        """Test wait_until_stopped waits for instance to be stopped"""
        alibaba = Alibaba()

        alibaba.get_vm_status = Mock(side_effect=['Stopping', 'Stopped'])
        mock_affected_node = Mock(spec=AffectedNode)

        result = alibaba.wait_until_stopped('i-123', 300, mock_affected_node)

        self.assertTrue(result)
        mock_affected_node.set_affected_node_status.assert_called_once()

    @patch('time.sleep')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.AcsClient')
    def test_wait_until_stopped_timeout(self, mock_acs_client, mock_logging, mock_sleep):
        """Test wait_until_stopped returns False on timeout"""
        alibaba = Alibaba()

        alibaba.get_vm_status = Mock(return_value='Stopping')

        result = alibaba.wait_until_stopped('i-123', 10, None)

        self.assertFalse(result)

    @patch('time.sleep')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.AcsClient')
    def test_wait_until_released_success(self, mock_acs_client, mock_logging, mock_sleep):
        """Test wait_until_released waits for instance to be released"""
        alibaba = Alibaba()

        alibaba.get_vm_status = Mock(side_effect=['Deleting', 'Released'])
        mock_affected_node = Mock(spec=AffectedNode)

        result = alibaba.wait_until_released('i-123', 300, mock_affected_node)

        self.assertTrue(result)
        mock_affected_node.set_affected_node_status.assert_called_once()
        args = mock_affected_node.set_affected_node_status.call_args[0]
        self.assertEqual(args[0], 'terminated')

    @patch('time.sleep')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.AcsClient')
    def test_wait_until_released_timeout(self, mock_acs_client, mock_logging, mock_sleep):
        """Test wait_until_released returns False on timeout"""
        alibaba = Alibaba()

        alibaba.get_vm_status = Mock(return_value='Deleting')

        result = alibaba.wait_until_released('i-123', 10, None)

        self.assertFalse(result)

    @patch('time.sleep')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.AcsClient')
    def test_wait_until_released_none_status(self, mock_acs_client, mock_logging, mock_sleep):
        """Test wait_until_released when status becomes None"""
        alibaba = Alibaba()

        alibaba.get_vm_status = Mock(side_effect=['Deleting', None])
        mock_affected_node = Mock(spec=AffectedNode)

        result = alibaba.wait_until_released('i-123', 300, mock_affected_node)

        self.assertTrue(result)


class TestAlibabaNodeScenarios(unittest.TestCase):
    """Test suite for alibaba_node_scenarios class"""

    def setUp(self):
        """Set up test fixtures"""
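        # The Alibaba client appears to read credentials and region from these
        # environment variables at construction time, so they are patched for
        # the duration of each test.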
        self.env_patcher = patch.dict('os.environ', {
            'ALIBABA_ID': 'test-access-key',
            'ALIBABA_SECRET': 'test-secret-key',
            'ALIBABA_REGION_ID': 'cn-hangzhou'
        })
        self.env_patcher.start()

        self.mock_kubecli = Mock(spec=KrknKubernetes)
        self.affected_nodes_status = AffectedNodeStatus()

    def tearDown(self):
        """Clean up after tests"""
        self.env_patcher.stop()

    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.Alibaba')
    def test_init(self, mock_alibaba_class, mock_logging):
        """Test alibaba_node_scenarios initialization"""
        mock_alibaba_instance = Mock()
        mock_alibaba_class.return_value = mock_alibaba_instance

        scenarios = alibaba_node_scenarios(self.mock_kubecli, True, self.affected_nodes_status)

        self.assertEqual(scenarios.kubecli, self.mock_kubecli)
        self.assertTrue(scenarios.node_action_kube_check)
        self.assertEqual(scenarios.alibaba, mock_alibaba_instance)

    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.nodeaction')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.Alibaba')
    def test_node_start_scenario_success(self, mock_alibaba_class, mock_logging, mock_nodeaction):
        """Test node_start_scenario successfully starts node"""
        mock_alibaba = Mock()
        mock_alibaba_class.return_value = mock_alibaba
        mock_alibaba.get_instance_id.return_value = 'i-123'
        mock_alibaba.wait_until_running.return_value = True

        scenarios = alibaba_node_scenarios(self.mock_kubecli, True, self.affected_nodes_status)

        scenarios.node_start_scenario(1, 'test-node', 300, 15)

        mock_alibaba.get_instance_id.assert_called_once_with('test-node')
        mock_alibaba.start_instances.assert_called_once_with('i-123')
        mock_alibaba.wait_until_running.assert_called_once()
        mock_nodeaction.wait_for_ready_status.assert_called_once()
        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 1)

    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.nodeaction')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.Alibaba')
    def test_node_start_scenario_no_kube_check(self, mock_alibaba_class, mock_logging, mock_nodeaction):
        """Test node_start_scenario without Kubernetes check"""
        mock_alibaba = Mock()
        mock_alibaba_class.return_value = mock_alibaba
        mock_alibaba.get_instance_id.return_value = 'i-123'
        mock_alibaba.wait_until_running.return_value = True

        scenarios = alibaba_node_scenarios(self.mock_kubecli, False, self.affected_nodes_status)

        scenarios.node_start_scenario(1, 'test-node', 300, 15)

        mock_alibaba.start_instances.assert_called_once()
        mock_nodeaction.wait_for_ready_status.assert_not_called()

    @patch('logging.error')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.Alibaba')
    def test_node_start_scenario_failure(self, mock_alibaba_class, mock_logging):
        """Test node_start_scenario handles failure"""
        mock_alibaba = Mock()
        mock_alibaba_class.return_value = mock_alibaba
        mock_alibaba.get_instance_id.return_value = 'i-123'
        mock_alibaba.start_instances.side_effect = Exception('Start failed')

        scenarios = alibaba_node_scenarios(self.mock_kubecli, False, self.affected_nodes_status)

        with self.assertRaises(Exception):
            scenarios.node_start_scenario(1, 'test-node', 300, 15)

        mock_logging.assert_called()

    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.nodeaction')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.Alibaba')
    def test_node_start_scenario_multiple_runs(self, mock_alibaba_class, mock_logging, mock_nodeaction):
        """Test node_start_scenario with multiple runs"""
        mock_alibaba = Mock()
        mock_alibaba_class.return_value = mock_alibaba
        mock_alibaba.get_instance_id.return_value = 'i-123'
        mock_alibaba.wait_until_running.return_value = True

        scenarios = alibaba_node_scenarios(self.mock_kubecli, True, self.affected_nodes_status)

        scenarios.node_start_scenario(3, 'test-node', 300, 15)

        self.assertEqual(mock_alibaba.start_instances.call_count, 3)
        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 3)

    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.nodeaction')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.Alibaba')
    def test_node_stop_scenario_success(self, mock_alibaba_class, mock_logging, mock_nodeaction):
        """Test node_stop_scenario successfully stops node"""
        mock_alibaba = Mock()
        mock_alibaba_class.return_value = mock_alibaba
        mock_alibaba.get_instance_id.return_value = 'i-123'
        mock_alibaba.wait_until_stopped.return_value = True

        scenarios = alibaba_node_scenarios(self.mock_kubecli, True, self.affected_nodes_status)

        scenarios.node_stop_scenario(1, 'test-node', 300, 15)

        mock_alibaba.get_instance_id.assert_called_once_with('test-node')
        mock_alibaba.stop_instances.assert_called_once_with('i-123')
        mock_alibaba.wait_until_stopped.assert_called_once()
        mock_nodeaction.wait_for_unknown_status.assert_called_once()
        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 1)

    @patch('logging.error')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.Alibaba')
    def test_node_stop_scenario_failure(self, mock_alibaba_class, mock_logging):
        """Test node_stop_scenario handles failure"""
        mock_alibaba = Mock()
        mock_alibaba_class.return_value = mock_alibaba
        mock_alibaba.get_instance_id.return_value = 'i-123'
        mock_alibaba.stop_instances.side_effect = Exception('Stop failed')

        scenarios = alibaba_node_scenarios(self.mock_kubecli, False, self.affected_nodes_status)

        with self.assertRaises(Exception):
            scenarios.node_stop_scenario(1, 'test-node', 300, 15)

        mock_logging.assert_called()

    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.Alibaba')
    def test_node_termination_scenario_success(self, mock_alibaba_class, mock_logging):
        """Test node_termination_scenario successfully terminates node"""
        mock_alibaba = Mock()
        mock_alibaba_class.return_value = mock_alibaba
        mock_alibaba.get_instance_id.return_value = 'i-123'
        mock_alibaba.wait_until_stopped.return_value = True
        mock_alibaba.wait_until_released.return_value = True

        scenarios = alibaba_node_scenarios(self.mock_kubecli, False, self.affected_nodes_status)

        scenarios.node_termination_scenario(1, 'test-node', 300, 15)

        mock_alibaba.stop_instances.assert_called_once_with('i-123')
        mock_alibaba.wait_until_stopped.assert_called_once()
        mock_alibaba.release_instance.assert_called_once_with('i-123')
        mock_alibaba.wait_until_released.assert_called_once()
        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 1)

    @patch('logging.error')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.Alibaba')
    def test_node_termination_scenario_failure(self, mock_alibaba_class, mock_logging):
        """Test node_termination_scenario handles failure"""
        mock_alibaba = Mock()
        mock_alibaba_class.return_value = mock_alibaba
        mock_alibaba.get_instance_id.return_value = 'i-123'
        mock_alibaba.stop_instances.side_effect = Exception('Stop failed')

        scenarios = alibaba_node_scenarios(self.mock_kubecli, False, self.affected_nodes_status)

        with self.assertRaises(Exception):
            scenarios.node_termination_scenario(1, 'test-node', 300, 15)

        mock_logging.assert_called()

    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.nodeaction')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.Alibaba')
    def test_node_reboot_scenario_success(self, mock_alibaba_class, mock_logging, mock_nodeaction):
        """Test node_reboot_scenario successfully reboots node"""
        mock_alibaba = Mock()
        mock_alibaba_class.return_value = mock_alibaba
        mock_alibaba.get_instance_id.return_value = 'i-123'

        scenarios = alibaba_node_scenarios(self.mock_kubecli, True, self.affected_nodes_status)

        scenarios.node_reboot_scenario(1, 'test-node', 300, soft_reboot=False)

        mock_alibaba.reboot_instances.assert_called_once_with('i-123')
        mock_nodeaction.wait_for_unknown_status.assert_called_once()
        mock_nodeaction.wait_for_ready_status.assert_called_once()
        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 1)

    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.nodeaction')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.Alibaba')
    def test_node_reboot_scenario_no_kube_check(self, mock_alibaba_class, mock_logging, mock_nodeaction):
        """Test node_reboot_scenario without Kubernetes check"""
        mock_alibaba = Mock()
        mock_alibaba_class.return_value = mock_alibaba
        mock_alibaba.get_instance_id.return_value = 'i-123'

        scenarios = alibaba_node_scenarios(self.mock_kubecli, False, self.affected_nodes_status)

        scenarios.node_reboot_scenario(1, 'test-node', 300)

        mock_alibaba.reboot_instances.assert_called_once()
        mock_nodeaction.wait_for_unknown_status.assert_not_called()
        mock_nodeaction.wait_for_ready_status.assert_not_called()

    @patch('logging.error')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.Alibaba')
    def test_node_reboot_scenario_failure(self, mock_alibaba_class, mock_logging):
        """Test node_reboot_scenario handles failure"""
        mock_alibaba = Mock()
        mock_alibaba_class.return_value = mock_alibaba
        mock_alibaba.get_instance_id.return_value = 'i-123'
        mock_alibaba.reboot_instances.side_effect = Exception('Reboot failed')

        scenarios = alibaba_node_scenarios(self.mock_kubecli, False, self.affected_nodes_status)

        with self.assertRaises(Exception):
            scenarios.node_reboot_scenario(1, 'test-node', 300)

        mock_logging.assert_called()

    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.nodeaction')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.Alibaba')
    def test_node_reboot_scenario_multiple_runs(self, mock_alibaba_class, mock_logging, mock_nodeaction):
        """Test node_reboot_scenario with multiple runs"""
        mock_alibaba = Mock()
        mock_alibaba_class.return_value = mock_alibaba
        mock_alibaba.get_instance_id.return_value = 'i-123'

        scenarios = alibaba_node_scenarios(self.mock_kubecli, True, self.affected_nodes_status)

        scenarios.node_reboot_scenario(2, 'test-node', 300)

        self.assertEqual(mock_alibaba.reboot_instances.call_count, 2)
        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 2)


if __name__ == "__main__":
    unittest.main()
@@ -1,984 +0,0 @@
#!/usr/bin/env python3

"""
Test suite for AWS node scenarios

This test suite covers both the AWS class and aws_node_scenarios class
using mocks to avoid actual AWS API calls.

Usage:
    python -m coverage run -a -m unittest tests/test_aws_node_scenarios.py -v

Assisted By: Claude Code
"""
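# A single test case can also be run via unittest's dotted path, e.g.:
#   python -m unittest tests.test_aws_node_scenarios.TestAWS.test_aws_init -v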

import unittest
import sys
from unittest.mock import MagicMock, patch

# Mock external dependencies before any imports that use them
sys.modules['boto3'] = MagicMock()
sys.modules['paramiko'] = MagicMock()
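# Stubbing the modules in sys.modules presumably lets aws_node_scenarios be
# imported even in environments where boto3 and paramiko are not installed.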

from krkn_lib.k8s import KrknKubernetes
from krkn_lib.models.k8s import AffectedNode, AffectedNodeStatus
from krkn.scenario_plugins.node_actions.aws_node_scenarios import AWS, aws_node_scenarios


class TestAWS(unittest.TestCase):
    """Test cases for AWS class"""

    def setUp(self):
        """Set up test fixtures"""
        # Mock boto3 to avoid actual AWS calls
        self.boto_client_patcher = patch('boto3.client')
        self.boto_resource_patcher = patch('boto3.resource')

        self.mock_client = self.boto_client_patcher.start()
        self.mock_resource = self.boto_resource_patcher.start()

        # Create AWS instance with mocked boto3
        self.aws = AWS()

    def tearDown(self):
        """Clean up after tests"""
        self.boto_client_patcher.stop()
        self.boto_resource_patcher.stop()

    def test_aws_init(self):
        """Test AWS class initialization"""
        self.assertIsNotNone(self.aws.boto_client)
        self.assertIsNotNone(self.aws.boto_resource)
        self.assertIsNotNone(self.aws.boto_instance)

    def test_get_instance_id_by_dns_name(self):
        """Test getting instance ID by DNS name"""
        mock_response = {
            'Reservations': [{
                'Instances': [{
                    'InstanceId': 'i-1234567890abcdef0'
                }]
            }]
        }
        self.aws.boto_client.describe_instances = MagicMock(return_value=mock_response)

        instance_id = self.aws.get_instance_id('ip-10-0-1-100.ec2.internal')

        self.assertEqual(instance_id, 'i-1234567890abcdef0')
        self.aws.boto_client.describe_instances.assert_called_once()

    def test_get_instance_id_by_ip_address(self):
        """Test getting instance ID by IP address when DNS name fails"""
        # First call returns empty, second call returns the instance
        mock_response_empty = {'Reservations': []}
        mock_response_with_instance = {
            'Reservations': [{
                'Instances': [{
                    'InstanceId': 'i-1234567890abcdef0'
                }]
            }]
        }
        self.aws.boto_client.describe_instances = MagicMock(
            side_effect=[mock_response_empty, mock_response_with_instance]
        )

        instance_id = self.aws.get_instance_id('ip-10-0-1-100')

        self.assertEqual(instance_id, 'i-1234567890abcdef0')
        self.assertEqual(self.aws.boto_client.describe_instances.call_count, 2)

    def test_start_instances_success(self):
        """Test starting instances successfully"""
        instance_id = 'i-1234567890abcdef0'
        self.aws.boto_client.start_instances = MagicMock()

        self.aws.start_instances(instance_id)

        self.aws.boto_client.start_instances.assert_called_once_with(
            InstanceIds=[instance_id]
        )

    def test_start_instances_failure(self):
        """Test starting instances with failure"""
        instance_id = 'i-1234567890abcdef0'
        self.aws.boto_client.start_instances = MagicMock(
            side_effect=Exception("AWS error")
        )

        with self.assertRaises(RuntimeError):
            self.aws.start_instances(instance_id)

    def test_stop_instances_success(self):
        """Test stopping instances successfully"""
        instance_id = 'i-1234567890abcdef0'
        self.aws.boto_client.stop_instances = MagicMock()

        self.aws.stop_instances(instance_id)

        self.aws.boto_client.stop_instances.assert_called_once_with(
            InstanceIds=[instance_id]
        )

    def test_stop_instances_failure(self):
        """Test stopping instances with failure"""
        instance_id = 'i-1234567890abcdef0'
        self.aws.boto_client.stop_instances = MagicMock(
            side_effect=Exception("AWS error")
        )

        with self.assertRaises(RuntimeError):
            self.aws.stop_instances(instance_id)

    def test_terminate_instances_success(self):
        """Test terminating instances successfully"""
        instance_id = 'i-1234567890abcdef0'
        self.aws.boto_client.terminate_instances = MagicMock()

        self.aws.terminate_instances(instance_id)

        self.aws.boto_client.terminate_instances.assert_called_once_with(
            InstanceIds=[instance_id]
        )

    def test_terminate_instances_failure(self):
        """Test terminating instances with failure"""
        instance_id = 'i-1234567890abcdef0'
        self.aws.boto_client.terminate_instances = MagicMock(
            side_effect=Exception("AWS error")
        )

        with self.assertRaises(RuntimeError):
            self.aws.terminate_instances(instance_id)

    def test_reboot_instances_success(self):
        """Test rebooting instances successfully"""
        instance_id = 'i-1234567890abcdef0'
        self.aws.boto_client.reboot_instances = MagicMock()

        self.aws.reboot_instances(instance_id)

        self.aws.boto_client.reboot_instances.assert_called_once_with(
            InstanceIds=[instance_id]
        )

    def test_reboot_instances_failure(self):
        """Test rebooting instances with failure"""
        instance_id = 'i-1234567890abcdef0'
        self.aws.boto_client.reboot_instances = MagicMock(
            side_effect=Exception("AWS error")
        )

        with self.assertRaises(RuntimeError):
            self.aws.reboot_instances(instance_id)

    def test_wait_until_running_success(self):
        """Test waiting until instance is running successfully"""
        instance_id = 'i-1234567890abcdef0'
        self.aws.boto_instance.wait_until_running = MagicMock()

        result = self.aws.wait_until_running(instance_id, timeout=600, poll_interval=15)

        self.assertTrue(result)
        self.aws.boto_instance.wait_until_running.assert_called_once()

    def test_wait_until_running_with_affected_node(self):
        """Test waiting until running with affected node tracking"""
        instance_id = 'i-1234567890abcdef0'
        affected_node = MagicMock(spec=AffectedNode)
        self.aws.boto_instance.wait_until_running = MagicMock()

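        # time.time() is sampled once at the start and once at the end of the
        # wait, so the duration recorded on the affected node is 110 - 100 = 10s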
        with patch('time.time', side_effect=[100, 110]):
            result = self.aws.wait_until_running(
                instance_id,
                timeout=600,
                affected_node=affected_node,
                poll_interval=15
            )

        self.assertTrue(result)
        affected_node.set_affected_node_status.assert_called_once_with("running", 10)

    def test_wait_until_running_failure(self):
        """Test waiting until running with failure"""
        instance_id = 'i-1234567890abcdef0'
        self.aws.boto_instance.wait_until_running = MagicMock(
            side_effect=Exception("Timeout")
        )

        result = self.aws.wait_until_running(instance_id)

        self.assertFalse(result)

    def test_wait_until_stopped_success(self):
        """Test waiting until instance is stopped successfully"""
        instance_id = 'i-1234567890abcdef0'
        self.aws.boto_instance.wait_until_stopped = MagicMock()

        result = self.aws.wait_until_stopped(instance_id, timeout=600, poll_interval=15)

        self.assertTrue(result)
        self.aws.boto_instance.wait_until_stopped.assert_called_once()

    def test_wait_until_stopped_with_affected_node(self):
        """Test waiting until stopped with affected node tracking"""
        instance_id = 'i-1234567890abcdef0'
        affected_node = MagicMock(spec=AffectedNode)
        self.aws.boto_instance.wait_until_stopped = MagicMock()

        with patch('time.time', side_effect=[100, 115]):
            result = self.aws.wait_until_stopped(
                instance_id,
                timeout=600,
                affected_node=affected_node,
                poll_interval=15
            )

        self.assertTrue(result)
        affected_node.set_affected_node_status.assert_called_once_with("stopped", 15)

    def test_wait_until_stopped_failure(self):
        """Test waiting until stopped with failure"""
        instance_id = 'i-1234567890abcdef0'
        self.aws.boto_instance.wait_until_stopped = MagicMock(
            side_effect=Exception("Timeout")
        )

        result = self.aws.wait_until_stopped(instance_id)

        self.assertFalse(result)

    def test_wait_until_terminated_success(self):
        """Test waiting until instance is terminated successfully"""
        instance_id = 'i-1234567890abcdef0'
        self.aws.boto_instance.wait_until_terminated = MagicMock()

        result = self.aws.wait_until_terminated(instance_id, timeout=600, poll_interval=15)

        self.assertTrue(result)
        self.aws.boto_instance.wait_until_terminated.assert_called_once()

    def test_wait_until_terminated_with_affected_node(self):
        """Test waiting until terminated with affected node tracking"""
        instance_id = 'i-1234567890abcdef0'
        affected_node = MagicMock(spec=AffectedNode)
        self.aws.boto_instance.wait_until_terminated = MagicMock()

        with patch('time.time', side_effect=[100, 120]):
            result = self.aws.wait_until_terminated(
                instance_id,
                timeout=600,
                affected_node=affected_node,
                poll_interval=15
            )

        self.assertTrue(result)
        affected_node.set_affected_node_status.assert_called_once_with("terminated", 20)

    def test_wait_until_terminated_failure(self):
        """Test waiting until terminated with failure"""
        instance_id = 'i-1234567890abcdef0'
        self.aws.boto_instance.wait_until_terminated = MagicMock(
            side_effect=Exception("Timeout")
        )

        result = self.aws.wait_until_terminated(instance_id)

        self.assertFalse(result)

    def test_create_default_network_acl_success(self):
        """Test creating default network ACL successfully"""
        vpc_id = 'vpc-12345678'
        acl_id = 'acl-12345678'
        mock_response = {
            'NetworkAcl': {
                'NetworkAclId': acl_id
            }
        }
        self.aws.boto_client.create_network_acl = MagicMock(return_value=mock_response)

        result = self.aws.create_default_network_acl(vpc_id)

        self.assertEqual(result, acl_id)
        self.aws.boto_client.create_network_acl.assert_called_once_with(VpcId=vpc_id)

    def test_create_default_network_acl_failure(self):
        """Test creating default network ACL with failure"""
        vpc_id = 'vpc-12345678'
        self.aws.boto_client.create_network_acl = MagicMock(
            side_effect=Exception("AWS error")
        )

        with self.assertRaises(RuntimeError):
            self.aws.create_default_network_acl(vpc_id)

    def test_replace_network_acl_association_success(self):
        """Test replacing network ACL association successfully"""
        association_id = 'aclassoc-12345678'
        acl_id = 'acl-12345678'
        new_association_id = 'aclassoc-87654321'
        mock_response = {
            'NewAssociationId': new_association_id
        }
        self.aws.boto_client.replace_network_acl_association = MagicMock(
            return_value=mock_response
        )

        result = self.aws.replace_network_acl_association(association_id, acl_id)

        self.assertEqual(result, new_association_id)
        self.aws.boto_client.replace_network_acl_association.assert_called_once_with(
            AssociationId=association_id, NetworkAclId=acl_id
        )

    def test_replace_network_acl_association_failure(self):
        """Test replacing network ACL association with failure"""
        association_id = 'aclassoc-12345678'
        acl_id = 'acl-12345678'
        self.aws.boto_client.replace_network_acl_association = MagicMock(
            side_effect=Exception("AWS error")
        )

        with self.assertRaises(RuntimeError):
            self.aws.replace_network_acl_association(association_id, acl_id)

    def test_describe_network_acls_success(self):
        """Test describing network ACLs successfully"""
        vpc_id = 'vpc-12345678'
        subnet_id = 'subnet-12345678'
        acl_id = 'acl-12345678'
        associations = [{'NetworkAclId': acl_id, 'SubnetId': subnet_id}]
        mock_response = {
            'NetworkAcls': [{
                'Associations': associations
            }]
        }
        self.aws.boto_client.describe_network_acls = MagicMock(return_value=mock_response)

        result_associations, result_acl_id = self.aws.describe_network_acls(vpc_id, subnet_id)

        self.assertEqual(result_associations, associations)
        self.assertEqual(result_acl_id, acl_id)

    def test_describe_network_acls_failure(self):
        """Test describing network ACLs with failure"""
        vpc_id = 'vpc-12345678'
        subnet_id = 'subnet-12345678'
        self.aws.boto_client.describe_network_acls = MagicMock(
            side_effect=Exception("AWS error")
        )

        with self.assertRaises(RuntimeError):
            self.aws.describe_network_acls(vpc_id, subnet_id)

    def test_delete_network_acl_success(self):
        """Test deleting network ACL successfully"""
        acl_id = 'acl-12345678'
        self.aws.boto_client.delete_network_acl = MagicMock()

        self.aws.delete_network_acl(acl_id)

        self.aws.boto_client.delete_network_acl.assert_called_once_with(NetworkAclId=acl_id)

    def test_delete_network_acl_failure(self):
        """Test deleting network ACL with failure"""
        acl_id = 'acl-12345678'
        self.aws.boto_client.delete_network_acl = MagicMock(
            side_effect=Exception("AWS error")
        )

        with self.assertRaises(RuntimeError):
            self.aws.delete_network_acl(acl_id)

    def test_detach_volumes_success(self):
        """Test detaching volumes successfully"""
        volume_ids = ['vol-12345678', 'vol-87654321']
        self.aws.boto_client.detach_volume = MagicMock()

        self.aws.detach_volumes(volume_ids)

        self.assertEqual(self.aws.boto_client.detach_volume.call_count, 2)
        self.aws.boto_client.detach_volume.assert_any_call(VolumeId='vol-12345678', Force=True)
        self.aws.boto_client.detach_volume.assert_any_call(VolumeId='vol-87654321', Force=True)

    def test_detach_volumes_partial_failure(self):
        """Test detaching volumes with partial failure"""
        volume_ids = ['vol-12345678', 'vol-87654321']
        # First call succeeds, second fails - should not raise exception
        self.aws.boto_client.detach_volume = MagicMock(
            side_effect=[None, Exception("AWS error")]
        )

        # Should not raise exception, just log error
        self.aws.detach_volumes(volume_ids)

        self.assertEqual(self.aws.boto_client.detach_volume.call_count, 2)

    def test_attach_volume_success(self):
        """Test attaching volume successfully"""
        attachment = {
            'VolumeId': 'vol-12345678',
            'InstanceId': 'i-1234567890abcdef0',
            'Device': '/dev/sdf'
        }
        mock_volume = MagicMock()
        mock_volume.state = 'available'
        self.aws.boto_resource.Volume = MagicMock(return_value=mock_volume)
        self.aws.boto_client.attach_volume = MagicMock()

        self.aws.attach_volume(attachment)

        self.aws.boto_client.attach_volume.assert_called_once_with(
            InstanceId=attachment['InstanceId'],
            Device=attachment['Device'],
            VolumeId=attachment['VolumeId']
        )

    def test_attach_volume_already_in_use(self):
        """Test attaching volume that is already in use"""
        attachment = {
            'VolumeId': 'vol-12345678',
            'InstanceId': 'i-1234567890abcdef0',
            'Device': '/dev/sdf'
        }
        mock_volume = MagicMock()
        mock_volume.state = 'in-use'
        self.aws.boto_resource.Volume = MagicMock(return_value=mock_volume)
        self.aws.boto_client.attach_volume = MagicMock()

        self.aws.attach_volume(attachment)

        # Should not attempt to attach
        self.aws.boto_client.attach_volume.assert_not_called()

    def test_attach_volume_failure(self):
        """Test attaching volume with failure"""
        attachment = {
            'VolumeId': 'vol-12345678',
            'InstanceId': 'i-1234567890abcdef0',
            'Device': '/dev/sdf'
        }
        mock_volume = MagicMock()
        mock_volume.state = 'available'
        self.aws.boto_resource.Volume = MagicMock(return_value=mock_volume)
        self.aws.boto_client.attach_volume = MagicMock(
            side_effect=Exception("AWS error")
        )

        with self.assertRaises(RuntimeError):
            self.aws.attach_volume(attachment)

    def test_get_volumes_ids(self):
        """Test getting volume IDs from instance"""
        instance_id = ['i-1234567890abcdef0']
        mock_response = {
            'Reservations': [{
                'Instances': [{
                    'BlockDeviceMappings': [
                        {'DeviceName': '/dev/sda1', 'Ebs': {'VolumeId': 'vol-root'}},
                        {'DeviceName': '/dev/sdf', 'Ebs': {'VolumeId': 'vol-12345678'}},
                        {'DeviceName': '/dev/sdg', 'Ebs': {'VolumeId': 'vol-87654321'}}
                    ]
                }]
            }]
        }
        mock_instance = MagicMock()
        mock_instance.root_device_name = '/dev/sda1'
        self.aws.boto_resource.Instance = MagicMock(return_value=mock_instance)
        self.aws.boto_client.describe_instances = MagicMock(return_value=mock_response)

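        # get_volumes_ids is expected to skip the root device (/dev/sda1,
        # i.e. vol-root) and return only the data volume IDs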
        volume_ids = self.aws.get_volumes_ids(instance_id)

        self.assertEqual(len(volume_ids), 2)
        self.assertIn('vol-12345678', volume_ids)
        self.assertIn('vol-87654321', volume_ids)
        self.assertNotIn('vol-root', volume_ids)

    def test_get_volume_attachment_details(self):
        """Test getting volume attachment details"""
        volume_ids = ['vol-12345678', 'vol-87654321']
        mock_response = {
            'Volumes': [
                {'VolumeId': 'vol-12345678', 'State': 'in-use'},
                {'VolumeId': 'vol-87654321', 'State': 'available'}
            ]
        }
        self.aws.boto_client.describe_volumes = MagicMock(return_value=mock_response)

        details = self.aws.get_volume_attachment_details(volume_ids)

        self.assertEqual(len(details), 2)
        self.assertEqual(details[0]['VolumeId'], 'vol-12345678')
        self.assertEqual(details[1]['VolumeId'], 'vol-87654321')

    def test_get_root_volume_id(self):
        """Test getting root volume ID"""
        instance_id = ['i-1234567890abcdef0']
        mock_instance = MagicMock()
        mock_instance.root_device_name = '/dev/sda1'
        self.aws.boto_resource.Instance = MagicMock(return_value=mock_instance)

        root_volume = self.aws.get_root_volume_id(instance_id)

        self.assertEqual(root_volume, '/dev/sda1')

    def test_get_volume_state(self):
        """Test getting volume state"""
        volume_id = 'vol-12345678'
        mock_volume = MagicMock()
        mock_volume.state = 'available'
        self.aws.boto_resource.Volume = MagicMock(return_value=mock_volume)

        state = self.aws.get_volume_state(volume_id)

        self.assertEqual(state, 'available')


class TestAWSNodeScenarios(unittest.TestCase):
    """Test cases for aws_node_scenarios class"""

    def setUp(self):
        """Set up test fixtures"""
        self.kubecli = MagicMock(spec=KrknKubernetes)
        self.affected_nodes_status = AffectedNodeStatus()

        # Mock the AWS class
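        # The patch only needs to be active while the constructor runs; the
        # scenario object keeps a reference to the mocked AWS instance afterwards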
        with patch('krkn.scenario_plugins.node_actions.aws_node_scenarios.AWS') as mock_aws_class:
            self.mock_aws = MagicMock()
            mock_aws_class.return_value = self.mock_aws
            self.scenario = aws_node_scenarios(
                kubecli=self.kubecli,
                node_action_kube_check=True,
                affected_nodes_status=self.affected_nodes_status
            )

    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_ready_status')
    def test_node_start_scenario_success(self, mock_wait_ready):
        """Test node start scenario successfully"""
        node = 'ip-10-0-1-100.ec2.internal'
        instance_id = 'i-1234567890abcdef0'

        self.mock_aws.get_instance_id.return_value = instance_id
        self.mock_aws.start_instances.return_value = None
        self.mock_aws.wait_until_running.return_value = True

        self.scenario.node_start_scenario(
            instance_kill_count=1,
            node=node,
            timeout=600,
            poll_interval=15
        )

        self.mock_aws.get_instance_id.assert_called_once_with(node)
        self.mock_aws.start_instances.assert_called_once_with(instance_id)
        self.mock_aws.wait_until_running.assert_called_once()
        mock_wait_ready.assert_called_once()
        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 1)
        self.assertEqual(self.affected_nodes_status.affected_nodes[0].node_name, node)

    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_ready_status')
    def test_node_start_scenario_no_kube_check(self, mock_wait_ready):
        """Test node start scenario without kube check"""
        node = 'ip-10-0-1-100.ec2.internal'
        instance_id = 'i-1234567890abcdef0'

        # Create scenario with node_action_kube_check=False
        with patch('krkn.scenario_plugins.node_actions.aws_node_scenarios.AWS') as mock_aws_class:
            mock_aws = MagicMock()
            mock_aws_class.return_value = mock_aws
            scenario = aws_node_scenarios(
                kubecli=self.kubecli,
                node_action_kube_check=False,
                affected_nodes_status=AffectedNodeStatus()
            )

        mock_aws.get_instance_id.return_value = instance_id
        mock_aws.start_instances.return_value = None
        mock_aws.wait_until_running.return_value = True

        scenario.node_start_scenario(
            instance_kill_count=1,
            node=node,
            timeout=600,
            poll_interval=15
        )

        # Should not call wait_for_ready_status
        mock_wait_ready.assert_not_called()

    def test_node_start_scenario_failure(self):
        """Test node start scenario with failure"""
        node = 'ip-10-0-1-100.ec2.internal'

        self.mock_aws.get_instance_id.side_effect = Exception("AWS error")

        with self.assertRaises(RuntimeError):
            self.scenario.node_start_scenario(
                instance_kill_count=1,
                node=node,
                timeout=600,
                poll_interval=15
            )

    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_unknown_status')
    def test_node_stop_scenario_success(self, mock_wait_unknown):
        """Test node stop scenario successfully"""
        node = 'ip-10-0-1-100.ec2.internal'
        instance_id = 'i-1234567890abcdef0'

        self.mock_aws.get_instance_id.return_value = instance_id
        self.mock_aws.stop_instances.return_value = None
        self.mock_aws.wait_until_stopped.return_value = True

        self.scenario.node_stop_scenario(
            instance_kill_count=1,
            node=node,
            timeout=600,
            poll_interval=15
        )

        self.mock_aws.get_instance_id.assert_called_once_with(node)
        self.mock_aws.stop_instances.assert_called_once_with(instance_id)
        self.mock_aws.wait_until_stopped.assert_called_once()
        mock_wait_unknown.assert_called_once()
        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 1)

    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_unknown_status')
    def test_node_stop_scenario_no_kube_check(self, mock_wait_unknown):
        """Test node stop scenario without kube check"""
        node = 'ip-10-0-1-100.ec2.internal'
        instance_id = 'i-1234567890abcdef0'

        # Create scenario with node_action_kube_check=False
        with patch('krkn.scenario_plugins.node_actions.aws_node_scenarios.AWS') as mock_aws_class:
            mock_aws = MagicMock()
            mock_aws_class.return_value = mock_aws
            scenario = aws_node_scenarios(
                kubecli=self.kubecli,
                node_action_kube_check=False,
                affected_nodes_status=AffectedNodeStatus()
            )

        mock_aws.get_instance_id.return_value = instance_id
        mock_aws.stop_instances.return_value = None
        mock_aws.wait_until_stopped.return_value = True

        scenario.node_stop_scenario(
            instance_kill_count=1,
            node=node,
            timeout=600,
            poll_interval=15
        )

        # Should not call wait_for_unknown_status
        mock_wait_unknown.assert_not_called()

    def test_node_stop_scenario_failure(self):
        """Test node stop scenario with failure"""
        node = 'ip-10-0-1-100.ec2.internal'

        self.mock_aws.get_instance_id.side_effect = Exception("AWS error")

        with self.assertRaises(RuntimeError):
            self.scenario.node_stop_scenario(
                instance_kill_count=1,
                node=node,
                timeout=600,
                poll_interval=15
            )

    @patch('time.sleep')
    def test_node_termination_scenario_success(self, _mock_sleep):
        """Test node termination scenario successfully"""
        node = 'ip-10-0-1-100.ec2.internal'
        instance_id = 'i-1234567890abcdef0'

        self.mock_aws.get_instance_id.return_value = instance_id
        self.mock_aws.terminate_instances.return_value = None
        self.mock_aws.wait_until_terminated.return_value = True
        self.kubecli.list_nodes.return_value = []

        self.scenario.node_termination_scenario(
            instance_kill_count=1,
            node=node,
            timeout=600,
            poll_interval=15
        )

        self.mock_aws.get_instance_id.assert_called_once_with(node)
        self.mock_aws.terminate_instances.assert_called_once_with(instance_id)
        self.mock_aws.wait_until_terminated.assert_called_once()
        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 1)

    @patch('time.sleep')
    def test_node_termination_scenario_node_still_exists(self, _mock_sleep):
        """Test node termination scenario when node still exists"""
        node = 'ip-10-0-1-100.ec2.internal'
        instance_id = 'i-1234567890abcdef0'

        self.mock_aws.get_instance_id.return_value = instance_id
        self.mock_aws.terminate_instances.return_value = None
        self.mock_aws.wait_until_terminated.return_value = True
        # Node still in list after timeout
        self.kubecli.list_nodes.return_value = [node]

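        # With a short timeout and the node never leaving the node list, the
        # scenario is expected to give up and raise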
with self.assertRaises(RuntimeError):
|
||||
self.scenario.node_termination_scenario(
|
||||
instance_kill_count=1,
|
||||
node=node,
|
||||
timeout=2,
|
||||
poll_interval=15
|
||||
)
|
||||
|
||||
def test_node_termination_scenario_failure(self):
|
||||
"""Test node termination scenario with failure"""
|
||||
node = 'ip-10-0-1-100.ec2.internal'
|
||||
|
||||
self.mock_aws.get_instance_id.side_effect = Exception("AWS error")
|
||||
|
||||
with self.assertRaises(RuntimeError):
|
||||
self.scenario.node_termination_scenario(
|
||||
instance_kill_count=1,
|
||||
node=node,
|
||||
timeout=600,
|
||||
poll_interval=15
|
||||
)
|
||||
|
||||
@patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_unknown_status')
|
||||
@patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_ready_status')
|
||||
def test_node_reboot_scenario_success(self, mock_wait_ready, mock_wait_unknown):
|
||||
"""Test node reboot scenario successfully"""
|
||||
node = 'ip-10-0-1-100.ec2.internal'
|
||||
instance_id = 'i-1234567890abcdef0'
|
||||
|
||||
self.mock_aws.get_instance_id.return_value = instance_id
|
||||
self.mock_aws.reboot_instances.return_value = None
|
||||
|
||||
self.scenario.node_reboot_scenario(
|
||||
instance_kill_count=1,
|
||||
node=node,
|
||||
timeout=600
|
||||
)
|
||||
|
||||
self.mock_aws.get_instance_id.assert_called_once_with(node)
|
||||
self.mock_aws.reboot_instances.assert_called_once_with(instance_id)
|
||||
mock_wait_unknown.assert_called_once()
|
||||
mock_wait_ready.assert_called_once()
|
||||
self.assertEqual(len(self.affected_nodes_status.affected_nodes), 1)
|
||||
|
||||
@patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_unknown_status')
|
||||
@patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_ready_status')
|
||||
def test_node_reboot_scenario_no_kube_check(self, mock_wait_ready, mock_wait_unknown):
|
||||
"""Test node reboot scenario without kube check"""
|
||||
node = 'ip-10-0-1-100.ec2.internal'
|
||||
instance_id = 'i-1234567890abcdef0'
|
||||
|
||||
# Create scenario with node_action_kube_check=False
|
||||
with patch('krkn.scenario_plugins.node_actions.aws_node_scenarios.AWS') as mock_aws_class:
|
||||
mock_aws = MagicMock()
|
||||
mock_aws_class.return_value = mock_aws
|
||||
scenario = aws_node_scenarios(
|
||||
kubecli=self.kubecli,
|
||||
node_action_kube_check=False,
|
||||
affected_nodes_status=AffectedNodeStatus()
|
||||
)
|
||||
|
||||
mock_aws.get_instance_id.return_value = instance_id
|
||||
mock_aws.reboot_instances.return_value = None
|
||||
|
||||
scenario.node_reboot_scenario(
|
||||
instance_kill_count=1,
|
||||
node=node,
|
||||
timeout=600
|
||||
)
|
||||
|
||||
# Should not call wait functions
|
||||
mock_wait_unknown.assert_not_called()
|
||||
mock_wait_ready.assert_not_called()
|
||||
|
||||
def test_node_reboot_scenario_failure(self):
|
||||
"""Test node reboot scenario with failure"""
|
||||
node = 'ip-10-0-1-100.ec2.internal'
|
||||
|
||||
self.mock_aws.get_instance_id.side_effect = Exception("AWS error")
|
||||
|
||||
with self.assertRaises(RuntimeError):
|
||||
self.scenario.node_reboot_scenario(
|
||||
instance_kill_count=1,
|
||||
node=node,
|
||||
timeout=600
|
||||
)
|
||||
|
||||
def test_node_reboot_scenario_multiple_kills(self):
|
||||
"""Test node reboot scenario with multiple kill counts"""
|
||||
node = 'ip-10-0-1-100.ec2.internal'
|
||||
instance_id = 'i-1234567890abcdef0'
|
||||
|
||||
with patch('krkn.scenario_plugins.node_actions.aws_node_scenarios.AWS') as mock_aws_class:
|
||||
mock_aws = MagicMock()
|
||||
mock_aws_class.return_value = mock_aws
|
||||
scenario = aws_node_scenarios(
|
||||
kubecli=self.kubecli,
|
||||
node_action_kube_check=False,
|
||||
affected_nodes_status=AffectedNodeStatus()
|
||||
)
|
||||
|
||||
mock_aws.get_instance_id.return_value = instance_id
|
||||
mock_aws.reboot_instances.return_value = None
|
||||
|
||||
scenario.node_reboot_scenario(
|
||||
instance_kill_count=3,
|
||||
node=node,
|
||||
timeout=600
|
||||
)
|
||||
|
||||
self.assertEqual(mock_aws.reboot_instances.call_count, 3)
|
||||
self.assertEqual(len(scenario.affected_nodes_status.affected_nodes), 3)
|
||||
|
||||
def test_get_disk_attachment_info_success(self):
|
||||
"""Test getting disk attachment info successfully"""
|
||||
node = 'ip-10-0-1-100.ec2.internal'
|
||||
instance_id = 'i-1234567890abcdef0'
|
||||
volume_ids = ['vol-12345678']
|
||||
attachment_details = [
|
||||
{
|
||||
'VolumeId': 'vol-12345678',
|
||||
'Attachments': [{
|
||||
'InstanceId': instance_id,
|
||||
'Device': '/dev/sdf'
|
||||
}]
|
||||
}
|
||||
]
|
||||
|
||||
self.mock_aws.get_instance_id.return_value = instance_id
|
||||
self.mock_aws.get_volumes_ids.return_value = volume_ids
|
||||
self.mock_aws.get_volume_attachment_details.return_value = attachment_details
|
||||
|
||||
result = self.scenario.get_disk_attachment_info(
|
||||
instance_kill_count=1,
|
||||
node=node
|
||||
)
|
||||
|
||||
self.assertEqual(result, attachment_details)
|
||||
self.mock_aws.get_instance_id.assert_called_once_with(node)
|
||||
self.mock_aws.get_volumes_ids.assert_called_once()
|
||||
self.mock_aws.get_volume_attachment_details.assert_called_once_with(volume_ids)

    def test_get_disk_attachment_info_no_volumes(self):
        """Test getting disk attachment info when no volumes exist"""
        node = 'ip-10-0-1-100.ec2.internal'
        instance_id = 'i-1234567890abcdef0'

        self.mock_aws.get_instance_id.return_value = instance_id
        self.mock_aws.get_volumes_ids.return_value = []

        result = self.scenario.get_disk_attachment_info(
            instance_kill_count=1,
            node=node
        )

        self.assertIsNone(result)
        self.mock_aws.get_volume_attachment_details.assert_not_called()

    def test_get_disk_attachment_info_failure(self):
        """Test getting disk attachment info with failure"""
        node = 'ip-10-0-1-100.ec2.internal'

        self.mock_aws.get_instance_id.side_effect = Exception("AWS error")

        with self.assertRaises(RuntimeError):
            self.scenario.get_disk_attachment_info(
                instance_kill_count=1,
                node=node
            )

    def test_disk_detach_scenario_success(self):
        """Test disk detach scenario successfully"""
        node = 'ip-10-0-1-100.ec2.internal'
        instance_id = 'i-1234567890abcdef0'
        volume_ids = ['vol-12345678', 'vol-87654321']

        self.mock_aws.get_instance_id.return_value = instance_id
        self.mock_aws.get_volumes_ids.return_value = volume_ids
        self.mock_aws.detach_volumes.return_value = None

        self.scenario.disk_detach_scenario(
            instance_kill_count=1,
            node=node,
            timeout=600
        )

        self.mock_aws.get_instance_id.assert_called_once_with(node)
        self.mock_aws.get_volumes_ids.assert_called_once()
        self.mock_aws.detach_volumes.assert_called_once_with(volume_ids)

    def test_disk_detach_scenario_failure(self):
        """Test disk detach scenario with failure"""
        node = 'ip-10-0-1-100.ec2.internal'

        self.mock_aws.get_instance_id.side_effect = Exception("AWS error")

        with self.assertRaises(RuntimeError):
            self.scenario.disk_detach_scenario(
                instance_kill_count=1,
                node=node,
                timeout=600
            )

    def test_disk_attach_scenario_success(self):
        """Test disk attach scenario successfully"""
        attachment_details = [
            {
                'VolumeId': 'vol-12345678',
                'Attachments': [{
                    'InstanceId': 'i-1234567890abcdef0',
                    'Device': '/dev/sdf',
                    'VolumeId': 'vol-12345678'
                }]
            },
            {
                'VolumeId': 'vol-87654321',
                'Attachments': [{
                    'InstanceId': 'i-1234567890abcdef0',
                    'Device': '/dev/sdg',
                    'VolumeId': 'vol-87654321'
                }]
            }
        ]

        self.mock_aws.attach_volume.return_value = None

        self.scenario.disk_attach_scenario(
            instance_kill_count=1,
            attachment_details=attachment_details,
            timeout=600
        )

        self.assertEqual(self.mock_aws.attach_volume.call_count, 2)
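
        # attach_volume is driven once per entry in attachment_details, hence
        # exactly two calls for the two volumes in the fixture above.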

    def test_disk_attach_scenario_multiple_kills(self):
        """Test disk attach scenario with multiple kill counts"""
        attachment_details = [
            {
                'VolumeId': 'vol-12345678',
                'Attachments': [{
                    'InstanceId': 'i-1234567890abcdef0',
                    'Device': '/dev/sdf',
                    'VolumeId': 'vol-12345678'
                }]
            }
        ]

        self.mock_aws.attach_volume.return_value = None

        self.scenario.disk_attach_scenario(
            instance_kill_count=3,
            attachment_details=attachment_details,
            timeout=600
        )

        # Should call attach_volume 3 times (once per kill count)
        self.assertEqual(self.mock_aws.attach_volume.call_count, 3)


if __name__ == "__main__":
    unittest.main()
@@ -1,784 +0,0 @@
#!/usr/bin/env python3

"""
Test suite for azure_node_scenarios class

Usage:
    python -m coverage run -a -m unittest tests/test_az_node_scenarios.py -v

Assisted By: Claude Code
"""

import unittest
from unittest.mock import Mock, patch

from krkn_lib.k8s import KrknKubernetes
from krkn_lib.models.k8s import AffectedNode, AffectedNodeStatus

from krkn.scenario_plugins.node_actions.az_node_scenarios import Azure, azure_node_scenarios


class TestAzure(unittest.TestCase):
    """Test suite for Azure class"""

    def setUp(self):
        """Set up test fixtures"""
        # Mock environment variable
        self.env_patcher = patch.dict('os.environ', {'AZURE_SUBSCRIPTION_ID': 'test-subscription-id'})
        self.env_patcher.start()

    def tearDown(self):
        """Clean up after tests"""
        self.env_patcher.stop()

    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.NetworkManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.ComputeManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.DefaultAzureCredential')
    @patch('logging.info')
    def test_azure_init(self, mock_logging, mock_credential, mock_compute, mock_network):
        """Test Azure class initialization"""
        mock_creds = Mock()
        mock_credential.return_value = mock_creds

        azure = Azure()

        mock_credential.assert_called_once()
        mock_compute.assert_called_once()
        mock_network.assert_called_once()
        self.assertIsNotNone(azure.compute_client)
        self.assertIsNotNone(azure.network_client)

    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.NetworkManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.ComputeManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.DefaultAzureCredential')
    def test_get_instance_id_found(self, mock_credential, mock_compute, mock_network):
        """Test get_instance_id when VM is found"""
        azure = Azure()

        # Mock VM
        mock_vm = Mock()
        mock_vm.name = "test-node"
        mock_vm.id = "/subscriptions/sub-id/resourceGroups/test-rg/providers/Microsoft.Compute/virtualMachines/test-node"

        azure.compute_client.virtual_machines.list_all.return_value = [mock_vm]

        vm_name, resource_group = azure.get_instance_id("test-node")

        self.assertEqual(vm_name, "test-node")
        self.assertEqual(resource_group, "test-rg")

    @patch('logging.error')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.NetworkManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.ComputeManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.DefaultAzureCredential')
    def test_get_instance_id_not_found(self, mock_credential, mock_compute, mock_network, mock_logging):
        """Test get_instance_id when VM is not found"""
        azure = Azure()

        azure.compute_client.virtual_machines.list_all.return_value = []

        result = azure.get_instance_id("nonexistent-node")

        self.assertIsNone(result)
        mock_logging.assert_called()
        self.assertIn("Couldn't find vm", str(mock_logging.call_args))

    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.NetworkManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.ComputeManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.DefaultAzureCredential')
    def test_get_network_interface(self, mock_credential, mock_compute, mock_network):
        """Test get_network_interface retrieves network details"""
        azure = Azure()

        # Mock VM with network profile
        mock_vm = Mock()
        mock_nic_ref = Mock()
        mock_nic_ref.id = "/subscriptions/sub-id/resourceGroups/test-rg/providers/Microsoft.Network/networkInterfaces/test-nic"
        mock_vm.network_profile.network_interfaces = [mock_nic_ref]

        # Mock NIC
        mock_nic = Mock()
        mock_nic.location = "eastus"
        mock_ip_config = Mock()
        mock_ip_config.private_ip_address = "10.0.1.5"
        mock_ip_config.subnet.id = "/subscriptions/sub-id/resourceGroups/network-rg/providers/Microsoft.Network/virtualNetworks/test-vnet/subnets/test-subnet"
        mock_nic.ip_configurations = [mock_ip_config]

        azure.compute_client.virtual_machines.get.return_value = mock_vm
        azure.network_client.network_interfaces.get.return_value = mock_nic

        subnet, vnet, ip, net_rg, location = azure.get_network_interface("test-node", "test-rg")

        self.assertEqual(subnet, "test-subnet")
        self.assertEqual(vnet, "test-vnet")
        self.assertEqual(ip, "10.0.1.5")
        self.assertEqual(net_rg, "network-rg")
        self.assertEqual(location, "eastus")
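
        # The expected values follow directly from the subnet id's fixed
        # segment layout: /subscriptions/<sub>/resourceGroups/<net_rg>/
        # providers/Microsoft.Network/virtualNetworks/<vnet>/subnets/<subnet>.
        # A minimal parsing sketch (assumed, not the plugin's verbatim code):
        #
        #   seg = mock_ip_config.subnet.id.split("/")
        #   net_rg, vnet, subnet = seg[4], seg[8], seg[10]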

    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.NetworkManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.ComputeManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.DefaultAzureCredential')
    def test_start_instances_success(self, mock_credential, mock_compute, mock_network, mock_logging):
        """Test start_instances successfully starts VM"""
        azure = Azure()

        mock_operation = Mock()
        azure.compute_client.virtual_machines.begin_start.return_value = mock_operation

        azure.start_instances("test-rg", "test-vm")

        azure.compute_client.virtual_machines.begin_start.assert_called_once_with("test-rg", "test-vm")
        mock_logging.assert_called()
        self.assertIn("started", str(mock_logging.call_args))

    @patch('logging.error')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.NetworkManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.ComputeManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.DefaultAzureCredential')
    def test_start_instances_failure(self, mock_credential, mock_compute, mock_network, mock_logging):
        """Test start_instances handles failure"""
        azure = Azure()

        azure.compute_client.virtual_machines.begin_start.side_effect = Exception("Start failed")

        with self.assertRaises(RuntimeError):
            azure.start_instances("test-rg", "test-vm")

        mock_logging.assert_called()
        self.assertIn("Failed to start", str(mock_logging.call_args))

    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.NetworkManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.ComputeManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.DefaultAzureCredential')
    def test_stop_instances_success(self, mock_credential, mock_compute, mock_network, mock_logging):
        """Test stop_instances successfully stops VM"""
        azure = Azure()

        mock_operation = Mock()
        azure.compute_client.virtual_machines.begin_power_off.return_value = mock_operation

        azure.stop_instances("test-rg", "test-vm")

        azure.compute_client.virtual_machines.begin_power_off.assert_called_once_with("test-rg", "test-vm")
        mock_logging.assert_called()
        self.assertIn("stopped", str(mock_logging.call_args))

    @patch('logging.error')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.NetworkManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.ComputeManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.DefaultAzureCredential')
    def test_stop_instances_failure(self, mock_credential, mock_compute, mock_network, mock_logging):
        """Test stop_instances handles failure"""
        azure = Azure()

        azure.compute_client.virtual_machines.begin_power_off.side_effect = Exception("Stop failed")

        with self.assertRaises(RuntimeError):
            azure.stop_instances("test-rg", "test-vm")

        mock_logging.assert_called()
        self.assertIn("Failed to stop", str(mock_logging.call_args))

    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.NetworkManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.ComputeManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.DefaultAzureCredential')
    def test_terminate_instances_success(self, mock_credential, mock_compute, mock_network, mock_logging):
        """Test terminate_instances successfully deletes VM"""
        azure = Azure()

        mock_operation = Mock()
        azure.compute_client.virtual_machines.begin_delete.return_value = mock_operation

        azure.terminate_instances("test-rg", "test-vm")

        azure.compute_client.virtual_machines.begin_delete.assert_called_once_with("test-rg", "test-vm")
        mock_logging.assert_called()
        self.assertIn("terminated", str(mock_logging.call_args))

    @patch('logging.error')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.NetworkManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.ComputeManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.DefaultAzureCredential')
    def test_terminate_instances_failure(self, mock_credential, mock_compute, mock_network, mock_logging):
        """Test terminate_instances handles failure"""
        azure = Azure()

        azure.compute_client.virtual_machines.begin_delete.side_effect = Exception("Delete failed")

        with self.assertRaises(RuntimeError):
            azure.terminate_instances("test-rg", "test-vm")

        mock_logging.assert_called()
        self.assertIn("Failed to terminate", str(mock_logging.call_args))

    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.NetworkManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.ComputeManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.DefaultAzureCredential')
    def test_reboot_instances_success(self, mock_credential, mock_compute, mock_network, mock_logging):
        """Test reboot_instances successfully reboots VM"""
        azure = Azure()

        mock_operation = Mock()
        azure.compute_client.virtual_machines.begin_restart.return_value = mock_operation

        azure.reboot_instances("test-rg", "test-vm")

        azure.compute_client.virtual_machines.begin_restart.assert_called_once_with("test-rg", "test-vm")
        mock_logging.assert_called()
        self.assertIn("rebooted", str(mock_logging.call_args))

    @patch('logging.error')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.NetworkManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.ComputeManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.DefaultAzureCredential')
    def test_reboot_instances_failure(self, mock_credential, mock_compute, mock_network, mock_logging):
        """Test reboot_instances handles failure"""
        azure = Azure()

        azure.compute_client.virtual_machines.begin_restart.side_effect = Exception("Reboot failed")

        with self.assertRaises(RuntimeError):
            azure.reboot_instances("test-rg", "test-vm")

        mock_logging.assert_called()
        self.assertIn("Failed to reboot", str(mock_logging.call_args))

    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.NetworkManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.ComputeManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.DefaultAzureCredential')
    def test_get_vm_status(self, mock_credential, mock_compute, mock_network):
        """Test get_vm_status returns VM power state"""
        azure = Azure()

        mock_status1 = Mock()
        mock_status1.code = "ProvisioningState/succeeded"
        mock_status2 = Mock()
        mock_status2.code = "PowerState/running"

        mock_instance_view = Mock()
        mock_instance_view.statuses = [mock_status1, mock_status2]
        azure.compute_client.virtual_machines.instance_view.return_value = mock_instance_view

        status = azure.get_vm_status("test-rg", "test-vm")

        self.assertEqual(status.code, "PowerState/running")
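
        # This fixture places the PowerState/* entry after ProvisioningState/*,
        # matching how Azure instance views order statuses; get_vm_status
        # presumably returns that final entry (an assumption inferred from the
        # fixture, not checked against the plugin source).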

    @patch('time.sleep')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.NetworkManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.ComputeManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.DefaultAzureCredential')
    def test_wait_until_running_success(self, mock_credential, mock_compute, mock_network, mock_logging, mock_sleep):
        """Test wait_until_running waits for VM to be running"""
        azure = Azure()

        mock_status_starting = Mock()
        mock_status_starting.code = "PowerState/starting"
        mock_status_running = Mock()
        mock_status_running.code = "PowerState/running"

        mock_instance_view1 = Mock()
        mock_instance_view1.statuses = [Mock(), mock_status_starting]
        mock_instance_view2 = Mock()
        mock_instance_view2.statuses = [Mock(), mock_status_running]

        azure.compute_client.virtual_machines.instance_view.side_effect = [
            mock_instance_view1,
            mock_instance_view2
        ]

        mock_affected_node = Mock(spec=AffectedNode)

        result = azure.wait_until_running("test-rg", "test-vm", 300, mock_affected_node)

        self.assertTrue(result)
        mock_affected_node.set_affected_node_status.assert_called_once()
        args = mock_affected_node.set_affected_node_status.call_args[0]
        self.assertEqual(args[0], "running")
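
        # The wait helpers presumably poll the instance view until the power
        # state matches, sleeping between probes. With time.sleep patched out,
        # the two-step side_effect drives the loop through exactly one retry
        # without slowing the test down.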

    @patch('time.sleep')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.NetworkManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.ComputeManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.DefaultAzureCredential')
    def test_wait_until_running_timeout(self, mock_credential, mock_compute, mock_network, mock_logging, mock_sleep):
        """Test wait_until_running returns False on timeout"""
        azure = Azure()

        mock_status = Mock()
        mock_status.code = "PowerState/starting"
        mock_instance_view = Mock()
        mock_instance_view.statuses = [Mock(), mock_status]

        azure.compute_client.virtual_machines.instance_view.return_value = mock_instance_view

        result = azure.wait_until_running("test-rg", "test-vm", 10, None)

        self.assertFalse(result)

    @patch('time.sleep')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.NetworkManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.ComputeManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.DefaultAzureCredential')
    def test_wait_until_stopped_success(self, mock_credential, mock_compute, mock_network, mock_logging, mock_sleep):
        """Test wait_until_stopped waits for VM to be stopped"""
        azure = Azure()

        mock_status_stopping = Mock()
        mock_status_stopping.code = "PowerState/stopping"
        mock_status_stopped = Mock()
        mock_status_stopped.code = "PowerState/stopped"

        mock_instance_view1 = Mock()
        mock_instance_view1.statuses = [Mock(), mock_status_stopping]
        mock_instance_view2 = Mock()
        mock_instance_view2.statuses = [Mock(), mock_status_stopped]

        azure.compute_client.virtual_machines.instance_view.side_effect = [
            mock_instance_view1,
            mock_instance_view2
        ]

        mock_affected_node = Mock(spec=AffectedNode)

        result = azure.wait_until_stopped("test-rg", "test-vm", 300, mock_affected_node)

        self.assertTrue(result)
        mock_affected_node.set_affected_node_status.assert_called_once()

    @patch('time.sleep')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.NetworkManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.ComputeManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.DefaultAzureCredential')
    def test_wait_until_stopped_timeout(self, mock_credential, mock_compute, mock_network, mock_logging, mock_sleep):
        """Test wait_until_stopped returns False on timeout"""
        azure = Azure()

        mock_status = Mock()
        mock_status.code = "PowerState/stopping"
        mock_instance_view = Mock()
        mock_instance_view.statuses = [Mock(), mock_status]

        azure.compute_client.virtual_machines.instance_view.return_value = mock_instance_view

        result = azure.wait_until_stopped("test-rg", "test-vm", 10, None)

        self.assertFalse(result)

    @patch('time.sleep')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.NetworkManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.ComputeManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.DefaultAzureCredential')
    def test_wait_until_terminated_success(self, mock_credential, mock_compute, mock_network, mock_logging, mock_sleep):
        """Test wait_until_terminated waits for VM deletion"""
        azure = Azure()

        mock_status_deleting = Mock()
        mock_status_deleting.code = "ProvisioningState/deleting"
        mock_instance_view = Mock()
        mock_instance_view.statuses = [mock_status_deleting]

        # First call returns deleting, second raises exception (VM deleted)
        azure.compute_client.virtual_machines.instance_view.side_effect = [
            mock_instance_view,
            Exception("VM not found")
        ]

        mock_affected_node = Mock(spec=AffectedNode)

        result = azure.wait_until_terminated("test-rg", "test-vm", 300, mock_affected_node)

        self.assertTrue(result)
        mock_affected_node.set_affected_node_status.assert_called_once()
        args = mock_affected_node.set_affected_node_status.call_args[0]
        self.assertEqual(args[0], "terminated")
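
        # Note the deletion semantics relied on here: once the VM is gone,
        # instance_view raises instead of returning a view, so the wait helper
        # treats the exception as "terminated" rather than as an error.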

    @patch('time.sleep')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.NetworkManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.ComputeManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.DefaultAzureCredential')
    def test_wait_until_terminated_timeout(self, mock_credential, mock_compute, mock_network, mock_logging, mock_sleep):
        """Test wait_until_terminated returns False on timeout"""
        azure = Azure()

        mock_status = Mock()
        mock_status.code = "ProvisioningState/deleting"
        mock_instance_view = Mock()
        mock_instance_view.statuses = [mock_status]

        azure.compute_client.virtual_machines.instance_view.return_value = mock_instance_view

        result = azure.wait_until_terminated("test-rg", "test-vm", 10, None)

        self.assertFalse(result)

    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.NetworkManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.ComputeManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.DefaultAzureCredential')
    def test_create_security_group(self, mock_credential, mock_compute, mock_network):
        """Test create_security_group creates NSG with deny rules"""
        azure = Azure()

        mock_nsg_result = Mock()
        mock_nsg_result.id = "/subscriptions/sub-id/resourceGroups/test-rg/providers/Microsoft.Network/networkSecurityGroups/chaos"
        mock_operation = Mock()
        mock_operation.result.return_value = mock_nsg_result

        azure.network_client.network_security_groups.begin_create_or_update.return_value = mock_operation

        nsg_id = azure.create_security_group("test-rg", "chaos", "eastus", "10.0.1.5")

        self.assertEqual(nsg_id, mock_nsg_result.id)
        azure.network_client.network_security_groups.begin_create_or_update.assert_called_once()
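
        # A hedged sketch of the NSG payload such a helper plausibly builds
        # (field names follow azure-mgmt-network's dict serialization; the
        # exact rule set is an assumption, not pulled from the plugin):
        #
        #   parameters = {
        #       "location": "eastus",
        #       "security_rules": [{
        #           "name": "deny-all-out",
        #           "access": "Deny", "direction": "Outbound", "priority": 100,
        #           "protocol": "*",
        #           "source_address_prefix": "10.0.1.5",
        #           "destination_address_prefix": "*",
        #           "source_port_range": "*", "destination_port_range": "*",
        #       }],
        #   }
        #   azure.network_client.network_security_groups.begin_create_or_update(
        #       "test-rg", "chaos", parameters)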

    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.NetworkManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.ComputeManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.DefaultAzureCredential')
    def test_delete_security_group(self, mock_credential, mock_compute, mock_network):
        """Test delete_security_group deletes NSG"""
        azure = Azure()

        mock_operation = Mock()
        mock_operation.result.return_value = None
        azure.network_client.network_security_groups.begin_delete.return_value = mock_operation

        azure.delete_security_group("test-rg", "chaos")

        azure.network_client.network_security_groups.begin_delete.assert_called_once_with("test-rg", "chaos")

    @patch('builtins.print')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.NetworkManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.ComputeManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.DefaultAzureCredential')
    def test_delete_security_group_with_result(self, mock_credential, mock_compute, mock_network, mock_print):
        """Test delete_security_group deletes NSG with non-None result"""
        azure = Azure()

        mock_result = Mock()
        mock_result.as_dict.return_value = {"id": "/test-nsg-id", "name": "chaos"}
        mock_operation = Mock()
        mock_operation.result.return_value = mock_result
        azure.network_client.network_security_groups.begin_delete.return_value = mock_operation

        azure.delete_security_group("test-rg", "chaos")

        azure.network_client.network_security_groups.begin_delete.assert_called_once_with("test-rg", "chaos")
        mock_result.as_dict.assert_called_once()
        mock_print.assert_called_once_with({"id": "/test-nsg-id", "name": "chaos"})

    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.NetworkManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.ComputeManagementClient')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.DefaultAzureCredential')
    def test_update_subnet(self, mock_credential, mock_compute, mock_network):
        """Test update_subnet updates subnet NSG"""
        azure = Azure()

        # Mock existing subnet
        mock_old_nsg = Mock()
        mock_old_nsg.id = "/old-nsg-id"
        mock_subnet = Mock()
        mock_subnet.network_security_group = mock_old_nsg

        azure.network_client.subnets.get.return_value = mock_subnet

        old_nsg = azure.update_subnet("/new-nsg-id", "test-rg", "test-subnet", "test-vnet")

        self.assertEqual(old_nsg, "/old-nsg-id")
        azure.network_client.subnets.begin_create_or_update.assert_called_once()


class TestAzureNodeScenarios(unittest.TestCase):
    """Test suite for azure_node_scenarios class"""

    def setUp(self):
        """Set up test fixtures"""
        self.env_patcher = patch.dict('os.environ', {'AZURE_SUBSCRIPTION_ID': 'test-subscription-id'})
        self.env_patcher.start()

        self.mock_kubecli = Mock(spec=KrknKubernetes)
        self.affected_nodes_status = AffectedNodeStatus()

    def tearDown(self):
        """Clean up after tests"""
        self.env_patcher.stop()

    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.Azure')
    def test_init(self, mock_azure_class, mock_logging):
        """Test azure_node_scenarios initialization"""
        mock_azure_instance = Mock()
        mock_azure_class.return_value = mock_azure_instance

        scenarios = azure_node_scenarios(self.mock_kubecli, True, self.affected_nodes_status)

        self.assertEqual(scenarios.kubecli, self.mock_kubecli)
        self.assertTrue(scenarios.node_action_kube_check)
        self.assertEqual(scenarios.azure, mock_azure_instance)

    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.nodeaction')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.Azure')
    def test_node_start_scenario_success(self, mock_azure_class, mock_logging, mock_nodeaction):
        """Test node_start_scenario successfully starts node"""
        mock_azure = Mock()
        mock_azure_class.return_value = mock_azure
        mock_azure.get_instance_id.return_value = ("test-vm", "test-rg")
        mock_azure.wait_until_running.return_value = True

        scenarios = azure_node_scenarios(self.mock_kubecli, True, self.affected_nodes_status)

        scenarios.node_start_scenario(1, "test-node", 300, 15)

        mock_azure.get_instance_id.assert_called_once_with("test-node")
        mock_azure.start_instances.assert_called_once_with("test-rg", "test-vm")
        mock_azure.wait_until_running.assert_called_once()
        mock_nodeaction.wait_for_ready_status.assert_called_once()
        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 1)

    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.nodeaction')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.Azure')
    def test_node_start_scenario_no_kube_check(self, mock_azure_class, mock_logging, mock_nodeaction):
        """Test node_start_scenario without Kubernetes check"""
        mock_azure = Mock()
        mock_azure_class.return_value = mock_azure
        mock_azure.get_instance_id.return_value = ("test-vm", "test-rg")
        mock_azure.wait_until_running.return_value = True

        scenarios = azure_node_scenarios(self.mock_kubecli, False, self.affected_nodes_status)

        scenarios.node_start_scenario(1, "test-node", 300, 15)

        mock_azure.start_instances.assert_called_once()
        mock_nodeaction.wait_for_ready_status.assert_not_called()

    @patch('logging.error')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.Azure')
    def test_node_start_scenario_failure(self, mock_azure_class, mock_logging):
        """Test node_start_scenario handles failure"""
        mock_azure = Mock()
        mock_azure_class.return_value = mock_azure
        mock_azure.get_instance_id.return_value = ("test-vm", "test-rg")
        mock_azure.start_instances.side_effect = Exception("Start failed")

        scenarios = azure_node_scenarios(self.mock_kubecli, False, self.affected_nodes_status)

        with self.assertRaises(RuntimeError):
            scenarios.node_start_scenario(1, "test-node", 300, 15)

        mock_logging.assert_called()
        # Check that failure was logged (either specific or general injection failed message)
        call_str = str(mock_logging.call_args)
        self.assertTrue("Failed to start" in call_str or "injection failed" in call_str)

    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.nodeaction')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.Azure')
    def test_node_start_scenario_multiple_runs(self, mock_azure_class, mock_logging, mock_nodeaction):
        """Test node_start_scenario with multiple runs"""
        mock_azure = Mock()
        mock_azure_class.return_value = mock_azure
        mock_azure.get_instance_id.return_value = ("test-vm", "test-rg")
        mock_azure.wait_until_running.return_value = True

        scenarios = azure_node_scenarios(self.mock_kubecli, True, self.affected_nodes_status)

        scenarios.node_start_scenario(3, "test-node", 300, 15)

        self.assertEqual(mock_azure.start_instances.call_count, 3)
        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 3)

    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.nodeaction')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.Azure')
    def test_node_stop_scenario_success(self, mock_azure_class, mock_logging, mock_nodeaction):
        """Test node_stop_scenario successfully stops node"""
        mock_azure = Mock()
        mock_azure_class.return_value = mock_azure
        mock_azure.get_instance_id.return_value = ("test-vm", "test-rg")
        mock_azure.wait_until_stopped.return_value = True

        scenarios = azure_node_scenarios(self.mock_kubecli, True, self.affected_nodes_status)

        scenarios.node_stop_scenario(1, "test-node", 300, 15)

        mock_azure.get_instance_id.assert_called_once_with("test-node")
        mock_azure.stop_instances.assert_called_once_with("test-rg", "test-vm")
        mock_azure.wait_until_stopped.assert_called_once()
        mock_nodeaction.wait_for_unknown_status.assert_called_once()
        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 1)

    @patch('logging.error')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.Azure')
    def test_node_stop_scenario_failure(self, mock_azure_class, mock_logging):
        """Test node_stop_scenario handles failure"""
        mock_azure = Mock()
        mock_azure_class.return_value = mock_azure
        mock_azure.get_instance_id.return_value = ("test-vm", "test-rg")
        mock_azure.stop_instances.side_effect = Exception("Stop failed")

        scenarios = azure_node_scenarios(self.mock_kubecli, False, self.affected_nodes_status)

        with self.assertRaises(RuntimeError):
            scenarios.node_stop_scenario(1, "test-node", 300, 15)

        mock_logging.assert_called()
        # Check that failure was logged
        call_str = str(mock_logging.call_args)
        self.assertTrue("Failed to stop" in call_str or "injection failed" in call_str)

    @patch('time.sleep')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.Azure')
    def test_node_termination_scenario_success(self, mock_azure_class, mock_logging, mock_sleep):
        """Test node_termination_scenario successfully terminates node"""
        mock_azure = Mock()
        mock_azure_class.return_value = mock_azure
        mock_azure.get_instance_id.return_value = ("test-vm", "test-rg")
        mock_azure.wait_until_terminated.return_value = True

        self.mock_kubecli.list_nodes.return_value = ["other-node"]

        scenarios = azure_node_scenarios(self.mock_kubecli, False, self.affected_nodes_status)

        scenarios.node_termination_scenario(1, "test-node", 300, 15)

        mock_azure.terminate_instances.assert_called_once_with("test-rg", "test-vm")
        mock_azure.wait_until_terminated.assert_called_once()
        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 1)

    @patch('time.sleep')
    @patch('logging.error')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.Azure')
    def test_node_termination_scenario_node_still_exists(self, mock_azure_class, mock_logging, mock_sleep):
        """Test node_termination_scenario when node still exists after timeout"""
        mock_azure = Mock()
        mock_azure_class.return_value = mock_azure
        mock_azure.get_instance_id.return_value = ("test-vm", "test-rg")
        mock_azure.wait_until_terminated.return_value = True

        # Node still in list after termination attempt
        self.mock_kubecli.list_nodes.return_value = ["test-vm", "other-node"]

        scenarios = azure_node_scenarios(self.mock_kubecli, False, self.affected_nodes_status)

        with self.assertRaises(RuntimeError):
            scenarios.node_termination_scenario(1, "test-node", 5, 15)

        mock_logging.assert_called()
        # Check that failure was logged
        call_str = str(mock_logging.call_args)
        self.assertTrue("Failed to terminate" in call_str or "injection failed" in call_str)

    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.nodeaction')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.Azure')
    def test_node_reboot_scenario_success(self, mock_azure_class, mock_logging, mock_nodeaction):
        """Test node_reboot_scenario successfully reboots node"""
        mock_azure = Mock()
        mock_azure_class.return_value = mock_azure
        mock_azure.get_instance_id.return_value = ("test-vm", "test-rg")

        scenarios = azure_node_scenarios(self.mock_kubecli, True, self.affected_nodes_status)

        scenarios.node_reboot_scenario(1, "test-node", 300, soft_reboot=False)

        mock_azure.reboot_instances.assert_called_once_with("test-rg", "test-vm")
        mock_nodeaction.wait_for_ready_status.assert_called_once()
        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 1)

    @patch('logging.error')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.Azure')
    def test_node_reboot_scenario_failure(self, mock_azure_class, mock_logging):
        """Test node_reboot_scenario handles failure"""
        mock_azure = Mock()
        mock_azure_class.return_value = mock_azure
        mock_azure.get_instance_id.return_value = ("test-vm", "test-rg")
        mock_azure.reboot_instances.side_effect = Exception("Reboot failed")

        scenarios = azure_node_scenarios(self.mock_kubecli, False, self.affected_nodes_status)

        with self.assertRaises(RuntimeError):
            scenarios.node_reboot_scenario(1, "test-node", 300)

        mock_logging.assert_called()
        # Check that failure was logged
        call_str = str(mock_logging.call_args)
        self.assertTrue("Failed to reboot" in call_str or "injection failed" in call_str)

    @patch('time.sleep')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.Azure')
    def test_node_block_scenario_success(self, mock_azure_class, mock_logging, mock_sleep):
        """Test node_block_scenario successfully blocks and unblocks node"""
        mock_azure = Mock()
        mock_azure_class.return_value = mock_azure
        mock_azure.get_instance_id.return_value = ("test-vm", "test-rg")
        mock_azure.get_network_interface.return_value = (
            "test-subnet", "test-vnet", "10.0.1.5", "network-rg", "eastus"
        )
        mock_azure.create_security_group.return_value = "/new-nsg-id"
        mock_azure.update_subnet.return_value = "/old-nsg-id"

        scenarios = azure_node_scenarios(self.mock_kubecli, False, self.affected_nodes_status)

        scenarios.node_block_scenario(1, "test-node", 300, 60)

        mock_azure.create_security_group.assert_called_once()
        # Should be called twice: once to apply block, once to remove
        self.assertEqual(mock_azure.update_subnet.call_count, 2)
        mock_azure.delete_security_group.assert_called_once()
        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 1)

    @patch('time.sleep')
    @patch('logging.error')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.Azure')
    def test_node_block_scenario_failure(self, mock_azure_class, mock_logging, mock_sleep):
        """Test node_block_scenario handles failure"""
        mock_azure = Mock()
        mock_azure_class.return_value = mock_azure
        mock_azure.get_instance_id.return_value = ("test-vm", "test-rg")
        mock_azure.get_network_interface.side_effect = Exception("Network error")

        scenarios = azure_node_scenarios(self.mock_kubecli, False, self.affected_nodes_status)

        with self.assertRaises(RuntimeError):
            scenarios.node_block_scenario(1, "test-node", 300, 60)

        mock_logging.assert_called()
        # Check that failure was logged
        call_str = str(mock_logging.call_args)
        self.assertTrue("Failed to block" in call_str or "injection failed" in call_str)

    @patch('time.sleep')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.az_node_scenarios.Azure')
    def test_node_block_scenario_duration_timing(self, mock_azure_class, mock_logging, mock_sleep):
        """Test node_block_scenario waits for specified duration"""
        mock_azure = Mock()
        mock_azure_class.return_value = mock_azure
        mock_azure.get_instance_id.return_value = ("test-vm", "test-rg")
        mock_azure.get_network_interface.return_value = (
            "test-subnet", "test-vnet", "10.0.1.5", "network-rg", "eastus"
        )
        mock_azure.create_security_group.return_value = "/new-nsg-id"
        mock_azure.update_subnet.return_value = "/old-nsg-id"

        scenarios = azure_node_scenarios(self.mock_kubecli, False, self.affected_nodes_status)

        scenarios.node_block_scenario(1, "test-node", 300, 120)

        # Verify sleep was called with the correct duration
        mock_sleep.assert_called_with(120)


if __name__ == "__main__":
    unittest.main()
@@ -1,476 +0,0 @@
#!/usr/bin/env python3

"""
Test suite for common_node_functions module

Usage:
    python -m coverage run -a -m unittest tests/test_common_node_functions.py -v

Assisted By: Claude Code
"""

import unittest
from unittest.mock import MagicMock, Mock, patch, call
import logging

from krkn_lib.k8s import KrknKubernetes
from krkn_lib.models.k8s import AffectedNode

from krkn.scenario_plugins.node_actions import common_node_functions


class TestCommonNodeFunctions(unittest.TestCase):

    def setUp(self):
        """
        Set up test fixtures before each test
        """
        self.mock_kubecli = Mock(spec=KrknKubernetes)
        self.mock_affected_node = Mock(spec=AffectedNode)

    def test_get_node_by_name_all_nodes_exist(self):
        """
        Test get_node_by_name returns list when all nodes exist
        """
        node_name_list = ["node1", "node2", "node3"]
        self.mock_kubecli.list_killable_nodes.return_value = ["node1", "node2", "node3", "node4"]

        result = common_node_functions.get_node_by_name(node_name_list, self.mock_kubecli)

        self.assertEqual(result, node_name_list)
        self.mock_kubecli.list_killable_nodes.assert_called_once()

    def test_get_node_by_name_single_node(self):
        """
        Test get_node_by_name with single node
        """
        node_name_list = ["worker-1"]
        self.mock_kubecli.list_killable_nodes.return_value = ["worker-1", "worker-2"]

        result = common_node_functions.get_node_by_name(node_name_list, self.mock_kubecli)

        self.assertEqual(result, node_name_list)

    @patch('logging.info')
    def test_get_node_by_name_node_not_exist(self, mock_logging):
        """
        Test get_node_by_name returns None when node doesn't exist
        """
        node_name_list = ["node1", "nonexistent-node"]
        self.mock_kubecli.list_killable_nodes.return_value = ["node1", "node2", "node3"]

        result = common_node_functions.get_node_by_name(node_name_list, self.mock_kubecli)

        self.assertIsNone(result)
        mock_logging.assert_called()
        self.assertIn("does not exist", str(mock_logging.call_args))

    @patch('logging.info')
    def test_get_node_by_name_empty_killable_list(self, mock_logging):
        """
        Test get_node_by_name when no killable nodes exist
        """
        node_name_list = ["node1"]
        self.mock_kubecli.list_killable_nodes.return_value = []

        result = common_node_functions.get_node_by_name(node_name_list, self.mock_kubecli)

        self.assertIsNone(result)
        mock_logging.assert_called()

    @patch('logging.info')
    def test_get_node_single_label_selector(self, mock_logging):
        """
        Test get_node with single label selector
        """
        label_selector = "node-role.kubernetes.io/worker"
        instance_kill_count = 2
        self.mock_kubecli.list_killable_nodes.return_value = ["worker-1", "worker-2", "worker-3"]

        result = common_node_functions.get_node(label_selector, instance_kill_count, self.mock_kubecli)

        self.assertEqual(len(result), 2)
        self.assertTrue(all(node in ["worker-1", "worker-2", "worker-3"] for node in result))
        self.mock_kubecli.list_killable_nodes.assert_called_once_with(label_selector)
        mock_logging.assert_called()

    @patch('logging.info')
    def test_get_node_multiple_label_selectors(self, mock_logging):
        """
        Test get_node with multiple comma-separated label selectors
        """
        label_selector = "node-role.kubernetes.io/worker,topology.kubernetes.io/zone=us-east-1a"
        instance_kill_count = 3
        self.mock_kubecli.list_killable_nodes.side_effect = [
            ["worker-1", "worker-2"],
            ["worker-3", "worker-4"]
        ]

        result = common_node_functions.get_node(label_selector, instance_kill_count, self.mock_kubecli)

        self.assertEqual(len(result), 3)
        self.assertTrue(all(node in ["worker-1", "worker-2", "worker-3", "worker-4"] for node in result))
        self.assertEqual(self.mock_kubecli.list_killable_nodes.call_count, 2)
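
        # get_node is expected to split the comma-separated selector and query
        # list_killable_nodes once per selector (hence call_count == 2),
        # pooling the results before sampling instance_kill_count nodes from
        # the combined list.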

    @patch('logging.info')
    def test_get_node_return_all_when_count_equals_total(self, mock_logging):
        """
        Test get_node returns all nodes when instance_kill_count equals number of nodes
        """
        label_selector = "node-role.kubernetes.io/worker"
        nodes = ["worker-1", "worker-2", "worker-3"]
        instance_kill_count = 3
        self.mock_kubecli.list_killable_nodes.return_value = nodes

        result = common_node_functions.get_node(label_selector, instance_kill_count, self.mock_kubecli)

        self.assertEqual(result, nodes)

    @patch('logging.info')
    def test_get_node_return_all_when_count_is_zero(self, mock_logging):
        """
        Test get_node returns all nodes when instance_kill_count is 0
        """
        label_selector = "node-role.kubernetes.io/worker"
        nodes = ["worker-1", "worker-2", "worker-3"]
        instance_kill_count = 0
        self.mock_kubecli.list_killable_nodes.return_value = nodes

        result = common_node_functions.get_node(label_selector, instance_kill_count, self.mock_kubecli)

        self.assertEqual(result, nodes)

    @patch('logging.info')
    @patch('random.randint')
    def test_get_node_random_selection(self, mock_randint, mock_logging):
        """
        Test get_node randomly selects nodes when count is less than total
        """
        label_selector = "node-role.kubernetes.io/worker"
        instance_kill_count = 2
        self.mock_kubecli.list_killable_nodes.return_value = ["worker-1", "worker-2", "worker-3", "worker-4"]
        # Mock random selection to return predictable values
        mock_randint.side_effect = [1, 0]  # Select index 1, then index 0

        result = common_node_functions.get_node(label_selector, instance_kill_count, self.mock_kubecli)

        self.assertEqual(len(result), 2)
        # Verify nodes were removed after selection to avoid duplicates
        self.assertEqual(len(set(result)), 2)
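
        # Selection-without-replacement sketch matching the randint
        # side_effect above (assumed shape of the helper, hedged):
        #
        #   nodes = ["worker-1", "worker-2", "worker-3", "worker-4"]
        #   picked = []
        #   for _ in range(2):
        #       i = random.randint(0, len(nodes) - 1)
        #       picked.append(nodes.pop(i))   # pop prevents duplicates
        #
        # With randint returning 1 then 0, picked == ["worker-2", "worker-1"].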
|
||||
|
||||
def test_get_node_no_nodes_with_label(self):
|
||||
"""
|
||||
Test get_node raises exception when no nodes match label selector
|
||||
"""
|
||||
label_selector = "nonexistent-label"
|
||||
instance_kill_count = 1
|
||||
self.mock_kubecli.list_killable_nodes.return_value = []
|
||||
|
||||
with self.assertRaises(Exception) as context:
|
||||
common_node_functions.get_node(label_selector, instance_kill_count, self.mock_kubecli)
|
||||
|
||||
self.assertIn("Ready nodes with the provided label selector do not exist", str(context.exception))
|
||||
|
||||
def test_get_node_single_node_available(self):
|
||||
"""
|
||||
Test get_node when only one node is available
|
||||
"""
|
||||
label_selector = "node-role.kubernetes.io/master"
|
||||
instance_kill_count = 1
|
||||
self.mock_kubecli.list_killable_nodes.return_value = ["master-1"]
|
||||
|
||||
result = common_node_functions.get_node(label_selector, instance_kill_count, self.mock_kubecli)
|
||||
|
||||
self.assertEqual(result, ["master-1"])
|
||||
|
||||
def test_wait_for_ready_status_without_affected_node(self):
|
||||
"""
|
||||
Test wait_for_ready_status without providing affected_node
|
||||
"""
|
||||
node = "test-node"
|
||||
timeout = 300
|
||||
expected_affected_node = Mock(spec=AffectedNode)
|
||||
self.mock_kubecli.watch_node_status.return_value = expected_affected_node
|
||||
|
||||
result = common_node_functions.wait_for_ready_status(node, timeout, self.mock_kubecli)
|
||||
|
||||
self.assertEqual(result, expected_affected_node)
|
||||
self.mock_kubecli.watch_node_status.assert_called_once_with(node, "True", timeout, None)
|
||||
|
||||
def test_wait_for_ready_status_with_affected_node(self):
|
||||
"""
|
||||
Test wait_for_ready_status with provided affected_node
|
||||
"""
|
||||
node = "test-node"
|
||||
timeout = 300
|
||||
self.mock_kubecli.watch_node_status.return_value = self.mock_affected_node
|
||||
|
||||
result = common_node_functions.wait_for_ready_status(
|
||||
node, timeout, self.mock_kubecli, self.mock_affected_node
|
||||
)
|
||||
|
||||
self.assertEqual(result, self.mock_affected_node)
|
||||
self.mock_kubecli.watch_node_status.assert_called_once_with(
|
||||
node, "True", timeout, self.mock_affected_node
|
||||
)
|
||||
|
||||
def test_wait_for_not_ready_status_without_affected_node(self):
|
||||
"""
|
||||
Test wait_for_not_ready_status without providing affected_node
|
||||
"""
|
||||
node = "test-node"
|
||||
timeout = 300
|
||||
expected_affected_node = Mock(spec=AffectedNode)
|
||||
self.mock_kubecli.watch_node_status.return_value = expected_affected_node
|
||||
|
||||
result = common_node_functions.wait_for_not_ready_status(node, timeout, self.mock_kubecli)
|
||||
|
||||
self.assertEqual(result, expected_affected_node)
|
||||
self.mock_kubecli.watch_node_status.assert_called_once_with(node, "False", timeout, None)
|
||||
|
||||
def test_wait_for_not_ready_status_with_affected_node(self):
|
||||
"""
|
||||
Test wait_for_not_ready_status with provided affected_node
|
||||
"""
|
||||
node = "test-node"
|
||||
timeout = 300
|
||||
self.mock_kubecli.watch_node_status.return_value = self.mock_affected_node
|
||||
|
||||
result = common_node_functions.wait_for_not_ready_status(
|
||||
node, timeout, self.mock_kubecli, self.mock_affected_node
|
||||
)
|
||||
|
||||
self.assertEqual(result, self.mock_affected_node)
|
||||
self.mock_kubecli.watch_node_status.assert_called_once_with(
|
||||
node, "False", timeout, self.mock_affected_node
|
||||
)
|
||||
|
||||
def test_wait_for_unknown_status_without_affected_node(self):
|
||||
"""
|
||||
Test wait_for_unknown_status without providing affected_node
|
||||
"""
|
||||
node = "test-node"
|
||||
timeout = 300
|
||||
expected_affected_node = Mock(spec=AffectedNode)
|
||||
self.mock_kubecli.watch_node_status.return_value = expected_affected_node
|
||||
|
||||
result = common_node_functions.wait_for_unknown_status(node, timeout, self.mock_kubecli)
|
||||
|
||||
self.assertEqual(result, expected_affected_node)
|
||||
self.mock_kubecli.watch_node_status.assert_called_once_with(node, "Unknown", timeout, None)
|
||||
|
||||
def test_wait_for_unknown_status_with_affected_node(self):
|
||||
"""
|
||||
Test wait_for_unknown_status with provided affected_node
|
||||
"""
|
||||
node = "test-node"
|
||||
timeout = 300
|
||||
self.mock_kubecli.watch_node_status.return_value = self.mock_affected_node
|
||||
|
||||
result = common_node_functions.wait_for_unknown_status(
|
||||
node, timeout, self.mock_kubecli, self.mock_affected_node
|
||||
)
|
||||
|
||||
self.assertEqual(result, self.mock_affected_node)
|
||||
self.mock_kubecli.watch_node_status.assert_called_once_with(
|
||||
node, "Unknown", timeout, self.mock_affected_node
|
||||
)
|
||||
|
||||
@patch('time.sleep')
|
||||
@patch('logging.info')
|
||||
@patch('krkn.scenario_plugins.node_actions.common_node_functions.paramiko.SSHClient')
|
||||
def test_check_service_status_success(self, mock_ssh_client, mock_logging, mock_sleep):
|
||||
"""
|
||||
Test check_service_status successfully checks service status
|
||||
"""
|
||||
node = "192.168.1.100"
|
||||
service = ["neutron-server", "nova-compute"]
|
||||
ssh_private_key = "~/.ssh/id_rsa"
|
||||
timeout = 60
|
||||
|
||||
# Mock SSH client
|
||||
mock_ssh = Mock()
|
||||
mock_ssh_client.return_value = mock_ssh
|
||||
mock_ssh.connect.return_value = None
|
||||
|
||||
# Mock exec_command to return active status
|
||||
mock_stdout = Mock()
|
||||
mock_stdout.readlines.return_value = ["active\n"]
|
||||
mock_ssh.exec_command.return_value = (Mock(), mock_stdout, Mock())
|
||||
|
||||
        common_node_functions.check_service_status(node, service, ssh_private_key, timeout)

        # Verify SSH connection was attempted
        mock_ssh.connect.assert_called()
        # Verify service status was checked for each service
        self.assertEqual(mock_ssh.exec_command.call_count, 2)
        # Verify SSH connection was closed
        mock_ssh.close.assert_called_once()

    @patch('time.sleep')
    @patch('logging.error')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.common_node_functions.paramiko.SSHClient')
    def test_check_service_status_service_inactive(self, mock_ssh_client, mock_logging_info, mock_logging_error, mock_sleep):
        """
        Test check_service_status logs error when service is inactive
        """
        node = "192.168.1.100"
        service = ["neutron-server"]
        ssh_private_key = "~/.ssh/id_rsa"
        timeout = 60

        # Mock SSH client
        mock_ssh = Mock()
        mock_ssh_client.return_value = mock_ssh
        mock_ssh.connect.return_value = None

        # Mock exec_command to return inactive status
        mock_stdout = Mock()
        mock_stdout.readlines.return_value = ["inactive\n"]
        mock_ssh.exec_command.return_value = (Mock(), mock_stdout, Mock())

        common_node_functions.check_service_status(node, service, ssh_private_key, timeout)

        # Verify error was logged for inactive service
        mock_logging_error.assert_called()
        error_call_str = str(mock_logging_error.call_args)
        self.assertIn("inactive", error_call_str)
        mock_ssh.close.assert_called_once()

    @patch('time.sleep')
    @patch('logging.error')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.common_node_functions.paramiko.SSHClient')
    def test_check_service_status_ssh_connection_fails(self, mock_ssh_client, mock_logging_info, mock_logging_error, mock_sleep):
        """
        Test check_service_status handles SSH connection failures
        """
        node = "192.168.1.100"
        service = ["neutron-server"]
        ssh_private_key = "~/.ssh/id_rsa"
        timeout = 5

        # Mock SSH client to raise exception
        mock_ssh = Mock()
        mock_ssh_client.return_value = mock_ssh
        mock_ssh.connect.side_effect = Exception("Connection timeout")

        # Mock exec_command for when connection eventually works (or doesn't)
        mock_stdout = Mock()
        mock_stdout.readlines.return_value = ["active\n"]
        mock_ssh.exec_command.return_value = (Mock(), mock_stdout, Mock())

        common_node_functions.check_service_status(node, service, ssh_private_key, timeout)

        # Verify error was logged for SSH connection failure
        mock_logging_error.assert_called()
        error_call_str = str(mock_logging_error.call_args)
        self.assertIn("Failed to ssh", error_call_str)

    @patch('time.sleep')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.common_node_functions.paramiko.SSHClient')
    def test_check_service_status_multiple_services(self, mock_ssh_client, mock_logging, mock_sleep):
        """
        Test check_service_status with multiple services
        """
        node = "192.168.1.100"
        service = ["service1", "service2", "service3"]
        ssh_private_key = "~/.ssh/id_rsa"
        timeout = 60

        # Mock SSH client
        mock_ssh = Mock()
        mock_ssh_client.return_value = mock_ssh
        mock_ssh.connect.return_value = None

        # Mock exec_command to return active status
        mock_stdout = Mock()
        mock_stdout.readlines.return_value = ["active\n"]
        mock_ssh.exec_command.return_value = (Mock(), mock_stdout, Mock())

        common_node_functions.check_service_status(node, service, ssh_private_key, timeout)

        # Verify service status was checked for all services
        self.assertEqual(mock_ssh.exec_command.call_count, 3)
        mock_ssh.close.assert_called_once()

    @patch('time.sleep')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.common_node_functions.paramiko.SSHClient')
    def test_check_service_status_retry_logic(self, mock_ssh_client, mock_logging, mock_sleep):
        """
        Test check_service_status retry logic on connection failure then success
        """
        node = "192.168.1.100"
        service = ["neutron-server"]
        ssh_private_key = "~/.ssh/id_rsa"
        timeout = 10

        # Mock SSH client
        mock_ssh = Mock()
        mock_ssh_client.return_value = mock_ssh
        # First two attempts fail, third succeeds
        mock_ssh.connect.side_effect = [
            Exception("Timeout"),
            Exception("Timeout"),
            None  # Success
        ]
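        # Mock consumes side_effect entries one per call, so the third
        # connect() attempt returns None and is treated as a success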

        # Mock exec_command
        mock_stdout = Mock()
        mock_stdout.readlines.return_value = ["active\n"]
        mock_ssh.exec_command.return_value = (Mock(), mock_stdout, Mock())

        common_node_functions.check_service_status(node, service, ssh_private_key, timeout)

        # Verify multiple connection attempts were made
        self.assertGreater(mock_ssh.connect.call_count, 1)
        # Verify service was eventually checked
        mock_ssh.exec_command.assert_called()
        mock_ssh.close.assert_called_once()


class TestCommonNodeFunctionsIntegration(unittest.TestCase):
    """Integration-style tests for common_node_functions"""

    def setUp(self):
        """Set up test fixtures"""
        self.mock_kubecli = Mock(spec=KrknKubernetes)

    @patch('logging.info')
    def test_get_node_workflow_with_label_filtering(self, mock_logging):
        """
        Test complete workflow of getting nodes with label selector and filtering
        """
        label_selector = "node-role.kubernetes.io/worker"
        instance_kill_count = 2
        available_nodes = ["worker-1", "worker-2", "worker-3", "worker-4", "worker-5"]
        self.mock_kubecli.list_killable_nodes.return_value = available_nodes

        result = common_node_functions.get_node(label_selector, instance_kill_count, self.mock_kubecli)

        self.assertEqual(len(result), 2)
        # Verify no duplicates
        self.assertEqual(len(result), len(set(result)))
        # Verify all nodes are from the available list
        self.assertTrue(all(node in available_nodes for node in result))

    @patch('logging.info')
    def test_get_node_by_name_validation_workflow(self, mock_logging):
        """
        Test complete workflow of validating node names
        """
        requested_nodes = ["node-a", "node-b"]
        killable_nodes = ["node-a", "node-b", "node-c", "node-d"]
        self.mock_kubecli.list_killable_nodes.return_value = killable_nodes

        result = common_node_functions.get_node_by_name(requested_nodes, self.mock_kubecli)

        self.assertEqual(result, requested_nodes)
        self.mock_kubecli.list_killable_nodes.assert_called_once()


if __name__ == "__main__":
    unittest.main()

@@ -1,782 +0,0 @@

#!/usr/bin/env python3

"""
Test suite for GCP node scenarios

This test suite covers both the GCP class and gcp_node_scenarios class
using mocks to avoid actual GCP API calls.

Usage:
    python -m coverage run -a -m unittest tests/test_gcp_node_scenarios.py -v

Assisted By: Claude Code
"""

import unittest
import sys
from unittest.mock import MagicMock, patch

# Mock external dependencies before any imports that use them
# Create proper nested mock structure for google modules
mock_google = MagicMock()
mock_google_auth = MagicMock()
mock_google_auth_transport = MagicMock()
mock_google_cloud = MagicMock()
mock_google_cloud_compute = MagicMock()

sys.modules['google'] = mock_google
sys.modules['google.auth'] = mock_google_auth
sys.modules['google.auth.transport'] = mock_google_auth_transport
sys.modules['google.auth.transport.requests'] = MagicMock()
sys.modules['google.cloud'] = mock_google_cloud
sys.modules['google.cloud.compute_v1'] = mock_google_cloud_compute
sys.modules['paramiko'] = MagicMock()
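# Registering these stubs in sys.modules before the krkn imports below ensures
# that 'google.*' and 'paramiko' resolve to the mocks at import time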

from krkn_lib.k8s import KrknKubernetes
from krkn_lib.models.k8s import AffectedNode, AffectedNodeStatus
from krkn.scenario_plugins.node_actions.gcp_node_scenarios import GCP, gcp_node_scenarios


class TestGCP(unittest.TestCase):
    """Test cases for GCP class"""

    def setUp(self):
        """Set up test fixtures"""
        # Mock google.auth before creating GCP instance
        self.auth_patcher = patch('krkn.scenario_plugins.node_actions.gcp_node_scenarios.google.auth.default')
        self.compute_patcher = patch('krkn.scenario_plugins.node_actions.gcp_node_scenarios.compute_v1.InstancesClient')

        self.mock_auth = self.auth_patcher.start()
        self.mock_compute_client = self.compute_patcher.start()

        # Configure auth mock to return credentials and project_id
        mock_credentials = MagicMock()
        self.mock_auth.return_value = (mock_credentials, 'test-project-123')
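        # google.auth.default() returns a (credentials, project_id) tuple,
        # which GCP.__init__ is expected to unpack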

        # Create GCP instance with mocked dependencies
        self.gcp = GCP()

    def tearDown(self):
        """Clean up after tests"""
        self.auth_patcher.stop()
        self.compute_patcher.stop()

    def test_gcp_init_success(self):
        """Test GCP class initialization success"""
        self.assertEqual(self.gcp.project_id, 'test-project-123')
        self.assertIsNotNone(self.gcp.instance_client)

    def test_gcp_init_failure(self):
        """Test GCP class initialization failure"""
        with patch('krkn.scenario_plugins.node_actions.gcp_node_scenarios.google.auth.default', side_effect=Exception("Auth error")):
            with self.assertRaises(Exception):
                GCP()

    def test_get_node_instance_success(self):
        """Test getting node instance successfully"""
        # Create mock instance
        mock_instance = MagicMock()
        mock_instance.name = 'gke-cluster-node-1'

        # Create mock response
        mock_response = MagicMock()
        mock_response.instances = [mock_instance]

        # Mock aggregated_list to return our mock data
        self.gcp.instance_client.aggregated_list = MagicMock(
            return_value=[('zones/us-central1-a', mock_response)]
        )
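        # aggregated_list yields (zone, scoped_list) pairs, mirroring the
        # shape of the real compute_v1 API response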

        result = self.gcp.get_node_instance('gke-cluster-node-1')

        self.assertEqual(result, mock_instance)
        self.assertEqual(result.name, 'gke-cluster-node-1')

    def test_get_node_instance_partial_match(self):
        """Test getting node instance with partial name match"""
        mock_instance = MagicMock()
        mock_instance.name = 'node-1'

        mock_response = MagicMock()
        mock_response.instances = [mock_instance]

        self.gcp.instance_client.aggregated_list = MagicMock(
            return_value=[('zones/us-central1-a', mock_response)]
        )

        # instance.name ('node-1') in node ('gke-cluster-node-1-abc') == True
        result = self.gcp.get_node_instance('gke-cluster-node-1-abc')

        self.assertIsNotNone(result)
        self.assertEqual(result.name, 'node-1')

    def test_get_node_instance_not_found(self):
        """Test getting node instance when not found"""
        mock_response = MagicMock()
        mock_response.instances = None

        self.gcp.instance_client.aggregated_list = MagicMock(
            return_value=[('zones/us-central1-a', mock_response)]
        )

        result = self.gcp.get_node_instance('non-existent-node')

        self.assertIsNone(result)

    def test_get_node_instance_failure(self):
        """Test getting node instance with failure"""
        self.gcp.instance_client.aggregated_list = MagicMock(
            side_effect=Exception("GCP error")
        )

        with self.assertRaises(Exception):
            self.gcp.get_node_instance('node-1')

    def test_get_instance_name(self):
        """Test getting instance name"""
        mock_instance = MagicMock()
        mock_instance.name = 'gke-cluster-node-1'

        result = self.gcp.get_instance_name(mock_instance)

        self.assertEqual(result, 'gke-cluster-node-1')

    def test_get_instance_name_none(self):
        """Test getting instance name when name is None"""
        mock_instance = MagicMock()
        mock_instance.name = None

        result = self.gcp.get_instance_name(mock_instance)

        self.assertIsNone(result)

    def test_get_instance_zone(self):
        """Test getting instance zone"""
        mock_instance = MagicMock()
        mock_instance.zone = 'https://www.googleapis.com/compute/v1/projects/test-project/zones/us-central1-a'

        result = self.gcp.get_instance_zone(mock_instance)

        self.assertEqual(result, 'us-central1-a')

    def test_get_instance_zone_none(self):
        """Test getting instance zone when zone is None"""
        mock_instance = MagicMock()
        mock_instance.zone = None

        result = self.gcp.get_instance_zone(mock_instance)

        self.assertIsNone(result)

    def test_get_node_instance_zone(self):
        """Test getting node instance zone"""
        mock_instance = MagicMock()
        mock_instance.name = 'gke-cluster-node-1'
        mock_instance.zone = 'https://www.googleapis.com/compute/v1/projects/test-project/zones/us-west1-b'

        # Patch get_node_instance to return our mock directly
        with patch.object(self.gcp, 'get_node_instance', return_value=mock_instance):
            result = self.gcp.get_node_instance_zone('node-1')
            self.assertEqual(result, 'us-west1-b')

    def test_get_node_instance_name(self):
        """Test getting node instance name"""
        mock_instance = MagicMock()
        mock_instance.name = 'gke-cluster-node-1'

        # Patch get_node_instance to return our mock directly
        with patch.object(self.gcp, 'get_node_instance', return_value=mock_instance):
            result = self.gcp.get_node_instance_name('node-1')
            self.assertEqual(result, 'gke-cluster-node-1')

    def test_get_instance_id(self):
        """Test getting instance ID (alias for get_node_instance_name)"""
        # Patch get_node_instance_name since get_instance_id just calls it
        with patch.object(self.gcp, 'get_node_instance_name', return_value='gke-cluster-node-1'):
            result = self.gcp.get_instance_id('node-1')
            self.assertEqual(result, 'gke-cluster-node-1')

    def test_start_instances_success(self):
        """Test starting instances successfully"""
        instance_id = 'gke-cluster-node-1'

        # Mock get_node_instance_zone
        with patch.object(self.gcp, 'get_node_instance_zone', return_value='us-central1-a'):
            self.gcp.instance_client.start = MagicMock()

            self.gcp.start_instances(instance_id)

            self.gcp.instance_client.start.assert_called_once()

    def test_start_instances_failure(self):
        """Test starting instances with failure"""
        instance_id = 'gke-cluster-node-1'

        with patch.object(self.gcp, 'get_node_instance_zone', return_value='us-central1-a'):
            self.gcp.instance_client.start = MagicMock(
                side_effect=Exception("GCP error")
            )

            with self.assertRaises(RuntimeError):
                self.gcp.start_instances(instance_id)

    def test_stop_instances_success(self):
        """Test stopping instances successfully"""
        instance_id = 'gke-cluster-node-1'

        with patch.object(self.gcp, 'get_node_instance_zone', return_value='us-central1-a'):
            self.gcp.instance_client.stop = MagicMock()

            self.gcp.stop_instances(instance_id)

            self.gcp.instance_client.stop.assert_called_once()

    def test_stop_instances_failure(self):
        """Test stopping instances with failure"""
        instance_id = 'gke-cluster-node-1'

        with patch.object(self.gcp, 'get_node_instance_zone', return_value='us-central1-a'):
            self.gcp.instance_client.stop = MagicMock(
                side_effect=Exception("GCP error")
            )

            with self.assertRaises(RuntimeError):
                self.gcp.stop_instances(instance_id)

    def test_suspend_instances_success(self):
        """Test suspending instances successfully"""
        instance_id = 'gke-cluster-node-1'

        with patch.object(self.gcp, 'get_node_instance_zone', return_value='us-central1-a'):
            self.gcp.instance_client.suspend = MagicMock()

            self.gcp.suspend_instances(instance_id)

            self.gcp.instance_client.suspend.assert_called_once()

    def test_suspend_instances_failure(self):
        """Test suspending instances with failure"""
        instance_id = 'gke-cluster-node-1'

        with patch.object(self.gcp, 'get_node_instance_zone', return_value='us-central1-a'):
            self.gcp.instance_client.suspend = MagicMock(
                side_effect=Exception("GCP error")
            )

            with self.assertRaises(RuntimeError):
                self.gcp.suspend_instances(instance_id)

    def test_terminate_instances_success(self):
        """Test terminating instances successfully"""
        instance_id = 'gke-cluster-node-1'

        with patch.object(self.gcp, 'get_node_instance_zone', return_value='us-central1-a'):
            self.gcp.instance_client.delete = MagicMock()

            self.gcp.terminate_instances(instance_id)

            self.gcp.instance_client.delete.assert_called_once()

    def test_terminate_instances_failure(self):
        """Test terminating instances with failure"""
        instance_id = 'gke-cluster-node-1'

        with patch.object(self.gcp, 'get_node_instance_zone', return_value='us-central1-a'):
            self.gcp.instance_client.delete = MagicMock(
                side_effect=Exception("GCP error")
            )

            with self.assertRaises(RuntimeError):
                self.gcp.terminate_instances(instance_id)

    def test_reboot_instances_success(self):
        """Test rebooting instances successfully"""
        instance_id = 'gke-cluster-node-1'

        with patch.object(self.gcp, 'get_node_instance_zone', return_value='us-central1-a'):
            self.gcp.instance_client.reset = MagicMock()

            self.gcp.reboot_instances(instance_id)

            self.gcp.instance_client.reset.assert_called_once()

    def test_reboot_instances_failure(self):
        """Test rebooting instances with failure"""
        instance_id = 'gke-cluster-node-1'

        with patch.object(self.gcp, 'get_node_instance_zone', return_value='us-central1-a'):
            self.gcp.instance_client.reset = MagicMock(
                side_effect=Exception("GCP error")
            )

            with self.assertRaises(RuntimeError):
                self.gcp.reboot_instances(instance_id)

    @patch('time.sleep')
    def test_get_instance_status_success(self, _mock_sleep):
        """Test getting instance status successfully"""
        instance_id = 'gke-cluster-node-1'

        mock_instance = MagicMock()
        mock_instance.status = 'RUNNING'

        with patch.object(self.gcp, 'get_node_instance_zone', return_value='us-central1-a'):
            self.gcp.instance_client.get = MagicMock(return_value=mock_instance)

            result = self.gcp.get_instance_status(instance_id, 'RUNNING', 60)

            self.assertTrue(result)

    @patch('time.sleep')
    def test_get_instance_status_timeout(self, _mock_sleep):
        """Test getting instance status with timeout"""
        instance_id = 'gke-cluster-node-1'

        mock_instance = MagicMock()
        mock_instance.status = 'PROVISIONING'

        with patch.object(self.gcp, 'get_node_instance_zone', return_value='us-central1-a'):
            self.gcp.instance_client.get = MagicMock(return_value=mock_instance)

            result = self.gcp.get_instance_status(instance_id, 'RUNNING', 5)

            self.assertFalse(result)

    @patch('time.sleep')
    def test_get_instance_status_failure(self, _mock_sleep):
        """Test getting instance status with failure"""
        instance_id = 'gke-cluster-node-1'

        with patch.object(self.gcp, 'get_node_instance_zone', return_value='us-central1-a'):
            self.gcp.instance_client.get = MagicMock(
                side_effect=Exception("GCP error")
            )

            with self.assertRaises(RuntimeError):
                self.gcp.get_instance_status(instance_id, 'RUNNING', 60)

    def test_wait_until_suspended_success(self):
        """Test waiting until instance is suspended"""
        instance_id = 'gke-cluster-node-1'

        with patch.object(self.gcp, 'get_instance_status', return_value=True) as mock_get_status:
            result = self.gcp.wait_until_suspended(instance_id, 60)

        self.assertTrue(result)
        mock_get_status.assert_called_once_with(instance_id, 'SUSPENDED', 60)

    def test_wait_until_suspended_failure(self):
        """Test waiting until instance is suspended with failure"""
        instance_id = 'gke-cluster-node-1'

        with patch.object(self.gcp, 'get_instance_status', return_value=False):
            result = self.gcp.wait_until_suspended(instance_id, 60)

        self.assertFalse(result)

    def test_wait_until_running_success(self):
        """Test waiting until instance is running successfully"""
        instance_id = 'gke-cluster-node-1'
        affected_node = MagicMock(spec=AffectedNode)

        with patch('time.time', side_effect=[100, 110]):
            with patch.object(self.gcp, 'get_instance_status', return_value=True):
                result = self.gcp.wait_until_running(instance_id, 60, affected_node)

        self.assertTrue(result)
        affected_node.set_affected_node_status.assert_called_once_with('running', 10)
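        # The recorded duration of 10 comes from the patched time.time values
        # (110 - 100) captured around the wait call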

    def test_wait_until_running_without_affected_node(self):
        """Test waiting until running without affected node tracking"""
        instance_id = 'gke-cluster-node-1'

        with patch.object(self.gcp, 'get_instance_status', return_value=True):
            result = self.gcp.wait_until_running(instance_id, 60, None)

        self.assertTrue(result)

    def test_wait_until_stopped_success(self):
        """Test waiting until instance is stopped successfully"""
        instance_id = 'gke-cluster-node-1'
        affected_node = MagicMock(spec=AffectedNode)

        with patch('time.time', side_effect=[100, 115]):
            with patch.object(self.gcp, 'get_instance_status', return_value=True):
                result = self.gcp.wait_until_stopped(instance_id, 60, affected_node)

        self.assertTrue(result)
        affected_node.set_affected_node_status.assert_called_once_with('stopped', 15)

    def test_wait_until_stopped_without_affected_node(self):
        """Test waiting until stopped without affected node tracking"""
        instance_id = 'gke-cluster-node-1'

        with patch.object(self.gcp, 'get_instance_status', return_value=True):
            result = self.gcp.wait_until_stopped(instance_id, 60, None)

        self.assertTrue(result)

    def test_wait_until_terminated_success(self):
        """Test waiting until instance is terminated successfully"""
        instance_id = 'gke-cluster-node-1'
        affected_node = MagicMock(spec=AffectedNode)

        with patch('time.time', side_effect=[100, 120]):
            with patch.object(self.gcp, 'get_instance_status', return_value=True):
                result = self.gcp.wait_until_terminated(instance_id, 60, affected_node)

        self.assertTrue(result)
        affected_node.set_affected_node_status.assert_called_once_with('terminated', 20)

    def test_wait_until_terminated_without_affected_node(self):
        """Test waiting until terminated without affected node tracking"""
        instance_id = 'gke-cluster-node-1'

        with patch.object(self.gcp, 'get_instance_status', return_value=True):
            result = self.gcp.wait_until_terminated(instance_id, 60, None)

        self.assertTrue(result)


class TestGCPNodeScenarios(unittest.TestCase):
    """Test cases for gcp_node_scenarios class"""

    def setUp(self):
        """Set up test fixtures"""
        self.kubecli = MagicMock(spec=KrknKubernetes)
        self.affected_nodes_status = AffectedNodeStatus()

        # Mock the GCP class
        with patch('krkn.scenario_plugins.node_actions.gcp_node_scenarios.GCP') as mock_gcp_class:
            self.mock_gcp = MagicMock()
            mock_gcp_class.return_value = self.mock_gcp
            self.scenario = gcp_node_scenarios(
                kubecli=self.kubecli,
                node_action_kube_check=True,
                affected_nodes_status=self.affected_nodes_status
            )

    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_ready_status')
    def test_node_start_scenario_success(self, mock_wait_ready):
        """Test node start scenario successfully"""
        node = 'gke-cluster-node-1'
        instance_id = 'gke-cluster-node-1'

        mock_instance = MagicMock()
        mock_instance.name = instance_id

        self.mock_gcp.get_node_instance.return_value = mock_instance
        self.mock_gcp.get_instance_name.return_value = instance_id
        self.mock_gcp.start_instances.return_value = None
        self.mock_gcp.wait_until_running.return_value = True

        self.scenario.node_start_scenario(
            instance_kill_count=1,
            node=node,
            timeout=600,
            poll_interval=15
        )

        self.mock_gcp.get_node_instance.assert_called_once_with(node)
        self.mock_gcp.get_instance_name.assert_called_once_with(mock_instance)
        self.mock_gcp.start_instances.assert_called_once_with(instance_id)
        self.mock_gcp.wait_until_running.assert_called_once()
        mock_wait_ready.assert_called_once()
        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 1)
        self.assertEqual(self.affected_nodes_status.affected_nodes[0].node_name, node)

    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_ready_status')
    def test_node_start_scenario_no_kube_check(self, mock_wait_ready):
        """Test node start scenario without kube check"""
        node = 'gke-cluster-node-1'
        instance_id = 'gke-cluster-node-1'

        # Create scenario with node_action_kube_check=False
        with patch('krkn.scenario_plugins.node_actions.gcp_node_scenarios.GCP') as mock_gcp_class:
            mock_gcp = MagicMock()
            mock_gcp_class.return_value = mock_gcp
            scenario = gcp_node_scenarios(
                kubecli=self.kubecli,
                node_action_kube_check=False,
                affected_nodes_status=AffectedNodeStatus()
            )

        mock_instance = MagicMock()
        mock_instance.name = instance_id

        mock_gcp.get_node_instance.return_value = mock_instance
        mock_gcp.get_instance_name.return_value = instance_id
        mock_gcp.start_instances.return_value = None
        mock_gcp.wait_until_running.return_value = True

        scenario.node_start_scenario(
            instance_kill_count=1,
            node=node,
            timeout=600,
            poll_interval=15
        )

        # Should not call wait_for_ready_status
        mock_wait_ready.assert_not_called()

    def test_node_start_scenario_failure(self):
        """Test node start scenario with failure"""
        node = 'gke-cluster-node-1'

        self.mock_gcp.get_node_instance.side_effect = Exception("GCP error")

        with self.assertRaises(RuntimeError):
            self.scenario.node_start_scenario(
                instance_kill_count=1,
                node=node,
                timeout=600,
                poll_interval=15
            )

    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_unknown_status')
    def test_node_stop_scenario_success(self, mock_wait_unknown):
        """Test node stop scenario successfully"""
        node = 'gke-cluster-node-1'
        instance_id = 'gke-cluster-node-1'

        mock_instance = MagicMock()
        mock_instance.name = instance_id

        self.mock_gcp.get_node_instance.return_value = mock_instance
        self.mock_gcp.get_instance_name.return_value = instance_id
        self.mock_gcp.stop_instances.return_value = None
        self.mock_gcp.wait_until_stopped.return_value = True

        self.scenario.node_stop_scenario(
            instance_kill_count=1,
            node=node,
            timeout=600,
            poll_interval=15
        )

        self.mock_gcp.get_node_instance.assert_called_once_with(node)
        self.mock_gcp.get_instance_name.assert_called_once_with(mock_instance)
        self.mock_gcp.stop_instances.assert_called_once_with(instance_id)
        self.mock_gcp.wait_until_stopped.assert_called_once()
        mock_wait_unknown.assert_called_once()
        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 1)

    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_unknown_status')
    def test_node_stop_scenario_no_kube_check(self, mock_wait_unknown):
        """Test node stop scenario without kube check"""
        node = 'gke-cluster-node-1'
        instance_id = 'gke-cluster-node-1'

        # Create scenario with node_action_kube_check=False
        with patch('krkn.scenario_plugins.node_actions.gcp_node_scenarios.GCP') as mock_gcp_class:
            mock_gcp = MagicMock()
            mock_gcp_class.return_value = mock_gcp
            scenario = gcp_node_scenarios(
                kubecli=self.kubecli,
                node_action_kube_check=False,
                affected_nodes_status=AffectedNodeStatus()
            )

        mock_instance = MagicMock()
        mock_instance.name = instance_id

        mock_gcp.get_node_instance.return_value = mock_instance
        mock_gcp.get_instance_name.return_value = instance_id
        mock_gcp.stop_instances.return_value = None
        mock_gcp.wait_until_stopped.return_value = True

        scenario.node_stop_scenario(
            instance_kill_count=1,
            node=node,
            timeout=600,
            poll_interval=15
        )

        # Should not call wait_for_unknown_status
        mock_wait_unknown.assert_not_called()

    def test_node_stop_scenario_failure(self):
        """Test node stop scenario with failure"""
        node = 'gke-cluster-node-1'

        self.mock_gcp.get_node_instance.side_effect = Exception("GCP error")

        with self.assertRaises(RuntimeError):
            self.scenario.node_stop_scenario(
                instance_kill_count=1,
                node=node,
                timeout=600,
                poll_interval=15
            )

    @patch('time.sleep')
    def test_node_termination_scenario_success(self, _mock_sleep):
        """Test node termination scenario successfully"""
        node = 'gke-cluster-node-1'
        instance_id = 'gke-cluster-node-1'

        mock_instance = MagicMock()
        mock_instance.name = instance_id

        self.mock_gcp.get_node_instance.return_value = mock_instance
        self.mock_gcp.get_instance_name.return_value = instance_id
        self.mock_gcp.terminate_instances.return_value = None
        self.mock_gcp.wait_until_terminated.return_value = True
        self.kubecli.list_nodes.return_value = []

        self.scenario.node_termination_scenario(
            instance_kill_count=1,
            node=node,
            timeout=600,
            poll_interval=15
        )

        self.mock_gcp.get_node_instance.assert_called_once_with(node)
        self.mock_gcp.get_instance_name.assert_called_once_with(mock_instance)
        self.mock_gcp.terminate_instances.assert_called_once_with(instance_id)
        self.mock_gcp.wait_until_terminated.assert_called_once()
        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 1)

    @patch('time.sleep')
    def test_node_termination_scenario_node_still_exists(self, _mock_sleep):
        """Test node termination scenario when node still exists"""
        node = 'gke-cluster-node-1'
        instance_id = 'gke-cluster-node-1'

        mock_instance = MagicMock()
        mock_instance.name = instance_id

        self.mock_gcp.get_node_instance.return_value = mock_instance
        self.mock_gcp.get_instance_name.return_value = instance_id
        self.mock_gcp.terminate_instances.return_value = None
        self.mock_gcp.wait_until_terminated.return_value = True
        # Node still in list after timeout
        self.kubecli.list_nodes.return_value = [node]

        with self.assertRaises(RuntimeError):
            self.scenario.node_termination_scenario(
                instance_kill_count=1,
                node=node,
                timeout=2,
                poll_interval=15
            )

    def test_node_termination_scenario_failure(self):
        """Test node termination scenario with failure"""
        node = 'gke-cluster-node-1'

        self.mock_gcp.get_node_instance.side_effect = Exception("GCP error")

        with self.assertRaises(RuntimeError):
            self.scenario.node_termination_scenario(
                instance_kill_count=1,
                node=node,
                timeout=600,
                poll_interval=15
            )

    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_unknown_status')
    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_ready_status')
    def test_node_reboot_scenario_success(self, mock_wait_ready, mock_wait_unknown):
        """Test node reboot scenario successfully"""
        node = 'gke-cluster-node-1'
        instance_id = 'gke-cluster-node-1'

        mock_instance = MagicMock()
        mock_instance.name = instance_id

        self.mock_gcp.get_node_instance.return_value = mock_instance
        self.mock_gcp.get_instance_name.return_value = instance_id
        self.mock_gcp.reboot_instances.return_value = None
        self.mock_gcp.wait_until_running.return_value = True

        self.scenario.node_reboot_scenario(
            instance_kill_count=1,
            node=node,
            timeout=600
        )

        self.mock_gcp.get_node_instance.assert_called_once_with(node)
        self.mock_gcp.get_instance_name.assert_called_once_with(mock_instance)
        self.mock_gcp.reboot_instances.assert_called_once_with(instance_id)
        self.mock_gcp.wait_until_running.assert_called_once()
        # wait_for_unknown_status and wait_for_ready_status are each called
        # exactly once in the GCP implementation
        self.assertEqual(mock_wait_unknown.call_count, 1)
        self.assertEqual(mock_wait_ready.call_count, 1)
        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 1)

    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_unknown_status')
    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_ready_status')
    def test_node_reboot_scenario_no_kube_check(self, mock_wait_ready, mock_wait_unknown):
        """Test node reboot scenario without kube check"""
        node = 'gke-cluster-node-1'
        instance_id = 'gke-cluster-node-1'

        # Create scenario with node_action_kube_check=False
        with patch('krkn.scenario_plugins.node_actions.gcp_node_scenarios.GCP') as mock_gcp_class:
            mock_gcp = MagicMock()
            mock_gcp_class.return_value = mock_gcp
            scenario = gcp_node_scenarios(
                kubecli=self.kubecli,
                node_action_kube_check=False,
                affected_nodes_status=AffectedNodeStatus()
            )

        mock_instance = MagicMock()
        mock_instance.name = instance_id

        mock_gcp.get_node_instance.return_value = mock_instance
        mock_gcp.get_instance_name.return_value = instance_id
        mock_gcp.reboot_instances.return_value = None
        mock_gcp.wait_until_running.return_value = True

        scenario.node_reboot_scenario(
            instance_kill_count=1,
            node=node,
            timeout=600
        )

        # Should not call wait functions
        mock_wait_unknown.assert_not_called()
        mock_wait_ready.assert_not_called()

    def test_node_reboot_scenario_failure(self):
        """Test node reboot scenario with failure"""
        node = 'gke-cluster-node-1'

        self.mock_gcp.get_node_instance.side_effect = Exception("GCP error")

        with self.assertRaises(RuntimeError):
            self.scenario.node_reboot_scenario(
                instance_kill_count=1,
                node=node,
                timeout=600
            )

    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_ready_status')
    def test_node_start_scenario_multiple_kills(self, mock_wait_ready):
        """Test node start scenario with multiple kill counts"""
        node = 'gke-cluster-node-1'
        instance_id = 'gke-cluster-node-1'

        mock_instance = MagicMock()
        mock_instance.name = instance_id

        self.mock_gcp.get_node_instance.return_value = mock_instance
        self.mock_gcp.get_instance_name.return_value = instance_id
        self.mock_gcp.start_instances.return_value = None
        self.mock_gcp.wait_until_running.return_value = True

        self.scenario.node_start_scenario(
            instance_kill_count=3,
            node=node,
            timeout=600,
            poll_interval=15
        )

        self.assertEqual(self.mock_gcp.start_instances.call_count, 3)
        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 3)


if __name__ == "__main__":
    unittest.main()

@@ -1,637 +0,0 @@

#!/usr/bin/env python3

"""
Test suite for IBM Cloud VPC node scenarios

This test suite covers both the IbmCloud class and ibm_node_scenarios class
using mocks to avoid actual IBM Cloud API calls.

IMPORTANT: These tests use comprehensive mocking and do NOT require any cloud provider
settings or credentials. No environment variables need to be set. All API clients and
external dependencies are mocked.

Test Coverage:
- TestIbmCloud: 30 tests for the IbmCloud VPC API client class
  - Initialization, SSL configuration, instance operations (start/stop/reboot/delete)
  - Status checking, wait operations, error handling
- TestIbmNodeScenarios: 14 tests for node scenario orchestration
  - Node start/stop/reboot/terminate scenarios
  - Exception handling, multiple kill counts

Usage:
    # Run all tests
    python -m unittest tests.test_ibmcloud_node_scenarios -v

    # Run with coverage
    python -m coverage run -a -m unittest tests/test_ibmcloud_node_scenarios.py -v

Assisted By: Claude Code
"""

import unittest
import sys
import json
from unittest.mock import MagicMock, patch, Mock

# Mock paramiko and IBM SDK before importing
sys.modules['paramiko'] = MagicMock()
sys.modules['ibm_vpc'] = MagicMock()
sys.modules['ibm_cloud_sdk_core'] = MagicMock()
sys.modules['ibm_cloud_sdk_core.authenticators'] = MagicMock()
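# These stubs must be registered before the krkn imports below so that the
# IBM SDK modules and paramiko resolve to mocks at import time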

from krkn_lib.k8s import KrknKubernetes
from krkn_lib.models.k8s import AffectedNode, AffectedNodeStatus
from krkn.scenario_plugins.node_actions.ibmcloud_node_scenarios import (
    IbmCloud,
    ibm_node_scenarios
)


class TestIbmCloud(unittest.TestCase):
    """Test cases for IbmCloud class"""

    def setUp(self):
        """Set up test fixtures"""
        # Set up environment variables
        self.env_patcher = patch.dict('os.environ', {
            'IBMC_APIKEY': 'test-api-key',
            'IBMC_URL': 'https://test.cloud.ibm.com'
        })
        self.env_patcher.start()

        # Mock IBM VPC client
        self.mock_vpc = MagicMock()
        self.vpc_patcher = patch('krkn.scenario_plugins.node_actions.ibmcloud_node_scenarios.VpcV1')
        self.mock_vpc_class = self.vpc_patcher.start()
        self.mock_vpc_class.return_value = self.mock_vpc

        # Mock IAMAuthenticator
        self.auth_patcher = patch('krkn.scenario_plugins.node_actions.ibmcloud_node_scenarios.IAMAuthenticator')
        self.mock_auth = self.auth_patcher.start()

        # Create IbmCloud instance
        self.ibm = IbmCloud()

    def tearDown(self):
        """Clean up after tests"""
        self.env_patcher.stop()
        self.vpc_patcher.stop()
        self.auth_patcher.stop()

    def test_init_success(self):
        """Test IbmCloud class initialization"""
        self.assertIsNotNone(self.ibm.service)
        self.mock_vpc.set_service_url.assert_called_once_with('https://test.cloud.ibm.com')

    def test_init_missing_api_key(self):
        """Test initialization fails when IBMC_APIKEY is missing"""
        with patch.dict('os.environ', {
            'IBMC_URL': 'https://test.cloud.ibm.com'
        }, clear=True):
            with self.assertRaises(Exception) as context:
                IbmCloud()
            self.assertIn("IBMC_APIKEY", str(context.exception))

    def test_init_missing_url(self):
        """Test initialization fails when IBMC_URL is missing"""
        with patch.dict('os.environ', {
            'IBMC_APIKEY': 'test-api-key'
        }, clear=True):
            with self.assertRaises(Exception) as context:
                IbmCloud()
            self.assertIn("IBMC_URL", str(context.exception))

    def test_configure_ssl_verification_disabled(self):
        """Test disabling SSL verification"""
        self.ibm.configure_ssl_verification(True)
        self.mock_vpc.set_disable_ssl_verification.assert_called_with(True)

    def test_configure_ssl_verification_enabled(self):
        """Test enabling SSL verification"""
        self.ibm.configure_ssl_verification(False)
        self.mock_vpc.set_disable_ssl_verification.assert_called_with(False)

    def test_get_instance_id_success(self):
        """Test getting instance ID by node name"""
        mock_list = [
            {'vpc_name': 'test-node-1', 'vpc_id': 'vpc-1'},
            {'vpc_name': 'test-node-2', 'vpc_id': 'vpc-2'}
        ]

        with patch.object(self.ibm, 'list_instances', return_value=mock_list):
            instance_id = self.ibm.get_instance_id('test-node-1')
            self.assertEqual(instance_id, 'vpc-1')

    def test_get_instance_id_not_found(self):
        """Test getting instance ID when node not found"""
        mock_list = [
            {'vpc_name': 'test-node-1', 'vpc_id': 'vpc-1'}
        ]

        with patch.object(self.ibm, 'list_instances', return_value=mock_list):
            with self.assertRaises(SystemExit):
                self.ibm.get_instance_id('non-existent-node')

    def test_delete_instance_success(self):
        """Test deleting instance successfully"""
        self.mock_vpc.delete_instance.return_value = None

        result = self.ibm.delete_instance('vpc-123')

        self.mock_vpc.delete_instance.assert_called_once_with('vpc-123')
        # Method doesn't explicitly return True, so we just verify no exception

    def test_delete_instance_failure(self):
        """Test deleting instance with failure"""
        self.mock_vpc.delete_instance.side_effect = Exception("API Error")

        result = self.ibm.delete_instance('vpc-123')

        self.assertEqual(result, False)

    def test_reboot_instances_success(self):
        """Test rebooting instance successfully"""
        self.mock_vpc.create_instance_action.return_value = None

        result = self.ibm.reboot_instances('vpc-123')

        self.assertTrue(result)
        self.mock_vpc.create_instance_action.assert_called_once_with(
            'vpc-123',
            type='reboot'
        )

    def test_reboot_instances_failure(self):
        """Test rebooting instance with failure"""
        self.mock_vpc.create_instance_action.side_effect = Exception("API Error")

        result = self.ibm.reboot_instances('vpc-123')

        self.assertEqual(result, False)

    def test_stop_instances_success(self):
        """Test stopping instance successfully"""
        self.mock_vpc.create_instance_action.return_value = None

        result = self.ibm.stop_instances('vpc-123')

        self.assertTrue(result)
        self.mock_vpc.create_instance_action.assert_called_once_with(
            'vpc-123',
            type='stop'
        )

    def test_stop_instances_failure(self):
        """Test stopping instance with failure"""
        self.mock_vpc.create_instance_action.side_effect = Exception("API Error")

        result = self.ibm.stop_instances('vpc-123')

        self.assertEqual(result, False)

    def test_start_instances_success(self):
        """Test starting instance successfully"""
        self.mock_vpc.create_instance_action.return_value = None

        result = self.ibm.start_instances('vpc-123')

        self.assertTrue(result)
        self.mock_vpc.create_instance_action.assert_called_once_with(
            'vpc-123',
            type='start'
        )

    def test_start_instances_failure(self):
        """Test starting instance with failure"""
        self.mock_vpc.create_instance_action.side_effect = Exception("API Error")

        result = self.ibm.start_instances('vpc-123')

        self.assertEqual(result, False)

    def test_list_instances_success(self):
        """Test listing instances successfully"""
        mock_result = Mock()
        mock_result.get_result.return_value = {
            'instances': [
                {'name': 'node-1', 'id': 'vpc-1'},
                {'name': 'node-2', 'id': 'vpc-2'}
            ],
            'total_count': 2,
            'limit': 50
        }
        self.mock_vpc.list_instances.return_value = mock_result

        instances = self.ibm.list_instances()

        self.assertEqual(len(instances), 2)
        self.assertEqual(instances[0]['vpc_name'], 'node-1')
        self.assertEqual(instances[1]['vpc_name'], 'node-2')

    def test_list_instances_with_pagination(self):
        """Test listing instances with pagination"""
        # First call returns limit reached
        mock_result_1 = Mock()
        mock_result_1.get_result.return_value = {
            'instances': [
                {'name': 'node-1', 'id': 'vpc-1'}
            ],
            'total_count': 1,
            'limit': 1
        }

        # Second call returns remaining
        mock_result_2 = Mock()
        mock_vpc_2 = type('obj', (object,), {'name': 'node-2', 'id': 'vpc-2'})
        mock_result_2.get_result.return_value = {
            'instances': [mock_vpc_2],
            'total_count': 1,
            'limit': 50
        }

        self.mock_vpc.list_instances.side_effect = [mock_result_1, mock_result_2]

        instances = self.ibm.list_instances()

        self.assertEqual(len(instances), 2)
        self.assertEqual(self.mock_vpc.list_instances.call_count, 2)

    def test_list_instances_failure(self):
        """Test listing instances with failure"""
        self.mock_vpc.list_instances.side_effect = Exception("API Error")

        with self.assertRaises(SystemExit):
            self.ibm.list_instances()

    def test_find_id_in_list(self):
        """Test finding ID in VPC list"""
        vpc_list = [
            {'vpc_name': 'vpc-1', 'vpc_id': 'id-1'},
            {'vpc_name': 'vpc-2', 'vpc_id': 'id-2'}
        ]

        vpc_id = self.ibm.find_id_in_list('vpc-2', vpc_list)

        self.assertEqual(vpc_id, 'id-2')

    def test_find_id_in_list_not_found(self):
        """Test finding ID in VPC list when not found"""
        vpc_list = [
            {'vpc_name': 'vpc-1', 'vpc_id': 'id-1'}
        ]

        vpc_id = self.ibm.find_id_in_list('vpc-3', vpc_list)

        self.assertIsNone(vpc_id)

    def test_get_instance_status_success(self):
        """Test getting instance status successfully"""
        mock_result = Mock()
        mock_result.get_result.return_value = {'status': 'running'}
        self.mock_vpc.get_instance.return_value = mock_result

        status = self.ibm.get_instance_status('vpc-123')

        self.assertEqual(status, 'running')

    def test_get_instance_status_failure(self):
        """Test getting instance status with failure"""
        self.mock_vpc.get_instance.side_effect = Exception("API Error")

        status = self.ibm.get_instance_status('vpc-123')

        self.assertIsNone(status)

    def test_wait_until_deleted_success(self):
        """Test waiting until instance is deleted"""
        # First call returns status, second returns None (deleted)
        with patch.object(self.ibm, 'get_instance_status', side_effect=['deleting', None]):
            affected_node = MagicMock(spec=AffectedNode)

            with patch('time.time', side_effect=[100, 105]), \
                 patch('time.sleep'):
                result = self.ibm.wait_until_deleted('vpc-123', timeout=60, affected_node=affected_node)

            self.assertTrue(result)
            affected_node.set_affected_node_status.assert_called_once_with("terminated", 5)

    def test_wait_until_deleted_timeout(self):
        """Test waiting until deleted with timeout"""
        with patch.object(self.ibm, 'get_instance_status', return_value='deleting'):
            with patch('time.sleep'):
                result = self.ibm.wait_until_deleted('vpc-123', timeout=5)

            self.assertFalse(result)

    def test_wait_until_running_success(self):
        """Test waiting until instance is running"""
        with patch.object(self.ibm, 'get_instance_status', side_effect=['starting', 'running']):
            affected_node = MagicMock(spec=AffectedNode)

            with patch('time.time', side_effect=[100, 105]), \
                 patch('time.sleep'):
                result = self.ibm.wait_until_running('vpc-123', timeout=60, affected_node=affected_node)

            self.assertTrue(result)
            affected_node.set_affected_node_status.assert_called_once_with("running", 5)

    def test_wait_until_running_timeout(self):
        """Test waiting until running with timeout"""
        with patch.object(self.ibm, 'get_instance_status', return_value='starting'):
            with patch('time.sleep'):
                result = self.ibm.wait_until_running('vpc-123', timeout=5)

            self.assertFalse(result)

    def test_wait_until_stopped_success(self):
        """Test waiting until instance is stopped"""
        with patch.object(self.ibm, 'get_instance_status', side_effect=['stopping', 'stopped']):
            affected_node = MagicMock(spec=AffectedNode)

            with patch('time.time', side_effect=[100, 105]), \
                 patch('time.sleep'):
                result = self.ibm.wait_until_stopped('vpc-123', timeout=60, affected_node=affected_node)

            self.assertTrue(result)
            affected_node.set_affected_node_status.assert_called_once_with("stopped", 5)

    def test_wait_until_stopped_timeout(self):
        """Test waiting until stopped with timeout"""
        with patch.object(self.ibm, 'get_instance_status', return_value='stopping'):
            with patch('time.sleep'):
                result = self.ibm.wait_until_stopped('vpc-123', timeout=5, affected_node=None)

            self.assertFalse(result)

    def test_wait_until_rebooted_success(self):
        """Test waiting until instance is rebooted"""
        # First call checks reboot status (not 'starting'), second call in wait_until_running checks status
        with patch.object(self.ibm, 'get_instance_status', side_effect=['running', 'running']):
            affected_node = MagicMock(spec=AffectedNode)

            time_values = [100, 105, 110]
            with patch('time.time', side_effect=time_values), \
                 patch('time.sleep'):
                result = self.ibm.wait_until_rebooted('vpc-123', timeout=60, affected_node=affected_node)

            self.assertTrue(result)

    def test_wait_until_rebooted_timeout(self):
        """Test waiting until rebooted with timeout"""
        with patch.object(self.ibm, 'get_instance_status', return_value='starting'):
            with patch('time.sleep'):
                result = self.ibm.wait_until_rebooted('vpc-123', timeout=5, affected_node=None)

            self.assertFalse(result)


class TestIbmNodeScenarios(unittest.TestCase):
    """Test cases for ibm_node_scenarios class"""

    def setUp(self):
        """Set up test fixtures"""
        # Mock KrknKubernetes
        self.mock_kubecli = MagicMock(spec=KrknKubernetes)
        self.affected_nodes_status = AffectedNodeStatus()

        # Mock the IbmCloud class entirely to avoid any real API calls
        self.ibm_cloud_patcher = patch('krkn.scenario_plugins.node_actions.ibmcloud_node_scenarios.IbmCloud')
        self.mock_ibm_cloud_class = self.ibm_cloud_patcher.start()

        # Create a mock instance that will be returned when IbmCloud() is called
        self.mock_ibm_cloud_instance = MagicMock()
        self.mock_ibm_cloud_class.return_value = self.mock_ibm_cloud_instance

        # Create ibm_node_scenarios instance
        self.scenario = ibm_node_scenarios(
            kubecli=self.mock_kubecli,
            node_action_kube_check=True,
            affected_nodes_status=self.affected_nodes_status,
            disable_ssl_verification=False
        )

    def tearDown(self):
        """Clean up after tests"""
        self.ibm_cloud_patcher.stop()

    def test_init(self):
        """Test ibm_node_scenarios initialization"""
        self.assertIsNotNone(self.scenario.ibmcloud)
        self.assertTrue(self.scenario.node_action_kube_check)
        self.assertEqual(self.scenario.kubecli, self.mock_kubecli)

    def test_init_with_ssl_disabled(self):
        """Test initialization with SSL verification disabled"""
        scenario = ibm_node_scenarios(
            kubecli=self.mock_kubecli,
            node_action_kube_check=True,
            affected_nodes_status=self.affected_nodes_status,
            disable_ssl_verification=True
        )

        # Verify configure_ssl_verification was called
        self.mock_ibm_cloud_instance.configure_ssl_verification.assert_called_with(True)
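        # Note: passing True here means "disable SSL verification", mirroring
        # set_disable_ssl_verification(True) exercised in TestIbmCloud above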

    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_ready_status')
    def test_node_start_scenario_success(self, mock_wait_ready):
        """Test node start scenario successfully"""
        # Configure mock methods
        self.mock_ibm_cloud_instance.get_instance_id.return_value = 'vpc-123'
        self.mock_ibm_cloud_instance.start_instances.return_value = True
        self.mock_ibm_cloud_instance.wait_until_running.return_value = True

        self.scenario.node_start_scenario(
            instance_kill_count=1,
            node='test-node',
            timeout=60,
            poll_interval=5
        )

        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 1)
        self.assertEqual(self.affected_nodes_status.affected_nodes[0].node_name, 'test-node')
        mock_wait_ready.assert_called_once()

    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_ready_status')
    def test_node_start_scenario_no_kube_check(self, mock_wait_ready):
        """Test node start scenario without Kubernetes check"""
        self.scenario.node_action_kube_check = False

        # Configure mock methods
        self.mock_ibm_cloud_instance.get_instance_id.return_value = 'vpc-123'
        self.mock_ibm_cloud_instance.start_instances.return_value = True
        self.mock_ibm_cloud_instance.wait_until_running.return_value = True

        self.scenario.node_start_scenario(
            instance_kill_count=1,
            node='test-node',
            timeout=60,
            poll_interval=5
        )

        mock_wait_ready.assert_not_called()

    def test_node_stop_scenario_success(self):
        """Test node stop scenario successfully"""
        # Configure mock methods
        self.mock_ibm_cloud_instance.get_instance_id.return_value = 'vpc-123'
        self.mock_ibm_cloud_instance.stop_instances.return_value = True
        self.mock_ibm_cloud_instance.wait_until_stopped.return_value = True

        self.scenario.node_stop_scenario(
            instance_kill_count=1,
            node='test-node',
            timeout=60,
            poll_interval=5
        )

        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 1)

    def test_node_stop_scenario_failure(self):
        """Test node stop scenario with stop command failure"""
        # Configure mock - get_instance_id succeeds but stop_instances fails
        self.mock_ibm_cloud_instance.get_instance_id.return_value = 'vpc-123'
        self.mock_ibm_cloud_instance.stop_instances.return_value = False

        # Code raises exception inside try/except, so it should be caught and logged
        self.scenario.node_stop_scenario(
            instance_kill_count=1,
            node='test-node',
            timeout=60,
            poll_interval=5
        )

        # Verify that affected nodes were not appended since exception was caught
        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 0)

    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_unknown_status')
    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_ready_status')
    def test_node_reboot_scenario_success(self, mock_wait_ready, mock_wait_unknown):
        """Test node reboot scenario successfully"""
        # Configure mock methods
        self.mock_ibm_cloud_instance.get_instance_id.return_value = 'vpc-123'
        self.mock_ibm_cloud_instance.reboot_instances.return_value = True
        self.mock_ibm_cloud_instance.wait_until_rebooted.return_value = True

        self.scenario.node_reboot_scenario(
            instance_kill_count=1,
            node='test-node',
            timeout=60,
            soft_reboot=False
        )

        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 1)
        mock_wait_unknown.assert_called_once()
        mock_wait_ready.assert_called_once()

    def test_node_reboot_scenario_failure(self):
        """Test node reboot scenario with reboot command failure"""
        # Configure mock - get_instance_id succeeds but reboot_instances fails
        self.mock_ibm_cloud_instance.get_instance_id.return_value = 'vpc-123'
        self.mock_ibm_cloud_instance.reboot_instances.return_value = False

        # Code raises exception inside try/except, so it should be caught and logged
        self.scenario.node_reboot_scenario(
            instance_kill_count=1,
            node='test-node',
            timeout=60,
            soft_reboot=False
        )

        # Verify that affected nodes were not appended since exception was caught
        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 0)

    def test_node_terminate_scenario_success(self):
        """Test node terminate scenario successfully"""
        # Configure mock methods
        self.mock_ibm_cloud_instance.get_instance_id.return_value = 'vpc-123'
        self.mock_ibm_cloud_instance.delete_instance.return_value = None
        self.mock_ibm_cloud_instance.wait_until_deleted.return_value = True

        self.scenario.node_terminate_scenario(
            instance_kill_count=1,
            node='test-node',
            timeout=60,
            poll_interval=5
        )

        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 1)

    def test_node_scenario_multiple_kill_count(self):
        """Test node scenario with multiple kill count"""
        # Configure mock methods
        self.mock_ibm_cloud_instance.get_instance_id.return_value = 'vpc-123'
        self.mock_ibm_cloud_instance.stop_instances.return_value = True
        self.mock_ibm_cloud_instance.wait_until_stopped.return_value = True

        self.scenario.node_stop_scenario(
            instance_kill_count=2,
            node='test-node',
            timeout=60,
            poll_interval=5
        )

        # Should have 2 affected nodes for 2 iterations
        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 2)

    def test_node_start_scenario_exception(self):
        """Test node start scenario with exception during operation"""
        # Configure mock - get_instance_id succeeds but start_instances fails
        self.mock_ibm_cloud_instance.get_instance_id.return_value = 'vpc-123'
        self.mock_ibm_cloud_instance.start_instances.side_effect = Exception("API Error")

        # Should handle exception gracefully
        self.scenario.node_start_scenario(
            instance_kill_count=1,
            node='test-node',
            timeout=60,
            poll_interval=5
        )

        # Verify affected node still added even on failure
        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 1)

    def test_node_stop_scenario_exception(self):
        """Test node stop scenario with exception"""
        # Configure mock to raise SystemExit
        self.mock_ibm_cloud_instance.get_instance_id.side_effect = SystemExit(1)
|
||||
|
||||
# Should handle system exit gracefully
|
||||
with self.assertRaises(SystemExit):
|
||||
self.scenario.node_stop_scenario(
|
||||
instance_kill_count=1,
|
||||
node='test-node',
|
||||
timeout=60,
|
||||
poll_interval=5
|
||||
)
|
||||
|
||||
def test_node_reboot_scenario_exception(self):
|
||||
"""Test node reboot scenario with exception during operation"""
|
||||
# Configure mock - get_instance_id succeeds but reboot_instances fails
|
||||
self.mock_ibm_cloud_instance.get_instance_id.return_value = 'vpc-123'
|
||||
self.mock_ibm_cloud_instance.reboot_instances.side_effect = Exception("API Error")
|
||||
|
||||
# Should handle exception gracefully
|
||||
self.scenario.node_reboot_scenario(
|
||||
instance_kill_count=1,
|
||||
node='test-node',
|
||||
timeout=60,
|
||||
soft_reboot=False
|
||||
)
|
||||
|
||||
def test_node_terminate_scenario_exception(self):
|
||||
"""Test node terminate scenario with exception"""
|
||||
# Configure mock - get_instance_id succeeds but delete_instance fails
|
||||
self.mock_ibm_cloud_instance.get_instance_id.return_value = 'vpc-123'
|
||||
self.mock_ibm_cloud_instance.delete_instance.side_effect = Exception("API Error")
|
||||
|
||||
# Should handle exception gracefully
|
||||
self.scenario.node_terminate_scenario(
|
||||
instance_kill_count=1,
|
||||
node='test-node',
|
||||
timeout=60,
|
||||
poll_interval=5
|
||||
)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
||||
@@ -1,673 +0,0 @@
#!/usr/bin/env python3

"""
Test suite for IBM Cloud Power node scenarios

This test suite covers both the IbmCloudPower class and ibmcloud_power_node_scenarios class
using mocks to avoid actual IBM Cloud API calls.

IMPORTANT: These tests use comprehensive mocking and do NOT require any cloud provider
settings or credentials. No environment variables need to be set. All API clients and
external dependencies are mocked.

Test Coverage:
- TestIbmCloudPower: 31 tests for the IbmCloudPower API client class
  - Authentication, instance operations (start/stop/reboot/delete)
  - Status checking, wait operations, error handling
- TestIbmCloudPowerNodeScenarios: 10 tests for node scenario orchestration
  - Node start/stop/reboot/terminate scenarios
  - Exception handling, multiple kill counts

Usage:
    # Run all tests
    python -m unittest tests.test_ibmcloud_power_node_scenarios -v

    # Run with coverage
    python -m coverage run -a -m unittest tests/test_ibmcloud_power_node_scenarios.py -v

Assisted By: Claude Code
"""

import unittest
import sys
import json
from unittest.mock import MagicMock, patch, Mock

# Mock paramiko before importing
sys.modules['paramiko'] = MagicMock()

from krkn_lib.k8s import KrknKubernetes
from krkn_lib.models.k8s import AffectedNode, AffectedNodeStatus
from krkn.scenario_plugins.node_actions.ibmcloud_power_node_scenarios import (
    IbmCloudPower,
    ibmcloud_power_node_scenarios
)


class TestIbmCloudPower(unittest.TestCase):
    """Test cases for IbmCloudPower class"""

    def setUp(self):
        """Set up test fixtures"""
        # Set up environment variables
        self.env_patcher = patch.dict('os.environ', {
            'IBMC_APIKEY': 'test-api-key',
            'IBMC_POWER_URL': 'https://test.cloud.ibm.com',
            'IBMC_POWER_CRN': 'crn:v1:bluemix:public:power-iaas:us-south:a/abc123:instance-id::'
        })
        self.env_patcher.start()

        # Mock requests
        self.requests_patcher = patch('krkn.scenario_plugins.node_actions.ibmcloud_power_node_scenarios.requests')
        self.mock_requests = self.requests_patcher.start()

        # Mock authentication response
        mock_auth_response = Mock()
        mock_auth_response.status_code = 200
        mock_auth_response.json.return_value = {
            'access_token': 'test-token',
            'token_type': 'Bearer',
            'expires_in': 3600
        }

        self.mock_requests.request.return_value = mock_auth_response

        # Create IbmCloudPower instance
        self.ibm = IbmCloudPower()

    def tearDown(self):
        """Clean up after tests"""
        self.env_patcher.stop()
        self.requests_patcher.stop()

    def test_init_success(self):
        """Test IbmCloudPower class initialization"""
        self.assertIsNotNone(self.ibm.api_key)
        self.assertEqual(self.ibm.api_key, 'test-api-key')
        self.assertIsNotNone(self.ibm.service_url)
        self.assertEqual(self.ibm.service_url, 'https://test.cloud.ibm.com')
        self.assertIsNotNone(self.ibm.CRN)
        self.assertEqual(self.ibm.cloud_instance_id, 'instance-id')
        self.assertIsNotNone(self.ibm.token)
        self.assertIsNotNone(self.ibm.headers)

    def test_init_missing_api_key(self):
        """Test initialization fails when IBMC_APIKEY is missing"""
        with patch.dict('os.environ', {
            'IBMC_POWER_URL': 'https://test.cloud.ibm.com',
            'IBMC_POWER_CRN': 'crn:v1:bluemix:public:power-iaas:us-south:a/abc123:instance-id::'
        }, clear=True):
            with self.assertRaises(Exception) as context:
                IbmCloudPower()
            self.assertIn("IBMC_APIKEY", str(context.exception))

    def test_init_missing_power_url(self):
        """Test initialization fails when IBMC_POWER_URL is missing"""
        with patch.dict('os.environ', {
            'IBMC_APIKEY': 'test-api-key',
            'IBMC_POWER_CRN': 'crn:v1:bluemix:public:power-iaas:us-south:a/abc123:instance-id::'
        }, clear=True):
            with self.assertRaises(Exception) as context:
                IbmCloudPower()
            self.assertIn("IBMC_POWER_URL", str(context.exception))

    def test_init_missing_crn(self):
        """Test initialization fails when IBMC_POWER_CRN is missing"""
        with patch.dict('os.environ', {
            'IBMC_APIKEY': 'test-api-key',
            'IBMC_POWER_URL': 'https://test.cloud.ibm.com'
        }, clear=True):
            # The code will fail on split() before the IBMC_POWER_CRN check,
            # so we check for either AttributeError or the exception message
            with self.assertRaises((Exception, AttributeError)):
                IbmCloudPower()

    def test_authenticate_success(self):
        """Test successful authentication"""
        mock_response = Mock()
        mock_response.status_code = 200
        mock_response.json.return_value = {
            'access_token': 'new-test-token',
            'token_type': 'Bearer',
            'expires_in': 3600
        }
        self.mock_requests.request.return_value = mock_response

        self.ibm.authenticate()

        self.assertEqual(self.ibm.token['access_token'], 'new-test-token')
        self.assertIn('Authorization', self.ibm.headers)
        self.assertEqual(self.ibm.headers['Authorization'], 'Bearer new-test-token')

    def test_authenticate_failure(self):
        """Test authentication failure"""
        mock_response = Mock()
        mock_response.status_code = 401
        mock_response.raise_for_status.side_effect = Exception("Unauthorized")
        self.mock_requests.request.return_value = mock_response

        with self.assertRaises(Exception):
            self.ibm.authenticate()

    def test_get_instance_id_success(self):
        """Test getting instance ID by node name"""
        mock_response = Mock()
        mock_response.status_code = 200
        mock_response.json.return_value = {
            'pvmInstances': [
                {'serverName': 'test-node-1', 'pvmInstanceID': 'pvm-1'},
                {'serverName': 'test-node-2', 'pvmInstanceID': 'pvm-2'}
            ]
        }
        self.mock_requests.request.return_value = mock_response

        instance_id = self.ibm.get_instance_id('test-node-1')

        self.assertEqual(instance_id, 'pvm-1')

    def test_get_instance_id_not_found(self):
        """Test getting instance ID when node not found"""
        mock_response = Mock()
        mock_response.status_code = 200
        mock_response.json.return_value = {
            'pvmInstances': [
                {'serverName': 'test-node-1', 'pvmInstanceID': 'pvm-1'}
            ]
        }
        self.mock_requests.request.return_value = mock_response

        with self.assertRaises(SystemExit):
            self.ibm.get_instance_id('non-existent-node')

    def test_delete_instance_success(self):
        """Test deleting instance successfully"""
        mock_response = Mock()
        mock_response.status_code = 200
        self.mock_requests.request.return_value = mock_response

        result = self.ibm.delete_instance('pvm-123')

        self.mock_requests.request.assert_called()
        call_args = self.mock_requests.request.call_args
        self.assertIn('immediate-shutdown', call_args[1]['data'])

    def test_delete_instance_failure(self):
        """Test deleting instance with failure"""
        self.mock_requests.request.side_effect = Exception("API Error")

        result = self.ibm.delete_instance('pvm-123')

        self.assertEqual(result, False)

    def test_reboot_instances_hard_reboot(self):
        """Test hard reboot of instance"""
        mock_response = Mock()
        mock_response.status_code = 200
        self.mock_requests.request.return_value = mock_response

        result = self.ibm.reboot_instances('pvm-123', soft=False)

        self.assertTrue(result)
        call_args = self.mock_requests.request.call_args
        self.assertIn('hard-reboot', call_args[1]['data'])

    def test_reboot_instances_soft_reboot(self):
        """Test soft reboot of instance"""
        mock_response = Mock()
        mock_response.status_code = 200
        self.mock_requests.request.return_value = mock_response

        result = self.ibm.reboot_instances('pvm-123', soft=True)

        self.assertTrue(result)
        call_args = self.mock_requests.request.call_args
        self.assertIn('soft-reboot', call_args[1]['data'])

    def test_reboot_instances_failure(self):
        """Test reboot instance with failure"""
        self.mock_requests.request.side_effect = Exception("API Error")

        result = self.ibm.reboot_instances('pvm-123')

        self.assertEqual(result, False)

    def test_stop_instances_success(self):
        """Test stopping instance successfully"""
        mock_response = Mock()
        mock_response.status_code = 200
        self.mock_requests.request.return_value = mock_response

        result = self.ibm.stop_instances('pvm-123')

        self.assertTrue(result)
        call_args = self.mock_requests.request.call_args
        self.assertIn('stop', call_args[1]['data'])

    def test_stop_instances_failure(self):
        """Test stopping instance with failure"""
        self.mock_requests.request.side_effect = Exception("API Error")

        result = self.ibm.stop_instances('pvm-123')

        self.assertEqual(result, False)

    def test_start_instances_success(self):
        """Test starting instance successfully"""
        mock_response = Mock()
        mock_response.status_code = 200
        self.mock_requests.request.return_value = mock_response

        result = self.ibm.start_instances('pvm-123')

        self.assertTrue(result)
        call_args = self.mock_requests.request.call_args
        self.assertIn('start', call_args[1]['data'])

    def test_start_instances_failure(self):
        """Test starting instance with failure"""
        self.mock_requests.request.side_effect = Exception("API Error")

        result = self.ibm.start_instances('pvm-123')

        self.assertEqual(result, False)

    def test_list_instances_success(self):
        """Test listing instances successfully"""
        mock_response = Mock()
        mock_response.status_code = 200
        mock_response.json.return_value = {
            'pvmInstances': [
                type('obj', (object,), {'serverName': 'node-1', 'pvmInstanceID': 'pvm-1'}),
                type('obj', (object,), {'serverName': 'node-2', 'pvmInstanceID': 'pvm-2'})
            ]
        }
        self.mock_requests.request.return_value = mock_response

        instances = self.ibm.list_instances()

        self.assertEqual(len(instances), 2)
        self.assertEqual(instances[0]['serverName'], 'node-1')
        self.assertEqual(instances[1]['serverName'], 'node-2')

    def test_list_instances_failure(self):
        """Test listing instances with failure"""
        self.mock_requests.request.side_effect = Exception("API Error")

        with self.assertRaises(SystemExit):
            self.ibm.list_instances()

    def test_get_instance_status_success(self):
        """Test getting instance status successfully"""
        mock_response = Mock()
        mock_response.status_code = 200
        mock_response.json.return_value = {'status': 'ACTIVE'}
        self.mock_requests.request.return_value = mock_response

        status = self.ibm.get_instance_status('pvm-123')

        self.assertEqual(status, 'ACTIVE')

    def test_get_instance_status_failure(self):
        """Test getting instance status with failure"""
        self.mock_requests.request.side_effect = Exception("API Error")

        status = self.ibm.get_instance_status('pvm-123')

        self.assertIsNone(status)

    def test_wait_until_deleted_success(self):
        """Test waiting until instance is deleted"""
        mock_response = Mock()
        mock_response.status_code = 200
        mock_response.json.return_value = {'status': None}
        self.mock_requests.request.side_effect = [
            mock_response,
            Exception("Not found")
        ]

        affected_node = MagicMock(spec=AffectedNode)

        with patch('time.time', side_effect=[100, 105]), \
                patch('time.sleep'):
            result = self.ibm.wait_until_deleted('pvm-123', timeout=60, affected_node=affected_node)

        self.assertTrue(result)
        affected_node.set_affected_node_status.assert_called_once()

    def test_wait_until_deleted_timeout(self):
        """Test waiting until deleted with timeout"""
        mock_response = Mock()
        mock_response.status_code = 200
        mock_response.json.return_value = {'status': 'DELETING'}
        self.mock_requests.request.return_value = mock_response

        with patch('time.sleep'):
            result = self.ibm.wait_until_deleted('pvm-123', timeout=5)

        self.assertFalse(result)

    def test_wait_until_running_success(self):
        """Test waiting until instance is running"""
        mock_responses = [
            Mock(status_code=200, json=lambda: {'status': 'BUILD'}),
            Mock(status_code=200, json=lambda: {'status': 'ACTIVE'})
        ]
        self.mock_requests.request.side_effect = mock_responses

        affected_node = MagicMock(spec=AffectedNode)

        with patch('time.time', side_effect=[100, 105]), \
                patch('time.sleep'):
            result = self.ibm.wait_until_running('pvm-123', timeout=60, affected_node=affected_node)

        self.assertTrue(result)
        affected_node.set_affected_node_status.assert_called_once_with("running", 5)
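        # The recorded duration is 5 because the patched time.time() returns
        # 100 when the wait starts and 105 when the instance reports ACTIVE,
        # so the elapsed time passed to set_affected_node_status is 105 - 100.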

    def test_wait_until_running_timeout(self):
        """Test waiting until running with timeout"""
        mock_response = Mock()
        mock_response.status_code = 200
        mock_response.json.return_value = {'status': 'BUILD'}
        self.mock_requests.request.return_value = mock_response

        with patch('time.sleep'):
            result = self.ibm.wait_until_running('pvm-123', timeout=5)

        self.assertFalse(result)

    def test_wait_until_stopped_success(self):
        """Test waiting until instance is stopped"""
        mock_responses = [
            Mock(status_code=200, json=lambda: {'status': 'STOPPING'}),
            Mock(status_code=200, json=lambda: {'status': 'STOPPED'})
        ]
        self.mock_requests.request.side_effect = mock_responses

        affected_node = MagicMock(spec=AffectedNode)

        with patch('time.time', side_effect=[100, 105]), \
                patch('time.sleep'):
            result = self.ibm.wait_until_stopped('pvm-123', timeout=60, affected_node=affected_node)

        self.assertTrue(result)
        affected_node.set_affected_node_status.assert_called_once_with("stopped", 5)

    def test_wait_until_stopped_timeout(self):
        """Test waiting until stopped with timeout"""
        mock_response = Mock()
        mock_response.status_code = 200
        mock_response.json.return_value = {'status': 'STOPPING'}
        self.mock_requests.request.return_value = mock_response

        with patch('time.sleep'):
            result = self.ibm.wait_until_stopped('pvm-123', timeout=5, affected_node=None)

        self.assertFalse(result)

    def test_wait_until_rebooted_success(self):
        """Test waiting until instance is rebooted"""
        # wait_until_rebooted calls get_instance_status until NOT in reboot state,
        # then calls wait_until_running which also calls get_instance_status
        mock_responses = [
            Mock(status_code=200, json=lambda: {'status': 'HARD_REBOOT'}),  # First check - still rebooting
            Mock(status_code=200, json=lambda: {'status': 'ACTIVE'}),  # Second check - done rebooting
            Mock(status_code=200, json=lambda: {'status': 'ACTIVE'})  # wait_until_running check
        ]
        self.mock_requests.request.side_effect = mock_responses

        affected_node = MagicMock(spec=AffectedNode)

        # Mock all time() calls - need many values because logging uses time.time() extensively
        time_values = [100] * 20  # Just provide enough time values
        with patch('time.time', side_effect=time_values), \
                patch('time.sleep'):
            result = self.ibm.wait_until_rebooted('pvm-123', timeout=60, affected_node=affected_node)

        self.assertTrue(result)
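
        # A minimal sketch of the two-phase wait described above (assumed
        # shape, not the actual implementation; REBOOT_STATES is hypothetical):
        #
        #   while self.get_instance_status(instance_id) in REBOOT_STATES:
        #       time.sleep(poll_interval)          # phase 1: leave reboot state
        #   return self.wait_until_running(        # phase 2: reach ACTIVE
        #       instance_id, timeout, affected_node=affected_node)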

    def test_wait_until_rebooted_timeout(self):
        """Test waiting until rebooted with timeout"""
        mock_response = Mock()
        mock_response.status_code = 200
        mock_response.json.return_value = {'status': 'HARD_REBOOT'}
        self.mock_requests.request.return_value = mock_response

        with patch('time.sleep'):
            result = self.ibm.wait_until_rebooted('pvm-123', timeout=5, affected_node=None)

        self.assertFalse(result)

    def test_find_id_in_list(self):
        """Test finding ID in VPC list"""
        vpc_list = [
            {'vpc_name': 'vpc-1', 'vpc_id': 'id-1'},
            {'vpc_name': 'vpc-2', 'vpc_id': 'id-2'}
        ]

        vpc_id = self.ibm.find_id_in_list('vpc-2', vpc_list)

        self.assertEqual(vpc_id, 'id-2')

    def test_find_id_in_list_not_found(self):
        """Test finding ID in VPC list when not found"""
        vpc_list = [
            {'vpc_name': 'vpc-1', 'vpc_id': 'id-1'}
        ]

        vpc_id = self.ibm.find_id_in_list('vpc-3', vpc_list)

        self.assertIsNone(vpc_id)


class TestIbmCloudPowerNodeScenarios(unittest.TestCase):
    """Test cases for ibmcloud_power_node_scenarios class"""

    def setUp(self):
        """Set up test fixtures"""
        # Mock KrknKubernetes
        self.mock_kubecli = MagicMock(spec=KrknKubernetes)
        self.affected_nodes_status = AffectedNodeStatus()

        # Mock the IbmCloudPower class entirely to avoid any real API calls
        self.ibm_cloud_patcher = patch('krkn.scenario_plugins.node_actions.ibmcloud_power_node_scenarios.IbmCloudPower')
        self.mock_ibm_cloud_class = self.ibm_cloud_patcher.start()

        # Create a mock instance that will be returned when IbmCloudPower() is called
        self.mock_ibm_cloud_instance = MagicMock()
        self.mock_ibm_cloud_class.return_value = self.mock_ibm_cloud_instance

        # Create ibmcloud_power_node_scenarios instance
        self.scenario = ibmcloud_power_node_scenarios(
            kubecli=self.mock_kubecli,
            node_action_kube_check=True,
            affected_nodes_status=self.affected_nodes_status,
            disable_ssl_verification=False
        )

    def tearDown(self):
        """Clean up after tests"""
        self.ibm_cloud_patcher.stop()

    def test_init(self):
        """Test ibmcloud_power_node_scenarios initialization"""
        self.assertIsNotNone(self.scenario.ibmcloud_power)
        self.assertTrue(self.scenario.node_action_kube_check)
        self.assertEqual(self.scenario.kubecli, self.mock_kubecli)

    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_ready_status')
    def test_node_start_scenario_success(self, mock_wait_ready):
        """Test node start scenario successfully"""
        # Configure mock methods
        self.mock_ibm_cloud_instance.get_instance_id.return_value = 'pvm-123'
        self.mock_ibm_cloud_instance.start_instances.return_value = True
        self.mock_ibm_cloud_instance.wait_until_running.return_value = True

        self.scenario.node_start_scenario(
            instance_kill_count=1,
            node='test-node',
            timeout=60,
            poll_interval=5
        )

        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 1)
        self.assertEqual(self.affected_nodes_status.affected_nodes[0].node_name, 'test-node')
        mock_wait_ready.assert_called_once()

    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_ready_status')
    def test_node_start_scenario_no_kube_check(self, mock_wait_ready):
        """Test node start scenario without Kubernetes check"""
        self.scenario.node_action_kube_check = False

        # Configure mock methods
        self.mock_ibm_cloud_instance.get_instance_id.return_value = 'pvm-123'
        self.mock_ibm_cloud_instance.start_instances.return_value = True
        self.mock_ibm_cloud_instance.wait_until_running.return_value = True

        self.scenario.node_start_scenario(
            instance_kill_count=1,
            node='test-node',
            timeout=60,
            poll_interval=5
        )

        mock_wait_ready.assert_not_called()

    def test_node_stop_scenario_success(self):
        """Test node stop scenario successfully"""
        # Configure mock methods
        self.mock_ibm_cloud_instance.get_instance_id.return_value = 'pvm-123'
        self.mock_ibm_cloud_instance.stop_instances.return_value = True
        self.mock_ibm_cloud_instance.wait_until_stopped.return_value = True

        self.scenario.node_stop_scenario(
            instance_kill_count=1,
            node='test-node',
            timeout=60,
            poll_interval=5
        )

        # Verify methods were called
        self.mock_ibm_cloud_instance.get_instance_id.assert_called_once_with('test-node')
        self.mock_ibm_cloud_instance.stop_instances.assert_called_once_with('pvm-123')

        # Note: the stop scenario does not append to affected_nodes, so no count is asserted here

    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_unknown_status')
    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_ready_status')
    def test_node_reboot_scenario_hard_reboot(self, mock_wait_ready, mock_wait_unknown):
        """Test node hard reboot scenario"""
        # Configure mock methods
        self.mock_ibm_cloud_instance.get_instance_id.return_value = 'pvm-123'
        self.mock_ibm_cloud_instance.reboot_instances.return_value = True
        self.mock_ibm_cloud_instance.wait_until_rebooted.return_value = True

        self.scenario.node_reboot_scenario(
            instance_kill_count=1,
            node='test-node',
            timeout=60,
            soft_reboot=False
        )

        # Verify methods were called
        self.mock_ibm_cloud_instance.reboot_instances.assert_called_once_with('pvm-123', False)
        mock_wait_unknown.assert_called_once()
        mock_wait_ready.assert_called_once()

        # Note: the reboot scenario does not append to affected_nodes, so no count is asserted here

    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_unknown_status')
    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_ready_status')
    def test_node_reboot_scenario_soft_reboot(self, mock_wait_ready, mock_wait_unknown):
        """Test node soft reboot scenario"""
        # Configure mock methods
        self.mock_ibm_cloud_instance.get_instance_id.return_value = 'pvm-123'
        self.mock_ibm_cloud_instance.reboot_instances.return_value = True
        self.mock_ibm_cloud_instance.wait_until_rebooted.return_value = True

        self.scenario.node_reboot_scenario(
            instance_kill_count=1,
            node='test-node',
            timeout=60,
            soft_reboot=True
        )

        # Verify methods were called
        self.mock_ibm_cloud_instance.reboot_instances.assert_called_once_with('pvm-123', True)
        mock_wait_unknown.assert_called_once()
        mock_wait_ready.assert_called_once()

        # Note: the reboot scenario does not append to affected_nodes, so no count is asserted here

    def test_node_terminate_scenario_success(self):
        """Test node terminate scenario successfully"""
        # Configure mock methods
        self.mock_ibm_cloud_instance.get_instance_id.return_value = 'pvm-123'
        self.mock_ibm_cloud_instance.delete_instance.return_value = None
        self.mock_ibm_cloud_instance.wait_until_deleted.return_value = True

        self.scenario.node_terminate_scenario(
            instance_kill_count=1,
            node='test-node',
            timeout=60,
            poll_interval=5
        )

        # Verify methods were called
        self.mock_ibm_cloud_instance.delete_instance.assert_called_once_with('pvm-123')
        self.mock_ibm_cloud_instance.wait_until_deleted.assert_called_once()

        # Note: the terminate scenario does not append to affected_nodes, so no count is asserted here

    def test_node_scenario_multiple_kill_count(self):
        """Test node scenario with multiple kill count"""
        # Configure mock methods
        self.mock_ibm_cloud_instance.get_instance_id.return_value = 'pvm-123'
        self.mock_ibm_cloud_instance.stop_instances.return_value = True
        self.mock_ibm_cloud_instance.wait_until_stopped.return_value = True

        self.scenario.node_stop_scenario(
            instance_kill_count=2,
            node='test-node',
            timeout=60,
            poll_interval=5
        )

        # Verify stop was called twice (kill_count=2)
        self.assertEqual(self.mock_ibm_cloud_instance.stop_instances.call_count, 2)

        # Note: the stop scenario does not append to affected_nodes, so no count is asserted here

    def test_node_start_scenario_exception(self):
        """Test node start scenario with exception during operation"""
        # Configure mock - get_instance_id succeeds but start_instances fails
        self.mock_ibm_cloud_instance.get_instance_id.return_value = 'pvm-123'
        self.mock_ibm_cloud_instance.start_instances.side_effect = Exception("API Error")

        # Should handle exception gracefully
        self.scenario.node_start_scenario(
            instance_kill_count=1,
            node='test-node',
            timeout=60,
            poll_interval=5
        )

        # Verify affected node still added even on failure
        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 1)

    def test_node_reboot_scenario_exception(self):
        """Test node reboot scenario with exception during operation"""
        # Configure mock - get_instance_id succeeds but reboot_instances fails
        self.mock_ibm_cloud_instance.get_instance_id.return_value = 'pvm-123'
        self.mock_ibm_cloud_instance.reboot_instances.side_effect = Exception("API Error")

        # Should handle exception gracefully
        self.scenario.node_reboot_scenario(
            instance_kill_count=1,
            node='test-node',
            timeout=60,
            soft_reboot=False
        )


if __name__ == '__main__':
    unittest.main()
@@ -1,726 +0,0 @@
import unittest
from unittest.mock import Mock, patch
from arcaflow_plugin_sdk import plugin

from krkn.scenario_plugins.native.network import ingress_shaping


class NetworkScenariosTest(unittest.TestCase):

    def test_serialization(self):
        """Test serialization of configuration and output objects"""
        plugin.test_object_serialization(
            ingress_shaping.NetworkScenarioConfig(
                node_interface_name={"foo": ["bar"]},
                network_params={
                    "latency": "50ms",
                    "loss": "0.02",
                    "bandwidth": "100mbit",
                },
            ),
            self.fail,
        )
        plugin.test_object_serialization(
            ingress_shaping.NetworkScenarioSuccessOutput(
                filter_direction="ingress",
                test_interfaces={"foo": ["bar"]},
                network_parameters={
                    "latency": "50ms",
                    "loss": "0.02",
                    "bandwidth": "100mbit",
                },
                execution_type="parallel",
            ),
            self.fail,
        )
        plugin.test_object_serialization(
            ingress_shaping.NetworkScenarioErrorOutput(
                error="Hello World",
            ),
            self.fail,
        )

    @patch('krkn.scenario_plugins.native.network.ingress_shaping.kube_helper')
    def test_get_default_interface(self, mock_kube_helper):
        """Test getting default interface from a node"""
        # Setup mocks
        mock_cli = Mock()
        mock_pod_template = Mock()
        mock_pod_template.render.return_value = "pod_yaml_content"

        mock_kube_helper.create_pod.return_value = None
        mock_kube_helper.exec_cmd_in_pod.return_value = (
            "default via 192.168.1.1 dev eth0 proto dhcp metric 100\n"
            "172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1"
        )
        mock_kube_helper.delete_pod.return_value = None

        # Test
        result = ingress_shaping.get_default_interface(
            node="test-node",
            pod_template=mock_pod_template,
            cli=mock_cli,
            image="quay.io/krkn-chaos/krkn:tools"
        )

        # Assertions
        self.assertEqual(result, ["eth0"])
        mock_kube_helper.create_pod.assert_called_once()
        mock_kube_helper.exec_cmd_in_pod.assert_called_once()
        mock_kube_helper.delete_pod.assert_called_once()

    @patch('krkn.scenario_plugins.native.network.ingress_shaping.kube_helper')
    def test_verify_interface_with_empty_list(self, mock_kube_helper):
        """Test verifying interface when input list is empty"""
        # Setup mocks
        mock_cli = Mock()
        mock_pod_template = Mock()
        mock_pod_template.render.return_value = "pod_yaml_content"

        mock_kube_helper.create_pod.return_value = None
        mock_kube_helper.exec_cmd_in_pod.return_value = (
            "default via 192.168.1.1 dev eth0 proto dhcp metric 100\n"
        )
        mock_kube_helper.delete_pod.return_value = None

        # Test
        result = ingress_shaping.verify_interface(
            input_interface_list=[],
            node="test-node",
            pod_template=mock_pod_template,
            cli=mock_cli,
            image="quay.io/krkn-chaos/krkn:tools"
        )

        # Assertions
        self.assertEqual(result, ["eth0"])

    @patch('krkn.scenario_plugins.native.network.ingress_shaping.kube_helper')
    def test_verify_interface_with_valid_interfaces(self, mock_kube_helper):
        """Test verifying interface with valid interface list"""
        # Setup mocks
        mock_cli = Mock()
        mock_pod_template = Mock()
        mock_pod_template.render.return_value = "pod_yaml_content"

        mock_kube_helper.create_pod.return_value = None
        mock_kube_helper.exec_cmd_in_pod.return_value = (
            "eth0 UP 192.168.1.10/24\n"
            "eth1 UP 10.0.0.5/24\n"
            "lo UNKNOWN 127.0.0.1/8\n"
        )
        mock_kube_helper.delete_pod.return_value = None

        # Test
        result = ingress_shaping.verify_interface(
            input_interface_list=["eth0", "eth1"],
            node="test-node",
            pod_template=mock_pod_template,
            cli=mock_cli,
            image="quay.io/krkn-chaos/krkn:tools"
        )

        # Assertions
        self.assertEqual(result, ["eth0", "eth1"])

    @patch('krkn.scenario_plugins.native.network.ingress_shaping.kube_helper')
    def test_verify_interface_with_invalid_interface(self, mock_kube_helper):
        """Test verifying interface with an interface that doesn't exist"""
        # Setup mocks
        mock_cli = Mock()
        mock_pod_template = Mock()
        mock_pod_template.render.return_value = "pod_yaml_content"

        mock_kube_helper.create_pod.return_value = None
        mock_kube_helper.exec_cmd_in_pod.return_value = (
            "eth0 UP 192.168.1.10/24\n"
            "lo UNKNOWN 127.0.0.1/8\n"
        )
        mock_kube_helper.delete_pod.return_value = None

        # Test - should raise exception
        with self.assertRaises(Exception) as context:
            ingress_shaping.verify_interface(
                input_interface_list=["eth0", "eth99"],
                node="test-node",
                pod_template=mock_pod_template,
                cli=mock_cli,
                image="quay.io/krkn-chaos/krkn:tools"
            )

        self.assertIn("Interface eth99 not found", str(context.exception))

    @patch('krkn.scenario_plugins.native.network.ingress_shaping.get_default_interface')
    @patch('krkn.scenario_plugins.native.network.ingress_shaping.kube_helper')
    def test_get_node_interfaces_with_label_selector(self, mock_kube_helper, mock_get_default_interface):
        """Test getting node interfaces using label selector"""
        # Setup mocks
        mock_cli = Mock()
        mock_pod_template = Mock()
        mock_kube_helper.get_node.return_value = ["node1", "node2"]
        mock_get_default_interface.return_value = ["eth0"]

        # Test
        result = ingress_shaping.get_node_interfaces(
            node_interface_dict=None,
            label_selector="node-role.kubernetes.io/worker",
            instance_count=2,
            pod_template=mock_pod_template,
            cli=mock_cli,
            image="quay.io/krkn-chaos/krkn:tools"
        )

        # Assertions
        self.assertEqual(result, {"node1": ["eth0"], "node2": ["eth0"]})
        self.assertEqual(mock_get_default_interface.call_count, 2)

    @patch('krkn.scenario_plugins.native.network.ingress_shaping.verify_interface')
    @patch('krkn.scenario_plugins.native.network.ingress_shaping.kube_helper')
    def test_get_node_interfaces_with_node_dict(self, mock_kube_helper, mock_verify_interface):
        """Test getting node interfaces with provided node interface dictionary"""
        # Setup mocks
        mock_cli = Mock()
        mock_pod_template = Mock()
        mock_kube_helper.get_node.return_value = ["node1"]
        mock_verify_interface.return_value = ["eth0", "eth1"]

        # Test
        result = ingress_shaping.get_node_interfaces(
            node_interface_dict={"node1": ["eth0", "eth1"]},
            label_selector=None,
            instance_count=1,
            pod_template=mock_pod_template,
            cli=mock_cli,
            image="quay.io/krkn-chaos/krkn:tools"
        )

        # Assertions
        self.assertEqual(result, {"node1": ["eth0", "eth1"]})
        mock_verify_interface.assert_called_once()

    @patch('krkn.scenario_plugins.native.network.ingress_shaping.kube_helper')
    def test_get_node_interfaces_no_selector_no_dict(self, mock_kube_helper):
        """Test that exception is raised when both node dict and label selector are missing"""
        mock_cli = Mock()
        mock_pod_template = Mock()

        with self.assertRaises(Exception) as context:
            ingress_shaping.get_node_interfaces(
                node_interface_dict=None,
                label_selector=None,
                instance_count=1,
                pod_template=mock_pod_template,
                cli=mock_cli,
                image="quay.io/krkn-chaos/krkn:tools"
            )

        self.assertIn("label selector must be provided", str(context.exception))

    @patch('krkn.scenario_plugins.native.network.ingress_shaping.kube_helper')
    def test_create_ifb(self, mock_kube_helper):
        """Test creating virtual interfaces"""
        mock_cli = Mock()
        mock_kube_helper.exec_cmd_in_pod.return_value = None

        # Test
        ingress_shaping.create_ifb(cli=mock_cli, number=2, pod_name="test-pod")

        # Assertions
        # Should call modprobe once and ip link set for each interface
        self.assertEqual(mock_kube_helper.exec_cmd_in_pod.call_count, 3)

        # Verify modprobe call
        first_call = mock_kube_helper.exec_cmd_in_pod.call_args_list[0]
        self.assertIn("modprobe", first_call[0][1])
        self.assertIn("numifbs=2", first_call[0][1])

    @patch('krkn.scenario_plugins.native.network.ingress_shaping.kube_helper')
    def test_delete_ifb(self, mock_kube_helper):
        """Test deleting virtual interfaces"""
        mock_cli = Mock()
        mock_kube_helper.exec_cmd_in_pod.return_value = None

        # Test
        ingress_shaping.delete_ifb(cli=mock_cli, pod_name="test-pod")

        # Assertions
        mock_kube_helper.exec_cmd_in_pod.assert_called_once()
        call_args = mock_kube_helper.exec_cmd_in_pod.call_args[0][1]
        self.assertIn("modprobe", call_args)
        self.assertIn("-r", call_args)
        self.assertIn("ifb", call_args)

    @patch('krkn.scenario_plugins.native.network.ingress_shaping.kube_helper')
    def test_get_job_pods(self, mock_kube_helper):
        """Test getting pods associated with a job"""
        mock_cli = Mock()
        mock_api_response = Mock()
        mock_api_response.metadata.labels = {"controller-uid": "test-uid-123"}

        mock_kube_helper.list_pods.return_value = ["pod1", "pod2"]

        # Test
        result = ingress_shaping.get_job_pods(cli=mock_cli, api_response=mock_api_response)

        # Assertions
        self.assertEqual(result, "pod1")
        mock_kube_helper.list_pods.assert_called_once_with(
            mock_cli,
            label_selector="controller-uid=test-uid-123",
            namespace="default"
        )
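
        # The Job controller labels the pods it creates with the job's
        # controller-uid, so listing pods by that label selector is how
        # get_job_pods finds the pod belonging to a given job.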

    @patch('time.sleep', return_value=None)
    @patch('time.time')
    @patch('krkn.scenario_plugins.native.network.ingress_shaping.kube_helper')
    def test_wait_for_job_success(self, mock_kube_helper, mock_time, mock_sleep):
        """Test waiting for jobs to complete successfully"""
        mock_batch_cli = Mock()
        mock_time.side_effect = [0, 10, 20]  # Simulate time progression

        # First job succeeds
        mock_response1 = Mock()
        mock_response1.status.succeeded = 1
        mock_response1.status.failed = None

        # Second job succeeds
        mock_response2 = Mock()
        mock_response2.status.succeeded = 1
        mock_response2.status.failed = None

        mock_kube_helper.get_job_status.side_effect = [mock_response1, mock_response2]

        # Test
        ingress_shaping.wait_for_job(
            batch_cli=mock_batch_cli,
            job_list=["job1", "job2"],
            timeout=300
        )

        # Assertions
        self.assertEqual(mock_kube_helper.get_job_status.call_count, 2)

    @patch('time.sleep', return_value=None)
    @patch('time.time')
    @patch('krkn.scenario_plugins.native.network.ingress_shaping.kube_helper')
    def test_wait_for_job_timeout(self, mock_kube_helper, mock_time, mock_sleep):
        """Test waiting for jobs times out"""
        mock_batch_cli = Mock()
        mock_time.side_effect = [0, 350]  # Simulate timeout

        mock_response = Mock()
        mock_response.status.succeeded = None
        mock_response.status.failed = None

        mock_kube_helper.get_job_status.return_value = mock_response

        # Test - should raise exception
        with self.assertRaises(Exception) as context:
            ingress_shaping.wait_for_job(
                batch_cli=mock_batch_cli,
                job_list=["job1"],
                timeout=300
            )

        self.assertIn("timeout", str(context.exception))

    @patch('krkn.scenario_plugins.native.network.ingress_shaping.kube_helper')
    def test_delete_jobs(self, mock_kube_helper):
        """Test deleting jobs"""
        mock_cli = Mock()
        mock_batch_cli = Mock()

        mock_response = Mock()
        mock_response.status.failed = None
        mock_kube_helper.get_job_status.return_value = mock_response
        mock_kube_helper.delete_job.return_value = None

        # Test
        ingress_shaping.delete_jobs(
            cli=mock_cli,
            batch_cli=mock_batch_cli,
            job_list=["job1", "job2"]
        )

        # Assertions
        self.assertEqual(mock_kube_helper.get_job_status.call_count, 2)
        self.assertEqual(mock_kube_helper.delete_job.call_count, 2)

    @patch('krkn.scenario_plugins.native.network.ingress_shaping.get_job_pods')
    @patch('krkn.scenario_plugins.native.network.ingress_shaping.kube_helper')
    def test_delete_jobs_with_failed_job(self, mock_kube_helper, mock_get_job_pods):
        """Test deleting jobs when one has failed"""
        mock_cli = Mock()
        mock_batch_cli = Mock()

        mock_response = Mock()
        mock_response.status.failed = 1

        mock_pod_status = Mock()
        mock_pod_status.status.container_statuses = []

        mock_log_response = Mock()
        mock_log_response.data.decode.return_value = "Error log content"

        mock_kube_helper.get_job_status.return_value = mock_response
        mock_get_job_pods.return_value = "failed-pod"
        mock_kube_helper.read_pod.return_value = mock_pod_status
        mock_kube_helper.get_pod_log.return_value = mock_log_response
        mock_kube_helper.delete_job.return_value = None

        # Test
        ingress_shaping.delete_jobs(
            cli=mock_cli,
            batch_cli=mock_batch_cli,
            job_list=["failed-job"]
        )

        # Assertions
        mock_kube_helper.read_pod.assert_called_once()
        mock_kube_helper.get_pod_log.assert_called_once()

    def test_get_ingress_cmd_basic(self):
        """Test generating ingress traffic shaping commands"""
        result = ingress_shaping.get_ingress_cmd(
            interface_list=["eth0"],
            network_parameters={"latency": "50ms"},
            duration=120
        )

        # Assertions
        self.assertIn("tc qdisc add dev eth0 handle ffff: ingress", result)
        self.assertIn("tc filter add dev eth0", result)
        self.assertIn("ifb0", result)
        self.assertIn("delay 50ms", result)
        self.assertIn("sleep 120", result)
        self.assertIn("tc qdisc del", result)

    def test_get_ingress_cmd_multiple_interfaces(self):
        """Test generating commands for multiple interfaces"""
        result = ingress_shaping.get_ingress_cmd(
            interface_list=["eth0", "eth1"],
            network_parameters={"latency": "50ms", "bandwidth": "100mbit"},
            duration=120
        )

        # Assertions
        self.assertIn("eth0", result)
        self.assertIn("eth1", result)
        self.assertIn("ifb0", result)
        self.assertIn("ifb1", result)
        self.assertIn("delay 50ms", result)
        self.assertIn("rate 100mbit", result)

    def test_get_ingress_cmd_all_parameters(self):
        """Test generating commands with all network parameters"""
        result = ingress_shaping.get_ingress_cmd(
            interface_list=["eth0"],
            network_parameters={
                "latency": "50ms",
                "loss": "0.02",
                "bandwidth": "100mbit"
            },
            duration=120
        )

        # Assertions
        self.assertIn("delay 50ms", result)
        self.assertIn("loss 0.02", result)
        self.assertIn("rate 100mbit", result)

    def test_get_ingress_cmd_invalid_interface(self):
        """Test that invalid interface names raise an exception"""
        with self.assertRaises(Exception) as context:
            ingress_shaping.get_ingress_cmd(
                interface_list=["eth0; rm -rf /"],
                network_parameters={"latency": "50ms"},
                duration=120
            )

        self.assertIn("does not match the required regex pattern", str(context.exception))
|
||||
|
||||
@patch('yaml.safe_load')
|
||||
@patch('krkn.scenario_plugins.native.network.ingress_shaping.create_virtual_interfaces')
|
||||
@patch('krkn.scenario_plugins.native.network.ingress_shaping.get_ingress_cmd')
|
||||
@patch('krkn.scenario_plugins.native.network.ingress_shaping.kube_helper')
|
||||
def test_apply_ingress_filter(self, mock_kube_helper, mock_get_cmd, mock_create_virtual, mock_yaml):
|
||||
"""Test applying ingress filters to a node"""
|
||||
# Setup mocks
|
||||
mock_cli = Mock()
|
||||
mock_batch_cli = Mock()
|
||||
mock_pod_template = Mock()
|
||||
mock_job_template = Mock()
|
||||
mock_job_template.render.return_value = "job_yaml"
|
||||
|
||||
mock_cfg = ingress_shaping.NetworkScenarioConfig(
|
||||
node_interface_name={"node1": ["eth0"]},
|
||||
network_params={"latency": "50ms"},
|
||||
test_duration=120
|
||||
)
|
||||
|
||||
mock_yaml.return_value = {"metadata": {"name": "test-job"}}
|
||||
mock_get_cmd.return_value = "tc commands"
|
||||
mock_kube_helper.create_job.return_value = Mock()
|
||||
|
||||
# Test
|
||||
result = ingress_shaping.apply_ingress_filter(
|
||||
cfg=mock_cfg,
|
||||
interface_list=["eth0"],
|
||||
node="node1",
|
||||
pod_template=mock_pod_template,
|
||||
job_template=mock_job_template,
|
||||
batch_cli=mock_batch_cli,
|
||||
cli=mock_cli,
|
||||
create_interfaces=True,
|
||||
param_selector="all",
|
||||
image="quay.io/krkn-chaos/krkn:tools"
|
||||
)
|
||||
|
||||
# Assertions
|
||||
mock_create_virtual.assert_called_once()
|
||||
mock_get_cmd.assert_called_once()
|
||||
mock_kube_helper.create_job.assert_called_once()
|
||||
self.assertEqual(result, "test-job")
|
||||
|
||||
@patch('krkn.scenario_plugins.native.network.ingress_shaping.kube_helper')
|
||||
def test_create_virtual_interfaces(self, mock_kube_helper):
|
||||
"""Test creating virtual interfaces on a node"""
|
||||
mock_cli = Mock()
|
||||
mock_pod_template = Mock()
|
||||
mock_pod_template.render.return_value = "pod_yaml"
|
||||
|
||||
mock_kube_helper.create_pod.return_value = None
|
||||
mock_kube_helper.exec_cmd_in_pod.return_value = None
|
||||
mock_kube_helper.delete_pod.return_value = None
|
||||
|
||||
# Test
|
||||
ingress_shaping.create_virtual_interfaces(
|
||||
cli=mock_cli,
|
||||
interface_list=["eth0", "eth1"],
|
||||
node="test-node",
|
||||
pod_template=mock_pod_template,
|
||||
image="quay.io/krkn-chaos/krkn:tools"
|
||||
)
|
||||
|
||||
# Assertions
|
||||
mock_kube_helper.create_pod.assert_called_once()
|
||||
mock_kube_helper.delete_pod.assert_called_once()
|
||||
|
||||
@patch('krkn.scenario_plugins.native.network.ingress_shaping.delete_ifb')
|
||||
@patch('krkn.scenario_plugins.native.network.ingress_shaping.kube_helper')
|
||||
def test_delete_virtual_interfaces(self, mock_kube_helper, mock_delete_ifb):
|
||||
"""Test deleting virtual interfaces from nodes"""
|
||||
mock_cli = Mock()
|
||||
mock_pod_template = Mock()
|
||||
mock_pod_template.render.return_value = "pod_yaml"
|
||||
|
||||
mock_kube_helper.create_pod.return_value = None
|
||||
mock_kube_helper.delete_pod.return_value = None
|
||||
|
||||
# Test
|
||||
ingress_shaping.delete_virtual_interfaces(
|
||||
cli=mock_cli,
|
||||
node_list=["node1", "node2"],
|
||||
pod_template=mock_pod_template,
|
||||
image="quay.io/krkn-chaos/krkn:tools"
|
||||
)
|
||||
|
||||
# Assertions
|
||||
self.assertEqual(mock_kube_helper.create_pod.call_count, 2)
|
||||
self.assertEqual(mock_delete_ifb.call_count, 2)
|
||||
self.assertEqual(mock_kube_helper.delete_pod.call_count, 2)
|
||||
|
||||
@patch('krkn.scenario_plugins.native.network.ingress_shaping.Environment')
|
||||
@patch('krkn.scenario_plugins.native.network.ingress_shaping.FileSystemLoader')
|
||||
@patch('yaml.safe_load')
|
||||
@patch('krkn.scenario_plugins.native.network.ingress_shaping.delete_jobs')
|
||||
@patch('krkn.scenario_plugins.native.network.ingress_shaping.delete_virtual_interfaces')
|
||||
@patch('krkn.scenario_plugins.native.network.ingress_shaping.wait_for_job')
|
||||
@patch('krkn.scenario_plugins.native.network.ingress_shaping.apply_ingress_filter')
|
||||
@patch('krkn.scenario_plugins.native.network.ingress_shaping.get_node_interfaces')
|
||||
@patch('krkn.scenario_plugins.native.network.ingress_shaping.kube_helper')
|
||||
def test_network_chaos_parallel_execution(
|
||||
self, mock_kube_helper, mock_get_nodes, mock_apply_filter,
|
||||
mock_wait_job, mock_delete_virtual, mock_delete_jobs, mock_yaml,
|
||||
mock_file_loader, mock_env
|
||||
):
|
||||
"""Test network chaos with parallel execution"""
|
||||
# Setup mocks
|
||||
mock_cli = Mock()
|
||||
mock_batch_cli = Mock()
|
||||
mock_yaml.return_value = {"metadata": {"name": "test-pod"}}
|
||||
mock_kube_helper.setup_kubernetes.return_value = (mock_cli, mock_batch_cli)
|
||||
mock_get_nodes.return_value = {"node1": ["eth0"], "node2": ["eth1"]}
|
||||
mock_apply_filter.side_effect = ["job1", "job2"]
|
||||
|
||||
# Test
|
||||
cfg = ingress_shaping.NetworkScenarioConfig(
|
||||
label_selector="node-role.kubernetes.io/worker",
|
||||
instance_count=2,
|
||||
network_params={"latency": "50ms"},
|
||||
execution_type="parallel",
|
||||
test_duration=120,
|
||||
wait_duration=30
|
||||
)
|
||||
|
||||
output_id, output_data = ingress_shaping.network_chaos(params=cfg, run_id="test-run")
|
||||
|
||||
# Assertions
|
||||
self.assertEqual(output_id, "success")
|
||||
self.assertEqual(output_data.filter_direction, "ingress")
|
||||
self.assertEqual(output_data.execution_type, "parallel")
|
||||
self.assertEqual(mock_apply_filter.call_count, 2)
|
||||
mock_wait_job.assert_called_once()
|
||||
mock_delete_virtual.assert_called_once()
|
||||
|
||||
@patch('krkn.scenario_plugins.native.network.ingress_shaping.Environment')
|
||||
@patch('krkn.scenario_plugins.native.network.ingress_shaping.FileSystemLoader')
|
||||
@patch('yaml.safe_load')
|
||||
@patch('time.sleep', return_value=None)
|
||||
@patch('krkn.scenario_plugins.native.network.ingress_shaping.delete_jobs')
|
||||
@patch('krkn.scenario_plugins.native.network.ingress_shaping.delete_virtual_interfaces')
|
||||
@patch('krkn.scenario_plugins.native.network.ingress_shaping.wait_for_job')
|
||||
@patch('krkn.scenario_plugins.native.network.ingress_shaping.apply_ingress_filter')
|
||||
@patch('krkn.scenario_plugins.native.network.ingress_shaping.get_node_interfaces')
|
||||
@patch('krkn.scenario_plugins.native.network.ingress_shaping.kube_helper')
|
||||
def test_network_chaos_serial_execution(
|
||||
self, mock_kube_helper, mock_get_nodes, mock_apply_filter,
|
||||
mock_wait_job, mock_delete_virtual, mock_delete_jobs, mock_sleep, mock_yaml,
|
||||
mock_file_loader, mock_env
|
||||
):
|
||||
"""Test network chaos with serial execution"""
|
||||
# Setup mocks
|
||||
mock_cli = Mock()
|
||||
mock_batch_cli = Mock()
|
||||
mock_yaml.return_value = {"metadata": {"name": "test-pod"}}
|
||||
mock_kube_helper.setup_kubernetes.return_value = (mock_cli, mock_batch_cli)
|
||||
mock_get_nodes.return_value = {"node1": ["eth0"]}
|
||||
mock_apply_filter.return_value = "job1"
|
||||
|
||||
# Test
|
||||
cfg = ingress_shaping.NetworkScenarioConfig(
|
||||
label_selector="node-role.kubernetes.io/worker",
|
||||
instance_count=1,
|
||||
network_params={"latency": "50ms", "bandwidth": "100mbit"},
|
||||
execution_type="serial",
|
||||
test_duration=120,
|
||||
wait_duration=30
|
||||
)
|
||||
|
||||
output_id, output_data = ingress_shaping.network_chaos(params=cfg, run_id="test-run")
|
||||
|
||||
# Assertions
|
||||
self.assertEqual(output_id, "success")
|
||||
self.assertEqual(output_data.execution_type, "serial")
|
||||
# Should be called once per parameter per node
|
||||
self.assertEqual(mock_apply_filter.call_count, 2)
|
||||
# Should wait for jobs twice (once per parameter)
|
||||
self.assertEqual(mock_wait_job.call_count, 2)
|
||||
|
||||
@patch('krkn.scenario_plugins.native.network.ingress_shaping.Environment')
|
||||
@patch('krkn.scenario_plugins.native.network.ingress_shaping.FileSystemLoader')
|
||||
@patch('yaml.safe_load')
|
||||
    @patch('krkn.scenario_plugins.native.network.ingress_shaping.delete_jobs')
    @patch('krkn.scenario_plugins.native.network.ingress_shaping.delete_virtual_interfaces')
    @patch('krkn.scenario_plugins.native.network.ingress_shaping.get_node_interfaces')
    @patch('krkn.scenario_plugins.native.network.ingress_shaping.kube_helper')
    def test_network_chaos_invalid_execution_type(
        self, mock_kube_helper, mock_get_nodes, mock_delete_virtual, mock_delete_jobs, mock_yaml,
        mock_file_loader, mock_env
    ):
        """Test network chaos with invalid execution type"""
        # Setup mocks
        mock_cli = Mock()
        mock_batch_cli = Mock()
        mock_yaml.return_value = {"metadata": {"name": "test-pod"}}
        mock_kube_helper.setup_kubernetes.return_value = (mock_cli, mock_batch_cli)
        mock_get_nodes.return_value = {"node1": ["eth0"]}

        # Test
        cfg = ingress_shaping.NetworkScenarioConfig(
            label_selector="node-role.kubernetes.io/worker",
            instance_count=1,
            network_params={"latency": "50ms"},
            execution_type="invalid_type",
            test_duration=120
        )

        output_id, output_data = ingress_shaping.network_chaos(params=cfg, run_id="test-run")

        # Assertions
        self.assertEqual(output_id, "error")
        self.assertIn("Invalid execution type", output_data.error)

    @patch('krkn.scenario_plugins.native.network.ingress_shaping.Environment')
    @patch('krkn.scenario_plugins.native.network.ingress_shaping.FileSystemLoader')
    @patch('yaml.safe_load')
    @patch('krkn.scenario_plugins.native.network.ingress_shaping.delete_jobs')
    @patch('krkn.scenario_plugins.native.network.ingress_shaping.delete_virtual_interfaces')
    @patch('krkn.scenario_plugins.native.network.ingress_shaping.get_node_interfaces')
    @patch('krkn.scenario_plugins.native.network.ingress_shaping.kube_helper')
    def test_network_chaos_get_nodes_error(
        self, mock_kube_helper, mock_get_nodes, mock_delete_virtual, mock_delete_jobs, mock_yaml,
        mock_file_loader, mock_env
    ):
        """Test network chaos when getting nodes fails"""
        # Setup mocks
        mock_cli = Mock()
        mock_batch_cli = Mock()
        mock_yaml.return_value = {"metadata": {"name": "test-pod"}}
        mock_kube_helper.setup_kubernetes.return_value = (mock_cli, mock_batch_cli)
        mock_get_nodes.side_effect = Exception("Failed to get nodes")

        # Test
        cfg = ingress_shaping.NetworkScenarioConfig(
            label_selector="node-role.kubernetes.io/worker",
            instance_count=1,
            network_params={"latency": "50ms"}
        )

        output_id, output_data = ingress_shaping.network_chaos(params=cfg, run_id="test-run")

        # Assertions
        self.assertEqual(output_id, "error")
        self.assertIn("Failed to get nodes", output_data.error)

    @patch('krkn.scenario_plugins.native.network.ingress_shaping.Environment')
    @patch('krkn.scenario_plugins.native.network.ingress_shaping.FileSystemLoader')
    @patch('yaml.safe_load')
    @patch('krkn.scenario_plugins.native.network.ingress_shaping.delete_jobs')
    @patch('krkn.scenario_plugins.native.network.ingress_shaping.delete_virtual_interfaces')
    @patch('krkn.scenario_plugins.native.network.ingress_shaping.apply_ingress_filter')
    @patch('krkn.scenario_plugins.native.network.ingress_shaping.get_node_interfaces')
    @patch('krkn.scenario_plugins.native.network.ingress_shaping.kube_helper')
    def test_network_chaos_apply_filter_error(
        self, mock_kube_helper, mock_get_nodes, mock_apply_filter,
        mock_delete_virtual, mock_delete_jobs, mock_yaml,
        mock_file_loader, mock_env
    ):
        """Test network chaos when applying filter fails"""
        # Setup mocks
        mock_cli = Mock()
        mock_batch_cli = Mock()
        mock_yaml.return_value = {"metadata": {"name": "test-pod"}}
        mock_kube_helper.setup_kubernetes.return_value = (mock_cli, mock_batch_cli)
        mock_get_nodes.return_value = {"node1": ["eth0"]}
        mock_apply_filter.side_effect = Exception("Failed to apply filter")

        # Test
        cfg = ingress_shaping.NetworkScenarioConfig(
            label_selector="node-role.kubernetes.io/worker",
            instance_count=1,
            network_params={"latency": "50ms"},
            execution_type="parallel"
        )

        output_id, output_data = ingress_shaping.network_chaos(params=cfg, run_id="test-run")

        # Assertions
        self.assertEqual(output_id, "error")
        self.assertIn("Failed to apply filter", output_data.error)
        # Cleanup should still be called
        mock_delete_virtual.assert_called_once()


if __name__ == "__main__":
    unittest.main()
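
The last test above asserts that cleanup still runs when the filter step raises. A minimal self-contained sketch of that try/except/finally shape, with stand-in callables and a stand-in output type rather than the plugin's real signatures:

```python
from dataclasses import dataclass
from typing import Callable, Dict, List, Optional, Tuple


@dataclass
class ScenarioOutput:  # stand-in for the plugin's output model, not the real class
    error: Optional[str] = None


def run_with_cleanup(
    apply_filter: Callable[[str, List[str]], None],
    cleanup: Callable[[], None],
    node_interfaces: Dict[str, List[str]],
) -> Tuple[str, ScenarioOutput]:
    """Apply a filter per node; always run cleanup, even when a step raises."""
    try:
        for node, interfaces in node_interfaces.items():
            apply_filter(node, interfaces)  # may raise, as the mock does
        return "success", ScenarioOutput()
    except Exception as exc:
        return "error", ScenarioOutput(error=str(exc))
    finally:
        cleanup()  # mirrors mock_delete_virtual.assert_called_once()
```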

@@ -1,27 +1,11 @@
#!/usr/bin/env python3

"""
Test suite for KubeVirt VM Outage Scenario Plugin
Test suite for KubeVirt VM Outage Scenario Plugin class

This comprehensive test suite covers the KubevirtVmOutageScenarioPlugin class
using extensive mocks to avoid needing actual Kubernetes/KubeVirt infrastructure.

Test Coverage:
- Core scenario flows: injection, recovery, deletion, waiting
- Edge cases: timeouts, missing parameters, validation failures
- API exceptions: 404, 500, and general exceptions
- Helper methods: get_vmi, get_vmis, patch_vm_spec, validate_environment
- Multiple VMI scenarios with kill_count
- Auto-restart disable functionality

IMPORTANT: These tests use comprehensive mocking and do NOT require any Kubernetes
cluster or KubeVirt installation. All API calls are mocked.
Note: This test file uses mocks extensively to avoid needing actual Kubernetes/KubeVirt infrastructure.

Usage:
    # Run all tests
    python -m unittest tests.test_kubevirt_vm_outage -v

    # Run with coverage
    python -m coverage run -a -m unittest tests/test_kubevirt_vm_outage.py -v

Assisted By: Claude Code
@@ -32,7 +16,6 @@ import itertools
import os
import tempfile
import unittest
from datetime import datetime, timedelta, timezone
from unittest.mock import MagicMock, patch

import yaml
@@ -44,9 +27,8 @@ from kubernetes.client.rest import ApiException

from krkn.scenario_plugins.kubevirt_vm_outage.kubevirt_vm_outage_scenario_plugin import KubevirtVmOutageScenarioPlugin


class TestKubevirtVmOutageScenarioPlugin(unittest.TestCase):


    def setUp(self):
        """
        Set up test fixtures for KubevirtVmOutageScenarioPlugin
@@ -70,32 +52,18 @@ class TestKubevirtVmOutageScenarioPlugin(unittest.TestCase):
        crd_item.spec.group = "kubevirt.io"
        crd_list.items = [crd_item]
        self.k8s_client.list_custom_resource_definition.return_value = crd_list

        # Mock VMI data with timezone-aware timestamps
        base_time = datetime.now(timezone.utc)

        # Mock VMI data
        self.mock_vmi = {
            "metadata": {
                "name": "test-vm",
                "namespace": "default",
                "creationTimestamp": base_time.isoformat() + "Z"
                "namespace": "default"
            },
            "status": {
                "phase": "Running"
            }
        }

        # Mock VMI with new creation timestamp (after recreation)
        self.mock_vmi_recreated = {
            "metadata": {
                "name": "test-vm",
                "namespace": "default",
                "creationTimestamp": (base_time + timedelta(minutes=1)).isoformat() + "Z"
            },
            "status": {
                "phase": "Running"
            }
        }


        # Create test config
        self.config = {
            "scenarios": [
@@ -105,18 +73,18 @@ class TestKubevirtVmOutageScenarioPlugin(unittest.TestCase):
                    "parameters": {
                        "vm_name": "test-vm",
                        "namespace": "default",
                        "duration": 0
                        "duration": 0
                    }
                }
            ]
        }


        # Create a temporary config file
        temp_dir = tempfile.gettempdir()
        self.scenario_file = os.path.join(temp_dir, "test_kubevirt_scenario.yaml")
        with open(self.scenario_file, "w") as f:
            yaml.dump(self.config, f)


        # Mock dependencies
        self.telemetry = MagicMock(spec=KrknTelemetryOpenshift)
        self.scenario_telemetry = MagicMock(spec=ScenarioTelemetry)
@@ -126,152 +94,63 @@ class TestKubevirtVmOutageScenarioPlugin(unittest.TestCase):
        self.delete_count = 0
        self.wait_count = 0

    def create_incrementing_time_function(self):
        """
        Create an incrementing time function that returns sequential float values.
        Returns a callable that can be used with patch('time.time', side_effect=...)
        """
        counter = itertools.count(1)
        def mock_time():
            return float(next(counter))
        return mock_time
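
A short usage sketch of the helper above: patching `time.time` with the returned callable yields a deterministic, strictly increasing clock. This is plain `unittest.mock` usage, not plugin code:

```python
import itertools
import time
from unittest.mock import patch


def incrementing_time():
    """Standalone equivalent of create_incrementing_time_function."""
    counter = itertools.count(1)
    return lambda: float(next(counter))


with patch('time.time', side_effect=incrementing_time()):
    assert time.time() == 1.0
    assert time.time() == 2.0  # each call advances the fake clock by one second
```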

    def mock_delete(self, *args, **kwargs):
        """Reusable mock for delete_vmi that tracks calls and sets up affected_pod"""
        self.delete_count += 1
        self.plugin.affected_pod = AffectedPod(pod_name=f"test-vm-{self.delete_count}", namespace="default")
        self.plugin.affected_pod.pod_rescheduling_time = 5.0
        return 0

    def mock_wait(self, *args, **kwargs):
        """Reusable mock for wait_for_running that tracks calls and sets pod_readiness_time"""
        self.wait_count += 1
        self.plugin.affected_pod.pod_readiness_time = 3.0
        return 0

    # ==================== Core Scenario Tests ====================


    def test_successful_injection_and_recovery(self):
        """
        Test successful deletion and recovery of a VMI using detailed mocking
        Test successful deletion and recovery of a VMI
        """
        # Mock list_namespaces_by_regex to return a single namespace
        self.k8s_client.list_namespaces_by_regex = MagicMock(return_value=["default"])
        # Populate vmis_list to avoid randrange error
        self.plugin.vmis_list = [self.mock_vmi]

        # Mock list_namespaced_custom_object to return our VMI
        self.custom_object_client.list_namespaced_custom_object = MagicMock(
            side_effect=[
                {"items": [self.mock_vmi]},  # For get_vmis
                {"items": [{"metadata": {"name": "test-vm"}}]},  # For validate_environment
            ]
        )

        # Mock get_namespaced_custom_object with a sequence that handles multiple calls
        # Call sequence:
        # 1. validate_environment: get original VMI
        # 2. execute_scenario: get VMI before deletion
        # 3. delete_vmi: loop checking if timestamp changed (returns recreated VMI on first check)
        # 4+. wait_for_running: loop until phase is Running (may call multiple times)
        get_vmi_responses = [
            self.mock_vmi,  # Initial get in validate_environment
            self.mock_vmi,  # Get before delete
            self.mock_vmi_recreated,  # After delete (recreated with new timestamp)
            self.mock_vmi_recreated,  # Check if running
        ]

        class GetVmiSideEffect:
            """
            Callable helper that returns a predefined sequence of VMIs.
            If called more times than there are responses, it fails the test
            to surface unexpected additional calls instead of silently
            masking them.
            """
            def __init__(self, responses):
                self._responses = responses
                self._call_iter = itertools.count()
                self.call_count = 0

            def __call__(self, *args, **kwargs):
                call_num = next(self._call_iter)
                self.call_count = call_num + 1

                if call_num >= len(self._responses):
                    raise AssertionError(
                        f"get_vmi_side_effect called more times ({call_num + 1}) "
                        f"than expected ({len(self._responses)})."
                    )
                return self._responses[call_num]

        get_vmi_side_effect = GetVmiSideEffect(get_vmi_responses)
        self.custom_object_client.get_namespaced_custom_object = MagicMock(
            side_effect=get_vmi_side_effect
        )

        # Mock delete operation
        self.custom_object_client.delete_namespaced_custom_object = MagicMock(return_value={})

        with patch('time.time', side_effect=self.create_incrementing_time_function()), patch('time.sleep'):
            with patch("builtins.open", unittest.mock.mock_open(read_data=yaml.dump(self.config))):
                result = self.plugin.run("test-uuid", self.scenario_file, {}, self.telemetry, self.scenario_telemetry)
        # Mock get_vmis to not clear the list
        with patch.object(self.plugin, 'get_vmis'):
            # Mock get_vmi to return our mock VMI
            with patch.object(self.plugin, 'get_vmi', return_value=self.mock_vmi):
                # Mock validate_environment to return True
                with patch.object(self.plugin, 'validate_environment', return_value=True):
                    # Mock delete_vmi and wait_for_running to simulate success
                    with patch.object(self.plugin, 'delete_vmi', side_effect=self.mock_delete) as mock_delete:
                        with patch.object(self.plugin, 'wait_for_running', side_effect=self.mock_wait) as mock_wait:
                            with patch("builtins.open", unittest.mock.mock_open(read_data=yaml.dump(self.config))):
                                result = self.plugin.run("test-uuid", self.scenario_file, {}, self.telemetry, self.scenario_telemetry)

        self.assertEqual(result, 0)
        # Verify get_namespaced_custom_object was called exactly as many times as
        # there are predefined responses
        self.assertEqual(
            self.custom_object_client.get_namespaced_custom_object.call_count,
            len(get_vmi_responses),
        )

        # Verify that the VMI delete operation was performed once with expected parameters
        self.custom_object_client.delete_namespaced_custom_object.assert_called_once_with(
            group="kubevirt.io",
            version="v1",
            namespace="default",
            plural="virtualmachineinstances",
            name="test-vm",
        )

        mock_delete.assert_called_once_with("test-vm", "default", False)
        mock_wait.assert_called_once_with("test-vm", "default", 60)

    def test_injection_failure(self):
        """
        Test failure during VMI deletion
        """
        # Mock list_namespaces_by_regex
        self.k8s_client.list_namespaces_by_regex = MagicMock(return_value=["default"])
        # Populate vmis_list to avoid randrange error
        self.plugin.vmis_list = [self.mock_vmi]

        # Mock list to return VMI
        self.custom_object_client.list_namespaced_custom_object = MagicMock(
            side_effect=[
                {"items": [self.mock_vmi]},  # For get_vmis
                {"items": [{"metadata": {"name": "test-vm"}}]},  # For validate_environment
            ]
        )

        # Mock get_vmi
        self.custom_object_client.get_namespaced_custom_object = MagicMock(
            side_effect=[
                self.mock_vmi,  # validate_environment
                self.mock_vmi,  # get before delete
            ]
        )

        # Mock delete to raise an error
        self.custom_object_client.delete_namespaced_custom_object = MagicMock(
            side_effect=ApiException(status=500, reason="Internal Server Error")
        )

        with patch("builtins.open", unittest.mock.mock_open(read_data=yaml.dump(self.config))):
            result = self.plugin.run("test-uuid", self.scenario_file, {}, self.telemetry, self.scenario_telemetry)

        self.assertEqual(result, 1)
        # Verify delete was attempted before the error occurred
        self.custom_object_client.delete_namespaced_custom_object.assert_called_once_with(
            group="kubevirt.io",
            version="v1",
            namespace="default",
            plural="virtualmachineinstances",
            name="test-vm"
        )
        # Mock get_vmis to not clear the list
        with patch.object(self.plugin, 'get_vmis'):
            # Mock get_vmi to return our mock VMI
            with patch.object(self.plugin, 'get_vmi', return_value=self.mock_vmi):
                # Mock validate_environment to return True
                with patch.object(self.plugin, 'validate_environment', return_value=True):
                    # Mock delete_vmi to simulate failure
                    with patch.object(self.plugin, 'delete_vmi', return_value=1) as mock_delete:
                        with patch.object(self.plugin, 'wait_for_running', return_value=0) as mock_wait:
                            with patch("builtins.open", unittest.mock.mock_open(read_data=yaml.dump(self.config))):
                                result = self.plugin.run("test-uuid", self.scenario_file, {}, self.telemetry, self.scenario_telemetry)

        self.assertEqual(result, 0)
        mock_delete.assert_called_once_with("test-vm", "default", False)
        mock_wait.assert_not_called()

    def test_disable_auto_restart(self):
        """
        Test VM auto-restart can be disabled
@@ -279,92 +158,66 @@ class TestKubevirtVmOutageScenarioPlugin(unittest.TestCase):
        # Configure test with disable_auto_restart=True
        self.config["scenarios"][0]["parameters"]["disable_auto_restart"] = True

        # Mock list_namespaces_by_regex
        self.k8s_client.list_namespaces_by_regex = MagicMock(return_value=["default"])
        # Populate vmis_list to avoid randrange error
        self.plugin.vmis_list = [self.mock_vmi]

        # Mock VM object for patching
        mock_vm = {
            "metadata": {"name": "test-vm", "namespace": "default"},
            "spec": {"running": True}
        }

        # Mock list to return VMI
        self.custom_object_client.list_namespaced_custom_object = MagicMock(
            side_effect=[
                {"items": [self.mock_vmi]},  # For get_vmis
                {"items": [{"metadata": {"name": "test-vm"}}]},  # For validate_environment
            ]
        )

        # Mock get_namespaced_custom_object with detailed call sequence
        # Call sequence:
        # 1. execute_scenario: get VMI before deletion
        # 2. patch_vm_spec: get VM for patching
        # 3. delete_vmi: loop checking if VMI timestamp changed
        # 4+. wait_for_running: loop until VMI phase is Running
        self.custom_object_client.get_namespaced_custom_object = MagicMock(
            side_effect=[
                self.mock_vmi,  # Call 1: get VMI before delete
                mock_vm,  # Call 2: get VM for patching (different resource type)
                self.mock_vmi_recreated,  # Call 3: delete_vmi detects new timestamp
                self.mock_vmi_recreated,  # Call 4: wait_for_running checks phase
            ]
        )

        # Mock patch and delete operations
        self.custom_object_client.patch_namespaced_custom_object = MagicMock(return_value=mock_vm)
        self.custom_object_client.delete_namespaced_custom_object = MagicMock(return_value={})

        with patch('time.time', side_effect=self.create_incrementing_time_function()), patch('time.sleep'):
            with patch("builtins.open", unittest.mock.mock_open(read_data=yaml.dump(self.config))):
                result = self.plugin.run("test-uuid", self.scenario_file, {}, self.telemetry, self.scenario_telemetry)
        # Mock get_vmis to not clear the list
        with patch.object(self.plugin, 'get_vmis'):
            # Mock get_vmi to return our mock VMI
            with patch.object(self.plugin, 'get_vmi', return_value=self.mock_vmi):
                # Mock validate_environment to return True
                with patch.object(self.plugin, 'validate_environment', return_value=True):
                    # Mock delete_vmi and wait_for_running
                    with patch.object(self.plugin, 'delete_vmi', side_effect=self.mock_delete) as mock_delete:
                        with patch.object(self.plugin, 'wait_for_running', side_effect=self.mock_wait) as mock_wait:
                            with patch("builtins.open", unittest.mock.mock_open(read_data=yaml.dump(self.config))):
                                result = self.plugin.run("test-uuid", self.scenario_file, {}, self.telemetry, self.scenario_telemetry)

        self.assertEqual(result, 0)
        # Verify patch was called to disable auto-restart
        self.custom_object_client.patch_namespaced_custom_object.assert_called()

        # delete_vmi should be called with disable_auto_restart=True
        mock_delete.assert_called_once_with("test-vm", "default", True)
        mock_wait.assert_called_once_with("test-vm", "default", 60)

    def test_recovery_when_vmi_does_not_exist(self):
        """
        Test recovery logic when VMI does not exist after deletion
        """
        # Initialize the plugin's custom_object_client
        self.plugin.custom_object_client = self.custom_object_client

        # Store the original VMI in the plugin for recovery
        self.plugin.original_vmi = self.mock_vmi.copy()

        # Initialize affected_pod which is used by wait_for_running
        self.plugin.affected_pod = AffectedPod(pod_name="test-vm", namespace="default")
        # Create a cleaned vmi_dict as the plugin would
        vmi_dict = self.mock_vmi.copy()

        # Set up running VMI data for after recovery
        running_vmi = {
            "metadata": {
                "name": "test-vm",
                "namespace": "default",
                "creationTimestamp": (datetime.now(timezone.utc) + timedelta(minutes=2)).isoformat() + "Z"
            },
            "metadata": {"name": "test-vm", "namespace": "default"},
            "status": {"phase": "Running"}
        }

        # Mock get_namespaced_custom_object call sequence triggered during recovery
        # Call sequence:
        # 1. wait_for_running: first loop iteration - VMI creation requested but not visible yet
        # 2. wait_for_running: subsequent iterations - VMI exists and is running
        self.custom_object_client.get_namespaced_custom_object = MagicMock(
            side_effect=[
                ApiException(status=404, reason="Not Found"),  # VMI not visible yet after create
                running_vmi,  # VMI now exists and is running
            ]
        )
        # Set up time.time to immediately exceed the timeout for auto-recovery
        with patch('time.time', side_effect=[0, 301, 301, 301, 301, 310, 320]):
            # Mock get_vmi to always return None (not auto-recovered)
            with patch.object(self.plugin, 'get_vmi', side_effect=[None, None, running_vmi]):
                # Mock the custom object API to return success
                self.custom_object_client.create_namespaced_custom_object = MagicMock(return_value=running_vmi)

                # Mock the create API to return success
                self.custom_object_client.create_namespaced_custom_object = MagicMock(return_value=running_vmi)

                # Run recovery with mocked time
                with patch('time.time', side_effect=self.create_incrementing_time_function()), patch('time.sleep'):
                    result = self.plugin.recover("test-vm", "default", False)
                # Run recovery with mocked time.sleep
                with patch('time.sleep'):
                    result = self.plugin.recover("test-vm", "default", False)

        self.assertEqual(result, 0)
        # Verify create was called with the right arguments
        self.custom_object_client.create_namespaced_custom_object.assert_called_once()

        # Verify create was called with the right arguments for our API version and kind
        self.custom_object_client.create_namespaced_custom_object.assert_called_once_with(
            group="kubevirt.io",
            version="v1",
            namespace="default",
            plural="virtualmachineinstances",
            body=vmi_dict
        )

    def test_validation_failure(self):
        """
        Test validation failure when KubeVirt is not installed
@@ -383,32 +236,34 @@ class TestKubevirtVmOutageScenarioPlugin(unittest.TestCase):

        # When validation fails, run() returns 1 due to exception handling
        self.assertEqual(result, 1)

    # ==================== Timeout Tests ====================


    def test_delete_vmi_timeout(self):
        """
        Test timeout during VMI deletion
        """
        # Store original VMI
        self.plugin.original_vmi = self.mock_vmi
        # Initialize the plugin's custom_object_client and required attributes
        self.plugin.custom_object_client = self.custom_object_client

        # Initialize required attributes
        self.plugin.affected_pod = AffectedPod(pod_name="test-vm", namespace="default")
        # Initialize original_vmi which is required by delete_vmi
        self.plugin.original_vmi = self.mock_vmi.copy()
        self.plugin.original_vmi['metadata']['creationTimestamp'] = '2023-01-01T00:00:00Z'

        # Initialize pods_status which delete_vmi needs
        from krkn_lib.models.k8s import PodsStatus, AffectedPod
        self.plugin.pods_status = PodsStatus()
        self.plugin.affected_pod = AffectedPod(pod_name="test-vm", namespace="default")

        # Mock successful delete operation
        self.custom_object_client.delete_namespaced_custom_object = MagicMock(return_value={})

        # Mock get_vmi to always return the same VMI with unchanged creationTimestamp
        # This simulates that the VMI has NOT been recreated after deletion
        self.custom_object_client.get_namespaced_custom_object = MagicMock(
            return_value=self.mock_vmi
        )
        # Mock that get_vmi always returns VMI with same creationTimestamp (never gets recreated)
        mock_vmi_with_time = self.mock_vmi.copy()
        mock_vmi_with_time['metadata']['creationTimestamp'] = '2023-01-01T00:00:00Z'

        # Simulate timeout by making time.time return values that exceed the timeout
        with patch('time.sleep'), patch('time.time', side_effect=[0, 10, 20, 130, 140]):
            result = self.plugin.delete_vmi("test-vm", "default", False, timeout=120)
        with patch.object(self.plugin, 'get_vmi', return_value=mock_vmi_with_time):
            # Simulate timeout by making time.time return values that exceed the timeout
            with patch('time.sleep'), patch('time.time', side_effect=[0, 10, 20, 130, 130, 130, 130, 140]):
                result = self.plugin.delete_vmi("test-vm", "default", False)

        self.assertEqual(result, 1)
        self.custom_object_client.delete_namespaced_custom_object.assert_called_once_with(
@@ -419,29 +274,12 @@ class TestKubevirtVmOutageScenarioPlugin(unittest.TestCase):
            name="test-vm"
        )
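
The timeout test above pins down `delete_vmi`'s implied contract: issue the delete, then poll until the VMI reappears with a changed `creationTimestamp`, and return 1 if it never does. A minimal self-contained sketch of that loop, assuming the getter returns the VMI dict or None (this is an illustration, not the plugin's actual code):

```python
import time
from typing import Callable, Optional


def delete_and_wait_for_recreation(
    delete_fn: Callable[[], None],
    get_vmi: Callable[[], Optional[dict]],
    original_timestamp: str,
    timeout: float = 120.0,
    poll_interval: float = 5.0,
) -> int:
    """Delete a VMI, then poll until it reappears with a new creationTimestamp."""
    delete_fn()
    start = time.time()
    while time.time() - start < timeout:
        vmi = get_vmi()
        if vmi and vmi["metadata"].get("creationTimestamp") != original_timestamp:
            return 0  # recreated with a fresh timestamp
        time.sleep(poll_interval)
    return 1  # timed out: VMI never recreated, which is what the test asserts
```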

    def test_wait_for_running_timeout(self):
        """
        Test wait_for_running times out when VMI doesn't reach Running state
        """
        self.plugin.affected_pod = AffectedPod(pod_name="test-vm", namespace="default")

        # Mock VMI in Pending state
        pending_vmi = self.mock_vmi.copy()
        pending_vmi['status']['phase'] = 'Pending'

        with patch.object(self.plugin, 'get_vmi', return_value=pending_vmi):
            with patch('time.sleep'):
                with patch('time.time', side_effect=[0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 121]):
                    result = self.plugin.wait_for_running("test-vm", "default", 120)

        self.assertEqual(result, 1)

    # ==================== API Exception Tests ====================

    def test_get_vmi_api_exception_non_404(self):
        """
        Test get_vmi raises ApiException for non-404 errors
        """

        # Mock API exception with non-404 status
        api_error = ApiException(status=500, reason="Internal Server Error")
        self.custom_object_client.get_namespaced_custom_object = MagicMock(side_effect=api_error)
@@ -461,10 +299,37 @@ class TestKubevirtVmOutageScenarioPlugin(unittest.TestCase):
        with self.assertRaises(Exception):
            self.plugin.get_vmi("test-vm", "default")

    def test_get_vmis_with_regex_matching(self):
        """
        Test get_vmis successfully filters VMIs by regex pattern
        """
        # Mock namespace list
        self.k8s_client.list_namespaces_by_regex = MagicMock(return_value=["default", "test-ns"])

        # Mock VMI list with multiple VMIs
        vmi_list = {
            "items": [
                {"metadata": {"name": "test-vm-1"}, "status": {"phase": "Running"}},
                {"metadata": {"name": "test-vm-2"}, "status": {"phase": "Running"}},
                {"metadata": {"name": "other-vm"}, "status": {"phase": "Running"}},
            ]
        }
        self.custom_object_client.list_namespaced_custom_object = MagicMock(return_value=vmi_list)

        # Test with regex pattern that matches test-vm-*
        self.plugin.get_vmis("test-vm-.*", "default")

        # Should have 4 VMs (2 per namespace * 2 namespaces)
        self.assertEqual(len(self.plugin.vmis_list), 4)
        # Verify only test-vm-* were added
        for vmi in self.plugin.vmis_list:
            self.assertTrue(vmi["metadata"]["name"].startswith("test-vm-"))
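
The regex test above implies that `get_vmis` matches VMI names against the pattern in every namespace returned by the namespace lookup. A hedged sketch of that filtering, using a plain dict of pre-fetched lists instead of the plugin's API clients (names here are illustrative):

```python
import re
from typing import Dict, List


def filter_vmis_by_regex(
    vmis_per_namespace: Dict[str, List[dict]], name_pattern: str
) -> List[dict]:
    """Collect VMIs whose metadata.name matches the pattern, across namespaces."""
    matched = []
    for namespace, vmis in vmis_per_namespace.items():
        for vmi in vmis:
            if re.fullmatch(name_pattern, vmi["metadata"]["name"]):
                matched.append(vmi)
    return matched


# Mirrors the test: two namespaces, three VMIs each, "test-vm-.*" keeps two per namespace.
vmis = [
    {"metadata": {"name": "test-vm-1"}},
    {"metadata": {"name": "test-vm-2"}},
    {"metadata": {"name": "other-vm"}},
]
assert len(filter_vmis_by_regex({"default": vmis, "test-ns": vmis}, "test-vm-.*")) == 4
```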

    def test_get_vmis_api_exception_404(self):
        """
        Test get_vmis handles 404 ApiException gracefully
        """

        self.k8s_client.list_namespaces_by_regex = MagicMock(return_value=["default"])
        api_error = ApiException(status=404, reason="Not Found")
        self.custom_object_client.list_namespaced_custom_object = MagicMock(side_effect=api_error)
@@ -477,6 +342,7 @@ class TestKubevirtVmOutageScenarioPlugin(unittest.TestCase):
        """
        Test get_vmis raises ApiException for non-404 errors
        """

        self.k8s_client.list_namespaces_by_regex = MagicMock(return_value=["default"])
        api_error = ApiException(status=500, reason="Internal Server Error")
        self.custom_object_client.list_namespaced_custom_object = MagicMock(side_effect=api_error)
@@ -484,10 +350,52 @@ class TestKubevirtVmOutageScenarioPlugin(unittest.TestCase):
        with self.assertRaises(ApiException):
            self.plugin.get_vmis("test-vm", "default")

    def test_patch_vm_spec_success(self):
        """
        Test patch_vm_spec successfully patches VM
        """
        mock_vm = {
            "metadata": {"name": "test-vm", "namespace": "default"},
            "spec": {"running": True}
        }

        self.custom_object_client.get_namespaced_custom_object = MagicMock(return_value=mock_vm)
        self.custom_object_client.patch_namespaced_custom_object = MagicMock(return_value=mock_vm)

        result = self.plugin.patch_vm_spec("test-vm", "default", False)

        self.assertTrue(result)
        self.custom_object_client.patch_namespaced_custom_object.assert_called_once()
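
Together, the `patch_vm_spec` tests fix its implied behavior: fetch the VM, update `spec.running`, patch it back, and map any API or generic failure to False. A minimal sketch under those assumptions, with the two client calls passed in as callables rather than the real custom-object client:

```python
from typing import Callable


def patch_running_flag(
    get_vm: Callable[[], dict],
    patch_vm: Callable[[dict], dict],
    running: bool,
) -> bool:
    """Fetch the VM, flip spec.running, and patch it back; False on any error."""
    try:
        vm = get_vm()
        vm["spec"]["running"] = running
        patch_vm(vm)
        return True
    except Exception:
        # both ApiException (e.g. 404) and generic errors map to False in the tests
        return False
```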

    def test_patch_vm_spec_api_exception(self):
        """
        Test patch_vm_spec handles ApiException
        """

        api_error = ApiException(status=404, reason="Not Found")
        self.custom_object_client.get_namespaced_custom_object = MagicMock(side_effect=api_error)

        result = self.plugin.patch_vm_spec("test-vm", "default", False)

        self.assertFalse(result)

    def test_patch_vm_spec_general_exception(self):
        """
        Test patch_vm_spec handles general exceptions
        """
        self.custom_object_client.get_namespaced_custom_object = MagicMock(
            side_effect=Exception("Connection error")
        )

        result = self.plugin.patch_vm_spec("test-vm", "default", False)

        self.assertFalse(result)

    def test_delete_vmi_api_exception_404(self):
        """
        Test delete_vmi handles 404 ApiException during deletion
        """

        # Initialize required attributes
        self.plugin.original_vmi = self.mock_vmi.copy()
        self.plugin.original_vmi['metadata']['creationTimestamp'] = '2023-01-01T00:00:00Z'
@@ -518,103 +426,6 @@ class TestKubevirtVmOutageScenarioPlugin(unittest.TestCase):

        self.assertEqual(result, 1)

    def test_patch_vm_spec_api_exception(self):
        """
        Test patch_vm_spec handles ApiException
        """
        api_error = ApiException(status=404, reason="Not Found")
        self.custom_object_client.get_namespaced_custom_object = MagicMock(side_effect=api_error)

        result = self.plugin.patch_vm_spec("test-vm", "default", False)

        self.assertFalse(result)

    def test_patch_vm_spec_general_exception(self):
        """
        Test patch_vm_spec handles general exceptions
        """
        self.custom_object_client.get_namespaced_custom_object = MagicMock(
            side_effect=Exception("Connection error")
        )

        result = self.plugin.patch_vm_spec("test-vm", "default", False)

        self.assertFalse(result)

    # ==================== Helper Method Tests ====================

    def test_get_vmis_with_regex_matching(self):
        """
        Test get_vmis successfully filters VMIs by regex pattern
        """
        # Mock namespace list
        self.k8s_client.list_namespaces_by_regex = MagicMock(return_value=["default", "test-ns"])

        # Mock VMI list with multiple VMIs
        vmi_list = {
            "items": [
                {"metadata": {"name": "test-vm-1"}, "status": {"phase": "Running"}},
                {"metadata": {"name": "test-vm-2"}, "status": {"phase": "Running"}},
                {"metadata": {"name": "other-vm"}, "status": {"phase": "Running"}},
            ]
        }
        self.custom_object_client.list_namespaced_custom_object = MagicMock(return_value=vmi_list)

        # Test with regex pattern that matches test-vm-*
        self.plugin.get_vmis("test-vm-.*", "default")

        # Should have 4 VMs (2 per namespace * 2 namespaces)
        self.assertEqual(len(self.plugin.vmis_list), 4)
        # Verify only test-vm-* were added
        for vmi in self.plugin.vmis_list:
            self.assertTrue(vmi["metadata"]["name"].startswith("test-vm-"))

    def test_patch_vm_spec_success(self):
        """
        Test patch_vm_spec successfully patches VM
        """
        mock_vm = {
            "metadata": {"name": "test-vm", "namespace": "default"},
            "spec": {"running": True}
        }

        self.custom_object_client.get_namespaced_custom_object = MagicMock(return_value=mock_vm)
        self.custom_object_client.patch_namespaced_custom_object = MagicMock(return_value=mock_vm)

        result = self.plugin.patch_vm_spec("test-vm", "default", False)

        self.assertTrue(result)
        self.custom_object_client.patch_namespaced_custom_object.assert_called_once()

    def test_validate_environment_exception(self):
        """
        Test validate_environment handles exceptions
        """
        self.custom_object_client.list_namespaced_custom_object = MagicMock(
            side_effect=Exception("Connection error")
        )

        result = self.plugin.validate_environment("test-vm", "default")

        self.assertFalse(result)

    def test_validate_environment_vmi_not_found(self):
        """
        Test validate_environment when VMI doesn't exist
        """
        # Mock CRDs exist
        mock_crd_list = MagicMock()
        mock_crd_list.items = MagicMock(return_value=["item1"])
        self.custom_object_client.list_namespaced_custom_object = MagicMock(return_value=mock_crd_list)

        # Mock VMI not found
        with patch.object(self.plugin, 'get_vmi', return_value=None):
            result = self.plugin.validate_environment("test-vm", "default")

        self.assertFalse(result)

    # ==================== Delete VMI Tests ====================

    def test_delete_vmi_successful_recreation(self):
        """
        Test delete_vmi succeeds when VMI is recreated with new creationTimestamp
@@ -668,7 +479,22 @@ class TestKubevirtVmOutageScenarioPlugin(unittest.TestCase):
        # When VMI stays deleted (None), delete_vmi waits for recreation and times out
        self.assertEqual(result, 1)

    # ==================== Wait for Running Tests ====================
    def test_wait_for_running_timeout(self):
        """
        Test wait_for_running times out when VMI doesn't reach Running state
        """
        self.plugin.affected_pod = AffectedPod(pod_name="test-vm", namespace="default")

        # Mock VMI in Pending state
        pending_vmi = self.mock_vmi.copy()
        pending_vmi['status']['phase'] = 'Pending'

        with patch.object(self.plugin, 'get_vmi', return_value=pending_vmi):
            with patch('time.sleep'):
                with patch('time.time', side_effect=[0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 121]):
                    result = self.plugin.wait_for_running("test-vm", "default", 120)

        self.assertEqual(result, 1)

    def test_wait_for_running_vmi_not_exists(self):
        """
@@ -682,15 +508,13 @@ class TestKubevirtVmOutageScenarioPlugin(unittest.TestCase):

        with patch.object(self.plugin, 'get_vmi', side_effect=[None, None, running_vmi]):
            with patch('time.sleep'):
                # time.time() called: start_time (0), iteration 1 (1), iteration 2 (2), iteration 3 (3), end_time (3)
                # time.time() called: start_time (0), while loop iteration 1 (1), iteration 2 (2), iteration 3 (3), end_time (3)
                with patch('time.time', side_effect=[0, 1, 2, 3, 3]):
                    result = self.plugin.wait_for_running("test-vm", "default", 120)

        self.assertEqual(result, 0)
        self.assertIsNotNone(self.plugin.affected_pod.pod_readiness_time)
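
The two tests above describe `wait_for_running` as a poll loop: tolerate the VMI being absent, succeed as soon as the phase reaches Running, and return 1 once the timeout elapses. A self-contained sketch under those assumptions (the readiness-time bookkeeping is omitted):

```python
import time
from typing import Callable, Optional


def wait_for_running(
    get_vmi: Callable[[], Optional[dict]],
    timeout: float,
    poll_interval: float = 1.0,
) -> int:
    """Poll until the VMI exists and reports phase Running; 1 on timeout."""
    start = time.time()
    while time.time() - start < timeout:
        vmi = get_vmi()  # may be None while the VMI is not visible yet
        if vmi and vmi.get("status", {}).get("phase") == "Running":
            return 0
        time.sleep(poll_interval)
    return 1
```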

    # ==================== Recovery Tests ====================

    def test_recover_no_original_vmi(self):
        """
        Test recover fails when no original VMI is captured
@@ -718,8 +542,6 @@ class TestKubevirtVmOutageScenarioPlugin(unittest.TestCase):

        self.assertEqual(result, 1)

    # ==================== Execute Scenario Tests ====================

    def test_execute_scenario_missing_vm_name(self):
        """
        Test execute_scenario fails when vm_name is missing
@@ -819,12 +641,38 @@ class TestKubevirtVmOutageScenarioPlugin(unittest.TestCase):
        # Should have unrecovered pod
        self.assertEqual(len(result.unrecovered), 1)

    # ==================== Initialization Tests ====================
    def test_validate_environment_exception(self):
        """
        Test validate_environment handles exceptions
        """
        self.custom_object_client.list_namespaced_custom_object = MagicMock(
            side_effect=Exception("Connection error")
        )

        result = self.plugin.validate_environment("test-vm", "default")

        self.assertFalse(result)

    def test_validate_environment_vmi_not_found(self):
        """
        Test validate_environment when VMI doesn't exist
        """
        # Mock CRDs exist
        mock_crd_list = MagicMock()
        mock_crd_list.items = MagicMock(return_value=["item1"])
        self.custom_object_client.list_namespaced_custom_object = MagicMock(return_value=mock_crd_list)

        # Mock VMI not found
        with patch.object(self.plugin, 'get_vmi', return_value=None):
            result = self.plugin.validate_environment("test-vm", "default")

        self.assertFalse(result)

    def test_init_clients(self):
        """
        Test init_clients initializes k8s client correctly
        """

        mock_k8s = MagicMock(spec=KrknKubernetes)
        mock_custom_client = MagicMock()
        mock_k8s.custom_object_client = mock_custom_client

@@ -10,12 +10,12 @@ Assisted By: Claude Code
"""

import unittest
from unittest.mock import Mock, patch, call
from unittest.mock import MagicMock

from krkn_lib.k8s import KrknKubernetes
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift

from krkn.scenario_plugins.managed_cluster.managed_cluster_scenario_plugin import ManagedClusterScenarioPlugin
from krkn.scenario_plugins.managed_cluster import common_functions


class TestManagedClusterScenarioPlugin(unittest.TestCase):
@@ -25,7 +25,6 @@ class TestManagedClusterScenarioPlugin(unittest.TestCase):
        Set up test fixtures for ManagedClusterScenarioPlugin
        """
        self.plugin = ManagedClusterScenarioPlugin()
        self.mock_kubecli = Mock(spec=KrknKubernetes)

    def test_get_scenario_types(self):
        """
@@ -37,191 +36,5 @@ class TestManagedClusterScenarioPlugin(unittest.TestCase):
        self.assertEqual(len(result), 1)


class TestCommonFunctions(unittest.TestCase):
    """
    Test suite for common_functions module
    """

    def setUp(self):
        """
        Set up test fixtures for common_functions tests
        """
        self.mock_kubecli = Mock(spec=KrknKubernetes)

    def test_get_managedcluster_with_specific_name_exists(self):
        """
        Test get_managedcluster returns the specified cluster when it exists
        """
        self.mock_kubecli.list_killable_managedclusters.return_value = ["cluster1", "cluster2", "cluster3"]

        result = common_functions.get_managedcluster(
            "cluster1", "", 1, self.mock_kubecli
        )

        self.assertEqual(result, ["cluster1"])
        self.mock_kubecli.list_killable_managedclusters.assert_called_once_with()

    def test_get_managedcluster_with_specific_name_not_exists(self):
        """
        Test get_managedcluster falls back to label selector when specified cluster doesn't exist
        """
        self.mock_kubecli.list_killable_managedclusters.side_effect = [
            ["cluster2", "cluster3"],
            ["cluster2", "cluster3"]
        ]

        result = common_functions.get_managedcluster(
            "cluster1", "env=test", 1, self.mock_kubecli
        )

        self.assertEqual(len(result), 1)
        self.assertIn(result[0], ["cluster2", "cluster3"])

    def test_get_managedcluster_with_label_selector(self):
        """
        Test get_managedcluster returns clusters matching label selector
        """
        self.mock_kubecli.list_killable_managedclusters.side_effect = [
            ["cluster1", "cluster2", "cluster3"],
            ["cluster1", "cluster2", "cluster3"],
        ]

        result = common_functions.get_managedcluster(
            "", "env=production", 2, self.mock_kubecli
        )

        self.assertEqual(len(result), 2)
        # Should be called once without and once with label_selector
        self.assertEqual(
            self.mock_kubecli.list_killable_managedclusters.call_count,
            2,
        )
        self.mock_kubecli.list_killable_managedclusters.assert_has_calls(
            [call(), call("env=production")]
        )

    def test_get_managedcluster_no_available_clusters(self):
        """
        Test get_managedcluster raises exception when no clusters are available
        """
        self.mock_kubecli.list_killable_managedclusters.return_value = []

        with self.assertRaises(Exception) as context:
            common_functions.get_managedcluster(
                "", "env=nonexistent", 1, self.mock_kubecli
            )

        self.assertIn("Available managedclusters with the provided label selector do not exist", str(context.exception))

    def test_get_managedcluster_kill_count_equals_available(self):
        """
        Test get_managedcluster returns all clusters when instance_kill_count equals available clusters
        """
        available_clusters = ["cluster1", "cluster2", "cluster3"]
        self.mock_kubecli.list_killable_managedclusters.return_value = available_clusters

        result = common_functions.get_managedcluster(
            "", "env=test", 3, self.mock_kubecli
        )

        self.assertEqual(result, available_clusters)
        self.assertEqual(len(result), 3)

    @patch('logging.info')
    def test_get_managedcluster_return_empty_when_count_is_zero(self, mock_logging):
        """
        Test get_managedcluster returns empty list when instance_kill_count is 0
        """
        available_clusters = ["cluster1", "cluster2", "cluster3"]
        self.mock_kubecli.list_killable_managedclusters.return_value = available_clusters

        result = common_functions.get_managedcluster(
            "", "env=test", 0, self.mock_kubecli
        )

        self.assertEqual(result, [])
        mock_logging.assert_called()

    @patch('random.randint')
    def test_get_managedcluster_random_selection(self, mock_randint):
        """
        Test get_managedcluster randomly selects the specified number of clusters
        """
        available_clusters = ["cluster1", "cluster2", "cluster3", "cluster4", "cluster5"]
        self.mock_kubecli.list_killable_managedclusters.return_value = available_clusters.copy()
        mock_randint.side_effect = [1, 0, 2]

        result = common_functions.get_managedcluster(
            "", "env=test", 3, self.mock_kubecli
        )

        self.assertEqual(len(result), 3)
        for cluster in result:
            self.assertIn(cluster, available_clusters)
        # Ensure no duplicates
        self.assertEqual(len(result), len(set(result)))

    @patch('logging.info')
    def test_get_managedcluster_logs_available_clusters(self, mock_logging):
        """
        Test get_managedcluster logs available clusters with label selector
        """
        available_clusters = ["cluster1", "cluster2"]
        self.mock_kubecli.list_killable_managedclusters.return_value = available_clusters

        common_functions.get_managedcluster(
            "", "env=test", 1, self.mock_kubecli
        )

        mock_logging.assert_called()
        call_args = str(mock_logging.call_args)
        self.assertIn("Available managedclusters with the label selector", call_args)

    @patch('logging.info')
    def test_get_managedcluster_logs_when_name_not_found(self, mock_logging):
        """
        Test get_managedcluster logs when specified cluster name doesn't exist
        """
        self.mock_kubecli.list_killable_managedclusters.side_effect = [
            ["cluster2"],
            ["cluster2"]
        ]

        common_functions.get_managedcluster(
            "nonexistent-cluster", "env=test", 1, self.mock_kubecli
        )
        # Check that logging was called multiple times (including the info message about unavailable cluster)
        self.assertGreaterEqual(mock_logging.call_count, 1)
        # Check all calls for the expected message
        all_calls = [str(call) for call in mock_logging.call_args_list]
        found_message = any("managedcluster with provided managedcluster_name does not exist" in call
                            for call in all_calls)
        self.assertTrue(found_message)

    def test_wait_for_available_status(self):
        """
        Test wait_for_available_status calls watch_managedcluster_status with correct parameters
        """
        common_functions.wait_for_available_status(
            "test-cluster", 300, self.mock_kubecli
        )

        self.mock_kubecli.watch_managedcluster_status.assert_called_once_with(
            "test-cluster", "True", 300
        )

    def test_wait_for_unavailable_status(self):
        """
        Test wait_for_unavailable_status calls watch_managedcluster_status with correct parameters
        """
        common_functions.wait_for_unavailable_status(
            "test-cluster", 300, self.mock_kubecli
        )

        self.mock_kubecli.watch_managedcluster_status.assert_called_once_with(
            "test-cluster", "Unknown", 300
        )


if __name__ == "__main__":
    unittest.main()
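
The TestCommonFunctions cases above jointly specify `get_managedcluster`'s selection rules: an existing name wins outright, a missing name logs and falls back to the label selector, an empty candidate list raises, and kill_count clusters are drawn without duplicates via `random.randint`. A sketch that satisfies those assertions, with the two list results passed in explicitly instead of a KrknKubernetes client (names and structure here are illustrative, not the real helper):

```python
import logging
import random
from typing import List


def pick_managedclusters(
    name: str,
    label_selector: str,
    kill_count: int,
    killable: List[str],
    killable_with_label: List[str],
) -> List[str]:
    """Selection rules implied by the tests above; a sketch, not the real code."""
    if name:
        if name in killable:
            return [name]
        logging.info(
            "managedcluster with provided managedcluster_name does not exist, "
            "falling back to the label selector"
        )
    if not killable_with_label:
        raise Exception(
            "Available managedclusters with the provided label selector do not exist"
        )
    logging.info("Available managedclusters with the label selector %s: %s",
                 label_selector, killable_with_label)
    picked, pool = [], list(killable_with_label)
    for _ in range(min(kill_count, len(pool))):
        picked.append(pool.pop(random.randint(0, len(pool) - 1)))  # no duplicates
    return picked
```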

@@ -10,10 +10,12 @@ Assisted By: Claude Code
"""

import unittest
from unittest.mock import patch
from unittest.mock import MagicMock

from krkn_lib.k8s import KrknKubernetes
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift

from krkn.scenario_plugins.network_chaos_ng.network_chaos_ng_scenario_plugin import NetworkChaosNgScenarioPlugin
from krkn.scenario_plugins.network_chaos_ng.modules import utils


class TestNetworkChaosNgScenarioPlugin(unittest.TestCase):
@@ -34,80 +36,5 @@ class TestNetworkChaosNgScenarioPlugin(unittest.TestCase):
        self.assertEqual(len(result), 1)


class TestNetworkChaosNgUtils(unittest.TestCase):

    @patch("krkn.scenario_plugins.network_chaos_ng.modules.utils.logging.info")
    def test_log_info_non_parallel(self, mock_logging_info):
        """
        Test log_info function with parallel=False
        """
        utils.log_info("Test message")
        mock_logging_info.assert_called_once_with("Test message")

    @patch("krkn.scenario_plugins.network_chaos_ng.modules.utils.logging.info")
    def test_log_info_parallel(self, mock_logging_info):
        """
        Test log_info function with parallel=True
        """
        utils.log_info("Test message", parallel=True, node_name="node1")
        mock_logging_info.assert_called_once_with("[node1]: Test message")

    @patch("krkn.scenario_plugins.network_chaos_ng.modules.utils.logging.info")
    def test_log_info_parallel_missing_node_name(self, mock_logging_info):
        """
        Test log_info with parallel=True and missing node_name
        """
        utils.log_info("Test message", parallel=True)
        mock_logging_info.assert_called_once_with("[]: Test message")

    @patch("krkn.scenario_plugins.network_chaos_ng.modules.utils.logging.error")
    def test_log_error_non_parallel(self, mock_logging_error):
        """
        Test log_error function with parallel=False
        """
        utils.log_error("Error message")
        mock_logging_error.assert_called_once_with("Error message")

    @patch("krkn.scenario_plugins.network_chaos_ng.modules.utils.logging.error")
    def test_log_error_parallel(self, mock_logging_error):
        """
        Test log_error function with parallel=True
        """
        utils.log_error("Error message", parallel=True, node_name="node2")
        mock_logging_error.assert_called_once_with("[node2]: Error message")

    @patch("krkn.scenario_plugins.network_chaos_ng.modules.utils.logging.error")
    def test_log_error_parallel_missing_node_name(self, mock_logging_error):
        """
        Test log_error with parallel=True and missing node_name
        """
        utils.log_error("Error message", parallel=True)
        mock_logging_error.assert_called_once_with("[]: Error message")

    @patch("krkn.scenario_plugins.network_chaos_ng.modules.utils.logging.warning")
    def test_log_warning_non_parallel(self, mock_logging_warning):
        """
        Test log_warning function with parallel=False
        """
        utils.log_warning("Warning message")
        mock_logging_warning.assert_called_once_with("Warning message")

    @patch("krkn.scenario_plugins.network_chaos_ng.modules.utils.logging.warning")
    def test_log_warning_parallel(self, mock_logging_warning):
        """
        Test log_warning function with parallel=True
        """
        utils.log_warning("Warning message", parallel=True, node_name="node3")
        mock_logging_warning.assert_called_once_with("[node3]: Warning message")

    @patch("krkn.scenario_plugins.network_chaos_ng.modules.utils.logging.warning")
    def test_log_warning_parallel_missing_node_name(self, mock_logging_warning):
        """
        Test log_warning with parallel=True and missing node_name
        """
        utils.log_warning("Warning message", parallel=True)
        mock_logging_warning.assert_called_once_with("[]: Warning message")


if __name__ == "__main__":
    unittest.main()
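
All nine log tests reduce to one formatting rule: in parallel mode the message is prefixed with `[<node_name>]: `, with an empty name when none is given. A sketch of the implied helper, shown for `info`; the `error` and `warning` variants would differ only in the logging call:

```python
import logging


def log_info(message: str, parallel: bool = False, node_name: str = "") -> None:
    """Prefix messages with the node name when running per-node steps in parallel."""
    if parallel:
        logging.info(f"[{node_name}]: {message}")
    else:
        logging.info(message)


# log_info("up", parallel=True, node_name="node1") -> logs "[node1]: up"
# log_info("up", parallel=True)                    -> logs "[]: up"
```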

@@ -10,15 +10,10 @@ Assisted By: Claude Code
"""

import unittest
from unittest.mock import MagicMock, Mock, patch, mock_open, call
import yaml
import tempfile
import os
from unittest.mock import MagicMock

from krkn_lib.k8s import KrknKubernetes
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
from krkn_lib.models.telemetry import ScenarioTelemetry
from krkn_lib.models.k8s import AffectedNodeStatus

from krkn.scenario_plugins.node_actions.node_actions_scenario_plugin import NodeActionsScenarioPlugin

@@ -29,16 +24,7 @@ class TestNodeActionsScenarioPlugin(unittest.TestCase):
        """
        Set up test fixtures for NodeActionsScenarioPlugin
        """
        # Reset node_general global variable before each test
        import krkn.scenario_plugins.node_actions.node_actions_scenario_plugin as plugin_module
        plugin_module.node_general = False

        self.plugin = NodeActionsScenarioPlugin()
        self.mock_kubecli = Mock(spec=KrknKubernetes)
        self.mock_lib_telemetry = Mock(spec=KrknTelemetryOpenshift)
        self.mock_lib_telemetry.get_lib_kubernetes.return_value = self.mock_kubecli
        self.mock_scenario_telemetry = Mock(spec=ScenarioTelemetry)
        self.mock_scenario_telemetry.affected_nodes = []

    def test_get_scenario_types(self):
        """
@@ -49,700 +35,6 @@ class TestNodeActionsScenarioPlugin(unittest.TestCase):
        self.assertEqual(result, ["node_scenarios"])
        self.assertEqual(len(result), 1)

    @patch('krkn.scenario_plugins.node_actions.node_actions_scenario_plugin.general_node_scenarios')
    def test_get_node_scenario_object_generic(self, mock_general_scenarios):
        """
        Test get_node_scenario_object returns general_node_scenarios for generic cloud type
        """
        node_scenario = {"cloud_type": "generic"}
        mock_general_instance = Mock()
        mock_general_scenarios.return_value = mock_general_instance

        result = self.plugin.get_node_scenario_object(node_scenario, self.mock_kubecli)

        self.assertEqual(result, mock_general_instance)
        mock_general_scenarios.assert_called_once()
        args = mock_general_scenarios.call_args[0]
        self.assertEqual(args[0], self.mock_kubecli)
        self.assertTrue(args[1])  # node_action_kube_check defaults to True
        self.assertIsInstance(args[2], AffectedNodeStatus)

    @patch('krkn.scenario_plugins.node_actions.node_actions_scenario_plugin.general_node_scenarios')
    def test_get_node_scenario_object_no_cloud_type(self, mock_general_scenarios):
        """
        Test get_node_scenario_object returns general_node_scenarios when cloud_type is not specified
        """
        node_scenario = {}
        mock_general_instance = Mock()
        mock_general_scenarios.return_value = mock_general_instance

        result = self.plugin.get_node_scenario_object(node_scenario, self.mock_kubecli)

        self.assertEqual(result, mock_general_instance)
        mock_general_scenarios.assert_called_once()

    @patch('krkn.scenario_plugins.node_actions.node_actions_scenario_plugin.aws_node_scenarios')
    def test_get_node_scenario_object_aws(self, mock_aws_scenarios):
        """
        Test get_node_scenario_object returns aws_node_scenarios for AWS cloud type
        """
        node_scenario = {"cloud_type": "aws"}
        mock_aws_instance = Mock()
        mock_aws_scenarios.return_value = mock_aws_instance

        result = self.plugin.get_node_scenario_object(node_scenario, self.mock_kubecli)

        self.assertEqual(result, mock_aws_instance)
        mock_aws_scenarios.assert_called_once()

    @patch('krkn.scenario_plugins.node_actions.node_actions_scenario_plugin.gcp_node_scenarios')
    def test_get_node_scenario_object_gcp(self, mock_gcp_scenarios):
        """
        Test get_node_scenario_object returns gcp_node_scenarios for GCP cloud type
        """
        node_scenario = {"cloud_type": "gcp"}
        mock_gcp_instance = Mock()
        mock_gcp_scenarios.return_value = mock_gcp_instance

        result = self.plugin.get_node_scenario_object(node_scenario, self.mock_kubecli)

        self.assertEqual(result, mock_gcp_instance)
        mock_gcp_scenarios.assert_called_once()

    @patch('krkn.scenario_plugins.node_actions.node_actions_scenario_plugin.azure_node_scenarios')
    def test_get_node_scenario_object_azure(self, mock_azure_scenarios):
        """
        Test get_node_scenario_object returns azure_node_scenarios for Azure cloud type
        """
        node_scenario = {"cloud_type": "azure"}
        mock_azure_instance = Mock()
        mock_azure_scenarios.return_value = mock_azure_instance

        result = self.plugin.get_node_scenario_object(node_scenario, self.mock_kubecli)

        self.assertEqual(result, mock_azure_instance)
        mock_azure_scenarios.assert_called_once()

    @patch('krkn.scenario_plugins.node_actions.node_actions_scenario_plugin.azure_node_scenarios')
    def test_get_node_scenario_object_az(self, mock_azure_scenarios):
        """
        Test get_node_scenario_object returns azure_node_scenarios for 'az' cloud type alias
        """
        node_scenario = {"cloud_type": "az"}
        mock_azure_instance = Mock()
        mock_azure_scenarios.return_value = mock_azure_instance

        result = self.plugin.get_node_scenario_object(node_scenario, self.mock_kubecli)

        self.assertEqual(result, mock_azure_instance)
        mock_azure_scenarios.assert_called_once()

    @patch('krkn.scenario_plugins.node_actions.node_actions_scenario_plugin.docker_node_scenarios')
    def test_get_node_scenario_object_docker(self, mock_docker_scenarios):
        """
        Test get_node_scenario_object returns docker_node_scenarios for Docker cloud type
        """
        node_scenario = {"cloud_type": "docker"}
        mock_docker_instance = Mock()
        mock_docker_scenarios.return_value = mock_docker_instance

        result = self.plugin.get_node_scenario_object(node_scenario, self.mock_kubecli)

        self.assertEqual(result, mock_docker_instance)
        mock_docker_scenarios.assert_called_once()

    @patch('krkn.scenario_plugins.node_actions.node_actions_scenario_plugin.vmware_node_scenarios')
    def test_get_node_scenario_object_vmware(self, mock_vmware_scenarios):
        """
        Test get_node_scenario_object returns vmware_node_scenarios for VMware cloud type
        """
        node_scenario = {"cloud_type": "vmware"}
        mock_vmware_instance = Mock()
        mock_vmware_scenarios.return_value = mock_vmware_instance

        result = self.plugin.get_node_scenario_object(node_scenario, self.mock_kubecli)

        self.assertEqual(result, mock_vmware_instance)
        mock_vmware_scenarios.assert_called_once()

    @patch('krkn.scenario_plugins.node_actions.node_actions_scenario_plugin.vmware_node_scenarios')
    def test_get_node_scenario_object_vsphere(self, mock_vmware_scenarios):
        """
        Test get_node_scenario_object returns vmware_node_scenarios for vSphere cloud type alias
        """
        node_scenario = {"cloud_type": "vsphere"}
        mock_vmware_instance = Mock()
        mock_vmware_scenarios.return_value = mock_vmware_instance

        result = self.plugin.get_node_scenario_object(node_scenario, self.mock_kubecli)

        self.assertEqual(result, mock_vmware_instance)
        mock_vmware_scenarios.assert_called_once()

    @patch('krkn.scenario_plugins.node_actions.node_actions_scenario_plugin.ibm_node_scenarios')
    def test_get_node_scenario_object_ibm(self, mock_ibm_scenarios):
        """
        Test get_node_scenario_object returns ibm_node_scenarios for IBM cloud type
        """
        node_scenario = {"cloud_type": "ibm"}
        mock_ibm_instance = Mock()
        mock_ibm_scenarios.return_value = mock_ibm_instance

        result = self.plugin.get_node_scenario_object(node_scenario, self.mock_kubecli)

        self.assertEqual(result, mock_ibm_instance)
        mock_ibm_scenarios.assert_called_once()

    @patch('krkn.scenario_plugins.node_actions.node_actions_scenario_plugin.ibm_node_scenarios')
    def test_get_node_scenario_object_ibmcloud(self, mock_ibm_scenarios):
        """
        Test get_node_scenario_object returns ibm_node_scenarios for ibmcloud cloud type alias
        """
        node_scenario = {"cloud_type": "ibmcloud", "disable_ssl_verification": False}
        mock_ibm_instance = Mock()
        mock_ibm_scenarios.return_value = mock_ibm_instance

        result = self.plugin.get_node_scenario_object(node_scenario, self.mock_kubecli)

        self.assertEqual(result, mock_ibm_instance)
        args = mock_ibm_scenarios.call_args[0]
        self.assertFalse(args[3])  # disable_ssl_verification should be False

    @patch('krkn.scenario_plugins.node_actions.node_actions_scenario_plugin.ibmcloud_power_node_scenarios')
    def test_get_node_scenario_object_ibmpower(self, mock_ibmpower_scenarios):
        """
        Test get_node_scenario_object returns ibmcloud_power_node_scenarios for ibmpower cloud type
        """
        node_scenario = {"cloud_type": "ibmpower"}
        mock_ibmpower_instance = Mock()
        mock_ibmpower_scenarios.return_value = mock_ibmpower_instance

        result = self.plugin.get_node_scenario_object(node_scenario, self.mock_kubecli)

        self.assertEqual(result, mock_ibmpower_instance)
        mock_ibmpower_scenarios.assert_called_once()

    def test_get_node_scenario_object_openstack(self):
        """
        Test get_node_scenario_object returns openstack_node_scenarios for OpenStack cloud type
        """
        with patch('krkn.scenario_plugins.node_actions.openstack_node_scenarios.openstack_node_scenarios') as mock_openstack:
            node_scenario = {"cloud_type": "openstack"}
            mock_openstack_instance = Mock()
            mock_openstack.return_value = mock_openstack_instance

            result = self.plugin.get_node_scenario_object(node_scenario, self.mock_kubecli)

            self.assertEqual(result, mock_openstack_instance)
            mock_openstack.assert_called_once()

    def test_get_node_scenario_object_alibaba(self):
        """
        Test get_node_scenario_object returns alibaba_node_scenarios for Alibaba cloud type
        """
        with patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.alibaba_node_scenarios') as mock_alibaba:
            node_scenario = {"cloud_type": "alibaba"}
            mock_alibaba_instance = Mock()
            mock_alibaba.return_value = mock_alibaba_instance

            result = self.plugin.get_node_scenario_object(node_scenario, self.mock_kubecli)

            self.assertEqual(result, mock_alibaba_instance)
            mock_alibaba.assert_called_once()

    def test_get_node_scenario_object_alicloud(self):
        """
        Test get_node_scenario_object returns alibaba_node_scenarios for alicloud alias
        """
        with patch('krkn.scenario_plugins.node_actions.alibaba_node_scenarios.alibaba_node_scenarios') as mock_alibaba:
            node_scenario = {"cloud_type": "alicloud"}
            mock_alibaba_instance = Mock()
            mock_alibaba.return_value = mock_alibaba_instance

            result = self.plugin.get_node_scenario_object(node_scenario, self.mock_kubecli)

            self.assertEqual(result, mock_alibaba_instance)
            mock_alibaba.assert_called_once()
|
||||
|
||||
def test_get_node_scenario_object_bm(self):
|
||||
"""
|
||||
Test get_node_scenario_object returns bm_node_scenarios for bare metal cloud type
|
||||
"""
|
||||
with patch('krkn.scenario_plugins.node_actions.bm_node_scenarios.bm_node_scenarios') as mock_bm:
|
||||
node_scenario = {
|
||||
"cloud_type": "bm",
|
||||
"bmc_info": "192.168.1.1",
|
||||
"bmc_user": "admin",
|
||||
"bmc_password": "password"
|
||||
}
|
||||
mock_bm_instance = Mock()
|
||||
mock_bm.return_value = mock_bm_instance
|
||||
|
||||
result = self.plugin.get_node_scenario_object(node_scenario, self.mock_kubecli)
|
||||
|
||||
self.assertEqual(result, mock_bm_instance)
|
||||
args = mock_bm.call_args[0]
|
||||
self.assertEqual(args[0], "192.168.1.1")
|
||||
self.assertEqual(args[1], "admin")
|
||||
self.assertEqual(args[2], "password")
|
||||
|
||||
def test_get_node_scenario_object_unsupported_cloud(self):
|
||||
"""
|
||||
Test get_node_scenario_object raises exception for unsupported cloud type
|
||||
"""
|
||||
node_scenario = {"cloud_type": "unsupported_cloud"}
|
||||
|
||||
with self.assertRaises(Exception) as context:
|
||||
self.plugin.get_node_scenario_object(node_scenario, self.mock_kubecli)
|
||||
|
||||
self.assertIn("not currently supported", str(context.exception))
|
||||
self.assertIn("unsupported_cloud", str(context.exception))
|
||||
|
||||
@patch('krkn.scenario_plugins.node_actions.node_actions_scenario_plugin.common_node_functions')
|
||||
def test_inject_node_scenario_with_node_name(self, mock_common_funcs):
|
||||
"""
|
||||
Test inject_node_scenario with specific node name
|
||||
"""
|
||||
node_scenario = {
|
||||
"node_name": "node1,node2",
|
||||
"instance_count": 2,
|
||||
"runs": 1,
|
||||
"timeout": 120,
|
||||
"duration": 60,
|
||||
"poll_interval": 15
|
||||
}
|
||||
action = "node_stop_start_scenario"
|
||||
mock_scenario_object = Mock()
|
||||
mock_scenario_object.affected_nodes_status = AffectedNodeStatus()
|
||||
mock_scenario_object.affected_nodes_status.affected_nodes = []
|
||||
|
||||
mock_common_funcs.get_node_by_name.return_value = ["node1", "node2"]
|
||||
|
||||
self.plugin.inject_node_scenario(
|
||||
action,
|
||||
node_scenario,
|
||||
mock_scenario_object,
|
||||
self.mock_kubecli,
|
||||
self.mock_scenario_telemetry
|
||||
)
|
||||
|
||||
mock_common_funcs.get_node_by_name.assert_called_once_with(["node1", "node2"], self.mock_kubecli)
|
||||
self.assertEqual(mock_scenario_object.node_stop_start_scenario.call_count, 2)
|
||||
|
||||
@patch('krkn.scenario_plugins.node_actions.node_actions_scenario_plugin.common_node_functions')
|
||||
def test_inject_node_scenario_with_label_selector(self, mock_common_funcs):
|
||||
"""
|
||||
Test inject_node_scenario with label selector
|
||||
"""
|
||||
node_scenario = {
|
||||
"label_selector": "node-role.kubernetes.io/worker",
|
||||
"instance_count": 1
|
||||
}
|
||||
action = "node_reboot_scenario"
|
||||
mock_scenario_object = Mock()
|
||||
mock_scenario_object.affected_nodes_status = AffectedNodeStatus()
|
||||
mock_scenario_object.affected_nodes_status.affected_nodes = []
|
||||
|
||||
mock_common_funcs.get_node.return_value = ["worker-node-1"]
|
||||
|
||||
self.plugin.inject_node_scenario(
|
||||
action,
|
||||
node_scenario,
|
||||
mock_scenario_object,
|
||||
self.mock_kubecli,
|
||||
self.mock_scenario_telemetry
|
||||
)
|
||||
|
||||
mock_common_funcs.get_node.assert_called_once_with("node-role.kubernetes.io/worker", 1, self.mock_kubecli)
|
||||
mock_scenario_object.node_reboot_scenario.assert_called_once()
|
||||
|
||||
@patch('krkn.scenario_plugins.node_actions.node_actions_scenario_plugin.common_node_functions')
|
||||
def test_inject_node_scenario_with_exclude_label(self, mock_common_funcs):
|
||||
"""
|
||||
Test inject_node_scenario with exclude label
|
||||
"""
|
||||
node_scenario = {
|
||||
"label_selector": "node-role.kubernetes.io/worker",
|
||||
"exclude_label": "node-role.kubernetes.io/master",
|
||||
"instance_count": 2
|
||||
}
|
||||
action = "node_stop_scenario"
|
||||
mock_scenario_object = Mock()
|
||||
mock_scenario_object.affected_nodes_status = AffectedNodeStatus()
|
||||
mock_scenario_object.affected_nodes_status.affected_nodes = []
|
||||
|
||||
mock_common_funcs.get_node.side_effect = [
|
||||
["worker-1", "master-1"],
|
||||
["master-1"]
|
||||
]
|
||||
|
||||
self.plugin.inject_node_scenario(
|
||||
action,
|
||||
node_scenario,
|
||||
mock_scenario_object,
|
||||
self.mock_kubecli,
|
||||
self.mock_scenario_telemetry
|
||||
)
|
||||
|
||||
self.assertEqual(mock_common_funcs.get_node.call_count, 2)
|
||||
# Should only process worker-1 after excluding master-1
|
||||
self.assertEqual(mock_scenario_object.node_stop_scenario.call_count, 1)
|
||||
|
||||
@patch('krkn.scenario_plugins.node_actions.node_actions_scenario_plugin.common_node_functions')
|
||||
def test_inject_node_scenario_parallel_mode(self, mock_common_funcs):
|
||||
"""
|
||||
Test inject_node_scenario with parallel processing
|
||||
"""
|
||||
node_scenario = {
|
||||
"node_name": "node1,node2,node3",
|
||||
"parallel": True
|
||||
}
|
||||
action = "restart_kubelet_scenario"
|
||||
mock_scenario_object = Mock()
|
||||
mock_scenario_object.affected_nodes_status = AffectedNodeStatus()
|
||||
mock_scenario_object.affected_nodes_status.affected_nodes = []
|
||||
|
||||
mock_common_funcs.get_node_by_name.return_value = ["node1", "node2", "node3"]
|
||||
|
||||
with patch.object(self.plugin, 'multiprocess_nodes') as mock_multiprocess:
|
||||
self.plugin.inject_node_scenario(
|
||||
action,
|
||||
node_scenario,
|
||||
mock_scenario_object,
|
||||
self.mock_kubecli,
|
||||
self.mock_scenario_telemetry
|
||||
)
|
||||
|
||||
mock_multiprocess.assert_called_once()
|
||||
args = mock_multiprocess.call_args[0]
|
||||
self.assertEqual(args[0], ["node1", "node2", "node3"])
|
||||
self.assertEqual(args[2], action)
|
||||
|
||||
def test_run_node_node_start_scenario(self):
|
||||
"""
|
||||
Test run_node executes node_start_scenario action
|
||||
"""
|
||||
node_scenario = {"runs": 2, "timeout": 300, "poll_interval": 10}
|
||||
action = "node_start_scenario"
|
||||
mock_scenario_object = Mock()
|
||||
|
||||
self.plugin.run_node("test-node", mock_scenario_object, action, node_scenario)
|
||||
|
||||
mock_scenario_object.node_start_scenario.assert_called_once_with(2, "test-node", 300, 10)
|
||||
|
||||
def test_run_node_node_stop_scenario(self):
|
||||
"""
|
||||
Test run_node executes node_stop_scenario action
|
||||
"""
|
||||
node_scenario = {"runs": 1, "timeout": 120, "poll_interval": 15}
|
||||
action = "node_stop_scenario"
|
||||
mock_scenario_object = Mock()
|
||||
|
||||
self.plugin.run_node("test-node", mock_scenario_object, action, node_scenario)
|
||||
|
||||
mock_scenario_object.node_stop_scenario.assert_called_once_with(1, "test-node", 120, 15)
|
||||
|
||||
def test_run_node_node_stop_start_scenario(self):
|
||||
"""
|
||||
Test run_node executes node_stop_start_scenario action
|
||||
"""
|
||||
node_scenario = {"runs": 1, "timeout": 120, "duration": 60, "poll_interval": 15}
|
||||
action = "node_stop_start_scenario"
|
||||
mock_scenario_object = Mock()
|
||||
|
||||
self.plugin.run_node("test-node", mock_scenario_object, action, node_scenario)
|
||||
|
||||
mock_scenario_object.node_stop_start_scenario.assert_called_once_with(1, "test-node", 120, 60, 15)
|
||||
|
||||
def test_run_node_node_termination_scenario(self):
|
||||
"""
|
||||
Test run_node executes node_termination_scenario action
|
||||
"""
|
||||
node_scenario = {}
|
||||
action = "node_termination_scenario"
|
||||
mock_scenario_object = Mock()
|
||||
|
||||
self.plugin.run_node("test-node", mock_scenario_object, action, node_scenario)
|
||||
|
||||
mock_scenario_object.node_termination_scenario.assert_called_once_with(1, "test-node", 120, 15)
|
||||
|
||||
def test_run_node_node_reboot_scenario(self):
|
||||
"""
|
||||
Test run_node executes node_reboot_scenario action
|
||||
"""
|
||||
node_scenario = {"soft_reboot": True}
|
||||
action = "node_reboot_scenario"
|
||||
mock_scenario_object = Mock()
|
||||
|
||||
self.plugin.run_node("test-node", mock_scenario_object, action, node_scenario)
|
||||
|
||||
mock_scenario_object.node_reboot_scenario.assert_called_once_with(1, "test-node", 120, True)
|
||||
|
||||
def test_run_node_node_disk_detach_attach_scenario(self):
|
||||
"""
|
||||
Test run_node executes node_disk_detach_attach_scenario action
|
||||
"""
|
||||
node_scenario = {"duration": 90}
|
||||
action = "node_disk_detach_attach_scenario"
|
||||
mock_scenario_object = Mock()
|
||||
|
||||
self.plugin.run_node("test-node", mock_scenario_object, action, node_scenario)
|
||||
|
||||
mock_scenario_object.node_disk_detach_attach_scenario.assert_called_once_with(1, "test-node", 120, 90)
|
||||
|
||||
def test_run_node_stop_start_kubelet_scenario(self):
|
||||
"""
|
||||
Test run_node executes stop_start_kubelet_scenario action
|
||||
"""
|
||||
node_scenario = {}
|
||||
action = "stop_start_kubelet_scenario"
|
||||
mock_scenario_object = Mock()
|
||||
|
||||
self.plugin.run_node("test-node", mock_scenario_object, action, node_scenario)
|
||||
|
||||
mock_scenario_object.stop_start_kubelet_scenario.assert_called_once_with(1, "test-node", 120)
|
||||
|
||||
def test_run_node_restart_kubelet_scenario(self):
|
||||
"""
|
||||
Test run_node executes restart_kubelet_scenario action
|
||||
"""
|
||||
node_scenario = {}
|
||||
action = "restart_kubelet_scenario"
|
||||
mock_scenario_object = Mock()
|
||||
|
||||
self.plugin.run_node("test-node", mock_scenario_object, action, node_scenario)
|
||||
|
||||
mock_scenario_object.restart_kubelet_scenario.assert_called_once_with(1, "test-node", 120)
|
||||
|
||||
def test_run_node_stop_kubelet_scenario(self):
|
||||
"""
|
||||
Test run_node executes stop_kubelet_scenario action
|
||||
"""
|
||||
node_scenario = {}
|
||||
action = "stop_kubelet_scenario"
|
||||
mock_scenario_object = Mock()
|
||||
|
||||
self.plugin.run_node("test-node", mock_scenario_object, action, node_scenario)
|
||||
|
||||
mock_scenario_object.stop_kubelet_scenario.assert_called_once_with(1, "test-node", 120)
|
||||
|
||||
def test_run_node_node_crash_scenario(self):
|
||||
"""
|
||||
Test run_node executes node_crash_scenario action
|
||||
"""
|
||||
node_scenario = {}
|
||||
action = "node_crash_scenario"
|
||||
mock_scenario_object = Mock()
|
||||
|
||||
self.plugin.run_node("test-node", mock_scenario_object, action, node_scenario)
|
||||
|
||||
mock_scenario_object.node_crash_scenario.assert_called_once_with(1, "test-node", 120)
|
||||
|
||||
def test_run_node_node_block_scenario(self):
|
||||
"""
|
||||
Test run_node executes node_block_scenario action
|
||||
"""
|
||||
node_scenario = {"duration": 100}
|
||||
action = "node_block_scenario"
|
||||
mock_scenario_object = Mock()
|
||||
|
||||
self.plugin.run_node("test-node", mock_scenario_object, action, node_scenario)
|
||||
|
||||
mock_scenario_object.node_block_scenario.assert_called_once_with(1, "test-node", 120, 100)
|
||||
|
||||
@patch('logging.info')
|
||||
def test_run_node_stop_start_helper_node_scenario_openstack(self, mock_logging):
|
||||
"""
|
||||
Test run_node executes stop_start_helper_node_scenario for OpenStack
|
||||
"""
|
||||
node_scenario = {
|
||||
"cloud_type": "openstack",
|
||||
"helper_node_ip": "192.168.1.100",
|
||||
"service": "neutron-server"
|
||||
}
|
||||
action = "stop_start_helper_node_scenario"
|
||||
mock_scenario_object = Mock()
|
||||
|
||||
self.plugin.run_node("test-node", mock_scenario_object, action, node_scenario)
|
||||
|
||||
mock_scenario_object.helper_node_stop_start_scenario.assert_called_once_with(1, "192.168.1.100", 120)
|
||||
mock_scenario_object.helper_node_service_status.assert_called_once()
|
||||
|
||||
@patch('logging.error')
|
||||
def test_run_node_stop_start_helper_node_scenario_non_openstack(self, mock_logging):
|
||||
"""
|
||||
Test run_node logs error for stop_start_helper_node_scenario on non-OpenStack
|
||||
"""
|
||||
node_scenario = {
|
||||
"cloud_type": "aws",
|
||||
"helper_node_ip": "192.168.1.100"
|
||||
}
|
||||
action = "stop_start_helper_node_scenario"
|
||||
mock_scenario_object = Mock()
|
||||
|
||||
self.plugin.run_node("test-node", mock_scenario_object, action, node_scenario)
|
||||
|
||||
mock_logging.assert_called()
|
||||
self.assertIn("not supported", str(mock_logging.call_args))
|
||||
|
||||
@patch('logging.error')
|
||||
def test_run_node_stop_start_helper_node_scenario_missing_ip(self, mock_logging):
|
||||
"""
|
||||
Test run_node raises exception when helper_node_ip is missing
|
||||
"""
|
||||
node_scenario = {
|
||||
"cloud_type": "openstack",
|
||||
"helper_node_ip": None
|
||||
}
|
||||
action = "stop_start_helper_node_scenario"
|
||||
mock_scenario_object = Mock()
|
||||
|
||||
with self.assertRaises(Exception) as context:
|
||||
self.plugin.run_node("test-node", mock_scenario_object, action, node_scenario)
|
||||
|
||||
self.assertIn("Helper node IP address is not provided", str(context.exception))
|
||||
|
||||
@patch('logging.info')
|
||||
def test_run_node_generic_cloud_skip_unsupported_action(self, mock_logging):
|
||||
"""
|
||||
Test run_node skips unsupported actions for generic cloud type
|
||||
"""
|
||||
# Set node_general to True for this test
|
||||
import krkn.scenario_plugins.node_actions.node_actions_scenario_plugin as plugin_module
|
||||
plugin_module.node_general = True
|
||||
|
||||
node_scenario = {}
|
||||
action = "node_stop_scenario"
|
||||
mock_scenario_object = Mock()
|
||||
|
||||
self.plugin.run_node("test-node", mock_scenario_object, action, node_scenario)
|
||||
|
||||
mock_logging.assert_called()
|
||||
self.assertIn("not set up for generic cloud type", str(mock_logging.call_args))
|
||||
|
||||
@patch('logging.info')
|
||||
def test_run_node_unknown_action(self, mock_logging):
|
||||
"""
|
||||
Test run_node logs info for unknown action
|
||||
"""
|
||||
node_scenario = {}
|
||||
action = "unknown_action"
|
||||
mock_scenario_object = Mock()
|
||||
|
||||
self.plugin.run_node("test-node", mock_scenario_object, action, node_scenario)
|
||||
|
||||
mock_logging.assert_called()
|
||||
# Could be either message depending on node_general state
|
||||
call_str = str(mock_logging.call_args)
|
||||
self.assertTrue(
|
||||
"no node action that matches" in call_str or
|
||||
"not set up for generic cloud type" in call_str
|
||||
)
|
||||
|
||||
@patch('krkn.scenario_plugins.node_actions.node_actions_scenario_plugin.cerberus')
|
||||
@patch('krkn.scenario_plugins.node_actions.node_actions_scenario_plugin.common_node_functions')
|
||||
@patch('krkn.scenario_plugins.node_actions.node_actions_scenario_plugin.general_node_scenarios')
|
||||
@patch('builtins.open', new_callable=mock_open)
|
||||
@patch('time.time')
|
||||
def test_run_successful(self, mock_time, mock_file, mock_general_scenarios, mock_common_funcs, mock_cerberus):
|
||||
"""
|
||||
Test successful run of node actions scenario
|
||||
"""
|
||||
scenario_yaml = {
|
||||
"node_scenarios": [
|
||||
{
|
||||
"cloud_type": "generic",
|
||||
"node_name": "test-node",
|
||||
"actions": ["stop_kubelet_scenario"]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
mock_file.return_value.__enter__.return_value.read.return_value = yaml.dump(scenario_yaml)
|
||||
mock_time.side_effect = [1000, 1100]
|
||||
mock_scenario_object = Mock()
|
||||
mock_scenario_object.affected_nodes_status = AffectedNodeStatus()
|
||||
mock_scenario_object.affected_nodes_status.affected_nodes = []
|
||||
mock_general_scenarios.return_value = mock_scenario_object
|
||||
mock_common_funcs.get_node_by_name.return_value = ["test-node"]
|
||||
mock_cerberus.get_status.return_value = None
|
||||
|
||||
with patch('yaml.full_load', return_value=scenario_yaml):
|
||||
result = self.plugin.run(
|
||||
"test-uuid",
|
||||
"/path/to/scenario.yaml",
|
||||
{},
|
||||
self.mock_lib_telemetry,
|
||||
self.mock_scenario_telemetry
|
||||
)
|
||||
|
||||
self.assertEqual(result, 0)
|
||||
mock_cerberus.get_status.assert_called_once_with({}, 1000, 1100)
|
||||
|
||||
@patch('logging.error')
|
||||
@patch('builtins.open', new_callable=mock_open)
|
||||
def test_run_with_exception(self, mock_file, mock_logging):
|
||||
"""
|
||||
Test run handles exceptions and returns 1
|
||||
"""
|
||||
scenario_yaml = {
|
||||
"node_scenarios": [
|
||||
{
|
||||
"cloud_type": "unsupported"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
with patch('yaml.full_load', return_value=scenario_yaml):
|
||||
result = self.plugin.run(
|
||||
"test-uuid",
|
||||
"/path/to/scenario.yaml",
|
||||
{},
|
||||
self.mock_lib_telemetry,
|
||||
self.mock_scenario_telemetry
|
||||
)
|
||||
|
||||
self.assertEqual(result, 1)
|
||||
mock_logging.assert_called()
|
||||
|
||||
@patch('logging.info')
|
||||
def test_multiprocess_nodes(self, mock_logging):
|
||||
"""
|
||||
Test multiprocess_nodes executes run_node for multiple nodes in parallel
|
||||
"""
|
||||
nodes = ["node1", "node2", "node3"]
|
||||
mock_scenario_object = Mock()
|
||||
action = "restart_kubelet_scenario"
|
||||
node_scenario = {}
|
||||
|
||||
with patch('krkn.scenario_plugins.node_actions.node_actions_scenario_plugin.ThreadPool') as mock_pool:
|
||||
mock_pool_instance = Mock()
|
||||
mock_pool.return_value = mock_pool_instance
|
||||
|
||||
self.plugin.multiprocess_nodes(nodes, mock_scenario_object, action, node_scenario)
|
||||
|
||||
mock_pool.assert_called_once_with(processes=3)
|
||||
mock_pool_instance.starmap.assert_called_once()
|
||||
mock_pool_instance.close.assert_called_once()
|
||||
|
||||
@patch('logging.info')
|
||||
def test_multiprocess_nodes_with_exception(self, mock_logging):
|
||||
"""
|
||||
Test multiprocess_nodes handles exceptions gracefully
|
||||
"""
|
||||
nodes = ["node1", "node2"]
|
||||
mock_scenario_object = Mock()
|
||||
action = "node_reboot_scenario"
|
||||
node_scenario = {}
|
||||
|
||||
with patch('krkn.scenario_plugins.node_actions.node_actions_scenario_plugin.ThreadPool') as mock_pool:
|
||||
mock_pool.side_effect = Exception("Pool error")
|
||||
|
||||
self.plugin.multiprocess_nodes(nodes, mock_scenario_object, action, node_scenario)
|
||||
|
||||
mock_logging.assert_called()
|
||||
self.assertIn("Error on pool multiprocessing", str(mock_logging.call_args))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
||||
@@ -1,719 +0,0 @@
#!/usr/bin/env python3

"""
Test suite for OpenStack node scenarios

This test suite covers both the OPENSTACKCLOUD class and openstack_node_scenarios class
using mocks to avoid actual OpenStack CLI calls.

Usage:
    python -m coverage run -a -m unittest tests/test_openstack_node_scenarios.py -v

Assisted By: Claude Code
"""

import unittest
from unittest.mock import MagicMock, patch, Mock

from krkn_lib.k8s import KrknKubernetes
from krkn_lib.models.k8s import AffectedNode, AffectedNodeStatus
from krkn.scenario_plugins.node_actions.openstack_node_scenarios import (
    OPENSTACKCLOUD,
    openstack_node_scenarios
)


class TestOPENSTACKCLOUD(unittest.TestCase):
    """Test cases for OPENSTACKCLOUD class"""

    def setUp(self):
        """Set up test fixtures"""
        self.openstack = OPENSTACKCLOUD()

    def test_openstackcloud_init(self):
        """Test OPENSTACKCLOUD class initialization"""
        self.assertEqual(self.openstack.Wait, 30)

    @patch('krkn.scenario_plugins.node_actions.openstack_node_scenarios.OPENSTACKCLOUD.get_openstack_nodename')
    def test_get_instance_id(self, mock_get_nodename):
        """Test getting instance ID by node IP"""
        node_ip = '10.0.1.100'
        node_name = 'test-openstack-node'

        mock_get_nodename.return_value = node_name

        result = self.openstack.get_instance_id(node_ip)

        self.assertEqual(result, node_name)
        mock_get_nodename.assert_called_once_with(node_ip)

    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.openstack_node_scenarios.runcommand.invoke')
    def test_start_instances_success(self, mock_invoke, mock_logging):
        """Test starting instance successfully"""
        node_name = 'test-node'

        self.openstack.start_instances(node_name)

        mock_invoke.assert_called_once_with('openstack server start %s' % node_name)
        mock_logging.assert_called()
        self.assertIn("started", str(mock_logging.call_args))

    @patch('logging.error')
    @patch('krkn.scenario_plugins.node_actions.openstack_node_scenarios.runcommand.invoke')
    def test_start_instances_failure(self, mock_invoke, mock_logging):
        """Test starting instance with failure"""
        node_name = 'test-node'
        mock_invoke.side_effect = Exception("OpenStack error")

        with self.assertRaises(RuntimeError):
            self.openstack.start_instances(node_name)

        mock_logging.assert_called()
        self.assertIn("Failed to start", str(mock_logging.call_args))

    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.openstack_node_scenarios.runcommand.invoke')
    def test_stop_instances_success(self, mock_invoke, mock_logging):
        """Test stopping instance successfully"""
        node_name = 'test-node'

        self.openstack.stop_instances(node_name)

        mock_invoke.assert_called_once_with('openstack server stop %s' % node_name)
        mock_logging.assert_called()
        self.assertIn("stopped", str(mock_logging.call_args))

    @patch('logging.error')
    @patch('krkn.scenario_plugins.node_actions.openstack_node_scenarios.runcommand.invoke')
    def test_stop_instances_failure(self, mock_invoke, mock_logging):
        """Test stopping instance with failure"""
        node_name = 'test-node'
        mock_invoke.side_effect = Exception("OpenStack error")

        with self.assertRaises(RuntimeError):
            self.openstack.stop_instances(node_name)

        mock_logging.assert_called()
        self.assertIn("Failed to stop", str(mock_logging.call_args))

    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.openstack_node_scenarios.runcommand.invoke')
    def test_reboot_instances_success(self, mock_invoke, mock_logging):
        """Test rebooting instance successfully"""
        node_name = 'test-node'

        self.openstack.reboot_instances(node_name)

        mock_invoke.assert_called_once_with('openstack server reboot --soft %s' % node_name)
        mock_logging.assert_called()
        self.assertIn("rebooted", str(mock_logging.call_args))

    @patch('logging.error')
    @patch('krkn.scenario_plugins.node_actions.openstack_node_scenarios.runcommand.invoke')
    def test_reboot_instances_failure(self, mock_invoke, mock_logging):
        """Test rebooting instance with failure"""
        node_name = 'test-node'
        mock_invoke.side_effect = Exception("OpenStack error")

        with self.assertRaises(RuntimeError):
            self.openstack.reboot_instances(node_name)

        mock_logging.assert_called()
        self.assertIn("Failed to reboot", str(mock_logging.call_args))

    @patch('time.time')
    @patch('krkn.scenario_plugins.node_actions.openstack_node_scenarios.OPENSTACKCLOUD.get_instance_status')
    def test_wait_until_running_success(self, mock_get_status, mock_time):
        """Test waiting until instance is running successfully"""
        node_name = 'test-node'
        timeout = 300

        mock_time.side_effect = [100, 110]
        mock_get_status.return_value = True

        affected_node = Mock(spec=AffectedNode)
        result = self.openstack.wait_until_running(node_name, timeout, affected_node)

        self.assertTrue(result)
        mock_get_status.assert_called_once_with(node_name, "ACTIVE", timeout)
        affected_node.set_affected_node_status.assert_called_once_with("running", 10)

    @patch('time.time')
    @patch('krkn.scenario_plugins.node_actions.openstack_node_scenarios.OPENSTACKCLOUD.get_instance_status')
    def test_wait_until_running_without_affected_node(self, mock_get_status, mock_time):
        """Test waiting until running without affected node tracking"""
        node_name = 'test-node'
        timeout = 300

        mock_get_status.return_value = True

        result = self.openstack.wait_until_running(node_name, timeout, None)

        self.assertTrue(result)

    @patch('time.time')
    @patch('krkn.scenario_plugins.node_actions.openstack_node_scenarios.OPENSTACKCLOUD.get_instance_status')
    def test_wait_until_stopped_success(self, mock_get_status, mock_time):
        """Test waiting until instance is stopped successfully"""
        node_name = 'test-node'
        timeout = 300

        mock_time.side_effect = [100, 115]
        mock_get_status.return_value = True

        affected_node = Mock(spec=AffectedNode)
        result = self.openstack.wait_until_stopped(node_name, timeout, affected_node)

        self.assertTrue(result)
        mock_get_status.assert_called_once_with(node_name, "SHUTOFF", timeout)
        affected_node.set_affected_node_status.assert_called_once_with("stopped", 15)

    @patch('time.sleep')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.openstack_node_scenarios.runcommand.invoke')
    def test_get_instance_status_success(self, mock_invoke, mock_logging, mock_sleep):
        """Test getting instance status when it matches expected status"""
        node_name = 'test-node'
        expected_status = 'ACTIVE'
        timeout = 60

        mock_invoke.return_value = 'ACTIVE'

        result = self.openstack.get_instance_status(node_name, expected_status, timeout)

        self.assertTrue(result)
        mock_invoke.assert_called()

    @patch('time.sleep')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.openstack_node_scenarios.runcommand.invoke')
    def test_get_instance_status_timeout(self, mock_invoke, mock_logging, mock_sleep):
        """Test getting instance status with timeout"""
        node_name = 'test-node'
        expected_status = 'ACTIVE'
        timeout = 2

        mock_invoke.return_value = 'SHUTOFF'

        result = self.openstack.get_instance_status(node_name, expected_status, timeout)

        self.assertFalse(result)

    @patch('time.sleep')
    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.openstack_node_scenarios.runcommand.invoke')
    def test_get_instance_status_with_whitespace(self, mock_invoke, mock_logging, mock_sleep):
        """Test getting instance status with whitespace in response"""
        node_name = 'test-node'
        expected_status = 'ACTIVE'
        timeout = 60

        mock_invoke.return_value = ' ACTIVE '

        result = self.openstack.get_instance_status(node_name, expected_status, timeout)

        self.assertTrue(result)

    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.openstack_node_scenarios.runcommand.invoke')
    def test_get_openstack_nodename_success(self, mock_invoke, mock_logging):
        """Test getting OpenStack node name by IP"""
        node_ip = '10.0.1.100'

        # Mock OpenStack server list output
        mock_output = """| 12345 | test-node | ACTIVE | network1=10.0.1.100 |"""
        mock_invoke.return_value = mock_output

        result = self.openstack.get_openstack_nodename(node_ip)

        self.assertEqual(result, 'test-node')
        mock_invoke.assert_called_once()

    @patch('logging.info')
    @patch('krkn.scenario_plugins.node_actions.openstack_node_scenarios.runcommand.invoke')
    def test_get_openstack_nodename_multiple_servers(self, mock_invoke, mock_logging):
        """Test getting OpenStack node name with multiple servers"""
        node_ip = '10.0.1.101'

        # Mock OpenStack server list output with multiple servers
        mock_output = """| 12345 | test-node-1 | ACTIVE | network1=10.0.1.100 |
| 67890 | test-node-2 | ACTIVE | network1=10.0.1.101 |"""
        mock_invoke.return_value = mock_output

        result = self.openstack.get_openstack_nodename(node_ip)

        self.assertEqual(result, 'test-node-2')

    @patch('krkn.scenario_plugins.node_actions.openstack_node_scenarios.runcommand.invoke')
    def test_get_openstack_nodename_no_match(self, mock_invoke):
        """Test getting OpenStack node name with no matching IP"""
        node_ip = '10.0.1.200'

        mock_output = """| 12345 | test-node | ACTIVE | network1=10.0.1.100 |"""
        mock_invoke.return_value = mock_output

        result = self.openstack.get_openstack_nodename(node_ip)

        self.assertIsNone(result)


class TestOpenstackNodeScenarios(unittest.TestCase):
    """Test cases for openstack_node_scenarios class"""

    def setUp(self):
        """Set up test fixtures"""
        self.kubecli = MagicMock(spec=KrknKubernetes)
        self.affected_nodes_status = AffectedNodeStatus()

        # Mock the OPENSTACKCLOUD class
        with patch('krkn.scenario_plugins.node_actions.openstack_node_scenarios.OPENSTACKCLOUD') as mock_openstack_class:
            self.mock_openstack = MagicMock()
            mock_openstack_class.return_value = self.mock_openstack
            self.scenario = openstack_node_scenarios(
                kubecli=self.kubecli,
                node_action_kube_check=True,
                affected_nodes_status=self.affected_nodes_status
            )

    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_ready_status')
    def test_node_start_scenario_success(self, mock_wait_ready):
        """Test node start scenario successfully"""
        node = 'test-node'
        node_ip = '10.0.1.100'
        openstack_node_name = 'openstack-test-node'

        self.kubecli.get_node_ip.return_value = node_ip
        self.mock_openstack.get_instance_id.return_value = openstack_node_name
        self.mock_openstack.start_instances.return_value = None
        self.mock_openstack.wait_until_running.return_value = True

        self.scenario.node_start_scenario(
            instance_kill_count=1,
            node=node,
            timeout=600,
            poll_interval=15
        )

        self.kubecli.get_node_ip.assert_called_once_with(node)
        self.mock_openstack.get_instance_id.assert_called_once_with(node_ip)
        self.mock_openstack.start_instances.assert_called_once_with(openstack_node_name)
        self.mock_openstack.wait_until_running.assert_called_once()
        mock_wait_ready.assert_called_once()
        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 1)

    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_ready_status')
    def test_node_start_scenario_no_kube_check(self, mock_wait_ready):
        """Test node start scenario without kube check"""
        node = 'test-node'
        node_ip = '10.0.1.100'
        openstack_node_name = 'openstack-test-node'

        # Create scenario with node_action_kube_check=False
        with patch('krkn.scenario_plugins.node_actions.openstack_node_scenarios.OPENSTACKCLOUD') as mock_openstack_class:
            mock_openstack = MagicMock()
            mock_openstack_class.return_value = mock_openstack
            scenario = openstack_node_scenarios(
                kubecli=self.kubecli,
                node_action_kube_check=False,
                affected_nodes_status=AffectedNodeStatus()
            )

            self.kubecli.get_node_ip.return_value = node_ip
            mock_openstack.get_instance_id.return_value = openstack_node_name
            mock_openstack.start_instances.return_value = None
            mock_openstack.wait_until_running.return_value = True

            scenario.node_start_scenario(
                instance_kill_count=1,
                node=node,
                timeout=600,
                poll_interval=15
            )

            # Should not call wait_for_ready_status
            mock_wait_ready.assert_not_called()

    def test_node_start_scenario_failure(self):
        """Test node start scenario with failure"""
        node = 'test-node'
        node_ip = '10.0.1.100'

        self.kubecli.get_node_ip.return_value = node_ip
        self.mock_openstack.get_instance_id.side_effect = Exception("OpenStack error")

        with self.assertRaises(RuntimeError):
            self.scenario.node_start_scenario(
                instance_kill_count=1,
                node=node,
                timeout=600,
                poll_interval=15
            )

    def test_node_start_scenario_multiple_kills(self):
        """Test node start scenario with multiple kill counts"""
        node = 'test-node'
        node_ip = '10.0.1.100'
        openstack_node_name = 'openstack-test-node'

        with patch('krkn.scenario_plugins.node_actions.openstack_node_scenarios.OPENSTACKCLOUD') as mock_openstack_class:
            mock_openstack = MagicMock()
            mock_openstack_class.return_value = mock_openstack
            scenario = openstack_node_scenarios(
                kubecli=self.kubecli,
                node_action_kube_check=False,
                affected_nodes_status=AffectedNodeStatus()
            )

            self.kubecli.get_node_ip.return_value = node_ip
            mock_openstack.get_instance_id.return_value = openstack_node_name
            mock_openstack.start_instances.return_value = None
            mock_openstack.wait_until_running.return_value = True

            scenario.node_start_scenario(
                instance_kill_count=3,
                node=node,
                timeout=600,
                poll_interval=15
            )

            self.assertEqual(mock_openstack.start_instances.call_count, 3)
            self.assertEqual(len(scenario.affected_nodes_status.affected_nodes), 3)

    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_not_ready_status')
    def test_node_stop_scenario_success(self, mock_wait_not_ready):
        """Test node stop scenario successfully"""
        node = 'test-node'
        node_ip = '10.0.1.100'
        openstack_node_name = 'openstack-test-node'

        self.kubecli.get_node_ip.return_value = node_ip
        self.mock_openstack.get_instance_id.return_value = openstack_node_name
        self.mock_openstack.stop_instances.return_value = None
        self.mock_openstack.wait_until_stopped.return_value = True

        self.scenario.node_stop_scenario(
            instance_kill_count=1,
            node=node,
            timeout=600,
            poll_interval=15
        )

        self.kubecli.get_node_ip.assert_called_once_with(node)
        self.mock_openstack.get_instance_id.assert_called_once_with(node_ip)
        self.mock_openstack.stop_instances.assert_called_once_with(openstack_node_name)
        self.mock_openstack.wait_until_stopped.assert_called_once()
        mock_wait_not_ready.assert_called_once()
        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 1)

    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_not_ready_status')
    def test_node_stop_scenario_no_kube_check(self, mock_wait_not_ready):
        """Test node stop scenario without kube check"""
        node = 'test-node'
        node_ip = '10.0.1.100'
        openstack_node_name = 'openstack-test-node'

        # Create scenario with node_action_kube_check=False
        with patch('krkn.scenario_plugins.node_actions.openstack_node_scenarios.OPENSTACKCLOUD') as mock_openstack_class:
            mock_openstack = MagicMock()
            mock_openstack_class.return_value = mock_openstack
            scenario = openstack_node_scenarios(
                kubecli=self.kubecli,
                node_action_kube_check=False,
                affected_nodes_status=AffectedNodeStatus()
            )

            self.kubecli.get_node_ip.return_value = node_ip
            mock_openstack.get_instance_id.return_value = openstack_node_name
            mock_openstack.stop_instances.return_value = None
            mock_openstack.wait_until_stopped.return_value = True

            scenario.node_stop_scenario(
                instance_kill_count=1,
                node=node,
                timeout=600,
                poll_interval=15
            )

            # Should not call wait_for_not_ready_status
            mock_wait_not_ready.assert_not_called()

    def test_node_stop_scenario_failure(self):
        """Test node stop scenario with failure"""
        node = 'test-node'
        node_ip = '10.0.1.100'

        self.kubecli.get_node_ip.return_value = node_ip
        self.mock_openstack.get_instance_id.side_effect = Exception("OpenStack error")

        with self.assertRaises(RuntimeError):
            self.scenario.node_stop_scenario(
                instance_kill_count=1,
                node=node,
                timeout=600,
                poll_interval=15
            )

    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_ready_status')
    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_unknown_status')
    def test_node_reboot_scenario_success(self, mock_wait_unknown, mock_wait_ready):
        """Test node reboot scenario successfully"""
        node = 'test-node'
        node_ip = '10.0.1.100'
        openstack_node_name = 'openstack-test-node'

        self.kubecli.get_node_ip.return_value = node_ip
        self.mock_openstack.get_instance_id.return_value = openstack_node_name
        self.mock_openstack.reboot_instances.return_value = None

        self.scenario.node_reboot_scenario(
            instance_kill_count=1,
            node=node,
            timeout=600
        )

        self.kubecli.get_node_ip.assert_called_once_with(node)
        self.mock_openstack.get_instance_id.assert_called_once_with(node_ip)
        self.mock_openstack.reboot_instances.assert_called_once_with(openstack_node_name)
        mock_wait_unknown.assert_called_once()
        mock_wait_ready.assert_called_once()
        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 1)

    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_ready_status')
    @patch('krkn.scenario_plugins.node_actions.common_node_functions.wait_for_unknown_status')
    def test_node_reboot_scenario_no_kube_check(self, mock_wait_unknown, mock_wait_ready):
        """Test node reboot scenario without kube check"""
        node = 'test-node'
        node_ip = '10.0.1.100'
        openstack_node_name = 'openstack-test-node'

        # Create scenario with node_action_kube_check=False
        with patch('krkn.scenario_plugins.node_actions.openstack_node_scenarios.OPENSTACKCLOUD') as mock_openstack_class:
            mock_openstack = MagicMock()
            mock_openstack_class.return_value = mock_openstack
            scenario = openstack_node_scenarios(
                kubecli=self.kubecli,
                node_action_kube_check=False,
                affected_nodes_status=AffectedNodeStatus()
            )

            self.kubecli.get_node_ip.return_value = node_ip
            mock_openstack.get_instance_id.return_value = openstack_node_name
            mock_openstack.reboot_instances.return_value = None

            scenario.node_reboot_scenario(
                instance_kill_count=1,
                node=node,
                timeout=600
            )

            # Should not call wait functions
            mock_wait_unknown.assert_not_called()
            mock_wait_ready.assert_not_called()

    def test_node_reboot_scenario_failure(self):
        """Test node reboot scenario with failure"""
        node = 'test-node'
        node_ip = '10.0.1.100'

        self.kubecli.get_node_ip.return_value = node_ip
        self.mock_openstack.get_instance_id.side_effect = Exception("OpenStack error")

        with self.assertRaises(RuntimeError):
            self.scenario.node_reboot_scenario(
                instance_kill_count=1,
                node=node,
                timeout=600
            )

    def test_node_reboot_scenario_multiple_kills(self):
        """Test node reboot scenario with multiple kill counts"""
        node = 'test-node'
        node_ip = '10.0.1.100'
        openstack_node_name = 'openstack-test-node'

        with patch('krkn.scenario_plugins.node_actions.openstack_node_scenarios.OPENSTACKCLOUD') as mock_openstack_class:
            mock_openstack = MagicMock()
            mock_openstack_class.return_value = mock_openstack
            scenario = openstack_node_scenarios(
                kubecli=self.kubecli,
                node_action_kube_check=False,
                affected_nodes_status=AffectedNodeStatus()
            )

            self.kubecli.get_node_ip.return_value = node_ip
            mock_openstack.get_instance_id.return_value = openstack_node_name
            mock_openstack.reboot_instances.return_value = None

            scenario.node_reboot_scenario(
                instance_kill_count=3,
                node=node,
                timeout=600
            )

            self.assertEqual(mock_openstack.reboot_instances.call_count, 3)
            self.assertEqual(len(scenario.affected_nodes_status.affected_nodes), 3)

    def test_helper_node_start_scenario_success(self):
        """Test helper node start scenario successfully"""
        node_ip = '192.168.1.50'
        openstack_node_name = 'helper-node'

        self.mock_openstack.get_openstack_nodename.return_value = openstack_node_name
        self.mock_openstack.start_instances.return_value = None
        self.mock_openstack.wait_until_running.return_value = True

        self.scenario.helper_node_start_scenario(
            instance_kill_count=1,
            node_ip=node_ip,
            timeout=600
        )

        self.mock_openstack.get_openstack_nodename.assert_called_once_with(node_ip.strip())
        self.mock_openstack.start_instances.assert_called_once_with(openstack_node_name)
        self.mock_openstack.wait_until_running.assert_called_once()
        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 1)

    def test_helper_node_start_scenario_failure(self):
        """Test helper node start scenario with failure"""
        node_ip = '192.168.1.50'

        self.mock_openstack.get_openstack_nodename.side_effect = Exception("OpenStack error")

        with self.assertRaises(RuntimeError):
            self.scenario.helper_node_start_scenario(
                instance_kill_count=1,
                node_ip=node_ip,
                timeout=600
            )

    def test_helper_node_start_scenario_multiple_kills(self):
        """Test helper node start scenario with multiple kill counts"""
        node_ip = '192.168.1.50'
        openstack_node_name = 'helper-node'

        with patch('krkn.scenario_plugins.node_actions.openstack_node_scenarios.OPENSTACKCLOUD') as mock_openstack_class:
            mock_openstack = MagicMock()
            mock_openstack_class.return_value = mock_openstack
            scenario = openstack_node_scenarios(
                kubecli=self.kubecli,
                node_action_kube_check=False,
                affected_nodes_status=AffectedNodeStatus()
            )

            mock_openstack.get_openstack_nodename.return_value = openstack_node_name
            mock_openstack.start_instances.return_value = None
            mock_openstack.wait_until_running.return_value = True

            scenario.helper_node_start_scenario(
                instance_kill_count=2,
                node_ip=node_ip,
                timeout=600
            )

            self.assertEqual(mock_openstack.start_instances.call_count, 2)
            self.assertEqual(len(scenario.affected_nodes_status.affected_nodes), 2)

    def test_helper_node_stop_scenario_success(self):
        """Test helper node stop scenario successfully"""
        node_ip = '192.168.1.50'
        openstack_node_name = 'helper-node'

        self.mock_openstack.get_openstack_nodename.return_value = openstack_node_name
        self.mock_openstack.stop_instances.return_value = None
        self.mock_openstack.wait_until_stopped.return_value = True

        self.scenario.helper_node_stop_scenario(
            instance_kill_count=1,
            node_ip=node_ip,
            timeout=600
        )

        self.mock_openstack.get_openstack_nodename.assert_called_once_with(node_ip.strip())
        self.mock_openstack.stop_instances.assert_called_once_with(openstack_node_name)
        self.mock_openstack.wait_until_stopped.assert_called_once()
        self.assertEqual(len(self.affected_nodes_status.affected_nodes), 1)

    def test_helper_node_stop_scenario_failure(self):
        """Test helper node stop scenario with failure"""
        node_ip = '192.168.1.50'

        self.mock_openstack.get_openstack_nodename.side_effect = Exception("OpenStack error")

        with self.assertRaises(RuntimeError):
            self.scenario.helper_node_stop_scenario(
                instance_kill_count=1,
                node_ip=node_ip,
                timeout=600
            )

    @patch('krkn.scenario_plugins.node_actions.common_node_functions.check_service_status')
    def test_helper_node_service_status_success(self, mock_check_service):
        """Test helper node service status check successfully"""
        node_ip = '192.168.1.50'
        service = 'kubelet'
        ssh_private_key = '/path/to/key'
        timeout = 300

        mock_check_service.return_value = None

        self.scenario.helper_node_service_status(
            node_ip=node_ip,
            service=service,
            ssh_private_key=ssh_private_key,
            timeout=timeout
        )

        mock_check_service.assert_called_once_with(
            node_ip.strip(),
            service,
            ssh_private_key,
            timeout
        )

    @patch('krkn.scenario_plugins.node_actions.common_node_functions.check_service_status')
    def test_helper_node_service_status_failure(self, mock_check_service):
        """Test helper node service status check with failure"""
        node_ip = '192.168.1.50'
        service = 'kubelet'
        ssh_private_key = '/path/to/key'
        timeout = 300

        mock_check_service.side_effect = Exception("Service check failed")

        with self.assertRaises(RuntimeError):
            self.scenario.helper_node_service_status(
                node_ip=node_ip,
                service=service,
                ssh_private_key=ssh_private_key,
                timeout=timeout
            )

    @patch('krkn.scenario_plugins.node_actions.common_node_functions.check_service_status')
    def test_helper_node_service_status_with_whitespace_ip(self, mock_check_service):
        """Test helper node service status with whitespace in IP"""
        node_ip = ' 192.168.1.50 '
        service = 'kubelet'
        ssh_private_key = '/path/to/key'
        timeout = 300

        mock_check_service.return_value = None

        self.scenario.helper_node_service_status(
            node_ip=node_ip,
            service=service,
            ssh_private_key=ssh_private_key,
            timeout=timeout
        )

        # Verify IP was stripped
        mock_check_service.assert_called_once_with(
            node_ip.strip(),
            service,
            ssh_private_key,
            timeout
        )


if __name__ == "__main__":
    unittest.main()
@@ -9,22 +9,16 @@ Usage:
Assisted By: Claude Code
"""

import base64
import json
import os
import tempfile
import unittest
from unittest.mock import MagicMock, patch
import yaml
from unittest.mock import MagicMock

from krkn_lib.k8s import KrknKubernetes
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift

from krkn.scenario_plugins.pvc.pvc_scenario_plugin import PvcScenarioPlugin
from krkn.rollback.config import RollbackContent


class TestPvcScenarioPlugin(unittest.TestCase):
    """Unit tests for PvcScenarioPlugin class"""

    def setUp(self):
        """
@@ -42,790 +36,5 @@ class TestPvcScenarioPlugin(unittest.TestCase):
|
||||
self.assertEqual(len(result), 1)
|
||||
|
||||
|
||||
class TestToKbytes(unittest.TestCase):
|
||||
"""Tests for the to_kbytes method"""
|
||||
|
||||
def setUp(self):
|
||||
"""Set up test fixtures"""
|
||||
self.plugin = PvcScenarioPlugin()
|
||||
|
||||
def test_to_kbytes_1ki(self):
|
||||
"""Test to_kbytes with 1Ki"""
|
||||
self.assertEqual(self.plugin.to_kbytes("1Ki"), 1)
|
||||
|
||||
def test_to_kbytes_2ki(self):
|
||||
"""Test to_kbytes with 2Ki"""
|
||||
self.assertEqual(self.plugin.to_kbytes("2Ki"), 2)
|
||||
|
||||
def test_to_kbytes_1mi(self):
|
||||
"""Test to_kbytes with 1Mi"""
|
||||
self.assertEqual(self.plugin.to_kbytes("1Mi"), 1024)
|
||||
|
||||
def test_to_kbytes_2mi(self):
|
||||
"""Test to_kbytes with 2Mi"""
|
||||
self.assertEqual(self.plugin.to_kbytes("2Mi"), 2 * 1024)
|
||||
|
||||
def test_to_kbytes_1gi(self):
|
||||
"""Test to_kbytes with 1Gi"""
|
||||
self.assertEqual(self.plugin.to_kbytes("1Gi"), 1024 * 1024)
|
||||
|
||||
def test_to_kbytes_5gi(self):
|
||||
"""Test to_kbytes with 5Gi"""
|
||||
self.assertEqual(self.plugin.to_kbytes("5Gi"), 5 * 1024 * 1024)
|
||||
|
||||
def test_to_kbytes_1ti(self):
|
||||
"""Test to_kbytes with 1Ti"""
|
||||
self.assertEqual(self.plugin.to_kbytes("1Ti"), 1024 * 1024 * 1024)
|
||||
|
||||
def test_to_kbytes_invalid_missing_i(self):
|
||||
"""Test to_kbytes raises RuntimeError for 1K (missing i)"""
|
||||
with self.assertRaises(RuntimeError):
|
||||
self.plugin.to_kbytes("1K")
|
||||
|
||||
def test_to_kbytes_invalid_format_mb(self):
|
||||
"""Test to_kbytes raises RuntimeError for 1MB"""
|
||||
with self.assertRaises(RuntimeError):
|
||||
self.plugin.to_kbytes("1MB")
|
||||
|
||||
def test_to_kbytes_invalid_extra_char(self):
|
||||
"""Test to_kbytes raises RuntimeError for 1Gib"""
|
||||
with self.assertRaises(RuntimeError):
|
||||
self.plugin.to_kbytes("1Gib")
|
||||
|
||||
def test_to_kbytes_invalid_non_numeric(self):
|
||||
"""Test to_kbytes raises RuntimeError for abc"""
|
||||
with self.assertRaises(RuntimeError):
|
||||
self.plugin.to_kbytes("abc")
|
||||
|
||||
def test_to_kbytes_invalid_missing_unit(self):
|
||||
"""Test to_kbytes raises RuntimeError for 1024"""
|
||||
with self.assertRaises(RuntimeError):
|
||||
self.plugin.to_kbytes("1024")
|
||||
|
||||
def test_to_kbytes_invalid_empty(self):
|
||||
"""Test to_kbytes raises RuntimeError for empty string"""
|
||||
with self.assertRaises(RuntimeError):
|
||||
self.plugin.to_kbytes("")
|
||||
|
||||
def test_to_kbytes_invalid_unsupported_unit(self):
|
||||
"""Test to_kbytes raises RuntimeError for 1Pi (unsupported)"""
|
||||
with self.assertRaises(RuntimeError):
|
||||
self.plugin.to_kbytes("1Pi")
|
||||
|
||||
|
||||
class TestRemoveTempFile(unittest.TestCase):
|
||||
"""Tests for the remove_temp_file method"""
|
||||
|
||||
def setUp(self):
|
||||
"""Set up test fixtures"""
|
||||
self.plugin = PvcScenarioPlugin()
|
||||
|
||||
def test_remove_temp_file_success(self):
|
||||
"""Test successful removal of temp file"""
|
||||
mock_kubecli = MagicMock(spec=KrknKubernetes)
|
||||
# Simulate file not present in ls output after removal
|
||||
mock_kubecli.exec_cmd_in_pod.side_effect = [
|
||||
"", # rm -f command output
|
||||
"total 0\ndrwxr-xr-x 2 root root 40 Jan 1 00:00 .", # ls -lh output without kraken.tmp
|
||||
]
|
||||
|
||||
# Should not raise any exception
|
||||
self.plugin.remove_temp_file(
|
||||
file_name="kraken.tmp",
|
||||
full_path="/mnt/data/kraken.tmp",
|
||||
pod_name="test-pod",
|
||||
namespace="test-ns",
|
||||
container_name="test-container",
|
||||
mount_path="/mnt/data",
|
||||
file_size_kb=1024,
|
||||
kubecli=mock_kubecli,
|
||||
)
|
||||
|
||||
# Verify exec_cmd_in_pod was called twice (rm and ls)
|
||||
self.assertEqual(mock_kubecli.exec_cmd_in_pod.call_count, 2)
|
||||
|
||||
def test_remove_temp_file_failure(self):
|
||||
"""Test removal failure when file still exists"""
|
||||
mock_kubecli = MagicMock(spec=KrknKubernetes)
|
||||
# Simulate file still present in ls output after removal attempt
|
||||
mock_kubecli.exec_cmd_in_pod.side_effect = [
|
||||
"", # rm -f command output
|
||||
"total 1024\n-rw-r--r-- 1 root root 1M Jan 1 00:00 kraken.tmp", # ls -lh output with kraken.tmp
|
||||
]
|
||||
|
||||
with self.assertRaises(RuntimeError):
|
||||
self.plugin.remove_temp_file(
|
||||
file_name="kraken.tmp",
|
||||
full_path="/mnt/data/kraken.tmp",
|
||||
pod_name="test-pod",
|
||||
namespace="test-ns",
|
||||
container_name="test-container",
|
||||
mount_path="/mnt/data",
|
||||
file_size_kb=1024,
|
||||
kubecli=mock_kubecli,
|
||||
)
|
||||


class TestRollbackTempFile(unittest.TestCase):
    """Tests for the rollback_temp_file static method"""

    def test_rollback_temp_file_success(self):
        """Test successful rollback removes temp file"""
        # Create mock telemetry
        mock_telemetry = MagicMock(spec=KrknTelemetryOpenshift)
        mock_kubecli = MagicMock()
        mock_telemetry.get_lib_kubernetes.return_value = mock_kubecli

        # Simulate successful file removal
        mock_kubecli.exec_cmd_in_pod.side_effect = [
            "",  # rm -f command output
            "total 0\ndrwxr-xr-x 2 root root 40 Jan 1 00:00 .",  # ls -lh output without file
        ]

        # Create rollback data
        rollback_data = {
            "pod_name": "test-pod",
            "container_name": "test-container",
            "full_path": "/mnt/data/kraken.tmp",
            "file_name": "kraken.tmp",
            "mount_path": "/mnt/data",
        }
        encoded_data = base64.b64encode(
            json.dumps(rollback_data).encode("utf-8")
        ).decode("utf-8")

        rollback_content = RollbackContent(
            namespace="test-ns",
            resource_identifier=encoded_data,
        )

        # Should not raise any exception
        PvcScenarioPlugin.rollback_temp_file(rollback_content, mock_telemetry)

        # Verify exec_cmd_in_pod was called
        self.assertEqual(mock_kubecli.exec_cmd_in_pod.call_count, 2)

    @patch("krkn.scenario_plugins.pvc.pvc_scenario_plugin.logging")
    def test_rollback_temp_file_invalid_data(self, mock_logging):
        """Test rollback handles invalid encoded data gracefully and logs error"""
        mock_telemetry = MagicMock(spec=KrknTelemetryOpenshift)

        rollback_content = RollbackContent(
            namespace="test-ns",
            resource_identifier="invalid-base64-data!!!",
        )

        # Should not raise exception, just log the error
        PvcScenarioPlugin.rollback_temp_file(rollback_content, mock_telemetry)

        # Verify error was logged to inform users of rollback failure
        mock_logging.error.assert_called_once()
        error_message = mock_logging.error.call_args[0][0]
        self.assertIn("Failed to rollback PVC scenario temp file", error_message)

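
# Rollback state for this plugin travels in RollbackContent.resource_identifier
# as base64-encoded JSON. The round-trip the tests above rely on looks like this
# (key set taken from the payload dicts used in the tests):
#
#     payload = {
#         "pod_name": ..., "container_name": ..., "full_path": ...,
#         "file_name": ..., "mount_path": ...,
#     }
#     encoded = base64.b64encode(json.dumps(payload).encode("utf-8")).decode("utf-8")
#     decoded = json.loads(base64.b64decode(encoded).decode("utf-8"))  # == payload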


class TestPvcScenarioPluginRun(unittest.TestCase):
    """Tests for the run method of PvcScenarioPlugin"""

    def setUp(self):
        """Set up test fixtures"""
        self.plugin = PvcScenarioPlugin()

    def create_scenario_file(self, config: dict, temp_dir: str) -> str:
        """Helper to create a temporary scenario YAML file in the given directory"""
        path = os.path.join(temp_dir, "scenario.yaml")
        with open(path, "w") as f:
            yaml.dump(config, f)
        return path
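
    # For reference, a config such as
    # {"pvc_scenario": {"namespace": "test-ns", "pod_name": "test-pod", "fill_percentage": 80}}
    # is written by the helper above as YAML along these lines (yaml.dump sorts
    # keys alphabetically by default):
    #
    #     pvc_scenario:
    #       fill_percentage: 80
    #       namespace: test-ns
    #       pod_name: test-pod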

    def test_run_missing_namespace(self):
        """Test run returns 1 when namespace is missing"""
        with tempfile.TemporaryDirectory() as temp_dir:
            scenario_config = {
                "pvc_scenario": {
                    "pvc_name": "test-pvc",
                    # namespace is missing
                }
            }
            scenario_path = self.create_scenario_file(scenario_config, temp_dir)

            mock_telemetry = MagicMock(spec=KrknTelemetryOpenshift)
            mock_scenario_telemetry = MagicMock()

            result = self.plugin.run(
                run_uuid="test-uuid",
                scenario=scenario_path,
                krkn_config={},
                lib_telemetry=mock_telemetry,
                scenario_telemetry=mock_scenario_telemetry,
            )

            self.assertEqual(result, 1)

    def test_run_missing_pvc_and_pod_name(self):
        """Test run returns 1 when both pvc_name and pod_name are missing"""
        with tempfile.TemporaryDirectory() as temp_dir:
            scenario_config = {
                "pvc_scenario": {
                    "namespace": "test-ns",
                    # pvc_name and pod_name are missing
                }
            }
            scenario_path = self.create_scenario_file(scenario_config, temp_dir)

            mock_telemetry = MagicMock(spec=KrknTelemetryOpenshift)
            mock_scenario_telemetry = MagicMock()

            result = self.plugin.run(
                run_uuid="test-uuid",
                scenario=scenario_path,
                krkn_config={},
                lib_telemetry=mock_telemetry,
                scenario_telemetry=mock_scenario_telemetry,
            )

            self.assertEqual(result, 1)

    def test_run_pod_not_found(self):
        """Test run returns 1 when pod doesn't exist"""
        with tempfile.TemporaryDirectory() as temp_dir:
            scenario_config = {
                "pvc_scenario": {
                    "namespace": "test-ns",
                    "pod_name": "non-existent-pod",
                }
            }
            scenario_path = self.create_scenario_file(scenario_config, temp_dir)

            mock_telemetry = MagicMock(spec=KrknTelemetryOpenshift)
            mock_kubecli = MagicMock()
            mock_telemetry.get_lib_kubernetes.return_value = mock_kubecli
            mock_kubecli.get_pod_info.return_value = None
            mock_scenario_telemetry = MagicMock()

            result = self.plugin.run(
                run_uuid="test-uuid",
                scenario=scenario_path,
                krkn_config={},
                lib_telemetry=mock_telemetry,
                scenario_telemetry=mock_scenario_telemetry,
            )

            self.assertEqual(result, 1)

    def test_run_pvc_not_found_for_pod(self):
        """Test run returns 1 when pod has no PVC"""
        with tempfile.TemporaryDirectory() as temp_dir:
            scenario_config = {
                "pvc_scenario": {
                    "namespace": "test-ns",
                    "pod_name": "test-pod",
                }
            }
            scenario_path = self.create_scenario_file(scenario_config, temp_dir)

            mock_telemetry = MagicMock(spec=KrknTelemetryOpenshift)
            mock_kubecli = MagicMock()
            mock_telemetry.get_lib_kubernetes.return_value = mock_kubecli

            # Create mock pod with no PVC volumes
            mock_pod = MagicMock()
            mock_volume = MagicMock()
            mock_volume.pvcName = None  # No PVC attached
            mock_pod.volumes = [mock_volume]
            mock_kubecli.get_pod_info.return_value = mock_pod

            mock_scenario_telemetry = MagicMock()

            result = self.plugin.run(
                run_uuid="test-uuid",
                scenario=scenario_path,
                krkn_config={},
                lib_telemetry=mock_telemetry,
                scenario_telemetry=mock_scenario_telemetry,
            )

            self.assertEqual(result, 1)

    def test_run_invalid_fill_percentage(self):
        """Test run returns 1 when target fill percentage is invalid"""
        with tempfile.TemporaryDirectory() as temp_dir:
            scenario_config = {
                "pvc_scenario": {
                    "namespace": "test-ns",
                    "pod_name": "test-pod",
                    "fill_percentage": 10,  # Lower than current usage
                }
            }
            scenario_path = self.create_scenario_file(scenario_config, temp_dir)

            mock_telemetry = MagicMock(spec=KrknTelemetryOpenshift)
            mock_kubecli = MagicMock()
            mock_telemetry.get_lib_kubernetes.return_value = mock_kubecli

            # Create mock pod with PVC volume
            mock_pod = MagicMock()
            mock_volume = MagicMock()
            mock_volume.pvcName = "test-pvc"
            mock_volume.name = "test-volume"
            mock_pod.volumes = [mock_volume]

            # Create mock container with volume mount
            mock_container = MagicMock()
            mock_container.name = "test-container"
            mock_vol_mount = MagicMock()
            mock_vol_mount.name = "test-volume"
            mock_vol_mount.mountPath = "/mnt/data"
            mock_container.volumeMounts = [mock_vol_mount]
            mock_pod.containers = [mock_container]

            mock_kubecli.get_pod_info.return_value = mock_pod

            # Mock PVC info
            mock_pvc = MagicMock()
            mock_kubecli.get_pvc_info.return_value = mock_pvc

            # Mock df command output: 50% used (50000 used, 50000 available, 100000 total)
            mock_kubecli.exec_cmd_in_pod.return_value = (
                "/dev/sda1 100000 50000 50000 50% /mnt/data"
            )

            mock_scenario_telemetry = MagicMock()

            result = self.plugin.run(
                run_uuid="test-uuid",
                scenario=scenario_path,
                krkn_config={},
                lib_telemetry=mock_telemetry,
                scenario_telemetry=mock_scenario_telemetry,
            )

            # Should return 1 because target fill (10%) < current fill (50%)
            self.assertEqual(result, 1)
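
    # The mocked single-line df output above follows the usual
    # "filesystem total used available use% mount" column layout, so the
    # current fill level can be derived roughly as (whitespace split assumed):
    #
    #     _, total_kb, used_kb, _, _, _ = df_line.split()
    #     current_fill = int(used_kb) * 100 / int(total_kb)  # 50.0 here
    #
    # A target fill_percentage below the current fill cannot be reached by
    # adding a file, hence the expected failure above.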

    @patch("krkn.scenario_plugins.pvc.pvc_scenario_plugin.time.sleep")
    @patch("krkn.scenario_plugins.pvc.pvc_scenario_plugin.cerberus.publish_kraken_status")
    def test_run_success_with_fallocate(self, mock_publish, mock_sleep):
        """Test successful run using fallocate"""
        with tempfile.TemporaryDirectory() as temp_dir:
            scenario_config = {
                "pvc_scenario": {
                    "namespace": "test-ns",
                    "pod_name": "test-pod",
                    "fill_percentage": 80,
                    "duration": 1,
                }
            }
            scenario_path = self.create_scenario_file(scenario_config, temp_dir)

            mock_telemetry = MagicMock(spec=KrknTelemetryOpenshift)
            mock_kubecli = MagicMock()
            mock_telemetry.get_lib_kubernetes.return_value = mock_kubecli

            # Create mock pod with PVC volume
            mock_pod = MagicMock()
            mock_volume = MagicMock()
            mock_volume.pvcName = "test-pvc"
            mock_volume.name = "test-volume"
            mock_pod.volumes = [mock_volume]

            # Create mock container with volume mount
            mock_container = MagicMock()
            mock_container.name = "test-container"
            mock_vol_mount = MagicMock()
            mock_vol_mount.name = "test-volume"
            mock_vol_mount.mountPath = "/mnt/data"
            mock_container.volumeMounts = [mock_vol_mount]
            mock_pod.containers = [mock_container]

            mock_kubecli.get_pod_info.return_value = mock_pod

            # Mock PVC info
            mock_pvc = MagicMock()
            mock_kubecli.get_pvc_info.return_value = mock_pvc

            # Set up exec_cmd_in_pod responses
            mock_kubecli.exec_cmd_in_pod.side_effect = [
                "/dev/sda1 100000 10000 90000 10% /mnt/data",  # df command (10% used)
                "/usr/bin/fallocate",  # command -v fallocate
                "/usr/bin/dd",  # command -v dd
                "",  # fallocate command
                "-rw-r--r-- 1 root root 70M Jan 1 00:00 kraken.tmp",  # ls -lh (file created)
                "",  # rm -f (cleanup)
                "total 0",  # ls -lh (file removed)
            ]

            mock_scenario_telemetry = MagicMock()

            result = self.plugin.run(
                run_uuid="test-uuid",
                scenario=scenario_path,
                krkn_config={},
                lib_telemetry=mock_telemetry,
                scenario_telemetry=mock_scenario_telemetry,
            )

            self.assertEqual(result, 0)
            mock_sleep.assert_called_once_with(1)

    @patch("krkn.scenario_plugins.pvc.pvc_scenario_plugin.time.sleep")
    @patch("krkn.scenario_plugins.pvc.pvc_scenario_plugin.cerberus.publish_kraken_status")
    def test_run_success_with_dd(self, mock_publish, mock_sleep):
        """Test successful run using dd when fallocate is not available"""
        with tempfile.TemporaryDirectory() as temp_dir:
            scenario_config = {
                "pvc_scenario": {
                    "namespace": "test-ns",
                    "pod_name": "test-pod",
                    "fill_percentage": 80,
                    "duration": 1,
                }
            }
            scenario_path = self.create_scenario_file(scenario_config, temp_dir)

            mock_telemetry = MagicMock(spec=KrknTelemetryOpenshift)
            mock_kubecli = MagicMock()
            mock_telemetry.get_lib_kubernetes.return_value = mock_kubecli

            # Create mock pod with PVC volume
            mock_pod = MagicMock()
            mock_volume = MagicMock()
            mock_volume.pvcName = "test-pvc"
            mock_volume.name = "test-volume"
            mock_pod.volumes = [mock_volume]

            # Create mock container with volume mount
            mock_container = MagicMock()
            mock_container.name = "test-container"
            mock_vol_mount = MagicMock()
            mock_vol_mount.name = "test-volume"
            mock_vol_mount.mountPath = "/mnt/data"
            mock_container.volumeMounts = [mock_vol_mount]
            mock_pod.containers = [mock_container]

            mock_kubecli.get_pod_info.return_value = mock_pod

            # Mock PVC info
            mock_pvc = MagicMock()
            mock_kubecli.get_pvc_info.return_value = mock_pvc

            # Set up exec_cmd_in_pod responses (fallocate not available)
            mock_kubecli.exec_cmd_in_pod.side_effect = [
                "/dev/sda1 100000 10000 90000 10% /mnt/data",  # df command
                "",  # command -v fallocate (not found)
                "/usr/bin/dd",  # command -v dd
                "",  # dd command
                "-rw-r--r-- 1 root root 70M Jan 1 00:00 kraken.tmp",  # ls -lh
                "",  # rm -f
                "total 0",  # ls -lh (file removed)
            ]

            mock_scenario_telemetry = MagicMock()

            result = self.plugin.run(
                run_uuid="test-uuid",
                scenario=scenario_path,
                krkn_config={},
                lib_telemetry=mock_telemetry,
                scenario_telemetry=mock_scenario_telemetry,
            )

            self.assertEqual(result, 0)
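
    # The two success tests above pin down the binary-selection order assumed
    # of the plugin: probe `command -v fallocate` first, fall back to
    # `command -v dd`, and give up only when both probes come back empty,
    # which is the case exercised by test_run_no_binary_available below. As a
    # sketch (probe/create helpers are illustrative names):
    #
    #     if probe("command -v fallocate"):
    #         create_with_fallocate()
    #     elif probe("command -v dd"):
    #         create_with_dd()
    #     else:
    #         return 1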

    def test_run_no_binary_available(self):
        """Test run returns 1 when neither fallocate nor dd is available"""
        with tempfile.TemporaryDirectory() as temp_dir:
            scenario_config = {
                "pvc_scenario": {
                    "namespace": "test-ns",
                    "pod_name": "test-pod",
                    "fill_percentage": 80,
                }
            }
            scenario_path = self.create_scenario_file(scenario_config, temp_dir)

            mock_telemetry = MagicMock(spec=KrknTelemetryOpenshift)
            mock_kubecli = MagicMock()
            mock_telemetry.get_lib_kubernetes.return_value = mock_kubecli

            # Create mock pod with PVC volume
            mock_pod = MagicMock()
            mock_volume = MagicMock()
            mock_volume.pvcName = "test-pvc"
            mock_volume.name = "test-volume"
            mock_pod.volumes = [mock_volume]

            # Create mock container with volume mount
            mock_container = MagicMock()
            mock_container.name = "test-container"
            mock_vol_mount = MagicMock()
            mock_vol_mount.name = "test-volume"
            mock_vol_mount.mountPath = "/mnt/data"
            mock_container.volumeMounts = [mock_vol_mount]
            mock_pod.containers = [mock_container]

            mock_kubecli.get_pod_info.return_value = mock_pod

            # Mock PVC info
            mock_pvc = MagicMock()
            mock_kubecli.get_pvc_info.return_value = mock_pvc

            # Neither fallocate nor dd available
            mock_kubecli.exec_cmd_in_pod.side_effect = [
                "/dev/sda1 100000 10000 90000 10% /mnt/data",  # df command
                "",  # command -v fallocate (not found)
                "",  # command -v dd (not found)
            ]

            mock_scenario_telemetry = MagicMock()

            result = self.plugin.run(
                run_uuid="test-uuid",
                scenario=scenario_path,
                krkn_config={},
                lib_telemetry=mock_telemetry,
                scenario_telemetry=mock_scenario_telemetry,
            )

            self.assertEqual(result, 1)

    def test_run_file_not_found(self):
        """Test run returns 1 when scenario file doesn't exist"""
        mock_telemetry = MagicMock(spec=KrknTelemetryOpenshift)
        mock_scenario_telemetry = MagicMock()

        result = self.plugin.run(
            run_uuid="test-uuid",
            scenario="/non/existent/path.yaml",
            krkn_config={},
            lib_telemetry=mock_telemetry,
            scenario_telemetry=mock_scenario_telemetry,
        )

        self.assertEqual(result, 1)

    def test_run_both_pvc_and_pod_name_provided(self):
        """Test run when both pvc_name and pod_name are provided (pod_name is overridden)"""
        with tempfile.TemporaryDirectory() as temp_dir:
            scenario_config = {
                "pvc_scenario": {
                    "namespace": "test-ns",
                    "pvc_name": "test-pvc",
                    "pod_name": "ignored-pod",  # This will be overridden
                    "fill_percentage": 80,
                    "duration": 1,
                }
            }
            scenario_path = self.create_scenario_file(scenario_config, temp_dir)

            mock_telemetry = MagicMock(spec=KrknTelemetryOpenshift)
            mock_kubecli = MagicMock()
            mock_telemetry.get_lib_kubernetes.return_value = mock_kubecli

            # Mock PVC info with pod names
            mock_pvc = MagicMock()
            mock_pvc.podNames = ["actual-pod-from-pvc"]
            mock_kubecli.get_pvc_info.return_value = mock_pvc

            # Create mock pod with PVC volume
            mock_pod = MagicMock()
            mock_volume = MagicMock()
            mock_volume.pvcName = "test-pvc"
            mock_volume.name = "test-volume"
            mock_pod.volumes = [mock_volume]

            # Create mock container with volume mount
            mock_container = MagicMock()
            mock_container.name = "test-container"
            mock_vol_mount = MagicMock()
            mock_vol_mount.name = "test-volume"
            mock_vol_mount.mountPath = "/mnt/data"
            mock_container.volumeMounts = [mock_vol_mount]
            mock_pod.containers = [mock_container]

            mock_kubecli.get_pod_info.return_value = mock_pod

            # Mock df command output: 10% used
            mock_kubecli.exec_cmd_in_pod.side_effect = [
                "/dev/sda1 100000 10000 90000 10% /mnt/data",  # df command
                "/usr/bin/fallocate",  # command -v fallocate
                "/usr/bin/dd",  # command -v dd
                "",  # fallocate command
                "-rw-r--r-- 1 root root 70M Jan 1 00:00 kraken.tmp",  # ls -lh (file created)
                "",  # rm -f (cleanup)
                "total 0",  # ls -lh (file removed)
            ]

            mock_scenario_telemetry = MagicMock()

            with patch("krkn.scenario_plugins.pvc.pvc_scenario_plugin.time.sleep"):
                with patch("krkn.scenario_plugins.pvc.pvc_scenario_plugin.cerberus.publish_kraken_status"):
                    result = self.plugin.run(
                        run_uuid="test-uuid",
                        scenario=scenario_path,
                        krkn_config={},
                        lib_telemetry=mock_telemetry,
                        scenario_telemetry=mock_scenario_telemetry,
                    )

            self.assertEqual(result, 0)
            # get_pod_info should be called with "actual-pod-from-pvc", not "ignored-pod"
            mock_kubecli.get_pod_info.assert_called_with(
                name="actual-pod-from-pvc",
                namespace="test-ns"
            )

            # Verify exec_cmd_in_pod uses the overridden pod name
            for call in mock_kubecli.exec_cmd_in_pod.call_args_list:
                kwargs = call[1]
                if 'pod_name' in kwargs:
                    self.assertEqual(kwargs['pod_name'], "actual-pod-from-pvc")

    def test_run_pvc_name_only_no_pods_associated(self):
        """Test run returns 1 when pvc_name is provided but no pods are associated"""
        with tempfile.TemporaryDirectory() as temp_dir:
            scenario_config = {
                "pvc_scenario": {
                    "namespace": "test-ns",
                    "pvc_name": "test-pvc",
                    "fill_percentage": 80,
                }
            }
            scenario_path = self.create_scenario_file(scenario_config, temp_dir)

            mock_telemetry = MagicMock(spec=KrknTelemetryOpenshift)
            mock_kubecli = MagicMock()
            mock_telemetry.get_lib_kubernetes.return_value = mock_kubecli

            # Mock PVC info with empty pod names (no pods using this PVC)
            mock_pvc = MagicMock()
            mock_pvc.podNames = []  # No pods associated
            mock_kubecli.get_pvc_info.return_value = mock_pvc

            mock_scenario_telemetry = MagicMock()

            result = self.plugin.run(
                run_uuid="test-uuid",
                scenario=scenario_path,
                krkn_config={},
                lib_telemetry=mock_telemetry,
                scenario_telemetry=mock_scenario_telemetry,
            )

            # Should return 1 because random.choice on an empty list raises IndexError
            self.assertEqual(result, 1)

    def test_run_file_creation_failed(self):
        """Test run returns 1 when file creation fails, and that cleanup is attempted"""
        with tempfile.TemporaryDirectory() as temp_dir:
            scenario_config = {
                "pvc_scenario": {
                    "namespace": "test-ns",
                    "pod_name": "test-pod",
                    "fill_percentage": 80,
                    "duration": 1,
                }
            }
            scenario_path = self.create_scenario_file(scenario_config, temp_dir)

            mock_telemetry = MagicMock(spec=KrknTelemetryOpenshift)
            mock_kubecli = MagicMock()
            mock_telemetry.get_lib_kubernetes.return_value = mock_kubecli

            # Create mock pod with PVC volume
            mock_pod = MagicMock()
            mock_volume = MagicMock()
            mock_volume.pvcName = "test-pvc"
            mock_volume.name = "test-volume"
            mock_pod.volumes = [mock_volume]

            # Create mock container with volume mount
            mock_container = MagicMock()
            mock_container.name = "test-container"
            mock_vol_mount = MagicMock()
            mock_vol_mount.name = "test-volume"
            mock_vol_mount.mountPath = "/mnt/data"
            mock_container.volumeMounts = [mock_vol_mount]
            mock_pod.containers = [mock_container]

            mock_kubecli.get_pod_info.return_value = mock_pod

            # Mock PVC info
            mock_pvc = MagicMock()
            mock_kubecli.get_pvc_info.return_value = mock_pvc

            # Set up exec_cmd_in_pod responses - file creation fails
            mock_kubecli.exec_cmd_in_pod.side_effect = [
                "/dev/sda1 100000 10000 90000 10% /mnt/data",  # df command
                "/usr/bin/fallocate",  # command -v fallocate
                "/usr/bin/dd",  # command -v dd
                "",  # fallocate command
                "total 0",  # ls -lh shows NO kraken.tmp (file creation failed)
                "",  # rm -f (cleanup attempt)
                "total 0",  # ls -lh (cleanup verification)
            ]

            mock_scenario_telemetry = MagicMock()

            result = self.plugin.run(
                run_uuid="test-uuid",
                scenario=scenario_path,
                krkn_config={},
                lib_telemetry=mock_telemetry,
                scenario_telemetry=mock_scenario_telemetry,
            )

            # Should return 1 because file creation failed
            self.assertEqual(result, 1)

            # Verify cleanup was attempted (7 calls total: df, 2x command -v, fallocate, ls, rm, ls)
            self.assertEqual(mock_kubecli.exec_cmd_in_pod.call_count, 7)


class TestRollbackTempFileEdgeCases(unittest.TestCase):
    """Additional tests for rollback_temp_file edge cases"""

    @patch("krkn.scenario_plugins.pvc.pvc_scenario_plugin.logging")
    def test_rollback_temp_file_still_exists(self, mock_logging):
        """Test rollback when file still exists after removal attempt and logs warning"""
        mock_telemetry = MagicMock(spec=KrknTelemetryOpenshift)
        mock_kubecli = MagicMock()
        mock_telemetry.get_lib_kubernetes.return_value = mock_kubecli

        # Simulate file still exists after rm command
        mock_kubecli.exec_cmd_in_pod.side_effect = [
            "",  # rm -f command output
            "-rw-r--r-- 1 root root 70M Jan 1 00:00 kraken.tmp",  # ls -lh shows file still exists
        ]

        # Create rollback data
        rollback_data = {
            "pod_name": "test-pod",
            "container_name": "test-container",
            "full_path": "/mnt/data/kraken.tmp",
            "file_name": "kraken.tmp",
            "mount_path": "/mnt/data",
        }
        encoded_data = base64.b64encode(
            json.dumps(rollback_data).encode("utf-8")
        ).decode("utf-8")

        rollback_content = RollbackContent(
            namespace="test-ns",
            resource_identifier=encoded_data,
        )

        # Should not raise exception, just log warning
        PvcScenarioPlugin.rollback_temp_file(rollback_content, mock_telemetry)

        # Verify exec_cmd_in_pod was called twice
        self.assertEqual(mock_kubecli.exec_cmd_in_pod.call_count, 2)

        # Verify warning was logged to inform operators of incomplete rollback
        mock_logging.warning.assert_called_once()
        warning_message = mock_logging.warning.call_args[0][0]
        self.assertIn("may still exist after rollback attempt", warning_message)
        self.assertIn("kraken.tmp", warning_message)


if __name__ == "__main__":
    unittest.main()

@@ -1,385 +0,0 @@
#!/usr/bin/env python3

"""
Test suite for SimpleHTTPRequestHandler class

Usage:
python -m coverage run -a -m unittest tests/test_server.py -v

Assisted By: Claude Code
"""

import unittest
from unittest.mock import Mock, patch, MagicMock
from io import BytesIO

import server
from server import SimpleHTTPRequestHandler


class TestSimpleHTTPRequestHandler(unittest.TestCase):

    def setUp(self):
        """
        Set up test fixtures for SimpleHTTPRequestHandler
        """
        # Reset the global server_status before each test
        server.server_status = ""
        # Reset the requests_served counter
        SimpleHTTPRequestHandler.requests_served = 0

        # Create a mock request
        self.mock_request = MagicMock()
        self.mock_client_address = ('127.0.0.1', 12345)
        self.mock_server = MagicMock()

    def _create_handler(self, method='GET', path='/'):
        """
        Helper method to create a handler instance with mocked request
        """
        # Create a mock request with proper attributes
        mock_request = MagicMock()
        mock_request.makefile.return_value = BytesIO(
            f"{method} {path} HTTP/1.1\r\n\r\n".encode('utf-8')
        )

        # Create handler
        handler = SimpleHTTPRequestHandler(
            mock_request,
            self.mock_client_address,
            self.mock_server
        )

        # Mock the wfile (write file) for response
        handler.wfile = BytesIO()

        return handler
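
    # Why the helper works: BaseHTTPRequestHandler reads the raw request via
    # request.makefile(...), so preloading a BytesIO with a minimal request
    # line ("GET / HTTP/1.1" plus the blank line that terminates the headers)
    # drives the real parsing code without opening a socket, and swapping
    # wfile for a BytesIO captures whatever the handler writes back.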

    def test_do_GET_root_path_calls_do_status(self):
        """
        Test do_GET with root path calls do_status
        """
        handler = self._create_handler('GET', '/')

        with patch.object(handler, 'do_status') as mock_do_status:
            handler.do_GET()
            mock_do_status.assert_called_once()

    def test_do_GET_non_root_path_does_nothing(self):
        """
        Test do_GET with non-root path does not call do_status
        """
        handler = self._create_handler('GET', '/other')

        with patch.object(handler, 'do_status') as mock_do_status:
            handler.do_GET()
            mock_do_status.assert_not_called()

    def test_do_status_sends_200_response(self):
        """
        Test do_status sends 200 status code
        """
        server.server_status = "TEST_STATUS"
        handler = self._create_handler()

        with patch.object(handler, 'send_response') as mock_send_response:
            with patch.object(handler, 'end_headers'):
                handler.do_status()
                mock_send_response.assert_called_once_with(200)

    def test_do_status_writes_server_status(self):
        """
        Test do_status writes server_status to response
        """
        server.server_status = "RUNNING"
        handler = self._create_handler()

        with patch.object(handler, 'send_response'):
            with patch.object(handler, 'end_headers'):
                handler.do_status()

        # Check that the status was written to wfile
        response_content = handler.wfile.getvalue().decode('utf-8')
        self.assertEqual(response_content, "RUNNING")

    def test_do_status_increments_requests_served(self):
        """
        Test do_status increments requests_served counter
        """
        # Note: Creating a handler increments the counter by 1
        # Then do_status increments it again
        SimpleHTTPRequestHandler.requests_served = 0
        handler = self._create_handler()
        initial_count = SimpleHTTPRequestHandler.requests_served

        with patch.object(handler, 'send_response'):
            with patch.object(handler, 'end_headers'):
                handler.do_status()

        self.assertEqual(
            SimpleHTTPRequestHandler.requests_served,
            initial_count + 1
        )

    def test_do_status_multiple_requests_increment_counter(self):
        """
        Test multiple do_status calls increment counter correctly
        """
        SimpleHTTPRequestHandler.requests_served = 0

        for i in range(5):
            handler = self._create_handler()
            with patch.object(handler, 'send_response'):
                with patch.object(handler, 'end_headers'):
                    handler.do_status()

        # Each iteration: handler creation increments by 1, do_status increments by 1
        # Total: 5 * 2 = 10
        self.assertEqual(SimpleHTTPRequestHandler.requests_served, 10)

    def test_do_POST_STOP_path_calls_set_stop(self):
        """
        Test do_POST with /STOP path calls set_stop
        """
        handler = self._create_handler('POST', '/STOP')

        with patch.object(handler, 'set_stop') as mock_set_stop:
            handler.do_POST()
            mock_set_stop.assert_called_once()

    def test_do_POST_RUN_path_calls_set_run(self):
        """
        Test do_POST with /RUN path calls set_run
        """
        handler = self._create_handler('POST', '/RUN')

        with patch.object(handler, 'set_run') as mock_set_run:
            handler.do_POST()
            mock_set_run.assert_called_once()

    def test_do_POST_PAUSE_path_calls_set_pause(self):
        """
        Test do_POST with /PAUSE path calls set_pause
        """
        handler = self._create_handler('POST', '/PAUSE')

        with patch.object(handler, 'set_pause') as mock_set_pause:
            handler.do_POST()
            mock_set_pause.assert_called_once()

    def test_do_POST_unknown_path_does_nothing(self):
        """
        Test do_POST with unknown path does not call any setter
        """
        handler = self._create_handler('POST', '/UNKNOWN')

        with patch.object(handler, 'set_stop') as mock_set_stop:
            with patch.object(handler, 'set_run') as mock_set_run:
                with patch.object(handler, 'set_pause') as mock_set_pause:
                    handler.do_POST()
                    mock_set_stop.assert_not_called()
                    mock_set_run.assert_not_called()
                    mock_set_pause.assert_not_called()
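
    # Control surface pinned down by the routing tests above (and the set_*
    # tests below):
    #
    #     GET  /        -> do_status: respond 200 with the current server_status
    #     POST /RUN     -> set_run:   server_status = "RUN",   respond 200
    #     POST /PAUSE   -> set_pause: server_status = "PAUSE", respond 200
    #     POST /STOP    -> set_stop:  server_status = "STOP",  respond 200
    #     anything else -> no handler method is invoked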

    def test_set_run_sets_status_to_RUN(self):
        """
        Test set_run sets global server_status to 'RUN'
        """
        handler = self._create_handler()

        with patch.object(handler, 'send_response'):
            with patch.object(handler, 'end_headers'):
                handler.set_run()

        self.assertEqual(server.server_status, 'RUN')

    def test_set_run_sends_200_response(self):
        """
        Test set_run sends 200 status code
        """
        handler = self._create_handler()

        with patch.object(handler, 'send_response') as mock_send_response:
            with patch.object(handler, 'end_headers'):
                handler.set_run()
                mock_send_response.assert_called_once_with(200)

    def test_set_stop_sets_status_to_STOP(self):
        """
        Test set_stop sets global server_status to 'STOP'
        """
        handler = self._create_handler()

        with patch.object(handler, 'send_response'):
            with patch.object(handler, 'end_headers'):
                handler.set_stop()

        self.assertEqual(server.server_status, 'STOP')

    def test_set_stop_sends_200_response(self):
        """
        Test set_stop sends 200 status code
        """
        handler = self._create_handler()

        with patch.object(handler, 'send_response') as mock_send_response:
            with patch.object(handler, 'end_headers'):
                handler.set_stop()
                mock_send_response.assert_called_once_with(200)

    def test_set_pause_sets_status_to_PAUSE(self):
        """
        Test set_pause sets global server_status to 'PAUSE'
        """
        handler = self._create_handler()

        with patch.object(handler, 'send_response'):
            with patch.object(handler, 'end_headers'):
                handler.set_pause()

        self.assertEqual(server.server_status, 'PAUSE')

    def test_set_pause_sends_200_response(self):
        """
        Test set_pause sends 200 status code
        """
        handler = self._create_handler()

        with patch.object(handler, 'send_response') as mock_send_response:
            with patch.object(handler, 'end_headers'):
                handler.set_pause()
                mock_send_response.assert_called_once_with(200)

    def test_requests_served_is_class_variable(self):
        """
        Test requests_served is shared across all instances
        """
        SimpleHTTPRequestHandler.requests_served = 0

        handler1 = self._create_handler()  # Increments to 1
        handler2 = self._create_handler()  # Increments to 2

        with patch.object(handler1, 'send_response'):
            with patch.object(handler1, 'end_headers'):
                handler1.do_status()  # Increments to 3

        with patch.object(handler2, 'send_response'):
            with patch.object(handler2, 'end_headers'):
                handler2.do_status()  # Increments to 4

        # Both handlers should see the same counter
        # 2 handler creations + 2 do_status calls = 4
        self.assertEqual(handler1.requests_served, 4)
        self.assertEqual(handler2.requests_served, 4)
        self.assertEqual(SimpleHTTPRequestHandler.requests_served, 4)


class TestServerModuleFunctions(unittest.TestCase):

    def setUp(self):
        """
        Set up test fixtures for server module functions
        """
        server.server_status = ""

    def test_publish_kraken_status_sets_server_status(self):
        """
        Test publish_kraken_status sets global server_status
        """
        server.publish_kraken_status("NEW_STATUS")
        self.assertEqual(server.server_status, "NEW_STATUS")

    def test_publish_kraken_status_overwrites_existing_status(self):
        """
        Test publish_kraken_status overwrites existing status
        """
        server.server_status = "OLD_STATUS"
        server.publish_kraken_status("NEW_STATUS")
        self.assertEqual(server.server_status, "NEW_STATUS")

    @patch('server.HTTPServer')
    @patch('server._thread')
    def test_start_server_creates_http_server(self, mock_thread, mock_http_server):
        """
        Test start_server creates HTTPServer with correct address
        """
        address = ("localhost", 8080)
        mock_server_instance = MagicMock()
        mock_http_server.return_value = mock_server_instance

        server.start_server(address, "RUNNING")

        mock_http_server.assert_called_once_with(
            address,
            SimpleHTTPRequestHandler
        )

    @patch('server.HTTPServer')
    @patch('server._thread')
    def test_start_server_starts_thread(self, mock_thread, mock_http_server):
        """
        Test start_server starts a new thread for serve_forever
        """
        address = ("localhost", 8080)
        mock_server_instance = MagicMock()
        mock_http_server.return_value = mock_server_instance

        server.start_server(address, "RUNNING")

        mock_thread.start_new_thread.assert_called_once()
        # Check that serve_forever was passed to the thread
        args = mock_thread.start_new_thread.call_args[0]
        self.assertEqual(args[0], mock_server_instance.serve_forever)

    @patch('server.HTTPServer')
    @patch('server._thread')
    def test_start_server_publishes_status(self, mock_thread, mock_http_server):
        """
        Test start_server publishes the provided status
        """
        address = ("localhost", 8080)
        mock_server_instance = MagicMock()
        mock_http_server.return_value = mock_server_instance

        server.start_server(address, "INITIAL_STATUS")

        self.assertEqual(server.server_status, "INITIAL_STATUS")
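
    # Read together, the three start_server tests sketch the expected shape of
    # the function: build the server, serve it on a background thread, publish
    # the initial status. Roughly:
    #
    #     httpd = HTTPServer(address, SimpleHTTPRequestHandler)
    #     _thread.start_new_thread(httpd.serve_forever, ())
    #     publish_kraken_status(status)
    #
    # (Only the callable is asserted; the empty args tuple is an assumption.)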

    @patch('server.HTTPConnection')
    def test_get_status_makes_http_request(self, mock_http_connection):
        """
        Test get_status makes HTTP GET request to root path
        """
        address = ("localhost", 8080)
        mock_connection = MagicMock()
        mock_response = MagicMock()
        mock_response.read.return_value = b"TEST_STATUS"
        mock_connection.getresponse.return_value = mock_response
        mock_http_connection.return_value = mock_connection

        result = server.get_status(address)

        mock_http_connection.assert_called_once_with("localhost", 8080)
        mock_connection.request.assert_called_once_with("GET", "/")
        self.assertEqual(result, "TEST_STATUS")

    @patch('server.HTTPConnection')
    def test_get_status_returns_decoded_response(self, mock_http_connection):
        """
        Test get_status returns decoded response string
        """
        address = ("localhost", 8080)
        mock_connection = MagicMock()
        mock_response = MagicMock()
        mock_response.read.return_value = b"RUNNING"
        mock_connection.getresponse.return_value = mock_response
        mock_http_connection.return_value = mock_connection

        result = server.get_status(address)

        self.assertEqual(result, "RUNNING")
        self.assertIsInstance(result, str)


if __name__ == "__main__":
    unittest.main()
@@ -9,21 +9,13 @@ Usage:
Assisted By: Claude Code
"""

import base64
import json
import tempfile
import unittest
from pathlib import Path
from unittest.mock import MagicMock, patch
import uuid
import yaml
from krkn.rollback.config import RollbackContent
from krkn.scenario_plugins.service_hijacking.service_hijacking_scenario_plugin import (
    ServiceHijackingScenarioPlugin,
)

from krkn.scenario_plugins.service_hijacking.service_hijacking_scenario_plugin import ServiceHijackingScenarioPlugin


class TestServiceHijackingScenarioPlugin(unittest.TestCase):

    def setUp(self):
        """
        Set up test fixtures for ServiceHijackingScenarioPlugin
@@ -40,374 +32,5 @@ class TestServiceHijackingScenarioPlugin(unittest.TestCase):
        self.assertEqual(len(result), 1)


class TestRollbackServiceHijacking(unittest.TestCase):
    def test_rollback_service_hijacking(self):
        """
        Test rollback functionality for ServiceHijackingScenarioPlugin
        """
        # Create rollback data that matches what the plugin expects
        rollback_data = {
            "service_name": "test-service",
            "service_namespace": "default",
            "original_selectors": {"app": "original-app"},
            "webservice_pod_name": "test-webservice",
        }
        json_str = json.dumps(rollback_data)
        encoded_data = base64.b64encode(json_str.encode("utf-8")).decode("utf-8")

        # Create RollbackContent with correct parameters
        rollback_content = RollbackContent(
            resource_identifier=encoded_data,
            namespace="default",
        )

        # Create a mock KrknTelemetryOpenshift object
        mock_lib_telemetry = MagicMock()
        mock_lib_kubernetes = MagicMock()
        mock_lib_telemetry.get_lib_kubernetes.return_value = mock_lib_kubernetes

        # Configure mock to return a successful service restoration
        mock_lib_kubernetes.replace_service_selector.return_value = {
            "metadata": {"name": "test-service"}
        }
        mock_lib_kubernetes.delete_pod.return_value = None

        # Call the rollback method
        ServiceHijackingScenarioPlugin.rollback_service_hijacking(
            rollback_content, mock_lib_telemetry
        )

        # Verify that the correct methods were called
        mock_lib_kubernetes.replace_service_selector.assert_called_once_with(
            ["app=original-app"], "test-service", "default"
        )
        mock_lib_kubernetes.delete_pod.assert_called_once_with(
            "test-webservice", "default"
        )
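
    # Worth noting from the assertion above: the payload stores the original
    # selectors as a dict ({"app": "original-app"}), while
    # replace_service_selector receives them flattened to "key=value" strings,
    # i.e. something like [f"{k}={v}" for k, v in original_selectors.items()].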

    @patch("krkn.scenario_plugins.service_hijacking.service_hijacking_scenario_plugin.logging")
    def test_rollback_service_hijacking_invalid_data(self, mock_logging):
        """
        Test rollback functionality with invalid rollback content logs error
        """
        # Create RollbackContent with invalid base64 data
        rollback_content = RollbackContent(
            resource_identifier="invalid_base64_data",
            namespace="default",
        )

        # Create a mock KrknTelemetryOpenshift object
        mock_lib_telemetry = MagicMock()

        # Call the rollback method - should not raise exception but log error
        ServiceHijackingScenarioPlugin.rollback_service_hijacking(
            rollback_content, mock_lib_telemetry
        )
        # Verify error was logged to inform operators of rollback failure
        mock_logging.error.assert_called_once()
        error_message = mock_logging.error.call_args[0][0]
        self.assertIn("Failed to rollback service hijacking", error_message)


class TestServiceHijackingRun(unittest.TestCase):
    """Tests for the run method of ServiceHijackingScenarioPlugin"""

    def setUp(self):
        """Set up test fixtures - create temporary directory"""
        self.temp_dir = tempfile.TemporaryDirectory()
        self.tmp_path = Path(self.temp_dir.name)

    def tearDown(self):
        """Clean up temporary directory after test"""
        self.temp_dir.cleanup()

    def _create_scenario_file(self, config=None):
        """Helper to create a temporary scenario YAML file"""
        default_config = {
            "service_name": "nginx-service",
            "service_namespace": "default",
            "service_target_port": "http-web-svc",
            "image": "quay.io/krkn-chaos/krkn-service-hijacking:v0.1.3",
            "chaos_duration": 1,  # Use short duration for tests
            "privileged": True,
            "plan": [
                {
                    "resource": "/test",
                    "steps": {
                        "GET": [
                            {
                                "duration": 1,
                                "status": 200,
                                "mime_type": "application/json",
                                "payload": '{"status": "ok"}',
                            }
                        ]
                    },
                }
            ],
        }
        if config:
            default_config.update(config)

        scenario_file = self.tmp_path / "test_scenario.yaml"
        with open(scenario_file, "w") as f:
            yaml.dump(default_config, f)
        return str(scenario_file)

    def _create_mocks(self):
        """Helper to create mock objects for testing"""
        mock_lib_telemetry = MagicMock()
        mock_lib_kubernetes = MagicMock()
        mock_lib_telemetry.get_lib_kubernetes.return_value = mock_lib_kubernetes
        mock_scenario_telemetry = MagicMock()
        return mock_lib_telemetry, mock_lib_kubernetes, mock_scenario_telemetry

    def test_run_successful(self):
        """Test successful execution of the run method"""
        scenario_file = self._create_scenario_file()
        mock_lib_telemetry, mock_lib_kubernetes, mock_scenario_telemetry = (
            self._create_mocks()
        )

        # Configure mocks for successful execution
        mock_lib_kubernetes.service_exists.return_value = True
        mock_webservice = MagicMock()
        mock_webservice.pod_name = "hijacker-pod"
        mock_webservice.selector = "app=hijacker"
        mock_lib_kubernetes.deploy_service_hijacking.return_value = mock_webservice
        mock_lib_kubernetes.replace_service_selector.return_value = {
            "metadata": {"name": "nginx-service"},
            "spec": {"selector": {"app": "nginx"}},
        }

        plugin = ServiceHijackingScenarioPlugin()

        result = plugin.run(
            run_uuid=str(uuid.uuid4()),
            scenario=scenario_file,
            krkn_config={},
            lib_telemetry=mock_lib_telemetry,
            scenario_telemetry=mock_scenario_telemetry,
        )

        self.assertEqual(result, 0)
        mock_lib_kubernetes.service_exists.assert_called_once_with(
            "nginx-service", "default"
        )
        mock_lib_kubernetes.deploy_service_hijacking.assert_called_once()
        self.assertEqual(mock_lib_kubernetes.replace_service_selector.call_count, 2)
        mock_lib_kubernetes.undeploy_service_hijacking.assert_called_once_with(
            mock_webservice
        )
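
    # The assertions in test_run_successful outline the hijack lifecycle the
    # plugin is expected to drive: deploy the fake webservice once, point the
    # service selector at it, hold for chaos_duration, point the selector back
    # (hence replace_service_selector being called exactly twice), and finally
    # undeploy the webservice pod.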

    def test_run_service_not_found(self):
        """Test run method when service does not exist"""
        scenario_file = self._create_scenario_file()
        mock_lib_telemetry, mock_lib_kubernetes, mock_scenario_telemetry = (
            self._create_mocks()
        )

        # Service does not exist
        mock_lib_kubernetes.service_exists.return_value = False

        plugin = ServiceHijackingScenarioPlugin()

        result = plugin.run(
            run_uuid=str(uuid.uuid4()),
            scenario=scenario_file,
            krkn_config={},
            lib_telemetry=mock_lib_telemetry,
            scenario_telemetry=mock_scenario_telemetry,
        )

        self.assertEqual(result, 1)
        mock_lib_kubernetes.service_exists.assert_called_once_with(
            "nginx-service", "default"
        )
        mock_lib_kubernetes.deploy_service_hijacking.assert_not_called()

    def test_run_patch_service_failed(self):
        """Test run method when patching the service fails"""
        scenario_file = self._create_scenario_file()
        mock_lib_telemetry, mock_lib_kubernetes, mock_scenario_telemetry = (
            self._create_mocks()
        )

        mock_lib_kubernetes.service_exists.return_value = True
        mock_webservice = MagicMock()
        mock_webservice.pod_name = "hijacker-pod"
        mock_webservice.selector = "app=hijacker"
        mock_lib_kubernetes.deploy_service_hijacking.return_value = mock_webservice
        # Patching returns None (failure)
        mock_lib_kubernetes.replace_service_selector.return_value = None

        plugin = ServiceHijackingScenarioPlugin()

        result = plugin.run(
            run_uuid=str(uuid.uuid4()),
            scenario=scenario_file,
            krkn_config={},
            lib_telemetry=mock_lib_telemetry,
            scenario_telemetry=mock_scenario_telemetry,
        )

        self.assertEqual(result, 1)
        mock_lib_kubernetes.replace_service_selector.assert_called_once()

    def test_run_restore_service_failed(self):
        """Test run method when restoring the service fails"""
        scenario_file = self._create_scenario_file()
        mock_lib_telemetry, mock_lib_kubernetes, mock_scenario_telemetry = (
            self._create_mocks()
        )

        mock_lib_kubernetes.service_exists.return_value = True
        mock_webservice = MagicMock()
        mock_webservice.pod_name = "hijacker-pod"
        mock_webservice.selector = "app=hijacker"
        mock_lib_kubernetes.deploy_service_hijacking.return_value = mock_webservice
        # First call (patch) succeeds, second call (restore) fails
        mock_lib_kubernetes.replace_service_selector.side_effect = [
            {"metadata": {"name": "nginx-service"}, "spec": {"selector": {"app": "nginx"}}},
            None,  # Restore fails
        ]

        plugin = ServiceHijackingScenarioPlugin()

        result = plugin.run(
            run_uuid=str(uuid.uuid4()),
            scenario=scenario_file,
            krkn_config={},
            lib_telemetry=mock_lib_telemetry,
            scenario_telemetry=mock_scenario_telemetry,
        )

        self.assertEqual(result, 1)
        self.assertEqual(mock_lib_kubernetes.replace_service_selector.call_count, 2)

    def test_run_with_numeric_port(self):
        """Test run method with numeric target port"""
        scenario_file = self._create_scenario_file(
            {"service_target_port": 8080}
        )
        mock_lib_telemetry, mock_lib_kubernetes, mock_scenario_telemetry = (
            self._create_mocks()
        )

        mock_lib_kubernetes.service_exists.return_value = True
        mock_webservice = MagicMock()
        mock_webservice.pod_name = "hijacker-pod"
        mock_webservice.selector = "app=hijacker"
        mock_lib_kubernetes.deploy_service_hijacking.return_value = mock_webservice
        mock_lib_kubernetes.replace_service_selector.return_value = {
            "metadata": {"name": "nginx-service"},
            "spec": {"selector": {"app": "nginx"}},
        }

        plugin = ServiceHijackingScenarioPlugin()

        result = plugin.run(
            run_uuid=str(uuid.uuid4()),
            scenario=scenario_file,
            krkn_config={},
            lib_telemetry=mock_lib_telemetry,
            scenario_telemetry=mock_scenario_telemetry,
        )

        self.assertEqual(result, 0)
        # Verify port_number was passed instead of port_name
        call_kwargs = mock_lib_kubernetes.deploy_service_hijacking.call_args
        self.assertEqual(call_kwargs[1]["port_number"], 8080)
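
    # Together with test_run_with_named_port below, this pins down the dispatch
    # on the type of service_target_port: an int is forwarded to
    # deploy_service_hijacking as port_number, a str as port_name; presumably
    # an isinstance check on the configured value decides which keyword is used.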

    def test_run_with_named_port(self):
        """Test run method with named target port"""
        scenario_file = self._create_scenario_file(
            {"service_target_port": "http-web-svc"}
        )
        mock_lib_telemetry, mock_lib_kubernetes, mock_scenario_telemetry = (
            self._create_mocks()
        )

        mock_lib_kubernetes.service_exists.return_value = True
        mock_webservice = MagicMock()
        mock_webservice.pod_name = "hijacker-pod"
        mock_webservice.selector = "app=hijacker"
        mock_lib_kubernetes.deploy_service_hijacking.return_value = mock_webservice
        mock_lib_kubernetes.replace_service_selector.return_value = {
            "metadata": {"name": "nginx-service"},
            "spec": {"selector": {"app": "nginx"}},
        }

        plugin = ServiceHijackingScenarioPlugin()

        result = plugin.run(
            run_uuid=str(uuid.uuid4()),
            scenario=scenario_file,
            krkn_config={},
            lib_telemetry=mock_lib_telemetry,
            scenario_telemetry=mock_scenario_telemetry,
        )

        self.assertEqual(result, 0)
        # Verify port_name was passed instead of port_number
        call_kwargs = mock_lib_kubernetes.deploy_service_hijacking.call_args
        self.assertEqual(call_kwargs[1]["port_name"], "http-web-svc")

    def test_run_exception_handling(self):
        """Test run method handles exceptions gracefully"""
        scenario_file = self._create_scenario_file()
        mock_lib_telemetry, mock_lib_kubernetes, mock_scenario_telemetry = (
            self._create_mocks()
        )

        mock_lib_kubernetes.service_exists.return_value = True
        mock_lib_kubernetes.deploy_service_hijacking.side_effect = Exception(
            "Deployment failed"
        )

        plugin = ServiceHijackingScenarioPlugin()

        result = plugin.run(
            run_uuid=str(uuid.uuid4()),
            scenario=scenario_file,
            krkn_config={},
            lib_telemetry=mock_lib_telemetry,
            scenario_telemetry=mock_scenario_telemetry,
        )

        self.assertEqual(result, 1)

    def test_run_unprivileged_mode(self):
        """Test run method with privileged set to False"""
        scenario_file = self._create_scenario_file({"privileged": False})
        mock_lib_telemetry, mock_lib_kubernetes, mock_scenario_telemetry = (
            self._create_mocks()
        )

        mock_lib_kubernetes.service_exists.return_value = True
        mock_webservice = MagicMock()
        mock_webservice.pod_name = "hijacker-pod"
        mock_webservice.selector = "app=hijacker"
        mock_lib_kubernetes.deploy_service_hijacking.return_value = mock_webservice
        mock_lib_kubernetes.replace_service_selector.return_value = {
            "metadata": {"name": "nginx-service"},
            "spec": {"selector": {"app": "nginx"}},
        }

        plugin = ServiceHijackingScenarioPlugin()

        result = plugin.run(
            run_uuid=str(uuid.uuid4()),
            scenario=scenario_file,
            krkn_config={},
            lib_telemetry=mock_lib_telemetry,
            scenario_telemetry=mock_scenario_telemetry,
        )

        self.assertEqual(result, 0)
        call_kwargs = mock_lib_kubernetes.deploy_service_hijacking.call_args
        self.assertIs(call_kwargs[1]["privileged"], False)


if __name__ == "__main__":
    unittest.main()

@@ -10,12 +10,6 @@ Assisted By: Claude Code
"""

import unittest
from unittest.mock import Mock, patch, mock_open

from krkn_lib.k8s import KrknKubernetes
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
from krkn_lib.models.telemetry import ScenarioTelemetry
from krkn_lib.models.k8s import AffectedNodeStatus

from krkn.scenario_plugins.shut_down.shut_down_scenario_plugin import ShutDownScenarioPlugin

@@ -27,11 +21,6 @@ class TestShutDownScenarioPlugin(unittest.TestCase):
        Set up test fixtures for ShutDownScenarioPlugin
        """
        self.plugin = ShutDownScenarioPlugin()
        self.mock_kubecli = Mock(spec=KrknKubernetes)
        self.mock_lib_telemetry = Mock(spec=KrknTelemetryOpenshift)
        self.mock_lib_telemetry.get_lib_kubernetes.return_value = self.mock_kubecli
        self.mock_scenario_telemetry = Mock(spec=ScenarioTelemetry)
        self.mock_scenario_telemetry.affected_nodes = []

    def test_get_scenario_types(self):
        """
@@ -42,456 +31,6 @@ class TestShutDownScenarioPlugin(unittest.TestCase):
        self.assertEqual(result, ["cluster_shut_down_scenarios"])
        self.assertEqual(len(result), 1)

    @patch('krkn.scenario_plugins.shut_down.shut_down_scenario_plugin.cerberus')
    @patch('time.time')
    @patch('time.sleep')
    @patch('builtins.open', new_callable=mock_open)
    def test_run_success_aws(self, mock_file, mock_sleep, mock_time, mock_cerberus):
        """
        Test successful run of shut down scenario with AWS cloud type
        """
        scenario_yaml = {
            "cluster_shut_down_scenario": {
                "runs": 1,
                "shut_down_duration": 60,
                "cloud_type": "aws",
                "timeout": 300
            }
        }

        mock_time.side_effect = [1000, 2000]
        self.mock_kubecli.list_nodes.return_value = ["node1", "node2"]

        with patch('yaml.full_load', return_value=scenario_yaml):
            with patch.object(self.plugin, 'cluster_shut_down') as mock_cluster_shutdown:
                result = self.plugin.run(
                    "test-uuid",
                    "/path/to/scenario.yaml",
                    {},
                    self.mock_lib_telemetry,
                    self.mock_scenario_telemetry
                )

        self.assertEqual(result, 0)
        mock_cluster_shutdown.assert_called_once()
        mock_cerberus.publish_kraken_status.assert_called_once()

    @patch('logging.error')
    @patch('builtins.open', new_callable=mock_open)
    def test_run_with_exception(self, mock_file, mock_logging):
        """
        Test run handles exceptions and returns 1
        """
        mock_file.return_value.__enter__.side_effect = Exception("File read error")

        result = self.plugin.run(
            "test-uuid",
            "/path/to/scenario.yaml",
            {},
            self.mock_lib_telemetry,
            self.mock_scenario_telemetry
        )

        self.assertEqual(result, 1)
        mock_logging.assert_called_once()
        logged_message = mock_logging.call_args[0][0]
        self.assertIn("File read error", logged_message)
        self.assertIn("/path/to/scenario.yaml", logged_message)

    @patch('krkn.scenario_plugins.shut_down.shut_down_scenario_plugin.AWS')
    @patch('time.sleep')
    @patch('time.time')
    def test_cluster_shut_down_aws(self, mock_time, mock_sleep, mock_aws_class):
        """
        Test cluster_shut_down with AWS cloud type
        """
        shut_down_config = {
            "runs": 1,
            "shut_down_duration": 60,
            "cloud_type": "aws",
            "timeout": 300
        }

        mock_cloud_object = Mock()
        mock_aws_class.return_value = mock_cloud_object
        mock_cloud_object.get_instance_id.return_value = "i-123"
        mock_cloud_object.wait_until_stopped.return_value = True
        mock_cloud_object.wait_until_running.return_value = True

        self.mock_kubecli.list_nodes.return_value = ["node1", "node2"]
        affected_nodes_status = AffectedNodeStatus()
        mock_time.return_value = 1000

        with patch.object(self.plugin, 'multiprocess_nodes') as mock_multiprocess:
            self.plugin.cluster_shut_down(shut_down_config, self.mock_kubecli, affected_nodes_status)

        mock_aws_class.assert_called_once()
        self.assertEqual(mock_multiprocess.call_count, 2)
        self.assertEqual(len(affected_nodes_status.affected_nodes), 2)

    @patch('krkn.scenario_plugins.shut_down.shut_down_scenario_plugin.GCP')
    @patch('time.sleep')
    @patch('time.time')
    def test_cluster_shut_down_gcp(self, mock_time, mock_sleep, mock_gcp_class):
        """
        Test cluster_shut_down with GCP cloud type
        """
        shut_down_config = {
            "runs": 1,
            "shut_down_duration": 30,
            "cloud_type": "gcp",
            "timeout": 300
        }

        mock_cloud_object = Mock()
        mock_gcp_class.return_value = mock_cloud_object
        mock_cloud_object.get_instance_id.side_effect = ["gcp-1", "gcp-2"]
        mock_cloud_object.wait_until_stopped.return_value = True
        mock_cloud_object.wait_until_running.return_value = True

        self.mock_kubecli.list_nodes.return_value = ["node1", "node2"]
        affected_nodes_status = AffectedNodeStatus()
        mock_time.return_value = 1000

        with patch.object(self.plugin, 'multiprocess_nodes') as mock_multiprocess:
            self.plugin.cluster_shut_down(shut_down_config, self.mock_kubecli, affected_nodes_status)

        mock_gcp_class.assert_called_once()
        # Verify that the 'processes' parameter is set to 1 for GCP cloud type
        calls = mock_multiprocess.call_args_list
        for call_args in calls:
            self.assertEqual(call_args[0][2], 1)
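
    # The loop above asserts that the third positional argument to
    # multiprocess_nodes (the pool width) is 1 whenever the cloud type is GCP,
    # i.e. GCP start/stop calls are serialized; presumably this avoids
    # concurrent-operation conflicts in the GCP API, while other clouds may
    # fan out across nodes.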

    @patch('krkn.scenario_plugins.shut_down.shut_down_scenario_plugin.Azure')
    @patch('time.sleep')
    @patch('time.time')
    def test_cluster_shut_down_azure(self, mock_time, mock_sleep, mock_azure_class):
        """
        Test cluster_shut_down with Azure cloud type
        """
        shut_down_config = {
            "runs": 1,
            "shut_down_duration": 45,
            "cloud_type": "azure",
            "timeout": 300
        }

        mock_cloud_object = Mock()
        mock_azure_class.return_value = mock_cloud_object
        mock_cloud_object.get_instance_id.side_effect = ["azure-1"]
        mock_cloud_object.wait_until_stopped.return_value = True
        mock_cloud_object.wait_until_running.return_value = True

        self.mock_kubecli.list_nodes.return_value = ["node1"]
        affected_nodes_status = AffectedNodeStatus()
        mock_time.return_value = 1000

        with patch.object(self.plugin, 'multiprocess_nodes'):
            self.plugin.cluster_shut_down(shut_down_config, self.mock_kubecli, affected_nodes_status)

        mock_azure_class.assert_called_once()

    @patch('krkn.scenario_plugins.shut_down.shut_down_scenario_plugin.Azure')
    @patch('time.sleep')
    @patch('time.time')
    def test_cluster_shut_down_az_alias(self, mock_time, mock_sleep, mock_azure_class):
        """
        Test cluster_shut_down with 'az' cloud type alias for Azure
        """
        shut_down_config = {
            "runs": 1,
            "shut_down_duration": 30,
            "cloud_type": "az",
            "timeout": 300
        }

        mock_cloud_object = Mock()
        mock_azure_class.return_value = mock_cloud_object
        mock_cloud_object.get_instance_id.side_effect = ["azure-1"]
        mock_cloud_object.wait_until_stopped.return_value = True
        mock_cloud_object.wait_until_running.return_value = True

        self.mock_kubecli.list_nodes.return_value = ["node1"]
        affected_nodes_status = AffectedNodeStatus()
        mock_time.return_value = 1000

        with patch.object(self.plugin, 'multiprocess_nodes'):
            self.plugin.cluster_shut_down(shut_down_config, self.mock_kubecli, affected_nodes_status)

        mock_azure_class.assert_called_once()

    @patch('krkn.scenario_plugins.shut_down.shut_down_scenario_plugin.OPENSTACKCLOUD')
    @patch('time.sleep')
    @patch('time.time')
    def test_cluster_shut_down_openstack(self, mock_time, mock_sleep, mock_openstack_class):
        """
        Test cluster_shut_down with OpenStack cloud type
        """
        shut_down_config = {
            "runs": 1,
            "shut_down_duration": 60,
            "cloud_type": "openstack",
            "timeout": 300
        }

        mock_cloud_object = Mock()
        mock_openstack_class.return_value = mock_cloud_object
        mock_cloud_object.get_instance_id.side_effect = ["os-1"]
        mock_cloud_object.wait_until_stopped.return_value = True
        mock_cloud_object.wait_until_running.return_value = True

        self.mock_kubecli.list_nodes.return_value = ["node1"]
        affected_nodes_status = AffectedNodeStatus()
        mock_time.return_value = 1000

        with patch.object(self.plugin, 'multiprocess_nodes'):
            self.plugin.cluster_shut_down(shut_down_config, self.mock_kubecli, affected_nodes_status)

        mock_openstack_class.assert_called_once()

    @patch('krkn.scenario_plugins.shut_down.shut_down_scenario_plugin.IbmCloud')
    @patch('time.sleep')
    @patch('time.time')
    def test_cluster_shut_down_ibm(self, mock_time, mock_sleep, mock_ibm_class):
        """
        Test cluster_shut_down with IBM cloud type
        """
        shut_down_config = {
            "runs": 1,
            "shut_down_duration": 60,
            "cloud_type": "ibm",
            "timeout": 300
        }

        mock_cloud_object = Mock()
        mock_ibm_class.return_value = mock_cloud_object
        mock_cloud_object.get_instance_id.side_effect = ["ibm-1"]
        mock_cloud_object.wait_until_stopped.return_value = True
        mock_cloud_object.wait_until_running.return_value = True

        self.mock_kubecli.list_nodes.return_value = ["node1"]
        affected_nodes_status = AffectedNodeStatus()
        mock_time.return_value = 1000

        with patch.object(self.plugin, 'multiprocess_nodes'):
            self.plugin.cluster_shut_down(shut_down_config, self.mock_kubecli, affected_nodes_status)

        mock_ibm_class.assert_called_once()

    @patch('krkn.scenario_plugins.shut_down.shut_down_scenario_plugin.IbmCloud')
|
||||
@patch('time.sleep')
|
||||
@patch('time.time')
|
||||
def test_cluster_shut_down_ibmcloud_alias(self, mock_time, mock_sleep, mock_ibm_class):
|
||||
"""
|
||||
Test cluster_shut_down with 'ibmcloud' cloud type alias
|
||||
"""
|
||||
shut_down_config = {
|
||||
"runs": 1,
|
||||
"shut_down_duration": 60,
|
||||
"cloud_type": "ibmcloud",
|
||||
"timeout": 300
|
||||
}
|
||||
|
||||
mock_cloud_object = Mock()
|
||||
mock_ibm_class.return_value = mock_cloud_object
|
||||
mock_cloud_object.get_instance_id.side_effect = ["ibm-1"]
|
||||
mock_cloud_object.wait_until_stopped.return_value = True
|
||||
mock_cloud_object.wait_until_running.return_value = True
|
||||
|
||||
self.mock_kubecli.list_nodes.return_value = ["node1"]
|
||||
affected_nodes_status = AffectedNodeStatus()
|
||||
mock_time.return_value = 1000
|
||||
|
||||
with patch.object(self.plugin, 'multiprocess_nodes'):
|
||||
self.plugin.cluster_shut_down(shut_down_config, self.mock_kubecli, affected_nodes_status)
|
||||
|
||||
mock_ibm_class.assert_called_once()
|
||||
|
||||
@patch('logging.error')
|
||||
def test_cluster_shut_down_unsupported_cloud(self, mock_logging):
|
||||
"""
|
||||
Test cluster_shut_down raises exception for unsupported cloud type
|
||||
"""
|
||||
shut_down_config = {
|
||||
"runs": 1,
|
||||
"shut_down_duration": 60,
|
||||
"cloud_type": "unsupported",
|
||||
"timeout": 300
|
||||
}
|
||||
|
||||
affected_nodes_status = AffectedNodeStatus()
|
||||
|
||||
with self.assertRaises(RuntimeError):
|
||||
self.plugin.cluster_shut_down(shut_down_config, self.mock_kubecli, affected_nodes_status)
|
||||
|
||||
mock_logging.assert_called()
|
||||
logged_message = mock_logging.call_args[0][0]
|
||||
self.assertIn("not currently supported", logged_message)
|
||||
@patch('krkn.scenario_plugins.shut_down.shut_down_scenario_plugin.AWS')
|
||||
@patch('time.sleep')
|
||||
@patch('time.time')
|
||||
def test_cluster_shut_down_multiple_runs(self, mock_time, mock_sleep, mock_aws_class):
|
||||
"""
|
||||
Test cluster_shut_down with multiple runs
|
||||
"""
|
||||
shut_down_config = {
|
||||
"runs": 2,
|
||||
"shut_down_duration": 30,
|
||||
"cloud_type": "aws",
|
||||
"timeout": 300
|
||||
}
|
||||
|
||||
mock_cloud_object = Mock()
|
||||
mock_aws_class.return_value = mock_cloud_object
|
||||
mock_cloud_object.get_instance_id.return_value = "i-123"
|
||||
mock_cloud_object.wait_until_stopped.return_value = True
|
||||
mock_cloud_object.wait_until_running.return_value = True
|
||||
|
||||
self.mock_kubecli.list_nodes.return_value = ["node1"]
|
||||
affected_nodes_status = AffectedNodeStatus()
|
||||
mock_time.return_value = 1000
|
||||
|
||||
with patch.object(self.plugin, 'multiprocess_nodes') as mock_multiprocess:
|
||||
self.plugin.cluster_shut_down(shut_down_config, self.mock_kubecli, affected_nodes_status)
|
||||
|
||||
# Each run should call multiprocess_nodes twice (stop and start)
|
||||
self.assertEqual(mock_multiprocess.call_count, 4)
|
||||
|
||||
@patch('krkn.scenario_plugins.shut_down.shut_down_scenario_plugin.ThreadPool')
|
||||
def test_multiprocess_nodes_simple_list(self, mock_threadpool):
|
||||
"""
|
||||
Test multiprocess_nodes with simple list of nodes
|
||||
"""
|
||||
mock_pool_instance = Mock()
|
||||
mock_threadpool.return_value = mock_pool_instance
|
||||
|
||||
nodes = ["node1", "node2", "node3"]
|
||||
mock_cloud_function = Mock()
|
||||
|
||||
self.plugin.multiprocess_nodes(mock_cloud_function, nodes, processes=0)
|
||||
|
||||
mock_threadpool.assert_called_once_with(processes=3)
|
||||
mock_pool_instance.map.assert_called_once_with(mock_cloud_function, nodes)
|
||||
mock_pool_instance.close.assert_called_once()
|
||||
|
||||
@patch('krkn.scenario_plugins.shut_down.shut_down_scenario_plugin.ThreadPool')
|
||||
def test_multiprocess_nodes_with_custom_processes(self, mock_threadpool):
|
||||
"""
|
||||
Test multiprocess_nodes with custom process count
|
||||
"""
|
||||
mock_pool_instance = Mock()
|
||||
mock_threadpool.return_value = mock_pool_instance
|
||||
|
||||
nodes = ["node1", "node2", "node3", "node4"]
|
||||
mock_cloud_function = Mock()
|
||||
|
||||
self.plugin.multiprocess_nodes(mock_cloud_function, nodes, processes=2)
|
||||
|
||||
mock_threadpool.assert_called_once_with(processes=2)
|
||||
mock_pool_instance.map.assert_called_once_with(mock_cloud_function, nodes)
|
||||
mock_pool_instance.close.assert_called_once()
|
||||
|
||||
@patch('logging.info')
|
||||
@patch('krkn.scenario_plugins.shut_down.shut_down_scenario_plugin.ThreadPool')
|
||||
def test_multiprocess_nodes_tuple_list(self, mock_threadpool, mock_logging):
|
||||
"""
|
||||
Test multiprocess_nodes with tuple list (node_info, node_id pairs)
|
||||
"""
|
||||
mock_pool_instance = Mock()
|
||||
mock_threadpool.return_value = mock_pool_instance
|
||||
|
||||
nodes = [("info1", "id1"), ("info2", "id2")]
|
||||
mock_cloud_function = Mock()
|
||||
|
||||
self.plugin.multiprocess_nodes(mock_cloud_function, nodes, processes=0)
|
||||
|
||||
mock_threadpool.assert_called_once_with(processes=2)
|
||||
mock_pool_instance.starmap.assert_called_once()
|
||||
# Verify starmap was called with zipped arguments
|
||||
call_args = mock_pool_instance.starmap.call_args[0]
|
||||
self.assertEqual(call_args[0], mock_cloud_function)
|
||||
mock_pool_instance.close.assert_called_once()
|
||||
|
||||
@patch('logging.info')
|
||||
@patch('krkn.scenario_plugins.shut_down.shut_down_scenario_plugin.ThreadPool')
|
||||
def test_multiprocess_nodes_with_exception(self, mock_threadpool, mock_logging):
|
||||
"""
|
||||
Test multiprocess_nodes handles exceptions gracefully
|
||||
"""
|
||||
mock_threadpool.side_effect = Exception("Pool creation error")
|
||||
|
||||
nodes = ["node1", "node2"]
|
||||
mock_cloud_function = Mock()
|
||||
|
||||
self.plugin.multiprocess_nodes(mock_cloud_function, nodes, processes=0)
|
||||
|
||||
mock_logging.assert_called()
|
||||
logged_args, logged_kwargs = mock_logging.call_args
|
||||
self.assertIn("Error on pool multiprocessing", logged_args[0])
|
||||
@patch('krkn.scenario_plugins.shut_down.shut_down_scenario_plugin.AWS')
|
||||
@patch('time.sleep')
|
||||
@patch('time.time')
|
||||
def test_cluster_shut_down_node_stop_timing(self, mock_time, mock_sleep, mock_aws_class):
|
||||
"""
|
||||
Test that cloud_stopping_time is set correctly
|
||||
"""
|
||||
shut_down_config = {
|
||||
"runs": 1,
|
||||
"shut_down_duration": 60,
|
||||
"cloud_type": "aws",
|
||||
"timeout": 300
|
||||
}
|
||||
|
||||
mock_cloud_object = Mock()
|
||||
mock_aws_class.return_value = mock_cloud_object
|
||||
mock_cloud_object.get_instance_id.return_value = "i-123"
|
||||
mock_cloud_object.wait_until_stopped.return_value = True
|
||||
mock_cloud_object.wait_until_running.return_value = True
|
||||
|
||||
self.mock_kubecli.list_nodes.return_value = ["node1"]
|
||||
affected_nodes_status = AffectedNodeStatus()
|
||||
|
||||
# Simulate time progression - provide enough values for all time.time() calls
|
||||
mock_time.side_effect = [1000, 1050, 1100, 1150, 1200]
|
||||
|
||||
with patch.object(self.plugin, 'multiprocess_nodes'):
|
||||
self.plugin.cluster_shut_down(shut_down_config, self.mock_kubecli, affected_nodes_status)
|
||||
|
||||
# Verify affected node was created
|
||||
self.assertEqual(len(affected_nodes_status.affected_nodes), 1)
|
||||
|
||||
@patch('krkn.scenario_plugins.shut_down.shut_down_scenario_plugin.AWS')
|
||||
@patch('time.sleep')
|
||||
@patch('time.time')
|
||||
def test_cluster_shut_down_wait_for_initialization(self, mock_time, mock_sleep, mock_aws_class):
|
||||
"""
|
||||
Test that cluster_shut_down waits 150s for component initialization
|
||||
"""
|
||||
shut_down_config = {
|
||||
"runs": 1,
|
||||
"shut_down_duration": 60,
|
||||
"cloud_type": "aws",
|
||||
"timeout": 300
|
||||
}
|
||||
|
||||
mock_cloud_object = Mock()
|
||||
mock_aws_class.return_value = mock_cloud_object
|
||||
mock_cloud_object.get_instance_id.return_value = "i-123"
|
||||
mock_cloud_object.wait_until_stopped.return_value = True
|
||||
mock_cloud_object.wait_until_running.return_value = True
|
||||
|
||||
self.mock_kubecli.list_nodes.return_value = ["node1"]
|
||||
affected_nodes_status = AffectedNodeStatus()
|
||||
mock_time.return_value = 1000
|
||||
|
||||
with patch.object(self.plugin, 'multiprocess_nodes'):
|
||||
self.plugin.cluster_shut_down(shut_down_config, self.mock_kubecli, affected_nodes_status)
|
||||
|
||||
# Verify sleep was called with correct durations
|
||||
sleep_calls = [call_args[0][0] for call_args in mock_sleep.call_args_list]
|
||||
self.assertIn(60, sleep_calls) # shut_down_duration
|
||||
self.assertIn(150, sleep_calls) # component initialization wait
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
||||
@@ -9,13 +9,8 @@ Usage:
Assisted By: Claude Code
"""

import base64
import json
import unittest
import uuid
from unittest.mock import MagicMock

from krkn.rollback.config import RollbackContent
from krkn.scenario_plugins.syn_flood.syn_flood_scenario_plugin import SynFloodScenarioPlugin


@@ -36,488 +31,6 @@ class TestSynFloodScenarioPlugin(unittest.TestCase):
        self.assertEqual(result, ["syn_flood_scenarios"])
        self.assertEqual(len(result), 1)

    def test_check_key_value(self):
        """
        Test check_key_value method
        """
        test_dict = {
            "valid_key": "value",
            "empty_key": "",
            "none_key": None,
            "zero_key": 0,
            "false_key": False,
        }

        self.assertTrue(self.plugin.check_key_value(test_dict, "valid_key"))
        self.assertFalse(self.plugin.check_key_value(test_dict, "empty_key"))
        self.assertFalse(self.plugin.check_key_value(test_dict, "none_key"))
        self.assertFalse(self.plugin.check_key_value(test_dict, "missing_key"))
        # 0 and False are valid values
        self.assertTrue(self.plugin.check_key_value(test_dict, "zero_key"))
        self.assertTrue(self.plugin.check_key_value(test_dict, "false_key"))


class TestIsNodeAffinityCorrect(unittest.TestCase):
    """Tests for is_node_affinity_correct method"""

    def setUp(self):
        self.plugin = SynFloodScenarioPlugin()

    def test_valid_node_affinity(self):
        """Test valid node affinity configuration"""
        valid_affinity = {
            "node-role.kubernetes.io/worker": [""],
        }
        self.assertTrue(self.plugin.is_node_affinity_correct(valid_affinity))

    def test_valid_node_affinity_multiple_labels(self):
        """Test valid node affinity with multiple labels"""
        valid_affinity = {
            "node-role.kubernetes.io/worker": ["value1", "value2"],
            "topology.kubernetes.io/zone": ["us-east-1a"],
        }
        self.assertTrue(self.plugin.is_node_affinity_correct(valid_affinity))

    def test_empty_dict_is_valid(self):
        """Test empty dict is valid for node affinity"""
        self.assertTrue(self.plugin.is_node_affinity_correct({}))

    def test_invalid_not_a_dict(self):
        """Test non-dict input is invalid"""
        self.assertFalse(self.plugin.is_node_affinity_correct("not a dict"))
        self.assertFalse(self.plugin.is_node_affinity_correct(["list"]))
        self.assertFalse(self.plugin.is_node_affinity_correct(123))
        self.assertFalse(self.plugin.is_node_affinity_correct(None))

    def test_invalid_non_string_key(self):
        """Test non-string keys are invalid"""
        invalid_affinity = {
            123: ["value"],
        }
        self.assertFalse(self.plugin.is_node_affinity_correct(invalid_affinity))

    def test_invalid_non_list_value(self):
        """Test non-list values are invalid"""
        invalid_affinity = {
            "node-role.kubernetes.io/worker": "not a list",
        }
        self.assertFalse(self.plugin.is_node_affinity_correct(invalid_affinity))


class TestParseConfig(unittest.TestCase):
    """Tests for parse_config method"""

    def setUp(self):
        self.plugin = SynFloodScenarioPlugin()

    def _create_scenario_file(self, tmp_path, config=None):
        """Helper to create a temporary scenario YAML file"""
        import yaml

        default_config = {
            "packet-size": 120,
            "window-size": 64,
            "duration": 10,
            "namespace": "default",
            "target-service": "elasticsearch",
            "target-port": 9200,
            "target-service-label": "",
            "number-of-pods": 2,
            "image": "quay.io/krkn-chaos/krkn-syn-flood:v1.0.0",
            "attacker-nodes": {"node-role.kubernetes.io/worker": [""]},
        }
        if config:
            default_config.update(config)

        scenario_file = tmp_path / "test_scenario.yaml"
        with open(scenario_file, "w") as f:
            yaml.dump(default_config, f)
        return str(scenario_file)

    def test_parse_config_valid(self):
        """Test parsing valid configuration"""
        import tempfile
        from pathlib import Path

        with tempfile.TemporaryDirectory() as tmp_dir:
            scenario_file = self._create_scenario_file(Path(tmp_dir))
            config = self.plugin.parse_config(scenario_file)

            assert config["packet-size"] == 120
            assert config["window-size"] == 64
            assert config["duration"] == 10
            assert config["namespace"] == "default"
            assert config["target-service"] == "elasticsearch"
            assert config["target-port"] == 9200
            assert config["number-of-pods"] == 2

    def test_parse_config_file_not_found(self):
        """Test parsing non-existent file raises exception"""
        with self.assertRaises(Exception) as context:
            self.plugin.parse_config("/nonexistent/path/scenario.yaml")
        self.assertIn("failed to load scenario file", str(context.exception))

    def test_parse_config_missing_required_params(self):
        """Test parsing config with missing required parameters"""
        import tempfile
        from pathlib import Path

        with tempfile.TemporaryDirectory() as tmp_dir:
            # Missing packet-size and window-size
            scenario_file = self._create_scenario_file(
                Path(tmp_dir),
                {"packet-size": "", "window-size": None},
            )
            with self.assertRaises(Exception) as context:
                self.plugin.parse_config(scenario_file)
            self.assertIn("packet-size", str(context.exception))
            self.assertIn("window-size", str(context.exception))

    def test_parse_config_both_target_service_and_label(self):
        """Test parsing config with both target-service and target-service-label set"""
        import tempfile
        from pathlib import Path

        with tempfile.TemporaryDirectory() as tmp_dir:
            scenario_file = self._create_scenario_file(
                Path(tmp_dir),
                {
                    "target-service": "elasticsearch",
                    "target-service-label": "app=elasticsearch",
                },
            )
            with self.assertRaises(Exception) as context:
                self.plugin.parse_config(scenario_file)
            self.assertIn(
                "you cannot select both target-service and target-service-label",
                str(context.exception),
            )

    def test_parse_config_neither_target_service_nor_label(self):
        """Test parsing config with neither target-service nor target-service-label set"""
        import tempfile
        from pathlib import Path

        with tempfile.TemporaryDirectory() as tmp_dir:
            scenario_file = self._create_scenario_file(
                Path(tmp_dir),
                {"target-service": "", "target-service-label": ""},
            )
            with self.assertRaises(Exception) as context:
                self.plugin.parse_config(scenario_file)
            self.assertIn(
                "you have either to set a target service or a label",
                str(context.exception),
            )

    def test_parse_config_invalid_attacker_nodes(self):
        """Test parsing config with invalid attacker-nodes format"""
        import tempfile
        from pathlib import Path

        with tempfile.TemporaryDirectory() as tmp_dir:
            scenario_file = self._create_scenario_file(
                Path(tmp_dir),
                {"attacker-nodes": "invalid"},
            )
            with self.assertRaises(Exception) as context:
                self.plugin.parse_config(scenario_file)
            self.assertIn("attacker-nodes format is not correct", str(context.exception))

    def test_parse_config_with_label_selector(self):
        """Test parsing config with target-service-label instead of target-service"""
        import tempfile
        from pathlib import Path

        with tempfile.TemporaryDirectory() as tmp_dir:
            scenario_file = self._create_scenario_file(
                Path(tmp_dir),
                {"target-service": "", "target-service-label": "app=elasticsearch"},
            )
            config = self.plugin.parse_config(scenario_file)
            assert config["target-service-label"] == "app=elasticsearch"
            assert config["target-service"] == ""


class TestSynFloodRun(unittest.TestCase):
    """Tests for the run method of SynFloodScenarioPlugin"""

    def _create_scenario_file(self, tmp_path, config=None):
        """Helper to create a temporary scenario YAML file"""
        import yaml
        from pathlib import Path

        default_config = {
            "packet-size": 120,
            "window-size": 64,
            "duration": 1,
            "namespace": "default",
            "target-service": "elasticsearch",
            "target-port": 9200,
            "target-service-label": "",
            "number-of-pods": 1,
            "image": "quay.io/krkn-chaos/krkn-syn-flood:v1.0.0",
            "attacker-nodes": {"node-role.kubernetes.io/worker": [""]},
        }
        if config:
            default_config.update(config)

        scenario_file = Path(tmp_path) / "test_scenario.yaml"
        with open(scenario_file, "w") as f:
            yaml.dump(default_config, f)
        return str(scenario_file)

    def _create_mocks(self):
        """Helper to create mock objects for testing"""
        mock_lib_telemetry = MagicMock()
        mock_lib_kubernetes = MagicMock()
        mock_lib_telemetry.get_lib_kubernetes.return_value = mock_lib_kubernetes
        mock_scenario_telemetry = MagicMock()
        return mock_lib_telemetry, mock_lib_kubernetes, mock_scenario_telemetry

    def test_run_successful_with_target_service(self):
        """Test successful execution with target-service"""
        import tempfile

        with tempfile.TemporaryDirectory() as tmp_dir:
            scenario_file = self._create_scenario_file(tmp_dir)
            mock_lib_telemetry, mock_lib_kubernetes, mock_scenario_telemetry = (
                self._create_mocks()
            )

            mock_lib_kubernetes.service_exists.return_value = True
            # Pod finishes immediately
            mock_lib_kubernetes.is_pod_running.return_value = False

            plugin = SynFloodScenarioPlugin()

            result = plugin.run(
                run_uuid=str(uuid.uuid4()),
                scenario=scenario_file,
                krkn_config={},
                lib_telemetry=mock_lib_telemetry,
                scenario_telemetry=mock_scenario_telemetry,
            )

            self.assertEqual(result, 0)
            mock_lib_kubernetes.service_exists.assert_called_once_with(
                "elasticsearch", "default"
            )
            mock_lib_kubernetes.deploy_syn_flood.assert_called_once()

    def test_run_successful_with_label_selector(self):
        """Test successful execution with target-service-label"""
        import tempfile

        with tempfile.TemporaryDirectory() as tmp_dir:
            scenario_file = self._create_scenario_file(
                tmp_dir,
                {"target-service": "", "target-service-label": "app=elasticsearch"},
            )
            mock_lib_telemetry, mock_lib_kubernetes, mock_scenario_telemetry = (
                self._create_mocks()
            )

            mock_lib_kubernetes.select_service_by_label.return_value = [
                "elasticsearch-1",
                "elasticsearch-2",
            ]
            mock_lib_kubernetes.service_exists.return_value = True
            mock_lib_kubernetes.is_pod_running.return_value = False

            plugin = SynFloodScenarioPlugin()

            result = plugin.run(
                run_uuid=str(uuid.uuid4()),
                scenario=scenario_file,
                krkn_config={},
                lib_telemetry=mock_lib_telemetry,
                scenario_telemetry=mock_scenario_telemetry,
            )

            self.assertEqual(result, 0)
            mock_lib_kubernetes.select_service_by_label.assert_called_once_with(
                "default", "app=elasticsearch"
            )
            # Should deploy pods for each service found
            self.assertEqual(mock_lib_kubernetes.deploy_syn_flood.call_count, 2)

    def test_run_service_not_found(self):
        """Test run method when service does not exist"""
        import tempfile

        with tempfile.TemporaryDirectory() as tmp_dir:
            scenario_file = self._create_scenario_file(tmp_dir)
            mock_lib_telemetry, mock_lib_kubernetes, mock_scenario_telemetry = (
                self._create_mocks()
            )

            mock_lib_kubernetes.service_exists.return_value = False

            plugin = SynFloodScenarioPlugin()

            result = plugin.run(
                run_uuid=str(uuid.uuid4()),
                scenario=scenario_file,
                krkn_config={},
                lib_telemetry=mock_lib_telemetry,
                scenario_telemetry=mock_scenario_telemetry,
            )

            self.assertEqual(result, 1)
            mock_lib_kubernetes.deploy_syn_flood.assert_not_called()

    def test_run_multiple_pods(self):
        """Test run method with multiple attacker pods"""
        import tempfile

        with tempfile.TemporaryDirectory() as tmp_dir:
            scenario_file = self._create_scenario_file(tmp_dir, {"number-of-pods": 3})
            mock_lib_telemetry, mock_lib_kubernetes, mock_scenario_telemetry = (
                self._create_mocks()
            )

            mock_lib_kubernetes.service_exists.return_value = True
            mock_lib_kubernetes.is_pod_running.return_value = False

            plugin = SynFloodScenarioPlugin()

            result = plugin.run(
                run_uuid=str(uuid.uuid4()),
                scenario=scenario_file,
                krkn_config={},
                lib_telemetry=mock_lib_telemetry,
                scenario_telemetry=mock_scenario_telemetry,
            )

            self.assertEqual(result, 0)
            self.assertEqual(mock_lib_kubernetes.deploy_syn_flood.call_count, 3)

    def test_run_exception_handling(self):
        """Test run method handles exceptions gracefully"""
        import tempfile

        with tempfile.TemporaryDirectory() as tmp_dir:
            scenario_file = self._create_scenario_file(tmp_dir)
            mock_lib_telemetry, mock_lib_kubernetes, mock_scenario_telemetry = (
                self._create_mocks()
            )

            mock_lib_kubernetes.service_exists.return_value = True
            mock_lib_kubernetes.deploy_syn_flood.side_effect = Exception("Deployment failed")

            plugin = SynFloodScenarioPlugin()

            result = plugin.run(
                run_uuid=str(uuid.uuid4()),
                scenario=scenario_file,
                krkn_config={},
                lib_telemetry=mock_lib_telemetry,
                scenario_telemetry=mock_scenario_telemetry,
            )

            self.assertEqual(result, 1)

    def test_run_waits_for_pods_to_finish(self):
        """Test run method waits for pods to finish"""
        import tempfile

        with tempfile.TemporaryDirectory() as tmp_dir:
            scenario_file = self._create_scenario_file(tmp_dir)
            mock_lib_telemetry, mock_lib_kubernetes, mock_scenario_telemetry = (
                self._create_mocks()
            )

            mock_lib_kubernetes.service_exists.return_value = True
            # Pod runs for a few iterations then finishes
            mock_lib_kubernetes.is_pod_running.side_effect = [True, True, False]

            plugin = SynFloodScenarioPlugin()

            result = plugin.run(
                run_uuid=str(uuid.uuid4()),
                scenario=scenario_file,
                krkn_config={},
                lib_telemetry=mock_lib_telemetry,
                scenario_telemetry=mock_scenario_telemetry,
            )

            self.assertEqual(result, 0)
            # Should have checked pod status multiple times
            self.assertGreaterEqual(mock_lib_kubernetes.is_pod_running.call_count, 1)


class TestRollbackSynFloodPods(unittest.TestCase):
    """Tests for rollback_syn_flood_pods static method"""

    def test_rollback_syn_flood_pods_successful(self):
        """Test successful rollback of syn flood pods"""
        pod_names = ["syn-flood-abc123", "syn-flood-def456"]
        encoded_data = base64.b64encode(
            json.dumps(pod_names).encode("utf-8")
        ).decode("utf-8")

        rollback_content = RollbackContent(
            resource_identifier=encoded_data,
            namespace="default",
        )

        mock_lib_telemetry = MagicMock()
        mock_lib_kubernetes = MagicMock()
        mock_lib_telemetry.get_lib_kubernetes.return_value = mock_lib_kubernetes

        SynFloodScenarioPlugin.rollback_syn_flood_pods(
            rollback_content, mock_lib_telemetry
        )

        assert mock_lib_kubernetes.delete_pod.call_count == 2
        mock_lib_kubernetes.delete_pod.assert_any_call("syn-flood-abc123", "default")
        mock_lib_kubernetes.delete_pod.assert_any_call("syn-flood-def456", "default")

    def test_rollback_syn_flood_pods_empty_list(self):
        """Test rollback with empty pod list"""
        pod_names = []
        encoded_data = base64.b64encode(
            json.dumps(pod_names).encode("utf-8")
        ).decode("utf-8")

        rollback_content = RollbackContent(
            resource_identifier=encoded_data,
            namespace="default",
        )

        mock_lib_telemetry = MagicMock()
        mock_lib_kubernetes = MagicMock()
        mock_lib_telemetry.get_lib_kubernetes.return_value = mock_lib_kubernetes

        SynFloodScenarioPlugin.rollback_syn_flood_pods(
            rollback_content, mock_lib_telemetry
        )

        mock_lib_kubernetes.delete_pod.assert_not_called()

    def test_rollback_syn_flood_pods_invalid_data(self):
        """Test rollback with invalid encoded data handles error gracefully"""
        rollback_content = RollbackContent(
            resource_identifier="invalid_base64_data",
            namespace="default",
        )

        mock_lib_telemetry = MagicMock()
        mock_lib_kubernetes = MagicMock()
        mock_lib_telemetry.get_lib_kubernetes.return_value = mock_lib_kubernetes

        # Should not raise exception, just log error
        with self.assertLogs(level='ERROR') as log_context:
            SynFloodScenarioPlugin.rollback_syn_flood_pods(
                rollback_content, mock_lib_telemetry
            )

        # Verify error was logged
        self.assertTrue(any('error' in log.lower() for log in log_context.output))

        # Verify delete_pod was not called due to invalid data
        mock_lib_kubernetes.delete_pod.assert_not_called()


if __name__ == "__main__":
    unittest.main()
@@ -1,31 +0,0 @@
# ⚠️ DEPRECATED

This directory is **no longer actively maintained** and will not accept new changes.

## Migration Notice

All development efforts have been moved to:

**[github.com/krkn-chaos/krkn-ai](https://github.com/krkn-chaos/krkn-ai)**

## What This Means

- ❌ No new features will be added here
- ❌ Bug fixes will not be accepted
- ❌ Pull requests will be closed and redirected
- ℹ️ Existing code remains for historical reference only

## Next Steps

If you're looking to:
- **Use** chaos engineering AI features → Visit [krkn-chaos/krkn-ai](https://github.com/krkn-chaos/krkn-ai)
- **Contribute** improvements → Submit to [krkn-chaos/krkn-ai](https://github.com/krkn-chaos/krkn-ai)
- **Report issues** → Open issues at [krkn-chaos/krkn-ai](https://github.com/krkn-chaos/krkn-ai/issues)

## Questions?

Please visit the new repository for documentation, examples, and community support.

---

**Last Updated:** January 2026
304
utils/arcaflow/ocp-chaos/README.md
Normal file
@@ -0,0 +1,304 @@
# OpenShift Shenanigans

## Workflow Description

Given a target OpenShift cluster, this workflow executes a
[kube-burner plugin](https://github.com/redhat-performance/arcaflow-plugin-kube-burner)
workflow to place a load on the cluster, repeatedly removes a targeted pod at a given time frequency with the [kill-pod plugin](https://github.com/krkn-chaos/arcaflow-plugin-kill-pod),
and runs a [stress-ng](https://github.com/ColinIanKing/stress-ng) CPU workload on the cluster.
Target your OpenShift cluster with the appropriate `kubeconfig` file, and add its file path as
the value for `kubernetes_target.kubeconfig_path` in the input file. Any combination of subworkflows can be disabled in the input file by setting any of `cpu_hog_enabled`, `pod_chaos_enabled`, or `kubeburner_enabled` to `false`, as in the sketch below.
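
A minimal `input.yaml` fragment along these lines targets a cluster and switches off the kube-burner subworkflow (the kubeconfig path is a placeholder; see [`input.yaml`](input.yaml) for the full set of parameters):

```yaml
kubernetes_target:
  kubeconfig_path: /path/to/kubeconfig  # placeholder: point at your cluster's kubeconfig
cpu_hog_enabled: true
pod_chaos_enabled: true
kubeburner_enabled: false  # this subworkflow is skipped
```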
## Files

- [`workflow.yaml`](workflow.yaml) -- Defines the workflow input schema, the plugins to run
  and their data relationships, and the output to present to the user
- [`input.yaml`](input.yaml) -- The input parameters that the user provides for running
  the workflow
- [`config.yaml`](config.yaml) -- Global config parameters that are passed to the Arcaflow
  engine
- [`cpu-hog.yaml`](subworkflows/cpu-hog.yaml) -- The StressNG workload on the CPU
- [`kubeburner.yaml`](subworkflows/kubeburner.yaml) -- The KubeBurner workload for the Kubernetes cluster API
- [`pod-chaos.yaml`](subworkflows/pod-chaos.yaml) -- The Kill Pod workflow for the Kubernetes infrastructure pods

## Running the Workflow

### Workflow Dependencies

Install Python `3.9` or later.

First, add the path to your Python interpreter to `config.yaml` as the value
for `pythonPath`, as shown here. A common choice on Linux
distributions is `/usr/bin/python`. Second, add a
directory to which your Arcaflow process will have write access as the
value for `workdir`; `/tmp` is a common choice because your process will likely be able to write to it.

```yaml
deployers:
  python:
    pythonPath: /usr/bin/python
    workdir: /tmp
```

To use this Python interpreter with the `kill-pod` plugin, go to the `deploy` section of the `kill_pod` step in [`pod-chaos.yaml`](subworkflows/pod-chaos.yaml). You can use the same `pythonPath` and `workdir` that you used in
your `config.yaml`.

```yaml
deploy:
  deployer_name: python
  modulePullPolicy: Always
  pythonPath: /usr/bin/python
  workdir: /tmp
```

Download a Go binary of the latest version of the Arcaflow engine from: https://github.com/arcalot/arcaflow-engine/releases.

#### OpenShift Target

Target your desired OpenShift cluster by setting the `kubernetes_target.kubeconfig_path` variable in [`input.yaml`](input.yaml); the main workflow binds it as a constant into each subworkflow's parameter list.

#### Kube-Burner Plugin

The `kube-burner` plugin generates and reports the UUID to which the
`kube-burner` data is associated in your search database. The `uuidgen`
workflow step uses the `arcaflow-plugin-utilities` `uuid` plugin step to
randomly generate a UUID for you.
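
As a sketch of that wiring, the `kube-burner` step in [`kubeburner.yaml`](subworkflows/kubeburner.yaml) consumes the generated value through an expression:

```yaml
input:
  uuid: !expr $.steps.uuidgen.outputs.success.uuid
```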
### Workflow Execution

Run the workflow:
```
$ export WFPATH=<path to this workflow directory>
$ arcaflow --context ${WFPATH} --input input.yaml --config config.yaml --workflow workflow.yaml
```

## Workflow Diagram
This diagram shows the complete end-to-end workflow logic.

### Main Workflow

```mermaid
%% Mermaid markdown workflow
flowchart LR
%% Success path
input-->steps.cpu_hog_wf.enabling
input-->steps.cpu_hog_wf.execute
input-->steps.kubeburner_wf.enabling
input-->steps.kubeburner_wf.execute
input-->steps.pod_chaos_wf.enabling
input-->steps.pod_chaos_wf.execute
outputs.workflow_success.cpu_hog-->outputs.workflow_success
outputs.workflow_success.cpu_hog.disabled-->outputs.workflow_success.cpu_hog
outputs.workflow_success.cpu_hog.enabled-->outputs.workflow_success.cpu_hog
outputs.workflow_success.kubeburner-->outputs.workflow_success
outputs.workflow_success.kubeburner.disabled-->outputs.workflow_success.kubeburner
outputs.workflow_success.kubeburner.enabled-->outputs.workflow_success.kubeburner
outputs.workflow_success.pod_chaos-->outputs.workflow_success
outputs.workflow_success.pod_chaos.disabled-->outputs.workflow_success.pod_chaos
outputs.workflow_success.pod_chaos.enabled-->outputs.workflow_success.pod_chaos
steps.cpu_hog_wf.closed-->steps.cpu_hog_wf.closed.result
steps.cpu_hog_wf.disabled-->steps.cpu_hog_wf.disabled.output
steps.cpu_hog_wf.disabled.output-->outputs.workflow_success.cpu_hog.disabled
steps.cpu_hog_wf.enabling-->steps.cpu_hog_wf.closed
steps.cpu_hog_wf.enabling-->steps.cpu_hog_wf.disabled
steps.cpu_hog_wf.enabling-->steps.cpu_hog_wf.enabling.resolved
steps.cpu_hog_wf.enabling-->steps.cpu_hog_wf.execute
steps.cpu_hog_wf.execute-->steps.cpu_hog_wf.outputs
steps.cpu_hog_wf.outputs-->steps.cpu_hog_wf.outputs.success
steps.cpu_hog_wf.outputs.success-->outputs.workflow_success.cpu_hog.enabled
steps.kubeburner_wf.closed-->steps.kubeburner_wf.closed.result
steps.kubeburner_wf.disabled-->steps.kubeburner_wf.disabled.output
steps.kubeburner_wf.disabled.output-->outputs.workflow_success.kubeburner.disabled
steps.kubeburner_wf.enabling-->steps.kubeburner_wf.closed
steps.kubeburner_wf.enabling-->steps.kubeburner_wf.disabled
steps.kubeburner_wf.enabling-->steps.kubeburner_wf.enabling.resolved
steps.kubeburner_wf.enabling-->steps.kubeburner_wf.execute
steps.kubeburner_wf.execute-->steps.kubeburner_wf.outputs
steps.kubeburner_wf.outputs-->steps.kubeburner_wf.outputs.success
steps.kubeburner_wf.outputs.success-->outputs.workflow_success.kubeburner.enabled
steps.pod_chaos_wf.closed-->steps.pod_chaos_wf.closed.result
steps.pod_chaos_wf.disabled-->steps.pod_chaos_wf.disabled.output
steps.pod_chaos_wf.disabled.output-->outputs.workflow_success.pod_chaos.disabled
steps.pod_chaos_wf.enabling-->steps.pod_chaos_wf.closed
steps.pod_chaos_wf.enabling-->steps.pod_chaos_wf.disabled
steps.pod_chaos_wf.enabling-->steps.pod_chaos_wf.enabling.resolved
steps.pod_chaos_wf.enabling-->steps.pod_chaos_wf.execute
steps.pod_chaos_wf.execute-->steps.pod_chaos_wf.outputs
steps.pod_chaos_wf.outputs-->steps.pod_chaos_wf.outputs.success
steps.pod_chaos_wf.outputs.success-->outputs.workflow_success.pod_chaos.enabled
%% Error path
steps.cpu_hog_wf.execute-->steps.cpu_hog_wf.failed
steps.cpu_hog_wf.failed-->steps.cpu_hog_wf.failed.error
steps.kubeburner_wf.execute-->steps.kubeburner_wf.failed
steps.kubeburner_wf.failed-->steps.kubeburner_wf.failed.error
steps.pod_chaos_wf.execute-->steps.pod_chaos_wf.failed
steps.pod_chaos_wf.failed-->steps.pod_chaos_wf.failed.error
%% Mermaid end
```

### Pod Chaos Workflow

```mermaid
%% Mermaid markdown workflow
flowchart LR
%% Success path
input-->steps.kill_pod.starting
steps.kill_pod.cancelled-->steps.kill_pod.closed
steps.kill_pod.cancelled-->steps.kill_pod.outputs
steps.kill_pod.closed-->steps.kill_pod.closed.result
steps.kill_pod.deploy-->steps.kill_pod.closed
steps.kill_pod.deploy-->steps.kill_pod.starting
steps.kill_pod.disabled-->steps.kill_pod.disabled.output
steps.kill_pod.enabling-->steps.kill_pod.closed
steps.kill_pod.enabling-->steps.kill_pod.disabled
steps.kill_pod.enabling-->steps.kill_pod.enabling.resolved
steps.kill_pod.enabling-->steps.kill_pod.starting
steps.kill_pod.outputs-->steps.kill_pod.outputs.success
steps.kill_pod.outputs.success-->outputs.success
steps.kill_pod.running-->steps.kill_pod.closed
steps.kill_pod.running-->steps.kill_pod.outputs
steps.kill_pod.starting-->steps.kill_pod.closed
steps.kill_pod.starting-->steps.kill_pod.running
steps.kill_pod.starting-->steps.kill_pod.starting.started
%% Error path
steps.kill_pod.cancelled-->steps.kill_pod.crashed
steps.kill_pod.cancelled-->steps.kill_pod.deploy_failed
steps.kill_pod.crashed-->steps.kill_pod.crashed.error
steps.kill_pod.deploy-->steps.kill_pod.deploy_failed
steps.kill_pod.deploy_failed-->steps.kill_pod.deploy_failed.error
steps.kill_pod.enabling-->steps.kill_pod.crashed
steps.kill_pod.outputs-->steps.kill_pod.outputs.error
steps.kill_pod.running-->steps.kill_pod.crashed
steps.kill_pod.starting-->steps.kill_pod.crashed
%% Mermaid end
```

### StressNG (CPU Hog) Workflow

```mermaid
%% Mermaid markdown workflow
flowchart LR
%% Success path
input-->steps.kubeconfig.starting
input-->steps.stressng.deploy
input-->steps.stressng.starting
steps.kubeconfig.cancelled-->steps.kubeconfig.closed
steps.kubeconfig.cancelled-->steps.kubeconfig.outputs
steps.kubeconfig.closed-->steps.kubeconfig.closed.result
steps.kubeconfig.deploy-->steps.kubeconfig.closed
steps.kubeconfig.deploy-->steps.kubeconfig.starting
steps.kubeconfig.disabled-->steps.kubeconfig.disabled.output
steps.kubeconfig.enabling-->steps.kubeconfig.closed
steps.kubeconfig.enabling-->steps.kubeconfig.disabled
steps.kubeconfig.enabling-->steps.kubeconfig.enabling.resolved
steps.kubeconfig.enabling-->steps.kubeconfig.starting
steps.kubeconfig.outputs-->steps.kubeconfig.outputs.success
steps.kubeconfig.outputs.success-->steps.stressng.deploy
steps.kubeconfig.running-->steps.kubeconfig.closed
steps.kubeconfig.running-->steps.kubeconfig.outputs
steps.kubeconfig.starting-->steps.kubeconfig.closed
steps.kubeconfig.starting-->steps.kubeconfig.running
steps.kubeconfig.starting-->steps.kubeconfig.starting.started
steps.stressng.cancelled-->steps.stressng.closed
steps.stressng.cancelled-->steps.stressng.outputs
steps.stressng.closed-->steps.stressng.closed.result
steps.stressng.deploy-->steps.stressng.closed
steps.stressng.deploy-->steps.stressng.starting
steps.stressng.disabled-->steps.stressng.disabled.output
steps.stressng.enabling-->steps.stressng.closed
steps.stressng.enabling-->steps.stressng.disabled
steps.stressng.enabling-->steps.stressng.enabling.resolved
steps.stressng.enabling-->steps.stressng.starting
steps.stressng.outputs-->steps.stressng.outputs.success
steps.stressng.outputs.success-->outputs.success
steps.stressng.running-->steps.stressng.closed
steps.stressng.running-->steps.stressng.outputs
steps.stressng.starting-->steps.stressng.closed
steps.stressng.starting-->steps.stressng.running
steps.stressng.starting-->steps.stressng.starting.started
%% Error path
steps.kubeconfig.cancelled-->steps.kubeconfig.crashed
steps.kubeconfig.cancelled-->steps.kubeconfig.deploy_failed
steps.kubeconfig.crashed-->steps.kubeconfig.crashed.error
steps.kubeconfig.deploy-->steps.kubeconfig.deploy_failed
steps.kubeconfig.deploy_failed-->steps.kubeconfig.deploy_failed.error
steps.kubeconfig.enabling-->steps.kubeconfig.crashed
steps.kubeconfig.outputs-->steps.kubeconfig.outputs.error
steps.kubeconfig.running-->steps.kubeconfig.crashed
steps.kubeconfig.starting-->steps.kubeconfig.crashed
steps.stressng.cancelled-->steps.stressng.crashed
steps.stressng.cancelled-->steps.stressng.deploy_failed
steps.stressng.crashed-->steps.stressng.crashed.error
steps.stressng.deploy-->steps.stressng.deploy_failed
steps.stressng.deploy_failed-->steps.stressng.deploy_failed.error
steps.stressng.enabling-->steps.stressng.crashed
steps.stressng.outputs-->steps.stressng.outputs.error
steps.stressng.running-->steps.stressng.crashed
steps.stressng.starting-->steps.stressng.crashed
%% Mermaid end
```

### Kube-Burner Workflow

```mermaid
%% Mermaid markdown workflow
flowchart LR
%% Success path
input-->steps.kubeburner.starting
steps.kubeburner.cancelled-->steps.kubeburner.closed
steps.kubeburner.cancelled-->steps.kubeburner.outputs
steps.kubeburner.closed-->steps.kubeburner.closed.result
steps.kubeburner.deploy-->steps.kubeburner.closed
steps.kubeburner.deploy-->steps.kubeburner.starting
steps.kubeburner.disabled-->steps.kubeburner.disabled.output
steps.kubeburner.enabling-->steps.kubeburner.closed
steps.kubeburner.enabling-->steps.kubeburner.disabled
steps.kubeburner.enabling-->steps.kubeburner.enabling.resolved
steps.kubeburner.enabling-->steps.kubeburner.starting
steps.kubeburner.outputs-->steps.kubeburner.outputs.success
steps.kubeburner.outputs.success-->outputs.success
steps.kubeburner.running-->steps.kubeburner.closed
steps.kubeburner.running-->steps.kubeburner.outputs
steps.kubeburner.starting-->steps.kubeburner.closed
steps.kubeburner.starting-->steps.kubeburner.running
steps.kubeburner.starting-->steps.kubeburner.starting.started
steps.uuidgen.cancelled-->steps.uuidgen.closed
steps.uuidgen.cancelled-->steps.uuidgen.outputs
steps.uuidgen.closed-->steps.uuidgen.closed.result
steps.uuidgen.deploy-->steps.uuidgen.closed
steps.uuidgen.deploy-->steps.uuidgen.starting
steps.uuidgen.disabled-->steps.uuidgen.disabled.output
steps.uuidgen.enabling-->steps.uuidgen.closed
steps.uuidgen.enabling-->steps.uuidgen.disabled
steps.uuidgen.enabling-->steps.uuidgen.enabling.resolved
steps.uuidgen.enabling-->steps.uuidgen.starting
steps.uuidgen.outputs-->steps.uuidgen.outputs.success
steps.uuidgen.outputs.success-->steps.kubeburner.starting
steps.uuidgen.running-->steps.uuidgen.closed
steps.uuidgen.running-->steps.uuidgen.outputs
steps.uuidgen.starting-->steps.uuidgen.closed
steps.uuidgen.starting-->steps.uuidgen.running
steps.uuidgen.starting-->steps.uuidgen.starting.started
%% Error path
steps.kubeburner.cancelled-->steps.kubeburner.crashed
steps.kubeburner.cancelled-->steps.kubeburner.deploy_failed
steps.kubeburner.crashed-->steps.kubeburner.crashed.error
steps.kubeburner.deploy-->steps.kubeburner.deploy_failed
steps.kubeburner.deploy_failed-->steps.kubeburner.deploy_failed.error
steps.kubeburner.enabling-->steps.kubeburner.crashed
steps.kubeburner.outputs-->steps.kubeburner.outputs.error
steps.kubeburner.running-->steps.kubeburner.crashed
steps.kubeburner.starting-->steps.kubeburner.crashed
steps.uuidgen.cancelled-->steps.uuidgen.crashed
steps.uuidgen.cancelled-->steps.uuidgen.deploy_failed
steps.uuidgen.crashed-->steps.uuidgen.crashed.error
steps.uuidgen.deploy-->steps.uuidgen.deploy_failed
steps.uuidgen.deploy_failed-->steps.uuidgen.deploy_failed.error
steps.uuidgen.enabling-->steps.uuidgen.crashed
steps.uuidgen.outputs-->steps.uuidgen.outputs.error
steps.uuidgen.running-->steps.uuidgen.crashed
steps.uuidgen.starting-->steps.uuidgen.crashed
%% Mermaid end
```
18
utils/arcaflow/ocp-chaos/config.yaml
Normal file
@@ -0,0 +1,18 @@
---
deployers:
  image:
    deployer_name: podman
    deployment:
      imagePullPolicy: IfNotPresent
  python:
    deployer_name: python
    modulePullPolicy: Always
    pythonPath: /usr/bin/python
    workdir: /tmp
log:
  level: debug
  logged_outputs:
    error:
      level: debug
    success:
      level: debug
41
utils/arcaflow/ocp-chaos/input.yaml
Normal file
@@ -0,0 +1,41 @@
kubernetes_target:
  kubeconfig_path:
cpu_hog_enabled: true
pod_chaos_enabled: true
kubeburner_enabled: true

kubeburner_list:
  - kubeburner:
      kubeconfig: 'given later in workflow by kubeconfig plugin'
      workload: 'cluster-density'
      qps: 20
      burst: 20
      log_level: 'info'
      timeout: '1m'
      iterations: 1
      churn: 'true'
      churn_duration: 1s
      churn_delay: 1s
      churn_percent: 10
      alerting: 'true'
      gc: 'true'

pod_chaos_list:
  - namespace_pattern: ^openshift-etcd$
    label_selector: k8s-app=etcd
    kill: 1
    krkn_pod_recovery_time: 1

cpu_hog_list:
  - namespace: default
    # set the node selector as a key-value pair, e.g.
    # node_selector:
    #   kubernetes.io/hostname: kind-worker2
    node_selector: {}
    stressng_params:
      timeout: 1
      stressors:
        - stressor: cpu
          workers: 1
          cpu-load: 20
          cpu-method: all
75
utils/arcaflow/ocp-chaos/subworkflows/cpu-hog.yaml
Normal file
@@ -0,0 +1,75 @@
version: v0.2.0
input:
  root: CpuHog__KubernetesTarget
  objects:
    CpuHog__KubernetesTarget:
      id: CpuHog__KubernetesTarget
      properties:
        constant:
          type:
            type_id: ref
            id: KubernetesTarget
        item:
          type:
            type_id: ref
            id: CpuHog
    KubernetesTarget:
      id: KubernetesTarget
      properties:
        kubeconfig_path:
          type:
            type_id: string
    CpuHog:
      id: CpuHog
      properties:
        namespace:
          display:
            description: The namespace where the container will be deployed
            name: Namespace
          type:
            type_id: string
          required: true
        node_selector:
          display:
            description: kubernetes node name where the plugin must be deployed
          type:
            type_id: map
            values:
              type_id: string
            keys:
              type_id: string
          required: true
        stressng_params:
          type:
            type_id: ref
            id: StressNGParams
            namespace: $.steps.stressng.starting.inputs.input

steps:
  kubeconfig:
    plugin:
      src: quay.io/arcalot/arcaflow-plugin-kubeconfig:0.3.1
      deployment_type: image
    input:
      kubeconfig: !expr 'readFile($.input.constant.kubeconfig_path)'
  stressng:
    plugin:
      src: quay.io/arcalot/arcaflow-plugin-stressng:0.8.0
      deployment_type: image
    step: workload
    input: !expr $.input.item.stressng_params
    deploy:
      deployer_name: kubernetes
      connection: !expr $.steps.kubeconfig.outputs.success.connection
      pod:
        metadata:
          namespace: !expr $.input.item.namespace
          labels:
            arcaflow: stressng
        spec:
          nodeSelector: !expr $.input.item.node_selector
          pluginContainer:
            imagePullPolicy: Always

outputs:
  success: !expr $.steps.stressng.outputs.success
54
utils/arcaflow/ocp-chaos/subworkflows/kubeburner.yaml
Normal file
@@ -0,0 +1,54 @@
version: v0.2.0
input:
  root: KubeBurner__KubernetesTarget
  objects:
    KubeBurner__KubernetesTarget:
      id: KubeBurner__KubernetesTarget
      properties:
        constant:
          type:
            type_id: ref
            id: KubernetesTarget
        item:
          type:
            type_id: ref
            id: KubeBurner
    KubernetesTarget:
      id: KubernetesTarget
      properties:
        kubeconfig_path:
          type:
            type_id: string
    KubeBurner:
      id: KubeBurner
      properties:
        kubeburner:
          type:
            type_id: ref
            id: KubeBurnerInputParams
            namespace: $.steps.kubeburner.starting.inputs.input

steps:
  uuidgen:
    plugin:
      deployment_type: image
      src: quay.io/arcalot/arcaflow-plugin-utilities:0.6.0
    step: uuid
    input: {}
  kubeburner:
    plugin:
      deployment_type: image
      src: quay.io/redhat-performance/arcaflow-plugin-kube-burner:latest
    step: kube-burner
    input:
      kubeconfig: !expr 'readFile($.input.constant.kubeconfig_path)'
      uuid: !expr $.steps.uuidgen.outputs.success.uuid
      workload: !expr $.input.item.kubeburner.workload
      iterations: !expr $.input.item.kubeburner.iterations
      churn: !expr $.input.item.kubeburner.churn
      churn_duration: !expr $.input.item.kubeburner.churn_duration
      churn_delay: !expr $.input.item.kubeburner.churn_delay

outputs:
  success:
    burner: !expr $.steps.kubeburner.outputs.success
108
utils/arcaflow/ocp-chaos/subworkflows/pod-chaos.yaml
Normal file
@@ -0,0 +1,108 @@
version: v0.2.0
input:
  root: KillPodConfig__KubernetesTarget
  objects:
    KillPodConfig__KubernetesTarget:
      id: KillPodConfig__KubernetesTarget
      properties:
        constant:
          type:
            type_id: ref
            id: KubernetesTarget
        item:
          type:
            type_id: ref
            id: KillPodConfig
    KubernetesTarget:
      id: KubernetesTarget
      properties:
        kubeconfig_path:
          type:
            type_id: string
    KillPodConfig:
      id: KillPodConfig
      properties:
        backoff:
          default: '1'
          display:
            description: How many seconds to wait between checks for the target
              pod status.
            name: Backoff
          required: false
          type:
            type_id: integer
        kill:
          default: '1'
          display:
            description: How many pods should we attempt to kill?
            name: Number of pods to kill
          required: false
          type:
            min: 1
            type_id: integer
        krkn_pod_recovery_time:
          default: '60'
          display:
            description: The expected recovery time of the pod (used by Krkn to
              monitor the pod lifecycle)
            name: Recovery Time
          required: false
          type:
            type_id: integer
        label_selector:
          display:
            description: 'Kubernetes label selector for the target pods. Required
              if name_pattern is not set.
              See https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
              for details.'
            name: Label selector
          required: false
          required_if_not:
          - name_pattern
          type:
            type_id: string
        name_pattern:
          display:
            description: Regular expression for target pods. Required if label_selector
              is not set.
            name: Name pattern
          required: false
          required_if_not:
          - label_selector
          type:
            type_id: pattern
        namespace_pattern:
          display:
            description: Regular expression for target pod namespaces.
            name: Namespace pattern
          required: true
          type:
            type_id: pattern
        timeout:
          default: '180'
          display:
            description: Timeout to wait for the target pod(s) to be removed in
              seconds.
            name: Timeout
          required: false
          type:
            type_id: integer

steps:
  kill_pod:
    step: kill-pods
    plugin:
      deployment_type: python
      src: arcaflow-plugin-kill-pod@git+https://github.com/krkn-chaos/arcaflow-plugin-kill-pod.git@a9f87f88d8e7763d111613bd8b2c7862fc49624f
    input:
      namespace_pattern: !expr $.input.item.namespace_pattern
      label_selector: !expr $.input.item.label_selector
      kubeconfig_path: !expr $.input.constant.kubeconfig_path
    deploy:
      deployer_name: python
      modulePullPolicy: Always
      pythonPath: /usr/bin/python
      workdir: /tmp

outputs:
  success: !expr $.steps.kill_pod.outputs.success
73
utils/arcaflow/ocp-chaos/workflow.yaml
Normal file
@@ -0,0 +1,73 @@
version: v0.2.0
|
||||
input:
|
||||
root: RootObject
|
||||
objects:
|
||||
KubernetesTarget:
|
||||
id: KubernetesTarget
|
||||
properties:
|
||||
kubeconfig_path:
|
||||
type:
|
||||
type_id: string
|
||||
RootObject:
|
||||
id: RootObject
|
||||
properties:
|
||||
cpu_hog_enabled:
|
||||
type:
|
||||
type_id: bool
|
||||
pod_chaos_enabled:
|
||||
type:
|
||||
type_id: bool
|
||||
kubeburner_enabled:
|
||||
type:
|
||||
type_id: bool
|
||||
kubernetes_target:
|
||||
type:
|
||||
type_id: ref
|
||||
id: KubernetesTarget
|
||||
kubeburner_list:
|
||||
type:
|
||||
type_id: list
|
||||
items:
|
||||
type_id: ref
|
||||
id: KubeBurner
|
||||
namespace: $.steps.kubeburner_wf.execute.inputs.items
|
||||
pod_chaos_list:
|
||||
type:
|
||||
type_id: list
|
||||
items:
|
||||
type_id: ref
|
||||
id: KillPodConfig
|
||||
namespace: $.steps.pod_chaos_wf.execute.inputs.items
|
||||
cpu_hog_list:
|
||||
type:
|
||||
type_id: list
|
||||
items:
|
||||
type_id: ref
|
||||
id: CpuHog
|
||||
namespace: $.steps.cpu_hog_wf.execute.inputs.items
|
||||
|
||||
steps:
|
||||
kubeburner_wf:
|
||||
kind: foreach
|
||||
items: !expr 'bindConstants($.input.kubeburner_list, $.input.kubernetes_target)'
|
||||
workflow: subworkflows/kubeburner.yaml
|
||||
parallelism: 1
|
||||
enabled: !expr $.input.kubeburner_enabled
|
||||
pod_chaos_wf:
|
||||
kind: foreach
|
||||
items: !expr 'bindConstants($.input.pod_chaos_list, $.input.kubernetes_target)'
|
||||
workflow: subworkflows/pod-chaos.yaml
|
||||
parallelism: 1
|
||||
enabled: !expr $.input.pod_chaos_enabled
|
||||
cpu_hog_wf:
|
||||
kind: foreach
|
||||
items: !expr 'bindConstants($.input.cpu_hog_list, $.input.kubernetes_target)'
|
||||
workflow: subworkflows/cpu-hog.yaml
|
||||
parallelism: 1
|
||||
enabled: !expr $.input.cpu_hog_enabled
|
||||
|
||||
outputs:
|
||||
workflow_success:
|
||||
kubeburner: !ordisabled $.steps.kubeburner_wf.outputs.success
|
||||
pod_chaos: !ordisabled $.steps.pod_chaos_wf.outputs.success
|
||||
cpu_hog: !ordisabled $.steps.cpu_hog_wf.outputs.success
|
||||
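Tying the three `foreach` steps together, a root input document for this workflow could look like the sketch below (all values are hypothetical; the KubeBurner and CpuHog item shapes live in their respective subworkflows):

```yaml
# Hypothetical input for utils/arcaflow/ocp-chaos/workflow.yaml
kubernetes_target:
  kubeconfig_path: /home/user/.kube/config
pod_chaos_enabled: true
kubeburner_enabled: false
cpu_hog_enabled: false
pod_chaos_list:
  - namespace_pattern: ^openshift-etcd$
    label_selector: app=etcd
kubeburner_list: []
cpu_hog_list: []
```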
@@ -1,17 +1,3 @@
# ⚠️ DEPRECATED - This project has moved

> **All development has moved to [github.com/krkn-chaos/krkn-ai](https://github.com/krkn-chaos/krkn-ai)**
>
> This directory is no longer maintained. Please visit the new repository for:
> - Latest features and updates
> - Active development and support
> - Bug fixes and improvements
> - Documentation and examples
>
> See [../README.md](../README.md) for more information.

---

# aichaos
Enhancing Chaos Engineering with AI-assisted fault injection for better resiliency and non-functional testing.
@@ -2,5 +2,5 @@ numpy
pandas
requests
Flask==2.2.5
Werkzeug==3.1.5
Werkzeug==3.0.6
flasgger==0.9.5
@@ -3,9 +3,8 @@ pandas
notebook
jupyterlab
jupyter
seaborn==0.13.2
seaborn
requests
wheel
Flask==2.2.5
Flask==2.1.0
flasgger==0.9.5
pillow==10.3.0
@@ -1,17 +1,3 @@
# ⚠️ DEPRECATED - This project has moved

> **All development has moved to [github.com/krkn-chaos/krkn-ai](https://github.com/krkn-chaos/krkn-ai)**
>
> This directory is no longer maintained. Please visit the new repository for:
> - Latest features and updates
> - Active development and support
> - Bug fixes and improvements
> - Documentation and examples
>
> See [../README.md](../README.md) for more information.

---

# Chaos Recommendation Tool

This tool, designed for Red Hat Kraken, operates through the command line and offers recommendations for chaos testing. It suggests probable chaos test cases that can disrupt application services by analyzing their behavior and assessing their susceptibility to specific fault types.
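The `main()` diff below reads its settings either from CLI flags or from a config file via `read_configuration()`. Judging only from the tuple that call unpacks, a config file might look like the following sketch; the key names and YAML format are assumptions inferred from the variable names, not confirmed by this diff:

```yaml
# Hypothetical recommender config, inferred from read_configuration();
# actual key names and file format may differ.
namespaces: [robot-shop]
kubeconfig: ~/.kube/config
prometheus_endpoint: https://prometheus.example.com
auth_token: <prometheus-bearer-token>
scrape_duration: 10m
chaos_tests: {MEM: true, CPU: true, NETWORK: false, GENERIC: false}
log_level: INFO
threshold: 0.7
heatmap_cpu_threshold: 0.5
heatmap_mem_threshold: 0.5
output_path: ./recommender_output
```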
@@ -224,7 +224,9 @@ def json_namespace(namespace, queries, analysis_data):


def main():
    parser = argparse.ArgumentParser(description="Krkn Chaos Recommender Command-Line tool")
    parser = argparse.ArgumentParser(
        description="Krkn Chaos Recommender Command-Line tool"
    )
    args = parse_arguments(parser)

    if args.config_file is None and not args.options:
@@ -234,133 +236,91 @@ def main():
        parser.print_help()
        sys.exit(1)

    try:
        if args.config_file is not None:
            (
                namespaces,
                kubeconfig,
                prometheus_endpoint,
                auth_token,
                scrape_duration,
                chaos_tests,
                log_level,
                threshold,
                heatmap_cpu_threshold,
                heatmap_mem_threshold,
                output_path,
            ) = read_configuration(args.config_file)
        else:
            namespaces = args.namespaces
            kubeconfig = args.kubeconfig
            auth_token = args.token
            scrape_duration = args.scrape_duration
            log_level = args.log_level
            prometheus_endpoint = args.prometheus_endpoint
            output_path = args.json_output_file
            chaos_tests = {
                "MEM": args.MEM,
                "GENERIC": args.GENERIC,
                "CPU": args.CPU,
                "NETWORK": args.NETWORK,
            }
            threshold = args.threshold
            heatmap_mem_threshold = args.mem_threshold
            heatmap_cpu_threshold = args.cpu_threshold

        if log_level not in ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]:
            logging.error(f"{log_level} not a valid log level")
            sys.exit(1)

        logging.basicConfig(level=log_level)

        if output_path is not False:
            if output_path is None:
                output_path = "./recommender_output"
                logging.info(
                    f"Path for output file not specified. "
                    f"Using default folder {output_path}"
                )
            if not os.path.exists(os.path.expanduser(output_path)):
                logging.error(f"Folder {output_path} for output not found.")
                sys.exit(1)

        # Validate required inputs
        if not namespaces:
            logging.error("No namespaces provided")
            sys.exit(1)
        if not prometheus_endpoint:
            logging.error("Prometheus endpoint is required")
            sys.exit(1)
        if not auth_token:
            logging.error("Auth token is required")
            sys.exit(1)

        logging.info("Loading inputs...")
        inputs = json_inputs(
            namespaces,
            kubeconfig,
            prometheus_endpoint,
            auth_token,
            scrape_duration,
            chaos_tests,
            log_level,
            threshold,
            heatmap_cpu_threshold,
            heatmap_mem_threshold,
        )
        namespaces_data = []

        logging.info("Starting Analysis...")

        try:
            # Initialize Prometheus client and fetch utilization data
            file_path, queries = prometheus.fetch_utilization_from_prometheus(
                prometheus_endpoint, auth_token, namespaces, scrape_duration
            )
        except prometheus.PrometheusConnectionError as e:
            logging.error(f"Failed to connect to Prometheus at {prometheus_endpoint}: {str(e)}")
            sys.exit(1)
        except prometheus.PrometheusQueryError as e:
            logging.error(f"Failed to execute Prometheus queries: {str(e)}")
            sys.exit(1)
        except Exception as e:
            logging.error(f"Unexpected error while fetching Prometheus data: {str(e)}")
            sys.exit(1)

        try:
            analysis_data = analysis(
                file_path,
                namespaces,
                chaos_tests,
                threshold,
                heatmap_cpu_threshold,
                heatmap_mem_threshold,
            )
        except Exception as e:
            logging.error(f"Failed to analyze data: {str(e)}")
            sys.exit(1)

        try:
            for namespace in namespaces:
                namespace_data = json_namespace(
                    namespace, queries[namespace], analysis_data[namespace]
                )
                namespaces_data.append(namespace_data)
        except KeyError as e:
            logging.error(f"Failed to process namespace data: {str(e)}")
            sys.exit(1)
        except Exception as e:
            logging.error(f"Unexpected error while processing namespace data: {str(e)}")
            sys.exit(1)

        try:
            make_json_output(inputs, namespaces_data, output_path)
        except Exception as e:
            logging.error(f"Failed to create JSON output: {str(e)}")
            sys.exit(1)

    except Exception as e:
        logging.error(f"An unexpected error occurred: {str(e)}")
    if args.config_file is not None:
        (
            namespaces,
            kubeconfig,
            prometheus_endpoint,
            auth_token,
            scrape_duration,
            chaos_tests,
            log_level,
            threshold,
            heatmap_cpu_threshold,
            heatmap_mem_threshold,
            output_path,
        ) = read_configuration(args.config_file)

    if args.options:
        namespaces = args.namespaces
        kubeconfig = args.kubeconfig
        auth_token = args.token
        scrape_duration = args.scrape_duration
        log_level = args.log_level
        prometheus_endpoint = args.prometheus_endpoint
        output_path = args.json_output_file
        chaos_tests = {
            "MEM": args.MEM,
            "GENERIC": args.GENERIC,
            "CPU": args.CPU,
            "NETWORK": args.NETWORK,
        }
        threshold = args.threshold
        heatmap_mem_threshold = args.mem_threshold
        heatmap_cpu_threshold = args.cpu_threshold

    if log_level not in ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]:
        logging.error(f"{log_level} not a valid log level")
        sys.exit(1)

    logging.basicConfig(level=log_level)

    if output_path is not False:
        if output_path is None:
            output_path = "./recommender_output"
            logging.info(
                f"Path for output file not specified. "
                f"Using default folder {output_path}"
            )
        if not os.path.exists(os.path.expanduser(output_path)):
            logging.error(f"Folder {output_path} for output not found.")
            sys.exit(1)

    logging.info("Loading inputs...")
    inputs = json_inputs(
        namespaces,
        kubeconfig,
        prometheus_endpoint,
        scrape_duration,
        chaos_tests,
        threshold,
        heatmap_cpu_threshold,
        heatmap_mem_threshold,
    )
    namespaces_data = []

    logging.info("Starting Analysis...")

    file_path, queries = prometheus.fetch_utilization_from_prometheus(
        prometheus_endpoint, auth_token, namespaces, scrape_duration
    )

    analysis_data = analysis(
        file_path,
        namespaces,
        chaos_tests,
        threshold,
        heatmap_cpu_threshold,
        heatmap_mem_threshold,
    )

    for namespace in namespaces:
        namespace_data = json_namespace(
            namespace, queries[namespace], analysis_data[namespace]
        )
        namespaces_data.append(namespace_data)
    make_json_output(inputs, namespaces_data, output_path)


if __name__ == "__main__":
    main()