mirror of https://github.com/krkn-chaos/krkn.git (synced 2026-02-17 03:19:54 +00:00)

Compare commits

65 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 7a966a71d0 | |
| | 43d891afd3 | |
| | 27fabfd4af | |
| | 724068a978 | |
| | c9778474f1 | |
| | 6efdb2eb84 | |
| | 0e852da7d4 | |
| | 86d1fda325 | |
| | fc6344176b | |
| | ff469579e9 | |
| | 8cbd1c5e7f | |
| | 5953e53b46 | |
| | 23f1fc044b | |
| | 69e386db53 | |
| | fef77cfc0e | |
| | eb2eabe029 | |
| | f7f1b2dfb0 | |
| | 61356fd70b | |
| | 067969a81a | |
| | 972ac12921 | |
| | ea813748ae | |
| | 782d04c1b1 | |
| | 2fb58f9897 | |
| | 5712721410 | |
| | 5567c06cd0 | |
| | 0ad4c11356 | |
| | f6f686e8fe | |
| | 3a66f8a5a3 | |
| | 585d519687 | |
| | e40fedcd44 | |
| | 1bb5b8ad04 | |
| | 725d58c8ce | |
| | c6058da7a7 | |
| | 06a8ed220c | |
| | 2c6b50bcdc | |
| | ed97c8df2b | |
| | 1baa68bcee | |
| | ab84f09448 | |
| | 6ace3c952b | |
| | cee5259fd3 | |
| | f868000ebd | |
| | d2d80be241 | |
| | da464859c4 | |
| | ef88005985 | |
| | 102bdfdc96 | |
| | b569e6a9d5 | |
| | dba38668b7 | |
| | 39c0152b7b | |
| | 491dc17267 | |
| | b2b5002f45 | |
| | fccd701dee | |
| | 570631ebfc | |
| | 3ab9ca4319 | |
| | 4084ffd9c6 | |
| | 19cc2c047f | |
| | 6197fc6722 | |
| | 2a8ac41ebf | |
| | b4d235d31c | |
| | e4e4620d10 | |
| | a2c24ab7ed | |
| | fe892fd9bf | |
| | 74613fdb4b | |
| | 28c37c9353 | |
| | de0567b067 | |
| | 83486557f1 | |
.github/workflows/build.yml | 21 (vendored)
@@ -1,8 +1,5 @@
 name: Build Krkn
 on:
   push:
     branches:
       - main
   pull_request:

 jobs:
@@ -51,20 +48,4 @@ jobs:
           if-no-files-found: error
       - name: Check CI results
         run: grep Fail CI/results.markdown && false || true
-      - name: Build the Docker images
-        run: docker build --no-cache -t quay.io/redhat-chaos/krkn containers/
-      - name: Login in quay
-        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
-        run: docker login quay.io -u ${QUAY_USER} -p ${QUAY_TOKEN}
-        env:
-          QUAY_USER: ${{ secrets.QUAY_USER_1 }}
-          QUAY_TOKEN: ${{ secrets.QUAY_TOKEN_1 }}
-      - name: Push the Docker images
-        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
-        run: docker push quay.io/redhat-chaos/krkn
-      - name: Rebuild krkn-hub
-        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
-        uses: redhat-chaos/actions/krkn-hub@main
-        with:
-          QUAY_USER: ${{ secrets.QUAY_USER_1 }}
-          QUAY_TOKEN: ${{ secrets.QUAY_TOKEN_1 }}
.github/workflows/docker-image.yml | 30 (new file, vendored)
@@ -0,0 +1,30 @@
name: Docker Image CI
on:
  push:
    branches:
      - main
  pull_request:

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Check out code
        uses: actions/checkout@v3
      - name: Build the Docker images
        run: docker build --no-cache -t quay.io/redhat-chaos/krkn containers/
      - name: Login in quay
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        run: docker login quay.io -u ${QUAY_USER} -p ${QUAY_TOKEN}
        env:
          QUAY_USER: ${{ secrets.QUAY_USER_1 }}
          QUAY_TOKEN: ${{ secrets.QUAY_TOKEN_1 }}
      - name: Push the Docker images
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        run: docker push quay.io/redhat-chaos/krkn
      - name: Rebuild krkn-hub
        if: github.ref == 'refs/heads/main' && github.event_name == 'push'
        uses: redhat-chaos/actions/krkn-hub@main
        with:
          QUAY_USER: ${{ secrets.QUAY_USER_1 }}
          QUAY_TOKEN: ${{ secrets.QUAY_TOKEN_1 }}
.github/workflows/functional_tests.yaml | 2 (vendored)
@@ -34,7 +34,7 @@ jobs:
       - name: Check out Kraken
         uses: actions/checkout@v3
       - name: Checkout Pull Request
-        run: hub pr checkout ${{ github.event.issue.number }}
+        run: gh pr checkout ${{ github.event.issue.number }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Install OC CLI
@@ -29,3 +29,15 @@ tunings:
   wait_duration: 6 # Duration to wait between each chaos scenario.
   iterations: 1 # Number of times to execute the scenarios.
   daemon_mode: False # Iterations are set to infinity which means that the kraken will cause chaos forever.
+  telemetry:
+    enabled: False # enable/disables the telemetry collection feature
+    api_url: https://ulnmf9xv7j.execute-api.us-west-2.amazonaws.com/production # telemetry service endpoint
+    username: username # telemetry service username
+    password: password # telemetry service password
+    prometheus_backup: True # enables/disables prometheus data collection
+    full_prometheus_backup: False # if set to False only the /prometheus/wal folder will be downloaded.
+    backup_threads: 5 # number of telemetry download/upload threads
+    archive_path: /tmp # local path where the archive files will be temporarily stored
+    max_retries: 0 # maximum number of upload retries (if 0 will retry forever)
+    run_tag: '' # if set, this will be appended to the run folder in the bucket (useful to group the runs)
+    archive_size: 10000 # the size of the prometheus data archive in KB; the lower the archive size, the more archive files will be produced and uploaded in parallel by backup_threads
@@ -51,7 +51,7 @@ spec:
           claimName: kraken-test-pvc
       containers:
         - name: kraken-test-container
-          image: 'image-registry.openshift-image-registry.svc:5000/openshift/httpd:latest'
+          image: 'quay.io/centos7/httpd-24-centos7:latest'
          volumeMounts:
            - mountPath: "/home/krake-dir/"
              name: kraken-test-pv
README.md | 10
@@ -1,5 +1,6 @@
 # Krkn aka Kraken
 [](https://quay.io/repository/redhat-chaos/krkn?tab=tags&tag=latest)

@@ -29,6 +30,8 @@ The guide is hosted at https://redhat-chaos.github.io/krkn.
 ### How to Get Started
 Instructions on how to setup, configure and run Kraken can be found at [Installation](docs/installation.md).

+You may consider utilizing the chaos recommendation tool prior to initiating the chaos runs to profile the application service(s) under test. This tool discovers a list of Krkn scenarios with a high probability of causing failures or disruptions to your application service(s). The tool can be accessed at [Chaos-Recommender](utils/chaos_recommender/README.md).
+
 See the [getting started doc](docs/getting_started.md) for details on how to get started with your own custom scenario or on editing current scenarios for your specific usage.

 After installation, refer back to the below sections for supported scenarios and how to tweak the kraken config to load them on your cluster.
@@ -59,12 +62,13 @@ Instructions on how to setup the config and the options supported can be found at

 Scenario type | Kubernetes | OpenShift
 --------------------------- | ------------- |--------------------|
 [Pod Scenarios](docs/pod_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: |
 [Pod Network Scenarios](docs/pod_network_scenarios.md) | :x: | :heavy_check_mark: |
 [Container Scenarios](docs/container_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: |
 [Node Scenarios](docs/node_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: |
 [Time Scenarios](docs/time_scenarios.md) | :x: | :heavy_check_mark: |
 [Hog Scenarios: CPU, Memory](docs/arcaflow_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: |
 [Cluster Shut Down Scenarios](docs/cluster_shut_down_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: |
 [Namespace Scenarios](docs/namespace_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: |
 [Service Disruption Scenarios](docs/service_disruption_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: |
 [Zone Outage Scenarios](docs/zone_outage.md) | :heavy_check_mark: | :heavy_check_mark: |
 [Application_outages](docs/application_outages.md) | :heavy_check_mark: | :heavy_check_mark: |
 [PVC scenario](docs/pvc_scenario.md) | :heavy_check_mark: | :heavy_check_mark: |
@@ -105,11 +109,13 @@ Information on enabling and leveraging this feature can be found [here](docs/SLO

 Kraken supports injecting faults into [Open Cluster Management (OCM)](https://open-cluster-management.io/) and [Red Hat Advanced Cluster Management for Kubernetes (ACM)](https://www.redhat.com/en/technologies/management/advanced-cluster-management) managed clusters through [ManagedCluster Scenarios](docs/managedcluster_scenarios.md).

 ### Blogs and other useful resources
 - Blog post on introduction to Kraken: https://www.openshift.com/blog/introduction-to-kraken-a-chaos-tool-for-openshift/kubernetes
 - Discussion and demo on how Kraken can be leveraged to ensure OpenShift is reliable, performant and scalable: https://www.youtube.com/watch?v=s1PvupI5sD0&ab_channel=OpenShift
 - Blog post emphasizing the importance of making Chaos part of Performance and Scale runs to mimic the production environments: https://www.openshift.com/blog/making-chaos-part-of-kubernetes/openshift-performance-and-scalability-tests
 - Blog post on findings from Chaos test runs: https://cloud.redhat.com/blog/openshift/kubernetes-chaos-stories
+- Discussion with CNCF TAG App Delivery on Krkn workflow, features and addition to CNCF sandbox: [Github](https://github.com/cncf/sandbox/issues/44), [Tracker](https://github.com/cncf/tag-app-delivery/issues/465), [recording](https://www.youtube.com/watch?v=nXQkBFK_MWc&t=722s)

 ### Roadmap
@@ -127,5 +133,5 @@ Please read [this file](CI/README.md#adding-a-test-case) for more information

 ### Community
 Key Members(slack_usernames/full name): paigerube14/Paige Rubendall, mffiedler/Mike Fiedler, ravielluri/Naga Ravi Chaitanya Elluri.
-* [**#sig-scalability on Kubernetes Slack**](https://kubernetes.slack.com)
+* [**#krkn on Kubernetes Slack**](https://kubernetes.slack.com)
 * [**#forum-chaos on CoreOS Slack internal to Red Hat**](https://coreos.slack.com)
ROADMAP.md | 18
@@ -2,10 +2,14 @@

 Following is a list of enhancements that we are planning to add support for in Krkn. Of course any help/contributions are greatly appreciated.

-- [Ability to run multiple chaos scenarios in parallel under load to mimic real world outages](https://github.com/redhat-chaos/krkn/issues/424)
-- [Centralized storage for chaos experiments artifacts](https://github.com/redhat-chaos/krkn/issues/423)
-- [Support for causing DNS outages](https://github.com/redhat-chaos/krkn/issues/394)
-- [Support for pod level network traffic shaping](https://github.com/redhat-chaos/krkn/issues/393)
-- [Ability to visualize the metrics that are being captured by Kraken and stored in Elasticsearch](https://github.com/redhat-chaos/krkn/issues/124)
-- Support for running all the scenarios of Kraken on Kubernetes distribution - see https://github.com/redhat-chaos/krkn/issues/185, https://github.com/redhat-chaos/krkn/issues/186
-- Continue to improve [Chaos Testing Guide](https://redhat-chaos.github.io/krkn) in terms of adding best practices, test environment recommendations and scenarios to make sure the OpenShift platform, as well as the applications running on top of it, are resilient and performant under chaotic conditions.
+- [ ] [Ability to run multiple chaos scenarios in parallel under load to mimic real world outages](https://github.com/redhat-chaos/krkn/issues/424)
+- [x] [Centralized storage for chaos experiments artifacts](https://github.com/redhat-chaos/krkn/issues/423)
+- [ ] [Support for causing DNS outages](https://github.com/redhat-chaos/krkn/issues/394)
+- [x] [Chaos recommender](https://github.com/redhat-chaos/krkn/tree/main/utils/chaos-recommender) to suggest scenarios having probability of impacting the service under test using profiling results
+- [ ] Chaos AI integration to improve and automate test coverage
+- [x] [Support for pod level network traffic shaping](https://github.com/redhat-chaos/krkn/issues/393)
+- [ ] [Ability to visualize the metrics that are being captured by Kraken and stored in Elasticsearch](https://github.com/redhat-chaos/krkn/issues/124)
+- [ ] Support for running all the scenarios of Kraken on Kubernetes distribution - see https://github.com/redhat-chaos/krkn/issues/185, https://github.com/redhat-chaos/krkn/issues/186
+- [ ] Continue to improve [Chaos Testing Guide](https://redhat-chaos.github.io/krkn) in terms of adding best practices, test environment recommendations and scenarios to make sure the OpenShift platform, as well as the applications running on top of it, are resilient and performant under chaotic conditions.
+- [ ] [Switch documentation references to Kubernetes](https://github.com/redhat-chaos/krkn/issues/495)
+- [ ] [OCP and Kubernetes functionalities segregation](https://github.com/redhat-chaos/krkn/issues/497)
@@ -16,6 +16,42 @@
   description: etcd leader changes observed
   severity: warning

+- expr: (last_over_time(etcd_mvcc_db_total_size_in_bytes[5m]) / last_over_time(etcd_server_quota_backend_bytes[5m]))*100 > 95
+  description: etcd cluster database is running full.
+  severity: critical
+
+- expr: (last_over_time(etcd_mvcc_db_total_size_in_use_in_bytes[5m]) / last_over_time(etcd_mvcc_db_total_size_in_bytes[5m])) < 0.5
+  description: etcd database size in use is less than 50% of the actual allocated storage.
+  severity: warning
+
+- expr: rate(etcd_server_proposals_failed_total{job=~".*etcd.*"}[15m]) > 5
+  description: etcd cluster has high number of proposal failures.
+  severity: warning
+
+- expr: histogram_quantile(0.99, rate(etcd_network_peer_round_trip_time_seconds_bucket{job=~".*etcd.*"}[5m])) > 0.15
+  description: etcd cluster member communication is slow.
+  severity: warning
+
+- expr: histogram_quantile(0.99, sum(rate(grpc_server_handling_seconds_bucket{job=~".*etcd.*", grpc_method!="Defragment", grpc_type="unary"}[5m])) without(grpc_type)) > 0.15
+  description: etcd grpc requests are slow.
+  severity: critical
+
+- expr: 100 * sum(rate(grpc_server_handled_total{job=~".*etcd.*", grpc_code=~"Unknown|FailedPrecondition|ResourceExhausted|Internal|Unavailable|DataLoss|DeadlineExceeded"}[5m])) without (grpc_type, grpc_code) / sum(rate(grpc_server_handled_total{job=~".*etcd.*"}[5m])) without (grpc_type, grpc_code) > 5
+  description: etcd cluster has high number of failed grpc requests.
+  severity: critical
+
+- expr: etcd_server_has_leader{job=~".*etcd.*"} == 0
+  description: etcd cluster has no leader.
+  severity: warning
+
+- expr: sum(up{job=~".*etcd.*"} == bool 1) without (instance) < ((count(up{job=~".*etcd.*"}) without (instance) + 1) / 2)
+  description: etcd cluster has insufficient number of members.
+  severity: warning
+
+- expr: max without (endpoint) ( sum without (instance) (up{job=~".*etcd.*"} == bool 0) or count without (To) ( sum without (instance) (rate(etcd_network_peer_sent_failures_total{job=~".*etcd.*"}[120s])) > 0.01 )) > 0
+  description: etcd cluster members are down.
+  severity: warning
+
 # API server
 - expr: avg_over_time(histogram_quantile(0.99, sum(irate(apiserver_request_duration_seconds_bucket{apiserver="kube-apiserver", verb=~"POST|PUT|DELETE|PATCH", subresource!~"log|exec|portforward|attach|proxy"}[2m])) by (le, resource, verb))[10m:]) > 1
   description: 10 minutes avg. 99th mutating API call latency for {{$labels.verb}}/{{$labels.resource}} higher than 1 second. {{$value}}s
@@ -40,7 +76,7 @@
 - expr: up{namespace=~"openshift-etcd"} == 0
   description: "{{$labels.namespace}}/{{$labels.pod}} down"
-  severity: error
+  severity: warning

 - expr: up{namespace=~"openshift-.*(kube-controller-manager|scheduler|controller-manager|sdn|ovn-kubernetes|dns)"} == 0
   description: "{{$labels.namespace}}/{{$labels.pod}} down"
@@ -6,40 +6,41 @@ kraken:
     signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer docs/signal.md for more details
     signal_address: 0.0.0.0 # Signal listening address
     port: 8081 # Signal port
-    chaos_scenarios: # List of policies/chaos scenarios to load
-        - arcaflow_scenarios:
-            - scenarios/arcaflow/cpu-hog/input.yaml
-            - scenarios/arcaflow/memory-hog/input.yaml
-        - container_scenarios: # List of chaos pod scenarios to load
+    chaos_scenarios:
+        # List of policies/chaos scenarios to load
+        - arcaflow_scenarios:
+            - scenarios/arcaflow/cpu-hog/input.yaml
+            - scenarios/arcaflow/memory-hog/input.yaml
+            - scenarios/arcaflow/io-hog/input.yaml
+        - application_outages:
+            - scenarios/openshift/app_outage.yaml
+        - container_scenarios: # List of chaos pod scenarios to load
             - -    scenarios/openshift/container_etcd.yml
-        - plugin_scenarios:
+        - plugin_scenarios:
             - scenarios/openshift/etcd.yml
             - scenarios/openshift/regex_openshift_pod_kill.yml
             - scenarios/openshift/vmware_node_scenarios.yml
             - scenarios/openshift/ibmcloud_node_scenarios.yml
             - scenarios/openshift/network_chaos_ingress.yml
             - scenarios/openshift/pod_network_outage.yml
+            - scenarios/openshift/prom_kill.yml
-        - node_scenarios: # List of chaos node scenarios to load
+        - node_scenarios: # List of chaos node scenarios to load
             - scenarios/openshift/node_scenarios_example.yml
-        - plugin_scenarios:
+        - plugin_scenarios:
             - scenarios/openshift/openshift-apiserver.yml
             - scenarios/openshift/openshift-kube-apiserver.yml
-        - time_scenarios: # List of chaos time scenarios to load
+        - time_scenarios: # List of chaos time scenarios to load
             - scenarios/openshift/time_scenarios_example.yml
-        - cluster_shut_down_scenarios:
+        - cluster_shut_down_scenarios:
             - -    scenarios/openshift/cluster_shut_down_scenario.yml
               -    scenarios/openshift/post_action_shut_down.py
-        - namespace_scenarios:
+        - service_disruption_scenarios:
             - -    scenarios/openshift/regex_namespace.yaml
             - -    scenarios/openshift/ingress_namespace.yaml
               -    scenarios/openshift/post_action_namespace.py
-        - zone_outages:
+        - zone_outages:
             - scenarios/openshift/zone_outage.yaml
-        - application_outages:
-            - scenarios/openshift/app_outage.yaml
-        - pvc_scenarios:
+        - pvc_scenarios:
             - scenarios/openshift/pvc_scenario.yaml
-        - network_chaos:
+        - network_chaos:
             - scenarios/openshift/network_chaos.yaml

 cerberus:
@@ -58,9 +59,34 @@ performance_monitoring:
   prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
   uuid: # uuid for the run is generated by default if not set
   enable_alerts: False # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
-  alert_profile: config/alerts # Path to alert profile with the prometheus queries
+  alert_profile: config/alerts # Path or URL to alert profile with the prometheus queries
+  check_critical_alerts: False # When enabled will check prometheus for critical alerts firing post chaos
 tunings:
   wait_duration: 60 # Duration to wait between each chaos scenario
   iterations: 1 # Number of times to execute the scenarios
   daemon_mode: False # Iterations are set to infinity which means that the kraken will cause chaos forever
+telemetry:
+  enabled: False # enable/disables the telemetry collection feature
+  api_url: https://ulnmf9xv7j.execute-api.us-west-2.amazonaws.com/production # telemetry service endpoint
+  username: username # telemetry service username
+  password: password # telemetry service password
+  prometheus_backup: True # enables/disables prometheus data collection
+  full_prometheus_backup: False # if set to False only the /prometheus/wal folder will be downloaded.
+  backup_threads: 5 # number of telemetry download/upload threads
+  archive_path: /tmp # local path where the archive files will be temporarily stored
+  max_retries: 0 # maximum number of upload retries (if 0 will retry forever)
+  run_tag: '' # if set, this will be appended to the run folder in the bucket (useful to group the runs)
+  archive_size: 500000 # the size of the prometheus data archive in KB. The lower the archive size,
+                       # the higher the number of archive files that will be produced and uploaded (and processed by backup_threads
+                       # simultaneously).
+                       # For an unstable/slow connection it is better to keep this value low,
+                       # increasing the number of backup_threads; in this way, on upload failure, the retry will happen only on the
+                       # failed chunk without affecting the whole upload.
+  logs_backup: True
+  logs_filter_patterns:
+    - "(\\w{3}\\s\\d{1,2}\\s\\d{2}:\\d{2}:\\d{2}\\.\\d+).+" # Sep 9 11:20:36.123425532
+    - "kinit (\\d+/\\d+/\\d+\\s\\d{2}:\\d{2}:\\d{2})\\s+" # kinit 2023/09/15 11:20:36 log
+    - "(\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d+Z).+" # 2023-09-15T11:20:36.123425532Z log
+  oc_cli_path: /usr/bin/oc # optional, if not specified it will be searched for in $PATH
+  events_backup: True # enables/disables cluster events collection
@@ -13,6 +13,7 @@ kraken:
     - plugin_scenarios: # List of chaos pod scenarios to load
        - scenarios/openshift/etcd.yml
        - scenarios/openshift/regex_openshift_pod_kill.yml
+       - scenarios/openshift/prom_kill.yml
     - node_scenarios: # List of chaos node scenarios to load
        - scenarios/openshift/node_scenarios_example.yml
     - plugin_scenarios:
@@ -20,13 +21,10 @@ kraken:
        - scenarios/openshift/openshift-kube-apiserver.yml
     - time_scenarios: # List of chaos time scenarios to load
        - scenarios/openshift/time_scenarios_example.yml
-    - litmus_scenarios: # List of litmus scenarios to load
-       - -    https://hub.litmuschaos.io/api/chaos/1.10.0?file=charts/generic/node-cpu-hog/rbac.yaml
-         -    scenarios/openshift/node_cpu_hog_engine.yaml
     - cluster_shut_down_scenarios:
        - -    scenarios/openshift/cluster_shut_down_scenario.yml
          -    scenarios/openshift/post_action_shut_down.py
-    - namespace_scenarios:
+    - service_disruption_scenarios:
        - scenarios/openshift/regex_namespace.yaml
        - scenarios/openshift/ingress_namespace.yaml
     - zone_outages:
@@ -60,3 +58,27 @@ tunings:
   wait_duration: 60 # Duration to wait between each chaos scenario
   iterations: 1 # Number of times to execute the scenarios
   daemon_mode: False # Iterations are set to infinity which means that the kraken will cause chaos forever
+
+telemetry:
+  enabled: False # enable/disables the telemetry collection feature
+  api_url: https://ulnmf9xv7j.execute-api.us-west-2.amazonaws.com/production # telemetry service endpoint
+  username: username # telemetry service username
+  password: password # telemetry service password
+  prometheus_backup: True # enables/disables prometheus data collection
+  full_prometheus_backup: False # if set to False only the /prometheus/wal folder will be downloaded.
+  backup_threads: 5 # number of telemetry download/upload threads
+  archive_path: /tmp # local path where the archive files will be temporarily stored
+  max_retries: 0 # maximum number of upload retries (if 0 will retry forever)
+  run_tag: '' # if set, this will be appended to the run folder in the bucket (useful to group the runs)
+  archive_size: 500000 # the size of the prometheus data archive in KB. The lower the archive size,
+                       # the higher the number of archive files that will be produced and uploaded (and processed by backup_threads
+                       # simultaneously).
+                       # For an unstable/slow connection it is better to keep this value low,
+                       # increasing the number of backup_threads; in this way, on upload failure, the retry will happen only on the
+                       # failed chunk without affecting the whole upload.
+  logs_backup: True
+  logs_filter_patterns:
+    - "(\\w{3}\\s\\d{1,2}\\s\\d{2}:\\d{2}:\\d{2}\\.\\d+).+" # Sep 9 11:20:36.123425532
+    - "kinit (\\d+/\\d+/\\d+\\s\\d{2}:\\d{2}:\\d{2})\\s+" # kinit 2023/09/15 11:20:36 log
+    - "(\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d+Z).+" # 2023-09-15T11:20:36.123425532Z log
+  oc_cli_path: /usr/bin/oc # optional, if not specified it will be searched for in $PATH
@@ -139,6 +139,39 @@ metrics:
   - query: histogram_quantile(0.99,sum(rate(etcd_request_duration_seconds_bucket[2m])) by (le,operation,apiserver)) > 0
     metricName: P99APIEtcdRequestLatency

+  - query: sum(grpc_server_started_total{namespace="openshift-etcd",grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{namespace="openshift-etcd",grpc_service="etcdserverpb.Watch",grpc_type="bidi_stream"})
+    metricName: ActiveWatchStreams
+
+  - query: sum(grpc_server_started_total{namespace="openshift-etcd",grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"}) - sum(grpc_server_handled_total{namespace="openshift-etcd",grpc_service="etcdserverpb.Lease",grpc_type="bidi_stream"})
+    metricName: ActiveLeaseStreams
+
+  - query: sum(rate(etcd_debugging_snap_save_total_duration_seconds_sum{namespace="openshift-etcd"}[2m]))
+    metricName: snapshotSaveLatency
+
+  - query: sum(rate(etcd_server_heartbeat_send_failures_total{namespace="openshift-etcd"}[2m]))
+    metricName: HeartBeatFailures
+
+  - query: sum(rate(etcd_server_health_failures{namespace="openshift-etcd"}[2m]))
+    metricName: HealthFailures
+
+  - query: sum(rate(etcd_server_slow_apply_total{namespace="openshift-etcd"}[2m]))
+    metricName: SlowApplies
+
+  - query: sum(rate(etcd_server_slow_read_indexes_total{namespace="openshift-etcd"}[2m]))
+    metricName: SlowIndexRead
+
+  - query: sum(etcd_server_proposals_pending)
+    metricName: PendingProposals
+
+  - query: histogram_quantile(1.0, sum(rate(etcd_debugging_mvcc_db_compaction_pause_duration_milliseconds_bucket[1m])) by (le, instance))
+    metricName: CompactionMaxPause
+
+  - query: sum by (instance) (apiserver_storage_objects)
+    metricName: etcdTotalObjectCount
+
+  - query: topk(500, max by(resource) (apiserver_storage_objects))
+    metricName: etcdTopObectCount
+
   # Cluster metrics
   - query: count(kube_namespace_created)
     metricName: namespaceCount
config/recommender_config.yaml | 29 (new file)
@@ -0,0 +1,29 @@
application: openshift-etcd
namespace: openshift-etcd
labels: app=openshift-etcd
kubeconfig: ~/.kube/config.yaml
prometheus_endpoint: <Prometheus_Endpoint>
auth_token: <Auth_Token>
scrape_duration: 10m
chaos_library: "kraken"
log_level: INFO

# for output purpose only do not change if not needed
chaos_tests:
  GENERIC:
    - pod_failure
    - container_failure
    - node_failure
    - zone_outage
    - time_skew
    - namespace_failure
    - power_outage
  CPU:
    - node_cpu_hog
  NETWORK:
    - application_outage
    - node_network_chaos
    - pod_network_chaos
  MEM:
    - node_memory_hog
    - pvc_disk_fill
@@ -1,29 +1,28 @@
 # Dockerfile for kraken

 FROM quay.io/openshift/origin-tests:latest as origintests

 FROM mcr.microsoft.com/azure-cli:latest as azure-cli

-FROM quay.io/centos/centos:stream9
+FROM registry.access.redhat.com/ubi8/ubi:latest

 LABEL org.opencontainers.image.authors="Red Hat OpenShift Chaos Engineering"

 ENV KUBECONFIG /root/.kube/config

 # Copy OpenShift CLI, Kubernetes CLI from origin-tests image
 COPY --from=origintests /usr/bin/oc /usr/bin/oc
 COPY --from=origintests /usr/bin/kubectl /usr/bin/kubectl

 # Copy azure client binary from azure-cli image
 COPY --from=azure-cli /usr/local/bin/az /usr/bin/az

 # Install dependencies
-RUN yum install epel-release -y && \
-    yum install -y git python39 python3-pip jq gettext && \
+RUN yum install -y git python39 python3-pip jq gettext wget && \
     python3.9 -m pip install -U pip && \
-    git clone https://github.com/redhat-chaos/krkn.git --branch v1.3.1 /root/kraken && \
+    git clone https://github.com/redhat-chaos/krkn.git --branch v1.5.0 /root/kraken && \
     mkdir -p /root/.kube && cd /root/kraken && \
-    pip3.9 install -r requirements.txt
+    pip3.9 install -r requirements.txt && \
+    pip3.9 install virtualenv && \
+    wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O /usr/bin/yq && chmod +x /usr/bin/yq
+
+# Get Kubernetes and OpenShift clients from stable releases
+WORKDIR /tmp
+RUN wget https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable/openshift-client-linux.tar.gz && tar -xvf openshift-client-linux.tar.gz && cp oc /usr/local/bin/oc && cp kubectl /usr/local/bin/kubectl

 WORKDIR /root/kraken
@@ -2,24 +2,28 @@

 FROM ppc64le/centos:8

-MAINTAINER Red Hat OpenShift Performance and Scale
+FROM mcr.microsoft.com/azure-cli:latest as azure-cli
+
+LABEL org.opencontainers.image.authors="Red Hat OpenShift Chaos Engineering"

 ENV KUBECONFIG /root/.kube/config

-RUN curl -L -o kubernetes-client-linux-ppc64le.tar.gz https://dl.k8s.io/v1.19.0/kubernetes-client-linux-ppc64le.tar.gz \
-    && tar xf kubernetes-client-linux-ppc64le.tar.gz && mv kubernetes/client/bin/kubectl /usr/bin/ && rm -rf kubernetes-client-linux-ppc64le.tar.gz
-
-RUN curl -L -o openshift-client-linux.tar.gz https://mirror.openshift.com/pub/openshift-v4/ppc64le/clients/ocp/stable/openshift-client-linux.tar.gz \
-    && tar xf openshift-client-linux.tar.gz -C /usr/bin && rm -rf openshift-client-linux.tar.gz
+# Copy azure client binary from azure-cli image
+COPY --from=azure-cli /usr/local/bin/az /usr/bin/az

 # Install dependencies
-RUN yum install epel-release -y && \
-    yum install -y git python36 python3-pip gcc libffi-devel python36-devel openssl-devel gcc-c++ make jq gettext && \
-    git clone https://github.com/redhat-chaos/krkn.git --branch main /root/kraken && \
-    mkdir -p /root/.kube && cd /root/kraken && \
-    pip3 install cryptography==3.3.2 && \
-    pip3 install -r requirements.txt setuptools==40.3.0 urllib3==1.25.4
+RUN yum install -y git python39 python3-pip jq gettext wget && \
+    python3.9 -m pip install -U pip && \
+    git clone https://github.com/redhat-chaos/krkn.git --branch v1.5.0 /root/kraken && \
+    mkdir -p /root/.kube && cd /root/kraken && \
+    pip3.9 install -r requirements.txt && \
+    pip3.9 install virtualenv && \
+    wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O /usr/bin/yq && chmod +x /usr/bin/yq
+
+# Get Kubernetes and OpenShift clients from stable releases
+WORKDIR /tmp
+RUN wget https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable/openshift-client-linux.tar.gz && tar -xvf openshift-client-linux.tar.gz && cp oc /usr/local/bin/oc && cp kubectl /usr/local/bin/kubectl

 WORKDIR /root/kraken

-ENTRYPOINT python3 run_kraken.py --config=config/config.yaml
+ENTRYPOINT python3.9 run_kraken.py --config=config/config.yaml
@@ -1,27 +1,20 @@

 ### Kraken image

 Container image gets automatically built by quay.io at [Kraken image](https://quay.io/redhat-chaos/krkn).

 ### Run containerized version

 Refer to [instructions](https://github.com/redhat-chaos/krkn/blob/main/docs/installation.md#run-containerized-version) for information on how to run the containerized version of kraken.

 ### Run Custom Kraken Image

 Refer to [instructions](https://github.com/redhat-chaos/krkn/blob/main/containers/build_own_image-README.md) for information on how to run a custom containerized version of kraken using podman.

-### Kraken as a KubeApp
+### Kraken as a KubeApp ( Unsupported and not recommended )

 #### GENERAL NOTES:

@@ -50,4 +43,4 @@ To run containerized Kraken as a Kubernetes/OpenShift Deployment, follow these steps:
 8. Create a ConfigMap named scenarios-kube-config using `kubectl create configmap scenarios-kube-config --from-file=<path_to_kraken>/scenarios/kube`
 9. Create a service account to run the kraken pod `kubectl create serviceaccount useroot`.
 10. In Openshift, add privileges to service account and execute `oc adm policy add-scc-to-user privileged -z useroot`.
-11. Create a Job using `kubectl apply -f <path_to_kraken>/containers/kraken.yml` and monitor the status using `oc get jobs` and `oc get pods`.
+11. Create a Job using `kubectl apply -f <path_to_kraken>/containers/kraken.yml` and monitor the status using `oc get jobs` and `oc get pods`.
@@ -1,13 +1,13 @@
 # Building your own Kraken image

-1. Git clone the Kraken repository using `git clone https://github.com/openshift-scale/kraken.git`.
+1. Git clone the Kraken repository using `git clone https://github.com/redhat-chaos/krkn.git`.
 2. Modify the python code and yaml files to address your needs.
 3. Execute `podman build -t <new_image_name>:latest .` in the containers directory within kraken to build an image from a Dockerfile.
 4. Execute `podman run --detach --name <container_name> <new_image_name>:latest` to start a container based on your new image.

 # Building the Kraken image on IBM Power (ppc64le)

-1. Git clone the Kraken repository using `git clone https://github.com/cloud-bulldozer/kraken.git` on an IBM Power Systems server.
+1. Git clone the Kraken repository using `git clone https://github.com/redhat-chaos/krkn.git` on an IBM Power Systems server.
 2. Modify the python code and yaml files to address your needs.
 3. Execute `podman build -t <new_image_name>:latest -f Dockerfile-ppc64le` in the containers directory within kraken to build an image from the Dockerfile for Power.
 4. Execute `podman run --detach --name <container_name> <new_image_name>:latest` to start a container based on your new image.
@@ -23,7 +23,7 @@ performance_monitoring:
 ```

 #### Alert profile
-A couple of [alert profiles](https://github.com/redhat-chaos/krkn/tree/main/config) [alerts](https://github.com/redhat-chaos/krkn/blob/main/config/alerts) are shipped by default and can be tweaked to add more queries to alert on. The following are a few alerts examples:
+A couple of [alert profiles](https://github.com/redhat-chaos/krkn/tree/main/config) [alerts](https://github.com/redhat-chaos/krkn/blob/main/config/alerts) are shipped by default and can be tweaked to add more queries to alert on. Users can provide a URL or a path to the file in the [config](https://github.com/redhat-chaos/krkn/blob/main/config/config.yaml). The following are a few alerts examples:

 ```
 - expr: avg_over_time(histogram_quantile(0.99, rate(etcd_disk_wal_fsync_duration_seconds_bucket[2m]))[5m:]) > 0.01
 ```
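For instance, a monitoring section pointing at a remote profile could look like the sketch below; the `enable_alerts` and `alert_profile` keys come from the config/config.yaml diff above, while the URL itself is only illustrative:

```
performance_monitoring:
  enable_alerts: True    # evaluate the alert profile queries after the chaos run
  # alert_profile now accepts a URL as well as a local path:
  alert_profile: https://raw.githubusercontent.com/redhat-chaos/krkn/main/config/alerts
```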
@@ -7,6 +7,7 @@ The engine uses containers to execute plugins and runs them either locally in Do
 #### Hog scenarios:
 - [CPU Hog](arcaflow_scenarios/cpu_hog.md)
 - [Memory Hog](arcaflow_scenarios/memory_hog.md)
+- [I/O Hog](arcaflow_scenarios/io_hog.md)

 ### Prerequisites
@@ -64,4 +65,6 @@ Each step is represented by a container that will be executed from the deployer
 Note that we provide the scenarios as a template, but they can be manipulated to define more complex workflows.
 To have more details regarding the arcaflow workflows architecture and syntax it is suggested to refer to the [Arcaflow Documentation](https://arcalot.io/arcaflow/).

This edit is no longer in the quay image.
Working on a fix in ticket: https://issues.redhat.com/browse/CHAOS-494
This will affect all versions 4.12 and higher of OpenShift.
docs/arcaflow_scenarios/io_hog.md | 21 (new file)
@@ -0,0 +1,21 @@
# I/O Hog
This scenario is based on the arcaflow [arcaflow-plugin-stressng](https://github.com/arcalot/arcaflow-plugin-stressng) plugin.
The purpose of this scenario is to create disk pressure on a particular node of the Kubernetes/OpenShift cluster for a given time span.
The scenario allows attaching a node path to the pod as a `hostPath` volume.
To enable this plugin add the pointer to the scenario input file `scenarios/arcaflow/io-hog/input.yaml` as described in the Usage section.
This scenario takes a list of objects named `input_list` with the following properties:

- **kubeconfig :** *string* the kubeconfig needed by the deployer to deploy the sysbench plugin in the target cluster
- **namespace :** *string* the namespace where the scenario container will be deployed
  **Note:** this parameter will be automatically filled by kraken if the `kubeconfig_path` property is correctly set
- **node_selector :** *key-value map* the node label that will be used as `nodeSelector` by the pod to target a specific cluster node
- **duration :** *string* stop the stress test after N seconds. One can also specify the units of time in seconds, minutes, hours, days or years with the suffix s, m, h, d or y.
- **target_pod_folder :** *string* the path in the pod where the volume is mounted
- **target_pod_volume :** *object* the `hostPath` volume definition in the [Kubernetes/OpenShift](https://docs.openshift.com/container-platform/3.11/install_config/persistent_storage/using_hostpath.html) format, that will be attached to the pod as a volume
- **io_write_bytes :** *string* writes N bytes for each hdd process. The size can be expressed as % of free space on the file system or in units of Bytes, KBytes, MBytes and GBytes using the suffix b, k, m or g
- **io_block_size :** *string* size of each write in bytes. Size can be from 1 byte to 4m.

To perform several load tests in the same run simultaneously (e.g. stress two or more nodes in the same run), add another item to the `input_list` with the same properties (and, if needed, different values, e.g. different node_selectors to schedule the pod on different nodes), as sketched below. To reduce (or increase) the parallelism, change the `parallelism` value in the `workload.yaml` file.
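A minimal sketch of such an `input.yaml`, assuming only the properties documented above; the node labels, paths and sizes are illustrative, and `kubeconfig`/`namespace` are omitted since kraken fills them in when `kubeconfig_path` is set:

```
input_list:
  - node_selector:
      kubernetes.io/hostname: worker-0   # illustrative node label
    duration: 60s
    target_pod_folder: /hog-data
    target_pod_volume:                   # hostPath volume attached to the pod
      name: hog-volume
      hostPath:
        path: /tmp/hog-data
    io_write_bytes: 10m                  # 10 MBytes per hdd process
    io_block_size: 1m
  - node_selector:                       # second item: stress another node in parallel
      kubernetes.io/hostname: worker-1
    duration: 60s
    target_pod_folder: /hog-data
    target_pod_volume:
      name: hog-volume
      hostPath:
        path: /tmp/hog-data
    io_write_bytes: 10m
    io_block_size: 1m
```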
@@ -8,14 +8,14 @@ The following are the components of Kubernetes/OpenShift for which a basic chaos scenario config exists today.

```
scenarios:
-- name: "<Name of scenario>"
+- name: "<name of scenario>"
  namespace: "<specific namespace>" # can specify "*" if you want to find in all namespaces
  label_selector: "<label of pod(s)>"
  container_name: "<specific container name>" # This is optional, can take out and will kill all containers in all pods found under namespace and label
  pod_names: # This is optional, can take out and will select all pods with given namespace and label
  - <pod_name>
  count: <number of containers to disrupt, default=1>
-  action: <Action to run. For example kill 1 ( hang up ) or kill 9. Default is set to kill 1>
+  action: <kill signal to run. For example 1 ( hang up ) or 9. Default is set to 1>
  expected_recovery_time: <number of seconds to wait for container to be running again> (defaults to 120 seconds)
```
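Filling in the template gives a scenario file like the hypothetical sketch below, loosely modeled on the `scenarios/openshift/container_etcd.yml` entry referenced in the configs above; the label selector and container name are assumptions:

```
scenarios:
- name: "kill etcd container"
  namespace: "openshift-etcd"      # target only the etcd namespace
  label_selector: "k8s-app=etcd"   # assumed etcd pod label
  container_name: "etcd"
  count: 1
  action: 1                        # SIGHUP; use 9 for SIGKILL
  expected_recovery_time: 60
```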
@@ -3,14 +3,19 @@
 The following ways are supported to run Kraken:

 - Standalone python program through Git.
-- Containerized version using either Podman or Docker as the runtime.
-- Kubernetes or OpenShift deployment.
+- Containerized version using either Podman or Docker as the runtime via [Krkn-hub](https://github.com/redhat-chaos/krkn-hub)
+- Kubernetes or OpenShift deployment ( unsupported )

 **NOTE**: It is recommended to run Kraken external to the cluster ( Standalone or Containerized ) hitting the Kubernetes/OpenShift API as running it internal to the cluster might be disruptive to itself and also might not report back the results if the chaos leads to cluster's API server instability.

 **NOTE**: To run Kraken on Power (ppc64le) architecture, build and run a containerized version by following the instructions given [here](https://github.com/redhat-chaos/krkn/blob/main/containers/build_own_image-README.md).

+**NOTE**: Helper functions for interactions in Krkn are part of [krkn-lib](https://github.com/redhat-chaos/krkn-lib). Please feel free to reuse and expand them as you see fit when adding a new scenario or expanding the capabilities of the current supported scenarios.

 ### Git

 #### Clone the repository
@@ -35,26 +40,12 @@ $ python3.9 run_kraken.py --config <config_file_location>
 ```

 ### Run containerized version
-Assuming that the latest docker ( 17.05 or greater with multi-build support ) is installed on the host, run:
-```
-$ docker pull quay.io/redhat-chaos/krkn:latest
-$ docker run --name=kraken --net=host -v <path_to_kubeconfig>:/root/.kube/config:Z -v <path_to_kraken_config>:/root/kraken/config/config.yaml:Z -d quay.io/redhat-chaos/krkn:latest
-$ docker run --name=kraken --net=host -v <path_to_kubeconfig>:/root/.kube/config:Z -v <path_to_kraken_config>:/root/kraken/config/config.yaml:Z -v <path_to_scenarios_directory>:/root/kraken/scenarios:Z -d quay.io/redhat-chaos/krkn:latest #custom or tweaked scenario configs
-$ docker logs -f kraken
-```
-
-Similarly, podman can be used to achieve the same:
-```
-$ podman pull quay.io/redhat-chaos/krkn
-$ podman run --name=kraken --net=host -v <path_to_kubeconfig>:/root/.kube/config:Z -v <path_to_kraken_config>:/root/kraken/config/config.yaml:Z -d quay.io/redhat-chaos/krkn:latest
-$ podman run --name=kraken --net=host -v <path_to_kubeconfig>:/root/.kube/config:Z -v <path_to_kraken_config>:/root/kraken/config/config.yaml:Z -v <path_to_scenarios_directory>:/root/kraken/scenarios:Z -d quay.io/redhat-chaos/krkn:latest #custom or tweaked scenario configs
-$ podman logs -f kraken
-```
-
-If you want to build your own kraken image see [here](https://github.com/redhat-chaos/krkn/blob/main/containers/build_own_image-README.md)
+[Krkn-hub](https://github.com/redhat-chaos/krkn-hub) is a wrapper that allows running Krkn chaos scenarios via podman or docker runtime with scenario parameters/configuration defined as environment variables.
+
+Refer to [instructions](https://github.com/redhat-chaos/krkn-hub#supported-chaos-scenarios) to get started.

-### Run Kraken as a Kubernetes deployment
+### Run Kraken as a Kubernetes deployment ( unsupported option - standalone or containerized deployers are recommended )
 Refer to [Instructions](https://github.com/redhat-chaos/krkn/blob/main/containers/README.md) on how to deploy and run Kraken as a Kubernetes/OpenShift deployment.
@@ -1,3 +1,5 @@
+## Pod network Scenarios
+
 ### Pod outage
 Scenario to block the traffic ( Ingress/Egress ) of a pod matching the labels for the specified duration of time to understand the behavior of the service/other services which depend on it during downtime. This helps with planning the requirements accordingly, be it improving the timeouts or tweaking the alerts etc.
 With the current network policies, it is not possible to explicitly block ports which are enabled by allowed network policy rule. This chaos scenario addresses this issue by using OVS flow rules to block ports related to the pod. It supports OpenShiftSDN and OVNKubernetes based networks.
@@ -13,3 +15,23 @@ With the current network policies, it is not possible to explicitly block ports
       - 8443 # Blocks 8443, Default [], i.e. all ports.
     label_selector: 'component=ui' # Blocks access to openshift console
 ```
+### Pod Network shaping
+Scenario to introduce network latency, packet loss, and bandwidth restriction in the Pod's network interface. The purpose of this scenario is to observe faults caused by random variations in the network.
+
+##### Sample scenario config for egress traffic shaping (using plugin)
+```
+- id: pod_egress_shaping
+  config:
+    namespace: openshift-console # Required - Namespace of the pod to which filter need to be applied.
+    label_selector: 'component=ui' # Applies traffic shaping to access openshift console.
+    network_params:
+      latency: 500ms # Add 500ms latency to egress traffic from the pod.
+```
+
+##### Steps
+- Pick the pods to introduce the network anomaly either from label_selector or pod_name.
+- Identify the pod interface name on the node.
+- Set traffic shaping config on pod's interface using tc and netem.
+- Wait for the duration time.
+- Remove traffic shaping config on pod's interface.
+- Remove the job that spawned the pod.
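A hypothetical variant of the sample above exercising all three fault types named in the description; only `latency` is confirmed by the sample, so the `loss` and `bandwidth` key names are assumptions:

```
- id: pod_egress_shaping
  config:
    namespace: openshift-console
    label_selector: 'component=ui'
    network_params:
      latency: 500ms    # add latency to egress traffic
      loss: '0.02'      # assumed key: drop 2% of egress packets
      bandwidth: 10mbit # assumed key: cap egress bandwidth
```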
@@ -1,6 +1,6 @@
-### Delete Namespace Scenarios
+### Service Disruption Scenarios (Previously Delete Namespace Scenario)

-Using this type of scenario configuration one is able to delete a specific namespace, or a namespace matching a certain regex string.
+Using this type of scenario configuration one is able to delete crucial objects in a specific namespace, or a namespace matching a certain regex string.

 Configuration Options:

@@ -27,12 +27,20 @@ scenarios:
     sleep: 15
 ```

+**NOTE:** Many openshift namespaces have finalizers built in that protect the namespace from being fully deleted: see the documentation [here](https://kubernetes.io/blog/2021/05/14/using-finalizers-to-control-deletion/).
+The namespaces that do have finalizers enabled will be left in a terminating state, but all the pods running in that namespace will get deleted.
+
 ### Steps

 This scenario will select a namespace (or multiple, depending on the configuration), kill all of the below object types in that namespace, and wait for them to be Running again in the post action:
 1. Services
 2. Daemonsets
 3. Statefulsets
 4. Replicasets
 5. Deployments

 #### Post Action

-In all scenarios we do a post chaos check to wait and verify the specific component.
+We do a post chaos check to wait and verify that the specific objects in each namespace are Ready.

 Here there are two options:

@@ -47,8 +55,8 @@ See [scenarios/post_action_namespace.py](https://github.com/cloud-bulldozer/krak
 ```

-2. Allow kraken to wait and check the killed namespaces become 'Active' again. Kraken keeps a list of the specific
-   namespaces that were killed to verify all that were affected recover properly.
+1. Allow kraken to wait and check that all killed objects in the namespaces become 'Running' again. Kraken keeps a list of the specific
+   objects in namespaces that were killed to verify all that were affected recover properly.

 ```
 wait_time: <seconds to wait for namespace to recover>
 ```
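A hypothetical scenario file in this shape; the `namespace` and `runs` keys are assumptions based on the `regex_namespace.yaml` file referenced in the configs above, and only `sleep` appears in the truncated sample:

```
scenarios:
  - namespace: "^.*ingress.*$"   # assumed: regex selecting the namespace(s) to disrupt
    runs: 1                      # assumed: how many times to run the disruption
    sleep: 15                    # seconds to wait between runs
```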
@@ -4,25 +4,43 @@ import time
 import kraken.cerberus.setup as cerberus
 from jinja2 import Template
 import kraken.invoke.command as runcommand
+from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
+from krkn_lib.models.telemetry import ScenarioTelemetry
+from krkn_lib.utils.functions import get_yaml_item_value, log_exception


 # Reads the scenario config, applies and deletes a network policy to
 # block the traffic for the specified duration
-def run(scenarios_list, config, wait_duration):
+def run(scenarios_list, config, wait_duration, telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]):
     failed_post_scenarios = ""
+    scenario_telemetries: list[ScenarioTelemetry] = []
+    failed_scenarios = []
     for app_outage_config in scenarios_list:
+        scenario_telemetry = ScenarioTelemetry()
+        scenario_telemetry.scenario = app_outage_config
+        scenario_telemetry.startTimeStamp = time.time()
+        telemetry.set_parameters_base64(scenario_telemetry, app_outage_config)
         if len(app_outage_config) > 1:
-            with open(app_outage_config, "r") as f:
-                app_outage_config_yaml = yaml.full_load(f)
-                scenario_config = app_outage_config_yaml["application_outage"]
-                pod_selector = scenario_config.get("pod_selector", "{}")
-                traffic_type = scenario_config.get("block", "[Ingress, Egress]")
-                namespace = scenario_config.get("namespace", "")
-                duration = scenario_config.get("duration", 60)
+            try:
+                with open(app_outage_config, "r") as f:
+                    app_outage_config_yaml = yaml.full_load(f)
+                    scenario_config = app_outage_config_yaml["application_outage"]
+                    pod_selector = get_yaml_item_value(
+                        scenario_config, "pod_selector", "{}"
+                    )
+                    traffic_type = get_yaml_item_value(
+                        scenario_config, "block", "[Ingress, Egress]"
+                    )
+                    namespace = get_yaml_item_value(
+                        scenario_config, "namespace", ""
+                    )
+                    duration = get_yaml_item_value(
+                        scenario_config, "duration", 60
+                    )

-                start_time = int(time.time())
+                    start_time = int(time.time())

-                network_policy_template = """---
+                    network_policy_template = """---
 apiVersion: networking.k8s.io/v1
 kind: NetworkPolicy
 metadata:
@@ -31,28 +49,38 @@ spec:
   podSelector:
     matchLabels: {{ pod_selector }}
   policyTypes: {{ traffic_type }}
 """
-                t = Template(network_policy_template)
-                rendered_spec = t.render(pod_selector=pod_selector, traffic_type=traffic_type)
-                # Write the rendered template to a file
-                with open("kraken_network_policy.yaml", "w") as f:
-                    f.write(rendered_spec)
-                # Block the traffic by creating network policy
-                logging.info("Creating the network policy")
-                runcommand.invoke(
-                    "kubectl create -f %s -n %s --validate=false" % ("kraken_network_policy.yaml", namespace)
-                )
+                    t = Template(network_policy_template)
+                    rendered_spec = t.render(pod_selector=pod_selector, traffic_type=traffic_type)
+                    # Write the rendered template to a file
+                    with open("kraken_network_policy.yaml", "w") as f:
+                        f.write(rendered_spec)
+                    # Block the traffic by creating network policy
+                    logging.info("Creating the network policy")
+                    runcommand.invoke(
+                        "kubectl create -f %s -n %s --validate=false" % ("kraken_network_policy.yaml", namespace)
+                    )

-                # wait for the specified duration
-                logging.info("Waiting for the specified duration in the config: %s" % (duration))
-                time.sleep(duration)
+                    # wait for the specified duration
+                    logging.info("Waiting for the specified duration in the config: %s" % (duration))
+                    time.sleep(duration)

-                # unblock the traffic by deleting the network policy
-                logging.info("Deleting the network policy")
-                runcommand.invoke("kubectl delete -f %s -n %s" % ("kraken_network_policy.yaml", namespace))
+                    # unblock the traffic by deleting the network policy
+                    logging.info("Deleting the network policy")
+                    runcommand.invoke("kubectl delete -f %s -n %s" % ("kraken_network_policy.yaml", namespace))

-                logging.info("End of scenario. Waiting for the specified duration: %s" % (wait_duration))
-                time.sleep(wait_duration)
+                    logging.info("End of scenario. Waiting for the specified duration: %s" % (wait_duration))
+                    time.sleep(wait_duration)

-                end_time = int(time.time())
-                cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
+                    end_time = int(time.time())
+                    cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
+            except Exception as e:
+                scenario_telemetry.exitStatus = 1
+                failed_scenarios.append(app_outage_config)
+                log_exception(app_outage_config)
+            else:
+                scenario_telemetry.exitStatus = 0
+            scenario_telemetry.endTimeStamp = time.time()
+            scenario_telemetries.append(scenario_telemetry)
+    return failed_scenarios, scenario_telemetries
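A scenario file consumed by this loop would look something like the sketch below; the key names (`pod_selector`, `block`, `namespace`, `duration`) are exactly the ones the code reads from the `application_outage` section, while the values are illustrative:

```
application_outage:
  namespace: openshift-console   # namespace in which the NetworkPolicy is created
  pod_selector: {component: ui}  # matchLabels rendered into the NetworkPolicy template
  block: [Ingress, Egress]       # policyTypes to block
  duration: 60                   # seconds to keep traffic blocked
```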
@@ -1,27 +1,37 @@
+import time
 import arcaflow
 import os
 import yaml
 import logging
 import sys
 from pathlib import Path
 from typing import List
 from .context_auth import ContextAuth
+from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
+from krkn_lib.models.telemetry import ScenarioTelemetry


-def run(scenarios_list: List[str], kubeconfig_path: str):
+def run(scenarios_list: List[str], kubeconfig_path: str, telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]):
+    scenario_telemetries: list[ScenarioTelemetry] = []
+    failed_post_scenarios = []
     for scenario in scenarios_list:
+        scenario_telemetry = ScenarioTelemetry()
+        scenario_telemetry.scenario = scenario
+        scenario_telemetry.startTimeStamp = time.time()
+        telemetry.set_parameters_base64(scenario_telemetry, scenario)
         engine_args = build_args(scenario)
-        run_workflow(engine_args, kubeconfig_path)
+        status_code = run_workflow(engine_args, kubeconfig_path)
+        scenario_telemetry.endTimeStamp = time.time()
+        scenario_telemetry.exitStatus = status_code
+        scenario_telemetries.append(scenario_telemetry)
+        if status_code != 0:
+            failed_post_scenarios.append(scenario)
+    return failed_post_scenarios, scenario_telemetries


-def run_workflow(engine_args: arcaflow.EngineArgs, kubeconfig_path: str):
+def run_workflow(engine_args: arcaflow.EngineArgs, kubeconfig_path: str) -> int:
     set_arca_kubeconfig(engine_args, kubeconfig_path)
     exit_status = arcaflow.run(engine_args)
     if exit_status != 0:
         logging.error(
             f"failed to run arcaflow scenario {engine_args.input}"
         )
-        sys.exit(exit_status)
+    return exit_status


 def build_args(input_file: str) -> arcaflow.EngineArgs:
kraken/chaos_recommender/__init__.py | 3 (new file)
@@ -0,0 +1,3 @@
from .analysis import *
from .kraken_tests import *
from .prometheus import *
kraken/chaos_recommender/analysis.py | 90 (new file)
@@ -0,0 +1,90 @@
import logging

import pandas as pd
import kraken.chaos_recommender.kraken_tests as kraken_tests
import time

threshold = .7  # Adjust the threshold as needed
heatmap_cpu_threshold = .5
heatmap_mem_threshold = .5

KRAKEN_TESTS_PATH = "./kraken_chaos_tests.txt"

# Placeholder, this should be done with topology
def return_critical_services():
    return ["web", "cart"]


def load_telemetry_data(file_path):
    data = pd.read_csv(file_path, delimiter=r"\s+")
    return data

def calculate_zscores(data):
    zscores = pd.DataFrame()
    zscores["Service"] = data["service"]
    zscores["CPU"] = (data["CPU"] - data["CPU"].mean()) / data["CPU"].std()
    zscores["Memory"] = (data["MEM"] - data["MEM"].mean()) / data["MEM"].std()
    zscores["Network"] = (data["NETWORK"] - data["NETWORK"].mean()) / data["NETWORK"].std()
    return zscores

def identify_outliers(data):
    outliers_cpu = data[data["CPU"] > threshold]["Service"].tolist()
    outliers_memory = data[data["Memory"] > threshold]["Service"].tolist()
    outliers_network = data[data["Network"] > threshold]["Service"].tolist()

    return outliers_cpu, outliers_memory, outliers_network


def get_services_above_heatmap_threshold(dataframe, cpu_threshold, mem_threshold):
    # Filter the DataFrame based on CPU_HEATMAP and MEM_HEATMAP thresholds
    filtered_df = dataframe[((dataframe['CPU']/dataframe['CPU_LIMITS']) > cpu_threshold)]
    # Get the lists of services
    cpu_services = filtered_df['service'].tolist()

    filtered_df = dataframe[((dataframe['MEM']/dataframe['MEM_LIMITS']) > mem_threshold)]
    mem_services = filtered_df['service'].tolist()

    return cpu_services, mem_services


def analysis(file_path, chaos_tests_config):
    # Load the telemetry data from file
    data = load_telemetry_data(file_path)

    # Calculate Z-scores for CPU, Memory, and Network columns
    zscores = calculate_zscores(data)

    # Identify outliers
    outliers_cpu, outliers_memory, outliers_network = identify_outliers(zscores)
    cpu_services, mem_services = get_services_above_heatmap_threshold(data, heatmap_cpu_threshold, heatmap_mem_threshold)

    # Display the identified outliers
    logging.info("======================== Profiling ==================================")
    logging.info(f"CPU outliers: {outliers_cpu}")
    logging.info(f"Memory outliers: {outliers_memory}")
    logging.info(f"Network outliers: {outliers_network}")
    logging.info("===================== HeatMap Analysis ==============================")

    if cpu_services:
        logging.info(f"Services with CPU_HEATMAP above threshold: {cpu_services}")
    else:
        logging.info("There are no services that are using significant CPU compared to their assigned limits (infinite in case no limits are set).")
    if mem_services:
        logging.info(f"Services with MEM_HEATMAP above threshold: {mem_services}")
    else:
        logging.info("There are no services that are using significant MEMORY compared to their assigned limits (infinite in case no limits are set).")
    time.sleep(2)
    logging.info("======================= Recommendations =============================")
    if cpu_services:
        logging.info(f"Recommended tests for {str(cpu_services)} :\n {chaos_tests_config['CPU']}")
        logging.info("\n")
    if mem_services:
        logging.info(f"Recommended tests for {str(mem_services)} :\n {chaos_tests_config['MEM']}")
        logging.info("\n")

    if outliers_network:
        logging.info(f"Recommended tests for {str(outliers_network)} :\n {chaos_tests_config['NETWORK']}")
        logging.info("\n")

    logging.info("\n")
    logging.info("Please check data in utilisation.txt for further analysis")
30
kraken/chaos_recommender/kraken_tests.py
Normal file
@@ -0,0 +1,30 @@
def get_entries_by_category(filename, category):
    # Read the file
    with open(filename, 'r') as file:
        content = file.read()

    # Split the content into sections based on the square brackets
    sections = content.split('\n\n')

    # Define the categories
    valid_categories = ['CPU', 'NETWORK', 'MEM', 'GENERIC']

    # Validate the provided category
    if category not in valid_categories:
        return []

    # Find the section corresponding to the specified category
    target_section = None
    for section in sections:
        if section.startswith(f"[{category}]"):
            target_section = section
            break

    # If the category section was not found, return an empty list
    if target_section is None:
        return []

    # Extract the entries from the category section
    entries = [entry.strip() for entry in target_section.split('\n') if entry and not entry.startswith('[')]

    return entries
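
For reference, a hypothetical kraken_chaos_tests.txt in the format the parser above expects: sections separated by a single blank line, each headed by [CATEGORY], one entry per line (the test names are illustrative):

    [CPU]
    node-cpu-hog
    pod-cpu-hog

    [MEM]
    node-memory-hog

    get_entries_by_category("kraken_chaos_tests.txt", "CPU")  # -> ['node-cpu-hog', 'pod-cpu-hog']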
96
kraken/chaos_recommender/prometheus.py
Normal file
@@ -0,0 +1,96 @@
import logging

import pandas
from prometheus_api_client import PrometheusConnect
import pandas as pd
import urllib3


saved_metrics_path = "./utilisation.txt"

def convert_data_to_dataframe(data, label):
    df = pd.DataFrame()
    df['service'] = [item['metric']['pod'] for item in data]
    df[label] = [item['value'][1] for item in data]

    return df


def convert_data(data, service):

    result = {}
    for entry in data:
        pod_name = entry['metric']['pod']
        value = entry['value'][1]
        result[pod_name] = value
    return result.get(service, '100000000000')  # pods whose limits are not defined can take as many resources as they want, so assign a very high value

def save_utilization_to_file(cpu_data, cpu_limits_result, mem_data, mem_limits_result, network_data, filename):
    df_cpu = convert_data_to_dataframe(cpu_data, "CPU")
    merged_df = pd.DataFrame(columns=['service','CPU','CPU_LIMITS','MEM','MEM_LIMITS','NETWORK'])
    services = df_cpu.service.unique()
    logging.info(services)

    for s in services:

        new_row_df = pd.DataFrame( {"service": s, "CPU" : convert_data(cpu_data, s),
                                    "CPU_LIMITS" : convert_data(cpu_limits_result, s),
                                    "MEM" : convert_data(mem_data, s), "MEM_LIMITS" : convert_data(mem_limits_result, s),
                                    "NETWORK" : convert_data(network_data, s)}, index=[0])
        merged_df = pd.concat([merged_df, new_row_df], ignore_index=True)



    # Convert columns to string
    merged_df['CPU'] = merged_df['CPU'].astype(str)
    merged_df['MEM'] = merged_df['MEM'].astype(str)
    merged_df['CPU_LIMITS'] = merged_df['CPU_LIMITS'].astype(str)
    merged_df['MEM_LIMITS'] = merged_df['MEM_LIMITS'].astype(str)
    merged_df['NETWORK'] = merged_df['NETWORK'].astype(str)

    # Extract integer part before the decimal point
    merged_df['CPU'] = merged_df['CPU'].str.split('.').str[0]
    merged_df['MEM'] = merged_df['MEM'].str.split('.').str[0]
    merged_df['CPU_LIMITS'] = merged_df['CPU_LIMITS'].str.split('.').str[0]
    merged_df['MEM_LIMITS'] = merged_df['MEM_LIMITS'].str.split('.').str[0]
    merged_df['NETWORK'] = merged_df['NETWORK'].str.split('.').str[0]

    merged_df.to_csv(filename, sep='\t', index=False)

def fetch_utilization_from_prometheus(prometheus_endpoint, auth_token, namespace, scrape_duration):
    urllib3.disable_warnings()
    prometheus = PrometheusConnect(url=prometheus_endpoint, headers={'Authorization':'Bearer {}'.format(auth_token)}, disable_ssl=True)

    # Fetch CPU utilization
    cpu_query = 'sum (rate (container_cpu_usage_seconds_total{image!="", namespace="%s"}[%s])) by (pod) *1000' % (namespace,scrape_duration)
    logging.info(cpu_query)
    cpu_result = prometheus.custom_query(cpu_query)
    cpu_data = cpu_result


    cpu_limits_query = '(sum by (pod) (kube_pod_container_resource_limits{resource="cpu", namespace="%s"}))*1000' %(namespace)
    logging.info(cpu_limits_query)
    cpu_limits_result = prometheus.custom_query(cpu_limits_query)


    mem_query = 'sum by (pod) (avg_over_time(container_memory_usage_bytes{image!="", namespace="%s"}[%s]))' % (namespace, scrape_duration)
    logging.info(mem_query)
    mem_result = prometheus.custom_query(mem_query)
    mem_data = mem_result

    mem_limits_query = 'sum by (pod) (kube_pod_container_resource_limits{resource="memory", namespace="%s"}) ' %(namespace)
    logging.info(mem_limits_query)
    mem_limits_result = prometheus.custom_query(mem_limits_query)


    network_query = 'sum by (pod) ((avg_over_time(container_network_transmit_bytes_total{namespace="%s"}[%s])) + \
        (avg_over_time(container_network_receive_bytes_total{namespace="%s"}[%s])))' % (namespace, scrape_duration, namespace, scrape_duration)
    network_result = prometheus.custom_query(network_query)
    logging.info(network_query)
    network_data = network_result


    save_utilization_to_file(cpu_data, cpu_limits_result, mem_data, mem_limits_result, network_data, saved_metrics_path)
    return saved_metrics_path
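
A minimal sketch wiring the recommender modules together; the endpoint, token, and namespace values are placeholders, and it assumes the three files above are imported into one scope:

    # hypothetical driver; connection details are placeholders
    metrics_file = fetch_utilization_from_prometheus(
        "https://prometheus-k8s.example.com", "sha256~TOKEN", "robot-shop", "5m")
    chaos_tests = {c: get_entries_by_category(KRAKEN_TESTS_PATH, c) for c in ("CPU", "MEM", "NETWORK")}
    analysis(metrics_file, chaos_tests)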


@@ -3,7 +3,10 @@ import logging
import urllib.request
import shutil
import sys
import requests
import tempfile
import kraken.prometheus.client as prometheus
from urllib.parse import urlparse


def setup(url):
@@ -72,6 +75,14 @@ def alerts(distribution, prometheus_url, prometheus_bearer_token, start_time, en
    Scrapes metrics defined in the profile from Prometheus and alerts based on the severity defined
    """

    is_url = urlparse(alert_profile)
    if is_url.scheme and is_url.netloc:
        response = requests.get(alert_profile)
        temp_alerts = tempfile.NamedTemporaryFile()
        temp_alerts.write(response.content)
        temp_alerts.flush()
        alert_profile = temp_alerts.name

    if not prometheus_url:
        if distribution == "openshift":
            logging.info("Looks like prometheus_url is not defined, trying to use the default instance on the cluster")
@@ -79,7 +90,7 @@ def alerts(distribution, prometheus_url, prometheus_bearer_token, start_time, en
                distribution, prometheus_url, prometheus_bearer_token
            )
        else:
            logging.error("Looks like proemtheus url is not defined, exiting")
            logging.error("Looks like prometheus url is not defined, exiting")
            sys.exit(1)
    command = (
        "./kube-burner check-alerts "

@@ -1,14 +1,13 @@
import kraken.invoke.command as runcommand
import krkn_lib_kubernetes
import logging
import time
import sys
import requests
import yaml
import kraken.cerberus.setup as cerberus
from krkn_lib.k8s import KrknKubernetes


# krkn_lib_kubernetes
# krkn_lib
# Inject litmus scenarios defined in the config
def run(
    scenarios_list,
@@ -16,7 +15,7 @@ def run(
    litmus_uninstall,
    wait_duration,
    litmus_namespace,
    kubecli: krkn_lib_kubernetes.KrknLibKubernetes
    kubecli: KrknKubernetes
):
    # Loop to run the scenarios starts here
    for l_scenario in scenarios_list:
@@ -94,8 +93,8 @@ def deploy_all_experiments(version_string, namespace):
    )


# krkn_lib_kubernetes
def wait_for_initialized(engine_name, experiment_name, namespace, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
# krkn_lib
def wait_for_initialized(engine_name, experiment_name, namespace, kubecli: KrknKubernetes):

    chaos_engine = kubecli.get_litmus_chaos_object(kind='chaosengine', name=engine_name,
                                                   namespace=namespace).engineStatus
@@ -119,13 +118,13 @@ def wait_for_initialized(engine_name, experiment_name, namespace, kubecli: krkn_
    return True


# krkn_lib_kubernetes
# krkn_lib
def wait_for_status(
    engine_name,
    expected_status,
    experiment_name,
    namespace,
    kubecli: krkn_lib_kubernetes.KrknLibKubernetes
    kubecli: KrknKubernetes
):

    if expected_status == "running":
@@ -156,8 +155,8 @@ def wait_for_status(


# Check status of experiment
# krkn_lib_kubernetes
def check_experiment(engine_name, experiment_name, namespace, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
# krkn_lib
def check_experiment(engine_name, experiment_name, namespace, kubecli: KrknKubernetes):

    wait_response = wait_for_status(engine_name, "running", experiment_name, namespace, kubecli)

@@ -183,8 +182,8 @@ def check_experiment(engine_name, experiment_name, namespace, kubecli: krkn_lib_


# Delete all chaos engines in a given namespace
# krkn_lib_kubernetes
def delete_chaos_experiments(namespace, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
# krkn_lib
def delete_chaos_experiments(namespace, kubecli: KrknKubernetes):

    if kubecli.check_if_namespace_exists(namespace):
        chaos_exp_exists = runcommand.invoke_no_exit("kubectl get chaosexperiment")
@@ -194,8 +193,8 @@ def delete_chaos_experiments(namespace, kubecli: krkn_lib_kubernetes.KrknLibKube


# Delete all chaos engines in a given namespace
# krkn_lib_kubernetes
def delete_chaos(namespace, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
# krkn_lib
def delete_chaos(namespace, kubecli:KrknKubernetes):

    if kubecli.check_if_namespace_exists(namespace):
        logging.info("Deleting all litmus run objects")
@@ -209,8 +208,8 @@ def delete_chaos(namespace, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
        logging.info(namespace + " namespace doesn't exist")


# krkn_lib_kubernetes
def uninstall_litmus(version, litmus_namespace, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
# krkn_lib
def uninstall_litmus(version, litmus_namespace, kubecli: KrknKubernetes):

    if kubecli.check_if_namespace_exists(litmus_namespace):
        logging.info("Uninstalling Litmus operator")

@@ -1,14 +1,14 @@
import random
import logging
import krkn_lib_kubernetes
from krkn_lib.k8s import KrknKubernetes

# krkn_lib_kubernetes
# krkn_lib
# Pick a random managedcluster with specified label selector
def get_managedcluster(
    managedcluster_name,
    label_selector,
    instance_kill_count,
    kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
    kubecli: KrknKubernetes):

    if managedcluster_name in kubecli.list_killable_managedclusters():
        return [managedcluster_name]
@@ -30,12 +30,12 @@ def get_managedcluster(


# Wait until the managedcluster status becomes Available
# krkn_lib_kubernetes
def wait_for_available_status(managedcluster, timeout, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
# krkn_lib
def wait_for_available_status(managedcluster, timeout, kubecli: KrknKubernetes):
    kubecli.watch_managedcluster_status(managedcluster, "True", timeout)


# Wait until the managedcluster status becomes Not Available
# krkn_lib_kubernetes
def wait_for_unavailable_status(managedcluster, timeout, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
# krkn_lib
def wait_for_unavailable_status(managedcluster, timeout, kubecli: KrknKubernetes):
    kubecli.watch_managedcluster_status(managedcluster, "Unknown", timeout)

@@ -4,19 +4,17 @@ import time
import logging
import sys
import yaml
import html
import krkn_lib_kubernetes
import kraken.managedcluster_scenarios.common_managedcluster_functions as common_managedcluster_functions

from krkn_lib.k8s import KrknKubernetes

class GENERAL:
    def __init__(self):
        pass

# krkn_lib_kubernetes
# krkn_lib
class managedcluster_scenarios():
    kubecli: krkn_lib_kubernetes.KrknLibKubernetes
    def __init__(self, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
    kubecli: KrknKubernetes
    def __init__(self, kubecli: KrknKubernetes):
        self.kubecli = kubecli
        self.general = GENERAL()


@@ -1,20 +1,20 @@
import yaml
import logging
import time
import krkn_lib_kubernetes
from kraken.managedcluster_scenarios.managedcluster_scenarios import managedcluster_scenarios
import kraken.managedcluster_scenarios.common_managedcluster_functions as common_managedcluster_functions
import kraken.cerberus.setup as cerberus

from krkn_lib.k8s import KrknKubernetes
from krkn_lib.utils.functions import get_yaml_item_value

# Get the managedcluster scenarios object of specified cloud type
# krkn_lib_kubernetes
def get_managedcluster_scenario_object(managedcluster_scenario, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
# krkn_lib
def get_managedcluster_scenario_object(managedcluster_scenario, kubecli: KrknKubernetes):
    return managedcluster_scenarios(kubecli)

# Run defined scenarios
# krkn_lib_kubernetes
def run(scenarios_list, config, wait_duration, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
# krkn_lib
def run(scenarios_list, config, wait_duration, kubecli: KrknKubernetes):
    for managedcluster_scenario_config in scenarios_list:
        with open(managedcluster_scenario_config, "r") as f:
            managedcluster_scenario_config = yaml.full_load(f)
@@ -32,14 +32,22 @@ def run(scenarios_list, config, wait_duration, kubecli: krkn_lib_kubernetes.Krkn


# Inject the specified managedcluster scenario
# krkn_lib_kubernetes
def inject_managedcluster_scenario(action, managedcluster_scenario, managedcluster_scenario_object, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
# krkn_lib
def inject_managedcluster_scenario(action, managedcluster_scenario, managedcluster_scenario_object, kubecli: KrknKubernetes):
    # Get the managedcluster scenario configurations
    run_kill_count = managedcluster_scenario.get("runs", 1)
    instance_kill_count = managedcluster_scenario.get("instance_count", 1)
    managedcluster_name = managedcluster_scenario.get("managedcluster_name", "")
    label_selector = managedcluster_scenario.get("label_selector", "")
    timeout = managedcluster_scenario.get("timeout", 120)
    run_kill_count = get_yaml_item_value(
        managedcluster_scenario, "runs", 1
    )
    instance_kill_count = get_yaml_item_value(
        managedcluster_scenario, "instance_count", 1
    )
    managedcluster_name = get_yaml_item_value(
        managedcluster_scenario, "managedcluster_name", ""
    )
    label_selector = get_yaml_item_value(
        managedcluster_scenario, "label_selector", ""
    )
    timeout = get_yaml_item_value(managedcluster_scenario, "timeout", 120)
    # Get the managedcluster to apply the scenario
    if managedcluster_name:
        managedcluster_name_list = managedcluster_name.split(",")
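
The repeated .get() → get_yaml_item_value() swap above is easier to see with a toy comparison; assuming (from krkn_lib) that the helper also falls back to the default when a key is present but holds a None/empty value, which plain dict.get does not:

    # illustrative only; the None-handling behaviour of get_yaml_item_value is an assumption
    cfg = {"timeout": None}
    cfg.get("timeout", 120)                   # -> None
    get_yaml_item_value(cfg, "timeout", 120)  # -> 120 (assumed)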

@@ -1,107 +0,0 @@
import time
import random
import logging
import krkn_lib_kubernetes
import kraken.cerberus.setup as cerberus
import kraken.post_actions.actions as post_actions
import yaml
import sys


# krkn_lib_kubernetes
def run(
    scenarios_list,
    config,
    wait_duration,
    failed_post_scenarios,
    kubeconfig_path,
    kubecli: krkn_lib_kubernetes.KrknLibKubernetes
):

    for scenario_config in scenarios_list:
        if len(scenario_config) > 1:
            pre_action_output = post_actions.run(kubeconfig_path, scenario_config[1])
        else:
            pre_action_output = ""
        with open(scenario_config[0], "r") as f:
            scenario_config_yaml = yaml.full_load(f)
            for scenario in scenario_config_yaml["scenarios"]:
                scenario_namespace = scenario.get("namespace", "")
                scenario_label = scenario.get("label_selector", "")
                if scenario_namespace is not None and scenario_namespace.strip() != "":
                    if scenario_label is not None and scenario_label.strip() != "":
                        logging.error("You can only have namespace or label set in your namespace scenario")
                        logging.error(
                            "Current scenario config has namespace '%s' and label selector '%s'"
                            % (scenario_namespace, scenario_label)
                        )
                        logging.error(
                            "Please set either namespace to blank ('') or label_selector to blank ('') to continue"
                        )
                        sys.exit(1)
                delete_count = scenario.get("delete_count", 1)
                run_count = scenario.get("runs", 1)
                run_sleep = scenario.get("sleep", 10)
                wait_time = scenario.get("wait_time", 30)
                killed_namespaces = []
                start_time = int(time.time())
                for i in range(run_count):
                    namespaces = kubecli.check_namespaces([scenario_namespace], scenario_label)
                    for j in range(delete_count):
                        if len(namespaces) == 0:
                            logging.error(
                                "Couldn't delete %s namespaces, not enough namespaces matching %s with label %s"
                                % (str(run_count), scenario_namespace, str(scenario_label))
                            )
                            sys.exit(1)
                        selected_namespace = namespaces[random.randint(0, len(namespaces) - 1)]
                        killed_namespaces.append(selected_namespace)
                        try:
                            kubecli.delete_namespace(selected_namespace)
                            logging.info("Delete on namespace %s was successful" % str(selected_namespace))
                        except Exception as e:
                            logging.info("Delete on namespace %s was unsuccessful" % str(selected_namespace))
                            logging.info("Namespace action error: " + str(e))
                            sys.exit(1)
                        namespaces.remove(selected_namespace)
                        logging.info("Waiting %s seconds between namespace deletions" % str(run_sleep))
                        time.sleep(run_sleep)

                logging.info("Waiting for the specified duration: %s" % wait_duration)
                time.sleep(wait_duration)
                if len(scenario_config) > 1:
                    try:
                        failed_post_scenarios = post_actions.check_recovery(
                            kubeconfig_path, scenario_config, failed_post_scenarios, pre_action_output
                        )
                    except Exception as e:
                        logging.error("Failed to run post action checks: %s" % e)
                        sys.exit(1)
                else:
                    failed_post_scenarios = check_active_namespace(killed_namespaces, wait_time, kubecli)
                end_time = int(time.time())
                cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)

# krkn_lib_kubernetes
def check_active_namespace(killed_namespaces, wait_time, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
    active_namespace = []
    timer = 0
    while timer < wait_time and killed_namespaces:
        for namespace_name in killed_namespaces:
            if namespace_name in kubecli.list_namespaces():
                response = kubecli.get_namespace_status(namespace_name).strip()
                if response != "Active":
                    continue
                else:
                    active_namespace.append(namespace_name)
        killed_namespaces = set(killed_namespaces) - set(active_namespace)
        if len(killed_namespaces) == 0:
            return []

        timer += 5
        time.sleep(5)
        logging.info("Waiting 5 seconds for namespaces to become active")

    logging.error("Namespaces are still not active after waiting " + str(wait_time) + "seconds")
    logging.error("Non active namespaces " + str(killed_namespaces))
    return killed_namespaces
@@ -1,99 +1,130 @@
import yaml
import logging
import time
import sys
import os
import random
import krkn_lib_kubernetes
from jinja2 import Environment, FileSystemLoader
import kraken.cerberus.setup as cerberus
import kraken.node_actions.common_node_functions as common_node_functions
from jinja2 import Environment, FileSystemLoader
from krkn_lib.k8s import KrknKubernetes
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
from krkn_lib.models.telemetry import ScenarioTelemetry
from krkn_lib.utils.functions import get_yaml_item_value, log_exception


# krkn_lib_kubernetes
# krkn_lib
# Reads the scenario config and introduces traffic variations in Node's host network interface.
def run(scenarios_list, config, wait_duration, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
def run(scenarios_list, config, wait_duration, kubecli: KrknKubernetes, telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]):
    failed_post_scenarios = ""
    logging.info("Running the Network Chaos tests")
    failed_post_scenarios = ""
    scenario_telemetries: list[ScenarioTelemetry] = []
    failed_scenarios = []
    for net_config in scenarios_list:
        with open(net_config, "r") as file:
            param_lst = ["latency", "loss", "bandwidth"]
            test_config = yaml.safe_load(file)
            test_dict = test_config["network_chaos"]
            test_duration = int(test_dict.get("duration", 300))
            test_interface = test_dict.get("interfaces", [])
            test_node = test_dict.get("node_name", "")
            test_node_label = test_dict.get("label_selector", "node-role.kubernetes.io/master")
            test_execution = test_dict.get("execution", "serial")
            test_instance_count = test_dict.get("instance_count", 1)
            test_egress = test_dict.get("egress", {"bandwidth": "100mbit"})
            if test_node:
                node_name_list = test_node.split(",")
            else:
                node_name_list = [test_node]
            nodelst = []
            for single_node_name in node_name_list:
                nodelst.extend(common_node_functions.get_node(single_node_name, test_node_label, test_instance_count, kubecli))
            file_loader = FileSystemLoader(os.path.abspath(os.path.dirname(__file__)))
            env = Environment(loader=file_loader, autoescape=True)
            pod_template = env.get_template("pod.j2")
            test_interface = verify_interface(test_interface, nodelst, pod_template, kubecli)
            joblst = []
            egress_lst = [i for i in param_lst if i in test_egress]
            chaos_config = {
                "network_chaos": {
                    "duration": test_duration,
                    "interfaces": test_interface,
                    "node_name": ",".join(nodelst),
                    "execution": test_execution,
                    "instance_count": test_instance_count,
                    "egress": test_egress,
        scenario_telemetry = ScenarioTelemetry()
        scenario_telemetry.scenario = net_config
        scenario_telemetry.startTimeStamp = time.time()
        telemetry.set_parameters_base64(scenario_telemetry, net_config)
        try:
            with open(net_config, "r") as file:
                param_lst = ["latency", "loss", "bandwidth"]
                test_config = yaml.safe_load(file)
                test_dict = test_config["network_chaos"]
                test_duration = int(
                    get_yaml_item_value(test_dict, "duration", 300)
                )
                test_interface = get_yaml_item_value(
                    test_dict, "interfaces", []
                )
                test_node = get_yaml_item_value(test_dict, "node_name", "")
                test_node_label = get_yaml_item_value(
                    test_dict, "label_selector",
                    "node-role.kubernetes.io/master"
                )
                test_execution = get_yaml_item_value(
                    test_dict, "execution", "serial"
                )
                test_instance_count = get_yaml_item_value(
                    test_dict, "instance_count", 1
                )
                test_egress = get_yaml_item_value(
                    test_dict, "egress", {"bandwidth": "100mbit"}
                )
                if test_node:
                    node_name_list = test_node.split(",")
                else:
                    node_name_list = [test_node]
                nodelst = []
                for single_node_name in node_name_list:
                    nodelst.extend(common_node_functions.get_node(single_node_name, test_node_label, test_instance_count, kubecli))
                file_loader = FileSystemLoader(os.path.abspath(os.path.dirname(__file__)))
                env = Environment(loader=file_loader, autoescape=True)
                pod_template = env.get_template("pod.j2")
                test_interface = verify_interface(test_interface, nodelst, pod_template, kubecli)
                joblst = []
                egress_lst = [i for i in param_lst if i in test_egress]
                chaos_config = {
                    "network_chaos": {
                        "duration": test_duration,
                        "interfaces": test_interface,
                        "node_name": ",".join(nodelst),
                        "execution": test_execution,
                        "instance_count": test_instance_count,
                        "egress": test_egress,
                    }
                }
            }
            logging.info("Executing network chaos with config \n %s" % yaml.dump(chaos_config))
            job_template = env.get_template("job.j2")
            try:
                for i in egress_lst:
                    for node in nodelst:
                        exec_cmd = get_egress_cmd(
                            test_execution, test_interface, i, test_dict["egress"], duration=test_duration
                        )
                        logging.info("Executing %s on node %s" % (exec_cmd, node))
                        job_body = yaml.safe_load(
                            job_template.render(jobname=i + str(hash(node))[:5], nodename=node, cmd=exec_cmd)
                        )
                        joblst.append(job_body["metadata"]["name"])
                        api_response = kubecli.create_job(job_body)
                        if api_response is None:
                            raise Exception("Error creating job")
                        if test_execution == "serial":
                            logging.info("Waiting for serial job to finish")
                logging.info("Executing network chaos with config \n %s" % yaml.dump(chaos_config))
                job_template = env.get_template("job.j2")
                try:
                    for i in egress_lst:
                        for node in nodelst:
                            exec_cmd = get_egress_cmd(
                                test_execution, test_interface, i, test_dict["egress"], duration=test_duration
                            )
                            logging.info("Executing %s on node %s" % (exec_cmd, node))
                            job_body = yaml.safe_load(
                                job_template.render(jobname=i + str(hash(node))[:5], nodename=node, cmd=exec_cmd)
                            )
                            joblst.append(job_body["metadata"]["name"])
                            api_response = kubecli.create_job(job_body)
                            if api_response is None:
                                raise Exception("Error creating job")
                            if test_execution == "serial":
                                logging.info("Waiting for serial job to finish")
                                start_time = int(time.time())
                                wait_for_job(joblst[:], kubecli, test_duration + 300)
                                logging.info("Waiting for wait_duration %s" % wait_duration)
                                time.sleep(wait_duration)
                                end_time = int(time.time())
                                cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
                            if test_execution == "parallel":
                                break
                    if test_execution == "parallel":
                        logging.info("Waiting for parallel job to finish")
                        start_time = int(time.time())
                        wait_for_job(joblst[:], kubecli, test_duration + 300)
                        logging.info("Waiting for wait_duration %s" % wait_duration)
                        time.sleep(wait_duration)
                        end_time = int(time.time())
                        cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
                        if test_execution == "parallel":
                            break
                    if test_execution == "parallel":
                        logging.info("Waiting for parallel job to finish")
                        start_time = int(time.time())
                        wait_for_job(joblst[:], kubecli, test_duration + 300)
                        logging.info("Waiting for wait_duration %s" % wait_duration)
                        time.sleep(wait_duration)
                        end_time = int(time.time())
                        cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
            except Exception as e:
                logging.error("Network Chaos exiting due to Exception %s" % e)
                sys.exit(1)
            finally:
                logging.info("Deleting jobs")
                delete_job(joblst[:], kubecli)
                except Exception as e:
                    logging.error("Network Chaos exiting due to Exception %s" % e)
                    raise RuntimeError()
                finally:
                    logging.info("Deleting jobs")
                    delete_job(joblst[:], kubecli)
        except (RuntimeError, Exception):
            scenario_telemetry.exitStatus = 1
            failed_scenarios.append(net_config)
            log_exception(net_config)
        else:
            scenario_telemetry.exitStatus = 0
        scenario_telemetries.append(scenario_telemetry)
    return failed_scenarios, scenario_telemetries
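
For reference, a hypothetical scenario file exercising the keys the new run() reads above (all values illustrative):

    network_chaos:
      duration: 300
      node_name: ""
      label_selector: node-role.kubernetes.io/master
      instance_count: 1
      execution: serial
      egress:
        latency: 50ms
        bandwidth: 100mbit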


# krkn_lib_kubernetes
def verify_interface(test_interface, nodelst, template, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
# krkn_lib
def verify_interface(test_interface, nodelst, template, kubecli: KrknKubernetes):
    pod_index = random.randint(0, len(nodelst) - 1)
    pod_body = yaml.safe_load(template.render(nodename=nodelst[pod_index]))
    logging.info("Creating pod to query interface on node %s" % nodelst[pod_index])
@@ -110,23 +141,24 @@ def verify_interface(test_interface, nodelst, template, kubecli: krkn_lib_kubern
        for interface in test_interface:
            if interface not in interface_lst:
                logging.error("Interface %s not found in node %s interface list %s" % (interface, nodelst[pod_index], interface_lst))
                sys.exit(1)
                #sys.exit(1)
                raise RuntimeError()
        return test_interface
    finally:
        logging.info("Deleting pod to query interface on node")
        kubecli.delete_pod("fedtools", "default")


# krkn_lib_kubernetes
def get_job_pods(api_response, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
# krkn_lib
def get_job_pods(api_response, kubecli: KrknKubernetes):
    controllerUid = api_response.metadata.labels["controller-uid"]
    pod_label_selector = "controller-uid=" + controllerUid
    pods_list = kubecli.list_pods(label_selector=pod_label_selector, namespace="default")
    return pods_list[0]


# krkn_lib_kubernetes
def wait_for_job(joblst, kubecli: krkn_lib_kubernetes.KrknLibKubernetes, timeout=300):
# krkn_lib
def wait_for_job(joblst, kubecli: KrknKubernetes, timeout=300):
    waittime = time.time() + timeout
    count = 0
    joblen = len(joblst)
@@ -144,8 +176,8 @@ def wait_for_job(joblst, kubecli: krkn_lib_kubernetes.KrknLibKubernetes, timeout
            time.sleep(5)


# krkn_lib_kubernetes
def delete_job(joblst, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
# krkn_lib
def delete_job(joblst, kubecli: KrknKubernetes):
    for jobname in joblst:
        try:
            api_response = kubecli.get_job_status(jobname, namespace="default")
@@ -158,7 +190,7 @@ def delete_job(joblst, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
                logging.error(pod_log)
        except Exception:
            logging.warning("Exception in getting job status")
        api_response = kubecli.delete_job(name=jobname, namespace="default")
        kubecli.delete_job(name=jobname, namespace="default")


def get_egress_cmd(execution, test_interface, mod, vallst, duration=30):

@@ -2,12 +2,12 @@ import sys
import logging
import kraken.invoke.command as runcommand
import kraken.node_actions.common_node_functions as nodeaction
import krkn_lib_kubernetes
from krkn_lib.k8s import KrknKubernetes

# krkn_lib_kubernetes
# krkn_lib
class abstract_node_scenarios:
    kubecli: krkn_lib_kubernetes.KrknLibKubernetes
    def __init__(self, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
    kubecli: KrknKubernetes
    def __init__(self, kubecli: KrknKubernetes):
        self.kubecli = kubecli
    # Node scenario to start the node
    def node_start_scenario(self, instance_kill_count, node, timeout):

@@ -1,14 +1,14 @@
import sys
import time
import krkn_lib_kubernetes
import logging
import kraken.node_actions.common_node_functions as nodeaction
import os
import json
from aliyunsdkcore.client import AcsClient
from aliyunsdkecs.request.v20140526 import DescribeInstancesRequest, DeleteInstanceRequest
from aliyunsdkecs.request.v20140526 import StopInstanceRequest, StartInstanceRequest, RebootInstanceRequest
import logging
import kraken.node_actions.common_node_functions as nodeaction
from kraken.node_actions.abstract_node_scenarios import abstract_node_scenarios
import os
import json
from krkn_lib.k8s import KrknKubernetes


class Alibaba:
@@ -180,9 +180,9 @@ class Alibaba:
        logging.info("ECS %s is released" % instance_id)
        return True

# krkn_lib_kubernetes
# krkn_lib
class alibaba_node_scenarios(abstract_node_scenarios):
    def __init__(self,kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
    def __init__(self,kubecli: KrknKubernetes):
        self.alibaba = Alibaba()

    # Node scenario to start the node

@@ -2,10 +2,9 @@ import sys
import time
import boto3
import logging
import krkn_lib_kubernetes
import kraken.node_actions.common_node_functions as nodeaction
from kraken.node_actions.abstract_node_scenarios import abstract_node_scenarios

from krkn_lib.k8s import KrknKubernetes

class AWS:
    def __init__(self):
@@ -27,7 +26,9 @@ class AWS:
            logging.error(
                "Failed to start node instance %s. Encountered following " "exception: %s." % (instance_id, e)
            )
            sys.exit(1)
            # removed_exit
            # sys.exit(1)
            raise RuntimeError()

    # Stop the node instance
    def stop_instances(self, instance_id):
@@ -36,7 +37,9 @@ class AWS:
            logging.info("EC2 instance: " + str(instance_id) + " stopped")
        except Exception as e:
            logging.error("Failed to stop node instance %s. Encountered following " "exception: %s." % (instance_id, e))
            sys.exit(1)
            # removed_exit
            # sys.exit(1)
            raise RuntimeError()

    # Terminate the node instance
    def terminate_instances(self, instance_id):
@@ -47,7 +50,9 @@ class AWS:
            logging.error(
                "Failed to terminate node instance %s. Encountered following " "exception: %s." % (instance_id, e)
            )
            sys.exit(1)
            # removed_exit
            # sys.exit(1)
            raise RuntimeError()

    # Reboot the node instance
    def reboot_instances(self, instance_id):
@@ -58,7 +63,9 @@ class AWS:
            logging.error(
                "Failed to reboot node instance %s. Encountered following " "exception: %s." % (instance_id, e)
            )
            sys.exit(1)
            # removed_exit
            # sys.exit(1)
            raise RuntimeError()

    # Below functions poll EC2.Client.describe_instances() every 15 seconds
    # until a successful state is reached. An error is returned after 40 failed checks
@@ -102,7 +109,9 @@ class AWS:
                "Failed to create the default network_acl: %s"
                "Make sure you have aws cli configured on the host and set for the region of your vpc/subnet" % (e)
            )
            sys.exit(1)
            # removed_exit
            # sys.exit(1)
            raise RuntimeError()
        return acl_id

    # Replace network acl association
@@ -114,7 +123,9 @@ class AWS:
            new_association_id = status["NewAssociationId"]
        except Exception as e:
            logging.error("Failed to replace network acl association: %s" % (e))
            sys.exit(1)
            # removed_exit
            # sys.exit(1)
            raise RuntimeError()
        return new_association_id

    # Describe network acl
@@ -131,7 +142,9 @@ class AWS:
                "Failed to describe network acl: %s."
                "Make sure you have aws cli configured on the host and set for the region of your vpc/subnet" % (e)
            )
            sys.exit(1)
            # removed_exit
            # sys.exit(1)
            raise RuntimeError()
        associations = response["NetworkAcls"][0]["Associations"]
        # grab the current network_acl in use
        original_acl_id = response["NetworkAcls"][0]["Associations"][0]["NetworkAclId"]
@@ -148,11 +161,13 @@ class AWS:
                "Make sure you have aws cli configured on the host and set for the region of your vpc/subnet"
                % (acl_id, e)
            )
            sys.exit(1)
            # removed_exit
            # sys.exit(1)
            raise RuntimeError()

# krkn_lib_kubernetes
# krkn_lib
class aws_node_scenarios(abstract_node_scenarios):
    def __init__(self, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
    def __init__(self, kubecli: KrknKubernetes):
        super().__init__(kubecli)
        self.aws = AWS()

@@ -173,7 +188,9 @@ class aws_node_scenarios(abstract_node_scenarios):
                    "Failed to start node instance. Encountered following " "exception: %s. Test Failed" % (e)
                )
                logging.error("node_start_scenario injection failed!")
                sys.exit(1)
                # removed_exit
                # sys.exit(1)
                raise RuntimeError()

    # Node scenario to stop the node
    def node_stop_scenario(self, instance_kill_count, node, timeout):
@@ -189,7 +206,9 @@ class aws_node_scenarios(abstract_node_scenarios):
            except Exception as e:
                logging.error("Failed to stop node instance. Encountered following exception: %s. " "Test Failed" % (e))
                logging.error("node_stop_scenario injection failed!")
                sys.exit(1)
                # removed_exit
                # sys.exit(1)
                raise RuntimeError()

    # Node scenario to terminate the node
    def node_termination_scenario(self, instance_kill_count, node, timeout):
@@ -213,7 +232,9 @@ class aws_node_scenarios(abstract_node_scenarios):
                    "Failed to terminate node instance. Encountered following exception:" " %s. Test Failed" % (e)
                )
                logging.error("node_termination_scenario injection failed!")
                sys.exit(1)
                # removed_exit
                # sys.exit(1)
                raise RuntimeError()

    # Node scenario to reboot the node
    def node_reboot_scenario(self, instance_kill_count, node, timeout):
@@ -232,4 +253,6 @@ class aws_node_scenarios(abstract_node_scenarios):
                    "Failed to reboot node instance. Encountered following exception:" " %s. Test Failed" % (e)
                )
                logging.error("node_reboot_scenario injection failed!")
                sys.exit(1)
                # removed_exit
                # sys.exit(1)
                raise RuntimeError()
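
The recurring sys.exit(1) → raise RuntimeError() substitution in this and the following cloud backends is what allows the per-scenario telemetry loops above to record a failure and continue. A minimal sketch of the calling pattern this enables (names illustrative, not the repo's actual runner):

    # illustrative caller; scenario names are made up
    for scenario in ("node_stop", "node_reboot"):
        try:
            inject(scenario)  # now raises RuntimeError instead of killing the process
        except RuntimeError:
            failed_scenarios.append(scenario)  # recorded; the run continues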

@@ -1,13 +1,14 @@
import sys

import time
from azure.mgmt.compute import ComputeManagementClient
from azure.identity import DefaultAzureCredential
import yaml
import kraken.invoke.command as runcommand
import logging
import krkn_lib_kubernetes
import kraken.node_actions.common_node_functions as nodeaction
from kraken.node_actions.abstract_node_scenarios import abstract_node_scenarios
import kraken.invoke.command as runcommand
import yaml
from azure.mgmt.compute import ComputeManagementClient
from azure.identity import DefaultAzureCredential
from krkn_lib.k8s import KrknKubernetes



class Azure:
@@ -39,7 +40,9 @@ class Azure:
            logging.info("vm name " + str(vm_name) + " started")
        except Exception as e:
            logging.error("Failed to start node instance %s. Encountered following " "exception: %s." % (vm_name, e))
            sys.exit(1)
            # removed_exit
            # sys.exit(1)
            raise RuntimeError()

    # Stop the node instance
    def stop_instances(self, group_name, vm_name):
@@ -48,7 +51,9 @@ class Azure:
            logging.info("vm name " + str(vm_name) + " stopped")
        except Exception as e:
            logging.error("Failed to stop node instance %s. Encountered following " "exception: %s." % (vm_name, e))
            sys.exit(1)
            # removed_exit
            # sys.exit(1)
            raise RuntimeError()

    # Terminate the node instance
    def terminate_instances(self, group_name, vm_name):
@@ -59,7 +64,9 @@ class Azure:
            logging.error(
                "Failed to terminate node instance %s. Encountered following " "exception: %s." % (vm_name, e)
            )
            sys.exit(1)
            # removed_exit
            # sys.exit(1)
            raise RuntimeError()

    # Reboot the node instance
    def reboot_instances(self, group_name, vm_name):
@@ -68,7 +75,9 @@ class Azure:
            logging.info("vm name " + str(vm_name) + " rebooted")
        except Exception as e:
            logging.error("Failed to reboot node instance %s. Encountered following " "exception: %s." % (vm_name, e))
            sys.exit(1)
            # removed_exit
            # sys.exit(1)
            raise RuntimeError()

    def get_vm_status(self, resource_group, vm_name):
        statuses = self.compute_client.virtual_machines.instance_view(resource_group, vm_name).statuses
@@ -121,9 +130,9 @@ class Azure:
        logging.info("Vm %s is terminated" % vm_name)
        return True

# krkn_lib_kubernetes
# krkn_lib
class azure_node_scenarios(abstract_node_scenarios):
    def __init__(self, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
    def __init__(self, kubecli: KrknKubernetes):
        super().__init__(kubecli)
        logging.info("init in azure")
        self.azure = Azure()
@@ -145,7 +154,9 @@ class azure_node_scenarios(abstract_node_scenarios):
                    "Failed to start node instance. Encountered following " "exception: %s. Test Failed" % (e)
                )
                logging.error("node_start_scenario injection failed!")
                sys.exit(1)
                # removed_exit
                # sys.exit(1)
                raise RuntimeError()

    # Node scenario to stop the node
    def node_stop_scenario(self, instance_kill_count, node, timeout):
@@ -161,7 +172,9 @@ class azure_node_scenarios(abstract_node_scenarios):
            except Exception as e:
                logging.error("Failed to stop node instance. Encountered following exception: %s. " "Test Failed" % e)
                logging.error("node_stop_scenario injection failed!")
                sys.exit(1)
                # removed_exit
                # sys.exit(1)
                raise RuntimeError()

    # Node scenario to terminate the node
    def node_termination_scenario(self, instance_kill_count, node, timeout):
@@ -185,7 +198,9 @@ class azure_node_scenarios(abstract_node_scenarios):
                    "Failed to terminate node instance. Encountered following exception:" " %s. Test Failed" % (e)
                )
                logging.error("node_termination_scenario injection failed!")
                sys.exit(1)
                # removed_exit
                # sys.exit(1)
                raise RuntimeError()

    # Node scenario to reboot the node
    def node_reboot_scenario(self, instance_kill_count, node, timeout):
@@ -204,4 +219,6 @@ class azure_node_scenarios(abstract_node_scenarios):
                    "Failed to reboot node instance. Encountered following exception:" " %s. Test Failed" % (e)
                )
                logging.error("node_reboot_scenario injection failed!")
                sys.exit(1)
                # removed_exit
                # sys.exit(1)
                raise RuntimeError()
@@ -1,6 +1,5 @@
import kraken.node_actions.common_node_functions as nodeaction
from kraken.node_actions.abstract_node_scenarios import abstract_node_scenarios
import krkn_lib_kubernetes
import logging
import openshift as oc
import pyipmi
@@ -8,7 +7,7 @@ import pyipmi.interfaces
import sys
import time
import traceback

from krkn_lib.k8s import KrknKubernetes

class BM:
    def __init__(self, bm_info, user, passwd):
@@ -105,9 +104,9 @@ class BM:
        while self.get_ipmi_connection(bmc_addr, node_name).get_chassis_status().power_on:
            time.sleep(1)

# krkn_lib_kubernetes
# krkn_lib
class bm_node_scenarios(abstract_node_scenarios):
    def __init__(self, bm_info, user, passwd, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
    def __init__(self, bm_info, user, passwd, kubecli: KrknKubernetes):
        super().__init__(kubecli)
        self.bm = BM(bm_info, user, passwd)


@@ -2,14 +2,13 @@ import time
import random
import logging
import paramiko
import krkn_lib_kubernetes
import kraken.invoke.command as runcommand

from krkn_lib.k8s import KrknKubernetes
node_general = False

# Pick a random node with specified label selector
def get_node(node_name, label_selector, instance_kill_count, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
def get_node(node_name, label_selector, instance_kill_count, kubecli: KrknKubernetes):
    if node_name in kubecli.list_killable_nodes():
        return [node_name]
    elif node_name:
@@ -29,21 +28,21 @@ def get_node(node_name, label_selector, instance_kill_count, kubecli: krkn_lib_k
    return nodes_to_return


# krkn_lib_kubernetes
# krkn_lib
# Wait until the node status becomes Ready
def wait_for_ready_status(node, timeout, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
def wait_for_ready_status(node, timeout, kubecli: KrknKubernetes):
    resource_version = kubecli.get_node_resource_version(node)
    kubecli.watch_node_status(node, "True", timeout, resource_version)

# krkn_lib_kubernetes
# krkn_lib
# Wait until the node status becomes Not Ready
def wait_for_not_ready_status(node, timeout, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
def wait_for_not_ready_status(node, timeout, kubecli: KrknKubernetes):
    resource_version = kubecli.get_node_resource_version(node)
    kubecli.watch_node_status(node, "False", timeout, resource_version)

# krkn_lib_kubernetes
# krkn_lib
# Wait until the node status becomes Unknown
def wait_for_unknown_status(node, timeout, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
def wait_for_unknown_status(node, timeout, kubecli: KrknKubernetes):
    resource_version = kubecli.get_node_resource_version(node)
    kubecli.watch_node_status(node, "Unknown", timeout, resource_version)


@@ -1,10 +1,9 @@
import kraken.node_actions.common_node_functions as nodeaction
from kraken.node_actions.abstract_node_scenarios import abstract_node_scenarios
import krkn_lib_kubernetes
import logging
import sys
import docker

from krkn_lib.k8s import KrknKubernetes

class Docker:
    def __init__(self):
@@ -37,7 +36,7 @@ class Docker:


class docker_node_scenarios(abstract_node_scenarios):
    def __init__(self, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
    def __init__(self, kubecli: KrknKubernetes):
        super().__init__(kubecli)
        self.docker = Docker()


@@ -1,13 +1,12 @@
import sys
import time
import logging
import krkn_lib_kubernetes
import kraken.node_actions.common_node_functions as nodeaction
from kraken.node_actions.abstract_node_scenarios import abstract_node_scenarios
from googleapiclient import discovery
from oauth2client.client import GoogleCredentials
import kraken.invoke.command as runcommand

from krkn_lib.k8s import KrknKubernetes

class GCP:
    def __init__(self):
@@ -45,7 +44,9 @@ class GCP:
            logging.error(
                "Failed to start node instance %s. Encountered following " "exception: %s." % (instance_id, e)
            )
            sys.exit(1)
            # removed_exit
            # sys.exit(1)
            raise RuntimeError()

    # Stop the node instance
    def stop_instances(self, zone, instance_id):
@@ -54,7 +55,9 @@ class GCP:
            logging.info("vm name " + str(instance_id) + " stopped")
        except Exception as e:
            logging.error("Failed to stop node instance %s. Encountered following " "exception: %s." % (instance_id, e))
            sys.exit(1)
            # removed_exit
            # sys.exit(1)
            raise RuntimeError()

    # Suspend the node instance
    def suspend_instances(self, zone, instance_id):
@@ -65,7 +68,9 @@ class GCP:
            logging.error(
                "Failed to suspend node instance %s. Encountered following " "exception: %s." % (instance_id, e)
            )
            sys.exit(1)
            # removed_exit
            # sys.exit(1)
            raise RuntimeError()

    # Terminate the node instance
    def terminate_instances(self, zone, instance_id):
@@ -76,7 +81,9 @@ class GCP:
            logging.error(
                "Failed to start node instance %s. Encountered following " "exception: %s." % (instance_id, e)
            )
            sys.exit(1)
            # removed_exit
            # sys.exit(1)
            raise RuntimeError()

    # Reboot the node instance
    def reboot_instances(self, zone, instance_id):
@@ -87,7 +94,9 @@ class GCP:
            logging.error(
                "Failed to start node instance %s. Encountered following " "exception: %s." % (instance_id, e)
            )
            sys.exit(1)
            # removed_exit
            # sys.exit(1)
            raise RuntimeError()

    # Get instance status
    def get_instance_status(self, zone, instance_id, expected_status, timeout):
@@ -133,9 +142,9 @@ class GCP:
        return True


# krkn_lib_kubernetes
# krkn_lib
class gcp_node_scenarios(abstract_node_scenarios):
    def __init__(self, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
    def __init__(self, kubecli: KrknKubernetes):
        super().__init__(kubecli)
        self.gcp = GCP()

@@ -156,7 +165,9 @@ class gcp_node_scenarios(abstract_node_scenarios):
                    "Failed to start node instance. Encountered following " "exception: %s. Test Failed" % (e)
                )
                logging.error("node_start_scenario injection failed!")
                sys.exit(1)
                # removed_exit
                # sys.exit(1)
                raise RuntimeError()

    # Node scenario to stop the node
    def node_stop_scenario(self, instance_kill_count, node, timeout):
@@ -173,7 +184,9 @@ class gcp_node_scenarios(abstract_node_scenarios):
            except Exception as e:
                logging.error("Failed to stop node instance. Encountered following exception: %s. " "Test Failed" % (e))
                logging.error("node_stop_scenario injection failed!")
                sys.exit(1)
                # removed_exit
                # sys.exit(1)
                raise RuntimeError()

    # Node scenario to terminate the node
    def node_termination_scenario(self, instance_kill_count, node, timeout):
@@ -197,7 +210,9 @@ class gcp_node_scenarios(abstract_node_scenarios):
                    "Failed to terminate node instance. Encountered following exception:" " %s. Test Failed" % e
                )
                logging.error("node_termination_scenario injection failed!")
                sys.exit(1)
                # removed_exit
                # sys.exit(1)
                raise RuntimeError()

    # Node scenario to reboot the node
    def node_reboot_scenario(self, instance_kill_count, node, timeout):
@@ -215,4 +230,6 @@ class gcp_node_scenarios(abstract_node_scenarios):
                    "Failed to reboot node instance. Encountered following exception:" " %s. Test Failed" % (e)
                )
                logging.error("node_reboot_scenario injection failed!")
                sys.exit(1)
                # removed_exit
                # sys.exit(1)
                raise RuntimeError()

@@ -1,15 +1,14 @@
import logging
import krkn_lib_kubernetes
from kraken.node_actions.abstract_node_scenarios import abstract_node_scenarios

from krkn_lib.k8s import KrknKubernetes

class GENERAL:
    def __init__(self):
        pass

# krkn_lib_kubernetes
# krkn_lib
class general_node_scenarios(abstract_node_scenarios):
    def __init__(self, kubecli: krkn_lib_kubernetes.KrknLibKubernetes ):
    def __init__(self, kubecli: KrknKubernetes):
        super().__init__(kubecli)
        self.general = GENERAL()


@@ -1,11 +1,10 @@
 import sys
 import time
 import logging
-import krkn_lib_kubernetes
 import kraken.invoke.command as runcommand
 import kraken.node_actions.common_node_functions as nodeaction
 from kraken.node_actions.abstract_node_scenarios import abstract_node_scenarios

+from krkn_lib.k8s import KrknKubernetes

 class OPENSTACKCLOUD:
     def __init__(self):
@@ -24,7 +23,9 @@ class OPENSTACKCLOUD:
             logging.info("Instance: " + str(node) + " started")
         except Exception as e:
             logging.error("Failed to start node instance %s. Encountered following " "exception: %s." % (node, e))
-            sys.exit(1)
+            # removed_exit
+            # sys.exit(1)
+            raise RuntimeError()

     # Stop the node instance
     def stop_instances(self, node):
@@ -33,7 +34,9 @@ class OPENSTACKCLOUD:
             logging.info("Instance: " + str(node) + " stopped")
         except Exception as e:
             logging.error("Failed to stop node instance %s. Encountered following " "exception: %s." % (node, e))
-            sys.exit(1)
+            # removed_exit
+            # sys.exit(1)
+            raise RuntimeError()

     # Reboot the node instance
     def reboot_instances(self, node):
@@ -42,7 +45,9 @@ class OPENSTACKCLOUD:
             logging.info("Instance: " + str(node) + " rebooted")
         except Exception as e:
             logging.error("Failed to reboot node instance %s. Encountered following " "exception: %s." % (node, e))
-            sys.exit(1)
+            # removed_exit
+            # sys.exit(1)
+            raise RuntimeError()

     # Wait until the node instance is running
     def wait_until_running(self, node, timeout):
@@ -87,9 +92,9 @@ class OPENSTACKCLOUD:
                 return node_name
             counter += 1

-# krkn_lib_kubernetes
+# krkn_lib
 class openstack_node_scenarios(abstract_node_scenarios):
-    def __init__(self, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
+    def __init__(self, kubecli: KrknKubernetes):
         self.openstackcloud = OPENSTACKCLOUD()

     # Node scenario to start the node
@@ -109,7 +114,9 @@ class openstack_node_scenarios(abstract_node_scenarios):
                 "Failed to start node instance. Encountered following " "exception: %s. Test Failed" % (e)
             )
             logging.error("node_start_scenario injection failed!")
-            sys.exit(1)
+            # removed_exit
+            # sys.exit(1)
+            raise RuntimeError()

     # Node scenario to stop the node
     def node_stop_scenario(self, instance_kill_count, node, timeout):
@@ -125,7 +132,9 @@ class openstack_node_scenarios(abstract_node_scenarios):
         except Exception as e:
             logging.error("Failed to stop node instance. Encountered following exception: %s. " "Test Failed" % (e))
             logging.error("node_stop_scenario injection failed!")
-            sys.exit(1)
+            # removed_exit
+            # sys.exit(1)
+            raise RuntimeError()

     # Node scenario to reboot the node
     def node_reboot_scenario(self, instance_kill_count, node, timeout):
@@ -144,7 +153,9 @@ class openstack_node_scenarios(abstract_node_scenarios):
                 "Failed to reboot node instance. Encountered following exception:" " %s. Test Failed" % (e)
             )
             logging.error("node_reboot_scenario injection failed!")
-            sys.exit(1)
+            # removed_exit
+            # sys.exit(1)
+            raise RuntimeError()

     # Node scenario to start the node
     def helper_node_start_scenario(self, instance_kill_count, node_ip, timeout):
@@ -162,7 +173,9 @@ class openstack_node_scenarios(abstract_node_scenarios):
                 "Failed to start node instance. Encountered following " "exception: %s. Test Failed" % (e)
             )
             logging.error("helper_node_start_scenario injection failed!")
-            sys.exit(1)
+            # removed_exit
+            # sys.exit(1)
+            raise RuntimeError()

     # Node scenario to stop the node
     def helper_node_stop_scenario(self, instance_kill_count, node_ip, timeout):
@@ -177,7 +190,9 @@ class openstack_node_scenarios(abstract_node_scenarios):
         except Exception as e:
             logging.error("Failed to stop node instance. Encountered following exception: %s. " "Test Failed" % (e))
             logging.error("helper_node_stop_scenario injection failed!")
-            sys.exit(1)
+            # removed_exit
+            # sys.exit(1)
+            raise RuntimeError()

     def helper_node_service_status(self, node_ip, service, ssh_private_key, timeout):
         try:
@@ -188,4 +203,6 @@ class openstack_node_scenarios(abstract_node_scenarios):
         except Exception as e:
             logging.error("Failed to check service status. Encountered following exception:" " %s. Test Failed" % (e))
             logging.error("helper_node_service_status injection failed!")
-            sys.exit(1)
+            # removed_exit
+            # sys.exit(1)
+            raise RuntimeError()
@@ -2,7 +2,6 @@ import yaml
 import logging
 import sys
 import time
-import krkn_lib_kubernetes
 from kraken.node_actions.aws_node_scenarios import aws_node_scenarios
 from kraken.node_actions.general_cloud_node_scenarios import general_node_scenarios
 from kraken.node_actions.az_node_scenarios import azure_node_scenarios
@@ -13,14 +12,15 @@ from kraken.node_actions.bm_node_scenarios import bm_node_scenarios
 from kraken.node_actions.docker_node_scenarios import docker_node_scenarios
 import kraken.node_actions.common_node_functions as common_node_functions
 import kraken.cerberus.setup as cerberus


+from krkn_lib.k8s import KrknKubernetes
+from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
+from krkn_lib.models.telemetry import ScenarioTelemetry
 node_general = False


 # Get the node scenarios object of specfied cloud type
-# krkn_lib_kubernetes
-def get_node_scenario_object(node_scenario, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
+# krkn_lib
+def get_node_scenario_object(node_scenario, kubecli: KrknKubernetes):
     if "cloud_type" not in node_scenario.keys() or node_scenario["cloud_type"] == "generic":
         global node_general
         node_general = True
@@ -52,9 +52,15 @@ def get_node_scenario_object(node_scenario, kubecli: krkn_lib_kubernetes.KrknLib


 # Run defined scenarios
-# krkn_lib_kubernetes
-def run(scenarios_list, config, wait_duration, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
+# krkn_lib
+def run(scenarios_list, config, wait_duration, kubecli: KrknKubernetes, telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]):
+    scenario_telemetries: list[ScenarioTelemetry] = []
+    failed_scenarios = []
     for node_scenario_config in scenarios_list:
+        scenario_telemetry = ScenarioTelemetry()
+        scenario_telemetry.scenario = node_scenario_config
+        scenario_telemetry.startTimeStamp = time.time()
+        telemetry.set_parameters_base64(scenario_telemetry, node_scenario_config)
         with open(node_scenario_config, "r") as f:
             node_scenario_config = yaml.full_load(f)
             for node_scenario in node_scenario_config["node_scenarios"]:
@@ -62,25 +68,41 @@ def run(scenarios_list, config, wait_duration, kubecli: krkn_lib_kubernetes.Krkn
                 if node_scenario["actions"]:
                     for action in node_scenario["actions"]:
                         start_time = int(time.time())
-                        inject_node_scenario(action, node_scenario, node_scenario_object, kubecli)
-                        logging.info("Waiting for the specified duration: %s" % (wait_duration))
-                        time.sleep(wait_duration)
-                        end_time = int(time.time())
-                        cerberus.get_status(config, start_time, end_time)
-                        logging.info("")
+                        try:
+                            inject_node_scenario(action, node_scenario, node_scenario_object, kubecli)
+                            logging.info("Waiting for the specified duration: %s" % (wait_duration))
+                            time.sleep(wait_duration)
+                            end_time = int(time.time())
+                            cerberus.get_status(config, start_time, end_time)
+                            logging.info("")
+                        except (RuntimeError, Exception) as e:
+                            scenario_telemetry.exitStatus = 1
+                            failed_scenarios.append(node_scenario_config)
+                            log_exception(node_scenario_config)
+                        else:
+                            scenario_telemetry.exitStatus = 0

+        scenario_telemetry.endTimeStamp = time.time()
+        scenario_telemetries.append(scenario_telemetry)

+    return failed_scenarios, scenario_telemetries


 # Inject the specified node scenario
-def inject_node_scenario(action, node_scenario, node_scenario_object, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
+def inject_node_scenario(action, node_scenario, node_scenario_object, kubecli: KrknKubernetes):
     generic_cloud_scenarios = ("stop_kubelet_scenario", "node_crash_scenario")
     # Get the node scenario configurations
-    run_kill_count = node_scenario.get("runs", 1)
-    instance_kill_count = node_scenario.get("instance_count", 1)
-    node_name = node_scenario.get("node_name", "")
-    label_selector = node_scenario.get("label_selector", "")
-    timeout = node_scenario.get("timeout", 120)
-    service = node_scenario.get("service", "")
-    ssh_private_key = node_scenario.get("ssh_private_key", "~/.ssh/id_rsa")
+    run_kill_count = get_yaml_item_value(node_scenario, "runs", 1)
+    instance_kill_count = get_yaml_item_value(
+        node_scenario, "instance_count", 1
+    )
+    node_name = get_yaml_item_value(node_scenario, "node_name", "")
+    label_selector = get_yaml_item_value(node_scenario, "label_selector", "")
+    timeout = get_yaml_item_value(node_scenario, "timeout", 120)
+    service = get_yaml_item_value(node_scenario, "service", "")
+    ssh_private_key = get_yaml_item_value(
+        node_scenario, "ssh_private_key", "~/.ssh/id_rsa"
+    )
    # Get the node to apply the scenario
     if node_name:
         node_name_list = node_name.split(",")
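The second recurring change above swaps plain dict.get() lookups for get_yaml_item_value from krkn_lib.utils.functions. A plausible stand-in for what that helper does, written here only to make the intent of the hunk concrete (this is not the library's actual implementation):

def get_yaml_item_value(config: dict, key: str, default=None):
    # unlike dict.get(), also falls back to the default when the key
    # exists but was set to null in the YAML (yaml loads null as None)
    value = config.get(key)
    return default if value is None else value

scenario = {"runs": None, "timeout": 30}
print(get_yaml_item_value(scenario, "runs", 1))       # 1, null falls back
print(get_yaml_item_value(scenario, "timeout", 120))  # 30
print(get_yaml_item_value(scenario, "service", ""))   # "", key missing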
@@ -12,6 +12,10 @@ import kraken.plugins.node_scenarios.ibmcloud_plugin as ibmcloud_plugin
 from kraken.plugins.run_python_plugin import run_python_file
 from kraken.plugins.network.ingress_shaping import network_chaos
 from kraken.plugins.pod_network_outage.pod_network_outage_plugin import pod_outage
+from kraken.plugins.pod_network_outage.pod_network_outage_plugin import pod_egress_shaping
+from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
+from krkn_lib.models.telemetry import ScenarioTelemetry
+from krkn_lib.utils.functions import log_exception


 @dataclasses.dataclass
@@ -213,21 +217,36 @@ PLUGINS = Plugins(
             [
                 "error"
             ]
         ),
+        PluginStep(
+            pod_egress_shaping,
+            [
+                "error"
+            ]
+        )
     ]
 )


-def run(scenarios: List[str], kubeconfig_path: str, kraken_config: str, failed_post_scenarios: List[str], wait_duration: int) -> List[str]:
+def run(scenarios: List[str], kubeconfig_path: str, kraken_config: str, failed_post_scenarios: List[str], wait_duration: int, telemetry: KrknTelemetryKubernetes) -> (List[str], list[ScenarioTelemetry]):
+    scenario_telemetries: list[ScenarioTelemetry] = []
     for scenario in scenarios:
-        logging.info('scenario '+ str(scenario))
+        scenario_telemetry = ScenarioTelemetry()
+        scenario_telemetry.scenario = scenario
+        scenario_telemetry.startTimeStamp = time.time()
+        telemetry.set_parameters_base64(scenario_telemetry, scenario)
+        logging.info('scenario ' + str(scenario))
         try:
             PLUGINS.run(scenario, kubeconfig_path, kraken_config)
         except Exception as e:
+            scenario_telemetry.exitStatus = 1
             failed_post_scenarios.append(scenario)
             logging.error("Error while running {}: {}".format(scenario, e))
-            return failed_post_scenarios
-        logging.info("Waiting for the specified duration: %s" % (wait_duration))
-        time.sleep(wait_duration)
+            log_exception(scenario)
+        else:
+            scenario_telemetry.exitStatus = 0
+            logging.info("Waiting for the specified duration: %s" % (wait_duration))
+            time.sleep(wait_duration)
+        scenario_telemetries.append(scenario_telemetry)
+        scenario_telemetry.endTimeStamp = time.time()

-    return failed_post_scenarios
+    return failed_post_scenarios, scenario_telemetries
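The telemetry bookkeeping added across these run() functions has one shape everywhere: stamp a start time, run the scenario, record exit status 0 or 1, stamp an end time, and collect the record. A reduced, runnable sketch of that lifecycle with a stubbed telemetry record (the stub stands in for krkn_lib.models.telemetry.ScenarioTelemetry, which is not reproduced here):

import time

class ScenarioTelemetryStub:
    # stand-in for krkn_lib's ScenarioTelemetry model
    scenario = None
    startTimeStamp = None
    endTimeStamp = None
    exitStatus = None

def run(scenarios):
    failed, telemetries = [], []
    for scenario in scenarios:
        t = ScenarioTelemetryStub()
        t.scenario = scenario
        t.startTimeStamp = time.time()
        try:
            if "bad" in scenario:      # simulated failing injection
                raise RuntimeError()
        except (RuntimeError, Exception):
            t.exitStatus = 1           # failure is recorded, not fatal
            failed.append(scenario)
        else:
            t.exitStatus = 0
        t.endTimeStamp = time.time()
        telemetries.append(t)
    return failed, telemetries

print(run(["ok.yaml", "bad.yaml"])[0])  # ['bad.yaml']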
File diff suppressed because it is too large
@@ -1,15 +1,16 @@
 import logging

-from arcaflow_plugin_sdk import serialization
-import arcaflow_plugin_kill_pod
-
-import kraken.cerberus.setup as cerberus
-import kraken.post_actions.actions as post_actions
-import krkn_lib_kubernetes
 import time
 import yaml
 import sys
 import random
+import arcaflow_plugin_kill_pod
+import kraken.cerberus.setup as cerberus
+import kraken.post_actions.actions as post_actions
+from krkn_lib.k8s import KrknKubernetes
+from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
+from krkn_lib.models.telemetry import ScenarioTelemetry
+from arcaflow_plugin_sdk import serialization
+from krkn_lib.utils.functions import get_yaml_item_value, log_exception


 # Run pod based scenarios
@@ -66,9 +67,24 @@ def run(kubeconfig_path, scenarios_list, config, failed_post_scenarios, wait_dur
     cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
     return failed_post_scenarios

-# krkn_lib_kubernetes
-def container_run(kubeconfig_path, scenarios_list, config, failed_post_scenarios, wait_duration, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):

+# krkn_lib
+def container_run(kubeconfig_path,
+                  scenarios_list,
+                  config,
+                  failed_post_scenarios,
+                  wait_duration,
+                  kubecli: KrknKubernetes,
+                  telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]):

+    failed_scenarios = []
+    scenario_telemetries: list[ScenarioTelemetry] = []

     for container_scenario_config in scenarios_list:
+        scenario_telemetry = ScenarioTelemetry()
+        scenario_telemetry.scenario = container_scenario_config[0]
+        scenario_telemetry.startTimeStamp = time.time()
+        telemetry.set_parameters_base64(scenario_telemetry, container_scenario_config[0])
         if len(container_scenario_config) > 1:
             pre_action_output = post_actions.run(kubeconfig_path, container_scenario_config[1])
         else:
@@ -78,43 +94,63 @@ def container_run(kubeconfig_path, scenarios_list, config, failed_post_scenarios
         for cont_scenario in cont_scenario_config["scenarios"]:
             # capture start time
             start_time = int(time.time())
-            killed_containers = container_killing_in_pod(cont_scenario, kubecli)
-
-            if len(container_scenario_config) > 1:
-                try:
+            try:
+                killed_containers = container_killing_in_pod(cont_scenario, kubecli)
+                if len(container_scenario_config) > 1:
                     failed_post_scenarios = post_actions.check_recovery(
-                        kubeconfig_path, container_scenario_config, failed_post_scenarios, pre_action_output
+                        kubeconfig_path,
+                        container_scenario_config,
+                        failed_post_scenarios,
+                        pre_action_output
                     )
-                except Exception as e:
-                    logging.error("Failed to run post action checks: %s" % e)
-                    sys.exit(1)
-            else:
-                failed_post_scenarios = check_failed_containers(
-                    killed_containers, cont_scenario.get("retry_wait", 120), kubecli
-                )
+            except (RuntimeError, Exception):
+                failed_scenarios.append(container_scenario_config[0])
+                log_exception(container_scenario_config[0])
+                scenario_telemetry.exitStatus = 1
+                # removed_exit
+                # sys.exit(1)
+            else:
+                failed_post_scenarios = check_failed_containers(
+                    killed_containers, cont_scenario.get("retry_wait", 120), kubecli
+                )
+                scenario_telemetry.exitStatus = 0
+        scenario_telemetry.endTimeStamp = time.time()
+        scenario_telemetries.append(scenario_telemetry)

             logging.info("Waiting for the specified duration: %s" % (wait_duration))
             time.sleep(wait_duration)

             # capture end time
             end_time = int(time.time())

             # publish cerberus status
             cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
+    logging.info("")
+    return failed_scenarios, scenario_telemetries


-def container_killing_in_pod(cont_scenario, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
-    scenario_name = cont_scenario.get("name", "")
-    namespace = cont_scenario.get("namespace", "*")
-    label_selector = cont_scenario.get("label_selector", None)
-    pod_names = cont_scenario.get("pod_names", [])
-    container_name = cont_scenario.get("container_name", "")
-    kill_action = cont_scenario.get("action", "kill 1")
-    kill_count = cont_scenario.get("count", 1)
+def container_killing_in_pod(cont_scenario, kubecli: KrknKubernetes):
+    scenario_name = get_yaml_item_value(cont_scenario, "name", "")
+    namespace = get_yaml_item_value(cont_scenario, "namespace", "*")
+    label_selector = get_yaml_item_value(cont_scenario, "label_selector", None)
+    pod_names = get_yaml_item_value(cont_scenario, "pod_names", [])
+    container_name = get_yaml_item_value(cont_scenario, "container_name", "")
+    kill_action = get_yaml_item_value(cont_scenario, "action", 1)
+    kill_count = get_yaml_item_value(cont_scenario, "count", 1)
+    if not isinstance(kill_action, int):
+        logging.error("Please make sure the action parameter defined in the "
+                      "config is an integer")
+        raise RuntimeError()
+    if (kill_action < 1) or (kill_action > 15):
+        logging.error("Only 1-15 kill signals are supported.")
+        raise RuntimeError()
+    kill_action = "kill " + str(kill_action)
     if type(pod_names) != list:
         logging.error("Please make sure your pod_names are in a list format")
-        sys.exit(1)
+        # removed_exit
+        # sys.exit(1)
+        raise RuntimeError()
     if len(pod_names) == 0:
         if namespace == "*":
             # returns double array of pod name and namespace
@@ -126,7 +162,9 @@ def container_killing_in_pod(cont_scenario, kubecli: krkn_lib_kubernetes.KrknLib
         if namespace == "*":
             logging.error("You must specify the namespace to kill a container in a specific pod")
             logging.error("Scenario " + scenario_name + " failed")
-            sys.exit(1)
+            # removed_exit
+            # sys.exit(1)
+            raise RuntimeError()
         pods = pod_names
     # get container and pod name
     container_pod_list = []
@@ -147,7 +185,9 @@ def container_killing_in_pod(cont_scenario, kubecli: krkn_lib_kubernetes.KrknLib
         if len(container_pod_list) == 0:
             logging.error("Trying to kill more containers than were found, try lowering kill count")
             logging.error("Scenario " + scenario_name + " failed")
-            sys.exit(1)
+            # removed_exit
+            # sys.exit(1)
+            raise RuntimeError()
         selected_container_pod = container_pod_list[random.randint(0, len(container_pod_list) - 1)]
         for c_name in selected_container_pod[2]:
             if container_name != "":
@@ -165,7 +205,7 @@ def container_killing_in_pod(cont_scenario, kubecli: krkn_lib_kubernetes.KrknLib
     return killed_container_list


-def retry_container_killing(kill_action, podname, namespace, container_name, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
+def retry_container_killing(kill_action, podname, namespace, container_name, kubecli: KrknKubernetes):
     i = 0
     while i < 5:
         logging.info("Killing container %s in pod %s (ns %s)" % (str(container_name), str(podname), str(namespace)))
@@ -178,10 +218,11 @@ def retry_container_killing(kill_action, podname, namespace, container_name, kub
             time.sleep(2)
             continue
         else:
             logging.warning(response)
+            continue


-def check_failed_containers(killed_container_list, wait_time, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
+def check_failed_containers(killed_container_list, wait_time, kubecli: KrknKubernetes):

     container_ready = []
     timer = 0
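One behavioral change worth calling out in the hunk above: the scenario's action field used to be a free-form string defaulting to "kill 1", and is now expected to be a plain signal number between 1 and 15, which the code turns back into a kill command. A minimal sketch of that validation under the same config shape:

import logging

def build_kill_command(cont_scenario: dict) -> str:
    kill_action = cont_scenario.get("action", 1)
    if not isinstance(kill_action, int):
        logging.error("Please make sure the action parameter defined in the config is an integer")
        raise RuntimeError()
    if kill_action < 1 or kill_action > 15:
        logging.error("Only 1-15 kill signals are supported.")
        raise RuntimeError()
    return "kill " + str(kill_action)

print(build_kill_command({"action": 9}))  # kill 9 (SIGKILL)
print(build_kill_command({}))             # kill 1 (SIGHUP), the default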
@@ -1,157 +1,247 @@
 import logging
 import random
 import re
 import sys
 import time
-import krkn_lib_kubernetes
 import yaml

 from ..cerberus import setup as cerberus
+from krkn_lib.k8s import KrknKubernetes
+from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
+from krkn_lib.models.telemetry import ScenarioTelemetry
+from krkn_lib.utils.functions import get_yaml_item_value, log_exception


-# krkn_lib_kubernetes
-def run(scenarios_list, config, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
+# krkn_lib
+def run(scenarios_list, config, kubecli: KrknKubernetes, telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]):
     """
     Reads the scenario config and creates a temp file to fill up the PVC
     """
     failed_post_scenarios = ""
+    scenario_telemetries: list[ScenarioTelemetry] = []
+    failed_scenarios = []
     for app_config in scenarios_list:
-        if len(app_config) > 1:
-            with open(app_config, "r") as f:
-                config_yaml = yaml.full_load(f)
-                scenario_config = config_yaml["pvc_scenario"]
-                pvc_name = scenario_config.get("pvc_name", "")
-                pod_name = scenario_config.get("pod_name", "")
-                namespace = scenario_config.get("namespace", "")
-                target_fill_percentage = scenario_config.get(
-                    "fill_percentage", "50"
-                )
-                duration = scenario_config.get("duration", 60)
+        scenario_telemetry = ScenarioTelemetry()
+        scenario_telemetry.scenario = app_config
+        scenario_telemetry.startTimeStamp = time.time()
+        telemetry.set_parameters_base64(scenario_telemetry, app_config)
+        try:
+            if len(app_config) > 1:
+                with open(app_config, "r") as f:
+                    config_yaml = yaml.full_load(f)
+                    scenario_config = config_yaml["pvc_scenario"]
+                    pvc_name = get_yaml_item_value(
+                        scenario_config, "pvc_name", ""
+                    )
+                    pod_name = get_yaml_item_value(
+                        scenario_config, "pod_name", ""
+                    )
+                    namespace = get_yaml_item_value(
+                        scenario_config, "namespace", ""
+                    )
+                    target_fill_percentage = get_yaml_item_value(
+                        scenario_config, "fill_percentage", "50"
+                    )
+                    duration = get_yaml_item_value(
+                        scenario_config, "duration", 60
+                    )

                     logging.info(
                         "Input params:\n"
                         "pvc_name: '%s'\n"
                         "pod_name: '%s'\n"
                         "namespace: '%s'\n"
                         "target_fill_percentage: '%s%%'\nduration: '%ss'"
                         % (
                             str(pvc_name),
                             str(pod_name),
                             str(namespace),
                             str(target_fill_percentage),
                             str(duration)
                         )
                     )

                     # Check input params
                     if namespace is None:
                         logging.error(
                             "You must specify the namespace where the PVC is"
                         )
-                        sys.exit(1)
+                        #sys.exit(1)
+                        raise RuntimeError()
                     if pvc_name is None and pod_name is None:
                         logging.error(
                             "You must specify the pvc_name or the pod_name"
                         )
-                        sys.exit(1)
+                        # sys.exit(1)
+                        raise RuntimeError()
                     if pvc_name and pod_name:
                         logging.info(
                             "pod_name will be ignored, pod_name used will be "
                             "a retrieved from the pod used in the pvc_name"
                         )

                     # Get pod name
                     if pvc_name:
                         if pod_name:
                             logging.info(
                                 "pod_name '%s' will be overridden with one of "
                                 "the pods mounted in the PVC" % (str(pod_name))
                             )
                         pvc = kubecli.get_pvc_info(pvc_name, namespace)
                         try:
                             # random generator not used for
                             # security/cryptographic purposes.
                             pod_name = random.choice(pvc.podNames)  # nosec
                             logging.info("Pod name: %s" % pod_name)
                         except Exception:
                             logging.error(
                                 "Pod associated with %s PVC, on namespace %s, "
                                 "not found" % (str(pvc_name), str(namespace))
                             )
-                            sys.exit(1)
+                            # sys.exit(1)
+                            raise RuntimeError()

                     # Get volume name
                     pod = kubecli.get_pod_info(name=pod_name, namespace=namespace)

                     if pod is None:
                         logging.error(
                             "Exiting as pod '%s' doesn't exist "
                             "in namespace '%s'" % (
                                 str(pod_name),
                                 str(namespace)
                             )
                         )
-                        sys.exit(1)
+                        # sys.exit(1)
+                        raise RuntimeError()

                     for volume in pod.volumes:
                         if volume.pvcName is not None:
                             volume_name = volume.name
                             pvc_name = volume.pvcName
                             pvc = kubecli.get_pvc_info(pvc_name, namespace)
                             break
                     if 'pvc' not in locals():
                         logging.error(
                             "Pod '%s' in namespace '%s' does not use a pvc" % (
                                 str(pod_name),
                                 str(namespace)
                             )
                         )
-                        sys.exit(1)
+                        # sys.exit(1)
+                        raise RuntimeError()
                     logging.info("Volume name: %s" % volume_name)
                     logging.info("PVC name: %s" % pvc_name)

                     # Get container name and mount path
                     for container in pod.containers:
                         for vol in container.volumeMounts:
                             if vol.name == volume_name:
                                 mount_path = vol.mountPath
                                 container_name = container.name
                                 break
                     logging.info("Container path: %s" % container_name)
                     logging.info("Mount path: %s" % mount_path)

                     # Get PVC capacity and used bytes
                     command = "df %s -B 1024 | sed 1d" % (str(mount_path))
                     command_output = (
                         kubecli.exec_cmd_in_pod(
                             command,
                             pod_name,
                             namespace,
                             container_name
                         )
                     ).split()
                     pvc_used_kb = int(command_output[2])
                     pvc_capacity_kb = pvc_used_kb + int(command_output[3])
                     logging.info("PVC used: %s KB" % pvc_used_kb)
                     logging.info("PVC capacity: %s KB" % pvc_capacity_kb)

                     # Check valid fill percentage
                     current_fill_percentage = pvc_used_kb / pvc_capacity_kb
                     if not (
                         current_fill_percentage * 100
                         < float(target_fill_percentage)
                         <= 99
                     ):
                         logging.error(
                             "Target fill percentage (%.2f%%) is lower than "
                             "current fill percentage (%.2f%%) "
                             "or higher than 99%%" % (
                                 target_fill_percentage,
                                 current_fill_percentage * 100
                             )
                         )
-                        sys.exit(1)
+                        # sys.exit(1)
+                        raise RuntimeError()

                     # Calculate file size
                     file_size_kb = int(
                         (
                             float(
                                 target_fill_percentage / 100
                             ) * float(pvc_capacity_kb)
                         ) - float(pvc_used_kb)
                     )
                     logging.debug("File size: %s KB" % file_size_kb)

                     file_name = "kraken.tmp"
                     logging.info(
                         "Creating %s file, %s KB size, in pod %s at %s (ns %s)"
                         % (
                             str(file_name),
                             str(file_size_kb),
                             str(pod_name),
                             str(mount_path),
                             str(namespace)
                         )
                     )

                     start_time = int(time.time())
                     # Create temp file in the PVC
                     full_path = "%s/%s" % (str(mount_path), str(file_name))
                     command = "fallocate -l $((%s*1024)) %s" % (
                         str(file_size_kb),
                         str(full_path)
                     )
                     logging.debug(
@@ -186,26 +276,25 @@ def run(scenarios_list, config, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
                         "Create temp file in the PVC command:\n %s" % command
                     )
                     kubecli.exec_cmd_in_pod(
-                        command, pod_name, namespace, container_name, "sh"
+                        command, pod_name, namespace, container_name
                     )

                     # Check if file is created
                     command = "ls -lh %s" % (str(mount_path))
                     logging.debug("Check file is created command:\n %s" % command)
                     response = kubecli.exec_cmd_in_pod(
-                        command, pod_name, namespace, container_name, "sh"
+                        command, pod_name, namespace, container_name
                     )
                     logging.info("\n" + str(response))
                     if str(file_name).lower() in str(response).lower():
                         logging.info(
                             "%s file successfully created" % (str(full_path))
                         )
                     else:
                         logging.error(
                             "Failed to create tmp file with %s size" % (
                                 str(file_size_kb)
                             )
                         )
                         remove_temp_file(
                             file_name,
                             full_path,
                             pod_name,
                             namespace,
                             container_name,
                             mount_path,
                             file_size_kb,
                             kubecli
                         )
-                        sys.exit(1)
+                        # sys.exit(1)
+                        raise RuntimeError()

                     # Wait for the specified duration
                     logging.info(
                         "Waiting for the specified duration in the config: %ss" % (
                             duration
                         )
                     )
                     time.sleep(duration)
                     logging.info("Finish waiting")

                     remove_temp_file(
                         file_name,
                         full_path,
@@ -216,38 +305,28 @@ def run(scenarios_list, config, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
                         pod_name,
                         namespace,
                         container_name,
                         mount_path,
                         file_size_kb,
                         kubecli
                     )

                     end_time = int(time.time())
                     cerberus.publish_kraken_status(
                         config,
                         failed_post_scenarios,
                         start_time,
                         end_time
                     )
+        except (RuntimeError, Exception):
+            scenario_telemetry.exitStatus = 1
+            failed_scenarios.append(app_config)
+            log_exception(app_config)
+        else:
+            scenario_telemetry.exitStatus = 0
+        scenario_telemetries.append(scenario_telemetry)
+    return failed_scenarios, scenario_telemetries


-# krkn_lib_kubernetes
+# krkn_lib
 def remove_temp_file(
     file_name,
     full_path,
@@ -256,19 +335,18 @@ def remove_temp_file(
     pod_name,
     namespace,
     container_name,
     mount_path,
     file_size_kb,
-    kubecli: krkn_lib_kubernetes.KrknLibKubernetes
+    kubecli: KrknKubernetes
 ):
     command = "rm -f %s" % (str(full_path))
     logging.debug("Remove temp file from the PVC command:\n %s" % command)
-    kubecli.exec_cmd_in_pod(command, pod_name, namespace, container_name, "sh")
+    kubecli.exec_cmd_in_pod(command, pod_name, namespace, container_name)
     command = "ls -lh %s" % (str(mount_path))
     logging.debug("Check temp file is removed command:\n %s" % command)
     response = kubecli.exec_cmd_in_pod(
         command,
         pod_name,
         namespace,
-        container_name,
-        "sh"
+        container_name
     )
     logging.info("\n" + str(response))
     if not (str(file_name).lower() in str(response).lower()):
@@ -277,7 +355,7 @@ def remove_temp_file(
         logging.error(
             "Failed to delete tmp file with %s size" % (str(file_size_kb))
         )
-        sys.exit(1)
+        raise RuntimeError()


 def toKbytes(value):
@@ -286,7 +364,7 @@ def toKbytes(value):
         "PVC capacity %s does not match expression "
         "regexp '^[0-9]+[K|M|G|T]i$'"
     )
-    sys.exit(1)
+    raise RuntimeError()
     unit = {"K": 0, "M": 1, "G": 2, "T": 3}
     base = 1024 if ("i" in value) else 1000
     exp = unit[value[-2:-1]]
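The fill calculation in the PVC hunk above is easy to sanity-check in isolation: the temp file is sized to bring volume usage up to exactly the target percentage. A standalone sketch with made-up numbers, using the same arithmetic as the hunk (the function name here is illustrative):

def temp_file_size_kb(pvc_used_kb: int, pvc_capacity_kb: int, target_fill_percentage: float) -> int:
    current = pvc_used_kb / pvc_capacity_kb * 100
    # mirrors the hunk's guard: target must exceed current usage, max 99%
    if not (current < float(target_fill_percentage) <= 99):
        raise RuntimeError("target must be above current usage and at most 99%")
    return int((float(target_fill_percentage) / 100) * float(pvc_capacity_kb) - float(pvc_used_kb))

# 10 GiB volume, 2 GiB used, fill to 50% -> a 3 GiB file
print(temp_file_size_kb(2 * 1024 * 1024, 10 * 1024 * 1024, 50))  # 3145728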
325 kraken/service_disruption/common_service_disruption_functions.py Normal file
@@ -0,0 +1,325 @@
+import time
+import random
+import logging
+import kraken.cerberus.setup as cerberus
+import kraken.post_actions.actions as post_actions
+import yaml
+from krkn_lib.k8s import KrknKubernetes
+from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
+from krkn_lib.models.telemetry import ScenarioTelemetry
+from krkn_lib.utils.functions import get_yaml_item_value, log_exception
+
+
+def delete_objects(kubecli, namespace):
+
+    services = delete_all_services_namespace(kubecli, namespace)
+    daemonsets = delete_all_daemonset_namespace(kubecli, namespace)
+    statefulsets = delete_all_statefulsets_namespace(kubecli, namespace)
+    replicasets = delete_all_replicaset_namespace(kubecli, namespace)
+    deployments = delete_all_deployment_namespace(kubecli, namespace)
+
+    objects = { "daemonsets": daemonsets,
+                "deployments": deployments,
+                "replicasets": replicasets,
+                "statefulsets": statefulsets,
+                "services": services
+                }
+
+    return objects
+
+
+def get_list_running_pods(kubecli: KrknKubernetes, namespace: str):
+    running_pods = []
+    pods = kubecli.list_pods(namespace)
+    for pod in pods:
+        pod_status = kubecli.get_pod_info(pod, namespace)
+        if pod_status and pod_status.status == "Running":
+            running_pods.append(pod)
+    logging.info('all running pods ' + str(running_pods))
+    return running_pods
+
+
+def delete_all_deployment_namespace(kubecli: KrknKubernetes, namespace: str):
+    """
+    Delete all the deployments in the specified namespace
+
+    :param kubecli: krkn kubernetes python package
+    :param namespace: namespace
+    """
+    try:
+        deployments = kubecli.get_deployment_ns(namespace)
+        for deployment in deployments:
+            logging.info("Deleting deployment" + deployment)
+            kubecli.delete_deployment(deployment, namespace)
+    except Exception as e:
+        logging.error(
+            "Exception when calling delete_all_deployment_namespace: %s\n",
+            str(e),
+        )
+        raise e
+
+    return deployments
+
+
+def delete_all_daemonset_namespace(kubecli: KrknKubernetes, namespace: str):
+    """
+    Delete all the daemonset in the specified namespace
+
+    :param kubecli: krkn kubernetes python package
+    :param namespace: namespace
+    """
+    try:
+        daemonsets = kubecli.get_daemonset(namespace)
+        for daemonset in daemonsets:
+            logging.info("Deleting daemonset" + daemonset)
+            kubecli.delete_daemonset(daemonset, namespace)
+    except Exception as e:
+        logging.error(
+            "Exception when calling delete_all_daemonset_namespace: %s\n",
+            str(e),
+        )
+        raise e
+
+    return daemonsets
+
+
+def delete_all_statefulsets_namespace(kubecli: KrknKubernetes, namespace: str):
+    """
+    Delete all the statefulsets in the specified namespace
+
+    :param kubecli: krkn kubernetes python package
+    :param namespace: namespace
+    """
+    try:
+        statefulsets = kubecli.get_all_statefulset(namespace)
+        for statefulset in statefulsets:
+            logging.info("Deleting statefulsets" + statefulsets)
+            kubecli.delete_statefulset(statefulset, namespace)
+    except Exception as e:
+        logging.error(
+            "Exception when calling delete_all_statefulsets_namespace: %s\n",
+            str(e),
+        )
+        raise e
+
+    return statefulsets
+
+
+def delete_all_replicaset_namespace(kubecli: KrknKubernetes, namespace: str):
+    """
+    Delete all the replicasets in the specified namespace
+
+    :param kubecli: krkn kubernetes python package
+    :param namespace: namespace
+    """
+    try:
+        replicasets = kubecli.get_all_replicasets(namespace)
+        for replicaset in replicasets:
+            logging.info("Deleting replicaset" + replicaset)
+            kubecli.delete_replicaset(replicaset, namespace)
+    except Exception as e:
+        logging.error(
+            "Exception when calling delete_all_replicaset_namespace: %s\n",
+            str(e),
+        )
+        raise e
+
+    return replicasets
+
+
+def delete_all_services_namespace(kubecli: KrknKubernetes, namespace: str):
+    """
+    Delete all the services in the specified namespace
+
+    :param kubecli: krkn kubernetes python package
+    :param namespace: namespace
+    """
+    try:
+        services = kubecli.get_all_services(namespace)
+        for service in services:
+            logging.info("Deleting services" + service)
+            kubecli.delete_services(service, namespace)
+    except Exception as e:
+        logging.error(
+            "Exception when calling delete_all_services_namespace: %s\n",
+            str(e),
+        )
+        raise e
+
+    return services
+
+
+# krkn_lib
+def run(
+    scenarios_list,
+    config,
+    wait_duration,
+    failed_post_scenarios,
+    kubeconfig_path,
+    kubecli: KrknKubernetes,
+    telemetry: KrknTelemetryKubernetes
+) -> (list[str], list[ScenarioTelemetry]):
+    scenario_telemetries: list[ScenarioTelemetry] = []
+    failed_scenarios = []
+    for scenario_config in scenarios_list:
+        scenario_telemetry = ScenarioTelemetry()
+        scenario_telemetry.scenario = scenario_config[0]
+        scenario_telemetry.startTimeStamp = time.time()
+        telemetry.set_parameters_base64(scenario_telemetry, scenario_config[0])
+        try:
+            if len(scenario_config) > 1:
+                pre_action_output = post_actions.run(kubeconfig_path, scenario_config[1])
+            else:
+                pre_action_output = ""
+            with open(scenario_config[0], "r") as f:
+                scenario_config_yaml = yaml.full_load(f)
+                for scenario in scenario_config_yaml["scenarios"]:
+                    scenario_namespace = get_yaml_item_value(
+                        scenario, "namespace", ""
+                    )
+                    scenario_label = get_yaml_item_value(
+                        scenario, "label_selector", ""
+                    )
+                    if scenario_namespace is not None and scenario_namespace.strip() != "":
+                        if scenario_label is not None and scenario_label.strip() != "":
+                            logging.error("You can only have namespace or label set in your namespace scenario")
+                            logging.error(
+                                "Current scenario config has namespace '%s' and label selector '%s'"
+                                % (scenario_namespace, scenario_label)
+                            )
+                            logging.error(
+                                "Please set either namespace to blank ('') or label_selector to blank ('') to continue"
+                            )
+                            # removed_exit
+                            # sys.exit(1)
+                            raise RuntimeError()
+                    delete_count = get_yaml_item_value(
+                        scenario, "delete_count", 1
+                    )
+                    run_count = get_yaml_item_value(scenario, "runs", 1)
+                    run_sleep = get_yaml_item_value(scenario, "sleep", 10)
+                    wait_time = get_yaml_item_value(scenario, "wait_time", 30)
+
+                    logging.info(str(scenario_namespace) + str(scenario_label) + str(delete_count) + str(run_count) + str(run_sleep) + str(wait_time))
+                    logging.info("done")
+                    start_time = int(time.time())
+                    for i in range(run_count):
+                        killed_namespaces = {}
+                        namespaces = kubecli.check_namespaces([scenario_namespace], scenario_label)
+                        for j in range(delete_count):
+                            if len(namespaces) == 0:
+                                logging.error(
+                                    "Couldn't delete %s namespaces, not enough namespaces matching %s with label %s"
+                                    % (str(run_count), scenario_namespace, str(scenario_label))
+                                )
+                                # removed_exit
+                                # sys.exit(1)
+                                raise RuntimeError()
+                            selected_namespace = namespaces[random.randint(0, len(namespaces) - 1)]
+                            logging.info('Delete objects in selected namespace: ' + selected_namespace)
+                            try:
+                                # delete all pods in namespace
+                                objects = delete_objects(kubecli, selected_namespace)
+                                killed_namespaces[selected_namespace] = objects
+                                logging.info("Deleted all objects in namespace %s was successful" % str(selected_namespace))
+                            except Exception as e:
+                                logging.info("Delete all objects in namespace %s was unsuccessful" % str(selected_namespace))
+                                logging.info("Namespace action error: " + str(e))
+                                raise RuntimeError()
+                            namespaces.remove(selected_namespace)
+                            logging.info("Waiting %s seconds between namespace deletions" % str(run_sleep))
+                            time.sleep(run_sleep)
+
+                        logging.info("Waiting for the specified duration: %s" % wait_duration)
+                        time.sleep(wait_duration)
+                        if len(scenario_config) > 1:
+                            try:
+                                failed_post_scenarios = post_actions.check_recovery(
+                                    kubeconfig_path, scenario_config, failed_post_scenarios, pre_action_output
+                                )
+                            except Exception as e:
+                                logging.error("Failed to run post action checks: %s" % e)
+                                # removed_exit
+                                # sys.exit(1)
+                                raise RuntimeError()
+                        else:
+                            failed_post_scenarios = check_all_running_deployment(killed_namespaces, wait_time, kubecli)
+
+                    end_time = int(time.time())
+                    cerberus.publish_kraken_status(config, failed_post_scenarios, start_time, end_time)
+        except (Exception, RuntimeError):
+            scenario_telemetry.exitStatus = 1
+            failed_scenarios.append(scenario_config[0])
+            log_exception(scenario_config[0])
+        else:
+            scenario_telemetry.exitStatus = 0
+        scenario_telemetry.endTimeStamp = time.time()
+        scenario_telemetries.append(scenario_telemetry)
+    return failed_scenarios, scenario_telemetries
+
+
+def check_all_running_pods(kubecli: KrknKubernetes, namespace_name, wait_time):
+
+    timer = 0
+    while timer < wait_time:
+        pod_list = kubecli.list_pods(namespace_name)
+        pods_running = 0
+        for pod in pod_list:
+            pod_info = kubecli.get_pod_info(pod, namespace_name)
+            if pod_info.status != "Running" and pod_info.status != "Succeeded":
+                logging.info("Pods %s still not running or completed" % pod_info.name)
+                break
+            pods_running += 1
+        if len(pod_list) == pods_running:
+            break
+        timer += 5
+        time.sleep(5)
+        logging.info("Waiting 5 seconds for pods to become active")
+
+
+# krkn_lib
+def check_all_running_deployment(killed_namespaces, wait_time, kubecli: KrknKubernetes):
+
+    timer = 0
+    while timer < wait_time and killed_namespaces:
+        still_missing_ns = killed_namespaces.copy()
+        for namespace_name, objects in killed_namespaces.items():
+            still_missing_obj = objects.copy()
+            for obj_name, obj_list in objects.items():
+                if "deployments" == obj_name:
+                    deployments = kubecli.get_deployment_ns(namespace_name)
+                    if len(obj_list) == len(deployments):
+                        still_missing_obj.pop(obj_name)
+                elif "replicasets" == obj_name:
+                    replicasets = kubecli.get_all_replicasets(namespace_name)
+                    if len(obj_list) == len(replicasets):
+                        still_missing_obj.pop(obj_name)
+                elif "statefulsets" == obj_name:
+                    statefulsets = kubecli.get_all_statefulset(namespace_name)
+                    if len(obj_list) == len(statefulsets):
+                        still_missing_obj.pop(obj_name)
+                elif "services" == obj_name:
+                    services = kubecli.get_all_services(namespace_name)
+                    if len(obj_list) == len(services):
+                        still_missing_obj.pop(obj_name)
+                elif "daemonsets" == obj_name:
+                    daemonsets = kubecli.get_daemonset(namespace_name)
+                    if len(obj_list) == len(daemonsets):
+                        still_missing_obj.pop(obj_name)
+            logging.info("Still missing objects " + str(still_missing_obj))
+            killed_namespaces[namespace_name] = still_missing_obj.copy()
+            if len(killed_namespaces[namespace_name].keys()) == 0:
+                logging.info("Wait for pods to become running for namespace: " + namespace_name)
+                check_all_running_pods(kubecli, namespace_name, wait_time)
+                still_missing_ns.pop(namespace_name)
+        killed_namespaces = still_missing_ns
+        if len(killed_namespaces.keys()) == 0:
+            return []
+
+        timer += 10
+        time.sleep(10)
+        logging.info("Waiting 10 seconds for objects in namespaces to become active")
+
+    logging.error("Objects are still not ready after waiting " + str(wait_time) + "seconds")
+    logging.error("Non active namespaces " + str(killed_namespaces))
+    return killed_namespaces
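The recovery check in this new file works by counting: it snapshots what was deleted per namespace, then keeps polling until each resource type reports the same number of objects again. A reduced sketch of that comparison loop for one resource type, with a fake client (the client here is a stand-in, not the krkn_lib API):

import time

class FakeClient:
    # stand-in client: pretends deployments come back after a few polls
    def __init__(self):
        self.polls = 0
    def get_deployment_ns(self, namespace):
        self.polls += 1
        return ["app-a", "app-b"] if self.polls >= 3 else ["app-a"]

def wait_for_recovery(deleted, namespace, kubecli, wait_time=30):
    timer = 0
    while timer < wait_time:
        if len(kubecli.get_deployment_ns(namespace)) == len(deleted):
            return True   # same count as before the disruption
        timer += 10
        time.sleep(0)     # the real loop sleeps 10s; 0 keeps the demo fast
    return False

print(wait_for_recovery(["app-a", "app-b"], "demo", FakeClient()))  # True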
@@ -1,19 +1,18 @@
 #!/usr/bin/env python

-import sys
 import yaml
 import logging
 import time
-import krkn_lib_kubernetes
 from multiprocessing.pool import ThreadPool

 from ..cerberus import setup as cerberus
 from ..post_actions import actions as post_actions
 from ..node_actions.aws_node_scenarios import AWS
 from ..node_actions.openstack_node_scenarios import OPENSTACKCLOUD
 from ..node_actions.az_node_scenarios import Azure
 from ..node_actions.gcp_node_scenarios import GCP

+from krkn_lib.k8s import KrknKubernetes
+from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
+from krkn_lib.models.telemetry import ScenarioTelemetry
+from krkn_lib.utils.functions import log_exception

 def multiprocess_nodes(cloud_object_function, nodes):
     try:
@@ -40,8 +39,8 @@ def multiprocess_nodes(cloud_object_function, nodes):


 # Inject the cluster shut down scenario
-# krkn_lib_kubernetes
-def cluster_shut_down(shut_down_config, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
+# krkn_lib
+def cluster_shut_down(shut_down_config, kubecli: KrknKubernetes):
     runs = shut_down_config["runs"]
     shut_down_duration = shut_down_config["shut_down_duration"]
     cloud_type = shut_down_config["cloud_type"]
@@ -59,7 +58,9 @@ def cluster_shut_down(shut_down_config, kubecli: krkn_lib_kubernetes.KrknLibKube
             "Cloud type %s is not currently supported for cluster shut down" %
             cloud_type
         )
-        sys.exit(1)
+        # removed_exit
+        # sys.exit(1)
+        raise RuntimeError()

     nodes = kubecli.list_nodes()
     node_id = []
@@ -126,11 +127,18 @@ def cluster_shut_down(shut_down_config, kubecli: krkn_lib_kubernetes.KrknLibKube

     logging.info("Successfully injected cluster_shut_down scenario!")

-# krkn_lib_kubernetes
+# krkn_lib

-def run(scenarios_list, config, wait_duration, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
+def run(scenarios_list, config, wait_duration, kubecli: KrknKubernetes, telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]):
     failed_post_scenarios = []
+    failed_scenarios = []
+    scenario_telemetries: list[ScenarioTelemetry] = []

     for shut_down_config in scenarios_list:
+        scenario_telemetry = ScenarioTelemetry()
+        scenario_telemetry.scenario = shut_down_config
+        scenario_telemetry.startTimeStamp = time.time()
+        telemetry.set_parameters_base64(scenario_telemetry, shut_down_config[0])
         if len(shut_down_config) > 1:
             pre_action_output = post_actions.run("", shut_down_config[1])
         else:
@@ -140,18 +148,32 @@ def run(scenarios_list, config, wait_duration, kubecli: krkn_lib_kubernetes.Krkn
         shut_down_config_scenario = \
             shut_down_config_yaml["cluster_shut_down_scenario"]
         start_time = int(time.time())
-        cluster_shut_down(shut_down_config_scenario, kubecli)
-        logging.info(
-            "Waiting for the specified duration: %s" % (wait_duration)
-        )
-        time.sleep(wait_duration)
-        failed_post_scenarios = post_actions.check_recovery(
-            "", shut_down_config, failed_post_scenarios, pre_action_output
-        )
-        end_time = int(time.time())
-        cerberus.publish_kraken_status(
-            config,
-            failed_post_scenarios,
-            start_time,
-            end_time
-        )
+        try:
+            cluster_shut_down(shut_down_config_scenario, kubecli)
+            logging.info(
+                "Waiting for the specified duration: %s" % (wait_duration)
+            )
+            time.sleep(wait_duration)
+            failed_post_scenarios = post_actions.check_recovery(
+                "", shut_down_config, failed_post_scenarios, pre_action_output
+            )
+            end_time = int(time.time())
+            cerberus.publish_kraken_status(
+                config,
+                failed_post_scenarios,
+                start_time,
+                end_time
+            )

+        except (RuntimeError, Exception):
+            log_exception(shut_down_config[0])
+            failed_scenarios.append(shut_down_config[0])
+            scenario_telemetry.exitStatus = 1
+        else:
+            scenario_telemetry.exitStatus = 0

+        scenario_telemetry.endTimeStamp = time.time()
+        scenario_telemetries.append(scenario_telemetry)

+    return failed_scenarios, scenario_telemetries
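cluster_shut_down drives every node's stop and start through the ThreadPool-based multiprocess_nodes helper, whose body is mostly elided in this view. A sketch of the general shape under that assumption (this is not the file's exact code):

from multiprocessing.pool import ThreadPool

def multiprocess_nodes(cloud_object_function, nodes):
    # apply one cloud operation (e.g. stop or start) to all nodes concurrently
    try:
        with ThreadPool(processes=len(nodes)) as pool:
            pool.map(cloud_object_function, nodes)
    except Exception as e:
        print("Error on pool.map: %s" % e)
        raise

multiprocess_nodes(print, ["node-1", "node-2", "node-3"])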
@@ -2,15 +2,18 @@ import datetime
 import time
 import logging
 import re
 import sys
 import yaml
 import random
-import krkn_lib_kubernetes
 from ..cerberus import setup as cerberus
 from ..invoke import command as runcommand
+from krkn_lib.k8s import KrknKubernetes
+from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
+from krkn_lib.models.telemetry import ScenarioTelemetry
+from krkn_lib.utils.functions import get_yaml_item_value, log_exception

-# krkn_lib_kubernetes
-def pod_exec(pod_name, command, namespace, container_name, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):

+# krkn_lib
+def pod_exec(pod_name, command, namespace, container_name, kubecli: KrknKubernetes):
     for i in range(5):
         response = kubecli.exec_cmd_in_pod(
             command,
@@ -39,8 +42,8 @@ def node_debug(node_name, command):
     return response


-# krkn_lib_kubernetes
-def get_container_name(pod_name, namespace, kubecli: krkn_lib_kubernetes.KrknLibKubernetes, container_name=""):
+# krkn_lib
+def get_container_name(pod_name, namespace, kubecli: KrknKubernetes, container_name=""):

     container_names = kubecli.get_containers_in_pod(pod_name, namespace)
     if container_name != "":
@@ -62,9 +65,9 @@ def get_container_name(pod_name, namespace, kubecli: krkn_lib_kubernetes.KrknLib
     return container_name


-# krkn_lib_kubernetes
-def skew_time(scenario, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
-    skew_command = "date --set "
+# krkn_lib
+def skew_time(scenario, kubecli: KrknKubernetes):
+    skew_command = "date --date "
     if scenario["action"] == "skew_date":
         skewed_date = "00-01-01"
         skew_command += skewed_date
@@ -87,13 +90,15 @@ def skew_time(scenario, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
         return "node", node_names

     elif "pod" in scenario["object_type"]:
-        container_name = scenario.get("container_name", "")
+        container_name = get_yaml_item_value(scenario, "container_name", "")
         pod_names = []
         if "object_name" in scenario.keys() and scenario["object_name"]:
             for name in scenario["object_name"]:
                 if "namespace" not in scenario.keys():
                     logging.error("Need to set namespace when using pod name")
-                    sys.exit(1)
+                    # removed_exit
+                    # sys.exit(1)
+                    raise RuntimeError()
                 pod_names.append([name, scenario["namespace"]])
         elif "namespace" in scenario.keys() and scenario["namespace"]:
             if "label_selector" not in scenario.keys():
@@ -127,7 +132,9 @@ def skew_time(scenario, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
                 "Cannot find pods matching the namespace/label_selector, "
                 "please check"
             )
-            sys.exit(1)
+            # removed_exit
+            # sys.exit(1)
+            raise RuntimeError()
         pod_counter = 0
         for pod in pod_names:
             if len(pod) > 1:
@@ -152,7 +159,9 @@ def skew_time(scenario, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
                         "in pod %s in namespace %s"
                         % (selected_container_name, pod[0], pod[1])
                     )
-                    sys.exit(1)
+                    # removed_exit
+                    # sys.exit(1)
+                    raise RuntimeError()
                 pod_names[pod_counter].append(selected_container_name)
             else:
                 selected_container_name = get_container_name(
@@ -178,7 +187,9 @@ def skew_time(scenario, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
                         scenario["namespace"]
                     )
                 )
-                sys.exit(1)
+                # removed_exit
+                # sys.exit(1)
+                raise RuntimeError()
             pod_names[pod_counter].append(selected_container_name)
             logging.info("Reset date/time on pod " + str(pod[0]))
             pod_counter += 1
@@ -222,8 +233,8 @@ def string_to_date(obj_datetime):
     return datetime.datetime(datetime.MINYEAR, 1, 1)


-# krkn_lib_kubernetes
-def check_date_time(object_type, names, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
+# krkn_lib
+def check_date_time(object_type, names, kubecli: KrknKubernetes):
     skew_command = "date"
     not_reset = []
     max_retries = 30
@@ -298,25 +309,42 @@ def check_date_time(object_type, names, kubecli: krkn_lib_kubernetes.KrknLibKube
     return not_reset


-# krkn_lib_kubernetes
-def run(scenarios_list, config, wait_duration, kubecli: krkn_lib_kubernetes.KrknLibKubernetes):
+# krkn_lib
+def run(scenarios_list, config, wait_duration, kubecli: KrknKubernetes, telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]):
+    failed_scenarios = []
+    scenario_telemetries: list[ScenarioTelemetry] = []
     for time_scenario_config in scenarios_list:
-        with open(time_scenario_config, "r") as f:
-            scenario_config = yaml.full_load(f)
-            for time_scenario in scenario_config["time_scenarios"]:
-                start_time = int(time.time())
-                object_type, object_names = skew_time(time_scenario, kubecli)
-                not_reset = check_date_time(object_type, object_names, kubecli)
-                if len(not_reset) > 0:
-                    logging.info("Object times were not reset")
-                logging.info(
-                    "Waiting for the specified duration: %s" % (wait_duration)
-                )
-                time.sleep(wait_duration)
-                end_time = int(time.time())
-                cerberus.publish_kraken_status(
-                    config,
-                    not_reset,
-                    start_time,
-                    end_time
-                )
+        scenario_telemetry = ScenarioTelemetry()
+        scenario_telemetry.scenario = time_scenario_config
+        scenario_telemetry.startTimeStamp = time.time()
+        telemetry.set_parameters_base64(scenario_telemetry, time_scenario_config)
+        try:
+            with open(time_scenario_config, "r") as f:
+                scenario_config = yaml.full_load(f)
+                for time_scenario in scenario_config["time_scenarios"]:
+                    start_time = int(time.time())
+                    object_type, object_names = skew_time(time_scenario, kubecli)
+                    not_reset = check_date_time(object_type, object_names, kubecli)
+                    if len(not_reset) > 0:
+                        logging.info("Object times were not reset")
+                    logging.info(
+                        "Waiting for the specified duration: %s" % (wait_duration)
+                    )
+                    time.sleep(wait_duration)
+                    end_time = int(time.time())
+                    cerberus.publish_kraken_status(
+                        config,
+                        not_reset,
+                        start_time,
+                        end_time
+                    )
+        except (RuntimeError, Exception):
+            scenario_telemetry.exitStatus = 1
+            log_exception(time_scenario_config)
+            failed_scenarios.append(time_scenario_config)
+        else:
+            scenario_telemetry.exitStatus = 0
+        scenario_telemetry.endTimeStamp = time.time()
+        scenario_telemetries.append(scenario_telemetry)

+    return failed_scenarios, scenario_telemetries
@@ -1,100 +1,121 @@
import yaml
import sys
import logging
import time
from ..node_actions.aws_node_scenarios import AWS
from ..cerberus import setup as cerberus
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
from krkn_lib.models.telemetry import ScenarioTelemetry
from krkn_lib.utils.functions import log_exception


def run(scenarios_list, config, wait_duration):
def run(scenarios_list, config, wait_duration, telemetry: KrknTelemetryKubernetes) -> (list[str], list[ScenarioTelemetry]) :
"""
filters the subnet of interest and applies the network acl
to create zone outage
"""
failed_post_scenarios = ""
scenario_telemetries: list[ScenarioTelemetry] = []
failed_scenarios = []

for zone_outage_config in scenarios_list:
if len(zone_outage_config) > 1:
with open(zone_outage_config, "r") as f:
zone_outage_config_yaml = yaml.full_load(f)
scenario_config = zone_outage_config_yaml["zone_outage"]
vpc_id = scenario_config["vpc_id"]
subnet_ids = scenario_config["subnet_id"]
duration = scenario_config["duration"]
cloud_type = scenario_config["cloud_type"]
ids = {}
acl_ids_created = []
scenario_telemetry = ScenarioTelemetry()
scenario_telemetry.scenario = zone_outage_config
scenario_telemetry.startTimeStamp = time.time()
telemetry.set_parameters_base64(scenario_telemetry, zone_outage_config)
try:
if len(zone_outage_config) > 1:
with open(zone_outage_config, "r") as f:
zone_outage_config_yaml = yaml.full_load(f)
scenario_config = zone_outage_config_yaml["zone_outage"]
vpc_id = scenario_config["vpc_id"]
subnet_ids = scenario_config["subnet_id"]
duration = scenario_config["duration"]
cloud_type = scenario_config["cloud_type"]
ids = {}
acl_ids_created = []

if cloud_type.lower() == "aws":
cloud_object = AWS()
else:
logging.error(
"Cloud type %s is not currently supported for "
"zone outage scenarios"
% cloud_type
)
sys.exit(1)

start_time = int(time.time())

for subnet_id in subnet_ids:
logging.info("Targeting subnet_id")
network_association_ids = []
associations, original_acl_id = \
cloud_object.describe_network_acls(vpc_id, subnet_id)
for entry in associations:
if entry["SubnetId"] == subnet_id:
network_association_ids.append(
entry["NetworkAclAssociationId"]
)
logging.info(
"Network association ids associated with "
"the subnet %s: %s"
% (subnet_id, network_association_ids)
)
acl_id = cloud_object.create_default_network_acl(vpc_id)
new_association_id = \
cloud_object.replace_network_acl_association(
network_association_ids[0], acl_id
if cloud_type.lower() == "aws":
cloud_object = AWS()
else:
logging.error(
"Cloud type %s is not currently supported for "
"zone outage scenarios"
% cloud_type
)
# removed_exit
# sys.exit(1)
raise RuntimeError()

# capture the original_acl_id, created_acl_id and
# new association_id to use during the recovery
ids[new_association_id] = original_acl_id
acl_ids_created.append(acl_id)
start_time = int(time.time())

# wait for the specified duration
logging.info(
"Waiting for the specified duration "
"in the config: %s" % (duration)
)
time.sleep(duration)
for subnet_id in subnet_ids:
logging.info("Targeting subnet_id")
network_association_ids = []
associations, original_acl_id = \
cloud_object.describe_network_acls(vpc_id, subnet_id)
for entry in associations:
if entry["SubnetId"] == subnet_id:
network_association_ids.append(
entry["NetworkAclAssociationId"]
)
logging.info(
"Network association ids associated with "
"the subnet %s: %s"
% (subnet_id, network_association_ids)
)
acl_id = cloud_object.create_default_network_acl(vpc_id)
new_association_id = \
cloud_object.replace_network_acl_association(
network_association_ids[0], acl_id
)

# replace the applied acl with the previous acl in use
for new_association_id, original_acl_id in ids.items():
cloud_object.replace_network_acl_association(
new_association_id,
original_acl_id
# capture the original_acl_id, created_acl_id and
# new association_id to use during the recovery
ids[new_association_id] = original_acl_id
acl_ids_created.append(acl_id)

# wait for the specified duration
logging.info(
"Waiting for the specified duration "
"in the config: %s" % (duration)
)
logging.info(
"Waiting for 60 seconds to make sure "
"the changes are in place"
)
time.sleep(60)
time.sleep(duration)

# delete the network acl created for the run
for acl_id in acl_ids_created:
cloud_object.delete_network_acl(acl_id)
# replace the applied acl with the previous acl in use
for new_association_id, original_acl_id in ids.items():
cloud_object.replace_network_acl_association(
new_association_id,
original_acl_id
)
logging.info(
"Waiting for 60 seconds to make sure "
"the changes are in place"
)
time.sleep(60)

logging.info(
"End of scenario. "
"Waiting for the specified duration: %s" % (wait_duration)
)
time.sleep(wait_duration)
# delete the network acl created for the run
for acl_id in acl_ids_created:
cloud_object.delete_network_acl(acl_id)

logging.info(
"End of scenario. "
"Waiting for the specified duration: %s" % (wait_duration)
)
time.sleep(wait_duration)

end_time = int(time.time())
cerberus.publish_kraken_status(
config,
failed_post_scenarios,
start_time,
end_time
)
except (RuntimeError, Exception):
scenario_telemetry.exitStatus = 1
failed_scenarios.append(zone_outage_config)
log_exception(zone_outage_config)
else:
scenario_telemetry.exitStatus = 0
scenario_telemetry.endTimeStamp = time.time()
scenario_telemetries.append(scenario_telemetry)
return failed_scenarios, scenario_telemetries

end_time = int(time.time())
cerberus.publish_kraken_status(
config,
failed_post_scenarios,
start_time,
end_time
)

@@ -1,41 +1,41 @@
coverage
datetime
pyfiglet
PyYAML>=5.1
requests
boto3
google-api-python-client
azure-mgmt-compute
azure-keyvault
azure-identity
kubernetes
oauth2client>=4.1.3
python-openstackclient
gitpython
paramiko
setuptools==65.5.1
openshift-client
python-ipmi
podman-compose
docker-compose
docker
jinja2==3.0.3
itsdangerous==2.0.1
werkzeug==2.2.3
lxml >= 4.3.0
pyVmomi >= 6.7
zope.interface==5.4.0
aliyun-python-sdk-core==2.13.36
aliyun-python-sdk-ecs==4.24.25
arcaflow-plugin-sdk>=0.9.0
wheel
service_identity
git+https://github.com/vmware/vsphere-automation-sdk-python.git@v8.0.0.0
arcaflow >= 0.8.0
arcaflow-plugin-sdk >= 0.10.0
azure-identity
azure-keyvault
azure-mgmt-compute
boto3==1.28.61
coverage
datetime
docker
docker-compose
git+https://github.com/redhat-chaos/arcaflow-plugin-kill-pod.git
arcaflow >= 0.4.1
prometheus_api_client
git+https://github.com/vmware/vsphere-automation-sdk-python.git@v8.0.0.0
gitpython
google-api-python-client
ibm_cloud_sdk_core
ibm_vpc
itsdangerous==2.0.1
jinja2==3.0.3
krkn-lib>=1.4.1
kubernetes
lxml >= 4.3.0
oauth2client>=4.1.3
openshift-client
paramiko
podman-compose
prometheus_api_client
pyVmomi >= 6.7
pyfiglet
pytest

krkn-lib-kubernetes >= 0.1.1
python-ipmi
python-openstackclient
requests
service_identity
setuptools==65.5.1
werkzeug==3.0.1
wheel
zope.interface==5.4.0
pandas<2.0.0

255
run_kraken.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python

import json
import os
import sys
import yaml
@@ -12,7 +12,7 @@ import kraken.litmus.common_litmus as common_litmus
import kraken.time_actions.common_time_functions as time_actions
import kraken.performance_dashboards.setup as performance_dashboards
import kraken.pod_scenarios.setup as pod_scenarios
import kraken.namespace_actions.common_namespace_functions as namespace_actions
import kraken.service_disruption.common_service_disruption_functions as service_disruption
import kraken.shut_down.common_shut_down_func as shut_down
import kraken.node_actions.run as nodeaction
import kraken.managedcluster_scenarios.run as managedcluster_scenarios
@@ -25,7 +25,14 @@ import kraken.arcaflow_plugin as arcaflow_plugin
import server as server
import kraken.prometheus.client as promcli
from kraken import plugins
from krkn_lib_kubernetes import KrknLibKubernetes

from krkn_lib.k8s import KrknKubernetes
from krkn_lib.ocp import KrknOpenshift
from krkn_lib.telemetry.k8s import KrknTelemetryKubernetes
from krkn_lib.telemetry.ocp import KrknTelemetryOpenshift
from krkn_lib.models.telemetry import ChaosRunTelemetry
from krkn_lib.utils import SafeLogger
from krkn_lib.utils.functions import get_yaml_item_value

KUBE_BURNER_URL = (
"https://github.com/cloud-bulldozer/kube-burner/"
@@ -45,50 +52,78 @@ def main(cfg):
with open(cfg, "r") as f:
config = yaml.full_load(f)
global kubeconfig_path, wait_duration, kraken_config
distribution = config["kraken"].get("distribution", "openshift")
distribution = get_yaml_item_value(
config["kraken"], "distribution", "openshift"
)
kubeconfig_path = os.path.expanduser(
config["kraken"].get("kubeconfig_path", "")
get_yaml_item_value(config["kraken"], "kubeconfig_path", "")
)
kraken_config = cfg
chaos_scenarios = config["kraken"].get("chaos_scenarios", [])
publish_running_status = config["kraken"].get("publish_kraken_status", False)
port = config["kraken"].get("port")
signal_address = config["kraken"].get("signal_address")
run_signal = config["kraken"].get("signal_state", "RUN")
litmus_install = config["kraken"].get("litmus_install", True)
litmus_version = config["kraken"].get("litmus_version", "v1.9.1")
litmus_uninstall = config["kraken"].get("litmus_uninstall", False)
litmus_uninstall_before_run = config["kraken"].get(
"litmus_uninstall_before_run", True
chaos_scenarios = get_yaml_item_value(
config["kraken"], "chaos_scenarios", []
)
wait_duration = config["tunings"].get("wait_duration", 60)
iterations = config["tunings"].get("iterations", 1)
daemon_mode = config["tunings"].get("daemon_mode", False)
deploy_performance_dashboards = config["performance_monitoring"].get(
"deploy_dashboards", False
publish_running_status = get_yaml_item_value(
config["kraken"], "publish_kraken_status", False
)
dashboard_repo = config["performance_monitoring"].get(
"repo", "https://github.com/cloud-bulldozer/performance-dashboards.git"
port = get_yaml_item_value(config["kraken"], "port", 8081)
signal_address = get_yaml_item_value(
config["kraken"], "signal_address", "0.0.0.0")
run_signal = get_yaml_item_value(
config["kraken"], "signal_state", "RUN"
)
capture_metrics = config["performance_monitoring"].get("capture_metrics", False)
kube_burner_url = config["performance_monitoring"].get(
"kube_burner_binary_url",
litmus_install = get_yaml_item_value(
config["kraken"], "litmus_install", False
)
litmus_version = get_yaml_item_value(
config["kraken"], "litmus_version", "v1.9.1"
)
litmus_uninstall = get_yaml_item_value(
config["kraken"], "litmus_uninstall", True
)
litmus_uninstall_before_run = get_yaml_item_value(
config["kraken"], "litmus_uninstall_before_run", True
)
wait_duration = get_yaml_item_value(
config["tunings"], "wait_duration", 60
)
iterations = get_yaml_item_value(config["tunings"], "iterations", 1)
daemon_mode = get_yaml_item_value(
config["tunings"], "daemon_mode", False
)
deploy_performance_dashboards = get_yaml_item_value(
config["performance_monitoring"], "deploy_dashboards", False
)
dashboard_repo = get_yaml_item_value(
config["performance_monitoring"], "repo",
"https://github.com/cloud-bulldozer/performance-dashboards.git"
)
capture_metrics = get_yaml_item_value(
config["performance_monitoring"], "capture_metrics", False
)
kube_burner_url = get_yaml_item_value(
config["performance_monitoring"], "kube_burner_binary_url",
KUBE_BURNER_URL.format(version=KUBE_BURNER_VERSION),
)
config_path = config["performance_monitoring"].get(
"config_path", "config/kube_burner.yaml"
config_path = get_yaml_item_value(
config["performance_monitoring"], "config_path",
"config/kube_burner.yaml"
)
metrics_profile = config["performance_monitoring"].get(
"metrics_profile_path", "config/metrics-aggregated.yaml"
metrics_profile = get_yaml_item_value(
config["performance_monitoring"], "metrics_profile_path",
"config/metrics-aggregated.yaml"
)
prometheus_url = config["performance_monitoring"].get("prometheus_url", "")
prometheus_url = config["performance_monitoring"].get("prometheus_url")
prometheus_bearer_token = config["performance_monitoring"].get(
"prometheus_bearer_token", ""
"prometheus_bearer_token"
)
run_uuid = config["performance_monitoring"].get("uuid")
enable_alerts = get_yaml_item_value(
config["performance_monitoring"], "enable_alerts", False
)
alert_profile = config["performance_monitoring"].get("alert_profile")
check_critical_alerts = get_yaml_item_value(
config["performance_monitoring"], "check_critical_alerts", False
)
run_uuid = config["performance_monitoring"].get("uuid", "")
enable_alerts = config["performance_monitoring"].get("enable_alerts", False)
alert_profile = config["performance_monitoring"].get("alert_profile", "")
check_critical_alerts = config["performance_monitoring"].get("check_critical_alerts", False)

# Initialize clients
if (not os.path.isfile(kubeconfig_path) and
@@ -98,13 +133,37 @@ def main(cfg):
)
sys.exit(1)
logging.info("Initializing client to talk to the Kubernetes cluster")

# Generate uuid for the run
if run_uuid:
logging.info(
"Using the uuid defined by the user for the run: %s" % run_uuid
)
else:
run_uuid = str(uuid.uuid4())
logging.info("Generated a uuid for the run: %s" % run_uuid)

# request_id for telemetry is generated once here and used everywhere
telemetry_request_id = f"{int(time.time())}-{run_uuid}"
if config["telemetry"].get("run_tag"):
telemetry_request_id = f"{telemetry_request_id}-{config['telemetry']['run_tag']}"
telemetry_log_file = f'{config["telemetry"]["archive_path"]}/{telemetry_request_id}.log'
safe_logger = SafeLogger(filename=telemetry_log_file)

try:
kubeconfig_path
os.environ["KUBECONFIG"] = str(kubeconfig_path)
kubecli = KrknLibKubernetes(kubeconfig_path=kubeconfig_path)
except NameError:
# krkn-lib-kubernetes init
kubecli = KrknKubernetes(kubeconfig_path=kubeconfig_path)
ocpcli = KrknOpenshift(kubeconfig_path=kubeconfig_path)
except:
kubecli.initialize_clients(None)

# KrknTelemetry init
telemetry_k8s = KrknTelemetryKubernetes(safe_logger, kubecli)
telemetry_ocp = KrknTelemetryOpenshift(safe_logger, ocpcli)


# find node kraken might be running on
kubecli.find_kraken_node()

@@ -129,7 +188,9 @@ def main(cfg):

# Cluster info
logging.info("Fetching cluster info")
cv = kubecli.get_clusterversion_string()
cv = ""
if config["kraken"]["distribution"] == "openshift":
cv = ocpcli.get_clusterversion_string()
if cv != "":
logging.info(cv)
else:
@@ -141,14 +202,7 @@ def main(cfg):
if deploy_performance_dashboards:
performance_dashboards.setup(dashboard_repo, distribution)

# Generate uuid for the run
if run_uuid:
logging.info(
"Using the uuid defined by the user for the run: %s" % run_uuid
)
else:
run_uuid = str(uuid.uuid4())
logging.info("Generated a uuid for the run: %s" % run_uuid)


# Initialize the start iteration to 0
iteration = 0
@@ -171,7 +225,8 @@ def main(cfg):
# Capture the start time
start_time = int(time.time())
litmus_installed = False

chaos_telemetry = ChaosRunTelemetry()
chaos_telemetry.run_uuid = run_uuid
# Loop to run the chaos starts here
while int(iteration) < iterations and run_signal != "STOP":
# Inject chaos scenarios specified in the config
@@ -203,38 +258,43 @@ def main(cfg):
)
sys.exit(1)
elif scenario_type == "arcaflow_scenarios":
failed_post_scenarios = arcaflow_plugin.run(
scenarios_list, kubeconfig_path
failed_post_scenarios, scenario_telemetries = arcaflow_plugin.run(
scenarios_list, kubeconfig_path, telemetry_k8s
)
chaos_telemetry.scenarios.extend(scenario_telemetries)

elif scenario_type == "plugin_scenarios":
failed_post_scenarios = plugins.run(
failed_post_scenarios, scenario_telemetries = plugins.run(
scenarios_list,
kubeconfig_path,
kraken_config,
failed_post_scenarios,
wait_duration,
telemetry_k8s
)
# krkn_lib_kubernetes
chaos_telemetry.scenarios.extend(scenario_telemetries)
# krkn_lib
elif scenario_type == "container_scenarios":
logging.info("Running container scenarios")
failed_post_scenarios = pod_scenarios.container_run(
failed_post_scenarios, scenario_telemetries = pod_scenarios.container_run(
kubeconfig_path,
scenarios_list,
config,
failed_post_scenarios,
wait_duration,
kubecli
kubecli,
telemetry_k8s
)
chaos_telemetry.scenarios.extend(scenario_telemetries)

# Inject node chaos scenarios specified in the config
# krkn_lib_kubernetes
# krkn_lib
elif scenario_type == "node_scenarios":
logging.info("Running node scenarios")
nodeaction.run(scenarios_list, config, wait_duration, kubecli)

failed_post_scenarios, scenario_telemetries = nodeaction.run(scenarios_list, config, wait_duration, kubecli, telemetry_k8s)
chaos_telemetry.scenarios.extend(scenario_telemetries)
# Inject managedcluster chaos scenarios specified in the config
# krkn_lib_kubernetes
# krkn_lib
elif scenario_type == "managedcluster_scenarios":
logging.info("Running managedcluster scenarios")
managedcluster_scenarios.run(
@@ -243,11 +303,12 @@ def main(cfg):

# Inject time skew chaos scenarios specified
# in the config
# krkn_lib_kubernetes
# krkn_lib
elif scenario_type == "time_scenarios":
if distribution == "openshift":
logging.info("Running time skew scenarios")
time_actions.run(scenarios_list, config, wait_duration, kubecli)
failed_post_scenarios, scenario_telemetries = time_actions.run(scenarios_list, config, wait_duration, kubecli, telemetry_k8s)
chaos_telemetry.scenarios.extend(scenario_telemetries)
else:
logging.error(
"Litmus scenarios are currently "
@@ -295,46 +356,50 @@ def main(cfg):
sys.exit(1)

# Inject cluster shutdown scenarios
# krkn_lib_kubernetes
# krkn_lib
elif scenario_type == "cluster_shut_down_scenarios":
shut_down.run(scenarios_list, config, wait_duration, kubecli)
failed_post_scenarios, scenario_telemetries = shut_down.run(scenarios_list, config, wait_duration, kubecli, telemetry_k8s)
chaos_telemetry.scenarios.extend(scenario_telemetries)

# Inject namespace chaos scenarios
# krkn_lib_kubernetes
elif scenario_type == "namespace_scenarios":
logging.info("Running namespace scenarios")
namespace_actions.run(
# krkn_lib
elif scenario_type == "service_disruption_scenarios":
logging.info("Running service disruption scenarios")
failed_post_scenarios, scenario_telemetries = service_disruption.run(
scenarios_list,
config,
wait_duration,
failed_post_scenarios,
kubeconfig_path,
kubecli
kubecli,
telemetry_k8s
)
chaos_telemetry.scenarios.extend(scenario_telemetries)

# Inject zone failures
elif scenario_type == "zone_outages":
logging.info("Inject zone outages")
zone_outages.run(scenarios_list, config, wait_duration)

failed_post_scenarios, scenario_telemetries = zone_outages.run(scenarios_list, config, wait_duration, telemetry_k8s)
chaos_telemetry.scenarios.extend(scenario_telemetries)
# Application outages
elif scenario_type == "application_outages":
logging.info("Injecting application outage")
application_outage.run(
scenarios_list, config, wait_duration
)
failed_post_scenarios, scenario_telemetries = application_outage.run(
scenarios_list, config, wait_duration, telemetry_k8s)
chaos_telemetry.scenarios.extend(scenario_telemetries)

# PVC scenarios
# krkn_lib_kubernetes
# krkn_lib
elif scenario_type == "pvc_scenarios":
logging.info("Running PVC scenario")
pvc_scenario.run(scenarios_list, config, kubecli)
failed_post_scenarios, scenario_telemetries = pvc_scenario.run(scenarios_list, config, kubecli, telemetry_k8s)
chaos_telemetry.scenarios.extend(scenario_telemetries)

# Network scenarios
# krkn_lib_kubernetes
# krkn_lib
elif scenario_type == "network_chaos":
logging.info("Running Network Chaos")
network_chaos.run(scenarios_list, config, wait_duration, kubecli)
failed_post_scenarios, scenario_telemetries = network_chaos.run(scenarios_list, config, wait_duration, kubecli, telemetry_k8s)

# Check for critical alerts when enabled
if check_critical_alerts:
@@ -353,9 +418,45 @@ def main(cfg):
iteration += 1
logging.info("")

# Capture the end time
# telemetry
# in order to print decoded telemetry data even if telemetry collection
# is disabled, it's necessary to serialize the ChaosRunTelemetry object
# to json, and recreate a new object from it.
end_time = int(time.time())

# if the platform is openshift, cloud platform and network plugins
# metadata will be collected through OCP specific APIs
if config["kraken"]["distribution"] == "openshift":
telemetry_ocp.collect_cluster_metadata(chaos_telemetry)
else:
telemetry_k8s.collect_cluster_metadata(chaos_telemetry)

decoded_chaos_run_telemetry = ChaosRunTelemetry(json.loads(chaos_telemetry.to_json()))
logging.info(f"Telemetry data:\n{decoded_chaos_run_telemetry.to_json()}")

if config["telemetry"]["enabled"]:
logging.info(f"telemetry data will be stored on s3 bucket folder: {telemetry_request_id}")
logging.info(f"telemetry upload log: {safe_logger.log_file_name}")
try:
telemetry_k8s.send_telemetry(config["telemetry"], telemetry_request_id, chaos_telemetry)
telemetry_k8s.put_cluster_events(telemetry_request_id, config["telemetry"], start_time, end_time)
# prometheus data collection is available only on Openshift
if config["telemetry"]["prometheus_backup"] and config["kraken"]["distribution"] == "openshift":
safe_logger.info("archives download started:")
prometheus_archive_files = telemetry_ocp.get_ocp_prometheus_data(config["telemetry"], telemetry_request_id)
safe_logger.info("archives upload started:")
telemetry_k8s.put_prometheus_data(config["telemetry"], prometheus_archive_files, telemetry_request_id)
if config["telemetry"]["logs_backup"]:
telemetry_ocp.put_ocp_logs(telemetry_request_id, config["telemetry"], start_time, end_time)
except Exception as e:
logging.error(f"failed to send telemetry data: {str(e)}")
else:
logging.info("telemetry collection disabled, skipping.")

# Capture the end time


# Capture metrics for the run
if capture_metrics:
logging.info("Capturing metrics")
@@ -387,7 +488,7 @@ def main(cfg):
else:
logging.error("Alert profile is not defined")
sys.exit(1)


if litmus_uninstall and litmus_installed:
common_litmus.delete_chaos(litmus_namespace, kubecli)
common_litmus.delete_chaos_experiments(litmus_namespace, kubecli)

@@ -1,8 +1,9 @@
version: v0.1.0
input:
root: RootObject
objects:
RootObject:
id: RootObject
id: input_item
properties:
kubeconfig:
display:
@@ -60,17 +61,17 @@ input:

steps:
kubeconfig:
plugin: quay.io/arcalot/arcaflow-plugin-kubeconfig:latest
plugin: quay.io/arcalot/arcaflow-plugin-kubeconfig:0.2.0
input:
kubeconfig: !expr $.input.kubeconfig
stressng:
plugin: quay.io/arcalot/arcaflow-plugin-stressng:latest
plugin: quay.io/arcalot/arcaflow-plugin-stressng:0.5.0
step: workload
input:
cleanup: "true"
StressNGParams:
timeout: !expr $.input.duration
cleanup: "true"
items:
stressors:
- stressor: cpu
cpu_count: !expr $.input.cpu_count
cpu_method: !expr $.input.cpu_method

@@ -1,3 +1,4 @@
version: v0.1.0
input:
root: RootObject
objects:

10
scenarios/arcaflow/io-hog/config.yaml
Normal file
@@ -0,0 +1,10 @@
deployer:
connection: {}
type: kubernetes
log:
level: debug
logged_outputs:
error:
level: error
success:
level: debug
13
scenarios/arcaflow/io-hog/input.yaml
Normal file
@@ -0,0 +1,13 @@
input_list:
- duration: 30s
io_block_size: 1m
io_workers: 1
io_write_bytes: 10m
kubeconfig: ''
namespace: default
node_selector: {}
target_pod_folder: /hog-data
target_pod_volume:
hostPath:
path: /tmp
name: node-volume
139
scenarios/arcaflow/io-hog/sub-workflow.yaml
Normal file
@@ -0,0 +1,139 @@
version: v0.1.0
input:
root: RootObject
objects:
RootObject:
id: input_item
properties:
kubeconfig:
display:
description: The complete kubeconfig file as a string
name: Kubeconfig file contents
type:
type_id: string
required: true
namespace:
display:
description: The namespace where the container will be deployed
name: Namespace
type:
type_id: string
required: true
node_selector:
display:
description: kubernetes node name where the plugin must be deployed
type:
type_id: map
values:
type_id: string
keys:
type_id: string
required: true
duration:
display:
name: duration the scenario expressed in seconds
description: stop stress test after T seconds. One can also specify the units of time in
seconds, minutes, hours, days or years with the suffix s, m, h, d or y
type:
type_id: string
required: true
io_workers:
display:
description: number of workers
name: start N workers continually writing, reading and removing temporary files
type:
type_id: integer
required: true
io_block_size:
display:
description: single write size
name: specify size of each write in bytes. Size can be from 1 byte to 4MB.
type:
type_id: string
required: true
io_write_bytes:
display:
description: Total number of bytes written
name: write N bytes for each hdd process, the default is 1 GB. One can specify the size
as % of free space on the file system or in units of Bytes, KBytes, MBytes and
GBytes using the suffix b, k, m or g
type:
type_id: string
required: true
target_pod_folder:
display:
description: Target Folder
name: Folder in the pod where the test will be executed and the test files will be written
type:
type_id: string
required: true
target_pod_volume:
display:
name: kubernetes volume definition
description: the volume that will be attached to the pod. In order to stress
the node storage only hostPath mode is currently supported
type:
type_id: object
id: k8s_volume
properties:
name:
display:
description: name of the volume (must match the name in pod definition)
type:
type_id: string
required: true
hostPath:
display:
description: hostPath options expressed as string map (key-value)
type:
type_id: map
values:
type_id: string
keys:
type_id: string
required: true
required: true

steps:
kubeconfig:
plugin: quay.io/arcalot/arcaflow-plugin-kubeconfig:0.2.0
input:
kubeconfig: !expr $.input.kubeconfig
stressng:
plugin: quay.io/arcalot/arcaflow-plugin-stressng:0.5.0
step: workload
input:
cleanup: "true"
StressNGParams:
timeout: !expr $.input.duration
workdir: !expr $.input.target_pod_folder
stressors:
- stressor: hdd
hdd: !expr $.input.io_workers
hdd_bytes: !expr $.input.io_write_bytes
hdd_write_size: !expr $.input.io_block_size

deploy:
type: kubernetes
connection: !expr $.steps.kubeconfig.outputs.success.connection
pod:
metadata:
namespace: !expr $.input.namespace
labels:
arcaflow: stressng
spec:
nodeSelector: !expr $.input.node_selector
pluginContainer:
imagePullPolicy: Always
securityContext:
privileged: true
volumeMounts:
- mountPath: /hog-data
name: node-volume
volumes:
- !expr $.input.target_pod_volume

outputs:
success:
stressng: !expr $.steps.stressng.outputs.success

114
scenarios/arcaflow/io-hog/workflow.yaml
Normal file
@@ -0,0 +1,114 @@
version: v0.1.0
input:
root: RootObject
objects:
RootObject:
id: RootObject
properties:
input_list:
type:
type_id: list
items:
id: input_item
type_id: object
properties:
kubeconfig:
display:
description: The complete kubeconfig file as a string
name: Kubeconfig file contents
type:
type_id: string
required: true
namespace:
display:
description: The namespace where the container will be deployed
name: Namespace
type:
type_id: string
required: true
node_selector:
display:
description: kubernetes node name where the plugin must be deployed
type:
type_id: map
values:
type_id: string
keys:
type_id: string
required: true
duration:
display:
name: duration the scenario expressed in seconds
description: stop stress test after T seconds. One can also specify the units of time in
seconds, minutes, hours, days or years with the suffix s, m, h, d or y
type:
type_id: string
required: true
io_workers:
display:
description: number of workers
name: start N workers continually writing, reading and removing temporary files
type:
type_id: integer
required: true
io_block_size:
display:
description: single write size
name: specify size of each write in bytes. Size can be from 1 byte to 4MB.
type:
type_id: string
required: true
io_write_bytes:
display:
description: Total number of bytes written
name: write N bytes for each hdd process, the default is 1 GB. One can specify the size
as % of free space on the file system or in units of Bytes, KBytes, MBytes and
GBytes using the suffix b, k, m or g
type:
type_id: string
required: true
target_pod_folder:
display:
description: Target Folder
name: Folder in the pod where the test will be executed and the test files will be written
type:
type_id: string
required: true
target_pod_volume:
display:
name: kubernetes volume definition
description: the volume that will be attached to the pod. In order to stress
the node storage only hostPath mode is currently supported
type:
type_id: object
id: k8s_volume
properties:
name:
display:
description: name of the volume (must match the name in pod definition)
type:
type_id: string
required: true
hostPath:
display:
description: hostPath options expressed as string map (key-value)
type:
type_id: map
values:
type_id: string
keys:
type_id: string
required: true
required: true
steps:
workload_loop:
kind: foreach
items: !expr $.input.input_list
workflow: sub-workflow.yaml
parallelism: 1000
outputs:
success:
workloads: !expr $.steps.workload_loop.outputs.success.data


@@ -1,8 +1,9 @@
version: v0.1.0
input:
root: RootObject
objects:
RootObject:
id: RootObject
id: input_item
properties:
kubeconfig:
display:
@@ -52,17 +53,17 @@ input:

steps:
kubeconfig:
plugin: quay.io/arcalot/arcaflow-plugin-kubeconfig:latest
plugin: quay.io/arcalot/arcaflow-plugin-kubeconfig:0.2.0
input:
kubeconfig: !expr $.input.kubeconfig
stressng:
plugin: quay.io/arcalot/arcaflow-plugin-stressng:latest
plugin: quay.io/arcalot/arcaflow-plugin-stressng:0.5.0
step: workload
input:
cleanup: "true"
StressNGParams:
timeout: !expr $.input.duration
cleanup: "true"
items:
stressors:
- stressor: vm
vm: !expr $.input.vm_workers
vm_bytes: !expr $.input.vm_bytes

@@ -1,3 +1,4 @@
version: v0.1.0
input:
root: RootObject
objects:

@@ -3,6 +3,6 @@ scenarios:
namespace: "openshift-etcd"
label_selector: "k8s-app=etcd"
container_name: "etcd"
action: "kill 1"
action: 1
count: 1
expected_recovery_time: 60

14
scenarios/openshift/pod_network_shaping.yml
Normal file
@@ -0,0 +1,14 @@
# yaml-language-server: $schema=../plugin.schema.json
- id: pod_egress_shaping
config:
namespace: <namespace> # Required - Namespace of the pod to which traffic shaping needs to be applied
label_selector: <label_selector> # When pod_name is not specified, a pod with a matching label_selector is selected for the chaos scenario
pod_name: <pod name> # When label_selector is not specified, the pod matching the name will be selected for the chaos scenario
network_params: # latency, loss and bandwidth are the three supported network parameters to alter for the chaos test
latency: <time> # Value is a string. For example: 50ms
loss: <fraction> # Loss is a fraction between 0 and 1. It has to be enclosed in quotes to treat it as a string. For example, '0.02%' (not 0.02%)
bandwidth: <rate> # Value is a string. For example: 100mbit
execution_type: <serial/parallel> # Used to specify whether you want to apply filters on interfaces one at a time or all at once. Default is 'parallel'
instance_count: <number> # Number of pods to perform action/select that match the label selector
wait_duration: <time_duration> # Default is 300. Ensure that it is at least about twice the test_duration
test_duration: <time_duration> # Default is 120
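
A filled-in variant of the template above can make the placeholders concrete; the namespace, label and parameter values below are illustrative assumptions (mirroring the documented defaults), not recommended settings:

```
# Hypothetical pod_egress_shaping instance; the target namespace and label
# are assumptions, the numeric values mirror the defaults documented above.
- id: pod_egress_shaping
  config:
    namespace: my-namespace
    label_selector: app=my-service
    network_params:
      latency: 50ms
      loss: '0.02%'
      bandwidth: 100mbit
    execution_type: parallel
    instance_count: 1
    wait_duration: 300
    test_duration: 120
```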
9
scenarios/openshift/prom_kill.yml
Normal file
@@ -0,0 +1,9 @@
- id: kill-pods
config:
namespace_pattern: ^openshift-monitoring$
label_selector: statefulset.kubernetes.io/pod-name=prometheus-k8s-0
- id: wait-for-pods
config:
namespace_pattern: ^openshift-monitoring$
label_selector: statefulset.kubernetes.io/pod-name=prometheus-k8s-0
count: 1
@@ -2253,6 +2253,166 @@
"id",
"config"
]
},
{
"type": "object",
"title": "pod_egress_shaping Arcaflow scenarios",
"properties": {
"id": {
"type": "string",
"const": "pod_egress_shaping"
},
"config": {
"$defs": {
"EgressParams": {
"type": "object",
"properties": {
"namespace": {
"type": "string",
"minLength": 1,
"title": "Namespace",
"description": "Namespace of the pod to which the filter needs to be applied."
},
"kubeconfig_path": {
"type": "string",
"title": "Kubeconfig path",
"description": "Kubeconfig file as string\nSee https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/ for details."
},
"pod_name": {
"type": "string",
"title": "Pod name",
"description": "When label_selector is not specified, pod matching the name will be selected for the chaos scenario"
},
"label_selector": {
"type": "string",
"title": "Label selector",
"description": "Kubernetes label selector for the target pod. When pod_name is not specified, pod with matching label_selector is selected for chaos scenario"
},
"kraken_config": {
"type": "string",
"title": "Kraken Config",
"description": "Path to the config file of Kraken. Set this field if you wish to publish status onto Cerberus"
},
"test_duration": {
"type": "integer",
"minimum": 1,
"default": 90,
"title": "Test duration",
"description": "Duration for which each step of the ingress chaos testing is to be performed."
},
"wait_duration": {
"type": "integer",
"minimum": 1,
"default": 300,
"title": "Wait Duration",
"description": "Wait duration for finishing a test and its cleanup. Ensure that it is significantly greater than test_duration"
},
"instance_count": {
"type": "integer",
"minimum": 1,
"default": 1,
"title": "Instance Count",
"description": "Number of pods to perform action/select that match the label selector."
},
"execution_type": {
"type": "string",
"default": "parallel",
"title": "Execution Type",
"description": "The order in which the ingress filters are applied. Execution type can be 'serial' or 'parallel'"
},
"network_params": {
"type": "object",
"propertyNames": {},
"additionalProperties": {
"type": "string"
},
"title": "Network Parameters",
"description": "The network filters that are applied on the interface. The currently supported filters are latency, loss and bandwidth"
}
},
"required": [
"namespace"
],
"additionalProperties": false,
"dependentRequired": {}
}
},
"type": "object",
"properties": {
"namespace": {
"type": "string",
"minLength": 1,
"title": "Namespace",
"description": "Namespace of the pod to which the filter needs to be applied."
},
"kubeconfig_path": {
"type": "string",
"title": "Kubeconfig path",
"description": "Kubeconfig file as string\nSee https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/ for details."
},
"pod_name": {
"type": "string",
"title": "Pod name",
"description": "When label_selector is not specified, pod matching the name will be selected for the chaos scenario"
},
"label_selector": {
"type": "string",
"title": "Label selector",
"description": "Kubernetes label selector for the target pod. When pod_name is not specified, pod with matching label_selector is selected for chaos scenario"
},
"kraken_config": {
"type": "string",
"title": "Kraken Config",
"description": "Path to the config file of Kraken. Set this field if you wish to publish status onto Cerberus"
},
"test_duration": {
"type": "integer",
"minimum": 1,
"default": 90,
"title": "Test duration",
"description": "Duration for which each step of the ingress chaos testing is to be performed."
},
"wait_duration": {
"type": "integer",
"minimum": 1,
"default": 300,
"title": "Wait Duration",
"description": "Wait duration for finishing a test and its cleanup. Ensure that it is significantly greater than test_duration"
},
"instance_count": {
"type": "integer",
"minimum": 1,
"default": 1,
"title": "Instance Count",
"description": "Number of pods to perform action/select that match the label selector."
},
"execution_type": {
"type": "string",
"default": "parallel",
"title": "Execution Type",
"description": "The order in which the ingress filters are applied. Execution type can be 'serial' or 'parallel'"
},
"network_params": {
"type": "object",
"propertyNames": {},
"additionalProperties": {
"type": "string"
},
"title": "Network Parameters",
"description": "The network filters that are applied on the interface. The currently supported filters are latency, loss and bandwidth"
}
},
"required": [
"namespace"
],
"additionalProperties": false,
"dependentRequired": {}
}
},
"required": [
"id",
"config"
]
}
]
}

106
utils/chaos_recommender/README.md
Normal file
@@ -0,0 +1,106 @@
# Chaos Recommendation Tool

This tool, designed for Red Hat Kraken, operates through the command line and offers recommendations for chaos testing. It suggests probable chaos test cases that can disrupt application services by analyzing their behavior and assessing their susceptibility to specific fault types.

This tool profiles an application and gathers telemetry data such as CPU, memory, and network usage, analyzing it to suggest probable chaos scenarios. For optimal results, it is recommended to activate the utility while the application is under load.

## Pre-requisites

- OpenShift or Kubernetes environment where the application is hosted
- Access to the telemetry data via the exposed Prometheus endpoint
- Python3

## Usage

1. To run

```
$ python3 -m venv chaos
$ source chaos/bin/activate
$ git clone https://github.com/redhat-chaos/krkn.git
$ cd krkn
$ pip3 install -r requirements.txt
$ python3 utils/chaos_recommender/chaos_recommender.py
```

2. Follow the prompts to provide the required information.

## Configuration
To run the recommender with a config file, specify the config file path with the `-c` argument.
You can customize the default values by editing the `krkn/config/recommender_config.yaml` file. The configuration file contains the following options:

- `application`: Specify the application name.
- `namespace`: Specify the namespace of the application you want to profile.
- `labels`: Specify the labels (not used).
- `kubeconfig`: Specify the location of the kubeconfig file (not used).
- `prometheus_endpoint`: Specify the Prometheus endpoint (required).
- `auth_token`: Auth token to connect to the Prometheus endpoint (required).
- `scrape_duration`: For how long data should be fetched, e.g., '1m' (required).
- `chaos_library`: "kraken" (currently it only supports kraken).
- `chaos_tests`: (for output purposes only; do not change unless needed)
  - `GENERAL`: list of general purpose tests available in Krkn
  - `MEM`: list of memory related tests available in Krkn
  - `NETWORK`: list of network related tests available in Krkn
  - `CPU`: list of CPU related tests available in Krkn

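For reference, a minimal config along these lines could look like the sketch below; every value shown (application, endpoint, token and test names) is a placeholder assumption, not a default shipped with Krkn:

```
# Hypothetical recommender_config.yaml; all values are placeholders.
application: my-app
namespace: my-namespace
labels: ""
kubeconfig: ~/.kube/config
prometheus_endpoint: https://prometheus-k8s-openshift-monitoring.apps.example.com
auth_token: sha256~REDACTED
scrape_duration: 10m
chaos_library: kraken
chaos_tests:
  GENERAL: [pod_failure]
  MEM: [node_memory_hog]
  NETWORK: [network_chaos]
  CPU: [node_cpu_hog]
```
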
*TIP:* to collect the Prometheus endpoint and token from your OpenShift cluster you can run the following commands:
```
prometheus_url=$(kubectl get routes -n openshift-monitoring prometheus-k8s --no-headers | awk '{print $2}')
#TO USE YOUR CURRENT SESSION TOKEN
token=$(oc whoami -t)
#TO CREATE A NEW TOKEN
token=$(kubectl create token -n openshift-monitoring prometheus-k8s --duration=6h || oc sa new-token -n openshift-monitoring prometheus-k8s)
```

You can also provide the input values through command-line arguments by launching the recommender with the `-o` option:

```
-o, --options Evaluate command line options
-a APPLICATION, --application APPLICATION
Kubernetes application name
-n NAMESPACE, --namespace NAMESPACE
Kubernetes application namespace
-l LABELS, --labels LABELS
Kubernetes application labels
-p PROMETHEUS_ENDPOINT, --prometheus-endpoint PROMETHEUS_ENDPOINT
Prometheus endpoint URI
-k KUBECONFIG, --kubeconfig KUBECONFIG
Kubeconfig path
-t TOKEN, --token TOKEN
Kubernetes authentication token
-s SCRAPE_DURATION, --scrape-duration SCRAPE_DURATION
Prometheus scrape duration
-i LIBRARY, --library LIBRARY
Chaos library
-L LOG_LEVEL, --log-level LOG_LEVEL
log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
-M MEM [MEM ...], --MEM MEM [MEM ...]
Memory related chaos tests (space separated list)
-C CPU [CPU ...], --CPU CPU [CPU ...]
CPU related chaos tests (space separated list)
-N NETWORK [NETWORK ...], --NETWORK NETWORK [NETWORK ...]
Network related chaos tests (space separated list)
-G GENERIC [GENERIC ...], --GENERIC GENERIC [GENERIC ...]
Generic chaos tests (space separated list)

```

If you provide the input values through command-line arguments, the corresponding config file inputs are ignored.

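For example, a run driven entirely by command-line options might look like the following; the namespace is a placeholder, and `$prometheus_url`/`$token` come from the TIP commands above:

```
$ python3 utils/chaos_recommender/chaos_recommender.py -o \
    -n my-namespace \
    -p "$prometheus_url" \
    -t "$token" \
    -s 10m \
    -L INFO
```
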
## Podman & Docker image

To run the recommender image please visit the [krkn-hub](https://github.com/redhat-chaos/krkn-hub) repository for further information.

## How it works

After obtaining telemetry data, sourced either locally or from Prometheus, the tool conducts a comprehensive data analysis to detect anomalies. Employing the Z-score method and heatmaps, it identifies outliers by evaluating CPU, memory, and network usage against established limits. Services with Z-scores surpassing a specified threshold are categorized as outliers. This categorization classifies services as network, CPU, or memory-sensitive, consequently leading to the recommendation of relevant test cases. A minimal sketch of this approach is shown below.

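The snippet below is a minimal, self-contained sketch of the Z-score step; the column names, toy data and the threshold of 1.0 are illustrative assumptions, while the recommender's actual logic lives in `kraken/chaos_recommender/analysis.py`:

```
import pandas as pd

def zscore_outliers(df: pd.DataFrame, column: str, threshold: float) -> list:
    # Z-score: how many standard deviations each service sits from the mean.
    std = df[column].std()
    if std == 0:
        return []  # all services behave identically, nothing stands out
    z_scores = (df[column] - df[column].mean()) / std
    # Services above the threshold are flagged as sensitive to this resource.
    return df.loc[z_scores > threshold, "service"].tolist()

# Toy utilization sample: "db" uses far more CPU than its peers.
usage = pd.DataFrame({
    "service": ["api", "db", "cache"],
    "cpu": [0.2, 0.9, 0.1],
})
print(zscore_outliers(usage, "cpu", threshold=1.0))  # -> ['db']
```
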
## Customizing Thresholds and Options

You can customize the thresholds and options used for data analysis by modifying the `krkn/kraken/chaos_recommender/analysis.py` file. For example, you can adjust the threshold for identifying outliers by changing the value of the `threshold` variable in the `identify_outliers` function.

## Additional Files

- `config/recommender_config.yaml`: The configuration file containing default values for application, namespace, labels, and kubeconfig.

Happy Chaos!
117
utils/chaos_recommender/chaos_recommender.py
Normal file
@@ -0,0 +1,117 @@
import argparse
import logging
import os.path
import sys
import yaml
# kraken module import for running the recommender
# both from the root directory and the recommender
# folder
sys.path.insert(0, './')
sys.path.insert(0, '../../')

import kraken.chaos_recommender.analysis as analysis
import kraken.chaos_recommender.prometheus as prometheus
from kubernetes import config as kube_config


def parse_arguments(parser):

# command line options
parser.add_argument("-c", "--config-file", action="store", help="Config file path")
parser.add_argument("-o", "--options", action="store_true", help="Evaluate command line options")
parser.add_argument("-n", "--namespace", action="store", default="", help="Kubernetes application namespace")
parser.add_argument("-p", "--prometheus-endpoint", action="store", default="", help="Prometheus endpoint URI")
parser.add_argument("-k", "--kubeconfig", action="store", default=kube_config.KUBE_CONFIG_DEFAULT_LOCATION, help="Kubeconfig path")
parser.add_argument("-t", "--token", action="store", default="", help="Kubernetes authentication token")
parser.add_argument("-s", "--scrape-duration", action="store", default="10m", help="Prometheus scrape duration")
parser.add_argument("-L", "--log-level", action="store", default="INFO", help="log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)")

parser.add_argument("-M", "--MEM", nargs='+', action="store", default=[],
help="Memory related chaos tests (space separated list)")
parser.add_argument("-C", "--CPU", nargs='+', action="store", default=[],
help="CPU related chaos tests (space separated list)")
parser.add_argument("-N", "--NETWORK", nargs='+', action="store", default=[],
help="Network related chaos tests (space separated list)")
parser.add_argument("-G", "--GENERIC", nargs='+', action="store", default=[],
help="Generic chaos tests (space separated list)")

return parser.parse_args()

def read_configuration(config_file_path):
if not os.path.exists(config_file_path):
logging.error(f"Config file not found: {config_file_path}")
sys.exit(1)

with open(config_file_path, mode="r") as config_file:
config = yaml.safe_load(config_file)

log_level = config.get("log level", "INFO")
namespace = config.get("namespace", "")
kubeconfig = config.get("kubeconfig", kube_config.KUBE_CONFIG_DEFAULT_LOCATION)

prometheus_endpoint = config.get("prometheus_endpoint", "")
auth_token = config.get("auth_token", "")
scrape_duration = config.get("scrape_duration", "10m")
chaos_tests = config.get("chaos_tests", {})
return (namespace, kubeconfig, prometheus_endpoint, auth_token, scrape_duration,
chaos_tests, log_level)

def prompt_input(prompt, default_value):
user_input = input(f"{prompt} [{default_value}]: ")
if user_input.strip():
return user_input
return default_value

def main():
parser = argparse.ArgumentParser(description="Krkn Chaos Recommender Command-Line tool")
args = parse_arguments(parser)

if args.config_file is None and not args.options:
logging.error("You have to either specify a config file path or pass recommender options as command line arguments")
parser.print_help()
sys.exit(1)

if args.config_file is not None:
(
namespace,
kubeconfig,
prometheus_endpoint,
auth_token,
scrape_duration,
chaos_tests,
log_level
) = read_configuration(args.config_file)

if args.options:
namespace = args.namespace
kubeconfig = args.kubeconfig
auth_token = args.token
scrape_duration = args.scrape_duration
log_level = args.log_level
prometheus_endpoint = args.prometheus_endpoint
chaos_tests = {"MEM": args.MEM, "GENERIC": args.GENERIC, "CPU": args.CPU, "NETWORK": args.NETWORK}

if log_level not in ["DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"]:
logging.error(f"{log_level} not a valid log level")
sys.exit(1)

logging.basicConfig(level=log_level)

logging.info("============================INPUTS===================================")
logging.info(f"Namespace: {namespace}")
logging.info(f"Kubeconfig: {kubeconfig}")
logging.info(f"Prometheus endpoint: {prometheus_endpoint}")
logging.info(f"Scrape duration: {scrape_duration}")
for test in chaos_tests.keys():
logging.info(f"Chaos tests {test}: {chaos_tests[test]}")
logging.info("=====================================================================")
logging.info("Starting Analysis ...")
logging.info("Fetching the Telemetry data")

file_path = prometheus.fetch_utilization_from_prometheus(prometheus_endpoint, auth_token, namespace, scrape_duration)
# run the analysis entry point of the module (the bare module object
# itself is not callable)
analysis.analysis(file_path, chaos_tests)

if __name__ == "__main__":
main()