diff --git a/.gitleaks.toml b/.gitleaks.toml new file mode 100644 index 00000000..2fd61173 --- /dev/null +++ b/.gitleaks.toml @@ -0,0 +1,6 @@ +[allowlist] + description = "Global Allowlist" + + paths = [ + '''kraken/arcaflow_plugin/fixtures/*''' + ] diff --git a/README.md b/README.md index 739e0a80..adb8d198 100644 --- a/README.md +++ b/README.md @@ -62,7 +62,7 @@ Scenario type | Kubernetes | OpenShift [Container Scenarios](docs/container_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: | [Node Scenarios](docs/node_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: | [Time Scenarios](docs/time_scenarios.md) | :x: | :heavy_check_mark: | -[Litmus Scenarios](docs/litmus_scenarios.md) | :x: | :heavy_check_mark: | +[Hog Scenarios](docs/arcaflow_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: | [Cluster Shut Down Scenarios](docs/cluster_shut_down_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: | [Namespace Scenarios](docs/namespace_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: | [Zone Outage Scenarios](docs/zone_outage.md) | :heavy_check_mark: | :heavy_check_mark: | @@ -70,7 +70,6 @@ Scenario type | Kubernetes | OpenShift [PVC scenario](docs/pvc_scenario.md) | :heavy_check_mark: | :heavy_check_mark: | [Network_Chaos](docs/network_chaos.md) | :heavy_check_mark: | :heavy_check_mark: | [ManagedCluster Scenarios](docs/managedcluster_scenarios.md) | :heavy_check_mark: | :question: | -[Arcaflow Scenarios](docs/arcaflow_scenarios.md) | :heavy_check_mark: | :heavy_check_mark: | ### Kraken scenario pass/fail criteria and report diff --git a/config/config.yaml b/config/config.yaml index bc124045..9c1c71f7 100644 --- a/config/config.yaml +++ b/config/config.yaml @@ -11,6 +11,10 @@ kraken: litmus_uninstall: False # If you want to uninstall litmus if failure litmus_uninstall_before_run: True # If you want to uninstall litmus before a new run starts chaos_scenarios: # List of policies/chaos scenarios to load + - arcaflow_scenarios: + - scenarios/arcaflow/cpu-hog/input.yaml + - scenarios/arcaflow/io-hog/input.yaml + - scenarios/arcaflow/memory-hog/input.yaml - container_scenarios: # List of chaos pod scenarios to load - - scenarios/openshift/container_etcd.yml - plugin_scenarios: diff --git a/config/config_arcaflow.yaml b/config/config_arcaflow.yaml deleted file mode 100644 index d3e1c3b5..00000000 --- a/config/config_arcaflow.yaml +++ /dev/null @@ -1,38 +0,0 @@ -kraken: - distribution: openshift # Distribution can be kubernetes or openshift - kubeconfig_path: ~/.kube/config # Path to kubeconfig - exit_on_failure: False # Exit when a post action scenario fails - publish_kraken_status: True # Can be accessed at http://0.0.0.0:8081 - signal_state: RUN # Will wait for the RUN signal when set to PAUSE before running the scenarios, refer docs/signal.md for more details - signal_address: 0.0.0.0 # Signal listening address - port: 8081 # Signal port - litmus_install: True # Installs specified version, set to False if it's already setup - litmus_version: v1.13.6 # Litmus version to install - litmus_uninstall: False # If you want to uninstall litmus if failure - litmus_uninstall_before_run: True # If you want to uninstall litmus before a new run starts - chaos_scenarios: # List of policies/chaos scenarios to load - - arcaflow_scenarios: - - scenarios/arcaflow/sysbench-cpu-hog/input.yaml - -cerberus: - cerberus_enabled: False # Enable it when cerberus is previously installed - cerberus_url: # When cerberus_enabled is set to True, provide the url where cerberus publishes go/no-go 
signal
-    check_applicaton_routes: False           # When enabled will look for application unavailability using the routes specified in the cerberus config and fails the run
-
-performance_monitoring:
-    deploy_dashboards: False                  # Install a mutable grafana and load the performance dashboards. Enable this only when running on OpenShift
-    repo: "https://github.com/cloud-bulldozer/performance-dashboards.git"
-    kube_burner_binary_url: "https://github.com/cloud-bulldozer/kube-burner/releases/download/v0.9.1/kube-burner-0.9.1-Linux-x86_64.tar.gz"
-    capture_metrics: False
-    config_path: config/kube_burner.yaml      # Define the Elasticsearch url and index name in this config
-    metrics_profile_path: config/metrics-aggregated.yaml
-    prometheus_url:                           # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
-    prometheus_bearer_token:                  # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
-    uuid:                                     # uuid for the run is generated by default if not set
-    enable_alerts: False                      # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
-    alert_profile: config/alerts              # Path to alert profile with the prometheus queries
-
-tunings:
-    wait_duration: 60                         # Duration to wait between each chaos scenario
-    iterations: 1                             # Number of times to execute the scenarios
-    daemon_mode: False                        # Iterations are set to infinity which means that the kraken will cause chaos forever
diff --git a/docs/arcaflow_scenarios.md b/docs/arcaflow_scenarios.md
index c2b431cd..495d956d 100644
--- a/docs/arcaflow_scenarios.md
+++ b/docs/arcaflow_scenarios.md
@@ -1,28 +1,36 @@
-### Arcaflow Scenarios
+## Arcaflow Scenarios
 Arcaflow is a workflow engine in development which provides the ability to execute workflow steps in sequence, in parallel, repeatedly, etc. The main difference to competitors such as Netflix Conductor is the ability to run ad-hoc workflows without requiring an infrastructure setup.
 The engine uses containers to execute plugins and runs them either locally in Docker/Podman or remotely on a Kubernetes cluster. The workflow system is strongly typed and allows for generating JSON schema and OpenAPI documents for all data formats involved.
-#### Prequisites
+
+### Available Scenarios
+#### Hog scenarios:
+- [CPU Hog](arcaflow_scenarios/cpu_hog.md)
+- [Memory Hog](arcaflow_scenarios/memory_hog.md)
+- [I/O Hog](arcaflow_scenarios/io_hog.md)
+
+
+### Prerequisites
 Arcaflow supports three deployment technologies:
 - Docker
 - Podman
 - Kubernetes
 
-##### Docker
+#### Docker
 In order to run Arcaflow Scenarios with the Docker deployer, be sure that:
 - Docker is correctly installed in your Operating System (to find instructions on how to install docker please refer to the [Docker Documentation](https://www.docker.com/))
 - The Docker daemon is running
 
-##### Podman
+#### Podman
 The podman deployer is built around the podman CLI and doesn't necessarily need to run alongside the podman daemon.
 To run Arcaflow Scenarios on your operating system, be sure that:
 - podman is correctly installed in your Operating System (to find instructions on how to install podman refer to the [Podman Documentation](https://podman.io/))
 - the podman CLI is in your shell PATH
 
-##### Kubernetes
+#### Kubernetes
 The kubernetes deployer integrates directly with the Kubernetes API client and needs only a valid kubeconfig file and a reachable Kubernetes/OpenShift cluster.
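+
+All the scenario `config.yaml` files added in this change select the Kubernetes deployer with an empty `connection` block; kraken populates the connection at runtime from the kubeconfig (see the config.yaml section below):
+```
+deployer:
+  connection: {}
+  type: kubernetes
+log:
+  level: debug
+```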
-#### Usage
+### Usage
 To enable arcaflow scenarios, edit the kraken config file: go to the `kraken -> chaos_scenarios` section of the yaml structure, add a new element to the list named `arcaflow_scenarios`, and then add the desired scenario
@@ -32,14 +40,14 @@ kraken:
     ...
     chaos_scenarios:
         - arcaflow_scenarios:
-            - scenarios/arcaflow/sysbench-cpu-hog/input.yaml
+            - scenarios/arcaflow/cpu-hog/input.yaml
 ```
 
-##### input.yaml
+#### input.yaml
 The implemented scenarios can be found in the *scenarios/arcaflow/* folder.
 The entrypoint of each scenario is the *input.yaml* file, which contains all the options to set up the scenario according to the desired target
 
-#### config.yaml
+### config.yaml
 The arcaflow config file. Here you can set the arcaflow deployer and the arcaflow log level.
 The supported deployers are:
 - Docker
@@ -51,25 +59,10 @@ The supported log levels are:
 - debug
 - info
 - warning
 - error
-#### workflow.yaml
+### workflow.yaml
 This file contains the steps that will be executed to perform the scenario against the target. Each step is represented by a container that will be executed by the deployer, together with its options.
 Note that we provide the scenarios as a template, but they can be manipulated to define more complex workflows. For more details regarding the arcaflow workflow architecture and syntax, refer to the [Arcaflow Documentation](https://arcalot.io/arcaflow/).
-#### Scenarios
-##### sysbench-cpu-hog
-This scenario is based on the arcaflow [sysbench](https://github.com/akopytov/sysbench) plugin.
-The purpose of this scenario is to create cpu pressure on a particular node of the Kubernetes/OpenShift cluster for a time span.
-To enable this plugin add the pointer to the scenario input file `scenarios/arcaflow/sysbench-cpu-hog/input.yaml` as described in the
-Usage section.
-This scenarios takes the following input parameters:
-- **kubeconfig :** string representation of the kubeconfig needed by the deployer to deploy the sysbench plugin in the target cluster
-**Note:** this parameter will be automatically filled by kraken if the `kubeconfig_path` property is correctly set
-- **node_selector :** key-value representation of the node label that will be used as `nodeSelector` by the pod to target a specific cluster node
-- **sysbench_cpumaxprime :** integer that defines the highest prime number during the test. Higher this value is, higher will be the time to find all the prime numbers
-- **sysbench_events :** integer that limits the maximum number of events that will be performed by sysbench, 0 removes the limit
-- **sysbench_runtime :** number of seconds the test will be run
-- **sysbench_forced_shutdown_time :** the number of seconds to wait before shutting down the benchmark after the defined run time
-- **sysbench_threads :** the number of threads on which the test will run
\ No newline at end of file
diff --git a/docs/arcaflow_scenarios/cpu_hog.md b/docs/arcaflow_scenarios/cpu_hog.md
new file mode 100644
index 00000000..c29f7a90
--- /dev/null
+++ b/docs/arcaflow_scenarios/cpu_hog.md
@@ -0,0 +1,18 @@
+# CPU Hog
+This scenario is based on the arcaflow [arcaflow-plugin-stressng](https://github.com/arcalot/arcaflow-plugin-stressng) plugin.
+The purpose of this scenario is to create CPU pressure on a particular node of the Kubernetes/OpenShift cluster for a time span.
+To enable this plugin, add the pointer to the scenario input file `scenarios/arcaflow/cpu-hog/input.yaml` as described in the
+Usage section.
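+For reference, this is the default `scenarios/arcaflow/cpu-hog/input.yaml` shipped with this change (each parameter is documented below):
+```
+input_list:
+- cpu_count: 1
+  cpu_load_percentage: 80
+  cpu_method: all
+  duration: 30s
+  node_selector: {}
+  kubeconfig: ""
+```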
+This scenario takes a list of objects named `input_list` with the following properties:
+
+- **kubeconfig :** *string* the kubeconfig needed by the deployer to deploy the stress-ng plugin in the target cluster
+**Note:** this parameter will be automatically filled by kraken if the `kubeconfig_path` property is correctly set
+- **node_selector :** *key-value map* the node label that will be used as `nodeSelector` by the pod to target a specific cluster node
+- **duration :** *string* stop the stress test after N seconds. One can also specify the units of time in seconds, minutes, hours, days or years with the suffix s, m, h, d or y.
+- **cpu_count :** *int* the number of CPU cores to be used (0 means all)
+- **cpu_method :** *string* fine-grained control of which cpu stressors to use (ackermann, cfloat etc.; see the [stress-ng](https://github.com/ColinIanKing/stress-ng) documentation for all the cpu_method options)
+- **cpu_load_percentage :** *int* the CPU load by percentage
+
+To perform several load tests simultaneously in the same run (e.g. stress two or more nodes in the same run), add another item
+to the `input_list` with the same properties (and possibly different values, e.g. different node_selectors
+to schedule the pods on different nodes). To reduce (or increase) the parallelism, change the `parallelism` value in the `workflow.yaml` file
\ No newline at end of file
diff --git a/docs/arcaflow_scenarios/io_hog.md b/docs/arcaflow_scenarios/io_hog.md
new file mode 100644
index 00000000..d87fce84
--- /dev/null
+++ b/docs/arcaflow_scenarios/io_hog.md
@@ -0,0 +1,20 @@
+# I/O Hog
+This scenario is based on the arcaflow [arcaflow-plugin-stressng](https://github.com/arcalot/arcaflow-plugin-stressng) plugin.
+The purpose of this scenario is to create disk pressure on a particular node of the Kubernetes/OpenShift cluster for a time span.
+The scenario allows attaching a node path to the pod as a `hostPath` volume.
+To enable this plugin, add the pointer to the scenario input file `scenarios/arcaflow/io-hog/input.yaml` as described in the
+Usage section.
+This scenario takes a list of objects named `input_list` with the following properties (a complete example follows the list):
+
+- **kubeconfig :** *string* the kubeconfig needed by the deployer to deploy the stress-ng plugin in the target cluster
+**Note:** this parameter will be automatically filled by kraken if the `kubeconfig_path` property is correctly set
+- **node_selector :** *key-value map* the node label that will be used as `nodeSelector` by the pod to target a specific cluster node
+- **duration :** *string* stop the stress test after N seconds. One can also specify the units of time in seconds, minutes, hours, days or years with the suffix s, m, h, d or y.
+- **target_pod_folder :** *string* the path in the pod where the volume is mounted
+- **target_pod_volume :** *object* the `hostPath` volume definition in the [Kubernetes/OpenShift](https://docs.openshift.com/container-platform/3.11/install_config/persistent_storage/using_hostpath.html) format, that will be attached to the pod as a volume
+- **io_workers :** *int* the number of workers that will continually write, read and remove temporary files
+- **io_write_bytes :** *string* writes N bytes for each hdd process. The size can be expressed as % of free space on the file system or in units of Bytes, KBytes, MBytes and GBytes using the suffix b, k, m or g
+- **io_block_size :** *string* the size of each write in bytes. Size can be from 1 byte to 4m.
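+For reference, this is the default `scenarios/arcaflow/io-hog/input.yaml` shipped with this change:
+```
+input_list:
+- duration: 30s
+  io_block_size: 1m
+  io_workers: 1
+  io_write_bytes: 10m
+  target_pod_folder: /data
+  target_pod_volume:
+    hostPath:
+      path: /
+    name: node-volume
+  node_selector: {}
+  kubeconfig: ""
+```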
+
+To perform several load tests simultaneously in the same run (e.g. stress two or more nodes in the same run), add another item
+to the `input_list` with the same properties (and possibly different values, e.g. different node_selectors
+to schedule the pods on different nodes). To reduce (or increase) the parallelism, change the `parallelism` value in the `workflow.yaml` file
\ No newline at end of file
diff --git a/docs/arcaflow_scenarios/memory_hog.md b/docs/arcaflow_scenarios/memory_hog.md
new file mode 100644
index 00000000..ff59b6fb
--- /dev/null
+++ b/docs/arcaflow_scenarios/memory_hog.md
@@ -0,0 +1,17 @@
+# Memory Hog
+This scenario is based on the arcaflow [arcaflow-plugin-stressng](https://github.com/arcalot/arcaflow-plugin-stressng) plugin.
+The purpose of this scenario is to create Virtual Memory pressure on a particular node of the Kubernetes/OpenShift cluster for a time span.
+To enable this plugin, add the pointer to the scenario input file `scenarios/arcaflow/memory-hog/input.yaml` as described in the
+Usage section.
+This scenario takes a list of objects named `input_list` with the following properties (a complete example follows the list):
+
+- **kubeconfig :** *string* the kubeconfig needed by the deployer to deploy the stress-ng plugin in the target cluster
+**Note:** this parameter will be automatically filled by kraken if the `kubeconfig_path` property is correctly set
+- **node_selector :** *key-value map* the node label that will be used as `nodeSelector` by the pod to target a specific cluster node
+- **duration :** *string* stop the stress test after N seconds. One can also specify the units of time in seconds, minutes, hours, days or years with the suffix s, m, h, d or y.
+- **vm_bytes :** *string* N bytes per vm process or percentage of memory used (using the % symbol). The size can be expressed in units of Bytes, KBytes, MBytes and GBytes using the suffix b, k, m or g.
+- **vm_workers :** *int* the number of VM stressors to be run (0 means 1 stressor per CPU)
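+For reference, this is the default `scenarios/arcaflow/memory-hog/input.yaml` shipped with this change:
+```
+input_list:
+- duration: 30s
+  vm_bytes: 10%
+  vm_workers: 2
+  node_selector: {}
+  kubeconfig: ""
+```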
+
+To perform several load tests simultaneously in the same run (e.g. stress two or more nodes in the same run), add another item
+to the `input_list` with the same properties (and possibly different values, e.g. different node_selectors
+to schedule the pods on different nodes). To reduce (or increase) the parallelism, change the `parallelism` value in the `workflow.yaml` file
\ No newline at end of file
diff --git a/kraken/arcaflow_plugin/__init__.py b/kraken/arcaflow_plugin/__init__.py
index d45a7a4f..9438d945 100644
--- a/kraken/arcaflow_plugin/__init__.py
+++ b/kraken/arcaflow_plugin/__init__.py
@@ -1 +1,2 @@
-from .arcaflow_plugin import *
\ No newline at end of file
+from .arcaflow_plugin import *
+from .context_auth import ContextAuth
diff --git a/kraken/arcaflow_plugin/arcaflow_plugin.py b/kraken/arcaflow_plugin/arcaflow_plugin.py
index 4a47e654..5c8ee707 100644
--- a/kraken/arcaflow_plugin/arcaflow_plugin.py
+++ b/kraken/arcaflow_plugin/arcaflow_plugin.py
@@ -1,64 +1,117 @@
 import arcaflow
 import os
 import yaml
-import base64
+import logging
+import sys
 from pathlib import Path
 from typing import List
+from .context_auth import ContextAuth
 
 
 def run(scenarios_list: List[str], kubeconfig_path: str):
     for scenario in scenarios_list:
-        engineArgs = buildArgs(scenario)
-        runWorkflow(engineArgs, kubeconfig_path)
+        engine_args = build_args(scenario)
+        run_workflow(engine_args, kubeconfig_path)
 
 
-def runWorkflow(engineArgs: arcaflow.EngineArgs, kubeconfig_path: str):
-    setArcaKubeConfig(engineArgs, kubeconfig_path)
-    arcaflow.run(engineArgs)
+def run_workflow(engine_args: arcaflow.EngineArgs, kubeconfig_path: str):
+    set_arca_kubeconfig(engine_args, kubeconfig_path)
+    exit_status = arcaflow.run(engine_args)
+    if exit_status != 0:
+        logging.error(
+            f"failed to run arcaflow scenario {engine_args.input}"
+        )
+        sys.exit(exit_status)
 
 
-def buildArgs(input: str) -> arcaflow.EngineArgs:
+def build_args(input_file: str) -> arcaflow.EngineArgs:
-    """sets the kubeconfig parsed by setArcaKubeConfig as an input to the arcaflow workflow"""
+    """builds the arcaflow engine arguments from the scenario input file (the kubeconfig is injected later by set_arca_kubeconfig)"""
-    context = Path(input).parent
+    context = Path(input_file).parent
     workflow = "{}/workflow.yaml".format(context)
     config = "{}/config.yaml".format(context)
-    if os.path.exists(context) == False:
+    if not os.path.exists(context):
         raise Exception(
             "context folder for arcaflow workflow not found: {}".format(
                 context)
         )
-    if os.path.exists(input) == False:
+    if not os.path.exists(input_file):
         raise Exception(
-            "input file for arcaflow workflow not found: {}".format(input))
+            "input file for arcaflow workflow not found: {}".format(input_file))
-    if os.path.exists(workflow) == False:
+    if not os.path.exists(workflow):
         raise Exception(
             "workflow file for arcaflow workflow not found: {}".format(
                 workflow)
         )
-    if os.path.exists(config) == False:
+    if not os.path.exists(config):
         raise Exception(
             "configuration file for arcaflow workflow not found: {}".format(
                 config)
         )
-    engineArgs = arcaflow.EngineArgs()
-    engineArgs.context = context
-    engineArgs.config = config
-    engineArgs.input = input
-    return engineArgs
+    engine_args = arcaflow.EngineArgs()
+    engine_args.context = context
+    engine_args.config = config
+    engine_args.input = input_file
+    return engine_args
 
 
-def setArcaKubeConfig(engineArgs: arcaflow.EngineArgs, kubeconfig_path: str):
-    kubeconfig_str = buildArcaKubeConfig(kubeconfig_path)
-    with open(engineArgs.input, "r") as stream:
-        input = yaml.safe_load(stream)
-    input["kubeconfig"] = kubeconfig_str
+def set_arca_kubeconfig(engine_args: arcaflow.EngineArgs, kubeconfig_path: str):
+
+    context_auth = ContextAuth()
+    if not os.path.exists(kubeconfig_path):
+        raise Exception("kubeconfig not found in {}".format(kubeconfig_path))
+
+    with open(kubeconfig_path, "r") as stream:
+        try:
+            kubeconfig = yaml.safe_load(stream)
+            context_auth.fetch_auth_data(kubeconfig)
+        except Exception as e:
+            logging.error(
+                "impossible to read kubeconfig file in: {}".format(kubeconfig_path))
+            raise e
+
+    kubeconfig_str = set_kubeconfig_auth(kubeconfig, context_auth)
+
+    with open(engine_args.input, "r") as stream:
+        input_file = yaml.safe_load(stream)
+        if "input_list" in input_file and isinstance(input_file["input_list"], list):
+            for index, _ in enumerate(input_file["input_list"]):
+                if isinstance(input_file["input_list"][index], dict):
+                    input_file["input_list"][index]["kubeconfig"] = kubeconfig_str
+        else:
+            input_file["kubeconfig"] = kubeconfig_str
     stream.close()
-    with open(engineArgs.input, "w") as stream:
-        yaml.safe_dump(input, stream)
+    with open(engine_args.input, "w") as stream:
+        yaml.safe_dump(input_file, stream)
+
+    with open(engine_args.config, "r") as stream:
+        config_file = yaml.safe_load(stream)
+        if config_file["deployer"]["type"] == "kubernetes":
+            kube_connection = set_kubernetes_deployer_auth(
+                config_file["deployer"]["connection"], context_auth)
+            config_file["deployer"]["connection"] = kube_connection
+    with open(engine_args.config, "w") as stream:
+        yaml.safe_dump(config_file, stream, explicit_start=True, width=4096)
 
 
-def buildArcaKubeConfig(kubeconfig_path: str) -> str:
+def set_kubernetes_deployer_auth(deployer: any, context_auth: ContextAuth) -> any:
+    if context_auth.clusterHost is not None:
+        deployer["host"] = context_auth.clusterHost
+    if context_auth.clientCertificateData is not None:
+        deployer["cert"] = context_auth.clientCertificateData
+    if context_auth.clientKeyData is not None:
+        deployer["key"] = context_auth.clientKeyData
+    if context_auth.clusterCertificateData is not None:
+        deployer["cacert"] = context_auth.clusterCertificateData
+    if context_auth.username is not None:
+        deployer["username"] = context_auth.username
+    if context_auth.password is not None:
+        deployer["password"] = context_auth.password
+    if context_auth.bearerToken is not None:
+        deployer["bearerToken"] = context_auth.bearerToken
+    return deployer
+
+
+def set_kubeconfig_auth(kubeconfig: any, context_auth: ContextAuth) -> str:
     """
     Builds an arcaflow-compatible kubeconfig representation and returns it as a string.
    In order to run arcaflow plugins in kubernetes/openshift, the kubeconfig must contain client certificate/key
@@ -66,80 +119,50 @@ def buildArcaKubeConfig(kubeconfig_path: str) -> str:
     case; in fact, kubeconfig may contain filesystem paths to those files, so this function builds an arcaflow-compatible
     kubeconfig file and returns it as a string that can be safely included in input.yaml
     """
-    if os.path.exists(kubeconfig_path) == False:
-        raise Exception("kubeconfig not found in {}".format(kubeconfig_path))
-
-    with open(kubeconfig_path, "r") as stream:
-        try:
-            kubeconfig = yaml.safe_load(stream)
-        except:
-            raise Exception(
-                "impossible to read kubeconfig file in: {}".format(
-                    kubeconfig_path)
-            )
     if "current-context" not in kubeconfig.keys():
         raise Exception(
             "invalid kubeconfig file, impossible to determine current-context"
         )
-    userId = None
-    clusterId = None
-    userName = None
-    clusterName = None
-    currentContext = kubeconfig["current-context"]
+    user_id = None
+    cluster_id = None
+    user_name = None
+    cluster_name = None
+    current_context = kubeconfig["current-context"]
     for context in kubeconfig["contexts"]:
-        if context["name"] == currentContext:
-            userName = context["context"]["user"]
-            clusterName = context["context"]["cluster"]
-    if userName is None:
+        if context["name"] == current_context:
+            user_name = context["context"]["user"]
+            cluster_name = context["context"]["cluster"]
+    if user_name is None:
         raise Exception(
-            "user not set for context {} in kubeconfig file".format(context)
+            "user not set for context {} in kubeconfig file".format(current_context)
         )
-    if clusterName is None:
+    if cluster_name is None:
         raise Exception(
-            "cluster not set for context {} in kubeconfig file".format(context)
+            "cluster not set for context {} in kubeconfig file".format(current_context)
         )
     for index, user in enumerate(kubeconfig["users"]):
-        if user["name"] == userName:
-            userId = index
+        if user["name"] == user_name:
+            user_id = index
     for index, cluster in enumerate(kubeconfig["clusters"]):
-        if cluster["name"] == clusterName:
-            clusterId = index
+        if cluster["name"] == cluster_name:
+            cluster_id = index
 
-    if userId is None:
+    if user_id is None:
         raise Exception(
-            "no user {} found in kubeconfig users".format(userName)
+            "no user {} found in kubeconfig users".format(user_name)
         )
-    if clusterId is None:
-        raise Exception(
-            "no cluster {} found in kubeconfig users".format(cluster)
-        )
+    if cluster_id is None:
+        raise Exception(
+            "no cluster {} found in kubeconfig clusters".format(cluster_name)
+        )
-    if "client-certificate" in kubeconfig["users"][userId]["user"]:
-        file = kubeconfig["users"][userId]["user"]["client-certificate"]
-        if (os.path.exists(file) == False):
-            raise Exception("user certificate not found {} ".format(file))
-        with open(file, "rb") as file_stream:
-            encoded_file = base64.b64encode(file_stream.read()).decode("utf-8")
-        kubeconfig["users"][userId]["user"]["client-certificate-data"] = encoded_file
-        del kubeconfig["users"][userId]["user"]["client-certificate"]
+    if "client-certificate" in kubeconfig["users"][user_id]["user"]:
+        kubeconfig["users"][user_id]["user"]["client-certificate-data"] = context_auth.clientCertificateDataBase64
+        del kubeconfig["users"][user_id]["user"]["client-certificate"]
 
-    if "client-key" in kubeconfig["users"][userId]["user"]:
-        file = kubeconfig["users"][userId]["user"]["client-key"]
-        if (os.path.exists(file) == False):
-            raise Exception("user key not found: {} ".format(file))
-        with open(file, "rb") as file_stream:
-            encoded_file = base64.b64encode(file_stream.read()).decode("utf-8")
-        kubeconfig["users"][userId]["user"]["client-key-data"] = encoded_file
-        del kubeconfig["users"][userId]["user"]["client-key"]
+    if "client-key" in kubeconfig["users"][user_id]["user"]:
+        kubeconfig["users"][user_id]["user"]["client-key-data"] = context_auth.clientKeyDataBase64
+        del kubeconfig["users"][user_id]["user"]["client-key"]
 
-    if "certificate-authority" in kubeconfig["clusters"][clusterId]["cluster"]:
-        file = kubeconfig["clusters"][clusterId]["cluster"]["certificate-authority"]
-        if (os.path.exists(file) == False):
-            raise Exception("cluster certificate not found: {}".format(file))
-        with open(file, "rb") as file_stream:
-            encoded_file = base64.b64encode(file_stream.read()).decode("utf-8")
-        kubeconfig["clusters"][clusterId]["cluster"]["certificate-authority-data"] = encoded_file
-        del kubeconfig["clusters"][clusterId]["cluster"]["certificate-authority"]
+    if "certificate-authority" in kubeconfig["clusters"][cluster_id]["cluster"]:
+        kubeconfig["clusters"][cluster_id]["cluster"]["certificate-authority-data"] = context_auth.clusterCertificateDataBase64
+        del kubeconfig["clusters"][cluster_id]["cluster"]["certificate-authority"]
 
     kubeconfig_str = yaml.dump(kubeconfig)
     return kubeconfig_str
diff --git a/kraken/arcaflow_plugin/context_auth.py b/kraken/arcaflow_plugin/context_auth.py
new file mode 100644
index 00000000..47866c14
--- /dev/null
+++ b/kraken/arcaflow_plugin/context_auth.py
@@ -0,0 +1,142 @@
+import yaml
+import os
+import base64
+
+
+class ContextAuth:
+    clusterCertificate: str = None
+    clusterCertificateData: str = None
+    clusterHost: str = None
+    clientCertificate: str = None
+    clientCertificateData: str = None
+    clientKey: str = None
+    clientKeyData: str = None
+    clusterName: str = None
+    username: str = None
+    password: str = None
+    bearerToken: str = None
+    # TODO: integrate in krkn-lib-kubernetes in the next iteration
+
+    @property
+    def clusterCertificateDataBase64(self):
+        if self.clusterCertificateData is not None:
+            return base64.b64encode(bytes(self.clusterCertificateData, 'utf-8')).decode("ascii")
+        return
+
+    @property
+    def clientCertificateDataBase64(self):
+        if self.clientCertificateData is not None:
+            return base64.b64encode(bytes(self.clientCertificateData, 'utf-8')).decode("ascii")
+        return
+
+    @property
+    def clientKeyDataBase64(self):
+        if self.clientKeyData is not None:
+            return base64.b64encode(bytes(self.clientKeyData, 'utf-8')).decode("ascii")
+        return
+
+    def fetch_auth_data(self, kubeconfig: any):
+        context_username = None
+        current_context = kubeconfig.get("current-context")
+        if current_context is None:
+            raise Exception("no current-context found in kubeconfig")
+
+        for context in kubeconfig["contexts"]:
+            if context["name"] == current_context:
+                context_username = context["context"]["user"]
+                self.clusterName = context["context"]["cluster"]
+        if context_username is None:
+            raise Exception("user not found for context {0}".format(current_context))
+        if self.clusterName is None:
+            raise Exception("cluster not found for context {0}".format(current_context))
+        cluster_id = None
+        user_id = None
+        for index, user in enumerate(kubeconfig["users"]):
+            if user["name"] == context_username:
+                user_id = index
+        if user_id is None:
+            raise Exception("user {0} not found in kubeconfig users".format(context_username))
+
+        for index, cluster in enumerate(kubeconfig["clusters"]):
+            if cluster["name"] == self.clusterName:
+                cluster_id = index
+
+        if cluster_id is None:
+            raise Exception(
+                "no cluster {} found in kubeconfig clusters".format(self.clusterName)
+            )
+
+        user = kubeconfig["users"][user_id]["user"]
+        cluster =
kubeconfig["clusters"][cluster_id]["cluster"] + # sets cluster api URL + self.clusterHost = cluster["server"] + # client certificates + + if "client-key" in user: + try: + self.clientKey = user["client-key"] + self.clientKeyData = self.read_file(user["client-key"]) + except Exception as e: + raise e + + if "client-key-data" in user: + try: + self.clientKeyData = base64.b64decode(user["client-key-data"]).decode('utf-8') + except Exception as e: + raise Exception("impossible to decode client-key-data") + + if "client-certificate" in user: + try: + self.clientCertificate = user["client-certificate"] + self.clientCertificateData = self.read_file(user["client-certificate"]) + except Exception as e: + raise e + + if "client-certificate-data" in user: + try: + self.clientCertificateData = base64.b64decode(user["client-certificate-data"]).decode('utf-8') + except Exception as e: + raise Exception("impossible to decode client-certificate-data") + + # cluster certificate authority + + if "certificate-authority" in cluster: + try: + self.clusterCertificate = cluster["certificate-authority"] + self.clusterCertificateData = self.read_file(cluster["certificate-authority"]) + except Exception as e: + raise e + + if "certificate-authority-data" in cluster: + try: + self.clusterCertificateData = base64.b64decode(cluster["certificate-authority-data"]).decode('utf-8') + except Exception as e: + raise Exception("impossible to decode certificate-authority-data") + + if "username" in user: + self.username = user["username"] + + if "password" in user: + self.password = user["password"] + + if "token" in user: + self.bearerToken = user["token"] + + def read_file(self, filename:str) -> str: + if not os.path.exists(filename): + raise Exception("file not found {0} ".format(filename)) + with open(filename, "rb") as file_stream: + return file_stream.read().decode('utf-8') + + + + + + + + + + + diff --git a/kraken/arcaflow_plugin/fixtures/ca.crt b/kraken/arcaflow_plugin/fixtures/ca.crt new file mode 100644 index 00000000..e3264358 --- /dev/null +++ b/kraken/arcaflow_plugin/fixtures/ca.crt @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDBjCCAe6gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p +a3ViZUNBMB4XDTIzMDMxMzE1NDAxM1oXDTMzMDMxMTE1NDAxM1owFTETMBEGA1UE +AxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMnz +U/gIbJBRGOgNYVKX2fV03ANOwnM4VjquR28QMAdxURqgOFZ6IxYNysHEyxxE9I+I +DAm9hi4vQPbOX7FlxUezuzw+ExEfa6RRJ+n+AGJOV1lezCVph6OaJxB1+L1UqaDZ +eM3B4cUf/iCc5Y4bs927+CBG3MJL/jmCVPCO+MiSn/l73PXSFNJAYMvRj42zkXqD +CVG9CwY2vWgZnnzl01l7jNGtie871AmV2uqKakJrQ2ILhD+8fZk4jE5JBDTCZnqQ +pXIc+vERNKLUS8cvjO6Ux8dMv/Z7+xonpXOU59LlpUdHWP9jgCvMTwiOriwqGjJ+ +pQJWpX9Dm+oxJiVOJzsCAwEAAaNhMF8wDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQW +MBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBQU9pDMtbayJdNM6bp0IG8dcs15qTANBgkqhkiG9w0BAQsFAAOCAQEAtl9TVKPA +hTnPODqv0AGTqreS9kLg4WUUjZRaPUkPWmtCoTh2Yf55nRWdHOHeZnCWDSg24x42 +lpt+13IdqKew1RKTpKCTkicMFi090A01bYu/w39Cm6nOAA5h8zkgSkV5czvQotuV +SoN2vB+nbuY28ah5PkdqjMHEZbNwa59cgEke8wB1R1DWFQ/pqflrH2v9ACAuY+5Q +i673tA6CXrb1YfaCQnVBzcfvjGS1MqShPKpOLMF+/GccPczNimaBxMnKvYLvf3pN +qEUrJC00mAcein8HmxR2Xz8wredbMUUyrQxW29pZJwfGE5GU0olnlsA0lZLbTwio +xoolo5y+fsK/dA== +-----END CERTIFICATE----- \ No newline at end of file diff --git a/kraken/arcaflow_plugin/fixtures/client.crt b/kraken/arcaflow_plugin/fixtures/client.crt new file mode 100644 index 00000000..64e4aef9 --- /dev/null +++ b/kraken/arcaflow_plugin/fixtures/client.crt @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- 
+MIIDITCCAgmgAwIBAgIBAjANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5p +a3ViZUNBMB4XDTIzMDUwMTA4NTc0N1oXDTI2MDUwMTA4NTc0N1owMTEXMBUGA1UE +ChMOc3lzdGVtOm1hc3RlcnMxFjAUBgNVBAMTDW1pbmlrdWJlLXVzZXIwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC0b7uy9nQYrh7uC5NODve7dFNLAgo5 +pWRS6Kx13ULA55gOpieZiI5/1jwUBjOz0Hhl5QAdHC1HDNu5wf4MmwIEheuq3kMA +mfuvNxW2BnWSDuXyUMlBfqlwg5o6W8ndEWaK33D7wd2WQsSsAnhQPJSjnzWKvWKq ++Kbcygc4hdss/ZWN+SXLTahNpHBw0sw8AcJqddNeXs2WI5GdZmbXL4QZI36EaNUm +m4xKmKRKYIP9wYkmXOV/D2h1meM44y4lul5v2qvo6I+umJ84q4W1/W1vVmAzyVfL +v1TQCUx8cpKMHzw3ma6CTBCtU3Oq9HKHBnf8GyHZicmV7ESzf/phJu4ZAgMBAAGj +YDBeMA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUH +AwIwDAYDVR0TAQH/BAIwADAfBgNVHSMEGDAWgBQU9pDMtbayJdNM6bp0IG8dcs15 +qTANBgkqhkiG9w0BAQsFAAOCAQEABNzEQQMYUcLsBASHladEjr46avKn7gREfaDl +Y5PBvgCPP42q/sW/9iCNY3UpT9TJZWM6s01+0p6I96jYbRQER1NX7O4OgQYHmFw2 +PF6UOG2vMo54w11OvL7sbr4d+nkE6ItdM9fLDIJ3fEOYJZkSoxhOL/U3jSjIl7Wu +KCIlpM/M/gcZ4w2IvcLrWtvswbFNUd+dwQfBGcQTmSQDOLE7MqSvzYAkeNv73GLB +ieba7gs/PmoTFsf9nW60iXymDDF4MtODn15kqT/y1uD6coujmiEiIomBfxqAkUCU +0ciP/KF5oOEMmMedm7/peQxaRTMdRSk4yu7vbj/BxnTcj039Qg== +-----END CERTIFICATE----- \ No newline at end of file diff --git a/kraken/arcaflow_plugin/fixtures/client.key b/kraken/arcaflow_plugin/fixtures/client.key new file mode 100644 index 00000000..03cfb879 --- /dev/null +++ b/kraken/arcaflow_plugin/fixtures/client.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAtG+7svZ0GK4e7guTTg73u3RTSwIKOaVkUuisdd1CwOeYDqYn +mYiOf9Y8FAYzs9B4ZeUAHRwtRwzbucH+DJsCBIXrqt5DAJn7rzcVtgZ1kg7l8lDJ +QX6pcIOaOlvJ3RFmit9w+8HdlkLErAJ4UDyUo581ir1iqvim3MoHOIXbLP2Vjfkl +y02oTaRwcNLMPAHCanXTXl7NliORnWZm1y+EGSN+hGjVJpuMSpikSmCD/cGJJlzl +fw9odZnjOOMuJbpeb9qr6OiPrpifOKuFtf1tb1ZgM8lXy79U0AlMfHKSjB88N5mu +gkwQrVNzqvRyhwZ3/Bsh2YnJlexEs3/6YSbuGQIDAQABAoIBAQCdJxPb8zt6o2zc +98f8nJy378D7+3LccmjGrVBH98ZELXIKkDy9RGqYfQcmiaBOZKv4U1OeBwSIdXKK +f6O9ZuSC/AEeeSbyRysmmFuYhlewNrmgKyyelqsNDBIv8fIHUTh2i9Xj8B4G2XBi +QGR5vcnYGLqRdBGTx63Nb0iKuksDCwPAuPA/e0ySz9HdWL1j4bqpVSYsOIXsqTDr +CVnxUeSIL0fFQnRm3IASXQD7zdq9eEFX7vESeleZoz8qNcKb4Na/C3N6crScjgH7 +qyNZ2zNLfy1LT84k8uc1TMX2KcEVEmfdDv5cCnUH2ic12CwXMZ0vgId5LJTaHx4x +ytIQIe5hAoGBANB+TsRXP4KzcjZlUUfiAp/pWUM4kVktbsfZa1R2NEuIGJUxPk3P +7WS0WX5W75QKRg+UWTubg5kfd0f9fklLgofmliBnY/HrpgdyugJmUZBgzIxmy0k+ +aCe0biD1gULfyyrKtfe8k5wRFstzhfGszlOf2ebR87sSVNBuF2lEwPTvAoGBAN2M +0/XrsodGU4B9Mj86Go2gb2k2WU2izI0cO+tm2S5U5DvKmVEnmjXfPRaOFj2UUQjo +cljnDAinbN+O0+Inc35qsEeYdAIepNAPglzcpfTHagja9mhx2idLYTXGhbZLL+Ei +TRzMyP27NF+GVVfYU/cA86ns6NboG6spohmnqh13AoGAKPc4aNGv0/GIVnHP56zb +0SnbdR7PSFNp+fCZay4Slmi2U9IqKMXbIjdhgjZ4uoDORU9jvReQYuzQ1h9TyfkB +O8yt4M4P0D/6DmqXa9NI4XJznn6wIMMXWf3UybsTW913IQBVgsjVxAuDjBQ11Eec +/sdg3D6SgkZWzeFjzjZJJ5cCgYBSYVg7fE3hERxhjawOaJuRCBQFSklAngVzfwkk +yhR9ruFC/l2uGIy19XFwnprUgP700gIa3qbR3PeV1TUiRcsjOaacqKqSUzSzjODL +iNxIvZHHAyxWv+b/b38REOWNWD3QeAG2cMtX1bFux7OaO31VPkxcZhRaPOp05cE5 +yudtlwKBgDBbR7RLYn03OPm3NDBLLjTybhD8Iu8Oj7UeNCiEWAdZpqIKYnwSxMzQ +kdo4aTENA/seEwq+XDV7TwbUIFFJg5gDXIhkcK2c9kiO2bObCAmKpBlQCcrp0a5X +NSBk1N/ZG/Qhqns7z8k01KN4LNcdpRoNiYYPgY+p3xbY8+nWhv+q +-----END RSA PRIVATE KEY----- \ No newline at end of file diff --git a/kraken/arcaflow_plugin/test_context_auth.py b/kraken/arcaflow_plugin/test_context_auth.py new file mode 100644 index 00000000..5571018e --- /dev/null +++ b/kraken/arcaflow_plugin/test_context_auth.py @@ -0,0 +1,100 @@ +import os +import unittest + +from context_auth import ContextAuth + + +class TestCurrentContext(unittest.TestCase): + + def get_kubeconfig_with_data(self) -> str: + """ + This function returns a test 
kubeconfig file as a string. + + :return: a test kubeconfig file in string format (for unit testing purposes) + """ # NOQA + return """apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM5ekNDQWQrZ0F3SUJBZ0lVV01PTVBNMVUrRi9uNXN6TSthYzlMcGZISHB3d0RRWUpLb1pJaHZjTkFRRUwKQlFBd0hqRWNNQm9HQTFVRUF3d1RhM1ZpZFc1MGRTNXNiMk5oYkdSdmJXRnBiakFlRncweU1URXlNRFl4T0RBdwpNRFJhRncwek1URXlNRFF4T0RBd01EUmFNQjR4SERBYUJnTlZCQU1NRTJ0MVluVnVkSFV1Ykc5allXeGtiMjFoCmFXNHdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDNExhcG00SDB0T1NuYTNXVisKdzI4a0tOWWRwaHhYOUtvNjUwVGlOK2c5ZFNQU3VZK0V6T1JVOWVONlgyWUZkMEJmVFNodno4Y25rclAvNysxegpETEoxQ3MwRi9haEV3ZDQxQXN5UGFjbnRiVE80dGRLWm9POUdyODR3YVdBN1hSZmtEc2ZxRGN1YW5UTmVmT1hpCkdGbmdDVzU5Q285M056alB1eEFrakJxdVF6eE5GQkgwRlJPbXJtVFJ4cnVLZXo0aFFuUW1OWEFUNnp0M21udzMKWUtWTzU4b2xlcUxUcjVHNlRtVFQyYTZpVGdtdWY2N0cvaVZlalJGbkw3YkNHWmgzSjlCSTNMcVpqRzE4dWxvbgpaVDdQcGQrQTlnaTJOTm9UZlI2TVB5SndxU1BCL0xZQU5ZNGRoZDVJYlVydDZzbmViTlRZSHV2T0tZTDdNTWRMCmVMSzFBZ01CQUFHakxUQXJNQWtHQTFVZEV3UUNNQUF3SGdZRFZSMFJCQmN3RllJVGEzVmlkVzUwZFM1c2IyTmgKYkdSdmJXRnBiakFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBQTVqUHVpZVlnMExySE1PSkxYY0N4d3EvVzBDNApZeFpncVd3VHF5VHNCZjVKdDlhYTk0SkZTc2dHQWdzUTN3NnA2SlBtL0MyR05MY3U4ZWxjV0E4UXViQWxueXRRCnF1cEh5WnYrZ08wMG83TXdrejZrTUxqQVZ0QllkRzJnZ21FRjViTEk5czBKSEhjUGpHUkl1VHV0Z0tHV1dPWHgKSEg4T0RzaG9wZHRXMktrR2c2aThKaEpYaWVIbzkzTHptM00xRUNGcXAvMEdtNkN1RFphVVA2SGpJMWRrYllLdgpsSHNVZ1U1SmZjSWhNYmJLdUllTzRkc1YvT3FHcm9iNW5vcmRjaExBQmRDTnc1cmU5T1NXZGZ1VVhSK0ViZVhrCjVFM0tFYzA1RGNjcGV2a1NTdlJ4SVQrQzNMOTltWGcxL3B5NEw3VUhvNFFLTXlqWXJXTWlLRlVKV1E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + server: https://127.0.0.1:6443 + name: default +contexts: +- context: + cluster: default + namespace: default + user: testuser + name: default +current-context: default +kind: Config +preferences: {} +users: +- name: testuser + user: + client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM5ekNDQWQrZ0F3SUJBZ0lVV01PTVBNMVUrRi9uNXN6TSthYzlMcGZISHB3d0RRWUpLb1pJaHZjTkFRRUwKQlFBd0hqRWNNQm9HQTFVRUF3d1RhM1ZpZFc1MGRTNXNiMk5oYkdSdmJXRnBiakFlRncweU1URXlNRFl4T0RBdwpNRFJhRncwek1URXlNRFF4T0RBd01EUmFNQjR4SERBYUJnTlZCQU1NRTJ0MVluVnVkSFV1Ykc5allXeGtiMjFoCmFXNHdnZ0VpTUEwR0NTcUdTSWIzRFFFQkFRVUFBNElCRHdBd2dnRUtBb0lCQVFDNExhcG00SDB0T1NuYTNXVisKdzI4a0tOWWRwaHhYOUtvNjUwVGlOK2c5ZFNQU3VZK0V6T1JVOWVONlgyWUZkMEJmVFNodno4Y25rclAvNysxegpETEoxQ3MwRi9haEV3ZDQxQXN5UGFjbnRiVE80dGRLWm9POUdyODR3YVdBN1hSZmtEc2ZxRGN1YW5UTmVmT1hpCkdGbmdDVzU5Q285M056alB1eEFrakJxdVF6eE5GQkgwRlJPbXJtVFJ4cnVLZXo0aFFuUW1OWEFUNnp0M21udzMKWUtWTzU4b2xlcUxUcjVHNlRtVFQyYTZpVGdtdWY2N0cvaVZlalJGbkw3YkNHWmgzSjlCSTNMcVpqRzE4dWxvbgpaVDdQcGQrQTlnaTJOTm9UZlI2TVB5SndxU1BCL0xZQU5ZNGRoZDVJYlVydDZzbmViTlRZSHV2T0tZTDdNTWRMCmVMSzFBZ01CQUFHakxUQXJNQWtHQTFVZEV3UUNNQUF3SGdZRFZSMFJCQmN3RllJVGEzVmlkVzUwZFM1c2IyTmgKYkdSdmJXRnBiakFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBQTVqUHVpZVlnMExySE1PSkxYY0N4d3EvVzBDNApZeFpncVd3VHF5VHNCZjVKdDlhYTk0SkZTc2dHQWdzUTN3NnA2SlBtL0MyR05MY3U4ZWxjV0E4UXViQWxueXRRCnF1cEh5WnYrZ08wMG83TXdrejZrTUxqQVZ0QllkRzJnZ21FRjViTEk5czBKSEhjUGpHUkl1VHV0Z0tHV1dPWHgKSEg4T0RzaG9wZHRXMktrR2c2aThKaEpYaWVIbzkzTHptM00xRUNGcXAvMEdtNkN1RFphVVA2SGpJMWRrYllLdgpsSHNVZ1U1SmZjSWhNYmJLdUllTzRkc1YvT3FHcm9iNW5vcmRjaExBQmRDTnc1cmU5T1NXZGZ1VVhSK0ViZVhrCjVFM0tFYzA1RGNjcGV2a1NTdlJ4SVQrQzNMOTltWGcxL3B5NEw3VUhvNFFLTXlqWXJXTWlLRlVKV1E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + client-key-data: 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV2QUlCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktZd2dnU2lBZ0VBQW9JQkFRQzRMYXBtNEgwdE9TbmEKM1dWK3cyOGtLTllkcGh4WDlLbzY1MFRpTitnOWRTUFN1WStFek9SVTllTjZYMllGZDBCZlRTaHZ6OGNua3JQLwo3KzF6RExKMUNzMEYvYWhFd2Q0MUFzeVBhY250YlRPNHRkS1pvTzlHcjg0d2FXQTdYUmZrRHNmcURjdWFuVE5lCmZPWGlHRm5nQ1c1OUNvOTNOempQdXhBa2pCcXVRenhORkJIMEZST21ybVRSeHJ1S2V6NGhRblFtTlhBVDZ6dDMKbW53M1lLVk81OG9sZXFMVHI1RzZUbVRUMmE2aVRnbXVmNjdHL2lWZWpSRm5MN2JDR1poM0o5QkkzTHFaakcxOAp1bG9uWlQ3UHBkK0E5Z2kyTk5vVGZSNk1QeUp3cVNQQi9MWUFOWTRkaGQ1SWJVcnQ2c25lYk5UWUh1dk9LWUw3Ck1NZExlTEsxQWdNQkFBRUNnZ0VBQ28rank4NW5ueVk5L2l6ZjJ3cjkzb2J3OERaTVBjYnIxQURhOUZYY1hWblEKT2c4bDZhbU9Ga2tiU0RNY09JZ0VDdkx6dEtXbmQ5OXpydU5sTEVtNEdmb0trNk5kK01OZEtKRUdoZHE5RjM1Qgpqdi91R1owZTIyRE5ZLzFHNVdDTE5DcWMwQkVHY2RFOTF0YzJuMlppRVBTNWZ6WVJ6L1k4cmJ5K1NqbzJkWE9RCmRHYWRlUFplbi9UbmlHTFlqZWhrbXZNQjJvU0FDbVMycTd2OUNrcmdmR1RZbWJzeGVjSU1QK0JONG9KS3BOZ28KOUpnRWJ5SUxkR1pZS2pQb2lLaHNjMVhmSy8zZStXSmxuYjJBaEE5Y1JMUzhMcDdtcEYySWp4SjNSNE93QTg3WQpNeGZvZWFGdnNuVUFHWUdFWFo4Z3BkWmhQMEoxNWRGdERjajIrcngrQVFLQmdRRDFoSE9nVGdFbERrVEc5bm5TCjE1eXYxRzUxYnJMQU1UaWpzNklEMU1qelhzck0xY2ZvazVaaUlxNVJsQ3dReTlYNDdtV1RhY0lZRGR4TGJEcXEKY0IydjR5Wm1YK1VleGJ3cDU1OWY0V05HdzF5YzQrQjdaNFF5aTRFelN4WmFjbldjMnBzcHJMUFVoOUFXRXVNcApOaW1vcXNiVGNnNGs5QWRxeUIrbWhIWmJRUUtCZ1FEQUNzU09qNXZMU1VtaVpxYWcrOVMySUxZOVNOdDZzS1VyCkprcjdCZEVpN3N2YmU5cldRR2RBb0xkQXNzcU94aENydmtPNkpSSHB1YjlRRjlYdlF4Riszc2ZpZm4yYkQ0ZloKMlVsclA1emF3RlNrNDNLbjdMZzRscURpaVUxVGlqTkJBL3dUcFlmbTB4dW5WeFRWNDZpNVViQW1XRk12TWV0bQozWUZYQmJkK2RRS0JnRGl6Q1B6cFpzeEcrazAwbUxlL2dYajl4ekNwaXZCbHJaM29teTdsVWk4YUloMmg5VlBaCjJhMzZNbVcyb1dLVG9HdW5xcCtibWU1eUxRRGlFcjVQdkJ0bGl2V3ppYmRNbFFMY2Nlcnpveml4WDA4QU5WUnEKZUpZdnIzdklDSGFFM25LRjdiVjNJK1NlSk1ra1BYL0QrV1R4WTQ5clZLYm1FRnh4c1JXRW04ekJBb0dBWEZ3UgpZanJoQTZqUW1DRmtYQ0loa0NJMVkwNEorSHpDUXZsY3NGT0EzSnNhUWduVUdwekl5OFUvdlFiLzhpQ0IzZ2RZCmpVck16YXErdnVkbnhYVnRFYVpWWGJIVitPQkVSdHFBdStyUkprZS9yYm1SNS84cUxsVUxOVWd4ZjA4RkRXeTgKTERxOUhKOUZPbnJnRTJvMU9FTjRRMGpSWU81U041dXFXODd0REEwQ2dZQXpXbk1KSFgrbmlyMjhRRXFyVnJKRAo4ZUEwOHIwWTJRMDhMRlcvMjNIVWQ4WU12VnhTUTdwcUwzaE41RXVJQ2dCbEpGVFI3TndBREo3eDY2M002akFMCm1DNlI4dWxSZStwa08xN2Y0UUs3MnVRanJGZEhESnlXQmdDL0RKSkV6d1dwY0Q4VVNPK3A5bVVIbllLTUJTOEsKTVB1ejYrZ3h0VEtsRU5pZUVacXhxZz09Ci0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS0K + username: testuser + password: testpassword + token: sha256~fFyEqjf1xxFMO0tbEyGRvWeNOd7QByuEgS4hyEq_A9o + """ # NOQA + + def get_kubeconfig_with_paths(self) -> str: + """ + This function returns a test kubeconfig file as a string. 
+
+        :return: a test kubeconfig file in string format (for unit testing purposes)
+        """  # NOQA
+        return """apiVersion: v1
+clusters:
+- cluster:
+    certificate-authority: fixtures/ca.crt
+    server: https://127.0.0.1:6443
+  name: default
+contexts:
+- context:
+    cluster: default
+    namespace: default
+    user: testuser
+  name: default
+current-context: default
+kind: Config
+preferences: {}
+users:
+- name: testuser
+  user:
+    client-certificate: fixtures/client.crt
+    client-key: fixtures/client.key
+    username: testuser
+    password: testpassword
+    token: sha256~fFyEqjf1xxFMO0tbEyGRvWeNOd7QByuEgS4hyEq_A9o
+    """  # NOQA
+
+    def test_current_context(self):
+        # the fixtures above are YAML strings and the certificate paths are
+        # relative, so parse them and run from this file's directory
+        import yaml
+        os.chdir(os.path.dirname(os.path.abspath(__file__)))
+        current_context_data = ContextAuth()
+        current_context_data.fetch_auth_data(
+            yaml.safe_load(self.get_kubeconfig_with_data()))
+        self.assertIsNotNone(current_context_data.clusterCertificateData)
+        self.assertIsNotNone(current_context_data.clientCertificateData)
+        self.assertIsNotNone(current_context_data.clientKeyData)
+        self.assertIsNotNone(current_context_data.username)
+        self.assertIsNotNone(current_context_data.password)
+        self.assertIsNotNone(current_context_data.bearerToken)
+        self.assertIsNotNone(current_context_data.clusterHost)
+
+        current_context_no_data = ContextAuth()
+        current_context_no_data.fetch_auth_data(
+            yaml.safe_load(self.get_kubeconfig_with_paths()))
+        self.assertIsNotNone(current_context_no_data.clusterCertificate)
+        self.assertIsNotNone(current_context_no_data.clusterCertificateData)
+        self.assertIsNotNone(current_context_no_data.clientCertificate)
+        self.assertIsNotNone(current_context_no_data.clientCertificateData)
+        self.assertIsNotNone(current_context_no_data.clientKey)
+        self.assertIsNotNone(current_context_no_data.clientKeyData)
+        self.assertIsNotNone(current_context_no_data.username)
+        self.assertIsNotNone(current_context_no_data.password)
+        self.assertIsNotNone(current_context_no_data.bearerToken)
+        self.assertIsNotNone(current_context_no_data.clusterHost)
diff --git a/requirements.txt b/requirements.txt
index b3a2d0ab..66e9c09a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -32,7 +32,8 @@ wheel
 service_identity
 git+https://github.com/vmware/vsphere-automation-sdk-python.git@v8.0.0.0
 git+https://github.com/redhat-chaos/arcaflow-plugin-kill-pod.git
-arcaflow >= 0.3.0
+arcaflow >= 0.4.1
 prometheus_api_client
 ibm_cloud_sdk_core
 ibm_vpc
+pytest
diff --git a/scenarios/arcaflow/cpu-hog/config.yaml b/scenarios/arcaflow/cpu-hog/config.yaml
new file mode 100644
index 00000000..a03beb4c
--- /dev/null
+++ b/scenarios/arcaflow/cpu-hog/config.yaml
@@ -0,0 +1,11 @@
+---
+deployer:
+  connection: {}
+  type: kubernetes
+log:
+  level: debug
+logged_outputs:
+  error:
+    level: error
+  success:
+    level: debug
diff --git a/scenarios/arcaflow/cpu-hog/input.yaml b/scenarios/arcaflow/cpu-hog/input.yaml
new file mode 100644
index 00000000..47daf0c3
--- /dev/null
+++ b/scenarios/arcaflow/cpu-hog/input.yaml
@@ -0,0 +1,13 @@
+input_list:
+- cpu_count: 1
+  cpu_load_percentage: 80
+  cpu_method: all
+  duration: 30s
+  node_selector: {}
+  # node selector example
+  # node_selector:
+  #   kubernetes.io/hostname: master
+  kubeconfig: ""
+
+# duplicate this section to run simultaneous stressors in the same run
+
diff --git a/scenarios/arcaflow/cpu-hog/sub-workflow.yaml b/scenarios/arcaflow/cpu-hog/sub-workflow.yaml
new file mode 100644
index 00000000..b52d552a
--- /dev/null
+++ b/scenarios/arcaflow/cpu-hog/sub-workflow.yaml
@@ -0,0 +1,86 @@
+input:
+  root: RootObject
+  objects:
+    RootObject:
+      id: RootObject
+      properties:
+        kubeconfig:
+          display:
description: The complete kubeconfig file as a string + name: Kubeconfig file contents + type: + type_id: string + required: true + node_selector: + display: + description: kubernetes node name where the plugin must be deployed + type: + type_id: map + values: + type_id: string + keys: + type_id: string + required: true + duration: + display: + name: duration the scenario expressed in seconds + description: stop stress test after T seconds. One can also specify the units of time in + seconds, minutes, hours, days or years with the suffix s, m, h, d or y + type: + type_id: string + required: true + cpu_count: + display: + description: Number of CPU cores to be used (0 means all) + name: number of CPUs + type: + type_id: integer + required: true + cpu_method: + display: + description: CPU stress method + name: fine grained control of which cpu stressors to use (ackermann, cfloat etc.) + type: + type_id: string + required: true + cpu_load_percentage: + display: + description: load CPU by percentage + name: CPU load + type: + type_id: integer + required: true + +steps: + kubeconfig: + plugin: quay.io/arcalot/arcaflow-plugin-kubeconfig:latest + input: + kubeconfig: !expr $.input.kubeconfig + stressng: + plugin: quay.io/arcalot/arcaflow-plugin-stressng:latest + step: workload + input: + StressNGParams: + timeout: !expr $.input.duration + cleanup: "true" + items: + - stressor: cpu + cpu_count: !expr $.input.cpu_count + cpu_method: !expr $.input.cpu_method + cpu_load: !expr $.input.cpu_load_percentage + deploy: + type: kubernetes + connection: !expr $.steps.kubeconfig.outputs.success.connection + pod: + metadata: + namespace: default + labels: + arcaflow: stressng + spec: + nodeSelector: !expr $.input.node_selector + pluginContainer: + imagePullPolicy: Always +outputs: + success: + stressng: !expr $.steps.stressng.outputs.success + diff --git a/scenarios/arcaflow/cpu-hog/workflow.yaml b/scenarios/arcaflow/cpu-hog/workflow.yaml new file mode 100644 index 00000000..9acd4aea --- /dev/null +++ b/scenarios/arcaflow/cpu-hog/workflow.yaml @@ -0,0 +1,69 @@ +input: + root: RootObject + objects: + RootObject: + id: RootObject + properties: + input_list: + type: + type_id: list + items: + id: input_item + type_id: object + properties: + kubeconfig: + display: + description: The complete kubeconfig file as a string + name: Kubeconfig file contents + type: + type_id: string + required: true + node_selector: + display: + description: kubernetes node name where the plugin must be deployed + type: + type_id: map + values: + type_id: string + keys: + type_id: string + required: true + duration: + display: + name: duration the scenario expressed in seconds + description: stop stress test after T seconds. One can also specify the units of time in + seconds, minutes, hours, days or years with the suffix s, m, h, d or y + type: + type_id: string + required: true + cpu_count: + display: + description: Number of CPU cores to be used (0 means all) + name: number of CPUs + type: + type_id: integer + required: true + cpu_method: + display: + description: CPU stress method + name: fine grained control of which cpu stressors to use (ackermann, cfloat etc.) 
+ type: + type_id: string + required: true + cpu_load_percentage: + display: + description: load CPU by percentage + name: CPU load + type: + type_id: integer + required: true +steps: + workload_loop: + kind: foreach + items: !expr $.input.input_list + workflow: sub-workflow.yaml + parallelism: 1000 +outputs: + success: + workloads: !expr $.steps.workload_loop.outputs.success.data + diff --git a/scenarios/arcaflow/io-hog/config.yaml b/scenarios/arcaflow/io-hog/config.yaml new file mode 100644 index 00000000..a03beb4c --- /dev/null +++ b/scenarios/arcaflow/io-hog/config.yaml @@ -0,0 +1,11 @@ +--- +deployer: + connection: {} + type: kubernetes +log: + level: debug +logged_outputs: + error: + level: error + success: + level: debug diff --git a/scenarios/arcaflow/io-hog/input.yaml b/scenarios/arcaflow/io-hog/input.yaml new file mode 100644 index 00000000..430a1f8d --- /dev/null +++ b/scenarios/arcaflow/io-hog/input.yaml @@ -0,0 +1,18 @@ +input_list: +- duration: 30s + io_block_size: 1m + io_workers: 1 + io_write_bytes: 10m + target_pod_folder: /data + target_pod_volume: + hostPath: + path: / + name: node-volume + node_selector: { } + # node selector example + # node_selector: + # kubernetes.io/hostname: master + kubeconfig: "" + +# duplicate this section to run simultaneous stressors in the same run + diff --git a/scenarios/arcaflow/io-hog/sub-workflow.yaml b/scenarios/arcaflow/io-hog/sub-workflow.yaml new file mode 100644 index 00000000..3c9854ca --- /dev/null +++ b/scenarios/arcaflow/io-hog/sub-workflow.yaml @@ -0,0 +1,129 @@ +input: + root: RootObject + objects: + RootObject: + id: RootObject + properties: + kubeconfig: + display: + description: The complete kubeconfig file as a string + name: Kubeconfig file contents + type: + type_id: string + required: true + node_selector: + display: + description: kubernetes node name where the plugin must be deployed + type: + type_id: map + values: + type_id: string + keys: + type_id: string + required: true + duration: + display: + name: duration the scenario expressed in seconds + description: stop stress test after T seconds. One can also specify the units of time in + seconds, minutes, hours, days or years with the suffix s, m, h, d or y + type: + type_id: string + required: true + io_workers: + display: + description: number of workers + name: start N workers continually writing, reading and removing temporary files + type: + type_id: integer + required: true + io_block_size: + display: + description: single write size + name: specify size of each write in bytes. Size can be from 1 byte to 4MB. + type: + type_id: string + required: true + io_write_bytes: + display: + description: Total number of bytes written + name: write N bytes for each hdd process, the default is 1 GB. One can specify the size + as % of free space on the file system or in units of Bytes, KBytes, MBytes and + GBytes using the suffix b, k, m or g + type: + type_id: string + required: true + target_pod_folder: + display: + description: Target Folder + name: Folder in the pod where the test will be executed and the test files will be written + type: + type_id: string + required: true + target_pod_volume: + display: + name: kubernetes volume definition + description: the volume that will be attached to the pod. 
In order to stress
+            the node storage, only hostPath mode is currently supported
+          type:
+            type_id: object
+            id: k8s_volume
+            properties:
+              name:
+                display:
+                  description: name of the volume (must match the name in pod definition)
+                type:
+                  type_id: string
+                required: true
+              hostPath:
+                display:
+                  description: hostPath options expressed as string map (key-value)
+                type:
+                  type_id: map
+                  values:
+                    type_id: string
+                  keys:
+                    type_id: string
+                required: true
+          required: true
+
+steps:
+  kubeconfig:
+    plugin: quay.io/arcalot/arcaflow-plugin-kubeconfig:latest
+    input:
+      kubeconfig: !expr $.input.kubeconfig
+  stressng:
+    plugin: quay.io/arcalot/arcaflow-plugin-stressng:latest
+    step: workload
+    input:
+      StressNGParams:
+        timeout: !expr $.input.duration
+        cleanup: "true"
+        workdir: !expr $.input.target_pod_folder
+        items:
+        - stressor: hdd
+          hdd: !expr $.input.io_workers
+          hdd_bytes: !expr $.input.io_write_bytes
+          hdd_write_size: !expr $.input.io_block_size
+
+    deploy:
+      type: kubernetes
+      connection: !expr $.steps.kubeconfig.outputs.success.connection
+      pod:
+        metadata:
+          namespace: default
+          labels:
+            arcaflow: stressng
+        spec:
+          nodeSelector: !expr $.input.node_selector
+          pluginContainer:
+            imagePullPolicy: Always
+            volumeMounts:
+            - mountPath: /data
+              name: node-volume
+          volumes:
+          - !expr $.input.target_pod_volume
+
+outputs:
+  success:
+    stressng: !expr $.steps.stressng.outputs.success
+
diff --git a/scenarios/arcaflow/io-hog/workflow.yaml b/scenarios/arcaflow/io-hog/workflow.yaml
new file mode 100644
index 00000000..27a8fad5
--- /dev/null
+++ b/scenarios/arcaflow/io-hog/workflow.yaml
@@ -0,0 +1,106 @@
+input:
+  root: RootObject
+  objects:
+    RootObject:
+      id: RootObject
+      properties:
+        input_list:
+          type:
+            type_id: list
+            items:
+              id: input_item
+              type_id: object
+              properties:
+                kubeconfig:
+                  display:
+                    description: The complete kubeconfig file as a string
+                    name: Kubeconfig file contents
+                  type:
+                    type_id: string
+                  required: true
+                node_selector:
+                  display:
+                    description: kubernetes node name where the plugin must be deployed
+                  type:
+                    type_id: map
+                    values:
+                      type_id: string
+                    keys:
+                      type_id: string
+                  required: true
+                duration:
+                  display:
+                    name: duration the scenario expressed in seconds
+                    description: stop stress test after T seconds. One can also specify the units of time in
+                      seconds, minutes, hours, days or years with the suffix s, m, h, d or y
+                  type:
+                    type_id: string
+                  required: true
+                io_workers:
+                  display:
+                    description: number of workers
+                    name: start N workers continually writing, reading and removing temporary files
+                  type:
+                    type_id: integer
+                  required: true
+                io_block_size:
+                  display:
+                    description: single write size
+                    name: specify size of each write in bytes. Size can be from 1 byte to 4MB.
+                  type:
+                    type_id: string
+                  required: true
+                io_write_bytes:
+                  display:
+                    description: Total number of bytes written
+                    name: write N bytes for each hdd process, the default is 1 GB. One can specify the size
+                      as % of free space on the file system or in units of Bytes, KBytes, MBytes and
+                      GBytes using the suffix b, k, m or g
+                  type:
+                    type_id: string
+                  required: true
+                target_pod_folder:
+                  display:
+                    description: Target Folder
+                    name: Folder in the pod where the test will be executed and the test files will be written
+                  type:
+                    type_id: string
+                  required: true
+                target_pod_volume:
+                  display:
+                    name: kubernetes volume definition
+                    description: the volume that will be attached to the pod.
In order to stress
+                      the node storage, only hostPath mode is currently supported
+                  type:
+                    type_id: object
+                    id: k8s_volume
+                    properties:
+                      name:
+                        display:
+                          description: name of the volume (must match the name in pod definition)
+                        type:
+                          type_id: string
+                        required: true
+                      hostPath:
+                        display:
+                          description: hostPath options expressed as string map (key-value)
+                        type:
+                          type_id: map
+                          values:
+                            type_id: string
+                          keys:
+                            type_id: string
+                        required: true
+                  required: true
+steps:
+  workload_loop:
+    kind: foreach
+    items: !expr $.input.input_list
+    workflow: sub-workflow.yaml
+    parallelism: 1000
+outputs:
+  success:
+    workloads: !expr $.steps.workload_loop.outputs.success.data
+
diff --git a/scenarios/arcaflow/memory-hog/config.yaml b/scenarios/arcaflow/memory-hog/config.yaml
new file mode 100644
index 00000000..a03beb4c
--- /dev/null
+++ b/scenarios/arcaflow/memory-hog/config.yaml
@@ -0,0 +1,11 @@
+---
+deployer:
+  connection: {}
+  type: kubernetes
+log:
+  level: debug
+logged_outputs:
+  error:
+    level: error
+  success:
+    level: debug
diff --git a/scenarios/arcaflow/memory-hog/input.yaml b/scenarios/arcaflow/memory-hog/input.yaml
new file mode 100644
index 00000000..8315c207
--- /dev/null
+++ b/scenarios/arcaflow/memory-hog/input.yaml
@@ -0,0 +1,12 @@
+input_list:
+- duration: 30s
+  vm_bytes: 10%
+  vm_workers: 2
+  node_selector: {}
+  # node selector example
+  # node_selector:
+  #   kubernetes.io/hostname: master
+  kubeconfig: ""
+
+# duplicate this section to run simultaneous stressors in the same run
+
diff --git a/scenarios/arcaflow/memory-hog/sub-workflow.yaml b/scenarios/arcaflow/memory-hog/sub-workflow.yaml
new file mode 100644
index 00000000..85f89572
--- /dev/null
+++ b/scenarios/arcaflow/memory-hog/sub-workflow.yaml
@@ -0,0 +1,78 @@
+input:
+  root: RootObject
+  objects:
+    RootObject:
+      id: RootObject
+      properties:
+        kubeconfig:
+          display:
+            description: The complete kubeconfig file as a string
+            name: Kubeconfig file contents
+          type:
+            type_id: string
+          required: true
+        node_selector:
+          display:
+            description: kubernetes node name where the plugin must be deployed
+          type:
+            type_id: map
+            values:
+              type_id: string
+            keys:
+              type_id: string
+          required: true
+        duration:
+          display:
+            name: duration the scenario expressed in seconds
+            description: stop stress test after T seconds. One can also specify the units of time in seconds, minutes, hours, days or years with the suffix s, m, h, d or y
+          type:
+            type_id: string
+          required: true
+        vm_workers:
+          display:
+            description: Number of VM stressors to be run (0 means 1 stressor per CPU)
+            name: Number of VM stressors
+          type:
+            type_id: integer
+          required: true
+        vm_bytes:
+          display:
+            description: N bytes per vm process, the default is 256MB. The size can be expressed in units of Bytes, KBytes, MBytes and GBytes using the suffix b, k, m or g.
+            name: Bytes per VM stressor
+          type:
+            type_id: string
+          required: true
+
+steps:
+  kubeconfig:
+    plugin: quay.io/arcalot/arcaflow-plugin-kubeconfig:latest
+    input:
+      kubeconfig: !expr $.input.kubeconfig
+  stressng:
+    plugin: quay.io/arcalot/arcaflow-plugin-stressng:latest
+    step: workload
+    input:
+      StressNGParams:
+        timeout: !expr $.input.duration
+        cleanup: "true"
+        items:
+        - stressor: vm
+          vm: !expr $.input.vm_workers
+          vm_bytes: !expr $.input.vm_bytes
+    deploy:
+      type: kubernetes
+      connection: !expr $.steps.kubeconfig.outputs.success.connection
+      pod:
+        metadata:
+          namespace: default
+          labels:
+            arcaflow: stressng
+        spec:
+          nodeSelector: !expr $.input.node_selector
+          pluginContainer:
+            imagePullPolicy: Always
+
+outputs:
+  success:
+    stressng: !expr $.steps.stressng.outputs.success
+
diff --git a/scenarios/arcaflow/memory-hog/workflow.yaml b/scenarios/arcaflow/memory-hog/workflow.yaml
new file mode 100644
index 00000000..528f92d5
--- /dev/null
+++ b/scenarios/arcaflow/memory-hog/workflow.yaml
@@ -0,0 +1,65 @@
+input:
+  root: RootObject
+  objects:
+    RootObject:
+      id: RootObject
+      properties:
+        input_list:
+          type:
+            type_id: list
+            items:
+              id: input_item
+              type_id: object
+              properties:
+                kubeconfig:
+                  display:
+                    description: The complete kubeconfig file as a string
+                    name: Kubeconfig file contents
+                  type:
+                    type_id: string
+                  required: true
+                node_selector:
+                  display:
+                    description: kubernetes node name where the plugin must be deployed
+                  type:
+                    type_id: map
+                    values:
+                      type_id: string
+                    keys:
+                      type_id: string
+                  required: true
+                duration:
+                  display:
+                    name: duration the scenario expressed in seconds
+                    description: stop stress test after T seconds. One can also specify the units of time in seconds, minutes, hours, days or years with the suffix s, m, h, d or y
+                  type:
+                    type_id: string
+                  required: true
+                vm_workers:
+                  display:
+                    description: Number of VM stressors to be run (0 means 1 stressor per CPU)
+                    name: Number of VM stressors
+                  type:
+                    type_id: integer
+                  required: true
+                vm_bytes:
+                  display:
+                    description: N bytes per vm process, the default is 256MB. The size can be expressed in units of Bytes, KBytes, MBytes and GBytes using the suffix b, k, m or g.
+                    name: Bytes per VM stressor
+                  type:
+                    type_id: string
+                  required: true
+steps:
+  workload_loop:
+    kind: foreach
+    items: !expr $.input.input_list
+    workflow: sub-workflow.yaml
+    parallelism: 1000
+outputs:
+  success:
+    workloads: !expr $.steps.workload_loop.outputs.success.data
+
diff --git a/scenarios/arcaflow/sysbench-cpu-hog/config.yaml b/scenarios/arcaflow/sysbench-cpu-hog/config.yaml
deleted file mode 100644
index 2b6bf74b..00000000
--- a/scenarios/arcaflow/sysbench-cpu-hog/config.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-deployer:
-  type: podman
-  # More deployer options
-log:
-  level: debug
\ No newline at end of file
diff --git a/scenarios/arcaflow/sysbench-cpu-hog/input.yaml b/scenarios/arcaflow/sysbench-cpu-hog/input.yaml
deleted file mode 100644
index 2125bf33..00000000
--- a/scenarios/arcaflow/sysbench-cpu-hog/input.yaml
+++ /dev/null
@@ -1,8 +0,0 @@
-kubeconfig: {}
-node_selector:
-  label: value #replace with the node label of your choice
-sysbench_cpumaxprime: 12000
-sysbench_events: 0
-sysbench_forced_shutdown_time: 10
-sysbench_runtime: 60
-sysbench_threads: 50
diff --git a/scenarios/arcaflow/sysbench-cpu-hog/workflow.yaml b/scenarios/arcaflow/sysbench-cpu-hog/workflow.yaml
deleted file mode 100644
index 53dd0482..00000000
--- a/scenarios/arcaflow/sysbench-cpu-hog/workflow.yaml
+++ /dev/null
@@ -1,83 +0,0 @@
-input:
-  root: RootObject
-  objects:
-    RootObject:
-      id: RootObject
-      properties:
-        kubeconfig:
-          display:
-            description: The complete kubeconfig file as a string
-            name: Kubeconfig file contents
-          type:
-            type_id: string
-          required: true
-        node_selector:
-          display:
-            description: kubernetes node name where the plugin must be deployed
-          type:
-            type_id: map
-            values:
-              type_id: string
-            keys:
-              type_id: string
-          required: true
-        sysbench_threads:
-          display:
-            description: The number of threads sysbench will run
-            name: sysbench threads
-          type:
-            type_id: integer
-        sysbench_events:
-          display:
-            description: The number of events sysbench will run
-            name: sysbench events
-          type:
-            type_id: integer
-        sysbench_runtime:
-          display:
-            description: The total runtime in seconds for the sysbench tests
-            name: sysbench runtime seconds
-          type:
-            type_id: integer
-        sysbench_forced_shutdown_time:
-          display:
-            description: Number of seconds to wait after the 'time' limit before forcing shutdown, or exclude parameter to disable forced shutdown
-            name: sysbench runtime seconds
-          type:
-            type_id: integer
-        sysbench_cpumaxprime:
-          display:
-            description: The upper limit of the number of prime numbers generated
-            name: sysbench cpu max primes
-          type:
-            type_id: integer
-
-steps:
-  kubeconfig:
-    plugin: quay.io/arcalot/arcaflow-plugin-kubeconfig:latest
-    input:
-      kubeconfig: !expr $.input.kubeconfig
-  sysbench:
-    plugin: quay.io/arcalot/arcaflow-plugin-sysbench:latest
-    step: sysbenchcpu
-    input:
-      threads: !expr $.input.sysbench_threads
-      events: !expr $.input.sysbench_events
-      time: !expr $.input.sysbench_runtime
-      forced-shutdown: !expr $.input.sysbench_forced_shutdown_time
-      cpu-max-prime: !expr $.input.sysbench_cpumaxprime
-    deploy:
-      type: kubernetes
-      connection: !expr $.steps.kubeconfig.outputs.success.connection
-      pod:
-
-        metadata:
-          namespace: default
-          labels:
-            arcaflow: sysbench
-        spec:
-          nodeSelector: !expr $.input.node_selector
-          pluginContainer:
-            imagePullPolicy: Always
-output:
-  sysbench: !expr $.steps.sysbench.outputs.success
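
For completeness, a minimal sketch of driving the new scenarios programmatically; `run()` and its signature come from `kraken/arcaflow_plugin/arcaflow_plugin.py` above, while the scenario path and kubeconfig location are illustrative assumptions:

```python
# Sketch: what kraken does internally when arcaflow_scenarios is listed
# in config/config.yaml (run from the repo root; paths are illustrative).
import os
from kraken.arcaflow_plugin import run

run(
    ["scenarios/arcaflow/cpu-hog/input.yaml"],  # scenarios_list
    os.path.expanduser("~/.kube/config"),       # kubeconfig_path
)
```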