Adding enable_metrics for Prometheus coverage (#871)
Some checks failed
Functional & Unit Tests / Functional & Unit Tests (push) Failing after 9m31s
Functional & Unit Tests / Generate Coverage Badge (push) Has been skipped
Signed-off-by: Paige Patton <prubenda@redhat.com>
.github/workflows/tests.yml (vendored): 6 changes
@@ -39,6 +39,8 @@ jobs:
       run: |
         es_pod_name=$(kubectl get pods -l "app.kubernetes.io/instance=elasticsearch" -o name)
         kubectl --namespace default port-forward $es_pod_name 9200 &
+        prom_name=$(kubectl get pods -n monitoring -l "app.kubernetes.io/name=prometheus" -o name)
+        kubectl --namespace monitoring port-forward $prom_name 9090 &
         kubectl apply -f CI/templates/outage_pod.yaml
         kubectl wait --for=condition=ready pod -l scenario=outage --timeout=300s
         kubectl apply -f CI/templates/container_scenario_pod.yaml
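Because both port-forwards run in the background, the forwarded endpoints are not necessarily reachable by the time the next step starts. A minimal readiness probe that could be added before the test run, assuming Prometheus' standard /-/ready endpoint (the retry loop below is a suggestion, not part of this workflow):

    # hypothetical wait-until-ready loop; /-/ready is Prometheus' built-in
    # readiness endpoint, and curl -sf exits non-zero on non-2xx responses
    for i in $(seq 1 30); do
      curl -sf http://localhost:9090/-/ready && break
      sleep 2
    done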
@@ -66,6 +68,7 @@ jobs:
         yq -i '.elastic.elastic_port=9200' CI/config/common_test_config.yaml
         yq -i '.elastic.elastic_url="https://localhost"' CI/config/common_test_config.yaml
         yq -i '.elastic.enable_elastic=True' CI/config/common_test_config.yaml
+        yq -i '.performance_monitoring.prometheus_url="http://localhost:9090"' CI/config/common_test_config.yaml
         echo "test_service_hijacking" > ./CI/tests/functional_tests
         echo "test_app_outages" >> ./CI/tests/functional_tests
         echo "test_container" >> ./CI/tests/functional_tests
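For reference, after these yq edits the touched sections of CI/config/common_test_config.yaml would look roughly like this (a sketch assembled from the commands above, not the full file):

    elastic:
      enable_elastic: True
      elastic_port: 9200
      elastic_url: "https://localhost"
    performance_monitoring:
      prometheus_url: "http://localhost:9090"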
@@ -97,6 +100,7 @@ jobs:
         yq -i '.elastic.enable_elastic=True' CI/config/common_test_config.yaml
         yq -i '.elastic.elastic_port=9200' CI/config/common_test_config.yaml
         yq -i '.elastic.elastic_url="https://localhost"' CI/config/common_test_config.yaml
+        yq -i '.performance_monitoring.prometheus_url="http://localhost:9090"' CI/config/common_test_config.yaml
         yq -i '.telemetry.username="${{secrets.TELEMETRY_USERNAME}}"' CI/config/common_test_config.yaml
         yq -i '.telemetry.password="${{secrets.TELEMETRY_PASSWORD}}"' CI/config/common_test_config.yaml
         echo "test_telemetry" > ./CI/tests/functional_tests
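A quick read-back confirms the value landed where krkn expects it; this check is a suggestion, not part of the workflow (the secret-backed telemetry fields would be masked in CI logs anyway):

    yq '.performance_monitoring.prometheus_url' CI/config/common_test_config.yaml
    # expected output: http://localhost:9090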
@@ -145,12 +149,14 @@ jobs:
           path: htmlcov
           if-no-files-found: error
       - name: Upload json coverage
         if: ${{ success() || failure() }}
         uses: actions/upload-artifact@v4
         with:
           name: coverage.json
           path: coverage.json
           if-no-files-found: error
       - name: Check CI results
         if: ${{ success() || failure() }}
         run: "! grep Fail CI/results.markdown"

   badge:
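The "Check CI results" step leans on shell negation: grep exits 0 only when it finds a match, so the leading ! makes the step fail exactly when a Fail row appears in the results table. The same pattern in isolation, with made-up file contents:

    printf '| test_app_outages | Pass |\n' > /tmp/results.markdown
    ! grep Fail /tmp/results.markdown && echo "gate passes"   # no match: grep exits 1, negated to success
    printf '| test_telemetry | Fail |\n' >> /tmp/results.markdown
    ! grep Fail /tmp/results.markdown || echo "gate trips"    # match found: grep exits 0, negated to failure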
@@ -17,8 +17,11 @@ performance_monitoring:
     prometheus_url: # The prometheus url/route is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes.
     prometheus_bearer_token: # The bearer token is automatically obtained in case of OpenShift, please set it when the distribution is Kubernetes. This is needed to authenticate with prometheus.
     uuid: # uuid for the run is generated by default if not set.
-    enable_alerts: False # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error.
-    alert_profile: config/alerts.yaml # Path to alert profile with the prometheus queries.
+    enable_alerts: True # Runs the queries specified in the alert profile and displays the info or exits 1 when severity=error
+    enable_metrics: True
+    alert_profile: config/alerts.yaml # Path or URL to alert profile with the prometheus queries
+    metrics_profile: config/metrics-report.yaml
+    check_critical_alerts: True # Exits with a failure if critical alerts are firing during the chaos run

 tunings:
     wait_duration: 6 # Duration to wait between each chaos scenario.
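enable_metrics switches on collection of the PromQL queries listed under metrics_profile, mirroring how enable_alerts drives alert_profile. As a rough illustration, such a profile is a list of queries with names for the captured series; the entries below are hypothetical, and the authoritative schema is whatever ships in config/metrics-report.yaml:

    # hypothetical entries, for illustration only; see the repository's
    # config/metrics-report.yaml for the real schema and queries
    metrics:
      - query: sum(kube_pod_status_phase{phase="Running"}) by (namespace)
        metricName: runningPods
      - query: irate(node_cpu_seconds_total{mode="idle"}[2m])
        metricName: nodeCPUIdle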
@@ -18,6 +18,7 @@ function functional_test_app_outage {

   kubectl get pods
   envsubst < CI/config/common_test_config.yaml > CI/config/app_outage.yaml
+  cat $scenario_file
   python3 -m coverage run -a run_kraken.py -c CI/config/app_outage.yaml
   echo "App outage scenario test: Success"
 }
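envsubst (from GNU gettext) rewrites $VAR references in the template with values taken from the environment, which is how the shared common_test_config.yaml is specialized into a per-test config. A self-contained example with a made-up variable:

    export PROM_URL=http://localhost:9090        # made-up variable, for illustration
    echo 'prometheus_url: $PROM_URL' | envsubst  # prints: prometheus_url: http://localhost:9090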
@@ -12,7 +12,6 @@ function functional_pod_network_filter {
   yq -i '.[0].protocols=["tcp"]' scenarios/kube/pod-network-filter.yml
   yq -i '.[0].ports=[443]' scenarios/kube/pod-network-filter.yml
-

   ## Test webservice deployment
   kubectl apply -f ./CI/templates/pod_network_filter.yaml
   COUNTER=0
@@ -29,7 +28,9 @@ function functional_pod_network_filter {
     [ $COUNTER -eq "100" ] && echo "maximum number of retry reached, test failed" && exit 1
   done

-  python3 -m coverage run -a run_kraken.py -c CI/config/pod_network_filter.yaml > /dev/null 2>&1 &
+  cat scenarios/kube/pod-network-filter.yml
+
+  python3 -m coverage run -a run_kraken.py -c CI/config/pod_network_filter.yaml > krkn_pod_network.out 2>&1 &
   PID=$!

   # wait until the dns resolution starts failing and the service returns 400
@@ -53,6 +54,7 @@ function functional_pod_network_filter {
   done

+  wait $PID

 }

 functional_pod_network_filter
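Redirecting the backgrounded run to krkn_pod_network.out keeps its logs inspectable while the test polls the cluster, and the new wait $PID makes the function block until the run finishes and surfaces its exit code, so a crashed run no longer slips through silently. The same pattern in isolation (sleep stands in for run_kraken.py):

    sleep 5 > /tmp/run.out 2>&1 &   # stand-in for the backgrounded run_kraken.py call
    PID=$!
    # ...poll for the expected side effects here...
    wait $PID                       # blocks, then returns the background job's exit code
    echo "run exited with $?"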