# Mirror of https://github.com/fluxcd/flagger.git
# Synced: 2026-02-23 14:24:09 +00:00
# Commit: Bump Gateway API E2E tests to v1 and switch to Istio from Contour.
# Signed-off-by: Sanskar Jaiswal <jaiswalsanskar078@gmail.com>
# File: 181 lines, 4.2 KiB, Bash, executable
#!/usr/bin/env bash

# This script runs e2e tests for progressive traffic shifting, Canary analysis and promotion
# Prerequisites: Kubernetes Kind and Istio with GatewayAPI

set -o errexit

REPO_ROOT=$(git rev-parse --show-toplevel)

# Quoted to stay safe if the checkout path ever contains spaces (SC2086).
# shellcheck source=/dev/null
source "${REPO_ROOT}/test/gatewayapi/test-utils.sh"

# Create the MetricTemplates (latency, error-rate) that the Canary
# analysis sections below reference from the flagger-system namespace.
create_latency_metric_template
create_error_rate_metric_template
|
|
|
|
echo '>>> Installing Canary'
# Canary for the podinfo deployment: shift traffic in 10% steps up to 50%,
# gating each step on the error-rate and latency MetricTemplates while the
# load-test webhook generates traffic through the Istio Gateway API gateway.
# NOTE(review): the YAML indentation below was reconstructed — the scraped
# source had lost all leading whitespace, which is invalid YAML.
cat <<EOF | kubectl apply -f -
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  progressDeadlineSeconds: 60
  service:
    port: 9898
    portName: http
    hosts:
      - www.example.com
    gatewayRefs:
      - name: gateway
        namespace: istio-ingress
  analysis:
    interval: 15s
    threshold: 15
    maxWeight: 50
    stepWeight: 10
    metrics:
      - name: error-rate
        templateRef:
          name: error-rate
          namespace: flagger-system
        thresholdRange:
          max: 1
        interval: 1m
      - name: latency
        templateRef:
          name: latency
          namespace: flagger-system
        thresholdRange:
          max: 0.5
        interval: 30s
    webhooks:
      - name: load-test
        type: rollout
        url: http://flagger-loadtester.test/
        timeout: 5s
        metadata:
          cmd: "hey -z 2m -q 10 -c 2 -host www.example.com http://gateway-istio.istio-ingress"
          logCmdOutput: "true"
EOF
|
|
|
|
# Wait for Flagger to create the podinfo-primary workload (helper from test-utils.sh).
check_primary "test"

# Print the generated HTTPRoute for debugging (helper from test-utils.sh).
display_httproute "test"

# Bump the container image to start a new canary analysis run.
echo '>>> Triggering canary deployment'
kubectl -n test set image deployment/podinfo podinfod=stefanprodan/podinfo:6.0.1
|
|
|
|
echo '>>> Waiting for canary promotion'
# Poll until the primary deployment carries the new image tag, i.e. Flagger
# has promoted the canary. Give up (and dump the Flagger log) after 50 tries.
retries=50
count=0
ok=false
while [[ "${ok}" != "true" ]]; do
  if kubectl -n test describe deployment/podinfo-primary | grep '6.0.1'; then
    ok=true
  else
    ok=false
  fi
  sleep 10
  kubectl -n flagger-system logs deployment/flagger --tail 1
  count=$((count + 1))
  if [[ "${count}" -eq "${retries}" ]]; then
    kubectl -n flagger-system logs deployment/flagger
    echo "No more retries left"
    exit 1
  fi
done
|
|
|
|
# Print the HTTPRoute again after promotion (helper from test-utils.sh).
display_httproute "test"

echo '>>> Waiting for canary finalization'
# Poll until the Canary resource reports the Succeeded phase; bail out with
# the full Flagger log after 50 attempts.
retries=50
count=0
ok=false
while [[ "${ok}" != "true" ]]; do
  if kubectl -n test get canary/podinfo | grep 'Succeeded'; then
    ok=true
  else
    ok=false
  fi
  sleep 5
  count=$((count + 1))
  if [[ "${count}" -eq "${retries}" ]]; then
    kubectl -n flagger-system logs deployment/flagger
    echo "No more retries left"
    exit 1
  fi
done

echo '✔ Canary promotion test passed'
|
|
|
|
# Reconfigure the Canary for the rollback test: tighter deadline/threshold,
# and a load-test webhook that hammers /status/500 so the error-rate metric
# is guaranteed to breach its threshold and force a rollback.
# NOTE(review): the YAML indentation below was reconstructed — the scraped
# source had lost all leading whitespace, which is invalid YAML.
cat <<EOF | kubectl apply -f -
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  progressDeadlineSeconds: 30
  service:
    port: 9898
    targetPort: 9898
    portName: http
    hosts:
      - www.example.com
    gatewayRefs:
      - name: gateway
        namespace: istio-ingress
  analysis:
    interval: 15s
    threshold: 6
    maxWeight: 50
    stepWeight: 10
    metrics:
      - name: error-rate
        templateRef:
          name: error-rate
          namespace: flagger-system
        thresholdRange:
          max: 1
        interval: 30s
      - name: latency
        templateRef:
          name: latency
          namespace: flagger-system
        thresholdRange:
          max: 0.5
        interval: 30s
    webhooks:
      - name: load-test
        type: rollout
        url: http://flagger-loadtester.test/
        timeout: 5s
        metadata:
          cmd: "hey -z 2m -q 10 -c 2 -host www.example.com http://gateway-istio.istio-ingress/status/500"
          logCmdOutput: "true"
EOF
|
|
|
|
echo '>>> Triggering canary deployment rollback test'
kubectl -n test set image deployment/podinfo podinfod=stefanprodan/podinfo:6.0.2

echo '>>> Waiting for canary rollback'
# Poll until the Canary resource reports the Failed phase (the /status/500
# traffic should breach the error-rate threshold). Dump the Flagger log and
# abort after 50 attempts.
retries=50
count=0
ok=false
while [[ "${ok}" != "true" ]]; do
  if kubectl -n test get canary/podinfo | grep 'Failed'; then
    ok=true
  else
    ok=false
  fi
  sleep 10
  kubectl -n flagger-system logs deployment/flagger --tail 1
  count=$((count + 1))
  if [[ "${count}" -eq "${retries}" ]]; then
    kubectl -n flagger-system logs deployment/flagger
    echo "No more retries left"
    exit 1
  fi
done

echo '✔ Canary rollback test passed'

# Clean up the Canary resource so subsequent test scripts start fresh.
kubectl delete -n test canary podinfo