flagger/test/linkerd/test-steps.sh
Alejandro Pedraza 7242fa7d5c Add support for Linkerd 2.13
In Linkerd 2.13, the Prometheus instance in
the `linkerd-viz` namespace is now locked behind an
[_AuthorizationPolicy_](https://github.com/linkerd/linkerd2/blob/stable-2.13.1/viz/charts/linkerd-viz/templates/prometheus-policy.yaml)
that only allows access to the `metrics-api` _ServiceAccount_.
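
For reference, the upstream policy boils down to something like this (a sketch based on the linked template; the resource names are illustrative, the template is authoritative):

```yaml
apiVersion: policy.linkerd.io/v1alpha1
kind: AuthorizationPolicy
metadata:
  name: prometheus-admin       # illustrative; see the linked template
  namespace: linkerd-viz
spec:
  targetRef:
    group: policy.linkerd.io
    kind: Server
    name: prometheus-admin     # the Server guarding Prometheus
  requiredAuthenticationRefs:
    - kind: ServiceAccount
      name: metrics-api        # the only identity allowed through
      namespace: linkerd-viz
```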

This adds an extra _AuthorizationPolicy_ authorizing the `flagger`
_ServiceAccount_. It's created by default when using Kustomize, but has
to be opted into when using Helm via the new
`linkerdAuthPolicy.create` value. This also implies that the Flagger
workload has to be injected with the Linkerd proxy, which can't happen
in the same `linkerd` namespace where the control plane lives, so we're
moving Flagger into the new injected `flagger-system` namespace.
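
A minimal sketch of that extra policy, assuming illustrative resource names (the real manifest ships with Flagger's Kustomize overlay and Helm chart):

```yaml
apiVersion: policy.linkerd.io/v1alpha1
kind: AuthorizationPolicy
metadata:
  name: flagger-prometheus     # illustrative name
  namespace: linkerd-viz       # must sit next to the Prometheus Server
spec:
  targetRef:
    group: policy.linkerd.io
    kind: Server
    name: prometheus-admin     # assuming the Server name from the template above
  requiredAuthenticationRefs:
    - kind: ServiceAccount
      name: flagger
      namespace: flagger-system  # the new injected namespace
```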

The `namespace` field in `kustomization.yml` was resetting the namespace
for the new _AuthorizationPolicy_ resource, so it gets restored to
`linkerd-viz` using a `patchesJson6902` entry. A better way to do this
would have been the `unsetOnly` field in a _NamespaceTransformer_ (see
kubernetes-sigs/kustomize#4708), but for the life of me I couldn't make
that work...
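
Roughly, the workaround in `kustomization.yml` looks like this (a sketch; the policy name is illustrative and the inline-patch form assumes a reasonably recent kustomize):

```yaml
namespace: flagger-system      # resets metadata.namespace on everything...
patchesJson6902:
  - target:                    # ...so patch the policy back afterwards
      group: policy.linkerd.io
      version: v1alpha1
      kind: AuthorizationPolicy
      name: flagger-prometheus # illustrative name
    patch: |-
      - op: replace
        path: /metadata/namespace
        value: linkerd-viz
```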

Signed-off-by: Alejandro Pedraza <alejandro@buoyant.io>
2023-05-08 06:33:00 -05:00

#!/usr/bin/env bash
# This script runs Linkerd e2e tests for Canary initialization, analysis and promotion
set -o errexit
cat <<EOF | kubectl apply -f -
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  progressDeadlineSeconds: 60
  service:
    port: 80
    targetPort: http
    portDiscovery: true
  analysis:
    interval: 15s
    threshold: 15
    maxWeight: 50
    stepWeights: [10, 20, 30, 40, 50, 60]
    metrics:
      - name: request-success-rate
        # minimum request success rate percentage
        threshold: 99
        interval: 1m
      - name: request-duration
        # maximum request duration in milliseconds
        threshold: 500
        interval: 30s
    webhooks:
      - name: load-test
        url: http://flagger-loadtester.test/
        timeout: 5s
        metadata:
          type: cmd
          cmd: "hey -z 10m -q 10 -c 2 http://podinfo.test/"
EOF
echo '>>> Waiting for primary to be ready'
retries=50
count=0
ok=false
until ${ok}; do
    kubectl -n test get canary/podinfo | grep 'Initialized' && ok=true || ok=false
    sleep 5
    count=$((count + 1))
    if [[ ${count} -eq ${retries} ]]; then
        kubectl -n flagger-system logs deployment/flagger flagger
        echo "No more retries left"
        exit 1
    fi
done
echo '✔ Canary initialization test passed'
echo '>>> Triggering canary deployment'
kubectl -n test set image deployment/podinfo podinfod=ghcr.io/stefanprodan/podinfo:6.0.1
echo '>>> Waiting for canary promotion'
retries=50
count=0
ok=false
until ${ok}; do
    kubectl -n test describe deployment/podinfo-primary | grep '6.0.1' && ok=true || ok=false
    sleep 10
    kubectl -n flagger-system logs deployment/flagger flagger --tail 1
    count=$((count + 1))
    if [[ ${count} -eq ${retries} ]]; then
        kubectl -n flagger-system logs deployment/flagger flagger
        echo "No more retries left"
        exit 1
    fi
done
echo '>>> Waiting for canary finalization'
retries=50
count=0
ok=false
until ${ok}; do
    kubectl -n test get canary/podinfo | grep 'Succeeded' && ok=true || ok=false
    sleep 5
    count=$((count + 1))
    if [[ ${count} -eq ${retries} ]]; then
        kubectl -n flagger-system logs deployment/flagger flagger
        echo "No more retries left"
        exit 1
    fi
done
echo '✔ Canary promotion test passed'