bump provider versions in e2e tests

Signed-off-by: Sanskar Jaiswal <sanskar.jaiswal@weave.works>
Author: Sanskar Jaiswal
Date: 2022-06-06 12:23:08 +05:30
parent 063d38dbd2
commit 6dd8a755c8
14 changed files with 74 additions and 41 deletions


@@ -2,7 +2,7 @@
 set -o errexit
-CONTOUR_VER="release-1.18"
+CONTOUR_VER="v1.21.0"
 REPO_ROOT=$(git rev-parse --show-toplevel)
 mkdir -p ${REPO_ROOT}/bin
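Note: the pin moves from a mutable branch ref ("release-1.18") to an immutable tag, so re-runs of the e2e job fetch identical manifests. A sanity-check sketch, assuming the stock example manifests (which deploy Contour as deployment/contour in the projectcontour namespace):

kubectl -n projectcontour get deployment/contour \
    -o jsonpath='{.spec.template.spec.containers[*].image}' | grep 'v1.21.0'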


@@ -2,7 +2,7 @@
 set -o errexit
-CONTOUR_VER="release-1.20"
+CONTOUR_VER="v1.21.0"
 GATEWAY_API_VER="v1alpha2"
 REPO_ROOT=$(git rev-parse --show-toplevel)
 KUSTOMIZE_VERSION=4.5.2
@@ -14,17 +14,42 @@ fi
 mkdir -p ${REPO_ROOT}/bin
-echo ">>> Installing Gateway API CRDs"
-kubectl kustomize "github.com/kubernetes-sigs/gateway-api/config/crd?ref=v0.4.1" \
-    | kubectl apply -f -
-echo ">>> Installing Contour components, GatewayClass and Gateway"
-kubectl apply -f https://raw.githubusercontent.com/projectcontour/contour/${CONTOUR_VER}/examples/render/contour-gateway.yaml
-kubectl -n projectcontour rollout status deployment/contour
-kubectl -n gateway-api wait --for=condition=complete job/gateway-api-admission
-kubectl -n gateway-api wait --for=condition=complete job/gateway-api-admission-patch
-kubectl -n gateway-api rollout status deployment/gateway-api-admission-server
+echo ">>> Installing Contour components, Gateway API CRDs"
+kubectl apply -f https://raw.githubusercontent.com/projectcontour/contour/${CONTOUR_VER}/examples/render/contour-gateway-provisioner.yaml
+kubectl -n projectcontour rollout status deployment/contour-gateway-provisioner
 kubectl -n projectcontour get all
+kubectl get gatewayclass -oyaml
+kubectl -n projectcontour get gateway -oyaml
+echo ">>> Creating GatewayClass"
+cat <<EOF | kubectl apply -f -
+kind: GatewayClass
+apiVersion: gateway.networking.k8s.io/v1alpha2
+metadata:
+  name: contour
+spec:
+  controllerName: projectcontour.io/gateway-controller
+EOF
+echo ">>> Creating Gateway"
+cat <<EOF | kubectl apply -f -
+kind: Gateway
+apiVersion: gateway.networking.k8s.io/v1alpha2
+metadata:
+  name: contour
+  namespace: projectcontour
+spec:
+  gatewayClassName: contour
+  listeners:
+    - name: http
+      protocol: HTTP
+      port: 80
+      allowedRoutes:
+        namespaces:
+          from: All
+EOF
 echo '>>> Installing Kustomize'
 cd ${REPO_ROOT}/bin && \
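Note: with the provisioner model the Envoy data plane only materialises after the Gateway above is accepted, so a readiness gate on the new objects can be useful before the tests start. A minimal sketch, assuming the v1alpha2 condition names ("Accepted" on GatewayClass, "Ready" on Gateway):

kubectl wait gatewayclass/contour --for=condition=Accepted --timeout=2m
kubectl -n projectcontour wait gateway/contour --for=condition=Ready --timeout=2m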


@@ -74,7 +74,7 @@ spec:
       url: http://flagger-loadtester.test/
       timeout: 5s
       metadata:
-        cmd: "hey -z 2m -q 10 -c 2 -host localproject.contour.io -H 'X-Canary: insider' http://envoy.projectcontour/"
+        cmd: "hey -z 2m -q 10 -c 2 -host localproject.contour.io -H 'X-Canary: insider' http://envoy-contour.projectcontour/"
         logCmdOutput: "true"
 EOF
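Note: the load-test target changes because the Gateway provisioner names the Envoy service it generates after the Gateway ("envoy-contour" for a Gateway named "contour"), replacing the static "envoy" service shipped in contour-gateway.yaml. A quick check that the new name resolves, as a sketch:

kubectl -n projectcontour get service/envoy-contour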
@@ -83,14 +83,14 @@ check_primary "ab-test"
display_httproute "ab-test"
echo '>>> Triggering A/B testing'
kubectl -n ab-test set image deployment/podinfo podinfod=stefanprodan/podinfo:3.1.1
kubectl -n ab-test set image deployment/podinfo podinfod=stefanprodan/podinfo:6.0.1
echo '>>> Waiting for A/B testing promotion'
retries=50
count=0
ok=false
until ${ok}; do
kubectl -n ab-test describe deployment/podinfo-primary | grep '3.1.1' && ok=true || ok=false
kubectl -n ab-test describe deployment/podinfo-primary | grep '6.0.1' && ok=true || ok=false
sleep 10
kubectl -n flagger-system logs deployment/flagger --tail 1
count=$(($count + 1))
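Note: the hunk cuts the poll loop off mid-body; in Flagger's e2e scripts it conventionally closes with a retry cap along these lines (a sketch, not the literal file contents):

    if [[ ${count} -eq ${retries} ]]; then
        kubectl -n flagger-system logs deployment/flagger
        echo "No more retries left"
        exit 1
    fi
done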


@@ -68,14 +68,14 @@ check_primary "bg-test"
display_httproute "bg-test"
echo '>>> Triggering B/G deployment'
kubectl -n bg-test set image deployment/podinfo podinfod=stefanprodan/podinfo:3.1.1
kubectl -n bg-test set image deployment/podinfo podinfod=stefanprodan/podinfo:6.0.1
echo '>>> Waiting for B/G promotion'
retries=50
count=0
ok=false
until ${ok}; do
kubectl -n bg-test describe deployment/podinfo-primary | grep '3.1.1' && ok=true || ok=false
kubectl -n bg-test describe deployment/podinfo-primary | grep '6.0.1' && ok=true || ok=false
sleep 10
kubectl -n flagger-system logs deployment/flagger --tail 1
count=$(($count + 1))
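Note: instead of grepping "kubectl describe" output, the same promotion can be observed through the Canary resource that Flagger reconciles; an alternative, shown as a sketch (the status columns come from Flagger's CRD printer columns):

kubectl -n bg-test get canary/podinfo --watch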


@@ -59,7 +59,7 @@ spec:
       url: http://flagger-loadtester.test/
       timeout: 5s
       metadata:
-        cmd: "hey -z 2m -q 10 -c 2 -host localproject.contour.io http://envoy.projectcontour/"
+        cmd: "hey -z 2m -q 10 -c 2 -host localproject.contour.io http://envoy-contour.projectcontour/"
         logCmdOutput: "true"
 EOF
@@ -68,14 +68,14 @@ check_primary "test"
display_httproute "test"
echo '>>> Triggering canary deployment'
kubectl -n test set image deployment/podinfo podinfod=stefanprodan/podinfo:3.1.1
kubectl -n test set image deployment/podinfo podinfod=stefanprodan/podinfo:6.0.1
echo '>>> Waiting for canary promotion'
retries=50
count=0
ok=false
until ${ok}; do
kubectl -n test describe deployment/podinfo-primary | grep '3.1.1' && ok=true || ok=false
kubectl -n test describe deployment/podinfo-primary | grep '6.0.1' && ok=true || ok=false
sleep 10
kubectl -n flagger-system logs deployment/flagger --tail 1
count=$(($count + 1))
@@ -152,12 +152,12 @@ spec:
       url: http://flagger-loadtester.test/
       timeout: 5s
       metadata:
-        cmd: "hey -z 2m -q 10 -c 2 -host localproject.contour.io http://envoy.projectcontour/status/500"
+        cmd: "hey -z 2m -q 10 -c 2 -host localproject.contour.io http://envoy-contour.projectcontour/status/500"
         logCmdOutput: "true"
 EOF
 echo '>>> Triggering canary deployment rollback test'
-kubectl -n test set image deployment/podinfo podinfod=stefanprodan/podinfo:3.1.2
+kubectl -n test set image deployment/podinfo podinfod=stefanprodan/podinfo:6.0.2
 echo '>>> Waiting for canary rollback'
 retries=50
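Note: the rollback loop (also truncated here) waits for the opposite outcome; a sketch of the assertion it converges on, assuming Flagger marks a rolled-back canary with phase "Failed":

kubectl -n test get canary/podinfo -o jsonpath='{.status.phase}' | grep 'Failed'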


@@ -2,7 +2,7 @@
 set -o errexit
-GLOO_VER="1.9.0-rc2"
+GLOO_VER="1.11.13"
 REPO_ROOT=$(git rev-parse --show-toplevel)
 mkdir -p ${REPO_ROOT}/bin


@@ -2,7 +2,7 @@
 set -o errexit
-ISTIO_VER="1.11.0"
+ISTIO_VER="1.14.0"
 REPO_ROOT=$(git rev-parse --show-toplevel)
 mkdir -p ${REPO_ROOT}/bin


@@ -2,7 +2,7 @@
 set -o errexit
-KUMA_VER="1.4.1"
+KUMA_VER="1.6.0"
 REPO_ROOT=$(git rev-parse --show-toplevel)
 KUSTOMIZE_VERSION=3.8.2
 mkdir -p ${REPO_ROOT}/bin


@@ -2,7 +2,7 @@
 set -o errexit
-LINKERD_VER="stable-2.10.2"
+LINKERD_VER="stable-2.11.2"
 REPO_ROOT=$(git rev-parse --show-toplevel)
 mkdir -p ${REPO_ROOT}/bin


@@ -2,7 +2,7 @@
 set -o errexit
-NGINX_HELM_VERSION=4.0.3 # ingress v1.0.2
+NGINX_HELM_VERSION=4.1.3 # ingress v1.2.1
 REPO_ROOT=$(git rev-parse --show-toplevel)
 mkdir -p ${REPO_ROOT}/bin
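Note: the trailing comment tracks the chart-to-controller version mapping; it can be double-checked with Helm (a sketch, assuming the upstream repo is registered under the "ingress-nginx" alias):

helm show chart ingress-nginx/ingress-nginx --version 4.1.3 | grep appVersion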


@@ -2,7 +2,7 @@
 set -o errexit
-OSM_VER="v0.9.1"
+OSM_VER="v1.1.1"
 REPO_ROOT=$(git rev-parse --show-toplevel)
 mkdir -p ${REPO_ROOT}/bin
@@ -12,14 +12,16 @@ chmod +x ${REPO_ROOT}/bin/linux-amd64/osm
echo ">>> Installing Open Service Mesh ${OSM_VER}"
${REPO_ROOT}/bin/linux-amd64/osm install \
--set=OpenServiceMesh.deployPrometheus=true \
--set=OpenServiceMesh.enablePermissiveTrafficPolicy=true \
--set=OpenServiceMesh.osmController.resource.limits.cpu=300m \
--set=OpenServiceMesh.osmController.resource.requests.cpu=300m \
--set=OpenServiceMesh.prometheus.resources.limits.cpu=300m \
--set=OpenServiceMesh.prometheus.resources.requests.cpu=300m \
--set=OpenServiceMesh.injector.resource.limits.cpu=300m \
--set=OpenServiceMesh.injector.resource.requests.cpu=300m
--set=osm.deployPrometheus=true \
--set=osm.enablePermissiveTrafficPolicy=true \
--set=osm.osmController.resource.limits.cpu=250m \
--set=osm.osmController.resource.requests.cpu=250m \
--set=osm.prometheus.resources.limits.cpu=250m \
--set=osm.prometheus.resources.requests.cpu=250m \
--set=osm.injector.resource.limits.cpu=250m \
--set=osm.injector.resource.requests.cpu=250m \
--set=osm.osmBootstrap.resource.limits.cpu=250m \
--set=osm.osmBootstrap.resource.requests.cpu=250m
${REPO_ROOT}/bin/linux-amd64/osm version
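Note: OSM v1.x renamed the Helm values root from "OpenServiceMesh." to "osm.", which is why every flag is rewritten rather than just re-pinned; the bump also trims CPU from 300m to 250m and covers the new osmBootstrap component. A spot-check sketch that the overrides landed, assuming the default osm-system control-plane namespace:

kubectl -n osm-system get deployment/osm-controller \
    -o jsonpath='{.spec.template.spec.containers[0].resources}'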


@@ -21,4 +21,4 @@ echo '>>> Installing Flagger'
 kubectl -n flagger-system set image deployment/flagger flagger=test/flagger:latest
 kubectl -n flagger-system rollout status deployment/flagger
-kubectl -n flagger-system rollout status deployment/flagger-prometheus
\ No newline at end of file
+kubectl -n flagger-system rollout status deployment/flagger-prometheus


@@ -3,9 +3,9 @@ kind: Kustomization
 resources:
 - ../../kustomize/base/prometheus/
 - ../../kustomize/base/flagger/
-- https://raw.githubusercontent.com/zalando/skipper/v0.13.61/docs/kubernetes/deploy/deployment/rbac.yaml
-- https://raw.githubusercontent.com/zalando/skipper/v0.13.61/docs/kubernetes/deploy/deployment/service.yaml
-- https://raw.githubusercontent.com/zalando/skipper/v0.13.61/docs/kubernetes/deploy/deployment/deployment.yaml
+- https://raw.githubusercontent.com/zalando/skipper/v0.13.210/docs/kubernetes/deploy/deployment/rbac.yaml
+- https://raw.githubusercontent.com/zalando/skipper/v0.13.210/docs/kubernetes/deploy/deployment/service.yaml
+- https://raw.githubusercontent.com/zalando/skipper/v0.13.210/docs/kubernetes/deploy/deployment/deployment.yaml
 - namespace.yaml
 patchesStrategicMerge:
 - patch.yaml
@@ -32,13 +32,18 @@ patches:
       $patch: delete
     containers:
     - name: skipper-ingress
-      image: registry.opensource.zalan.do/teapot/skipper:v0.13.61
+      image: registry.opensource.zalan.do/teapot/skipper:v0.13.210
       ports:
       - name: metrics-port
         containerPort: 9911
       resources:
         $patch: delete
-      initialDelaySeconds: 60
+      readinessProbe:
+        httpGet:
+          path: /kube-system/healthz
+          port: 9999
+        timeoutSeconds: 5
+        initialDelaySeconds: 5
       args:
       - skipper
@@ -65,3 +70,4 @@ patches:
 - -serve-host-metrics
 - -serve-route-metrics
 - -whitelisted-healthcheck-cidr=0.0.0.0/0 # kind uses other IP addresses
+- -kubernetes-ingress-v1
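Note: skipper v0.13.210 still serves its built-in health route at /kube-system/healthz (gated by the -whitelisted-healthcheck-cidr flag above), which the new readinessProbe points at, while -kubernetes-ingress-v1 enables watching networking.k8s.io/v1 Ingress objects. A manual probe-path check, as a sketch assuming the upstream manifests' kube-system namespace:

kubectl -n kube-system port-forward deployment/skipper-ingress 9999:9999 &
curl -fsS http://localhost:9999/kube-system/healthz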


@@ -2,7 +2,7 @@
 set -o errexit
-TRAEFIK_CHART_VERSION="10.1.1" # traefik 2.4.9
+TRAEFIK_CHART_VERSION="10.20.1" # traefik 2.7.0
 REPO_ROOT=$(git rev-parse --show-toplevel)
 mkdir -p ${REPO_ROOT}/bin