Mirror of https://github.com/fluxcd/flagger.git (synced 2026-02-15 18:40:12 +00:00)

Compare commits: 96 commits
| SHA1 |
|---|
| 6478d0b6cf |
| 958af18dc0 |
| 54b8257c60 |
| e86f62744e |
| 0734773993 |
| 888cc667f1 |
| 053d0da617 |
| 7a4e0bc80c |
| 7b7306584f |
| d6027af632 |
| 761746af21 |
| 510a6eaaed |
| 9df6bfbb5e |
| 2ff86fa56e |
| 1b2e0481b9 |
| fe96af64e9 |
| 77d8e4e4d3 |
| 800b0475ee |
| b58e13809c |
| 9845578cdd |
| 96ccfa54fb |
| b8a64c79be |
| 4a4c261a88 |
| 8282f86d9c |
| 2b6966d8e3 |
| c667c947ad |
| 105b28bf42 |
| 37a1ff5c99 |
| d19a070faf |
| d908355ab3 |
| a6d86f2e81 |
| 9d856a4f96 |
| a7112fafb0 |
| 93f9e51280 |
| 65e9a402cf |
| f7513b33a6 |
| 0b3fa517d3 |
| 507075920c |
| a212f032a6 |
| eb8755249f |
| 73bb2a9fa2 |
| 5d3ffa8c90 |
| 87f143f5fd |
| f56b6dd6a7 |
| 5e40340f9c |
| 2456737df7 |
| 1191d708de |
| 4d26971fc7 |
| 0421b32834 |
| 360dd63e49 |
| f1670dbe6a |
| e7ad5c0381 |
| 2cfe2a105a |
| bc83cee503 |
| 5091d3573c |
| ffe5dd91c5 |
| d76b560967 |
| f062ef3a57 |
| 5fc1baf4df |
| 777b77b69e |
| 5d221e781a |
| ddab72cd59 |
| 87d0b33327 |
| 225a9015bb |
| c0b60b1497 |
| 0463c19825 |
| 8e70aa90c1 |
| 0a418eb88a |
| 040dbb8d03 |
| 64f2288bdd |
| 8008562a33 |
| a39652724d |
| 691c3c4f36 |
| f6fa5e3891 |
| a305a0b705 |
| dfe619e2ea |
| 2b3d425b70 |
| 6e55fea413 |
| b6a08b6615 |
| eaa6906516 |
| 62a7a92f2a |
| 3aeb0945c5 |
| e8c85efeae |
| 6651f6452b |
| 0ca48d77be |
| a9e0e018e3 |
| 122d11f445 |
| b03555858c |
| dcc5a40441 |
| 8c949f59de |
| e8d91a0375 |
| fae9aa664d |
| c31e9e5a96 |
| 99fff98274 |
| 11d84bf35d |
| e56ba480c7 |
````diff
 @@ -3,7 +3,7 @@ jobs:
   build-binary:
     docker:
-      - image: circleci/golang:1.12
+      - image: circleci/golang:1.13
     working_directory: ~/build
     steps:
       - checkout
 @@ -44,7 +44,7 @@ jobs:
   push-container:
     docker:
-      - image: circleci/golang:1.12
+      - image: circleci/golang:1.13
     steps:
       - checkout
       - setup_remote_docker:
 @@ -56,7 +56,7 @@ jobs:
   push-binary:
     docker:
-      - image: circleci/golang:1.12
+      - image: circleci/golang:1.13
     working_directory: ~/build
     steps:
       - checkout
 @@ -78,6 +78,17 @@ jobs:
       - run: test/e2e-istio.sh
       - run: test/e2e-tests.sh
 
+  e2e-kubernetes-testing:
+    machine: true
+    steps:
+      - checkout
+      - attach_workspace:
+          at: /tmp/bin
+      - run: test/container-build.sh
+      - run: test/e2e-kind.sh
+      - run: test/e2e-kubernetes.sh
+      - run: test/e2e-kubernetes-tests.sh
+
   e2e-smi-istio-testing:
     machine: true
     steps:
 @@ -121,6 +132,9 @@ jobs:
       - run: test/e2e-kind.sh
       - run: test/e2e-nginx.sh
       - run: test/e2e-nginx-tests.sh
+      - run: test/e2e-nginx-cleanup.sh
+      - run: test/e2e-nginx-custom-annotations.sh
+      - run: test/e2e-nginx-tests.sh
 
   e2e-linkerd-testing:
     machine: true
 @@ -133,6 +147,47 @@ jobs:
       - run: test/e2e-linkerd.sh
       - run: test/e2e-linkerd-tests.sh
 
+  push-helm-charts:
+    docker:
+      - image: circleci/golang:1.13
+    steps:
+      - checkout
+      - run:
+          name: Install kubectl
+          command: sudo curl -L https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl && sudo chmod +x /usr/local/bin/kubectl
+      - run:
+          name: Install helm
+          command: sudo curl -L https://storage.googleapis.com/kubernetes-helm/helm-v2.14.2-linux-amd64.tar.gz | tar xz && sudo mv linux-amd64/helm /bin/helm && sudo rm -rf linux-amd64
+      - run:
+          name: Initialize helm
+          command: helm init --client-only --kubeconfig=$HOME/.kube/kubeconfig
+      - run:
+          name: Lint charts
+          command: |
+            helm lint ./charts/*
+      - run:
+          name: Package charts
+          command: |
+            mkdir $HOME/charts
+            helm package ./charts/* --destination $HOME/charts
+      - run:
+          name: Publish charts
+          command: |
+            if echo "${CIRCLE_TAG}" | grep -Eq "[0-9]+(\.[0-9]+)*(-[a-z]+)?$"; then
+              REPOSITORY="https://weaveworksbot:${GITHUB_TOKEN}@github.com/weaveworks/flagger.git"
+              git config user.email weaveworksbot@users.noreply.github.com
+              git config user.name weaveworksbot
+              git remote set-url origin ${REPOSITORY}
+              git checkout gh-pages
+              mv -f $HOME/charts/*.tgz .
+              helm repo index . --url https://flagger.app
+              git add .
+              git commit -m "Publish Helm charts v${CIRCLE_TAG}"
+              git push origin gh-pages
+            else
+              echo "Not a release! Skip charts publish"
+            fi
+
 workflows:
   version: 2
   build-test-push:
 @@ -145,7 +200,7 @@ workflows:
       - e2e-istio-testing:
           requires:
             - build-binary
-      - e2e-smi-istio-testing:
+      - e2e-kubernetes-testing:
           requires:
             - build-binary
       # - e2e-supergloo-testing:
 @@ -164,7 +219,7 @@ workflows:
           requires:
             - build-binary
             - e2e-istio-testing
             - e2e-smi-istio-testing
             - e2e-kubernetes-testing
             #- e2e-supergloo-testing
             - e2e-gloo-testing
             - e2e-nginx-testing
 @@ -187,6 +242,14 @@ workflows:
             tags:
               ignore: /^chart.*/
       - push-binary:
           requires:
             - push-container
           filters:
             branches:
               ignore: /.*/
             tags:
               ignore: /^chart.*/
+      - push-helm-charts:
+          requires:
+            - push-container
+          filters:
````
CHANGELOG.md (75 changed lines):

````diff
 @@ -2,6 +2,77 @@
 
 All notable changes to this project are documented in this file.
 
+## 0.18.5 (2019-10-02)
+
+Adds support for [confirm-promotion](https://docs.flagger.app/how-it-works#webhooks) webhooks and blue/green deployments when using a service mesh
+
+#### Features
+
+- Implement confirm-promotion hook [#307](https://github.com/weaveworks/flagger/pull/307)
+- Implement B/G for service mesh providers [#305](https://github.com/weaveworks/flagger/pull/305)
+
+#### Improvements
+
+- Canary promotion improvements to avoid dropping in-flight requests [#310](https://github.com/weaveworks/flagger/pull/310)
+- Update end-to-end tests to Kubernetes v1.15.3 and Istio 1.3.0 [#306](https://github.com/weaveworks/flagger/pull/306)
+
+#### Fixes
+
+- Skip primary check for App Mesh [#315](https://github.com/weaveworks/flagger/pull/315)
+
+## 0.18.4 (2019-09-08)
+
+Adds support for NGINX custom annotations and Helm v3 acceptance testing
+
+#### Features
+
+- Add annotations prefix for NGINX ingresses [#293](https://github.com/weaveworks/flagger/pull/293)
+- Add wide columns in CRD [#289](https://github.com/weaveworks/flagger/pull/289)
+- loadtester: implement Helm v3 test command [#296](https://github.com/weaveworks/flagger/pull/296)
+- loadtester: add gPRC health check to load tester image [#295](https://github.com/weaveworks/flagger/pull/295)
+
+#### Fixes
+
+- loadtester: fix tests error logging [#286](https://github.com/weaveworks/flagger/pull/286)
+
+## 0.18.3 (2019-08-22)
+
+Adds support for tillerless helm tests and protobuf health checking
+
+#### Features
+
+- loadtester: add support for tillerless helm [#280](https://github.com/weaveworks/flagger/pull/280)
+- loadtester: add support for protobuf health checking [#280](https://github.com/weaveworks/flagger/pull/280)
+
+#### Improvements
+
+- Set HTTP listeners for AppMesh virtual routers [#272](https://github.com/weaveworks/flagger/pull/272)
+
+#### Fixes
+
+- Add missing fields to CRD validation spec [#271](https://github.com/weaveworks/flagger/pull/271)
+- Fix App Mesh backends validation in CRD [#281](https://github.com/weaveworks/flagger/pull/281)
+
+## 0.18.2 (2019-08-05)
+
+Fixes multi-port support for Istio
+
+#### Fixes
+
+- Fix port discovery for multiple port services [#267](https://github.com/weaveworks/flagger/pull/267)
+
+#### Improvements
+
+- Update e2e testing to Istio v1.2.3, Gloo v0.18.8 and NGINX ingress chart v1.12.1 [#268](https://github.com/weaveworks/flagger/pull/268)
+
+## 0.18.1 (2019-07-30)
+
+Fixes Blue/Green style deployments for Kubernetes and Linkerd providers
+
+#### Fixes
+
+- Fix Blue/Green metrics provider and add e2e tests [#261](https://github.com/weaveworks/flagger/pull/261)
+
 ## 0.18.0 (2019-07-29)
 
 Adds support for [manual gating](https://docs.flagger.app/how-it-works#manual-gating) and pausing/resuming an ongoing analysis
 @@ -18,8 +89,8 @@ Adds support for [manual gating](https://docs.flagger.app/how-it-works#manual-ga
 
 #### Breaking changes
 
-- due to the status sub-resource changes in [#240](https://github.com/weaveworks/flagger/pull/240), when upgrading Flagger the canaries status phase will be reset to `Initialized`
-- upgrading Flagger with Helm will fail due to Helm poor support of CRDs, see [workaround](https://github.com/weaveworks/flagger/issues/223)
+- Due to the status sub-resource changes in [#240](https://github.com/weaveworks/flagger/pull/240), when upgrading Flagger the canaries status phase will be reset to `Initialized`
+- Upgrading Flagger with Helm will fail due to Helm poor support of CRDs, see [workaround](https://github.com/weaveworks/flagger/issues/223)
 
 ## 0.17.0 (2019-07-08)
````
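The confirm-promotion hook introduced in 0.18.5 gates the promotion step of an analysis. A minimal sketch, assuming the load tester's gate endpoint shown later in these docs (the webhook name and the `test` namespace are illustrative):

```yaml
# Hypothetical Canary analysis snippet: promotion pauses until this hook returns HTTP 200.
canaryAnalysis:
  webhooks:
    - name: "promotion gate"
      type: confirm-promotion   # hook type added in 0.18.5
      url: http://flagger-loadtester.test/gate/approve
```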
````diff
 @@ -1,4 +1,4 @@
-FROM alpine:3.9
+FROM alpine:3.10
 
 RUN addgroup -S flagger \
     && adduser -S -g flagger flagger \
 @@ -9,13 +9,24 @@ WORKDIR /home/app
 RUN curl -sSLo hey "https://storage.googleapis.com/jblabs/dist/hey_linux_v0.1.2" && \
     chmod +x hey && mv hey /usr/local/bin/hey
 
-RUN curl -sSL "https://get.helm.sh/helm-v2.12.3-linux-amd64.tar.gz" | tar xvz && \
+RUN curl -sSL "https://get.helm.sh/helm-v2.14.3-linux-amd64.tar.gz" | tar xvz && \
     chmod +x linux-amd64/helm && mv linux-amd64/helm /usr/local/bin/helm && \
     chmod +x linux-amd64/tiller && mv linux-amd64/tiller /usr/local/bin/tiller && \
     rm -rf linux-amd64
 
 RUN curl -sSL "https://get.helm.sh/helm-v3.0.0-beta.3-linux-amd64.tar.gz" | tar xvz && \
     chmod +x linux-amd64/helm && mv linux-amd64/helm /usr/local/bin/helmv3 && \
     rm -rf linux-amd64
 
 RUN GRPC_HEALTH_PROBE_VERSION=v0.3.0 && \
     wget -qO /usr/local/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
     chmod +x /usr/local/bin/grpc_health_probe
 
 RUN curl -sSL "https://github.com/bojand/ghz/releases/download/v0.39.0/ghz_0.39.0_Linux_x86_64.tar.gz" | tar xz -C /tmp && \
     mv /tmp/ghz /usr/local/bin && chmod +x /usr/local/bin/ghz && rm -rf /tmp/ghz-web
 
 ADD https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/health/v1/health.proto /tmp/ghz/health.proto
 
 RUN ls /tmp
 
 COPY ./bin/loadtester .
 @@ -24,4 +35,7 @@ RUN chown -R app:app ./
 
 USER app
 
 RUN curl -sSL "https://github.com/rimusz/helm-tiller/archive/v0.8.3.tar.gz" | tar xvz && \
     helm init --client-only && helm plugin install helm-tiller-0.8.3 && helm plugin list
 
 ENTRYPOINT ["./loadtester"]
````
Makefile (20 changed lines):

````diff
 @@ -7,15 +7,8 @@ LT_VERSION?=$(shell grep 'VERSION' cmd/loadtester/main.go | awk '{ print $$4 }'
 TS=$(shell date +%Y-%m-%d_%H-%M-%S)
 
 run:
-	GO111MODULE=on go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=istio -namespace=test \
-		-metrics-server=https://prometheus.istio.weavedx.com \
-		-enable-leader-election=true
-
-run2:
-	GO111MODULE=on go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=istio -namespace=test \
-		-metrics-server=https://prometheus.istio.weavedx.com \
-		-enable-leader-election=true \
-		-port=9092
+	GO111MODULE=on go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=istio -namespace=test-istio \
+		-metrics-server=https://prometheus.istio.flagger.dev
 
 run-appmesh:
 	GO111MODULE=on go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=appmesh \
 @@ -38,8 +31,8 @@ run-nop:
 	-metrics-server=https://prometheus.istio.weavedx.com
 
 run-linkerd:
-	GO111MODULE=on go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=smi:linkerd -namespace=demo \
-		-metrics-server=https://linkerd-prometheus.istio.weavedx.com
+	GO111MODULE=on go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=linkerd -namespace=dev \
+		-metrics-server=https://prometheus.linkerd.flagger.dev
 
 build:
 	GIT_COMMIT=$$(git rev-list -1 HEAD) && GO111MODULE=on CGO_ENABLED=0 GOOS=linux go build -ldflags "-s -w -X github.com/weaveworks/flagger/pkg/version.REVISION=$${GIT_COMMIT}" -a -installsuffix cgo -o ./bin/flagger ./cmd/flagger/*
 @@ -63,8 +56,9 @@ test: test-fmt test-codegen
 
 helm-package:
 	cd charts/ && helm package ./*
-	mv charts/*.tgz docs/
-	helm repo index docs --url https://weaveworks.github.io/flagger --merge ./docs/index.yaml
+	mv charts/*.tgz bin/
+	curl -s https://raw.githubusercontent.com/weaveworks/flagger/gh-pages/index.yaml > ./bin/index.yaml
+	helm repo index bin --url https://flagger.app --merge ./bin/index.yaml
 
 helm-up:
 	helm upgrade --install flagger ./charts/flagger --namespace=istio-system --set crd.create=false
````
````diff
 @@ -25,7 +25,7 @@ spec:
     spec:
       containers:
         - name: podinfod
-          image: quay.io/stefanprodan/podinfo:1.7.0
+          image: quay.io/stefanprodan/podinfo:2.0.0
           imagePullPolicy: IfNotPresent
           ports:
             - containerPort: 9898
````
````diff
 @@ -25,7 +25,7 @@ spec:
     spec:
       containers:
         - name: podinfod
-          image: quay.io/stefanprodan/podinfo:1.7.0
+          image: stefanprodan/podinfo:2.0.0
           imagePullPolicy: IfNotPresent
           ports:
             - containerPort: 9898
````
````diff
 @@ -33,6 +33,22 @@ spec:
     - name: Weight
       type: string
       JSONPath: .status.canaryWeight
+    - name: FailedChecks
+      type: string
+      JSONPath: .status.failedChecks
+      priority: 1
+    - name: Interval
+      type: string
+      JSONPath: .spec.canaryAnalysis.interval
+      priority: 1
+    - name: StepWeight
+      type: string
+      JSONPath: .spec.canaryAnalysis.stepWeight
+      priority: 1
+    - name: MaxWeight
+      type: string
+      JSONPath: .spec.canaryAnalysis.maxWeight
+      priority: 1
     - name: LastTransitionTime
       type: string
       JSONPath: .status.lastTransitionTime
 @@ -46,12 +62,15 @@ spec:
               - canaryAnalysis
             properties:
+              provider:
+                description: Traffic managent provider
+                type: string
               progressDeadlineSeconds:
                 description: Deployment progress deadline
                 type: number
               targetRef:
                 description: Deployment selector
                 type: object
-                required: ['apiVersion', 'kind', 'name']
+                required: ["apiVersion", "kind", "name"]
                 properties:
                   apiVersion:
                     type: string
 @@ -60,10 +79,11 @@ spec:
                   name:
                     type: string
               autoscalerRef:
+                description: HPA selector
                 anyOf:
                   - type: string
                   - type: object
-                required: ['apiVersion', 'kind', 'name']
+                required: ["apiVersion", "kind", "name"]
                 properties:
                   apiVersion:
                     type: string
 @@ -72,10 +92,11 @@ spec:
                   name:
                     type: string
               ingressRef:
+                description: NGINX ingress selector
                 anyOf:
                   - type: string
                   - type: object
-                required: ['apiVersion', 'kind', 'name']
+                required: ["apiVersion", "kind", "name"]
                 properties:
                   apiVersion:
                     type: string
 @@ -85,18 +106,63 @@ spec:
                     type: string
               service:
                 type: object
-                required: ['port']
+                required: ["port"]
                 properties:
                   port:
                     description: Container port number
                     type: number
                   portName:
                     description: Container port name
                     type: string
                   portDiscovery:
                     description: Enable port dicovery
                     type: boolean
                   meshName:
                     description: AppMesh mesh name
                     type: string
                   backends:
                     description: AppMesh backend array
                     anyOf:
                       - type: string
                       - type: array
                   timeout:
                     description: Istio HTTP or gRPC request timeout
                     type: string
                   trafficPolicy:
                     description: Istio traffic policy
                     anyOf:
                       - type: string
                       - type: object
                   match:
                     description: Istio URL match conditions
                     anyOf:
                       - type: string
                       - type: array
                   rewrite:
                     description: Istio URL rewrite
                     anyOf:
                       - type: string
                       - type: object
                   headers:
                     description: Istio headers operations
                     anyOf:
                       - type: string
                       - type: object
                   corsPolicy:
                     description: Istio CORS policy
                     anyOf:
                       - type: string
                       - type: object
                   gateways:
                     description: Istio gateways list
                     anyOf:
                       - type: string
                       - type: array
                   hosts:
                     description: Istio hosts list
                     anyOf:
                       - type: string
                       - type: array
               skipAnalysis:
                 type: boolean
               canaryAnalysis:
 @@ -117,13 +183,18 @@ spec:
                   stepWeight:
                     description: Canary incremental traffic percentage step
                     type: number
+                  match:
+                    description: A/B testing match conditions
+                    anyOf:
+                      - type: string
+                      - type: array
                   metrics:
                     description: Prometheus query list for this canary
                     type: array
                     properties:
                       items:
                         type: object
-                        required: ['name', 'threshold']
+                        required: ["name", "threshold"]
                         properties:
                           name:
                             description: Name of the Prometheus metric
 @@ -144,7 +215,7 @@ spec:
                     properties:
                       items:
                         type: object
-                        required: ['name', 'url', 'timeout']
+                        required: ["name", "url"]
                         properties:
                           name:
                             description: Name of the webhook
 @@ -154,8 +225,10 @@ spec:
                             type: string
                             enum:
                               - ""
+                              - confirm-rollout
                               - pre-rollout
                               - rollout
+                              - confirm-promotion
                               - post-rollout
                           url:
                             description: URL address of this webhook
 @@ -165,6 +238,11 @@ spec:
                             description: Request timeout for this webhook
                             type: string
                             pattern: "^[0-9]+(m|s)"
+                          metadata:
+                            description: Metadata (key-value pairs) for this webhook
+                            anyOf:
+                              - type: string
+                              - type: object
         status:
           properties:
             phase:
 @@ -176,6 +254,7 @@ spec:
                 - Initialized
                 - Waiting
                 - Progressing
+                - Promoting
                 - Finalising
                 - Succeeded
                 - Failed
 @@ -201,7 +280,7 @@ spec:
             properties:
               items:
                 type: object
-                required: ['type', 'status', 'reason']
+                required: ["type", "status", "reason"]
                 properties:
                   lastTransitionTime:
                     description: LastTransitionTime of this condition
````
````diff
 @@ -22,7 +22,7 @@ spec:
       serviceAccountName: flagger
       containers:
         - name: flagger
-          image: weaveworks/flagger:0.18.0
+          image: weaveworks/flagger:0.18.5
           imagePullPolicy: IfNotPresent
           ports:
             - name: http
````
````diff
 @@ -19,7 +19,7 @@ spec:
       serviceAccountName: tiller
       containers:
         - name: helmtester
-          image: weaveworks/flagger-loadtester:0.4.0
+          image: weaveworks/flagger-loadtester:0.8.0
           imagePullPolicy: IfNotPresent
           ports:
             - name: http
````
````diff
 @@ -17,7 +17,7 @@ spec:
     spec:
       containers:
         - name: loadtester
-          image: weaveworks/flagger-loadtester:0.6.1
+          image: weaveworks/flagger-loadtester:0.8.0
           imagePullPolicy: IfNotPresent
           ports:
             - name: http
````
````diff
 @@ -1,7 +1,7 @@
 apiVersion: v1
 name: flagger
-version: 0.18.0
-appVersion: 0.18.0
+version: 0.18.5
+appVersion: 0.18.5
 kubeVersion: ">=1.11.0-0"
 engine: gotpl
 description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio, Linkerd, App Mesh, Gloo or NGINX routing for traffic shifting and Prometheus metrics for canary analysis.
````
````diff
 @@ -74,6 +74,7 @@ Parameter | Description | Default
 `msteams.url` | Microsoft Teams incoming webhook | None
 `leaderElection.enabled` | leader election must be enabled when running more than one replica | `false`
 `leaderElection.replicaCount` | number of replicas | `1`
+`ingressAnnotationsPrefix` | annotations prefix for ingresses | `custom.ingress.kubernetes.io`
 `rbac.create` | if `true`, create and use RBAC resources | `true`
 `rbac.pspEnabled` | If `true`, create and use a restricted pod security policy | `false`
 `crd.create` | if `true`, create Flagger's CRDs | `true`
````
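The new `ingressAnnotationsPrefix` chart parameter maps onto the controller flag of the same name (see the deployment template and `cmd/flagger` changes below). A hedged values.yaml sketch; the prefix value is the chart's documented default, everything else is illustrative:

```yaml
# Illustrative chart values; only ingressAnnotationsPrefix is new in this release.
image:
  repository: weaveworks/flagger
  tag: 0.18.5
metricsServer: "http://prometheus:9090"
ingressAnnotationsPrefix: custom.ingress.kubernetes.io  # rendered as -ingress-annotations-prefix
leaderElection:
  enabled: false
  replicaCount: 1
```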
````diff
 @@ -34,6 +34,22 @@ spec:
     - name: Weight
       type: string
       JSONPath: .status.canaryWeight
+    - name: FailedChecks
+      type: string
+      JSONPath: .status.failedChecks
+      priority: 1
+    - name: Interval
+      type: string
+      JSONPath: .spec.canaryAnalysis.interval
+      priority: 1
+    - name: StepWeight
+      type: string
+      JSONPath: .spec.canaryAnalysis.stepWeight
+      priority: 1
+    - name: MaxWeight
+      type: string
+      JSONPath: .spec.canaryAnalysis.maxWeight
+      priority: 1
     - name: LastTransitionTime
       type: string
       JSONPath: .status.lastTransitionTime
 @@ -47,10 +63,13 @@ spec:
               - canaryAnalysis
             properties:
+              provider:
+                description: Traffic managent provider
+                type: string
               progressDeadlineSeconds:
                 description: Deployment progress deadline
                 type: number
               targetRef:
                 description: Deployment selector
                 type: object
                 required: ['apiVersion', 'kind', 'name']
                 properties:
 @@ -61,6 +80,7 @@ spec:
                   name:
                     type: string
               autoscalerRef:
+                description: HPA selector
                 anyOf:
                   - type: string
                   - type: object
 @@ -73,6 +93,7 @@ spec:
                   name:
                     type: string
               ingressRef:
+                description: NGINX ingress selector
                 anyOf:
                   - type: string
                   - type: object
 @@ -89,15 +110,60 @@ spec:
                 required: ['port']
                 properties:
                   port:
                     description: Container port number
                     type: number
                   portName:
                     description: Container port name
                     type: string
                   portDiscovery:
                     description: Enable port dicovery
                     type: boolean
                   meshName:
                     description: AppMesh mesh name
                     type: string
                   backends:
                     description: AppMesh backend array
                     anyOf:
                       - type: string
                       - type: array
                   timeout:
                     description: Istio HTTP or gRPC request timeout
                     type: string
                   trafficPolicy:
                     description: Istio traffic policy
                     anyOf:
                       - type: string
                       - type: object
                   match:
                     description: Istio URL match conditions
                     anyOf:
                       - type: string
                       - type: array
                   rewrite:
                     description: Istio URL rewrite
                     anyOf:
                       - type: string
                       - type: object
                   headers:
                     description: Istio headers operations
                     anyOf:
                       - type: string
                       - type: object
                   corsPolicy:
                     description: Istio CORS policy
                     anyOf:
                       - type: string
                       - type: object
                   gateways:
                     description: Istio gateways list
                     anyOf:
                       - type: string
                       - type: array
                   hosts:
                     description: Istio hosts list
                     anyOf:
                       - type: string
                       - type: array
               skipAnalysis:
                 type: boolean
               canaryAnalysis:
 @@ -118,6 +184,11 @@ spec:
                   stepWeight:
                     description: Canary incremental traffic percentage step
                     type: number
+                  match:
+                    description: A/B testing match conditions
+                    anyOf:
+                      - type: string
+                      - type: array
                   metrics:
                     description: Prometheus query list for this canary
                     type: array
 @@ -145,7 +216,7 @@ spec:
                     properties:
                       items:
                         type: object
-                        required: ['name', 'url', 'timeout']
+                        required: ["name", "url"]
                         properties:
                           name:
                             description: Name of the webhook
 @@ -155,8 +226,10 @@ spec:
                             type: string
                             enum:
                               - ""
+                              - confirm-rollout
                               - pre-rollout
                               - rollout
+                              - confirm-promotion
                               - post-rollout
                           url:
                             description: URL address of this webhook
 @@ -166,6 +239,11 @@ spec:
                             description: Request timeout for this webhook
                             type: string
                             pattern: "^[0-9]+(m|s)"
+                          metadata:
+                            description: Metadata (key-value pairs) for this webhook
+                            anyOf:
+                              - type: string
+                              - type: object
         status:
           properties:
             phase:
 @@ -177,6 +255,7 @@ spec:
                 - Initialized
                 - Waiting
                 - Progressing
+                - Promoting
                 - Finalising
                 - Succeeded
                 - Failed
````
````diff
 @@ -20,6 +20,10 @@ spec:
       labels:
         app.kubernetes.io/name: {{ template "flagger.name" . }}
         app.kubernetes.io/instance: {{ .Release.Name }}
+      annotations:
+        {{- if .Values.podAnnotations }}
+        {{ toYaml .Values.podAnnotations | indent 8 }}
+        {{- end }}
     spec:
       serviceAccountName: {{ template "flagger.serviceAccountName" . }}
       affinity:
 @@ -32,6 +36,10 @@ spec:
                   app.kubernetes.io/name: {{ template "flagger.name" . }}
                   app.kubernetes.io/instance: {{ .Release.Name }}
               topologyKey: kubernetes.io/hostname
+      {{- if .Values.image.pullSecret }}
+      imagePullSecrets:
+        - name: {{ .Values.image.pullSecret }}
+      {{- end }}
       containers:
         - name: flagger
           securityContext:
 @@ -68,6 +76,9 @@ spec:
             - -enable-leader-election=true
             - -leader-election-namespace={{ .Release.Namespace }}
             {{- end }}
+            {{- if .Values.ingressAnnotationsPrefix }}
+            - -ingress-annotations-prefix={{ .Values.ingressAnnotationsPrefix }}
+            {{- end }}
           livenessProbe:
             exec:
               command:
````
````diff
 @@ -2,8 +2,11 @@
 
 image:
   repository: weaveworks/flagger
-  tag: 0.18.0
+  tag: 0.18.5
   pullPolicy: IfNotPresent
   pullSecret:
 
+podAnnotations: {}
+
 metricsServer: "http://prometheus:9090"
````
````diff
 @@ -20,6 +20,9 @@ spec:
         release: {{ .Release.Name }}
       annotations:
         prometheus.io/scrape: 'false'
+        {{- if .Values.podAnnotations }}
+        {{ toYaml .Values.podAnnotations | indent 8 }}
+        {{- end }}
     spec:
       containers:
         - name: {{ .Chart.Name }}
````
````diff
 @@ -9,6 +9,8 @@ image:
   tag: 6.2.5
   pullPolicy: IfNotPresent
 
+podAnnotations: {}
+
 service:
   type: ClusterIP
   port: 80
````
````diff
 @@ -1,7 +1,7 @@
 apiVersion: v1
 name: loadtester
-version: 0.6.0
-appVersion: 0.6.1
+version: 0.8.0
+appVersion: 0.8.0
 kubeVersion: ">=1.11.0-0"
 engine: gotpl
 description: Flagger's load testing services based on rakyll/hey and bojand/ghz that generates traffic during canary analysis when configured as a webhook.
````
````diff
 @@ -18,9 +18,14 @@ spec:
         app: {{ include "loadtester.name" . }}
       annotations:
         appmesh.k8s.aws/ports: "444"
         {{- if .Values.podAnnotations }}
         {{ toYaml .Values.podAnnotations | indent 8 }}
         {{- end }}
     spec:
+      {{- if .Values.serviceAccountName }}
+      serviceAccountName: {{ .Values.serviceAccountName }}
+      {{- else if .Values.rbac.create }}
+      serviceAccountName: {{ include "loadtester.fullname" . }}
+      {{- end }}
       containers:
         - name: {{ .Chart.Name }}
````
charts/loadtester/templates/rbac.yaml (new file, 54 lines):

````diff
 @@ -0,0 +1,54 @@
+---
+{{- if .Values.rbac.create }}
+apiVersion: rbac.authorization.k8s.io/v1
+{{- if eq .Values.rbac.scope "cluster" }}
+kind: ClusterRole
+{{- else }}
+kind: Role
+{{- end }}
+metadata:
+  name: {{ template "loadtester.fullname" . }}
+  labels:
+    helm.sh/chart: {{ template "loadtester.chart" . }}
+    app.kubernetes.io/name: {{ template "loadtester.name" . }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+rules:
+{{ toYaml .Values.rbac.rules | indent 2 }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+{{- if eq .Values.rbac.scope "cluster" }}
+kind: ClusterRoleBinding
+{{- else }}
+kind: RoleBinding
+{{- end }}
+metadata:
+  name: {{ template "loadtester.fullname" . }}
+  labels:
+    helm.sh/chart: {{ template "loadtester.chart" . }}
+    app.kubernetes.io/name: {{ template "loadtester.name" . }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  {{- if eq .Values.rbac.scope "cluster" }}
+  kind: ClusterRole
+  {{- else }}
+  kind: Role
+  {{- end }}
+  name: {{ template "loadtester.fullname" . }}
+subjects:
+  - kind: ServiceAccount
+    name: {{ template "loadtester.fullname" . }}
+    namespace: {{ .Release.Namespace }}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: {{ template "loadtester.fullname" . }}
+  labels:
+    helm.sh/chart: {{ template "loadtester.chart" . }}
+    app.kubernetes.io/name: {{ template "loadtester.name" . }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
````
````diff
 @@ -2,9 +2,11 @@ replicaCount: 1
 
 image:
   repository: weaveworks/flagger-loadtester
-  tag: 0.6.1
+  tag: 0.8.0
   pullPolicy: IfNotPresent
 
+podAnnotations: {}
+
 logLevel: info
 cmd:
   timeout: 1h
 @@ -27,6 +29,20 @@ tolerations: []
 
 affinity: {}
 
+rbac:
+  # rbac.create: `true` if rbac resources should be created
+  create: false
+  # rbac.scope: `cluster` to create cluster-scope rbac resources (ClusterRole/ClusterRoleBinding)
+  # otherwise, namespace-scope rbac resources will be created (Role/RoleBinding)
+  scope:
+  # rbac.rules: array of rules to apply to the role. example:
+  # rules:
+  #   - apiGroups: [""]
+  #     resources: ["pods"]
+  #     verbs: ["list", "get"]
+  rules: []
+
+# name of an existing service account to use - if not creating rbac resources
+serviceAccountName: ""
+
 # App Mesh virtual node settings
````
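Putting the new RBAC knobs together: a hedged values sketch that would render the ClusterRole/ClusterRoleBinding branch of the rbac.yaml template above. The rule list comes from the example in the values comments, not from any default:

```yaml
# Illustrative: enable cluster-scoped RBAC for the load tester.
rbac:
  create: true
  scope: cluster  # any other value renders a namespaced Role/RoleBinding instead
  rules:
    - apiGroups: [""]
      resources: ["pods"]
      verbs: ["list", "get"]
```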
````diff
 @@ -1,10 +1,10 @@
 apiVersion: v1
-version: 2.3.0
-appVersion: 1.7.0
+version: 3.1.0
+appVersion: 3.1.0
 name: podinfo
 engine: gotpl
 description: Flagger canary deployment demo chart
-home: https://github.com/weaveworks/flagger
+home: https://flagger.app
 maintainers:
   - email: stefanprodan@users.noreply.github.com
     name: stefanprodan
````
````diff
 @@ -21,10 +21,13 @@ spec:
         app: {{ template "podinfo.fullname" . }}
       annotations:
         prometheus.io/scrape: 'true'
+        {{- if .Values.podAnnotations }}
+        {{ toYaml .Values.podAnnotations | indent 8 }}
+        {{- end }}
     spec:
       terminationGracePeriodSeconds: 30
       containers:
-        - name: {{ .Chart.Name }}
+        - name: podinfo
           image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
           imagePullPolicy: {{ .Values.image.pullPolicy }}
           command:
 @@ -34,6 +37,9 @@ spec:
             - --random-delay={{ .Values.faults.delay }}
             - --random-error={{ .Values.faults.error }}
             - --config-path=/podinfo/config
+            {{- range .Values.backends }}
+            - --backend-url={{ . }}
+            {{- end }}
           env:
             {{- if .Values.message }}
             - name: PODINFO_UI_MESSAGE
````
charts/podinfo/templates/tests/jwt.yaml (new file, 29 lines):

````diff
 @@ -0,0 +1,29 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: {{ template "podinfo.fullname" . }}-jwt-test-{{ randAlphaNum 5 | lower }}
+  labels:
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
+    app: {{ template "podinfo.name" . }}
+  annotations:
+    "helm.sh/hook": test-success
+    sidecar.istio.io/inject: "false"
+    linkerd.io/inject: disabled
+    appmesh.k8s.aws/sidecarInjectorWebhook: disabled
+spec:
+  containers:
+    - name: tools
+      image: giantswarm/tiny-tools
+      command:
+        - sh
+        - -c
+        - |
+          TOKEN=$(curl -sd 'test' ${PODINFO_SVC}/token | jq -r .token) &&
+          curl -H "Authorization: Bearer ${TOKEN}" ${PODINFO_SVC}/token/validate | grep test
+      env:
+        - name: PODINFO_SVC
+          value: {{ template "podinfo.fullname" . }}:{{ .Values.service.port }}
+  restartPolicy: Never
````
````diff
 @@ -1,22 +0,0 @@
-{{- $url := printf "%s%s.%s:%v" (include "podinfo.fullname" .) (include "podinfo.suffix" .) .Release.Namespace .Values.service.port -}}
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: {{ template "podinfo.fullname" . }}-tests
-  labels:
-    heritage: {{ .Release.Service }}
-    release: {{ .Release.Name }}
-    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
-    app: {{ template "podinfo.name" . }}
-data:
-  run.sh: |-
-    @test "HTTP POST /echo" {
-      run curl --retry 3 --connect-timeout 2 -sSX POST -d 'test' {{ $url }}/echo
-      [ $output = "test" ]
-    }
-    @test "HTTP POST /store" {
-      curl --retry 3 --connect-timeout 2 -sSX POST -d 'test' {{ $url }}/store
-    }
-    @test "HTTP GET /" {
-      curl --retry 3 --connect-timeout 2 -sS {{ $url }} | grep hostname
-    }
````
````diff
 @@ -1,43 +0,0 @@
-apiVersion: v1
-kind: Pod
-metadata:
-  name: {{ template "podinfo.fullname" . }}-tests-{{ randAlphaNum 5 | lower }}
-  annotations:
-    "helm.sh/hook": test-success
-    sidecar.istio.io/inject: "false"
-  labels:
-    heritage: {{ .Release.Service }}
-    release: {{ .Release.Name }}
-    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
-    app: {{ template "podinfo.name" . }}
-spec:
-  initContainers:
-    - name: "test-framework"
-      image: "dduportal/bats:0.4.0"
-      command:
-        - "bash"
-        - "-c"
-        - |
-          set -ex
-          # copy bats to tools dir
-          cp -R /usr/local/libexec/ /tools/bats/
-      volumeMounts:
-        - mountPath: /tools
-          name: tools
-  containers:
-    - name: {{ .Release.Name }}-ui-test
-      image: dduportal/bats:0.4.0
-      command: ["/tools/bats/bats", "-t", "/tests/run.sh"]
-      volumeMounts:
-        - mountPath: /tests
-          name: tests
-          readOnly: true
-        - mountPath: /tools
-          name: tools
-  volumes:
-    - name: tests
-      configMap:
-        name: {{ template "podinfo.fullname" . }}-tests
-    - name: tools
-      emptyDir: {}
-  restartPolicy: Never
````
````diff
 @@ -1,22 +1,25 @@
 # Default values for podinfo.
 image:
-  repository: quay.io/stefanprodan/podinfo
-  tag: 1.7.0
+  repository: stefanprodan/podinfo
+  tag: 3.1.0
   pullPolicy: IfNotPresent
 
+podAnnotations: {}
+
 service:
   enabled: false
   type: ClusterIP
   port: 9898
 
 hpa:
   enabled: true
   minReplicas: 2
-  maxReplicas: 2
+  maxReplicas: 4
   cpu: 80
   memory: 512Mi
 
 canary:
-  enabled: true
+  enabled: false
   # Istio traffic policy tls can be DISABLE or ISTIO_MUTUAL
   istioTLS: DISABLE
   istioIngress:
 @@ -69,6 +72,7 @@ fullnameOverride: ""
 
 logLevel: info
 backend: #http://backend-podinfo:9898/echo
+backends: []
 message: #UI greetings
 
 faults:
````
````diff
 @@ -33,25 +33,26 @@ import (
 )
 
 var (
-	masterURL               string
-	kubeconfig              string
-	metricsServer           string
-	controlLoopInterval     time.Duration
-	logLevel                string
-	port                    string
-	msteamsURL              string
-	slackURL                string
-	slackUser               string
-	slackChannel            string
-	threadiness             int
-	zapReplaceGlobals       bool
-	zapEncoding             string
-	namespace               string
-	meshProvider            string
-	selectorLabels          string
-	enableLeaderElection    bool
-	leaderElectionNamespace string
-	ver                     bool
+	masterURL                string
+	kubeconfig               string
+	metricsServer            string
+	controlLoopInterval      time.Duration
+	logLevel                 string
+	port                     string
+	msteamsURL               string
+	slackURL                 string
+	slackUser                string
+	slackChannel             string
+	threadiness              int
+	zapReplaceGlobals        bool
+	zapEncoding              string
+	namespace                string
+	meshProvider             string
+	selectorLabels           string
+	ingressAnnotationsPrefix string
+	enableLeaderElection     bool
+	leaderElectionNamespace  string
+	ver                      bool
 )
 
 func init() {
 @@ -71,6 +72,7 @@ func init() {
 	flag.StringVar(&namespace, "namespace", "", "Namespace that flagger would watch canary object.")
 	flag.StringVar(&meshProvider, "mesh-provider", "istio", "Service mesh provider, can be istio, linkerd, appmesh, supergloo, nginx or smi.")
 	flag.StringVar(&selectorLabels, "selector-labels", "app,name,app.kubernetes.io/name", "List of pod labels that Flagger uses to create pod selectors.")
+	flag.StringVar(&ingressAnnotationsPrefix, "ingress-annotations-prefix", "nginx.ingress.kubernetes.io", "Annotations prefix for ingresses.")
 	flag.BoolVar(&enableLeaderElection, "enable-leader-election", false, "Enable leader election.")
 	flag.StringVar(&leaderElectionNamespace, "leader-election-namespace", "kube-system", "Namespace used to create the leader election config map.")
 	flag.BoolVar(&ver, "version", false, "Print version")
 @@ -175,7 +177,7 @@ func main() {
 	// start HTTP server
 	go server.ListenAndServe(port, 3*time.Second, logger, stopCh)
 
-	routerFactory := router.NewFactory(cfg, kubeClient, flaggerClient, logger, meshClient)
+	routerFactory := router.NewFactory(cfg, kubeClient, flaggerClient, ingressAnnotationsPrefix, logger, meshClient)
 
 	c := controller.NewController(
 		kubeClient,
````
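The new `-ingress-annotations-prefix` flag reaches the router factory above; the chart wires it from `ingressAnnotationsPrefix` in the values. A hedged sketch of how the rendered container spec could look; the flag default is the one registered in `init()`, while the other args and the command form are illustrative:

```yaml
# Illustrative container spec showing the new flag alongside existing ones.
containers:
  - name: flagger
    image: weaveworks/flagger:0.18.5
    command:
      - ./flagger
      - -log-level=info
      - -mesh-provider=nginx
      - -metrics-server=http://prometheus:9090
      - -ingress-annotations-prefix=custom.ingress.kubernetes.io  # default: nginx.ingress.kubernetes.io
```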
````diff
 @@ -10,7 +10,7 @@ import (
 	"time"
 )
 
-var VERSION = "0.6.1"
+var VERSION = "0.8.0"
 var (
 	logLevel string
 	port     string
````
````diff
 @@ -232,8 +232,7 @@ spec:
       mode: ISTIO_MUTUAL
 ```
 
-Both port `8080` and `9090` will be added to the ClusterIP services but the virtual service
-will point to the port specified in `spec.service.port`.
+Both port `8080` and `9090` will be added to the ClusterIP services.
 
 ### Label selectors
````
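Port discovery is what surfaces those extra container ports on the generated ClusterIP services. A hedged canary snippet; the port values follow the podinfo example used throughout these docs, and `portDiscovery` is the CRD field shown in the validation spec earlier:

```yaml
# Illustrative: route on 9898 and let Flagger discover the other container ports.
spec:
  service:
    port: 9898           # spec.service.port
    portDiscovery: true  # e.g. 8080 and 9090 get added to the ClusterIP services
```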
````diff
 @@ -143,7 +143,7 @@ status:
 ```
 
 The `Promoted` status condition can have one of the following reasons:
-Initialized, Waiting, Progressing, Finalising, Succeeded or Failed.
+Initialized, Waiting, Progressing, Promoting, Finalising, Succeeded or Failed.
 A failed canary will have the promoted status set to `false`,
 the reason to `failed` and the last applied spec will be different to the last promoted one.
````
````diff
 @@ -153,6 +153,26 @@ Wait for a successful rollout:
 kubectl wait canary/podinfo --for=condition=promoted
 ```
 
+CI example:
+
+```bash
+# update the container image
+kubectl set image deployment/podinfo podinfod=stefanprodan/podinfo:3.0.1
+
+# wait for Flagger to detect the change
+ok=false
+until ${ok}; do
+    kubectl get canary/podinfo | grep 'Progressing' && ok=true || ok=false
+    sleep 5
+done
+
+# wait for the canary analysis to finish
+kubectl wait canary/podinfo --for=condition=promoted --timeout=5m
+
+# check if the deployment was successful
+kubectl get canary/podinfo | grep Succeeded
+```
+
 ### Istio routing
 
 Flagger creates an Istio Virtual Service and Destination Rules based on the Canary service spec.
 @@ -344,12 +364,13 @@ A canary deployment is triggered by changes in any of the following objects:
 Gated canary promotion stages:
 
 * scan for canary deployments
 * check Istio virtual service routes are mapped to primary and canary ClusterIP services
-* check primary and canary deployments status
+* check primary and canary deployment status
   * halt advancement if a rolling update is underway
   * halt advancement if pods are unhealthy
-* call pre-rollout webhooks are check results
-  * halt advancement if any hook returned a non HTTP 2xx result
+* call confirm-rollout webhooks and check results
+  * halt advancement if any hook returns a non HTTP 2xx result
+* call pre-rollout webhooks and check results
+  * halt advancement if any hook returns a non HTTP 2xx result
   * increment the failed checks counter
 * increase canary traffic weight percentage from 0% to 5% (step weight)
 * call rollout webhooks and check results
 @@ -366,8 +387,11 @@ Gated canary promotion stages:
   * halt advancement if any webhook call fails
   * halt advancement while canary request success rate is under the threshold
   * halt advancement while canary request duration P99 is over the threshold
   * halt advancement while any custom metric check fails
   * halt advancement if the primary or canary deployment becomes unhealthy
   * halt advancement while canary deployment is being scaled up/down by HPA
+* call confirm-promotion webhooks and check results
+  * halt advancement if any hook returns a non HTTP 2xx result
 * promote canary to primary
   * copy ConfigMaps and Secrets from canary to primary
   * copy canary deployment spec template over primary
 @@ -377,7 +401,7 @@ Gated canary promotion stages:
 * scale to zero the canary deployment
 * mark rollout as finished
 * call post-rollout webhooks
-* post the analysis result to Slack
+* post the analysis result to Slack or MS Teams
 * wait for the canary deployment to be updated and start over
 
 ### Canary Analysis
 @@ -663,6 +687,9 @@ The canary advancement is paused if a pre-rollout hook fails and if the number o
 threshold the canary will be rollback.
 * Rollout hooks are executed during the analysis on each iteration before the metric checks.
 If a rollout hook call fails the canary advancement is paused and eventfully rolled back.
+* Confirm-promotion hooks are executed before the promotion step.
+The canary promotion is paused until the hooks return HTTP 200.
+While the promotion is paused, Flagger will continue to run the metrics checks and rollout hooks.
 * Post-rollout hooks are executed after the canary has been promoted or rolled back.
 If a post rollout hook fails the error is logged.
 @@ -687,6 +714,9 @@ Spec:
       timeout: 15s
       metadata:
         cmd: "hey -z 1m -q 5 -c 2 http://podinfo-canary.test:9898/"
+    - name: "promotion gate"
+      type: confirm-promotion
+      url: http://flagger-loadtester.test/gate/approve
     - name: "notify"
       type: post-rollout
       url: http://telegram.bot:8080/
 @@ -798,6 +828,18 @@ webhooks:
       cmd: "ghz -z 1m -q 10 -c 2 --insecure podinfo.test:9898"
 ```
 
+`ghz` uses reflection to identify which gRPC method to call. If you do not wish to enable reflection for your gRPC service you can implement a standardized health check from the [grpc-proto](https://github.com/grpc/grpc-proto) library. To use this [health check schema](https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto) without reflection you can pass a parameter to `ghz` like this
+
+```yaml
+webhooks:
+  - name: grpc-load-test-no-reflection
+    url: http://flagger-loadtester.test/
+    timeout: 5s
+    metadata:
+      type: cmd
+      cmd: "ghz --insecure --proto=/tmp/ghz/health.proto --call=grpc.health.v1.Health/Check podinfo.test:9898"
+```
+
 The load tester can run arbitrary commands as long as the binary is present in the container image.
 For example if you you want to replace `hey` with another CLI, you can create your own Docker image:
 @@ -870,6 +912,20 @@ Now you can add pre-rollout webhooks to the canary analysis spec:
 When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary.
 If the helm test fails, Flagger will retry until the analysis threshold is reached and the canary is rolled back.
 
+If you are using Helm v3, you'll have to create a dedicated service account and add the release namespace to the test command:
+
+```yaml
+  canaryAnalysis:
+    webhooks:
+      - name: "smoke test"
+        type: pre-rollout
+        url: http://flagger-helmtester.kube-system/
+        timeout: 3m
+        metadata:
+          type: "helmv3"
+          cmd: "test run {{ .Release.Name }} --cleanup -n {{ .Release.Namespace }}"
+```
+
 As an alternative to Helm you can use the [Bash Automated Testing System](https://github.com/bats-core/bats-core) to run your tests.
 
 ```yaml
 @@ -888,8 +944,8 @@ Note that you should create a ConfigMap with your Bats tests and mount it inside
 
 ### Manual Gating
 
-For manual approval of a canary deployment you can use the `confirm-rollout` webhook.
-The confirmation hooks are executed before the pre-rollout hooks.
+For manual approval of a canary deployment you can use the `confirm-rollout` and `confirm-promotion` webhooks.
+The confirmation rollout hooks are executed before the pre-rollout hooks.
 Flagger will halt the canary traffic shifting and analysis until the confirm webhook returns HTTP status 200.
 
 Manual gating with Flagger's tester:
 @@ -948,3 +1004,16 @@ kubectl get canary/podinfo
 NAME      STATUS    WEIGHT
 podinfo   Waiting   0
 ```
+
+The `confirm-promotion` hook type can be used to manually approve the canary promotion.
+While the promotion is paused, Flagger will continue to run the metrics checks and load tests.
+
+```yaml
+  canaryAnalysis:
+    webhooks:
+      - name: "promotion gate"
+        type: confirm-promotion
+        url: http://flagger-loadtester.test/gate/halt
+```
+
+If you have notifications enabled, Flagger will post a message to Slack or MS Teams if a canary promotion is waiting for approval.
````
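Both gates from this section can appear in one analysis spec. A hedged sketch combining them; the URLs are the tester gate endpoints shown above (`/gate/halt` keeps the gate closed until it is approved):

```yaml
# Illustrative: gate both the rollout start and the final promotion on manual approval.
canaryAnalysis:
  webhooks:
    - name: "rollout gate"
      type: confirm-rollout
      url: http://flagger-loadtester.test/gate/approve
    - name: "promotion gate"
      type: confirm-promotion
      url: http://flagger-loadtester.test/gate/halt  # promotion waits until approved
```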
````diff
 @@ -186,7 +186,7 @@ Install cert-manager's CRDs:
 
 ```bash
 CERT_REPO=https://raw.githubusercontent.com/jetstack/cert-manager
 
-kubectl apply -f ${CERT_REPO}/release-0.7/deploy/manifests/00-crds.yaml
+kubectl apply -f ${CERT_REPO}/release-0.10/deploy/manifests/00-crds.yaml
 ```
 
 Create the cert-manager namespace and disable resource validation:
 @@ -204,7 +204,7 @@ helm repo add jetstack https://charts.jetstack.io && \
 helm repo update && \
 helm upgrade -i cert-manager \
   --namespace cert-manager \
-  --version v0.7.0 \
+  --version v0.10.0 \
   jetstack/cert-manager
 ```
 
 @@ -339,7 +339,7 @@ Find the GKE Istio version with:
 kubectl -n istio-system get deploy istio-pilot -oyaml | grep image:
 ```
 
-Install Prometheus in istio-system namespace (replace `1.0.6-gke.3` with your version):
+Install Prometheus in istio-system namespace:
 
 ```bash
 kubectl -n istio-system apply -f \
````
````diff
 @@ -51,7 +51,7 @@ helm upgrade -i frontend flagger/podinfo \
   --namespace test \
   --set nameOverride=frontend \
   --set backend=http://backend.test:9898/echo \
   --set canary.loadtest.enabled=true \
   --set canary.enabled=true \
   --set canary.istioIngress.enabled=true \
   --set canary.istioIngress.gateway=public-gateway.istio-system.svc.cluster.local \
   --set canary.istioIngress.host=frontend.istio.example.com
 @@ -91,7 +91,7 @@ Now let's install the `backend` release without exposing it outside the mesh:
 helm upgrade -i backend flagger/podinfo \
   --namespace test \
   --set nameOverride=backend \
   --set canary.loadtest.enabled=true \
   --set canary.enabled=true \
   --set canary.istioIngress.enabled=false
 ```
 
 @@ -138,7 +138,7 @@ helm upgrade -i frontend flagger/podinfo/ \
   --reuse-values \
   --set canary.loadtest.enabled=true \
   --set canary.helmtest.enabled=true \
-  --set image.tag=1.7.1
+  --set image.tag=3.1.1
 ```
 
 Flagger detects that the deployment revision changed and starts the canary analysis:
 @@ -177,6 +177,7 @@ Now trigger a canary deployment for the `backend` app, but this time you'll chan
 helm upgrade -i backend flagger/podinfo/ \
   --namespace test \
   --reuse-values \
   --set canary.loadtest.enabled=true \
   --set canary.helmtest.enabled=true \
   --set httpServer.timeout=25s
 ```
 @@ -283,17 +284,17 @@ metadata:
   namespace: test
   annotations:
     flux.weave.works/automated: "true"
-    flux.weave.works/tag.chart-image: semver:~1.7
+    flux.weave.works/tag.chart-image: semver:~3.1
 spec:
   releaseName: frontend
   chart:
-    repository: https://stefanprodan.github.io/flagger/
-    name: podinfo
-    version: 2.3.0
+    git: https://github.com/weaveowrks/flagger
+    ref: master
+    path: charts/podinfo
   values:
     image:
-      repository: quay.io/stefanprodan/podinfo
-      tag: 1.7.0
+      repository: stefanprodan/podinfo
+      tag: 3.1.0
     backend: http://backend-podinfo:9898/echo
     canary:
       enabled: true
 @@ -311,26 +312,26 @@ In the `chart` section I've defined the release source by specifying the Helm re
 In the `values` section I've overwritten the defaults set in values.yaml.
 
 With the `flux.weave.works` annotations I instruct Flux to automate this release.
-When an image tag in the sem ver range of `1.7.0 - 1.7.99` is pushed to Quay,
+When an image tag in the sem ver range of `3.1.0 - 3.1.99` is pushed to Docker Hub,
 Flux will upgrade the Helm release and from there Flagger will pick up the change and start a canary deployment.
 
 Install [Weave Flux](https://github.com/weaveworks/flux) and its Helm Operator by specifying your Git repo URL:
 
 ```bash
-helm repo add weaveworks https://weaveworks.github.io/flux
+helm repo add fluxcd https://charts.fluxcd.io
 
 helm install --name flux \
   --set helmOperator.create=true \
   --set helmOperator.createCRD=true \
   --set git.url=git@github.com:<USERNAME>/<REPOSITORY> \
-  --namespace flux \
-  weaveworks/flux
+  --namespace fluxcd \
+  fluxcd/flux
 ```
 
 At startup Flux generates a SSH key and logs the public key. Find the SSH public key with:
 
 ```bash
-kubectl -n flux logs deployment/flux | grep identity.pub | cut -d '"' -f2
+kubectl -n fluxcd logs deployment/flux | grep identity.pub | cut -d '"' -f2
 ```
 
 In order to sync your cluster state with Git you need to copy the public key and create a
 @@ -344,9 +345,9 @@ launch the `frontend` and `backend` apps.
 
 A CI/CD pipeline for the `frontend` release could look like this:
 
-* cut a release from the master branch of the podinfo code repo with the git tag `1.7.1`
-* CI builds the image and pushes the `podinfo:1.7.1` image to the container registry
-* Flux scans the registry and updates the Helm release `image.tag` to `1.7.1`
+* cut a release from the master branch of the podinfo code repo with the git tag `3.1.1`
+* CI builds the image and pushes the `podinfo:3.1.1` image to the container registry
+* Flux scans the registry and updates the Helm release `image.tag` to `3.1.1`
 * Flux commits and push the change to the cluster repo
 * Flux applies the updated Helm release on the cluster
 * Flux Helm Operator picks up the change and calls Tiller to upgrade the release
 @@ -354,9 +355,9 @@ A CI/CD pipeline for the `frontend` release could look like this:
 * Flagger runs the helm test before routing traffic to the canary service
 * Flagger starts the load test and runs the canary analysis
 * Based on the analysis result the canary deployment is promoted to production or rolled back
-* Flagger sends a Slack notification with the canary result
+* Flagger sends a Slack or MS Teams notification with the canary result
 
-If the canary fails, fix the bug, do another patch release eg `1.7.2` and the whole process will run again.
+If the canary fails, fix the bug, do another patch release eg `3.1.2` and the whole process will run again.
 
 A canary deployment can fail due to any of the following reasons:
````
````diff
 @@ -131,7 +131,7 @@ Trigger a canary deployment by updating the container image:
 
 ```bash
 kubectl -n test set image deployment/abtest \
-  podinfod=quay.io/stefanprodan/podinfo:1.7.1
+  podinfod=stefanprodan/podinfo:2.0.1
 ```
 
 Flagger detects that the deployment revision changed and starts a new rollout:
````
````diff
 @@ -37,8 +37,10 @@ Deploy the load testing service to generate traffic during the canary analysis:
 
 ```bash
 helm upgrade -i flagger-loadtester flagger/loadtester \
   --namespace=test \
-  --set meshName=global.appmesh-system \
-  --set "backends[0]=podinfo.test"
+  --set meshName=global \
+  --set "backends[0]=podinfo.test" \
+  --set "backends[1]=podinfo-canary.test" \
+  --set "backends[2]=podinfo-primary.test"
 ```
 
 Create a canary custom resource:
 @@ -67,7 +69,7 @@ spec:
     # container port
     port: 9898
     # App Mesh reference
-    meshName: global.appmesh-system
+    meshName: global
     # App Mesh egress (optional)
     backends:
       - backend.test
````
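The same load tester backends can be set through the chart's values file instead of `--set` flags. A hedged sketch mirroring the command above; the service names follow this tutorial's `test` namespace and should be treated as examples:

```yaml
# Illustrative loadtester values for App Mesh egress.
meshName: global
backends:
  - podinfo.test
  - podinfo-canary.test
  - podinfo-primary.test
```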
````diff
 @@ -176,7 +178,7 @@ Trigger a canary deployment by updating the container image:
 
 ```bash
 kubectl -n test set image deployment/podinfo \
-  podinfod=quay.io/stefanprodan/podinfo:1.7.1
+  podinfod=stefanprodan/podinfo:2.0.1
 ```
 
 Flagger detects that the deployment revision changed and starts a new rollout:
````
@@ -239,7 +241,7 @@ Trigger a canary deployment:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=quay.io/stefanprodan/podinfo:1.4.2
|
||||
podinfod=stefanprodan/podinfo:2.0.2
|
||||
```
|
||||
|
||||
Exec into the load tester pod with:
|
||||
|
||||
@@ -172,7 +172,7 @@ Trigger a deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=quay.io/stefanprodan/podinfo:1.7.1
|
||||
podinfod=stefanprodan/podinfo:2.0.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
@@ -297,7 +297,7 @@ Trigger a deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=quay.io/stefanprodan/podinfo:1.7.3
|
||||
podinfod=stefanprodan/podinfo:2.0.3
|
||||
```
|
||||
|
||||
Generate 404s:
|
||||
|
||||
@@ -103,6 +103,7 @@ metadata:
name: podinfo
namespace: test
spec:
provider: gloo
# deployment reference
targetRef:
apiVersion: apps/v1

@@ -197,7 +198,7 @@ Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.7.1
podinfod=stefanprodan/podinfo:2.0.1
```

Flagger detects that the deployment revision changed and starts a new rollout:

@@ -251,7 +252,7 @@ Trigger another canary deployment:

```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.2
podinfod=stefanprodan/podinfo:2.0.2
```

Generate HTTP 500 errors:

@@ -334,7 +335,7 @@ Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.3
podinfod=stefanprodan/podinfo:2.0.3
```

Generate 404s:

@@ -362,5 +363,3 @@ Canary failed! Scaling down podinfo.test
```

If you have Slack configured, Flagger will send a notification with the reason why the canary failed.

@@ -150,7 +150,7 @@ Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.7.1
podinfod=stefanprodan/podinfo:2.0.1
```

Flagger detects that the deployment revision changed and starts a new rollout:

@@ -208,7 +208,7 @@ Trigger another canary deployment:

```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.7.2
podinfod=stefanprodan/podinfo:2.0.2
```

Exec into the load tester pod with:

@@ -297,7 +297,7 @@ Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.7.3
podinfod=stefanprodan/podinfo:2.0.3
```

Generate 404s:

@@ -444,7 +444,7 @@ Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.7.4
podinfod=stefanprodan/podinfo:2.0.4
```

Flagger detects that the deployment revision changed and starts the A/B testing:

@@ -104,6 +104,7 @@ metadata:
name: podinfo
namespace: test
spec:
provider: nginx
# deployment reference
targetRef:
apiVersion: apps/v1

@@ -189,7 +190,7 @@ Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.7.1
podinfod=stefanprodan/podinfo:2.0.1
```

Flagger detects that the deployment revision changed and starts a new rollout:

@@ -243,7 +244,7 @@ Trigger another canary deployment:

```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.2
podinfod=stefanprodan/podinfo:2.0.2
```

Generate HTTP 500 errors:

@@ -313,7 +314,7 @@ Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.3
podinfod=stefanprodan/podinfo:2.0.3
```

Generate high response latency:

@@ -387,7 +388,7 @@ Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.5.0
podinfod=stefanprodan/podinfo:2.0.4
```

Flagger detects that the deployment revision changed and starts the A/B testing:

@@ -79,8 +79,15 @@ spec:
# milliseconds
threshold: 500
interval: 30s
# generate traffic during analysis
# testing (optional)
webhooks:
- name: acceptance-test
type: pre-rollout
url: http://flagger-loadtester.test/
timeout: 30s
metadata:
type: bash
cmd: "curl -sd 'test' http://podinfo-canary:9898/token | grep token"
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
@@ -94,6 +101,9 @@ Save the above resource as podinfo-canary.yaml and then apply it:
kubectl apply -f ./podinfo-canary.yaml
```

When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary.
The canary analysis will run for five minutes while validating the HTTP metrics and rollout hooks every minute.
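
As a sanity check, the acceptance test above can also be exercised by hand; a minimal sketch, assuming the load tester pod is running in the `test` namespace:

```bash
# Run the same check the pre-rollout webhook performs (pod name is a placeholder):
kubectl -n test exec -it flagger-loadtester-xx-xx -- \
  curl -sd 'test' http://podinfo-canary:9898/token | grep token
```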

After a couple of seconds Flagger will create the canary objects:

```bash
@@ -119,7 +129,7 @@ Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.7.1
podinfod=stefanprodan/podinfo:2.0.1
```

Flagger detects that the deployment revision changed and starts a new rollout:
@@ -169,14 +179,17 @@ prod backend Failed 0 2019-01-14T17:05:07Z

During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses the rollout.

Create a tester pod and exec into it:
Trigger another canary deployment:

```bash
kubectl -n test run tester \
--image=quay.io/stefanprodan/podinfo:1.2.1 \
-- ./podinfo --port=9898
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:2.0.2
```

kubectl -n test exec -it tester-xx-xx sh
Exec into the load tester pod with:

```bash
kubectl -n test exec -it flagger-loadtester-xx-xx sh
```

Generate HTTP 500 errors:

@@ -216,4 +229,3 @@ Events:
Warning Synced 1m flagger Rolling back podinfo.test failed checks threshold reached 10
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
```

go.mod
@@ -1,6 +1,6 @@
module github.com/weaveworks/flagger

go 1.12
go 1.13

require (
cloud.google.com/go v0.37.4 // indirect
@@ -1,23 +1,38 @@
#!/usr/bin/env bash

set -o errexit
set -o nounset
set -o pipefail

REPO_ROOT=$(git rev-parse --show-toplevel)
CODEGEN_VERSION="@v0.0.0-20190620073620-d55040311883"
CODEGEN_PKG=${CODEGEN_PKG:-$(echo `go env GOPATH`"/pkg/mod/k8s.io/code-generator${CODEGEN_VERSION}")}
SCRIPT_ROOT=$(realpath $(dirname ${BASH_SOURCE})/..)

if [[ "$CIRCLECI" ]]; then
ls $(echo `go env GOPATH`'/pkg/mod/k8s.io/');
mkdir -p ${REPO_ROOT}/bin;
cp -r ${CODEGEN_PKG} ${REPO_ROOT}/bin/;
CODEGEN_PKG=${REPO_ROOT}/bin/code-generator${CODEGEN_VERSION};
echo ">> $CODEGEN_PKG";
fi
# Grab code-generator version from go.sum.
CODEGEN_VERSION=$(grep 'k8s.io/code-generator' go.sum | awk '{print $2}' | head -1)
CODEGEN_PKG=$(echo `go env GOPATH`"/pkg/mod/k8s.io/code-generator@${CODEGEN_VERSION}")

echo ">> Using ${CODEGEN_PKG}"

# code-generator does work with go.mod but makes assumptions about
# the project living in `$GOPATH/src`. To work around this and support
# any location; create a temporary directory, use this as an output
# base, and copy everything back once generated.
TEMP_DIR=$(mktemp -d)
cleanup() {
echo ">> Removing ${TEMP_DIR}"
rm -rf ${TEMP_DIR}
}
trap "cleanup" EXIT SIGINT

echo ">> Temporary output directory ${TEMP_DIR}"

# Ensure we can execute.
chmod +x ${CODEGEN_PKG}/generate-groups.sh

${CODEGEN_PKG}/generate-groups.sh all \
github.com/weaveworks/flagger/pkg/client github.com/weaveworks/flagger/pkg/apis \
"appmesh:v1beta1 istio:v1alpha3 flagger:v1alpha3 smi:v1alpha1" \
--go-header-file ${REPO_ROOT}/hack/boilerplate.go.txt
--output-base "${TEMP_DIR}" \
--go-header-file ${SCRIPT_ROOT}/hack/boilerplate.go.txt

# Copy everything back.
cp -r "${TEMP_DIR}/github.com/weaveworks/flagger/." "${SCRIPT_ROOT}/"
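
For reference, regenerating the clients is a matter of running this script from the repository root; the exact script path is an assumption:

```bash
# Hypothetical invocation (script location assumed):
cd "$(git rev-parse --show-toplevel)"
./hack/update-codegen.sh
```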
@@ -33,6 +33,22 @@ spec:
- name: Weight
type: string
JSONPath: .status.canaryWeight
- name: FailedChecks
type: string
JSONPath: .status.failedChecks
priority: 1
- name: Interval
type: string
JSONPath: .spec.canaryAnalysis.interval
priority: 1
- name: StepWeight
type: string
JSONPath: .spec.canaryAnalysis.stepWeight
priority: 1
- name: MaxWeight
type: string
JSONPath: .spec.canaryAnalysis.maxWeight
priority: 1
- name: LastTransitionTime
type: string
JSONPath: .status.lastTransitionTime

@@ -46,12 +62,15 @@ spec:
- canaryAnalysis
properties:
provider:
description: Traffic management provider
type: string
progressDeadlineSeconds:
description: Deployment progress deadline
type: number
targetRef:
description: Deployment selector
type: object
required: ['apiVersion', 'kind', 'name']
required: ["apiVersion", "kind", "name"]
properties:
apiVersion:
type: string

@@ -60,10 +79,11 @@ spec:
name:
type: string
autoscalerRef:
description: HPA selector
anyOf:
- type: string
- type: object
required: ['apiVersion', 'kind', 'name']
required: ["apiVersion", "kind", "name"]
properties:
apiVersion:
type: string

@@ -72,10 +92,11 @@ spec:
name:
type: string
ingressRef:
description: NGINX ingress selector
anyOf:
- type: string
- type: object
required: ['apiVersion', 'kind', 'name']
required: ["apiVersion", "kind", "name"]
properties:
apiVersion:
type: string

@@ -85,18 +106,63 @@ spec:
type: string
service:
type: object
required: ['port']
required: ["port"]
properties:
port:
description: Container port number
type: number
portName:
description: Container port name
type: string
portDiscovery:
description: Enable port discovery
type: boolean
meshName:
description: AppMesh mesh name
type: string
backends:
description: AppMesh backend array
anyOf:
- type: string
- type: array
timeout:
description: Istio HTTP or gRPC request timeout
type: string
trafficPolicy:
description: Istio traffic policy
anyOf:
- type: string
- type: object
match:
description: Istio URL match conditions
anyOf:
- type: string
- type: array
rewrite:
description: Istio URL rewrite
anyOf:
- type: string
- type: object
headers:
description: Istio headers operations
anyOf:
- type: string
- type: object
corsPolicy:
description: Istio CORS policy
anyOf:
- type: string
- type: object
gateways:
description: Istio gateways list
anyOf:
- type: string
- type: array
hosts:
description: Istio hosts list
anyOf:
- type: string
- type: array
skipAnalysis:
type: boolean
canaryAnalysis:

@@ -117,13 +183,18 @@ spec:
stepWeight:
description: Canary incremental traffic percentage step
type: number
match:
description: A/B testing match conditions
anyOf:
- type: string
- type: array
metrics:
description: Prometheus query list for this canary
type: array
properties:
items:
type: object
required: ['name', 'threshold']
required: ["name", "threshold"]
properties:
name:
description: Name of the Prometheus metric

@@ -144,7 +215,7 @@ spec:
properties:
items:
type: object
required: ['name', 'url', 'timeout']
required: ["name", "url"]
properties:
name:
description: Name of the webhook

@@ -154,8 +225,10 @@ spec:
type: string
enum:
- ""
- confirm-rollout
- pre-rollout
- rollout
- confirm-promotion
- post-rollout
url:
description: URL address of this webhook

@@ -165,6 +238,11 @@ spec:
description: Request timeout for this webhook
type: string
pattern: "^[0-9]+(m|s)"
metadata:
description: Metadata (key-value pairs) for this webhook
anyOf:
- type: string
- type: object
status:
properties:
phase:

@@ -176,6 +254,7 @@ spec:
- Initialized
- Waiting
- Progressing
- Promoting
- Finalising
- Succeeded
- Failed

@@ -201,7 +280,7 @@ spec:
properties:
items:
type: object
required: ['type', 'status', 'reason']
required: ["type", "status", "reason"]
properties:
lastTransitionTime:
description: LastTransitionTime of this condition

@@ -8,4 +8,4 @@ resources:
- deployment.yaml
images:
- name: weaveworks/flagger
newTag: 0.18.0
newTag: 0.18.5

@@ -8,6 +8,7 @@ spec:
containers:
- name: flagger
args:
- -log-level=info
- -mesh-provider=istio
- -metrics-server=http://prometheus:9090
- -slack-user=flagger

@@ -8,6 +8,7 @@ spec:
containers:
- name: flagger
args:
- -log-level=info
- -mesh-provider=kubernetes
- -metrics-server=http://flagger-prometheus:9090
- -slack-user=flagger

@@ -8,6 +8,7 @@ spec:
containers:
- name: flagger
args:
- -log-level=info
- -mesh-provider=linkerd
- -metrics-server=http://linkerd-prometheus:9090
- -slack-user=flagger
@@ -10,7 +10,7 @@ spec:
progressDeadlineSeconds: 60
strategy:
rollingUpdate:
maxUnavailable: 0
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:

@@ -24,21 +24,27 @@ spec:
spec:
containers:
- name: podinfod
image: quay.io/stefanprodan/podinfo:1.7.0
image: stefanprodan/podinfo:3.1.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9898
name: http
- name: http
containerPort: 9898
protocol: TCP
- name: http-metrics
containerPort: 9797
protocol: TCP
- name: grpc
containerPort: 9999
protocol: TCP
command:
- ./podinfo
- --port=9898
- --port-metrics=9797
- --grpc-port=9999
- --grpc-service-name=podinfo
- --level=info
- --random-delay=false
- --random-error=false
env:
- name: PODINFO_UI_COLOR
value: blue
livenessProbe:
exec:
command:

@@ -17,7 +17,7 @@ spec:
spec:
containers:
- name: loadtester
image: weaveworks/flagger-loadtester:0.6.1
image: weaveworks/flagger-loadtester:0.8.0
imagePullPolicy: IfNotPresent
ports:
- name: http
@@ -102,13 +102,18 @@ type VirtualServiceSpec struct {
Routes []Route `json:"routes,omitempty"`
}

// VirtualRouter is the spec for a VirtualRouter resource
type VirtualRouter struct {
Name string `json:"name"`
Name string `json:"name"`
Listeners []Listener `json:"listeners,omitempty"`
}

type Route struct {
Name string `json:"name"`
Http HttpRoute `json:"http"`
Name string `json:"name"`
// +optional
Http *HttpRoute `json:"http,omitempty"`
// +optional
Tcp *TcpRoute `json:"tcp,omitempty"`
}

type HttpRoute struct {

@@ -124,6 +129,14 @@ type HttpRouteAction struct {
WeightedTargets []WeightedTarget `json:"weightedTargets"`
}

type TcpRoute struct {
Action TcpRouteAction `json:"action"`
}

type TcpRouteAction struct {
WeightedTargets []WeightedTarget `json:"weightedTargets"`
}

type WeightedTarget struct {
VirtualNodeName string `json:"virtualNodeName"`
Weight int64 `json:"weight"`

@@ -203,6 +216,8 @@ type VirtualNodeSpec struct {
ServiceDiscovery *ServiceDiscovery `json:"serviceDiscovery,omitempty"`
// +optional
Backends []Backend `json:"backends,omitempty"`
// +optional
Logging *Logging `json:"logging,omitempty"`
}

type Listener struct {

@@ -237,6 +252,21 @@ type VirtualServiceBackend struct {
VirtualServiceName string `json:"virtualServiceName"`
}

// Logging refers to https://docs.aws.amazon.com/app-mesh/latest/APIReference/API_Logging.html
type Logging struct {
AccessLog *AccessLog `json:"accessLog"`
}

// AccessLog refers to https://docs.aws.amazon.com/app-mesh/latest/APIReference/API_AccessLog.html
type AccessLog struct {
File *FileAccessLog `json:"file"`
}

// FileAccessLog refers to https://docs.aws.amazon.com/app-mesh/latest/APIReference/API_FileAccessLog.html
type FileAccessLog struct {
Path string `json:"path"`
}

// VirtualNodeStatus is the status for a VirtualNode resource
type VirtualNodeStatus struct {
MeshArn *string `json:"meshArn,omitempty"`
@@ -24,6 +24,27 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AccessLog) DeepCopyInto(out *AccessLog) {
*out = *in
if in.File != nil {
in, out := &in.File, &out.File
*out = new(FileAccessLog)
**out = **in
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessLog.
func (in *AccessLog) DeepCopy() *AccessLog {
if in == nil {
return nil
}
out := new(AccessLog)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Backend) DeepCopyInto(out *Backend) {
*out = *in

@@ -73,6 +94,22 @@ func (in *DnsServiceDiscovery) DeepCopy() *DnsServiceDiscovery {
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FileAccessLog) DeepCopyInto(out *FileAccessLog) {
*out = *in
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FileAccessLog.
func (in *FileAccessLog) DeepCopy() *FileAccessLog {
if in == nil {
return nil
}
out := new(FileAccessLog)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HttpRoute) DeepCopyInto(out *HttpRoute) {
*out = *in

@@ -145,6 +182,27 @@ func (in *Listener) DeepCopy() *Listener {
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Logging) DeepCopyInto(out *Logging) {
*out = *in
if in.AccessLog != nil {
in, out := &in.AccessLog, &out.AccessLog
*out = new(AccessLog)
(*in).DeepCopyInto(*out)
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Logging.
func (in *Logging) DeepCopy() *Logging {
if in == nil {
return nil
}
out := new(Logging)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Mesh) DeepCopyInto(out *Mesh) {
*out = *in

@@ -304,7 +362,16 @@ func (in *PortMapping) DeepCopy() *PortMapping {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Route) DeepCopyInto(out *Route) {
*out = *in
in.Http.DeepCopyInto(&out.Http)
if in.Http != nil {
in, out := &in.Http, &out.Http
*out = new(HttpRoute)
(*in).DeepCopyInto(*out)
}
if in.Tcp != nil {
in, out := &in.Tcp, &out.Tcp
*out = new(TcpRoute)
(*in).DeepCopyInto(*out)
}
return
}

@@ -344,6 +411,44 @@ func (in *ServiceDiscovery) DeepCopy() *ServiceDiscovery {
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TcpRoute) DeepCopyInto(out *TcpRoute) {
*out = *in
in.Action.DeepCopyInto(&out.Action)
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TcpRoute.
func (in *TcpRoute) DeepCopy() *TcpRoute {
if in == nil {
return nil
}
out := new(TcpRoute)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TcpRouteAction) DeepCopyInto(out *TcpRouteAction) {
*out = *in
if in.WeightedTargets != nil {
in, out := &in.WeightedTargets, &out.WeightedTargets
*out = make([]WeightedTarget, len(*in))
copy(*out, *in)
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TcpRouteAction.
func (in *TcpRouteAction) DeepCopy() *TcpRouteAction {
if in == nil {
return nil
}
out := new(TcpRouteAction)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualNode) DeepCopyInto(out *VirtualNode) {
*out = *in

@@ -453,6 +558,11 @@ func (in *VirtualNodeSpec) DeepCopyInto(out *VirtualNodeSpec) {
*out = make([]Backend, len(*in))
copy(*out, *in)
}
if in.Logging != nil {
in, out := &in.Logging, &out.Logging
*out = new(Logging)
(*in).DeepCopyInto(*out)
}
return
}

@@ -514,6 +624,11 @@ func (in *VirtualNodeStatus) DeepCopy() *VirtualNodeStatus {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualRouter) DeepCopyInto(out *VirtualRouter) {
*out = *in
if in.Listeners != nil {
in, out := &in.Listeners, &out.Listeners
*out = make([]Listener, len(*in))
copy(*out, *in)
}
return
}

@@ -640,7 +755,7 @@ func (in *VirtualServiceSpec) DeepCopyInto(out *VirtualServiceSpec) {
if in.VirtualRouter != nil {
in, out := &in.VirtualRouter, &out.VirtualRouter
*out = new(VirtualRouter)
**out = **in
(*in).DeepCopyInto(*out)
}
if in.Routes != nil {
in, out := &in.Routes, &out.Routes
@@ -47,7 +47,9 @@ const (
CanaryPhaseWaiting CanaryPhase = "Waiting"
// CanaryPhaseProgressing means the canary analysis is underway
CanaryPhaseProgressing CanaryPhase = "Progressing"
// CanaryPhaseProgressing means the canary analysis is finished and traffic has been routed back to primary
// CanaryPhasePromoting means the canary analysis is finished and the primary spec has been updated
CanaryPhasePromoting CanaryPhase = "Promoting"
// CanaryPhaseFinalising means the canary promotion is finished and traffic has been routed back to primary
CanaryPhaseFinalising CanaryPhase = "Finalising"
// CanaryPhaseSucceeded means the canary analysis has been successful
// and the canary deployment has been promoted

@@ -139,6 +139,8 @@ const (
PostRolloutHook HookType = "post-rollout"
// ConfirmRolloutHook halts the canary analysis until the webhook returns HTTP 200
ConfirmRolloutHook HookType = "confirm-rollout"
// ConfirmPromotionHook halts the canary promotion until the webhook returns HTTP 200
ConfirmPromotionHook HookType = "confirm-promotion"
)
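
The new `confirm-promotion` hook is declared like the other webhooks in the canary analysis spec. A minimal sketch of adding such a gate to an existing canary; the gate URL is an assumption, not an endpoint confirmed by this changeset:

```bash
# Hypothetical: append a confirm-promotion gate to a canary (URL assumed)
kubectl -n test patch canary/podinfo --type=json -p='[
  {"op": "add", "path": "/spec/canaryAnalysis/webhooks/-",
   "value": {"name": "promotion-gate", "type": "confirm-promotion",
             "url": "http://flagger-loadtester.test/gate/approve"}}
]'
```

Until the webhook returns HTTP 200 the promotion is halted and Flagger records a "waiting for promotion approval" event, as implemented in `runConfirmPromotionHooks` further down.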

// CanaryWebhook holds the reference to external checks used for canary analysis

@@ -211,6 +211,9 @@ func (c *Deployer) MakeStatusConditions(canaryStatus flaggerv1.CanaryStatus,
case flaggerv1.CanaryPhaseProgressing:
status = corev1.ConditionUnknown
message = "New revision detected, starting canary analysis."
case flaggerv1.CanaryPhasePromoting:
status = corev1.ConditionUnknown
message = "Canary analysis completed, starting primary rolling update."
case flaggerv1.CanaryPhaseFinalising:
status = corev1.ConditionUnknown
message = "Canary analysis completed, routing all traffic to primary."

@@ -82,7 +82,7 @@ func SetupMocks(abtest bool) Mocks {
flaggerInformer := flaggerInformerFactory.Flagger().V1alpha3().Canaries()

// init router
rf := router.NewFactory(nil, kubeClient, flaggerClient, logger, flaggerClient)
rf := router.NewFactory(nil, kubeClient, flaggerClient, "annotationsPrefix", logger, flaggerClient)

// init observer
observerFactory, _ := metrics.NewFactory("fake", "istio", 5*time.Second)
|
||||
// create primary deployment and hpa if needed
|
||||
// skip primary check for Istio since the deployment will become ready after the ClusterIP are created
|
||||
skipPrimaryCheck := false
|
||||
if skipLivenessChecks || strings.Contains(provider, "istio") {
|
||||
if skipLivenessChecks || strings.Contains(provider, "istio") || strings.Contains(provider, "appmesh") {
|
||||
skipPrimaryCheck = true
|
||||
}
|
||||
label, ports, err := c.deployer.Initialize(cd, skipPrimaryCheck)
|
||||
@@ -214,7 +214,27 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
|
||||
return
|
||||
}
|
||||
|
||||
// scale canary to zero if analysis has succeeded
|
||||
// route all traffic to primary if analysis has succeeded
|
||||
if cd.Status.Phase == flaggerv1.CanaryPhasePromoting {
|
||||
if provider != "kubernetes" {
|
||||
c.recordEventInfof(cd, "Routing all traffic to primary")
|
||||
if err := meshRouter.SetRoutes(cd, 100, 0); err != nil {
|
||||
c.recordEventWarningf(cd, "%v", err)
|
||||
return
|
||||
}
|
||||
c.recorder.SetWeight(cd, 100, 0)
|
||||
}
|
||||
|
||||
// update status phase
|
||||
if err := c.deployer.SetStatusPhase(cd, flaggerv1.CanaryPhaseFinalising); err != nil {
|
||||
c.recordEventWarningf(cd, "%v", err)
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// scale canary to zero if promotion has finished
|
||||
if cd.Status.Phase == flaggerv1.CanaryPhaseFinalising {
|
||||
if err := c.deployer.Scale(cd, 0); err != nil {
|
||||
c.recordEventWarningf(cd, "%v", err)
|
||||
@@ -283,7 +303,7 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
|
||||
|
||||
// check if the canary success rate is above the threshold
|
||||
// skip check if no traffic is routed to canary
|
||||
if canaryWeight == 0 {
|
||||
if canaryWeight == 0 && cd.Status.Iterations == 0 {
|
||||
c.recordEventInfof(cd, "Starting canary analysis for %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
|
||||
|
||||
// run pre-rollout web hooks
|
||||
@@ -304,8 +324,8 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
|
||||
}
|
||||
}
|
||||
|
||||
// canary fix routing: A/B testing
|
||||
if len(cd.Spec.CanaryAnalysis.Match) > 0 || cd.Spec.CanaryAnalysis.Iterations > 0 {
|
||||
// strategy: A/B testing
|
||||
if len(cd.Spec.CanaryAnalysis.Match) > 0 && cd.Spec.CanaryAnalysis.Iterations > 0 {
|
||||
// route traffic to canary and increment iterations
|
||||
if cd.Spec.CanaryAnalysis.Iterations > cd.Status.Iterations {
|
||||
if err := meshRouter.SetRoutes(cd, 0, 100); err != nil {
|
||||
@@ -323,6 +343,11 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
|
||||
return
|
||||
}
|
||||
|
||||
// check promotion gate
|
||||
if promote := c.runConfirmPromotionHooks(cd); !promote {
|
||||
return
|
||||
}
|
||||
|
||||
// promote canary - max iterations reached
|
||||
if cd.Spec.CanaryAnalysis.Iterations == cd.Status.Iterations {
|
||||
c.recordEventInfof(cd, "Copying %s.%s template spec to %s.%s",
|
||||
@@ -331,6 +356,47 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
|
||||
c.recordEventWarningf(cd, "%v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// update status phase
|
||||
if err := c.deployer.SetStatusPhase(cd, flaggerv1.CanaryPhasePromoting); err != nil {
|
||||
c.recordEventWarningf(cd, "%v", err)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// strategy: Blue/Green
|
||||
if cd.Spec.CanaryAnalysis.Iterations > 0 {
|
||||
// increment iterations
|
||||
if cd.Spec.CanaryAnalysis.Iterations > cd.Status.Iterations {
|
||||
if err := c.deployer.SetStatusIterations(cd, cd.Status.Iterations+1); err != nil {
|
||||
c.recordEventWarningf(cd, "%v", err)
|
||||
return
|
||||
}
|
||||
c.recordEventInfof(cd, "Advance %s.%s canary iteration %v/%v",
|
||||
cd.Name, cd.Namespace, cd.Status.Iterations+1, cd.Spec.CanaryAnalysis.Iterations)
|
||||
return
|
||||
}
|
||||
|
||||
// check promotion gate
|
||||
if promote := c.runConfirmPromotionHooks(cd); !promote {
|
||||
return
|
||||
}
|
||||
|
||||
// route all traffic to canary - max iterations reached
|
||||
if cd.Spec.CanaryAnalysis.Iterations == cd.Status.Iterations {
|
||||
if provider != "kubernetes" {
|
||||
c.recordEventInfof(cd, "Routing all traffic to canary")
|
||||
if err := meshRouter.SetRoutes(cd, 0, 100); err != nil {
|
||||
c.recordEventWarningf(cd, "%v", err)
|
||||
return
|
||||
}
|
||||
c.recorder.SetWeight(cd, 0, 100)
|
||||
}
|
||||
|
||||
// increment iterations
|
||||
if err := c.deployer.SetStatusIterations(cd, cd.Status.Iterations+1); err != nil {
|
||||
c.recordEventWarningf(cd, "%v", err)
|
||||
@@ -339,82 +405,80 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
|
||||
return
|
||||
}
|
||||
|
||||
// route all traffic to primary
|
||||
// promote canary - max iterations reached
|
||||
if cd.Spec.CanaryAnalysis.Iterations < cd.Status.Iterations {
|
||||
primaryWeight = 100
|
||||
canaryWeight = 0
|
||||
if err := meshRouter.SetRoutes(cd, primaryWeight, canaryWeight); err != nil {
|
||||
c.recordEventWarningf(cd, "%v", err)
|
||||
return
|
||||
}
|
||||
c.recorder.SetWeight(cd, primaryWeight, canaryWeight)
|
||||
|
||||
// update status phase
|
||||
if err := c.deployer.SetStatusPhase(cd, flaggerv1.CanaryPhaseFinalising); err != nil {
|
||||
c.recordEventWarningf(cd, "%v", err)
|
||||
return
|
||||
}
|
||||
|
||||
c.recordEventInfof(cd, "Routing all traffic to primary")
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// canary incremental traffic weight
|
||||
if canaryWeight < maxWeight {
|
||||
primaryWeight -= cd.Spec.CanaryAnalysis.StepWeight
|
||||
if primaryWeight < 0 {
|
||||
primaryWeight = 0
|
||||
}
|
||||
canaryWeight += cd.Spec.CanaryAnalysis.StepWeight
|
||||
if primaryWeight > 100 {
|
||||
primaryWeight = 100
|
||||
}
|
||||
|
||||
if err := meshRouter.SetRoutes(cd, primaryWeight, canaryWeight); err != nil {
|
||||
c.recordEventWarningf(cd, "%v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// update weight status
|
||||
if err := c.deployer.SetStatusWeight(cd, canaryWeight); err != nil {
|
||||
c.recordEventWarningf(cd, "%v", err)
|
||||
return
|
||||
}
|
||||
|
||||
c.recorder.SetWeight(cd, primaryWeight, canaryWeight)
|
||||
c.recordEventInfof(cd, "Advance %s.%s canary weight %v", cd.Name, cd.Namespace, canaryWeight)
|
||||
|
||||
// promote canary
|
||||
if canaryWeight >= maxWeight {
|
||||
c.recordEventInfof(cd, "Copying %s.%s template spec to %s.%s",
|
||||
cd.Spec.TargetRef.Name, cd.Namespace, primaryName, cd.Namespace)
|
||||
if err := c.deployer.Promote(cd); err != nil {
|
||||
c.recordEventWarningf(cd, "%v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// route all traffic to primary
|
||||
primaryWeight = 100
|
||||
canaryWeight = 0
|
||||
if err := meshRouter.SetRoutes(cd, primaryWeight, canaryWeight); err != nil {
|
||||
c.recordEventWarningf(cd, "%v", err)
|
||||
return
|
||||
}
|
||||
c.recorder.SetWeight(cd, primaryWeight, canaryWeight)
|
||||
|
||||
// update status phase
|
||||
if err := c.deployer.SetStatusPhase(cd, flaggerv1.CanaryPhaseFinalising); err != nil {
|
||||
c.recordEventWarningf(cd, "%v", err)
|
||||
// update status phase
|
||||
if err := c.deployer.SetStatusPhase(cd, flaggerv1.CanaryPhasePromoting); err != nil {
|
||||
c.recordEventWarningf(cd, "%v", err)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
c.recordEventInfof(cd, "Routing all traffic to primary")
|
||||
return
|
||||
}
|
||||
|
||||
// strategy: Canary progressive traffic increase
|
||||
if cd.Spec.CanaryAnalysis.StepWeight > 0 {
|
||||
// increase traffic weight
|
||||
if canaryWeight < maxWeight {
|
||||
primaryWeight -= cd.Spec.CanaryAnalysis.StepWeight
|
||||
if primaryWeight < 0 {
|
||||
primaryWeight = 0
|
||||
}
|
||||
canaryWeight += cd.Spec.CanaryAnalysis.StepWeight
|
||||
if canaryWeight > 100 {
|
||||
canaryWeight = 100
|
||||
}
|
||||
|
||||
if err := meshRouter.SetRoutes(cd, primaryWeight, canaryWeight); err != nil {
|
||||
c.recordEventWarningf(cd, "%v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := c.deployer.SetStatusWeight(cd, canaryWeight); err != nil {
|
||||
c.recordEventWarningf(cd, "%v", err)
|
||||
return
|
||||
}
|
||||
|
||||
c.recorder.SetWeight(cd, primaryWeight, canaryWeight)
|
||||
c.recordEventInfof(cd, "Advance %s.%s canary weight %v", cd.Name, cd.Namespace, canaryWeight)
|
||||
return
|
||||
}
|
||||
|
||||
// promote canary - max weight reached
|
||||
if canaryWeight >= maxWeight {
|
||||
// check promotion gate
|
||||
if promote := c.runConfirmPromotionHooks(cd); !promote {
|
||||
return
|
||||
}
|
||||
|
||||
// update primary spec
|
||||
c.recordEventInfof(cd, "Copying %s.%s template spec to %s.%s",
|
||||
cd.Spec.TargetRef.Name, cd.Namespace, primaryName, cd.Namespace)
|
||||
if err := c.deployer.Promote(cd); err != nil {
|
||||
c.recordEventWarningf(cd, "%v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// update status phase
|
||||
if err := c.deployer.SetStatusPhase(cd, flaggerv1.CanaryPhasePromoting); err != nil {
|
||||
c.recordEventWarningf(cd, "%v", err)
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (c *Controller) shouldSkipAnalysis(cd *flaggerv1.Canary, meshRouter router.Interface, primaryWeight int, canaryWeight int) bool {

@@ -466,6 +530,7 @@ func (c *Controller) shouldAdvance(cd *flaggerv1.Canary) (bool, error) {
cd.Status.Phase == flaggerv1.CanaryPhaseInitializing ||
cd.Status.Phase == flaggerv1.CanaryPhaseProgressing ||
cd.Status.Phase == flaggerv1.CanaryPhaseWaiting ||
cd.Status.Phase == flaggerv1.CanaryPhasePromoting ||
cd.Status.Phase == flaggerv1.CanaryPhaseFinalising {
return true, nil
}

@@ -490,6 +555,7 @@ func (c *Controller) shouldAdvance(cd *flaggerv1.Canary) (bool, error) {
func (c *Controller) checkCanaryStatus(cd *flaggerv1.Canary, shouldAdvance bool) bool {
c.recorder.SetStatus(cd, cd.Status.Phase)
if cd.Status.Phase == flaggerv1.CanaryPhaseProgressing ||
cd.Status.Phase == flaggerv1.CanaryPhasePromoting ||
cd.Status.Phase == flaggerv1.CanaryPhaseFinalising {
return true
}

@@ -565,6 +631,23 @@ func (c *Controller) runConfirmRolloutHooks(canary *flaggerv1.Canary) bool {
return true
}

func (c *Controller) runConfirmPromotionHooks(canary *flaggerv1.Canary) bool {
for _, webhook := range canary.Spec.CanaryAnalysis.Webhooks {
if webhook.Type == flaggerv1.ConfirmPromotionHook {
err := CallWebhook(canary.Name, canary.Namespace, flaggerv1.CanaryPhaseProgressing, webhook)
if err != nil {
c.recordEventWarningf(canary, "Halt %s.%s advancement waiting for promotion approval %s",
canary.Name, canary.Namespace, webhook.Name)
c.sendNotification(canary, "Canary promotion is waiting for approval.", false, false)
return false
} else {
c.recordEventInfof(canary, "Confirm-promotion check %s passed", webhook.Name)
}
}
}
return true
}

func (c *Controller) runPreRolloutHooks(canary *flaggerv1.Canary) bool {
for _, webhook := range canary.Spec.CanaryAnalysis.Webhooks {
if webhook.Type == flaggerv1.PreRolloutHook {

@@ -614,8 +697,8 @@ func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool {
if r.Spec.Provider != "" {
metricsProvider = r.Spec.Provider

// set the metrics provider to Linkerd Prometheus when using NGINX as Linkerd Ingress
if r.Spec.Provider == "nginx" && strings.Contains(c.meshProvider, "linkerd") {
// set the metrics server to Linkerd Prometheus when Linkerd is the default mesh provider
if strings.Contains(c.meshProvider, "linkerd") {
metricsProvider = "linkerd"
}
}
@@ -162,12 +162,23 @@ func TestScheduler_NewRevisionReset(t *testing.T) {

func TestScheduler_Promotion(t *testing.T) {
mocks := SetupMocks(false)

// init
mocks.ctrl.advanceCanary("podinfo", "default", true)

// check initialized status
c, err := mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}

if c.Status.Phase != v1alpha3.CanaryPhaseInitialized {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, v1alpha3.CanaryPhaseInitialized)
}

// update
dep2 := newTestDeploymentV2()
_, err := mocks.kubeClient.AppsV1().Deployments("default").Update(dep2)
_, err = mocks.kubeClient.AppsV1().Deployments("default").Update(dep2)
if err != nil {
t.Fatal(err.Error())
}

@@ -205,9 +216,32 @@ func TestScheduler_Promotion(t *testing.T) {
// advance
mocks.ctrl.advanceCanary("podinfo", "default", true)

// check progressing status
c, err = mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}

if c.Status.Phase != v1alpha3.CanaryPhaseProgressing {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, v1alpha3.CanaryPhaseProgressing)
}

// promote
mocks.ctrl.advanceCanary("podinfo", "default", true)

// check promoting status
c, err = mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}

if c.Status.Phase != v1alpha3.CanaryPhasePromoting {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, v1alpha3.CanaryPhasePromoting)
}

// finalise
mocks.ctrl.advanceCanary("podinfo", "default", true)

primaryWeight, canaryWeight, err = mocks.router.GetRoutes(mocks.canary)
if err != nil {
t.Fatal(err.Error())

@@ -251,11 +285,15 @@ func TestScheduler_Promotion(t *testing.T) {
}

// check finalising status
c, err := mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
c, err = mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}

if c.Status.Phase != v1alpha3.CanaryPhaseFinalising {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, v1alpha3.CanaryPhaseFinalising)
}

// scale canary to zero
mocks.ctrl.advanceCanary("podinfo", "default", true)

@@ -24,7 +24,7 @@ func (task *BashTask) Run(ctx context.Context) (bool, error) {

if err != nil {
task.logger.With("canary", task.canary).Errorf("command failed %s %v %s", task.command, err, out)
return false, fmt.Errorf(" %v %v", err, out)
return false, fmt.Errorf(" %v %s", err, out)
} else {
if task.logCmdOutput {
fmt.Printf("%s\n", out)

@@ -20,14 +20,14 @@ func (task *HelmTask) Hash() string {
}

func (task *HelmTask) Run(ctx context.Context) (bool, error) {
helmCmd := fmt.Sprintf("helm %s", task.command)
helmCmd := fmt.Sprintf("%s %s", TaskTypeHelm, task.command)
task.logger.With("canary", task.canary).Infof("running command %v", helmCmd)

cmd := exec.CommandContext(ctx, "helm", strings.Fields(task.command)...)
cmd := exec.CommandContext(ctx, TaskTypeHelm, strings.Fields(task.command)...)
out, err := cmd.CombinedOutput()
if err != nil {
task.logger.With("canary", task.canary).Errorf("command failed %s %v %s", task.command, err, out)
return false, fmt.Errorf(" %v %v", err, out)
return false, fmt.Errorf(" %v %s", err, out)
} else {
if task.logCmdOutput {
fmt.Printf("%s\n", out)
pkg/loadtester/helmv3.go (new file)
@@ -0,0 +1,42 @@
package loadtester

import (
"context"
"fmt"
"os/exec"
"strings"
)

const TaskTypeHelmv3 = "helmv3"

type HelmTaskv3 struct {
TaskBase
command string
logCmdOutput bool
}

func (task *HelmTaskv3) Hash() string {
return hash(task.canary + task.command)
}

func (task *HelmTaskv3) Run(ctx context.Context) (bool, error) {
helmCmd := fmt.Sprintf("%s %s", TaskTypeHelmv3, task.command)
task.logger.With("canary", task.canary).Infof("running command %v", helmCmd)

cmd := exec.CommandContext(ctx, TaskTypeHelmv3, strings.Fields(task.command)...)
out, err := cmd.CombinedOutput()
if err != nil {
task.logger.With("canary", task.canary).Errorf("command failed %s %v %s", task.command, err, out)
return false, fmt.Errorf(" %v %s", err, out)
} else {
if task.logCmdOutput {
fmt.Printf("%s\n", out)
}
task.logger.With("canary", task.canary).Infof("command finished %v", helmCmd)
}
return true, nil
}

func (task *HelmTaskv3) String() string {
return task.command
}
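
The new task type can be exercised directly against the load tester's HTTP endpoint; a minimal sketch, with the service URL and Helm release name assumed:

```bash
# Hypothetical webhook payload for the helmv3 task type:
curl -s http://flagger-loadtester.test/ -d '{
  "name": "podinfo",
  "namespace": "test",
  "metadata": {"type": "helmv3", "cmd": "test podinfo -n test"}
}'
```

The handler below runs the command with the `helmv3` binary and returns HTTP 500 with the command output if it fails.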
@@ -185,6 +185,31 @@ func ListenAndServe(port string, timeout time.Duration, logger *zap.SugaredLogge
return
}

// run helmv3 command (blocking task)
if typ == TaskTypeHelmv3 {
helm := HelmTaskv3{
command: payload.Metadata["cmd"],
logCmdOutput: true,
TaskBase: TaskBase{
canary: fmt.Sprintf("%s.%s", payload.Name, payload.Namespace),
logger: logger,
},
}

ctx, cancel := context.WithTimeout(context.Background(), taskRunner.timeout)
defer cancel()

ok, err := helm.Run(ctx)
if !ok {
w.WriteHeader(http.StatusInternalServerError)
w.Write([]byte(err.Error()))
return
}

w.WriteHeader(http.StatusOK)
return
}

taskFactory, ok := GetTaskFactory(typ)
if !ok {
w.WriteHeader(http.StatusBadRequest)

@@ -28,6 +28,10 @@ func (factory Factory) Observer(provider string) Interface {
return &HttpObserver{
client: factory.Client,
}
case provider == "kubernetes":
return &HttpObserver{
client: factory.Client,
}
case provider == "appmesh":
return &EnvoyObserver{
client: factory.Client,
pkg/notifier/client_test.go (new file)
@@ -0,0 +1,30 @@
package notifier

import (
"encoding/json"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
)

func Test_postMessage(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
t.Fatal(err)
}
var payload = make(map[string]string)
err = json.Unmarshal(b, &payload)

if payload["status"] != "success" {
t.Fatal("wrong payload")
}
}))
defer ts.Close()

err := postMessage(ts.URL, map[string]string{"status": "success"})
if err != nil {
t.Fatal(err)
}
}
pkg/notifier/slack_test.go (new file)
@@ -0,0 +1,35 @@
package notifier

import (
"encoding/json"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
)

func TestSlack_Post(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
t.Fatal(err)
}
var payload = SlackPayload{}
err = json.Unmarshal(b, &payload)

if payload.Attachments[0].AuthorName != "podinfo.test" {
t.Fatal("wrong author name")
}
}))
defer ts.Close()

slack, err := NewSlack(ts.URL, "test", "test")
if err != nil {
t.Fatal(err)
}

err = slack.Post("podinfo", "test", "test", nil, true)
if err != nil {
t.Fatal(err)
}
}
pkg/notifier/teams_test.go (new file)
@@ -0,0 +1,35 @@
package notifier

import (
"encoding/json"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
)

func TestTeams_Post(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
b, err := ioutil.ReadAll(r.Body)
if err != nil {
t.Fatal(err)
}
var payload = MSTeamsPayload{}
err = json.Unmarshal(b, &payload)

if payload.Sections[0].ActivitySubtitle != "podinfo.test" {
t.Fatal("wrong activity subtitle")
}
}))
defer ts.Close()

teams, err := NewMSTeams(ts.URL)
if err != nil {
t.Fatal(err)
}

err = teams.Post("podinfo", "test", "test", nil, true)
if err != nil {
t.Fatal(err)
}
}
@@ -165,11 +165,19 @@ func (ar *AppMeshRouter) reconcileVirtualService(canary *flaggerv1.Canary, name
MeshName: canary.Spec.Service.MeshName,
VirtualRouter: &AppmeshV1beta1.VirtualRouter{
Name: fmt.Sprintf("%s-router", targetName),
Listeners: []AppmeshV1beta1.Listener{
{
PortMapping: AppmeshV1beta1.PortMapping{
Port: int64(canary.Spec.Service.Port),
Protocol: "http",
},
},
},
},
Routes: []AppmeshV1beta1.Route{
{
Name: fmt.Sprintf("%s-route", targetName),
Http: AppmeshV1beta1.HttpRoute{
Http: &AppmeshV1beta1.HttpRoute{
Match: AppmeshV1beta1.HttpRouteMatch{
Prefix: routePrefix,
},
@@ -11,23 +11,26 @@ import (
)

type Factory struct {
kubeConfig *restclient.Config
kubeClient kubernetes.Interface
meshClient clientset.Interface
flaggerClient clientset.Interface
logger *zap.SugaredLogger
kubeConfig *restclient.Config
kubeClient kubernetes.Interface
meshClient clientset.Interface
flaggerClient clientset.Interface
ingressAnnotationsPrefix string
logger *zap.SugaredLogger
}

func NewFactory(kubeConfig *restclient.Config, kubeClient kubernetes.Interface,
flaggerClient clientset.Interface,
ingressAnnotationsPrefix string,
logger *zap.SugaredLogger,
meshClient clientset.Interface) *Factory {
return &Factory{
kubeConfig: kubeConfig,
meshClient: meshClient,
kubeClient: kubeClient,
flaggerClient: flaggerClient,
logger: logger,
kubeConfig: kubeConfig,
meshClient: meshClient,
kubeClient: kubeClient,
flaggerClient: flaggerClient,
ingressAnnotationsPrefix: ingressAnnotationsPrefix,
logger: logger,
}
}

@@ -51,8 +54,9 @@ func (factory *Factory) MeshRouter(provider string) Interface {
return &NopRouter{}
case provider == "nginx":
return &IngressRouter{
logger: factory.logger,
kubeClient: factory.kubeClient,
logger: factory.logger,
kubeClient: factory.kubeClient,
annotationsPrefix: factory.ingressAnnotationsPrefix,
}
case provider == "appmesh":
return &AppMeshRouter{
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
|
||||
"go.uber.org/zap"
|
||||
@@ -10,13 +13,12 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type IngressRouter struct {
|
||||
kubeClient kubernetes.Interface
|
||||
logger *zap.SugaredLogger
|
||||
kubeClient kubernetes.Interface
|
||||
annotationsPrefix string
|
||||
logger *zap.SugaredLogger
|
||||
}
|
||||
|
||||
func (i *IngressRouter) Reconcile(canary *flaggerv1.Canary) error {
|
||||
@@ -115,7 +117,7 @@ func (i *IngressRouter) GetRoutes(canary *flaggerv1.Canary) (
|
||||
// A/B testing
|
||||
if len(canary.Spec.CanaryAnalysis.Match) > 0 {
|
||||
for k := range canaryIngress.Annotations {
|
||||
if k == "nginx.ingress.kubernetes.io/canary-by-cookie" || k == "nginx.ingress.kubernetes.io/canary-by-header" {
|
||||
if k == i.GetAnnotationWithPrefix("canary-by-cookie") || k == i.GetAnnotationWithPrefix("canary-by-header") {
|
||||
return 0, 100, nil
|
||||
}
|
||||
}
|
||||
@@ -123,7 +125,7 @@ func (i *IngressRouter) GetRoutes(canary *flaggerv1.Canary) (
|
||||
|
||||
// Canary
|
||||
for k, v := range canaryIngress.Annotations {
|
||||
if k == "nginx.ingress.kubernetes.io/canary-weight" {
|
||||
if k == i.GetAnnotationWithPrefix("canary-weight") {
|
||||
val, err := strconv.Atoi(v)
|
||||
if err != nil {
|
||||
return 0, 0, err
|
||||
@@ -170,12 +172,12 @@ func (i *IngressRouter) SetRoutes(
|
||||
iClone.Annotations = i.makeHeaderAnnotations(iClone.Annotations, header, headerValue, cookie)
|
||||
} else {
|
||||
// canary
|
||||
iClone.Annotations["nginx.ingress.kubernetes.io/canary-weight"] = fmt.Sprintf("%v", canaryWeight)
|
||||
iClone.Annotations[i.GetAnnotationWithPrefix("canary-weight")] = fmt.Sprintf("%v", canaryWeight)
|
||||
}
|
||||
|
||||
// toggle canary
|
||||
if canaryWeight > 0 {
|
||||
iClone.Annotations["nginx.ingress.kubernetes.io/canary"] = "true"
|
||||
iClone.Annotations[i.GetAnnotationWithPrefix("canary")] = "true"
|
||||
} else {
|
||||
iClone.Annotations = i.makeAnnotations(iClone.Annotations)
|
||||
}
|
||||
@@ -191,14 +193,14 @@ func (i *IngressRouter) SetRoutes(
|
||||
func (i *IngressRouter) makeAnnotations(annotations map[string]string) map[string]string {
|
||||
res := make(map[string]string)
|
||||
for k, v := range annotations {
|
||||
if !strings.Contains(k, "nginx.ingress.kubernetes.io/canary") &&
|
||||
if !strings.Contains(k, i.GetAnnotationWithPrefix("canary")) &&
|
||||
!strings.Contains(k, "kubectl.kubernetes.io/last-applied-configuration") {
|
||||
res[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
res["nginx.ingress.kubernetes.io/canary"] = "false"
|
||||
res["nginx.ingress.kubernetes.io/canary-weight"] = "0"
|
||||
res[i.GetAnnotationWithPrefix("canary")] = "false"
|
||||
res[i.GetAnnotationWithPrefix("canary-weight")] = "0"
|
||||
|
||||
return res
|
||||
}
|
||||
@@ -207,25 +209,29 @@ func (i *IngressRouter) makeHeaderAnnotations(annotations map[string]string,
|
||||
header string, headerValue string, cookie string) map[string]string {
|
||||
res := make(map[string]string)
|
||||
for k, v := range annotations {
|
||||
if !strings.Contains(v, "nginx.ingress.kubernetes.io/canary") {
|
||||
if !strings.Contains(v, i.GetAnnotationWithPrefix("canary")) {
|
||||
res[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
res["nginx.ingress.kubernetes.io/canary"] = "true"
|
||||
res["nginx.ingress.kubernetes.io/canary-weight"] = "0"
|
||||
res[i.GetAnnotationWithPrefix("canary")] = "true"
|
||||
res[i.GetAnnotationWithPrefix("canary-weight")] = "0"
|
||||
|
||||
if cookie != "" {
|
||||
res["nginx.ingress.kubernetes.io/canary-by-cookie"] = cookie
|
||||
res[i.GetAnnotationWithPrefix("canary-by-cookie")] = cookie
|
||||
}
|
||||
|
||||
if header != "" {
|
||||
res["nginx.ingress.kubernetes.io/canary-by-header"] = header
|
||||
res[i.GetAnnotationWithPrefix("canary-by-header")] = header
|
||||
}
|
||||
|
||||
if headerValue != "" {
|
||||
res["nginx.ingress.kubernetes.io/canary-by-header-value"] = headerValue
|
||||
res[i.GetAnnotationWithPrefix("canary-by-header-value")] = headerValue
|
||||
}
|
||||
|
||||
return res
|
||||
}
|
||||
|
||||
func (i *IngressRouter) GetAnnotationWithPrefix(suffix string) string {
|
||||
return fmt.Sprintf("%v/%v", i.annotationsPrefix, suffix)
|
||||
}
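
With this change the NGINX annotation group becomes configurable. A minimal sketch of running Flagger with a custom prefix; the flag name is an assumption based on this changeset, not a documented option:

```bash
# Hypothetical: point Flagger at ingresses annotated with a custom group
flagger -mesh-provider=nginx \
  -metrics-server=http://prometheus:9090 \
  -ingress-annotations-prefix=custom.ingress.kubernetes.io
```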

@@ -2,15 +2,17 @@ package router

import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"testing"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestIngressRouter_Reconcile(t *testing.T) {
mocks := setupfakeClients()
router := &IngressRouter{
logger: mocks.logger,
kubeClient: mocks.kubeClient,
logger: mocks.logger,
kubeClient: mocks.kubeClient,
annotationsPrefix: "custom.ingress.kubernetes.io",
}

err := router.Reconcile(mocks.ingressCanary)

@@ -18,8 +20,8 @@ func TestIngressRouter_Reconcile(t *testing.T) {
t.Fatal(err.Error())
}

canaryAn := "nginx.ingress.kubernetes.io/canary"
canaryWeightAn := "nginx.ingress.kubernetes.io/canary-weight"
canaryAn := "custom.ingress.kubernetes.io/canary"
canaryWeightAn := "custom.ingress.kubernetes.io/canary-weight"

canaryName := fmt.Sprintf("%s-canary", mocks.ingressCanary.Spec.IngressRef.Name)
inCanary, err := router.kubeClient.ExtensionsV1beta1().Ingresses("default").Get(canaryName, metav1.GetOptions{})

@@ -44,8 +46,9 @@ func TestIngressRouter_Reconcile(t *testing.T) {
func TestIngressRouter_GetSetRoutes(t *testing.T) {
mocks := setupfakeClients()
router := &IngressRouter{
logger: mocks.logger,
kubeClient: mocks.kubeClient,
logger: mocks.logger,
kubeClient: mocks.kubeClient,
annotationsPrefix: "prefix1.nginx.ingress.kubernetes.io",
}

err := router.Reconcile(mocks.ingressCanary)

@@ -66,8 +69,8 @@ func TestIngressRouter_GetSetRoutes(t *testing.T) {
t.Fatal(err.Error())
}

canaryAn := "nginx.ingress.kubernetes.io/canary"
canaryWeightAn := "nginx.ingress.kubernetes.io/canary-weight"
canaryAn := "prefix1.nginx.ingress.kubernetes.io/canary"
canaryWeightAn := "prefix1.nginx.ingress.kubernetes.io/canary-weight"

canaryName := fmt.Sprintf("%s-canary", mocks.ingressCanary.Spec.IngressRef.Name)
inCanary, err := router.kubeClient.ExtensionsV1beta1().Ingresses("default").Get(canaryName, metav1.GetOptions{})
@@ -2,6 +2,7 @@ package router
 
 import (
 	"fmt"
+
 	"github.com/google/go-cmp/cmp"
 	"github.com/google/go-cmp/cmp/cmpopts"
 	flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
@@ -9,7 +10,6 @@ import (
 	clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
 	"go.uber.org/zap"
 	"k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/apis/meta/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/client-go/kubernetes"
@@ -236,7 +236,7 @@ func (ir *IstioRouter) GetRoutes(canary *flaggerv1.Canary) (
 ) {
 	targetName := canary.Spec.TargetRef.Name
 	vs := &istiov1alpha3.VirtualService{}
-	vs, err = ir.istioClient.NetworkingV1alpha3().VirtualServices(canary.Namespace).Get(targetName, v1.GetOptions{})
+	vs, err = ir.istioClient.NetworkingV1alpha3().VirtualServices(canary.Namespace).Get(targetName, metav1.GetOptions{})
 	if err != nil {
 		if errors.IsNotFound(err) {
 			err = fmt.Errorf("VirtualService %s.%s not found", targetName, canary.Namespace)
@@ -283,7 +283,7 @@ func (ir *IstioRouter) SetRoutes(
 	primaryName := fmt.Sprintf("%s-primary", targetName)
 	canaryName := fmt.Sprintf("%s-canary", targetName)
 
-	vs, err := ir.istioClient.NetworkingV1alpha3().VirtualServices(canary.Namespace).Get(targetName, v1.GetOptions{})
+	vs, err := ir.istioClient.NetworkingV1alpha3().VirtualServices(canary.Namespace).Get(targetName, metav1.GetOptions{})
 	if err != nil {
 		if errors.IsNotFound(err) {
 			return fmt.Errorf("VirtualService %s.%s not found", targetName, canary.Namespace)
@@ -383,12 +383,5 @@ func makeDestination(canary *flaggerv1.Canary, host string, weight int) istiov1a
 		Weight: weight,
 	}
 
-	// if port discovery is enabled then we need to explicitly set the destination port
-	if canary.Spec.Service.PortDiscovery {
-		dest.Destination.Port = &istiov1alpha3.PortSelector{
-			Number: uint32(canary.Spec.Service.Port),
-		}
-	}
-
	return dest
 }
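Since GetRoutes and SetRoutes above read and write the traffic split on the target VirtualService, a quick way to observe what they manage is to dump the route weights directly; a sketch, assuming a podinfo canary in the test namespace:

# Show the primary/canary weight split that SetRoutes writes.
kubectl -n test get virtualservice podinfo \
  -o jsonpath='{.spec.http[0].route[*].weight}'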
@@ -1,4 +1,4 @@
 package version
 
-var VERSION = "0.18.0"
+var VERSION = "0.18.5"
 var REVISION = "unknown"
@@ -25,14 +25,17 @@ The e2e testing infrastructure is powered by CircleCI and [Kubernetes Kind](http
 * install latest stable kubectl [e2e-kind.sh](e2e-kind.sh)
 * install Kubernetes Kind [e2e-kind.sh](e2e-kind.sh)
 * create local Kubernetes cluster with kind [e2e-kind.sh](e2e-kind.sh)
-* install latest stable Helm CLI [e2e-nginx.sh](e2e-istio.sh)
-* deploy Tiller on the local cluster [e2e-nginx.sh](e2e-istio.sh)
-* install NGINX ingress with Helm [e2e-nginx.sh](e2e-istio.sh)
+* install latest stable Helm CLI [e2e-nginx.sh](e2e-nginx.sh)
+* deploy Tiller on the local cluster [e2e-nginx.sh](e2e-nginx.sh)
+* install NGINX ingress with Helm [e2e-nginx.sh](e2e-nginx.sh)
 * load Flagger image onto the local cluster [e2e-nginx.sh](e2e-nginx.sh)
 * install Flagger and Prometheus in the ingress-nginx namespace [e2e-nginx.sh](e2e-nginx.sh)
-* create a test namespace [e2e-nginx-tests.sh](e2e-tests.sh)
-* deploy the load tester in the test namespace [e2e-nginx-tests.sh](e2e-tests.sh)
-* deploy the demo workload (podinfo) and ingress in the test namespace [e2e-nginx-tests.sh](e2e-tests.sh)
-* test the canary initialization [e2e-nginx-tests.sh](e2e-tests.sh)
-* test the canary analysis and promotion using weighted traffic and the load testing webhook [e2e-nginx-tests.sh](e2e-tests.sh)
-* test the A/B testing analysis and promotion using header filters and pre/post rollout webhooks [e2e-nginx-tests.sh](e2e-tests.sh)
+* create a test namespace [e2e-nginx-tests.sh](e2e-nginx-tests.sh)
+* deploy the load tester in the test namespace [e2e-nginx-tests.sh](e2e-nginx-tests.sh)
+* deploy the demo workload (podinfo) and ingress in the test namespace [e2e-nginx-tests.sh](e2e-nginx-tests.sh)
+* test the canary initialization [e2e-nginx-tests.sh](e2e-nginx-tests.sh)
+* test the canary analysis and promotion using weighted traffic and the load testing webhook [e2e-nginx-tests.sh](e2e-nginx-tests.sh)
+* test the A/B testing analysis and promotion using header filters and pre/post rollout webhooks [e2e-nginx-tests.sh](e2e-nginx-tests.sh)
+* cleanup test environment [e2e-nginx-cleanup.sh](e2e-nginx-cleanup.sh)
+* install NGINX Ingress and Flagger with custom ingress annotations prefix [e2e-nginx-custom-annotations.sh](e2e-nginx-custom-annotations.sh)
+* repeat the canary and A/B testing workflow [e2e-nginx-tests.sh](e2e-nginx-tests.sh)
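Taken together, the updated checklist maps onto a local run of the NGINX suite; a sketch of the script order, mirroring the bullets above (assumes a Docker host and that the test/flagger:latest image has already been built):

test/e2e-kind.sh                       # create the local kind cluster
test/e2e-nginx.sh                      # Helm, Tiller, NGINX ingress, Flagger, Prometheus
test/e2e-nginx-tests.sh                # canary + A/B test run
test/e2e-nginx-cleanup.sh              # tear down NGINX Ingress, Flagger and the test namespace
test/e2e-nginx-custom-annotations.sh   # reinstall with a custom annotations prefix
test/e2e-nginx-tests.sh                # repeat the workflow against the new prefix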
@@ -45,6 +45,7 @@ metadata:
   name: podinfo
   namespace: test
 spec:
+  provider: gloo
   targetRef:
     apiVersion: apps/v1
     kind: Deployment
@@ -62,12 +63,22 @@ spec:
       threshold: 99
       interval: 1m
     webhooks:
+    - name: "gate"
+      type: confirm-rollout
+      url: http://flagger-loadtester.test/gate/approve
+    - name: acceptance-test
+      type: pre-rollout
+      url: http://flagger-loadtester.test/
+      timeout: 10s
+      metadata:
+        type: bash
+        cmd: "curl -sd 'test' http://podinfo-canary:9898/token | grep token"
     - name: load-test
       url: http://flagger-loadtester.test/
       timeout: 5s
       metadata:
         type: cmd
-        cmd: "hey -z 10m -q 10 -c 2 -host app.example.com http://gateway-proxy.gloo-system"
+        cmd: "hey -z 2m -q 5 -c 2 -host app.example.com http://gateway-proxy-v2.gloo-system"
         logCmdOutput: "true"
 EOF
@@ -103,7 +114,9 @@ until ${ok}; do
     if [[ ${count} -eq ${retries} ]]; then
         kubectl -n test describe deployment/podinfo
+        kubectl -n test describe deployment/podinfo-primary
         kubectl -n test logs deployment/flagger-loadtester
         kubectl -n gloo-system logs deployment/flagger
         kubectl -n gloo-system get all
+        kubectl -n gloo-system get virtualservice podinfo -oyaml
         echo "No more retries left"
         exit 1

@@ -2,7 +2,7 @@
 
 set -o errexit
 
-GLOO_VER="0.17.6"
+GLOO_VER="0.18.8"
 REPO_ROOT=$(git rev-parse --show-toplevel)
 export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
 
@@ -12,7 +12,7 @@ helm upgrade -i gloo gloo/gloo --version ${GLOO_VER} \
   --namespace gloo-system
 
 kubectl -n gloo-system rollout status deployment/gloo
-kubectl -n gloo-system rollout status deployment/gateway-proxy
+kubectl -n gloo-system rollout status deployment/gateway-proxy-v2
 kubectl -n gloo-system get all
 
 echo '>>> Installing Flagger'
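The polling loops that recur throughout these scripts all share the same retry shape; a generic helper one could factor them into (hypothetical, not part of the repo):

# wait_for <command> [retries] [delay] - poll until the command succeeds
wait_for() {
  local check_cmd=$1 retries=${2:-50} delay=${3:-5} count=0
  until eval "${check_cmd}"; do
    sleep "${delay}"
    count=$((count + 1))
    if [[ ${count} -eq ${retries} ]]; then
      echo "No more retries left"
      return 1
    fi
  done
}

# usage, equivalent to a promotion wait above:
wait_for "kubectl -n test get canary/podinfo | grep Succeeded" 50 5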
@@ -2,7 +2,7 @@
 
 set -o errexit
 
-ISTIO_VER="1.2.2"
+ISTIO_VER="1.3.0"
 REPO_ROOT=$(git rev-parse --show-toplevel)
 export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
 
@@ -13,9 +13,9 @@ echo '>>> Installing Istio CRDs'
 helm upgrade -i istio-init istio.io/istio-init --wait --namespace istio-system
 
 echo '>>> Waiting for Istio CRDs to be ready'
-kubectl -n istio-system wait --for=condition=complete job/istio-init-crd-10
-kubectl -n istio-system wait --for=condition=complete job/istio-init-crd-11
-kubectl -n istio-system wait --for=condition=complete job/istio-init-crd-12
+kubectl -n istio-system wait --for=condition=complete job/istio-init-crd-10-${ISTIO_VER}
+kubectl -n istio-system wait --for=condition=complete job/istio-init-crd-11-${ISTIO_VER}
+kubectl -n istio-system wait --for=condition=complete job/istio-init-crd-12-${ISTIO_VER}
 
 echo '>>> Installing Istio control plane'
 helm upgrade -i istio istio.io/istio --wait --namespace istio-system -f ${REPO_ROOT}/test/e2e-istio-values.yaml
@@ -3,7 +3,7 @@
 set -o errexit
 
 REPO_ROOT=$(git rev-parse --show-toplevel)
-KIND_VERSION=v0.4.0
+KIND_VERSION=v0.5.1
 
 if [[ "$1" ]]; then
   KIND_VERSION=$1
test/e2e-kubernetes-tests.sh (new executable file, 108 lines)
@@ -0,0 +1,108 @@
+#!/usr/bin/env bash
+
+# This script runs e2e tests for Blue/Green initialization, analysis and promotion
+# Prerequisites: Kubernetes Kind, Kustomize
+
+set -o errexit
+
+REPO_ROOT=$(git rev-parse --show-toplevel)
+export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
+
+echo '>>> Creating test namespace'
+kubectl create namespace test
+
+echo '>>> Installing the load tester'
+kubectl apply -k ${REPO_ROOT}/kustomize/tester
+kubectl -n test rollout status deployment/flagger-loadtester
+
+echo '>>> Initialising canary'
+kubectl apply -f ${REPO_ROOT}/test/e2e-workload.yaml
+
+cat <<EOF | kubectl apply -f -
+apiVersion: flagger.app/v1alpha3
+kind: Canary
+metadata:
+  name: podinfo
+  namespace: test
+spec:
+  provider: kubernetes
+  targetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: podinfo
+  progressDeadlineSeconds: 60
+  service:
+    port: 9898
+    portDiscovery: true
+  canaryAnalysis:
+    interval: 15s
+    threshold: 10
+    iterations: 5
+    metrics:
+    - name: request-success-rate
+      threshold: 99
+      interval: 1m
+    - name: request-duration
+      threshold: 500
+      interval: 30s
+    webhooks:
+    - name: "gate"
+      type: confirm-rollout
+      url: http://flagger-loadtester.test/gate/approve
+    - name: acceptance-test
+      type: pre-rollout
+      url: http://flagger-loadtester.test/
+      timeout: 10s
+      metadata:
+        type: bash
+        cmd: "curl -sd 'test' http://podinfo-canary:9898/token | grep token"
+    - name: load-test
+      url: http://flagger-loadtester.test/
+      timeout: 5s
+      metadata:
+        type: cmd
+        cmd: "hey -z 10m -q 10 -c 2 http://podinfo-canary.test:9898/"
+        logCmdOutput: "true"
+EOF
+
+echo '>>> Waiting for primary to be ready'
+retries=50
+count=0
+ok=false
+until ${ok}; do
+    kubectl -n test get canary/podinfo | grep 'Initialized' && ok=true || ok=false
+    sleep 5
+    count=$(($count + 1))
+    if [[ ${count} -eq ${retries} ]]; then
+        kubectl -n flagger-system logs deployment/flagger
+        echo "No more retries left"
+        exit 1
+    fi
+done
+
+echo '✔ Canary initialization test passed'
+
+echo '>>> Triggering canary deployment'
+kubectl -n test set image deployment/podinfo podinfod=quay.io/stefanprodan/podinfo:1.7.0
+
+echo '>>> Waiting for canary promotion'
+retries=50
+count=0
+ok=false
+until ${ok}; do
+    kubectl -n test describe deployment/podinfo-primary | grep '1.7.0' && ok=true || ok=false
+    sleep 10
+    kubectl -n flagger-system logs deployment/flagger --tail 1
+    count=$(($count + 1))
+    if [[ ${count} -eq ${retries} ]]; then
+        kubectl -n test describe deployment/podinfo
+        kubectl -n test describe deployment/podinfo-primary
+        kubectl -n flagger-system logs deployment/flagger
+        echo "No more retries left"
+        exit 1
+    fi
+done
+
+echo '✔ Canary promotion test passed'
+
+kubectl -n flagger-system logs deployment/flagger
test/e2e-kubernetes.sh (new executable file, 17 lines)
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+
+set -o errexit
+
+REPO_ROOT=$(git rev-parse --show-toplevel)
+export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
+
+echo '>>> Loading Flagger image'
+kind load docker-image test/flagger:latest
+
+echo '>>> Installing Flagger'
+kubectl apply -k ${REPO_ROOT}/kustomize/kubernetes
+
+kubectl -n flagger-system set image deployment/flagger flagger=test/flagger:latest
+
+kubectl -n flagger-system rollout status deployment/flagger
+kubectl -n flagger-system rollout status deployment/flagger-prometheus
test/e2e-nginx-cleanup.sh (new executable file, 14 lines)
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+
+set -o errexit
+
+export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
+
+echo '>>> Deleting NGINX Ingress'
+helm delete --purge nginx-ingress
+
+echo '>>> Deleting Flagger'
+helm delete --purge flagger
+
+echo '>>> Cleanup test namespace'
+kubectl delete namespace test --ignore-not-found=true
test/e2e-nginx-custom-annotations.sh (new executable file, 41 lines)
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+
+set -o errexit
+
+REPO_ROOT=$(git rev-parse --show-toplevel)
+export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
+NGINX_VERSION=1.12.1
+
+echo '>>> Installing NGINX Ingress'
+helm upgrade -i nginx-ingress stable/nginx-ingress --version=${NGINX_VERSION} \
+--wait \
+--namespace ingress-nginx \
+--set controller.stats.enabled=true \
+--set controller.metrics.enabled=true \
+--set controller.podAnnotations."prometheus\.io/scrape"=true \
+--set controller.podAnnotations."prometheus\.io/port"=10254 \
+--set controller.service.type=NodePort
+
+kubectl -n ingress-nginx patch deployment/nginx-ingress-controller \
+--type='json' -p='[{"op": "add", "path": "/spec/template/spec/containers/0/args/-", "value": "--annotations-prefix=custom.ingress.kubernetes.io"}]'
+
+kubectl -n ingress-nginx rollout status deployment/nginx-ingress-controller
+kubectl -n ingress-nginx get all
+
+echo '>>> Loading Flagger image'
+kind load docker-image test/flagger:latest
+
+echo '>>> Installing Flagger'
+helm install ${REPO_ROOT}/charts/flagger \
+--name flagger \
+--namespace ingress-nginx \
+--set prometheus.install=true \
+--set ingressAnnotationsPrefix="custom.ingress.kubernetes.io" \
+--set meshProvider=nginx \
+--set crd.create=false
+
+kubectl -n ingress-nginx set image deployment/flagger flagger=test/flagger:latest
+
+kubectl -n ingress-nginx rollout status deployment/flagger
+kubectl -n ingress-nginx rollout status deployment/flagger-prometheus
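After the script above patches the controller and reinstalls Flagger, both sides must agree on the prefix; a quick verification sketch:

# confirm the controller flag was appended by the JSON patch
kubectl -n ingress-nginx get deployment/nginx-ingress-controller \
  -o jsonpath='{.spec.template.spec.containers[0].args}' | tr ',' '\n' | grep annotations-prefix

# confirm Flagger was started with the matching prefix (the chart value
# ingressAnnotationsPrefix should surface as a container argument)
kubectl -n ingress-nginx get deployment/flagger \
  -o jsonpath='{.spec.template.spec.containers[0].args}'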
@@ -43,9 +43,31 @@ spec:
     maxWeight: 30
     stepWeight: 10
     metrics:
-    - name: request-success-rate
+    - name: "http-request-success-rate"
      threshold: 99
      interval: 1m
+      query: |
+        100 - sum(
+            rate(
+                http_request_duration_seconds_count{
+                    kubernetes_namespace="test",
+                    kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)",
+                    path="root",
+                    status!~"5.*"
+                }[1m]
+            )
+        )
+        /
+        sum(
+            rate(
+                http_request_duration_seconds_count{
+                    kubernetes_namespace="test",
+                    kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)",
+                    path="root"
+                }[1m]
+            )
+        )
+        * 100
     - name: "latency"
       threshold: 0.5
       interval: 1m
@@ -55,7 +77,8 @@ spec:
         rate(
           http_request_duration_seconds_bucket{
             kubernetes_namespace="test",
-            kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"
+            kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)",
+            path="root"
           }[1m]
         )
       ) by (le)
@@ -94,7 +117,14 @@ echo '>>> Waiting for canary promotion'
 retries=50
 count=0
 ok=false
+failed=false
 until ${ok}; do
+    kubectl -n test get canary/podinfo | grep 'Failed' && failed=true || failed=false
+    if ${failed}; then
+        kubectl -n ingress-nginx logs deployment/test-flagger
+        echo "Canary failed!"
+        exit 1
+    fi
     kubectl -n test describe deployment/podinfo-primary | grep '1.4.1' && ok=true || ok=false
     sleep 10
     kubectl -n ingress-nginx logs deployment/flagger --tail 1
@@ -144,9 +174,31 @@ spec:
       cookie:
         exact: "canary"
     metrics:
-    - name: request-success-rate
+    - name: "http-request-success-rate"
      threshold: 99
      interval: 1m
+      query: |
+        100 - sum(
+            rate(
+                http_request_duration_seconds_count{
+                    kubernetes_namespace="test",
+                    kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)",
+                    path="root",
+                    status!~"5.*"
+                }[1m]
+            )
+        )
+        /
+        sum(
+            rate(
+                http_request_duration_seconds_count{
+                    kubernetes_namespace="test",
+                    kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)",
+                    path="root"
+                }[1m]
+            )
+        )
+        * 100
     webhooks:
     - name: pre
       type: pre-rollout
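The two custom query metrics above are evaluated against the Prometheus instance the chart installs; to iterate on such a query before wiring it into a Canary, one can run it by hand. A sketch, assuming the flagger-prometheus deployment in ingress-nginx (the deployment name appears in the scripts above; reaching it via port-forward is an assumption):

kubectl -n ingress-nginx port-forward deploy/flagger-prometheus 9090:9090 &
curl -s http://localhost:9090/api/v1/query \
  --data-urlencode 'query=sum(rate(http_request_duration_seconds_count{kubernetes_namespace="test"}[1m]))'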
@@ -4,7 +4,7 @@ set -o errexit
 
 REPO_ROOT=$(git rev-parse --show-toplevel)
 export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
-NGINX_VERSION=1.8.2
+NGINX_VERSION=1.12.1
 
 echo '>>> Installing NGINX Ingress'
 helm upgrade -i nginx-ingress stable/nginx-ingress --version=${NGINX_VERSION} \
@@ -1,6 +1,6 @@
 #!/usr/bin/env bash
 
-# This script runs e2e tests for Canary initialization, analysis and promotion
+# This script runs e2e tests for Canary, B/G and A/B initialization, analysis and promotion
 # Prerequisites: Kubernetes Kind, Helm and Istio
 
 set -o errexit
@@ -33,6 +33,7 @@ spec:
   progressDeadlineSeconds: 60
   service:
     port: 9898
+    portDiscovery: true
   headers:
     request:
       add:
@@ -123,6 +124,21 @@ until ${ok}; do
   fi
 done
 
+echo '>>> Waiting for canary finalization'
+retries=50
+count=0
+ok=false
+until ${ok}; do
+    kubectl -n test get canary/podinfo | grep 'Succeeded' && ok=true || ok=false
+    sleep 5
+    count=$(($count + 1))
+    if [[ ${count} -eq ${retries} ]]; then
+        kubectl -n istio-system logs deployment/flagger
+        echo "No more retries left"
+        exit 1
+    fi
+done
+
 echo '✔ Canary promotion test passed'
 
 if [[ "$1" = "canary" ]]; then
@@ -142,6 +158,80 @@ spec:
     name: podinfo
   progressDeadlineSeconds: 60
   service:
+    portDiscovery: true
     port: 9898
   canaryAnalysis:
     interval: 10s
+    threshold: 5
+    iterations: 5
+    metrics:
+    - name: request-success-rate
+      threshold: 99
+      interval: 1m
+    - name: request-duration
+      threshold: 500
+      interval: 30s
+    webhooks:
+    - name: load-test
+      url: http://flagger-loadtester.test/
+      timeout: 5s
+      metadata:
+        type: cmd
+        cmd: "hey -z 5m -q 10 -c 2 http://podinfo.test:9898/"
+EOF
+
+echo '>>> Triggering B/G deployment'
+kubectl -n test set image deployment/podinfo podinfod=quay.io/stefanprodan/podinfo:1.4.2
+
+echo '>>> Waiting for B/G promotion'
+retries=50
+count=0
+ok=false
+until ${ok}; do
+    kubectl -n test describe deployment/podinfo-primary | grep '1.4.2' && ok=true || ok=false
+    sleep 10
+    kubectl -n istio-system logs deployment/flagger --tail 1
+    count=$(($count + 1))
+    if [[ ${count} -eq ${retries} ]]; then
+        kubectl -n test describe deployment/podinfo
+        kubectl -n test describe deployment/podinfo-primary
+        kubectl -n istio-system logs deployment/flagger
+        echo "No more retries left"
+        exit 1
+    fi
+done
+
+echo '>>> Waiting for B/G finalization'
+retries=50
+count=0
+ok=false
+until ${ok}; do
+    kubectl -n test get canary/podinfo | grep 'Succeeded' && ok=true || ok=false
+    sleep 5
+    count=$(($count + 1))
+    if [[ ${count} -eq ${retries} ]]; then
+        kubectl -n istio-system logs deployment/flagger
+        echo "No more retries left"
+        exit 1
+    fi
+done
+
+echo '✔ B/G promotion test passed'
+
+cat <<EOF | kubectl apply -f -
+apiVersion: flagger.app/v1alpha3
+kind: Canary
+metadata:
+  name: podinfo
+  namespace: test
+spec:
+  targetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: podinfo
+  progressDeadlineSeconds: 60
+  service:
+    portDiscovery: true
+    port: 9898
+  canaryAnalysis:
+    interval: 10s
@@ -167,6 +257,9 @@ spec:
         type: cmd
         cmd: "hey -z 10m -q 10 -c 2 -H 'Cookie: type=insider' http://podinfo-canary.test:9898/"
         logCmdOutput: "true"
+    - name: promote-gate
+      type: confirm-promotion
+      url: http://flagger-loadtester.test/gate/approve
     - name: post
       type: post-rollout
       url: http://flagger-loadtester.test/
@@ -178,14 +271,14 @@ spec:
 EOF
 
 echo '>>> Triggering A/B testing'
-kubectl -n test set image deployment/podinfo podinfod=quay.io/stefanprodan/podinfo:1.4.2
+kubectl -n test set image deployment/podinfo podinfod=quay.io/stefanprodan/podinfo:1.4.3
 
 echo '>>> Waiting for A/B testing promotion'
 retries=50
 count=0
 ok=false
 until ${ok}; do
-    kubectl -n test describe deployment/podinfo-primary | grep '1.4.2' && ok=true || ok=false
+    kubectl -n test describe deployment/podinfo-primary | grep '1.4.3' && ok=true || ok=false
     sleep 10
     kubectl -n istio-system logs deployment/flagger --tail 1
     count=$(($count + 1))
@@ -202,4 +295,4 @@ echo '✔ A/B testing promotion test passed'
 
 kubectl -n istio-system logs deployment/flagger
 
-echo '✔ All tests passed'
+echo '✔ All tests passed'
@@ -20,6 +20,7 @@ spec:
   metadata:
     annotations:
       prometheus.io/scrape: "true"
+      prometheus.io/port: "9797"
     labels:
       app: podinfo
   spec:
@@ -31,9 +32,13 @@ spec:
         - containerPort: 9898
           name: http
           protocol: TCP
+        - containerPort: 9797
+          name: http-prom
+          protocol: TCP
         command:
         - ./podinfo
         - --port=9898
+        - --port-metrics=9797
         - --level=info
         - --random-delay=false
         - --random-error=false
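With the workload change above, podinfo now serves Prometheus metrics on a dedicated http-prom port; a quick local check (sketch):

# forward the new metrics port and confirm it serves Prometheus metrics
kubectl -n test port-forward deploy/podinfo 9797:9797 &
curl -s http://localhost:9797/metrics | head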