Compare commits

...

93 Commits

Author SHA1 Message Date
Stefan Prodan
f6fa5e3891 Merge pull request #270 from weaveworks/prep-0.18.2
Release v0.18.2
2019-08-05 18:57:54 +03:00
stefanprodan
a305a0b705 Release v0.18.2 2019-08-05 18:43:57 +03:00
Stefan Prodan
dfe619e2ea Merge pull request #269 from weaveworks/helm-circleci
Publish Helm chart from CircleCI
2019-08-05 17:57:21 +03:00
stefanprodan
2b3d425b70 Publish Helm chart from CircleCI 2019-08-05 17:08:33 +03:00
Stefan Prodan
6e55fea413 Merge pull request #268 from weaveworks/istio-1.2.3
Update e2e backends
2019-08-03 15:44:53 +03:00
stefanprodan
b6a08b6615 Fix AppMesh mesh name in docs 2019-08-03 15:24:31 +03:00
stefanprodan
eaa6906516 Update e2e NGINX ingress to v1.12.1 2019-08-03 13:42:27 +03:00
stefanprodan
62a7a92f2a Update e2e Gloo to v0.18.8 2019-08-03 13:01:57 +03:00
stefanprodan
3aeb0945c5 Update e2e Istio to v1.2.3 2019-08-03 12:05:21 +03:00
Stefan Prodan
e8c85efeae Merge pull request #267 from fcantournet/fix_virtualservices_multipleports
Fix Port discovery with multiple port services
2019-08-03 12:04:04 +03:00
Félix Cantournet
6651f6452b Multiple port canary: fix FAQ and add e2e tests 2019-08-02 14:23:58 +02:00
Félix Cantournet
0ca48d77be Fix Port discovery with multiple port services
This fixes issue https://github.com/weaveworks/flagger/issues/263

We actually don't need to specify any ports in the VirtualService
and DestinationRules.
Istio will create clusters/listeners for each named port we have declared in
the Kubernetes services, and the router can be shared as it operates only on L7 criteria.

Also contains a tiny clean-up of imports
2019-08-02 10:07:00 +02:00
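
For illustration (a minimal sketch; the port numbers and resource names are placeholders, not taken from this PR), a Canary can declare only the routed port while the target deployment exposes several named container ports, since the VirtualService and DestinationRules no longer need explicit port entries:

```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: podinfo
  namespace: test
spec:
  # deployment that exposes multiple named container ports (e.g. http, grpc)
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  service:
    # only the routed port is declared here; the other named ports
    # are still added to the generated ClusterIP services
    port: 9898
```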
Stefan Prodan
a9e0e018e3 Merge pull request #266 from ExpediaInc/master
Parameterize image pull secrets for private docker repos
2019-08-01 11:07:53 +03:00
Sky Moon
122d11f445 Merge pull request #1 from ExpediaInc/parameterizeImagePullSecrets
parameterize image pull secrets for private docker repos.
2019-08-01 00:50:15 -07:00
cmoon
b03555858c parameterize image pull secrets for private docker repos. 2019-08-01 00:47:07 -07:00
Stefan Prodan
dcc5a40441 Merge pull request #262 from weaveworks/prep-0.18.1
Release v0.18.1
2019-07-30 13:52:25 +03:00
stefanprodan
8c949f59de Package helm charts locally 2019-07-30 13:35:09 +03:00
stefanprodan
e8d91a0375 Release v0.18.1 2019-07-30 13:22:51 +03:00
Stefan Prodan
fae9aa664d Merge pull request #261 from weaveworks/blue-green-e2e
Fix Blue/Green metrics provider and add e2e tests
2019-07-30 13:16:20 +03:00
stefanprodan
c31e9e5a96 Use Linkerd metrics for ingress and kubernetes routers 2019-07-30 13:00:28 +03:00
stefanprodan
99fff98274 Kustomize: set Flagger log level to info 2019-07-30 12:43:02 +03:00
stefanprodan
11d84bf35d Enable kubernetes metric provider 2019-07-30 12:27:53 +03:00
stefanprodan
e56ba480c7 Add Blue/Green e2e tests 2019-07-30 12:02:15 +03:00
Stefan Prodan
b9f0517c5d Merge pull request #255 from weaveworks/prep-0.18.0
Release v0.18.0
2019-07-29 16:06:23 +03:00
stefanprodan
6e66f02585 Update changelog 2019-07-29 15:52:50 +03:00
stefanprodan
5922e96044 Merge branch 'prep-0.18.0' of https://github.com/weaveworks/flagger into prep-0.18.0 2019-07-29 15:06:43 +03:00
stefanprodan
f36e7e414a Add manual gating link to readme 2019-07-29 15:06:31 +03:00
stefanprodan
606754d4a5 Disable supergloo e2e 2019-07-29 15:06:31 +03:00
stefanprodan
a3847e64df Add Kustomize download link to docs 2019-07-29 15:06:31 +03:00
stefanprodan
7a3f9f2e73 Use Kustomize for Istio e2e testing 2019-07-29 15:06:31 +03:00
stefanprodan
2e4e8b0bf9 Make installer work with Kustomize v3 2019-07-29 15:06:31 +03:00
stefanprodan
951fe80115 Use crd.create=false in docs 2019-07-29 15:06:30 +03:00
stefanprodan
c0a8149acb Add kubectl min version to Kustomize docs 2019-07-29 15:06:30 +03:00
stefanprodan
80b75b227d Add CRD install step to chart 2019-07-29 15:06:30 +03:00
stefanprodan
dff7de09f2 Use kubectl for CRD install 2019-07-29 15:06:30 +03:00
stefanprodan
b3bbadfccf Add v0.18.0 to changelog 2019-07-29 15:06:30 +03:00
stefanprodan
fc676e3cb7 Release v0.18.0 2019-07-29 15:06:30 +03:00
stefanprodan
860c82dff9 Remove test artifacts 2019-07-29 15:06:30 +03:00
Stefan Prodan
4829f5af7f Merge pull request #257 from weaveworks/promotion
Implement promotion finalising state
2019-07-29 15:03:05 +03:00
stefanprodan
c463b6b231 Add finalising state tests 2019-07-29 14:02:16 +03:00
stefanprodan
b2ca0c4c16 Implement finalising state
Set the canary status to finalising after routing the traffic back to the primary. Run one final loop before scaling the canary to zero so that the canary has a chance to process all inflight requests.
2019-07-29 13:52:11 +03:00
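
An illustrative status snippet (field names come from the Canary CRD shown further down; the values are made up): after traffic has been routed back to the primary, the canary reports the new phase before being scaled to zero:

```yaml
status:
  phase: Finalising
  canaryWeight: 0
  failedChecks: 0
```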
stefanprodan
69875cb3dc Add finalising status phase to CRD 2019-07-29 13:43:30 +03:00
stefanprodan
9e33a116d4 Add manual gating link to readme 2019-07-29 11:33:28 +03:00
stefanprodan
dab3d53b65 Disable supergloo e2e 2019-07-28 11:28:00 +03:00
stefanprodan
e3f8bff6fc Add Kustomize download link to docs 2019-07-27 15:51:22 +03:00
stefanprodan
0648d81d34 Use Kustomize for Istio e2e testing 2019-07-27 14:49:57 +03:00
stefanprodan
ece5c4401e Make installer work with Kustomize v3 2019-07-27 14:45:49 +03:00
stefanprodan
bfc64c7cf1 Use crd.create=false in docs 2019-07-27 13:20:55 +03:00
stefanprodan
0a2c134ece Add kubectl min version to Kustomize docs 2019-07-27 13:07:47 +03:00
stefanprodan
8bea9253c3 Add CRD install step to chart 2019-07-27 13:06:27 +03:00
stefanprodan
e1dacc3983 Use kubectl for CRD install 2019-07-26 15:52:00 +03:00
stefanprodan
0c6a7355e7 Add v0.18.0 to changelog 2019-07-26 14:05:19 +03:00
stefanprodan
83046282c3 Release v0.18.0 2019-07-26 13:53:40 +03:00
stefanprodan
65c9817295 Remove test artifacts 2019-07-26 13:51:15 +03:00
Stefan Prodan
e4905d3d35 Merge pull request #254 from weaveworks/podinfo
Use Kustomize installer in Linkerd docs
2019-07-26 13:44:51 +03:00
stefanprodan
6bc0670a7a Use Kustomize installer in Linkerd docs 2019-07-26 13:30:28 +03:00
stefanprodan
95ff6adc19 Use podinfo 1.7 in GitOps demo 2019-07-26 13:20:06 +03:00
stefanprodan
7ee51c7def Add podinfo to Kustomize installer 2019-07-26 13:19:36 +03:00
Stefan Prodan
dfa065b745 Merge pull request #251 from weaveworks/gates
Implement confirm rollout gate, hook and API
2019-07-26 01:40:35 +03:00
stefanprodan
e3b03debde Use podinfo v1.7 2019-07-26 01:25:44 +03:00
Stefan Prodan
ef759305cb Merge pull request #253 from grampelberg/master
Update Linkerd to use correct canaries directory.
2019-07-26 00:24:52 +03:00
grampelberg
ad65497d4e Update Linkerd to use correct canaries directory. 2019-07-25 11:10:52 -07:00
stefanprodan
163f5292b0 Push a notification when a canary is waiting for approval 2019-07-25 19:13:22 +03:00
stefanprodan
e07a82d024 Add manual gating to docs 2019-07-25 13:32:58 +03:00
stefanprodan
046245a8b5 Use Gloo 0.17.6 in e2e tests 2019-07-24 19:54:33 +03:00
stefanprodan
aa6a180bcc Remove Gloo NodePort from e2e tests 2019-07-24 19:44:06 +03:00
stefanprodan
c4d28e14fc Upgrade Gloo e2e to v0.17.5 2019-07-24 19:35:02 +03:00
stefanprodan
bc4bdcdc1c Upgrade Gloo e2e to v0.17.6 2019-07-24 19:21:41 +03:00
stefanprodan
be22ff9951 Bump load tester version 2019-07-24 16:28:46 +03:00
stefanprodan
f204fe53f4 Implement canary gating API with in-memory storage
POST /gate/[check|open|close]
2019-07-24 16:14:22 +03:00
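
For reference, the gate endpoints can be exercised with curl from inside the load tester pod, as sketched in the manual-gating docs added in this range (the pod name is a placeholder):

```bash
kubectl -n test exec -it flagger-loadtester-xxxx-xxxx sh
# open the gate and start or resume the rollout
curl -d '{"name": "podinfo","namespace":"test"}' http://localhost:8080/gate/open
# close the gate and pause the rollout
curl -d '{"name": "podinfo","namespace":"test"}' http://localhost:8080/gate/close
# query the gate state used by the confirm-rollout check
curl -d '{"name": "podinfo","namespace":"test"}' http://localhost:8080/gate/check
```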
stefanprodan
28e7e89047 Pause or resume analysis on confirmation gate toggle 2019-07-24 16:09:13 +03:00
stefanprodan
75d49304f3 Add confirm-rollout hook to docs 2019-07-24 12:17:11 +03:00
stefanprodan
04cbacb6e0 Implement confirm rollout gate and hook
The confirm-rollout hooks are executed before the pre-rollout hooks. Flagger will halt the canary rollout until the confirm webhook returns HTTP status 200.
2019-07-24 12:09:39 +03:00
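
The gate is configured like any other webhook; a minimal sketch mirroring the example added to the docs in this changeset:

```yaml
canaryAnalysis:
  webhooks:
    # executed before the pre-rollout hooks; the rollout is held
    # until this URL answers with HTTP 200
    - name: "gate"
      type: confirm-rollout
      url: http://flagger-loadtester.test/gate/check
```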
stefanprodan
c46c7b9e21 Add canary status conditions to docs 2019-07-24 12:04:05 +03:00
stefanprodan
919dafa567 Add gate halt and approve endpoints 2019-07-24 12:02:44 +03:00
stefanprodan
dfdcfed26e Add Waiting canary status phase
This means the canary rollout is paused (waiting for confirmation to proceed).
2019-07-24 12:00:04 +03:00
Stefan Prodan
a0a4d4cfc5 Merge pull request #248 from weaveworks/ghz
Add gRPC load testing tool
2019-07-23 12:44:04 +03:00
stefanprodan
970a589fd3 Add load tester to kustomize installer 2019-07-23 12:30:38 +03:00
stefanprodan
56d2c0952a Add gRPC load test example to docs 2019-07-22 15:16:13 +03:00
stefanprodan
4871be0345 Release loadtester v0.5.0 2019-07-22 14:57:14 +03:00
stefanprodan
e3e112e279 Add gRPC load testing tool
https://ghz.sh
2019-07-22 14:55:19 +03:00
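
The tool is driven through the load tester's cmd webhook; a sketch matching the gRPC example added to the docs below (host and port are placeholders):

```yaml
webhooks:
  - name: grpc-load-test
    url: http://flagger-loadtester.test/
    timeout: 5s
    metadata:
      type: cmd
      cmd: "ghz -z 1m -q 10 -c 2 --insecure podinfo.test:9898"
```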
Stefan Prodan
d2cbd40d89 Merge pull request #240 from weaveworks/refactor
Refactor canary change detection and status
2019-07-22 14:33:02 +03:00
stefanprodan
3786a49f00 Update Linkerd e2e to v2.4.0 2019-07-16 11:20:42 +02:00
stefanprodan
ff4aa62061 Retry canary status update on conflict 2019-07-10 11:31:20 +03:00
stefanprodan
9b6cfdeef7 Update Canary CRD helm chart and Kustomize 2019-07-10 09:55:46 +03:00
stefanprodan
9d89e0c83f Log status update error 2019-07-10 09:55:20 +03:00
stefanprodan
559cbd0d36 Pin NGINX helm chart to v1.8.2 2019-07-10 09:49:39 +03:00
stefanprodan
caea00e47f Pin NGINX helm chart to version 1.8.2 2019-07-10 09:42:49 +03:00
stefanprodan
b26542f38d Do not trigger a canary deployment on manual rollback
Save the primary spec hash and check if it matches the canary spec. If the canary hash is identical to the primary one, skip the promotion.
2019-07-10 09:08:33 +03:00
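
A hypothetical rollback sequence (image tags are illustrative): reverting the canary to the image the primary already runs produces matching spec hashes, so Flagger skips a new promotion:

```bash
# revert the canary deployment to the image version the primary is running;
# the spec hash matches the primary's, so no canary analysis is started
kubectl -n test set image deployment/podinfo \
  podinfod=quay.io/stefanprodan/podinfo:1.7.0
```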
stefanprodan
afa2d079f6 Add status conditions and descriptions to CRD 2019-07-09 17:11:13 +03:00
stefanprodan
108bf9ca65 Add initializing canary phase/status condition reason
Fix HPA reconciliation min replicas diff
2019-07-09 17:10:43 +03:00
stefanprodan
438f952128 Implement status conditions
Add Promoted status condition with the following reasons: Initialized, Progressing, Succeeded, Failed
Usage: `kubectl wait canary/app --for=condition=promoted`
Fix: #184
2019-07-09 15:22:56 +03:00
stefanprodan
3e84799644 Detect changes in pod template metadata
Use the pod template spec hash to track changes (breaking)
2019-07-09 08:52:31 +03:00
99 changed files with 1467 additions and 865 deletions

View File

@@ -78,6 +78,17 @@ jobs:
- run: test/e2e-istio.sh
- run: test/e2e-tests.sh
e2e-kubernetes-testing:
machine: true
steps:
- checkout
- attach_workspace:
at: /tmp/bin
- run: test/container-build.sh
- run: test/e2e-kind.sh
- run: test/e2e-kubernetes.sh
- run: test/e2e-kubernetes-tests.sh
e2e-smi-istio-testing:
machine: true
steps:
@@ -133,6 +144,47 @@ jobs:
- run: test/e2e-linkerd.sh
- run: test/e2e-linkerd-tests.sh
push-helm-charts:
docker:
- image: circleci/golang:1.12
steps:
- checkout
- run:
name: Install kubectl
command: sudo curl -L https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl && sudo chmod +x /usr/local/bin/kubectl
- run:
name: Install helm
command: sudo curl -L https://storage.googleapis.com/kubernetes-helm/helm-v2.14.2-linux-amd64.tar.gz | tar xz && sudo mv linux-amd64/helm /bin/helm && sudo rm -rf linux-amd64
- run:
name: Initialize helm
command: helm init --client-only --kubeconfig=$HOME/.kube/kubeconfig
- run:
name: Lint charts
command: |
helm lint ./charts/*
- run:
name: Package charts
command: |
mkdir $HOME/charts
helm package ./charts/* --destination $HOME/charts
- run:
name: Publish charts
command: |
if echo "${CIRCLE_TAG}" | grep -Eq "[0-9]+(\.[0-9]+)*(-[a-z]+)?$"; then
REPOSITORY="https://weaveworksbot:${GITHUB_TOKEN}@github.com/weaveworks/flagger.git"
git config user.email weaveworksbot@users.noreply.github.com
git config user.name weaveworksbot
git remote set-url origin ${REPOSITORY}
git checkout gh-pages
mv -f $HOME/charts/*.tgz .
helm repo index . --url https://flagger.app
git add .
git commit -m "Publish Helm charts v${CIRCLE_TAG}"
git push origin gh-pages
else
echo "Not a release! Skip charts publish"
fi
workflows:
version: 2
build-test-push:
@@ -145,12 +197,12 @@ workflows:
- e2e-istio-testing:
requires:
- build-binary
- e2e-smi-istio-testing:
requires:
- build-binary
- e2e-supergloo-testing:
- e2e-kubernetes-testing:
requires:
- build-binary
# - e2e-supergloo-testing:
# requires:
# - build-binary
- e2e-gloo-testing:
requires:
- build-binary
@@ -164,8 +216,8 @@ workflows:
requires:
- build-binary
- e2e-istio-testing
- e2e-smi-istio-testing
- e2e-supergloo-testing
- e2e-kubernetes-testing
#- e2e-supergloo-testing
- e2e-gloo-testing
- e2e-nginx-testing
- e2e-linkerd-testing
@@ -187,6 +239,14 @@ workflows:
tags:
ignore: /^chart.*/
- push-binary:
requires:
- push-container
filters:
branches:
ignore: /.*/
tags:
ignore: /^chart.*/
- push-helm-charts:
requires:
- push-container
filters:

View File

@@ -2,6 +2,45 @@
All notable changes to this project are documented in this file.
## 0.18.2 (2019-08-05)
Fixes multi-port support for Istio
#### Fixes
- Fix port discovery for multiple port services [#267](https://github.com/weaveworks/flagger/pull/267)
#### Improvements
- Update e2e testing to Istio v1.2.3, Gloo v0.18.8 and NGINX ingress chart v1.12.1 [#268](https://github.com/weaveworks/flagger/pull/268)
## 0.18.1 (2019-07-30)
Fixes Blue/Green style deployments for Kubernetes and Linkerd providers
#### Fixes
- Fix Blue/Green metrics provider and add e2e tests [#261](https://github.com/weaveworks/flagger/pull/261)
## 0.18.0 (2019-07-29)
Adds support for [manual gating](https://docs.flagger.app/how-it-works#manual-gating) and pausing/resuming an ongoing analysis
#### Features
- Implement confirm rollout gate, hook and API [#251](https://github.com/weaveworks/flagger/pull/251)
#### Improvements
- Refactor canary change detection and status [#240](https://github.com/weaveworks/flagger/pull/240)
- Implement finalising state [#257](https://github.com/weaveworks/flagger/pull/257)
- Add gRPC load testing tool [#248](https://github.com/weaveworks/flagger/pull/248)
#### Breaking changes
- Due to the status sub-resource changes in [#240](https://github.com/weaveworks/flagger/pull/240), when upgrading Flagger the canaries' status phase will be reset to `Initialized`
- Upgrading Flagger with Helm will fail due to Helm's poor support of CRDs, see [workaround](https://github.com/weaveworks/flagger/issues/223)
## 0.17.0 (2019-07-08)
Adds support for Linkerd (SMI Traffic Split API), MS Teams notifications and HA mode with leader election

View File

@@ -13,10 +13,15 @@ RUN curl -sSL "https://get.helm.sh/helm-v2.12.3-linux-amd64.tar.gz" | tar xvz &&
chmod +x linux-amd64/helm && mv linux-amd64/helm /usr/local/bin/helm && \
rm -rf linux-amd64
RUN curl -sSL "https://github.com/bojand/ghz/releases/download/v0.39.0/ghz_0.39.0_Linux_x86_64.tar.gz" | tar xz -C /tmp && \
mv /tmp/ghz /usr/local/bin && chmod +x /usr/local/bin/ghz && rm -rf /tmp/ghz-web
RUN ls /tmp
COPY ./bin/loadtester .
RUN chown -R app:app ./
USER app
ENTRYPOINT ["./loadtester"]
ENTRYPOINT ["./loadtester"]

View File

@@ -63,8 +63,9 @@ test: test-fmt test-codegen
helm-package:
cd charts/ && helm package ./*
mv charts/*.tgz docs/
helm repo index docs --url https://weaveworks.github.io/flagger --merge ./docs/index.yaml
mv charts/*.tgz bin/
curl -s https://raw.githubusercontent.com/weaveworks/flagger/gh-pages/index.yaml > ./bin/index.yaml
helm repo index bin --url https://flagger.app --merge ./bin/index.yaml
helm-up:
helm upgrade --install flagger ./charts/flagger --namespace=istio-system --set crd.create=false

View File

@@ -35,6 +35,7 @@ Flagger documentation can be found at [docs.flagger.app](https://docs.flagger.ap
* [Custom metrics](https://docs.flagger.app/how-it-works#custom-metrics)
* [Webhooks](https://docs.flagger.app/how-it-works#webhooks)
* [Load testing](https://docs.flagger.app/how-it-works#load-testing)
* [Manual gating](https://docs.flagger.app/how-it-works#manual-gating)
* [FAQ](https://docs.flagger.app/faq)
* Usage
* [Istio canary deployments](https://docs.flagger.app/usage/progressive-delivery)

View File

@@ -25,7 +25,7 @@ spec:
spec:
containers:
- name: podinfod
image: quay.io/stefanprodan/podinfo:1.4.0
image: quay.io/stefanprodan/podinfo:1.7.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9898

View File

@@ -25,7 +25,7 @@ spec:
spec:
containers:
- name: podinfod
image: quay.io/stefanprodan/podinfo:1.4.0
image: quay.io/stefanprodan/podinfo:1.7.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9898

View File

@@ -25,7 +25,7 @@ spec:
spec:
containers:
- name: podinfod
image: quay.io/stefanprodan/podinfo:1.4.0
image: quay.io/stefanprodan/podinfo:1.7.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9898

View File

@@ -5,17 +5,17 @@ metadata:
namespace: test
annotations:
flux.weave.works/automated: "true"
flux.weave.works/tag.chart-image: regexp:^1.4.*
flux.weave.works/tag.chart-image: regexp:^1.7.*
spec:
releaseName: backend
chart:
repository: https://flagger.app/
name: podinfo
version: 2.0.0
version: 2.2.0
values:
image:
repository: quay.io/stefanprodan/podinfo
tag: 1.4.0
tag: 1.7.0
httpServer:
timeout: 30s
canary:

View File

@@ -5,17 +5,17 @@ metadata:
namespace: test
annotations:
flux.weave.works/automated: "true"
flux.weave.works/tag.chart-image: semver:~1.4
flux.weave.works/tag.chart-image: semver:~1.7
spec:
releaseName: frontend
chart:
repository: https://flagger.app/
name: podinfo
version: 2.0.0
version: 2.2.0
values:
image:
repository: quay.io/stefanprodan/podinfo
tag: 1.4.0
tag: 1.7.0
backend: http://backend-podinfo:9898/echo
canary:
enabled: true

View File

@@ -11,8 +11,8 @@ spec:
chart:
repository: https://flagger.app/
name: loadtester
version: 0.1.0
version: 0.6.0
values:
image:
repository: quay.io/stefanprodan/flagger-loadtester
tag: 0.1.0
repository: weaveworks/flagger-loadtester
tag: 0.6.1

View File

@@ -1,59 +0,0 @@
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
# the maximum time in seconds for the canary deployment
# to make progress before it is rollback (default 600s)
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: podinfo
service:
# container port
port: 9898
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
# Istio virtual service host names (optional)
hosts:
- app.iowa.weavedx.com
canaryAnalysis:
# schedule interval (default 60s)
interval: 10s
# max number of failed metric checks before rollback
threshold: 10
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 5
# Istio Prometheus checks
metrics:
- name: request-success-rate
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: request-duration
# maximum req duration P99
# milliseconds
threshold: 500
interval: 30s
# external checks (optional)
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"

View File

@@ -1,16 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: podinfo-config-env
namespace: test
data:
color: blue
---
apiVersion: v1
kind: ConfigMap
metadata:
name: podinfo-config-vol
namespace: test
data:
output: console
textmode: "true"

View File

@@ -1,89 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: podinfo
namespace: test
labels:
app: podinfo
spec:
minReadySeconds: 5
revisionHistoryLimit: 5
progressDeadlineSeconds: 60
strategy:
rollingUpdate:
maxUnavailable: 0
type: RollingUpdate
selector:
matchLabels:
app: podinfo
template:
metadata:
annotations:
prometheus.io/scrape: "true"
labels:
app: podinfo
spec:
containers:
- name: podinfod
image: quay.io/stefanprodan/podinfo:1.3.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9898
name: http
protocol: TCP
command:
- ./podinfo
- --port=9898
- --level=info
- --random-delay=false
- --random-error=false
env:
- name: PODINFO_UI_COLOR
valueFrom:
configMapKeyRef:
name: podinfo-config-env
key: color
- name: SECRET_USER
valueFrom:
secretKeyRef:
name: podinfo-secret-env
key: user
livenessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/healthz
initialDelaySeconds: 5
timeoutSeconds: 5
readinessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/readyz
initialDelaySeconds: 5
timeoutSeconds: 5
resources:
limits:
cpu: 2000m
memory: 512Mi
requests:
cpu: 100m
memory: 64Mi
volumeMounts:
- name: configs
mountPath: /etc/podinfo/configs
readOnly: true
- name: secrets
mountPath: /etc/podinfo/secrets
readOnly: true
volumes:
- name: configs
configMap:
name: podinfo-config-vol
- name: secrets
secret:
secretName: podinfo-secret-vol

View File

@@ -1,16 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: podinfo-secret-env
namespace: test
data:
password: cGFzc3dvcmQ=
user: YWRtaW4=
---
apiVersion: v1
kind: Secret
metadata:
name: podinfo-secret-vol
namespace: test
data:
key: cGFzc3dvcmQ=

View File

@@ -102,17 +102,23 @@ spec:
canaryAnalysis:
properties:
interval:
description: Canary schedule interval
type: string
pattern: "^[0-9]+(m|s)"
iterations:
description: Number of checks to run for A/B Testing and Blue/Green
type: number
threshold:
description: Max number of failed checks before rollback
type: number
maxWeight:
description: Max traffic percentage routed to canary
type: number
stepWeight:
description: Canary incremental traffic percentage step
type: number
metrics:
description: Prometheus query list for this canary
type: array
properties:
items:
@@ -120,15 +126,20 @@ spec:
required: ['name', 'threshold']
properties:
name:
description: Name of the Prometheus metric
type: string
interval:
description: Interval of the promql query
type: string
pattern: "^[0-9]+(m|s)"
threshold:
description: Max scalar value accepted for this metric
type: number
query:
description: Prometheus query
type: string
webhooks:
description: Webhook list for this canary
type: array
properties:
items:
@@ -136,8 +147,10 @@ spec:
required: ['name', 'url', 'timeout']
properties:
name:
description: Name of the webhook
type: string
type:
description: Type of the webhook pre, post or during rollout
type: string
enum:
- ""
@@ -145,28 +158,68 @@ spec:
- rollout
- post-rollout
url:
description: URL address of this webhook
type: string
format: url
timeout:
description: Request timeout for this webhook
type: string
pattern: "^[0-9]+(m|s)"
status:
properties:
phase:
description: Analysis phase of this canary
type: string
enum:
- ""
- Initializing
- Initialized
- Waiting
- Progressing
- Finalising
- Succeeded
- Failed
canaryWeight:
description: Traffic weight percentage routed to canary
type: number
failedChecks:
description: Failed check count of the current canary analysis
type: number
iterations:
description: Iteration count of the current canary analysis
type: number
lastAppliedSpec:
description: LastAppliedSpec of this canary
type: string
lastTransitionTime:
description: LastTransitionTime of this canary
format: date-time
type: string
conditions:
description: Status conditions of this canary
type: array
properties:
items:
type: object
required: ['type', 'status', 'reason']
properties:
lastTransitionTime:
description: LastTransitionTime of this condition
format: date-time
type: string
lastUpdateTime:
description: LastUpdateTime of this condition
format: date-time
type: string
message:
description: Message associated with this condition
type: string
reason:
description: Reason for the current status of this condition
type: string
status:
description: Status of this condition
type: string
type:
description: Type of this condition
type: string

View File

@@ -22,7 +22,7 @@ spec:
serviceAccountName: flagger
containers:
- name: flagger
image: weaveworks/flagger:0.17.0
image: weaveworks/flagger:0.18.2
imagePullPolicy: IfNotPresent
ports:
- name: http

View File

@@ -25,7 +25,7 @@ spec:
spec:
containers:
- name: podinfod
image: quay.io/stefanprodan/podinfo:1.4.0
image: quay.io/stefanprodan/podinfo:1.7.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9898

View File

@@ -17,7 +17,7 @@ spec:
spec:
containers:
- name: loadtester
image: weaveworks/flagger-loadtester:0.4.0
image: weaveworks/flagger-loadtester:0.6.1
imagePullPolicy: IfNotPresent
ports:
- name: http

View File

@@ -23,7 +23,7 @@ spec:
spec:
containers:
- name: podinfod
image: quay.io/stefanprodan/podinfo:1.4.0
image: quay.io/stefanprodan/podinfo:1.7.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9898

View File

@@ -1,45 +0,0 @@
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: podinfo
namespace: test
spec:
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
hosts:
- podinfo.istio.weavedx.com
- podinfo
http:
- route:
- destination:
host: podinfo
subset: primary
weight: 50
- destination:
host: podinfo
subset: canary
weight: 50
---
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
name: podinfo-destination
namespace: test
spec:
host: podinfo
trafficPolicy:
loadBalancer:
consistentHash:
httpCookie:
name: istiouser
ttl: 30s
subsets:
- name: primary
labels:
app: podinfo
role: primary
- name: canary
labels:
app: podinfo
role: canary

View File

@@ -1,43 +0,0 @@
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: podinfo
namespace: test
spec:
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
hosts:
- app.istio.weavedx.com
- podinfo
http:
- match:
- headers:
user-agent:
regex: ^(?!.*Chrome)(?=.*\bSafari\b).*$
uri:
prefix: "/version/"
rewrite:
uri: /api/info
route:
- destination:
host: podinfo-primary
port:
number: 9898
weight: 0
- destination:
host: podinfo
port:
number: 9898
weight: 100
- match:
- uri:
prefix: "/version/"
rewrite:
uri: /api/info
route:
- destination:
host: podinfo-primary
port:
number: 9898
weight: 100

View File

@@ -1,25 +0,0 @@
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: podinfo
namespace: test
labels:
app: podinfo
spec:
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
hosts:
- podinfo.iowa.weavedx.com
- podinfo
http:
- route:
- destination:
host: podinfo-primary
port:
number: 9898
weight: 100
mirror:
host: podinfo
port:
number: 9898

View File

@@ -1,26 +0,0 @@
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: podinfo
namespace: test
labels:
app: podinfo
spec:
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
hosts:
- podinfo.iowa.weavedx.com
- podinfo
http:
- route:
- destination:
host: podinfo-primary
port:
number: 9898
weight: 100
- destination:
host: podinfo
port:
number: 9898
weight: 0

View File

@@ -1,13 +0,0 @@
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
name: podinfo-canary
namespace: test
spec:
host: podinfo-canary
trafficPolicy:
loadBalancer:
consistentHash:
httpCookie:
name: user
ttl: 0s

View File

@@ -1,23 +0,0 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: podinfo
namespace: test
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
minReplicas: 2
maxReplicas: 2
metrics:
- type: Resource
resource:
name: cpu
# scale up if usage is above
# 99% of the requested CPU (100m)
targetAverageUtilization: 99
- type: Resource
resource:
name: memory
targetAverageValue: 200Mi

View File

@@ -1,14 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: podinfo-canary
namespace: test
spec:
type: ClusterIP
selector:
app: podinfo
ports:
- name: http
port: 9898
protocol: TCP
targetPort: http

View File

@@ -1,71 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: podinfo-primary
namespace: test
labels:
app: podinfo-primary
spec:
replicas: 1
strategy:
rollingUpdate:
maxUnavailable: 0
type: RollingUpdate
selector:
matchLabels:
app: podinfo-primary
template:
metadata:
annotations:
prometheus.io/scrape: "true"
labels:
app: podinfo-primary
spec:
containers:
- name: podinfod
image: quay.io/stefanprodan/podinfo:1.4.1
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9898
name: http
protocol: TCP
command:
- ./podinfo
- --port=9898
- --level=info
- --random-delay=false
- --random-error=false
env:
- name: PODINFO_UI_COLOR
value: blue
livenessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/healthz
initialDelaySeconds: 5
failureThreshold: 3
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/readyz
initialDelaySeconds: 5
failureThreshold: 3
periodSeconds: 3
successThreshold: 1
timeoutSeconds: 1
resources:
limits:
cpu: 2000m
memory: 512Mi
requests:
cpu: 10m
memory: 64Mi

View File

@@ -1,13 +0,0 @@
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
name: podinfo-primary
namespace: test
spec:
host: podinfo-primary
trafficPolicy:
loadBalancer:
consistentHash:
httpCookie:
name: user
ttl: 0s

View File

@@ -1,19 +0,0 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: podinfo-primary
namespace: test
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo-primary
minReplicas: 2
maxReplicas: 2
metrics:
- type: Resource
resource:
name: cpu
# scale up if usage is above
# 99% of the requested CPU (100m)
targetAverageUtilization: 99

View File

@@ -1,16 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: podinfo-primary
namespace: test
labels:
app: podinfo-primary
spec:
type: ClusterIP
selector:
app: podinfo-primary
ports:
- name: http
port: 9898
protocol: TCP
targetPort: http

View File

@@ -1,14 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: podinfo
namespace: test
spec:
type: ClusterIP
selector:
app: podinfo-primary
ports:
- name: http
port: 9898
protocol: TCP
targetPort: http

View File

@@ -1,23 +0,0 @@
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: podinfo
namespace: test
labels:
app: podinfo
spec:
gateways:
- mesh
- public-gateway.istio-system.svc.cluster.local
hosts:
- podinfo
- app.istio.weavedx.com
http:
- route:
- destination:
host: podinfo-primary
weight: 50
- destination:
host: podinfo-canary
weight: 50
timeout: 5s

View File

@@ -1,10 +1,10 @@
apiVersion: v1
name: flagger
version: 0.17.0
appVersion: 0.17.0
version: 0.18.2
appVersion: 0.18.2
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio, App Mesh or NGINX routing for traffic shifting and Prometheus metrics for canary analysis.
description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio, Linkerd, App Mesh, Gloo or NGINX routing for traffic shifting and Prometheus metrics for canary analysis.
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/flagger-icon.png
sources:
@@ -17,4 +17,5 @@ keywords:
- canary
- istio
- appmesh
- linkerd
- gitops

View File

@@ -1,10 +1,10 @@
# Flagger
[Flagger](https://github.com/weaveworks/flagger) is a Kubernetes operator that automates the promotion of
canary deployments using Istio, App Mesh, NGINX or Gloo routing for traffic shifting and Prometheus metrics for canary analysis.
canary deployments using Istio, Linkerd, App Mesh, NGINX or Gloo routing for traffic shifting and Prometheus metrics for canary analysis.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators
like HTTP requests success rate, requests average duration and pods health.
Based on the KPIs analysis a canary is promoted or aborted and the analysis result is published to Slack.
Based on the KPIs analysis a canary is promoted or aborted and the analysis result is published to Slack or MS Teams.
## Prerequisites
@@ -16,7 +16,13 @@ Based on the KPIs analysis a canary is promoted or aborted and the analysis resu
Add Flagger Helm repository:
```console
helm repo add flagger https://flagger.app
$ helm repo add flagger https://flagger.app
```
Install Flagger's custom resource definitions:
```console
$ kubectl apply -f https://raw.githubusercontent.com/weaveworks/flagger/master/artifacts/flagger/crd.yaml
```
To install the chart with the release name `flagger` for Istio:
@@ -24,6 +30,7 @@ To install the chart with the release name `flagger` for Istio:
```console
$ helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set crd.create=false \
--set meshProvider=istio \
--set metricsServer=http://prometheus:9090
```
@@ -33,6 +40,7 @@ To install the chart with the release name `flagger` for Linkerd:
```console
$ helm upgrade -i flagger flagger/flagger \
--namespace=linkerd \
--set crd.create=false \
--set meshProvider=linkerd \
--set metricsServer=http://linkerd-prometheus:9090
```
@@ -82,6 +90,7 @@ Specify each parameter using the `--set key=value[,key=value]` argument to `helm
```console
$ helm upgrade -i flagger flagger/flagger \
--namespace istio-system \
--set crd.create=false \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general
```

View File

@@ -103,17 +103,23 @@ spec:
canaryAnalysis:
properties:
interval:
description: Canary schedule interval
type: string
pattern: "^[0-9]+(m|s)"
iterations:
description: Number of checks to run for A/B Testing and Blue/Green
type: number
threshold:
description: Max number of failed checks before rollback
type: number
maxWeight:
description: Max traffic percentage routed to canary
type: number
stepWeight:
description: Canary incremental traffic percentage step
type: number
metrics:
description: Prometheus query list for this canary
type: array
properties:
items:
@@ -121,15 +127,20 @@ spec:
required: ['name', 'threshold']
properties:
name:
description: Name of the Prometheus metric
type: string
interval:
description: Interval of the promql query
type: string
pattern: "^[0-9]+(m|s)"
threshold:
description: Max scalar value accepted for this metric
type: number
query:
description: Prometheus query
type: string
webhooks:
description: Webhook list for this canary
type: array
properties:
items:
@@ -137,8 +148,10 @@ spec:
required: ['name', 'url', 'timeout']
properties:
name:
description: Name of the webhook
type: string
type:
description: Type of the webhook pre, post or during rollout
type: string
enum:
- ""
@@ -146,29 +159,69 @@ spec:
- rollout
- post-rollout
url:
description: URL address of this webhook
type: string
format: url
timeout:
description: Request timeout for this webhook
type: string
pattern: "^[0-9]+(m|s)"
status:
properties:
phase:
description: Analysis phase of this canary
type: string
enum:
- ""
- Initializing
- Initialized
- Waiting
- Progressing
- Finalising
- Succeeded
- Failed
canaryWeight:
description: Traffic weight percentage routed to canary
type: number
failedChecks:
description: Failed check count of the current canary analysis
type: number
iterations:
description: Iteration count of the current canary analysis
type: number
lastAppliedSpec:
description: LastAppliedSpec of this canary
type: string
lastTransitionTime:
description: LastTransitionTime of this canary
format: date-time
type: string
conditions:
description: Status conditions of this canary
type: array
properties:
items:
type: object
required: ['type', 'status', 'reason']
properties:
lastTransitionTime:
description: LastTransitionTime of this condition
format: date-time
type: string
lastUpdateTime:
description: LastUpdateTime of this condition
format: date-time
type: string
message:
description: Message associated with this condition
type: string
reason:
description: Reason for the current status of this condition
type: string
status:
description: Status of this condition
type: string
type:
description: Type of this condition
type: string
{{- end }}

View File

@@ -32,6 +32,10 @@ spec:
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
topologyKey: kubernetes.io/hostname
{{- if .Values.image.pullSecret }}
imagePullSecrets:
- name: {{ .Values.image.pullSecret }}
{{- end }}
containers:
- name: flagger
securityContext:

View File

@@ -2,8 +2,9 @@
image:
repository: weaveworks/flagger
tag: 0.17.0
tag: 0.18.2
pullPolicy: IfNotPresent
pullSecret:
metricsServer: "http://prometheus:9090"

View File

@@ -1,10 +1,10 @@
apiVersion: v1
name: loadtester
version: 0.4.1
appVersion: 0.4.0
version: 0.6.0
appVersion: 0.6.1
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger's load testing services based on rakyll/hey that generates traffic during canary analysis when configured as a webhook.
description: Flagger's load testing services based on rakyll/hey and bojand/ghz that generates traffic during canary analysis when configured as a webhook.
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/flagger-icon.png
sources:

View File

@@ -2,7 +2,7 @@ replicaCount: 1
image:
repository: weaveworks/flagger-loadtester
tag: 0.4.0
tag: 0.6.1
pullPolicy: IfNotPresent
logLevel: info

View File

@@ -1,6 +1,6 @@
apiVersion: v1
version: 2.2.0
appVersion: 1.4.0
version: 2.3.0
appVersion: 1.7.0
name: podinfo
engine: gotpl
description: Flagger canary deployment demo chart

View File

@@ -1,7 +1,7 @@
# Default values for podinfo.
image:
repository: quay.io/stefanprodan/podinfo
tag: 1.4.0
tag: 1.7.0
pullPolicy: IfNotPresent
service:

View File

@@ -10,7 +10,7 @@ import (
"time"
)
var VERSION = "0.4.0"
var VERSION = "0.6.1"
var (
logLevel string
port string
@@ -47,5 +47,7 @@ func main() {
go taskRunner.Start(100*time.Millisecond, stopCh)
logger.Infof("Starting load tester v%s API on port %s", VERSION, port)
loadtester.ListenAndServe(port, time.Minute, logger, taskRunner, stopCh)
gateStorage := loadtester.NewGateStorage("in-memory")
loadtester.ListenAndServe(port, time.Minute, logger, taskRunner, gateStorage, stopCh)
}

View File

@@ -232,8 +232,7 @@ spec:
mode: ISTIO_MUTUAL
```
Both port `8080` and `9090` will be added to the ClusterIP services but the virtual service
will point to the port specified in `spec.service.port`.
Both port `8080` and `9090` will be added to the ClusterIP services.
### Label selectors

View File

@@ -105,6 +105,54 @@ convention you can specify your label with the `-selector-labels` flag.
The target deployment should expose a TCP port that will be used by Flagger to create the ClusterIP Service and
the Istio Virtual Service. The container port from the target deployment should match the `service.port` value.
### Canary status
Get the current status of canary deployments cluster wide:
```bash
kubectl get canaries --all-namespaces
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
test podinfo Progressing 15 2019-06-30T14:05:07Z
prod frontend Succeeded 0 2019-06-30T16:15:07Z
prod backend Failed 0 2019-06-30T17:05:07Z
```
The status condition reflects the last known state of the canary analysis:
```bash
kubectl -n test get canary/podinfo -oyaml | awk '/status/,0'
```
A successful rollout status:
```yaml
status:
canaryWeight: 0
failedChecks: 0
iterations: 0
lastAppliedSpec: "14788816656920327485"
lastPromotedSpec: "14788816656920327485"
conditions:
- lastTransitionTime: "2019-07-10T08:23:18Z"
lastUpdateTime: "2019-07-10T08:23:18Z"
message: Canary analysis completed successfully, promotion finished.
reason: Succeeded
status: "True"
type: Promoted
```
The `Promoted` status condition can have one of the following reasons:
Initialized, Waiting, Progressing, Finalising, Succeeded or Failed.
A failed canary will have the promoted status set to `false`,
the reason to `failed` and the last applied spec will be different to the last promoted one.
Wait for a successful rollout:
```bash
kubectl wait canary/podinfo --for=condition=promoted
```
### Istio routing
Flagger creates an Istio Virtual Service and Destination Rules based on the Canary service spec.
@@ -608,6 +656,8 @@ The canary analysis can be extended with webhooks. Flagger will call each webhoo
determine from the response status code (HTTP 2xx) if the canary is failing or not.
There are three types of hooks:
* Confirm-rollout hooks are executed before scaling up the canary deployment and can be used for manual approval.
The rollout is paused until the hook returns a successful HTTP status code.
* Pre-rollout hooks are executed before routing traffic to canary.
The canary advancement is paused if a pre-rollout hook fails and if the number of failures reaches the
threshold the canary will be rolled back.
@@ -621,6 +671,9 @@ Spec:
```yaml
canaryAnalysis:
webhooks:
- name: "start gate"
type: confirm-rollout
url: http://flagger-loadtester.test/gate/approve
- name: "smoke test"
type: pre-rollout
url: http://flagger-helmtester.kube-system/
@@ -677,7 +730,7 @@ that generates traffic during analysis when configured as a webhook.
![Flagger Load Testing Webhook](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-load-testing.png)
First you need to deploy the load test runner in a namespace with Istio sidecar injection enabled:
First you need to deploy the load test runner in a namespace with sidecar injection enabled:
```bash
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
@@ -720,7 +773,7 @@ When the canary analysis starts, Flagger will call the webhooks and the load tes
in the background, if they are not already running. This will ensure that during the
analysis, the `podinfo-canary.test` service will receive a steady stream of GET and POST requests.
If your workload is exposed outside the mesh with the Istio Gateway and TLS you can point `hey` to the
If your workload is exposed outside the mesh you can point `hey` to the
public URL and use HTTP2.
```yaml
@@ -733,6 +786,18 @@ webhooks:
cmd: "hey -z 1m -q 10 -c 2 -h2 https://podinfo.example.com/"
```
For gRPC services you can use [bojand/ghz](https://github.com/bojand/ghz) which is a similar tool to Hey but for gRPC:
```yaml
webhooks:
- name: grpc-load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
type: cmd
cmd: "ghz -z 1m -q 10 -c 2 --insecure podinfo.test:9898"
```
The load tester can run arbitrary commands as long as the binary is present in the container image.
For example if you want to replace `hey` with another CLI, you can create your own Docker image:
@@ -820,3 +885,66 @@ As an alternative to Helm you can use the [Bash Automated Testing System](https:
```
Note that you should create a ConfigMap with your Bats tests and mount it inside the tester container.
### Manual Gating
For manual approval of a canary deployment you can use the `confirm-rollout` webhook.
The confirmation hooks are executed before the pre-rollout hooks.
Flagger will halt the canary traffic shifting and analysis until the confirm webhook returns HTTP status 200.
Manual gating with Flagger's tester:
```yaml
canaryAnalysis:
webhooks:
- name: "gate"
type: confirm-rollout
url: http://flagger-loadtester.test/gate/halt
```
The `/gate/halt` endpoint returns HTTP 403, thus blocking the rollout.
If you have notifications enabled, Flagger will post a message to Slack or MS Teams if a canary rollout is waiting for approval.
Change the URL to `/gate/approve` to start the canary analysis:
```yaml
canaryAnalysis:
webhooks:
- name: "gate"
type: confirm-rollout
url: http://flagger-loadtester.test/gate/approve
```
Manual gating can be driven with Flagger's tester API. Set the confirmation URL to `/gate/check`:
```yaml
canaryAnalysis:
webhooks:
- name: "ask for confirmation"
type: confirm-rollout
url: http://flagger-loadtester.test/gate/check
```
By default the gate is closed; you can start or resume the canary rollout with:
```bash
kubectl -n test exec -it flagger-loadtester-xxxx-xxxx sh
curl -d '{"name": "podinfo","namespace":"test"}' http://localhost:8080/gate/open
```
You can pause the rollout at any time with:
```bash
curl -d '{"name": "podinfo","namespace":"test"}' http://localhost:8080/gate/close
```
If a canary analysis is paused the status will change to waiting:
```bash
kubectl get canary/podinfo
NAME STATUS WEIGHT
podinfo Waiting 0
```

View File

@@ -133,11 +133,18 @@ Add Flagger Helm repository:
helm repo add flagger https://flagger.app
```
Install Flagger's Canary CRD:
```yaml
kubectl apply -f https://raw.githubusercontent.com/weaveworks/flagger/master/artifacts/flagger/crd.yaml
```
Deploy Flagger and Prometheus in the _**appmesh-system**_ namespace:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=appmesh-system \
--set crd.create=false \
--set meshProvider=appmesh \
--set prometheus.install=true
```
@@ -150,6 +157,7 @@ You can enable **Slack** notifications with:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=appmesh-system \
--set crd.create=false \
--set meshProvider=appmesh \
--set metricsServer=http://prometheus.appmesh:9090 \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \

View File

@@ -354,11 +354,18 @@ Add Flagger Helm repository:
helm repo add flagger https://flagger.app
```
Install Flagger's Canary CRD:
```yaml
kubectl apply -f https://raw.githubusercontent.com/weaveworks/flagger/master/artifacts/flagger/crd.yaml
```
Deploy Flagger in the `istio-system` namespace with Slack notifications enabled:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set crd.create=false \
--set metricsServer=http://prometheus.istio-system:9090 \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general \

View File

@@ -17,11 +17,18 @@ Add Flagger Helm repository:
helm repo add flagger https://flagger.app
```
Install Flagger's Canary CRD:
```yaml
kubectl apply -f https://raw.githubusercontent.com/weaveworks/flagger/master/artifacts/flagger/crd.yaml
```
Deploy Flagger for Istio:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set crd.create=false \
--set meshProvider=istio \
--set metricsServer=http://prometheus:9090
```
@@ -31,6 +38,7 @@ Deploy Flagger for Linkerd:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=linkerd \
--set crd.create=false \
--set meshProvider=linkerd \
--set metricsServer=http://linkerd-prometheus:9090
```
@@ -42,6 +50,7 @@ Enable **Slack** notifications:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set crd.create=false \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general \
--set slack.user=flagger
@@ -52,6 +61,7 @@ Enable **Microsoft Teams** notifications:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set crd.create=false \
--set msteams.url=https://outlook.office.com/webhook/YOUR/TEAMS/WEBHOOK
```
@@ -131,25 +141,38 @@ As an alternative to Helm, Flagger can be installed with Kustomize.
Install Flagger for Istio:
```bash
kubectl apply -k github.com/weaveworks/flagger/kustomize/istio
kubectl apply -k github.com/weaveworks/flagger//kustomize/istio
```
This deploys Flagger in the `istio-system` namespace and sets the metrics server URL to `http://prometheus.istio-system:9090`.
This deploys Flagger in the `istio-system` namespace and sets the metrics server URL to Istio's Prometheus instance.
Note that you'll need kubectl 1.14 to run the above command or you can download the
[kustomize binary](https://github.com/kubernetes-sigs/kustomize/releases) and run:
```bash
kustomize build github.com/weaveworks/flagger//kustomize/istio | kubectl apply -f -
```
Install Flagger for Linkerd:
```bash
kubectl apply -k github.com/weaveworks/flagger/kustomize/linkerd
kubectl apply -k github.com/weaveworks/flagger//kustomize/linkerd
```
This deploys Flagger in the `linkerd` namespace and sets the metrics server URL to `http://linkerd-prometheus.linkerd:9090`.
This deploys Flagger in the `linkerd` namespace and sets the metrics server URL to Linkerd's Prometheus instance.
If you want to install a specific Flagger release, add the version number to the URL:
```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/linkerd?ref=0.18.0
```
**Generic installer**
Install Flagger and Prometheus:
```bash
kubectl apply -k github.com/weaveworks/flagger/kustomize/kubernetes
kubectl apply -k github.com/weaveworks/flagger//kustomize/kubernetes
```
This deploys Flagger and Prometheus in the `flagger-system` namespace,
@@ -158,7 +181,7 @@ sets the metrics server URL to `http://flagger-prometheus.flagger-system:9090` a
The Prometheus instance has a two hours data retention and is configured to scrape all pods in your cluster that
have the `prometheus.io/scrape: "true"` annotation.
To target a specific provider you need to specify it in the canary custom resource:
To target a different provider you can specify it in the canary custom resource:
```yaml
apiVersion: flagger.app/v1alpha3
@@ -167,7 +190,7 @@ metadata:
name: app
namespace: test
spec:
# can be: kubernetes, istio, appmesh, linkerd, smi, nginx, gloo, supergloo
# can be: kubernetes, istio, linkerd, appmesh, nginx, gloo
# use the kubernetes provider for Blue/Green style deployments
provider: nginx
```
@@ -213,3 +236,5 @@ Install Flagger with Slack:
```bash
kubectl apply -k .
```
If you want to use MS Teams instead of Slack, replace `-slack-url` with `-msteams-url` and set the webhook address to `https://outlook.office.com/webhook/YOUR/TEAMS/WEBHOOK`.

View File

@@ -115,11 +115,18 @@ Add Flagger Helm repository:
helm repo add flagger https://flagger.app
```
Install Flagger's Canary CRD:
```yaml
kubectl apply -f https://raw.githubusercontent.com/weaveworks/flagger/master/artifacts/flagger/crd.yaml
```
Deploy Flagger in the _**istio-system**_ namespace and set the service mesh provider to SuperGloo:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set crd.create=false \
--set metricsServer=http://prometheus.istio-system:9090 \
--set meshProvider=supergloo:istio.supergloo-system
```

View File

@@ -138,7 +138,7 @@ helm upgrade -i frontend flagger/podinfo/ \
--reuse-values \
--set canary.loadtest.enabled=true \
--set canary.helmtest.enabled=true \
--set image.tag=1.4.1
--set image.tag=1.7.1
```
Flagger detects that the deployment revision changed and starts the canary analysis:
@@ -283,17 +283,17 @@ metadata:
namespace: test
annotations:
flux.weave.works/automated: "true"
flux.weave.works/tag.chart-image: semver:~1.4
flux.weave.works/tag.chart-image: semver:~1.7
spec:
releaseName: frontend
chart:
repository: https://stefanprodan.github.io/flagger/
name: podinfo
version: 2.0.0
version: 2.3.0
values:
image:
repository: quay.io/stefanprodan/podinfo
tag: 1.4.0
tag: 1.7.0
backend: http://backend-podinfo:9898/echo
canary:
enabled: true
@@ -311,7 +311,7 @@ In the `chart` section I've defined the release source by specifying the Helm re
In the `values` section I've overwritten the defaults set in values.yaml.
With the `flux.weave.works` annotations I instruct Flux to automate this release.
When an image tag in the sem ver range of `1.4.0 - 1.4.99` is pushed to Quay,
When an image tag in the sem ver range of `1.7.0 - 1.7.99` is pushed to Quay,
Flux will upgrade the Helm release and from there Flagger will pick up the change and start a canary deployment.
Install [Weave Flux](https://github.com/weaveworks/flux) and its Helm Operator by specifying your Git repo URL:
@@ -321,6 +321,7 @@ helm repo add weaveworks https://weaveworks.github.io/flux
helm install --name flux \
--set helmOperator.create=true \
--set helmOperator.createCRD=true \
--set git.url=git@github.com:<USERNAME>/<REPOSITORY> \
--namespace flux \
weaveworks/flux
@@ -343,9 +344,9 @@ launch the `frontend` and `backend` apps.
A CI/CD pipeline for the `frontend` release could look like this:
* cut a release from the master branch of the podinfo code repo with the git tag `1.4.1`
* CI builds the image and pushes the `podinfo:1.4.1` image to the container registry
* Flux scans the registry and updates the Helm release `image.tag` to `1.4.1`
* cut a release from the master branch of the podinfo code repo with the git tag `1.7.1`
* CI builds the image and pushes the `podinfo:1.7.1` image to the container registry
* Flux scans the registry and updates the Helm release `image.tag` to `1.7.1`
* Flux commits and push the change to the cluster repo
* Flux applies the updated Helm release on the cluster
* Flux Helm Operator picks up the change and calls Tiller to upgrade the release
@@ -355,7 +356,7 @@ A CI/CD pipeline for the `frontend` release could look like this:
* Based on the analysis result the canary deployment is promoted to production or rolled back
* Flagger sends a Slack notification with the canary result
If the canary fails, fix the bug, do another patch release eg `1.4.2` and the whole process will run again.
If the canary fails, fix the bug, do another patch release eg `1.7.2` and the whole process will run again.
A canary deployment can fail due to any of the following reasons:

View File

@@ -236,7 +236,7 @@ Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.1
podinfod=quay.io/stefanprodan/podinfo:1.7.1
```
Flagger detects that the deployment revision changed and starts a new rollout:

View File

@@ -131,7 +131,7 @@ Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/abtest \
podinfod=quay.io/stefanprodan/podinfo:1.4.1
podinfod=quay.io/stefanprodan/podinfo:1.7.1
```
Flagger detects that the deployment revision changed and starts a new rollout:

View File

@@ -37,8 +37,10 @@ Deploy the load testing service to generate traffic during the canary analysis:
```bash
helm upgrade -i flagger-loadtester flagger/loadtester \
--namespace=test \
--set meshName=global.appmesh-system \
--set "backends[0]=podinfo.test"
--set meshName=global \
--set "backends[0]=podinfo.test" \
--set "backends[1]=podinfo-canary.test" \
--set "backends[2]=podinfo-primary.test"
```
Create a canary custom resource:
@@ -67,7 +69,7 @@ spec:
# container port
port: 9898
# App Mesh reference
meshName: global.appmesh-system
meshName: global
# App Mesh egress (optional)
backends:
- backend.test
@@ -176,7 +178,7 @@ Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.1
podinfod=quay.io/stefanprodan/podinfo:1.7.1
```
Flagger detects that the deployment revision changed and starts a new rollout:

View File

@@ -59,8 +59,8 @@ Create a deployment and a horizontal pod autoscaler:
```bash
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
kubectl apply -f ${REPO}/artifacts/canary/deployment.yaml
kubectl apply -f ${REPO}/artifacts/canary/hpa.yaml
kubectl apply -f ${REPO}/artifacts/canaries/deployment.yaml
kubectl apply -f ${REPO}/artifacts/canaries/hpa.yaml
```
Deploy the load testing service to generate traffic during the analysis:
@@ -172,7 +172,7 @@ Trigger a deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.1
podinfod=quay.io/stefanprodan/podinfo:1.7.1
```
Flagger detects that the deployment revision changed and starts a new rollout:
@@ -297,7 +297,7 @@ Trigger a deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.3
podinfod=quay.io/stefanprodan/podinfo:1.7.3
```
Generate 404s:

View File

@@ -197,7 +197,7 @@ Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.1
podinfod=quay.io/stefanprodan/podinfo:1.7.1
```
Flagger detects that the deployment revision changed and starts a new rollout:

View File

@@ -6,34 +6,23 @@ This guide shows you how to use Linkerd and Flagger to automate canary deploymen
### Prerequisites
Flagger requires a Kubernetes cluster **v1.11** or newer and Linker with support for SMI Traffic Spit API.
Flagger requires a Kubernetes cluster **v1.11** or newer and Linkerd **2.4** or newer.
Install Flagger in the linkerd namespace:
```bash
helm repo add flagger https://flagger.app
helm upgrade -i flagger flagger/flagger \
--namespace linkerd \
--set metricsServer=http://linkerd-prometheus:9090 \
--set meshProvider=linkerd
kubectl apply -k github.com/weaveworks/flagger//kustomize/linkerd
```
Optionally you can enable Slack notifications:
Note that you'll need kubectl 1.14 or newer to run the above command.
```bash
helm upgrade -i flagger flagger/flagger \
--reuse-values \
--namespace linkerd \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general \
--set slack.user=flagger
```
To enable Slack or MS Teams notifications,
see Flagger's [install docs](https://docs.flagger.app/install/flagger-install-on-kubernetes) for Kustomize or Helm options.
### Bootstrap
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
then creates a series of objects (Kubernetes deployments, ClusterIP services and SMI traffic split).
These objects expose the application inside the mesh and drive the canary analysis and promotion.
Create a test namespace and enable Linkerd proxy injection:
@@ -46,17 +35,13 @@ kubectl annotate namespace test linkerd.io/inject=enabled
Install the load testing service to generate traffic during the canary analysis:
```bash
helm upgrade -i flagger-loadtester flagger/loadtester \
--namespace=test
kubectl apply -k github.com/weaveworks/flagger//kustomize/tester
```
Create a deployment and a horizontal pod autoscaler:
```bash
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
kubectl apply -f ${REPO}/artifacts/canary/deployment.yaml
kubectl apply -f ${REPO}/artifacts/canary/hpa.yaml
kubectl apply -k github.com/weaveworks/flagger//kustomize/podinfo
```
Create a canary custom resource for the podinfo deployment:
@@ -129,19 +114,19 @@ Save the above resource as podinfo-canary.yaml and then apply it:
kubectl apply -f ./podinfo-canary.yaml
```
When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary.
The canary analysis will run for five minutes while validating the HTTP metrics and rollout hooks every half a minute.
After a couple of seconds Flagger will create the canary objects:
```bash
# applied
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
ingresses.extensions/podinfo
canary.flagger.app/podinfo
# generated
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
@@ -150,12 +135,12 @@ service/podinfo-primary
trafficsplits.split.smi-spec.io/podinfo
```
After the bootstrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test` will be routed
to the primary pods. During the canary analysis, the `podinfo-canary.test` address can be used to target the canary pods directly.
### Automated canary promotion
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators
like HTTP requests success rate, requests average duration and pod health.
Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack.
@@ -165,7 +150,7 @@ Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.1
podinfod=quay.io/stefanprodan/podinfo:1.7.1
```
Flagger detects that the deployment revision changed and starts a new rollout:
@@ -223,7 +208,7 @@ Trigger another canary deployment:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.2
podinfod=quay.io/stefanprodan/podinfo:1.7.2
```
Exec into the load tester pod with:
@@ -244,7 +229,7 @@ Generate latency:
watch -n 1 curl http://podinfo-canary.test:9898/delay/1
```
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
the canary is scaled to zero and the rollout is marked as failed.
```text
@@ -271,7 +256,7 @@ Events:
### Custom metrics
The canary analysis can be extended with Prometheus queries.
Let's define a check for not found errors. Edit the canary analysis and add the following metric:
@@ -290,8 +275,8 @@ Let's a define a check for not found errors. Edit the canary analysis and add th
direction="inbound"
}[1m]
)
)
/
sum(
rate(
response_total{
@@ -300,19 +285,19 @@ Let's a define a check for not found errors. Edit the canary analysis and add th
direction="inbound"
}[1m]
)
)
* 100
```
The above configuration validates the canary version by checking if the HTTP 404 req/sec percentage is below
three percent of the total traffic. If the 404s rate reaches the 3% threshold, then the analysis is aborted and the
canary is marked as failed.
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.3
podinfod=quay.io/stefanprodan/podinfo:1.7.3
```
Generate 404s:
@@ -377,10 +362,10 @@ spec:
servicePort: 9898
```
When using an ingress controller, the Linkerd traffic split does not apply to incoming traffic since NGINX is running outside of
the mesh. In order to run a canary analysis for a frontend app, Flagger creates a shadow ingress and sets the NGINX specific annotations.
### A/B Testing
Besides weighted routing, Flagger can be configured to route traffic to the canary based on HTTP match conditions.
In an A/B testing scenario, you'll be using HTTP headers or cookies to target a certain segment of your users.
@@ -450,16 +435,16 @@ spec:
cmd: "hey -z 2m -q 10 -c 2 -H 'Cookie: canary=always' http://app.example.com"
```
The above configuration will run an analysis for ten minutes targeting users that have a `canary` cookie set to `always` or
those that call the service using the `X-Canary: always` header.
**Note** that the load test now targets the external address and uses the canary cookie.
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.5.0
podinfod=quay.io/stefanprodan/podinfo:1.7.4
```
Flagger detects that the deployment revision changed and starts the A/B testing:


@@ -189,7 +189,7 @@ Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.1
podinfod=quay.io/stefanprodan/podinfo:1.7.1
```
Flagger detects that the deployment revision changed and starts a new rollout:


@@ -119,7 +119,7 @@ Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.1
podinfod=quay.io/stefanprodan/podinfo:1.7.1
```
Flagger detects that the deployment revision changed and starts a new rollout:

go.mod

@@ -30,6 +30,8 @@ require (
github.com/mattn/go-isatty v0.0.7 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/go-testing-interface v1.0.0 // indirect
github.com/mitchellh/hashstructure v1.0.0
github.com/pkg/errors v0.8.1
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 // indirect
github.com/prometheus/common v0.3.0 // indirect

go.sum

@@ -31,6 +31,7 @@ github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30/go.mod h1:4AJxU
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/avast/retry-go v2.2.0+incompatible h1:m+w7mVLWa/oKqX2xYqiEKQQkeGH8DDEXB/XnjS54Wyw=
@@ -38,6 +39,7 @@ github.com/avast/retry-go v2.2.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevB
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
@@ -80,6 +82,7 @@ github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi
github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fgrosse/zaptest v1.1.0 h1:sK9hP0/xBoNX5qfFo3KWFluDXfc809APomI1QXuYELA=
github.com/fgrosse/zaptest v1.1.0/go.mod h1:vMnRSul6kW7kIUXZgnZZcDwyTn8k49ODfAULL8nmL5w=
@@ -192,6 +195,7 @@ github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
@@ -202,6 +206,7 @@ github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M=
@@ -265,6 +270,7 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mitchellh/cli v1.0.0 h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
@@ -315,6 +321,7 @@ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1 h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=


@@ -1,37 +1,50 @@
# Flagger Kustomize installer
As an alternative to Helm, Flagger can be installed with Kustomize.
As an alternative to Helm, Flagger can be installed with [Kustomize](https://kustomize.io/).
## Service mesh specific installers
Install Flagger for Istio:
```bash
kubectl apply -k github.com/weaveworks/flagger/kustomize/istio
kubectl apply -k github.com/weaveworks/flagger//kustomize/istio
```
This deploys Flagger in the `istio-system` namespace and sets the metrics server URL to `http://prometheus.istio-system:9090`.
This deploys Flagger in the `istio-system` namespace and sets the metrics server URL to Istio's Prometheus instance.
Note that you'll need kubectl 1.14 to run the above command or you can download the
[kustomize binary](https://github.com/kubernetes-sigs/kustomize/releases) and run:
```bash
kustomize build github.com/weaveworks/flagger//kustomize/istio | kubectl apply -f -
```
Install Flagger for Linkerd:
```bash
kubectl apply -k github.com/weaveworks/flagger/kustomize/linkerd
kubectl apply -k github.com/weaveworks/flagger//kustomize/linkerd
```
This deploys Flagger in the `linkerd` namespace and sets the metrics server URL to `http://linkerd-prometheus.linkerd:9090`.
This deploys Flagger in the `linkerd` namespace and sets the metrics server URL to Linkerd's Prometheus instance.
If you want to install a specific Flagger release, add the version number to the URL:
```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/linkerd?ref=0.18.0
```
## Generic installer
Install Flagger and Prometheus:
```bash
kubectl apply -k github.com/weaveworks/flagger/kustomize/kubernetes
kubectl apply -k github.com/weaveworks/flagger//kustomize/kubernetes
```
This deploys Flagger and Prometheus in the `flagger-system` namespace,
sets the metrics server URL to `http://flagger-prometheus.flagger-system:9090` and the mesh provider to `kubernetes`.
To target a specific provider you need to specify it in the canary custom resource:
To target a different provider you can specify it in the canary custom resource:
```yaml
apiVersion: flagger.app/v1alpha3
@@ -40,12 +53,16 @@ metadata:
name: app
namespace: test
spec:
# can be: kubernetes, istio, appmesh, linkerd, smi, nginx, gloo, supergloo
# can be: kubernetes, istio, linkerd, appmesh, nginx, gloo
# use the kubernetes provider for Blue/Green style deployments
provider: nginx
```
## Customized installer
You'll need Prometheus when using Flagger with AWS App Mesh, Gloo or NGINX ingress controller.
The Prometheus instance has a two-hour data retention and is configured to scrape all pods in your cluster that
have the `prometheus.io/scrape: "true"` annotation.
## Configure Slack notifications
Create a kustomization file using flagger as base:
@@ -81,9 +98,48 @@ spec:
EOF
```
Install Flagger with Slack:
Install Flagger for Istio with Slack notifications:
```bash
kubectl apply -k .
```
## Configure MS Teams notifications
Create a kustomization file using flagger as base:
```bash
cat > kustomization.yaml <<EOF
namespace: linkerd
bases:
- github.com/weaveworks/flagger/kustomize/base/flagger
patchesStrategicMerge:
- patch.yaml
EOF
```
Create a patch and set the MS Teams webhook URL:
```bash
cat > patch.yaml <<EOF
apiVersion: apps/v1
kind: Deployment
metadata:
name: flagger
spec:
template:
spec:
containers:
- name: flagger
args:
- -mesh-provider=linkerd
- -metrics-server=http://linkerd-prometheus:9090
- -msteams-url=https://outlook.office.com/webhook/YOUR/TEAMS/WEBHOOK
EOF
```
Install Flagger for Linkerd with MS Teams notifications:
```bash
kubectl apply -k .
```


@@ -102,17 +102,23 @@ spec:
canaryAnalysis:
properties:
interval:
description: Canary schedule interval
type: string
pattern: "^[0-9]+(m|s)"
iterations:
description: Number of checks to run for A/B Testing and Blue/Green
type: number
threshold:
description: Max number of failed checks before rollback
type: number
maxWeight:
description: Max traffic percentage routed to canary
type: number
stepWeight:
description: Canary incremental traffic percentage step
type: number
metrics:
description: Prometheus query list for this canary
type: array
properties:
items:
@@ -120,15 +126,20 @@ spec:
required: ['name', 'threshold']
properties:
name:
description: Name of the Prometheus metric
type: string
interval:
description: Interval of the promql query
type: string
pattern: "^[0-9]+(m|s)"
threshold:
description: Max scalar value accepted for this metric
type: number
query:
description: Prometheus query
type: string
webhooks:
description: Webhook list for this canary
type: array
properties:
items:
@@ -136,8 +147,10 @@ spec:
required: ['name', 'url', 'timeout']
properties:
name:
description: Name of the webhook
type: string
type:
description: Type of the webhook pre, post or during rollout
type: string
enum:
- ""
@@ -145,28 +158,68 @@ spec:
- rollout
- post-rollout
url:
description: URL address of this webhook
type: string
format: url
timeout:
description: Request timeout for this webhook
type: string
pattern: "^[0-9]+(m|s)"
status:
properties:
phase:
description: Analysis phase of this canary
type: string
enum:
- ""
- Initializing
- Initialized
- Waiting
- Progressing
- Finalising
- Succeeded
- Failed
canaryWeight:
description: Traffic weight percentage routed to canary
type: number
failedChecks:
description: Failed check count of the current canary analysis
type: number
iterations:
description: Iteration count of the current canary analysis
type: number
lastAppliedSpec:
description: LastAppliedSpec of this canary
type: string
lastTransitionTime:
description: LastTransitionTime of this canary
format: date-time
type: string
conditions:
description: Status conditions of this canary
type: array
properties:
items:
type: object
required: ['type', 'status', 'reason']
properties:
lastTransitionTime:
description: LastTransitionTime of this condition
format: date-time
type: string
lastUpdateTime:
description: LastUpdateTime of this condition
format: date-time
type: string
message:
description: Message associated with this condition
type: string
reason:
description: Reason for the current status of this condition
type: string
status:
description: Status of this condition
type: string
type:
description: Type of this condition
type: string


@@ -2,7 +2,6 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: flagger
namespace: flagger-system
spec:
replicas: 1
strategy:


@@ -8,4 +8,4 @@ resources:
- deployment.yaml
images:
- name: weaveworks/flagger
newTag: 0.17.0
newTag: 0.18.2


@@ -8,6 +8,7 @@ spec:
containers:
- name: flagger
args:
- -log-level=info
- -mesh-provider=istio
- -metrics-server=http://prometheus:9090
- -slack-user=flagger


@@ -8,6 +8,7 @@ spec:
containers:
- name: flagger
args:
- -log-level=info
- -mesh-provider=kubernetes
- -metrics-server=http://flagger-prometheus:9090
- -slack-user=flagger


@@ -8,6 +8,7 @@ spec:
containers:
- name: flagger
args:
- -log-level=info
- -mesh-provider=linkerd
- -metrics-server=http://linkerd-prometheus:9090
- -slack-user=flagger


@@ -2,11 +2,12 @@ apiVersion: apps/v1
kind: Deployment
metadata:
name: podinfo
namespace: test
labels:
app: podinfo
spec:
replicas: 1
minReadySeconds: 5
revisionHistoryLimit: 5
progressDeadlineSeconds: 60
strategy:
rollingUpdate:
maxUnavailable: 0
@@ -23,7 +24,7 @@ spec:
spec:
containers:
- name: podinfod
image: quay.io/stefanprodan/podinfo:1.4.0
image: quay.io/stefanprodan/podinfo:1.7.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9898
@@ -37,7 +38,7 @@ spec:
- --random-error=false
env:
- name: PODINFO_UI_COLOR
value: green
value: blue
livenessProbe:
exec:
command:
@@ -45,10 +46,8 @@ spec:
- check
- http
- localhost:9898/healthz
failureThreshold: 3
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
initialDelaySeconds: 5
timeoutSeconds: 5
readinessProbe:
exec:
command:
@@ -56,14 +55,12 @@ spec:
- check
- http
- localhost:9898/readyz
failureThreshold: 3
periodSeconds: 3
successThreshold: 1
timeoutSeconds: 2
initialDelaySeconds: 5
timeoutSeconds: 5
resources:
limits:
cpu: 1000m
memory: 256Mi
cpu: 2000m
memory: 512Mi
requests:
cpu: 100m
memory: 16Mi
memory: 64Mi


@@ -2,13 +2,12 @@ apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: podinfo
namespace: test
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
minReplicas: 1
minReplicas: 2
maxReplicas: 4
metrics:
- type: Resource


@@ -0,0 +1,5 @@
namespace: test
resources:
- hpa.yaml
- deployment.yaml


@@ -0,0 +1,59 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: flagger-loadtester
labels:
app: flagger-loadtester
spec:
selector:
matchLabels:
app: flagger-loadtester
template:
metadata:
labels:
app: flagger-loadtester
annotations:
prometheus.io/scrape: "true"
spec:
containers:
- name: loadtester
image: weaveworks/flagger-loadtester:0.6.1
imagePullPolicy: IfNotPresent
ports:
- name: http
containerPort: 8080
command:
- ./loadtester
- -port=8080
- -log-level=info
- -timeout=1h
livenessProbe:
exec:
command:
- wget
- --quiet
- --tries=1
- --timeout=4
- --spider
- http://localhost:8080/healthz
timeoutSeconds: 5
readinessProbe:
exec:
command:
- wget
- --quiet
- --tries=1
- --timeout=4
- --spider
- http://localhost:8080/healthz
timeoutSeconds: 5
resources:
limits:
memory: "512Mi"
cpu: "1000m"
requests:
memory: "32Mi"
cpu: "10m"
securityContext:
readOnlyRootFilesystem: true
runAsUser: 10001


@@ -0,0 +1,7 @@
namespace: test
resources:
- service.yaml
- deployment.yaml
images:
- name: weaveworks/flagger-loadtester
newTag: 0.6.1


@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: flagger-loadtester
labels:
app: flagger-loadtester
spec:
type: ClusterIP
selector:
app: flagger-loadtester
ports:
- name: http
port: 80
protocol: TCP
targetPort: http


@@ -0,0 +1,76 @@
package v1alpha3
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// CanaryConditionType is the type of a CanaryCondition
type CanaryConditionType string
const (
// PromotedType refers to the result of the last canary analysis
PromotedType CanaryConditionType = "Promoted"
)
// CanaryCondition is a status condition for a Canary
type CanaryCondition struct {
// Type of this condition
Type CanaryConditionType `json:"type"`
// Status of this condition
Status corev1.ConditionStatus `json:"status"`
// LastUpdateTime of this condition
LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"`
// LastTransitionTime of this condition
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
// Reason for the current status of this condition
Reason string `json:"reason,omitempty"`
// Message associated with this condition
Message string `json:"message,omitempty"`
}
// CanaryPhase is a label for the condition of a canary at the current time
type CanaryPhase string
const (
// CanaryPhaseInitializing means the canary initialization is underway
CanaryPhaseInitializing CanaryPhase = "Initializing"
// CanaryPhaseInitialized means the primary deployment, hpa and ClusterIP services
// have been created along with the service mesh or ingress objects
CanaryPhaseInitialized CanaryPhase = "Initialized"
// CanaryPhaseWaiting means the canary rollout is paused (waiting for confirmation to proceed)
CanaryPhaseWaiting CanaryPhase = "Waiting"
// CanaryPhaseProgressing means the canary analysis is underway
CanaryPhaseProgressing CanaryPhase = "Progressing"
// CanaryPhaseFinalising means the canary analysis is finished and traffic has been routed back to primary
CanaryPhaseFinalising CanaryPhase = "Finalising"
// CanaryPhaseSucceeded means the canary analysis has been successful
// and the canary deployment has been promoted
CanaryPhaseSucceeded CanaryPhase = "Succeeded"
// CanaryPhaseFailed means the canary analysis failed
// and the canary deployment has been scaled to zero
CanaryPhaseFailed CanaryPhase = "Failed"
)
// CanaryStatus is used for state persistence (read-only)
type CanaryStatus struct {
Phase CanaryPhase `json:"phase"`
FailedChecks int `json:"failedChecks"`
CanaryWeight int `json:"canaryWeight"`
Iterations int `json:"iterations"`
// +optional
TrackedConfigs *map[string]string `json:"trackedConfigs,omitempty"`
// +optional
LastAppliedSpec string `json:"lastAppliedSpec,omitempty"`
// +optional
LastPromotedSpec string `json:"lastPromotedSpec,omitempty"`
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
// +optional
Conditions []CanaryCondition `json:"conditions,omitempty"`
}
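
For orientation, here is a minimal, stand-alone sketch of how the Promoted condition introduced by these types can be read back from a canary's status. The fields are simplified to plain strings so the snippet compiles without the Kubernetes API packages; the real types use `CanaryConditionType` and `corev1.ConditionStatus`.

```go
package main

import "fmt"

// Simplified mirrors of the new v1alpha3 types: each canary carries a list of
// conditions, and the Promoted condition summarises the last analysis result.
type CanaryCondition struct {
	Type    string // "Promoted"
	Status  string // "True", "False" or "Unknown"
	Reason  string // the phase that produced the condition
	Message string
}

type CanaryStatus struct {
	Phase      string
	Conditions []CanaryCondition
}

// promoted returns the Promoted condition if it is present.
func promoted(st CanaryStatus) (CanaryCondition, bool) {
	for _, c := range st.Conditions {
		if c.Type == "Promoted" {
			return c, true
		}
	}
	return CanaryCondition{}, false
}

func main() {
	st := CanaryStatus{
		Phase: "Succeeded",
		Conditions: []CanaryCondition{{
			Type:    "Promoted",
			Status:  "True",
			Reason:  "Succeeded",
			Message: "Canary analysis completed successfully, promotion finished.",
		}},
	}
	if c, ok := promoted(st); ok {
		fmt.Printf("promoted=%s reason=%s\n", c.Status, c.Reason)
	}
}
```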


@@ -85,37 +85,6 @@ type CanaryList struct {
Items []Canary `json:"items"`
}
// CanaryPhase is a label for the condition of a canary at the current time
type CanaryPhase string
const (
// CanaryInitialized means the primary deployment, hpa and ClusterIP services
// have been created along with the Istio virtual service
CanaryInitialized CanaryPhase = "Initialized"
// CanaryProgressing means the canary analysis is underway
CanaryProgressing CanaryPhase = "Progressing"
// CanarySucceeded means the canary analysis has been successful
// and the canary deployment has been promoted
CanarySucceeded CanaryPhase = "Succeeded"
// CanaryFailed means the canary analysis failed
// and the canary deployment has been scaled to zero
CanaryFailed CanaryPhase = "Failed"
)
// CanaryStatus is used for state persistence (read-only)
type CanaryStatus struct {
Phase CanaryPhase `json:"phase"`
FailedChecks int `json:"failedChecks"`
CanaryWeight int `json:"canaryWeight"`
Iterations int `json:"iterations"`
// +optional
TrackedConfigs *map[string]string `json:"trackedConfigs,omitempty"`
// +optional
LastAppliedSpec string `json:"lastAppliedSpec,omitempty"`
// +optional
LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
}
// CanaryService is used to create ClusterIP services
// and Istio Virtual Service
type CanaryService struct {
@@ -168,6 +137,8 @@ const (
PreRolloutHook HookType = "pre-rollout"
// PostRolloutHook execute webhook after the canary analysis
PostRolloutHook HookType = "post-rollout"
// ConfirmRolloutHook halt canary analysis until webhook returns HTTP 200
ConfirmRolloutHook HookType = "confirm-rollout"
)
// CanaryWebhook holds the reference to external checks used for canary analysis


@@ -89,6 +89,24 @@ func (in *CanaryAnalysis) DeepCopy() *CanaryAnalysis {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CanaryCondition) DeepCopyInto(out *CanaryCondition) {
*out = *in
in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanaryCondition.
func (in *CanaryCondition) DeepCopy() *CanaryCondition {
if in == nil {
return nil
}
out := new(CanaryCondition)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CanaryList) DeepCopyInto(out *CanaryList) {
*out = *in
@@ -250,6 +268,13 @@ func (in *CanaryStatus) DeepCopyInto(out *CanaryStatus) {
}
}
in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
if in.Conditions != nil {
in, out := &in.Conditions, &out.Conditions
*out = make([]CanaryCondition, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}


@@ -2,20 +2,18 @@ package canary
import (
"crypto/rand"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/mitchellh/hashstructure"
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
"go.uber.org/zap"
"io"
appsv1 "k8s.io/api/apps/v1"
hpav1 "k8s.io/api/autoscaling/v2beta1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes"
@@ -39,7 +37,7 @@ func (c *Deployer) Initialize(cd *flaggerv1.Canary, skipLivenessChecks bool) (la
return "", ports, fmt.Errorf("creating deployment %s.%s failed: %v", primaryName, cd.Namespace, err)
}
if cd.Status.Phase == "" {
if cd.Status.Phase == "" || cd.Status.Phase == flaggerv1.CanaryPhaseInitializing {
if !skipLivenessChecks {
_, readyErr := c.IsPrimaryReady(cd)
if readyErr != nil {
@@ -115,6 +113,7 @@ func (c *Deployer) Promote(cd *flaggerv1.Canary) error {
primaryCopy.Spec.Template.Labels = makePrimaryLabels(canary.Spec.Template.Labels, primaryName, label)
// apply update
_, err = c.KubeClient.AppsV1().Deployments(cd.Namespace).Update(primaryCopy)
if err != nil {
return fmt.Errorf("updating deployment %s.%s template spec failed: %v",
@@ -146,19 +145,17 @@ func (c *Deployer) HasDeploymentChanged(cd *flaggerv1.Canary) (bool, error) {
return true, nil
}
newSpec := &canary.Spec.Template.Spec
oldSpecJson, err := base64.StdEncoding.DecodeString(cd.Status.LastAppliedSpec)
newHash, err := hashstructure.Hash(canary.Spec.Template, nil)
if err != nil {
return false, fmt.Errorf("%s.%s decode error %v", cd.Name, cd.Namespace, err)
}
oldSpec := &corev1.PodSpec{}
err = json.Unmarshal(oldSpecJson, oldSpec)
if err != nil {
return false, fmt.Errorf("%s.%s unmarshal error %v", cd.Name, cd.Namespace, err)
return false, fmt.Errorf("hash error %v", err)
}
if diff := cmp.Diff(*newSpec, *oldSpec, cmpopts.IgnoreUnexported(resource.Quantity{})); diff != "" {
//fmt.Println(diff)
// do not trigger a canary deployment on manual rollback
if cd.Status.LastPromotedSpec == fmt.Sprintf("%d", newHash) {
return false, nil
}
if cd.Status.LastAppliedSpec != fmt.Sprintf("%d", newHash) {
return true, nil
}
@@ -337,7 +334,8 @@ func (c *Deployer) reconcilePrimaryHpa(cd *flaggerv1.Canary, init bool) error {
// update HPA
if !init && primaryHpa != nil {
diff := cmp.Diff(hpaSpec.Metrics, primaryHpa.Spec.Metrics)
if diff != "" || hpaSpec.MinReplicas != primaryHpa.Spec.MinReplicas || hpaSpec.MaxReplicas != primaryHpa.Spec.MaxReplicas {
if diff != "" || int32Default(hpaSpec.MinReplicas) != int32Default(primaryHpa.Spec.MinReplicas) || hpaSpec.MaxReplicas != primaryHpa.Spec.MaxReplicas {
fmt.Println(diff, hpaSpec.MinReplicas, primaryHpa.Spec.MinReplicas, hpaSpec.MaxReplicas, primaryHpa.Spec.MaxReplicas)
hpaClone := primaryHpa.DeepCopy()
hpaClone.Spec.MaxReplicas = hpaSpec.MaxReplicas
hpaClone.Spec.MinReplicas = hpaSpec.MinReplicas
@@ -434,3 +432,11 @@ func makePrimaryLabels(labels map[string]string, primaryName string, label strin
func int32p(i int32) *int32 {
return &i
}
func int32Default(i *int32) int32 {
if i == nil {
return 1
}
return *i
}
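
The change-detection rewrite above replaces the base64-encoded pod spec comparison with a `hashstructure` hash of the pod template. A rough, self-contained sketch of that idea, using a toy struct in place of the Kubernetes pod template, could look like this:

```go
package main

import (
	"fmt"

	"github.com/mitchellh/hashstructure"
)

// podTemplate stands in for the deployment's spec.Template that Flagger hashes.
type podTemplate struct {
	Image string
	Env   map[string]string
}

// hasChanged reports whether the live template differs from the last applied
// hash, while ignoring a manual rollback to the last promoted hash.
func hasChanged(tpl podTemplate, lastApplied, lastPromoted string) (bool, error) {
	newHash, err := hashstructure.Hash(tpl, nil)
	if err != nil {
		return false, fmt.Errorf("hash error %v", err)
	}
	h := fmt.Sprintf("%d", newHash)
	if lastPromoted == h {
		// rolling back to the promoted spec should not trigger a new canary
		return false, nil
	}
	return lastApplied != h, nil
}

func main() {
	tpl := podTemplate{
		Image: "quay.io/stefanprodan/podinfo:1.7.1",
		Env:   map[string]string{"PODINFO_UI_COLOR": "blue"},
	}
	changed, err := hasChanged(tpl, "123", "")
	if err != nil {
		panic(err)
	}
	fmt.Println("changed:", changed)
}
```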


@@ -229,7 +229,7 @@ func TestCanaryDeployer_SetState(t *testing.T) {
t.Fatal(err.Error())
}
err = mocks.deployer.SetStatusPhase(mocks.canary, v1alpha3.CanaryProgressing)
err = mocks.deployer.SetStatusPhase(mocks.canary, v1alpha3.CanaryPhaseProgressing)
if err != nil {
t.Fatal(err.Error())
}
@@ -239,8 +239,8 @@ func TestCanaryDeployer_SetState(t *testing.T) {
t.Fatal(err.Error())
}
if res.Status.Phase != v1alpha3.CanaryProgressing {
t.Errorf("Got %v wanted %v", res.Status.Phase, v1alpha3.CanaryProgressing)
if res.Status.Phase != v1alpha3.CanaryPhaseProgressing {
t.Errorf("Got %v wanted %v", res.Status.Phase, v1alpha3.CanaryPhaseProgressing)
}
}
@@ -252,7 +252,7 @@ func TestCanaryDeployer_SyncStatus(t *testing.T) {
}
status := v1alpha3.CanaryStatus{
Phase: v1alpha3.CanaryProgressing,
Phase: v1alpha3.CanaryPhaseProgressing,
FailedChecks: 2,
}
err = mocks.deployer.SyncStatus(mocks.canary, status)


@@ -1,11 +1,13 @@
package canary
import (
"encoding/base64"
"encoding/json"
"fmt"
"k8s.io/client-go/util/retry"
"github.com/mitchellh/hashstructure"
ex "github.com/pkg/errors"
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@@ -17,88 +19,227 @@ func (c *Deployer) SyncStatus(cd *flaggerv1.Canary, status flaggerv1.CanaryStatu
if errors.IsNotFound(err) {
return fmt.Errorf("deployment %s.%s not found", cd.Spec.TargetRef.Name, cd.Namespace)
}
return fmt.Errorf("deployment %s.%s query error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
}
specJson, err := json.Marshal(dep.Spec.Template.Spec)
if err != nil {
return fmt.Errorf("deployment %s.%s marshal error %v", cd.Spec.TargetRef.Name, cd.Namespace, err)
return ex.Wrap(err, "SyncStatus deployment query error")
}
configs, err := c.ConfigTracker.GetConfigRefs(cd)
if err != nil {
return fmt.Errorf("configs query error %v", err)
return ex.Wrap(err, "SyncStatus configs query error")
}
cdCopy := cd.DeepCopy()
cdCopy.Status.Phase = status.Phase
cdCopy.Status.CanaryWeight = status.CanaryWeight
cdCopy.Status.FailedChecks = status.FailedChecks
cdCopy.Status.Iterations = status.Iterations
cdCopy.Status.LastAppliedSpec = base64.StdEncoding.EncodeToString(specJson)
cdCopy.Status.LastTransitionTime = metav1.Now()
cdCopy.Status.TrackedConfigs = configs
cd, err = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
hash, err := hashstructure.Hash(dep.Spec.Template, nil)
if err != nil {
return fmt.Errorf("canary %s.%s status update error %v", cdCopy.Name, cdCopy.Namespace, err)
return ex.Wrap(err, "SyncStatus hash error")
}
firstTry := true
err = retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
var selErr error
if !firstTry {
cd, selErr = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).Get(cd.GetName(), metav1.GetOptions{})
if selErr != nil {
return selErr
}
}
cdCopy := cd.DeepCopy()
cdCopy.Status.Phase = status.Phase
cdCopy.Status.CanaryWeight = status.CanaryWeight
cdCopy.Status.FailedChecks = status.FailedChecks
cdCopy.Status.Iterations = status.Iterations
cdCopy.Status.LastAppliedSpec = fmt.Sprintf("%d", hash)
cdCopy.Status.LastTransitionTime = metav1.Now()
cdCopy.Status.TrackedConfigs = configs
if ok, conditions := c.MakeStatusConditions(cd.Status, status.Phase); ok {
cdCopy.Status.Conditions = conditions
}
_, err = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
firstTry = false
return
})
if err != nil {
return ex.Wrap(err, "SyncStatus")
}
return nil
}
// SetStatusFailedChecks updates the canary failed checks counter
func (c *Deployer) SetStatusFailedChecks(cd *flaggerv1.Canary, val int) error {
cdCopy := cd.DeepCopy()
cdCopy.Status.FailedChecks = val
cdCopy.Status.LastTransitionTime = metav1.Now()
firstTry := true
err := retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
var selErr error
if !firstTry {
cd, selErr = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).Get(cd.GetName(), metav1.GetOptions{})
if selErr != nil {
return selErr
}
}
cdCopy := cd.DeepCopy()
cdCopy.Status.FailedChecks = val
cdCopy.Status.LastTransitionTime = metav1.Now()
cd, err := c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
_, err = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
firstTry = false
return
})
if err != nil {
return fmt.Errorf("canary %s.%s status update error %v", cdCopy.Name, cdCopy.Namespace, err)
return ex.Wrap(err, "SetStatusFailedChecks")
}
return nil
}
// SetStatusWeight updates the canary status weight value
func (c *Deployer) SetStatusWeight(cd *flaggerv1.Canary, val int) error {
cdCopy := cd.DeepCopy()
cdCopy.Status.CanaryWeight = val
cdCopy.Status.LastTransitionTime = metav1.Now()
firstTry := true
err := retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
var selErr error
if !firstTry {
cd, selErr = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).Get(cd.GetName(), metav1.GetOptions{})
if selErr != nil {
return selErr
}
}
cdCopy := cd.DeepCopy()
cdCopy.Status.CanaryWeight = val
cdCopy.Status.LastTransitionTime = metav1.Now()
cd, err := c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
_, err = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
firstTry = false
return
})
if err != nil {
return fmt.Errorf("canary %s.%s status update error %v", cdCopy.Name, cdCopy.Namespace, err)
return ex.Wrap(err, "SetStatusWeight")
}
return nil
}
// SetStatusIterations updates the canary status iterations value
func (c *Deployer) SetStatusIterations(cd *flaggerv1.Canary, val int) error {
cdCopy := cd.DeepCopy()
cdCopy.Status.Iterations = val
cdCopy.Status.LastTransitionTime = metav1.Now()
firstTry := true
err := retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
var selErr error
if !firstTry {
cd, selErr = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).Get(cd.GetName(), metav1.GetOptions{})
if selErr != nil {
return selErr
}
}
cdCopy := cd.DeepCopy()
cdCopy.Status.Iterations = val
cdCopy.Status.LastTransitionTime = metav1.Now()
_, err = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
firstTry = false
return
})
cd, err := c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
if err != nil {
return fmt.Errorf("canary %s.%s status update error %v", cdCopy.Name, cdCopy.Namespace, err)
return ex.Wrap(err, "SetStatusIterations")
}
return nil
}
// SetStatusPhase updates the canary status phase
func (c *Deployer) SetStatusPhase(cd *flaggerv1.Canary, phase flaggerv1.CanaryPhase) error {
cdCopy := cd.DeepCopy()
cdCopy.Status.Phase = phase
cdCopy.Status.LastTransitionTime = metav1.Now()
firstTry := true
err := retry.RetryOnConflict(retry.DefaultBackoff, func() (err error) {
var selErr error
if !firstTry {
cd, selErr = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).Get(cd.GetName(), metav1.GetOptions{})
if selErr != nil {
return selErr
}
}
cdCopy := cd.DeepCopy()
cdCopy.Status.Phase = phase
cdCopy.Status.LastTransitionTime = metav1.Now()
if phase != flaggerv1.CanaryProgressing {
cdCopy.Status.CanaryWeight = 0
cdCopy.Status.Iterations = 0
}
if phase != flaggerv1.CanaryPhaseProgressing && phase != flaggerv1.CanaryPhaseWaiting {
cdCopy.Status.CanaryWeight = 0
cdCopy.Status.Iterations = 0
}
cd, err := c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
// on promotion set primary spec hash
if phase == flaggerv1.CanaryPhaseInitialized || phase == flaggerv1.CanaryPhaseSucceeded {
cdCopy.Status.LastPromotedSpec = cd.Status.LastAppliedSpec
}
if ok, conditions := c.MakeStatusConditions(cdCopy.Status, phase); ok {
cdCopy.Status.Conditions = conditions
}
_, err = c.FlaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
firstTry = false
return
})
if err != nil {
return fmt.Errorf("canary %s.%s status update error %v", cdCopy.Name, cdCopy.Namespace, err)
return ex.Wrap(err, "SetStatusPhase")
}
return nil
}
// getStatusCondition returns a condition based on type
func (c *Deployer) getStatusCondition(status flaggerv1.CanaryStatus, conditionType flaggerv1.CanaryConditionType) *flaggerv1.CanaryCondition {
for i := range status.Conditions {
c := status.Conditions[i]
if c.Type == conditionType {
return &c
}
}
return nil
}
// MakeStatusConditions updates the canary status conditions based on canary phase
func (c *Deployer) MakeStatusConditions(canaryStatus flaggerv1.CanaryStatus,
phase flaggerv1.CanaryPhase) (bool, []flaggerv1.CanaryCondition) {
currentCondition := c.getStatusCondition(canaryStatus, flaggerv1.PromotedType)
message := "New deployment detected, starting initialization."
status := corev1.ConditionUnknown
switch phase {
case flaggerv1.CanaryPhaseInitializing:
status = corev1.ConditionUnknown
message = "New deployment detected, starting initialization."
case flaggerv1.CanaryPhaseInitialized:
status = corev1.ConditionTrue
message = "Deployment initialization completed."
case flaggerv1.CanaryPhaseWaiting:
status = corev1.ConditionUnknown
message = "Waiting for approval."
case flaggerv1.CanaryPhaseProgressing:
status = corev1.ConditionUnknown
message = "New revision detected, starting canary analysis."
case flaggerv1.CanaryPhaseFinalising:
status = corev1.ConditionUnknown
message = "Canary analysis completed, routing all traffic to primary."
case flaggerv1.CanaryPhaseSucceeded:
status = corev1.ConditionTrue
message = "Canary analysis completed successfully, promotion finished."
case flaggerv1.CanaryPhaseFailed:
status = corev1.ConditionFalse
message = "Canary analysis failed, deployment scaled to zero."
}
newCondition := &flaggerv1.CanaryCondition{
Type: flaggerv1.PromotedType,
Status: status,
LastUpdateTime: metav1.Now(),
LastTransitionTime: metav1.Now(),
Message: message,
Reason: string(phase),
}
if currentCondition != nil &&
currentCondition.Status == newCondition.Status &&
currentCondition.Reason == newCondition.Reason {
return false, nil
}
if currentCondition != nil && currentCondition.Status == newCondition.Status {
newCondition.LastTransitionTime = currentCondition.LastTransitionTime
}
return true, []flaggerv1.CanaryCondition{*newCondition}
}

View File

@@ -5,20 +5,21 @@ import (
"sync"
"time"
"github.com/weaveworks/flagger/pkg/canary"
"github.com/weaveworks/flagger/pkg/metrics"
"github.com/weaveworks/flagger/pkg/router"
"github.com/google/go-cmp/cmp"
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
"github.com/weaveworks/flagger/pkg/canary"
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
flaggerscheme "github.com/weaveworks/flagger/pkg/client/clientset/versioned/scheme"
flaggerinformers "github.com/weaveworks/flagger/pkg/client/informers/externalversions/flagger/v1alpha3"
flaggerlisters "github.com/weaveworks/flagger/pkg/client/listers/flagger/v1alpha3"
"github.com/weaveworks/flagger/pkg/metrics"
"github.com/weaveworks/flagger/pkg/notifier"
"github.com/weaveworks/flagger/pkg/router"
"github.com/google/go-cmp/cmp"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
@@ -211,10 +212,25 @@ func (c *Controller) syncHandler(key string) error {
}
cd, err := c.flaggerLister.Canaries(namespace).Get(name)
if errors.IsNotFound(err) {
utilruntime.HandleError(fmt.Errorf("'%s' in work queue no longer exists", key))
utilruntime.HandleError(fmt.Errorf("%s in work queue no longer exists", key))
return nil
}
// set status condition for new canaries
if cd.Status.Conditions == nil {
if ok, conditions := c.deployer.MakeStatusConditions(cd.Status, flaggerv1.CanaryPhaseInitializing); ok {
cdCopy := cd.DeepCopy()
cdCopy.Status.Conditions = conditions
cdCopy.Status.LastTransitionTime = metav1.Now()
cdCopy.Status.Phase = flaggerv1.CanaryPhaseInitializing
_, err := c.flaggerClient.FlaggerV1alpha3().Canaries(cd.Namespace).UpdateStatus(cdCopy)
if err != nil {
c.logger.Errorf("%s status condition update error: %v", key, err)
return fmt.Errorf("%s status condition update error: %v", key, err)
}
}
}
c.canaries.Store(fmt.Sprintf("%s.%s", cd.Name, cd.Namespace), cd)
c.logger.Infof("Synced %s", key)


@@ -134,6 +134,11 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
return
}
// check gates
if isApproved := c.runConfirmRolloutHooks(cd); !isApproved {
return
}
// set max weight default value to 100%
maxWeight := 100
if cd.Spec.CanaryAnalysis.MaxWeight > 0 {
@@ -178,7 +183,7 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
// reset status
status := flaggerv1.CanaryStatus{
Phase: flaggerv1.CanaryProgressing,
Phase: flaggerv1.CanaryPhaseProgressing,
CanaryWeight: 0,
FailedChecks: 0,
Iterations: 0,
@@ -209,8 +214,28 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
return
}
// scale canary to zero if analysis has succeeded
if cd.Status.Phase == flaggerv1.CanaryPhaseFinalising {
if err := c.deployer.Scale(cd, 0); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
// set status to succeeded
if err := c.deployer.SetStatusPhase(cd, flaggerv1.CanaryPhaseSucceeded); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
c.recorder.SetStatus(cd, flaggerv1.CanaryPhaseSucceeded)
c.runPostRolloutHooks(cd, flaggerv1.CanaryPhaseSucceeded)
c.recordEventInfof(cd, "Promotion completed! Scaling down %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
c.sendNotification(cd, "Canary analysis completed successfully, promotion finished.",
false, false)
return
}
// check if the number of failed checks reached the threshold
if cd.Status.Phase == flaggerv1.CanaryProgressing &&
if cd.Status.Phase == flaggerv1.CanaryPhaseProgressing &&
(!retriable || cd.Status.FailedChecks >= cd.Spec.CanaryAnalysis.Threshold) {
if cd.Status.FailedChecks >= cd.Spec.CanaryAnalysis.Threshold {
@@ -246,13 +271,13 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
}
// mark canary as failed
if err := c.deployer.SyncStatus(cd, flaggerv1.CanaryStatus{Phase: flaggerv1.CanaryFailed, CanaryWeight: 0}); err != nil {
if err := c.deployer.SyncStatus(cd, flaggerv1.CanaryStatus{Phase: flaggerv1.CanaryPhaseFailed, CanaryWeight: 0}); err != nil {
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Errorf("%v", err)
return
}
c.recorder.SetStatus(cd, flaggerv1.CanaryFailed)
c.runPostRolloutHooks(cd, flaggerv1.CanaryFailed)
c.recorder.SetStatus(cd, flaggerv1.CanaryPhaseFailed)
c.runPostRolloutHooks(cd, flaggerv1.CanaryPhaseFailed)
return
}
@@ -314,31 +339,23 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
return
}
// shutdown canary
// route all traffic to primary
if cd.Spec.CanaryAnalysis.Iterations < cd.Status.Iterations {
// route all traffic to the primary
if err := meshRouter.SetRoutes(cd, 100, 0); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
c.recorder.SetWeight(cd, 100, 0)
c.recordEventInfof(cd, "Promotion completed! Scaling down %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
// canary scale to zero
if err := c.deployer.Scale(cd, 0); err != nil {
primaryWeight = 100
canaryWeight = 0
if err := meshRouter.SetRoutes(cd, primaryWeight, canaryWeight); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
c.recorder.SetWeight(cd, primaryWeight, canaryWeight)
// update status phase
if err := c.deployer.SetStatusPhase(cd, flaggerv1.CanarySucceeded); err != nil {
if err := c.deployer.SetStatusPhase(cd, flaggerv1.CanaryPhaseFinalising); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
c.recorder.SetStatus(cd, flaggerv1.CanarySucceeded)
c.runPostRolloutHooks(cd, flaggerv1.CanarySucceeded)
c.sendNotification(cd, "Canary analysis completed successfully, promotion finished.",
false, false)
c.recordEventInfof(cd, "Routing all traffic to primary")
return
}
@@ -380,32 +397,23 @@ func (c *Controller) advanceCanary(name string, namespace string, skipLivenessCh
}
}
} else {
// route all traffic back to primary
// route all traffic to primary
primaryWeight = 100
canaryWeight = 0
if err := meshRouter.SetRoutes(cd, primaryWeight, canaryWeight); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
c.recorder.SetWeight(cd, primaryWeight, canaryWeight)
c.recordEventInfof(cd, "Promotion completed! Scaling down %s.%s", cd.Spec.TargetRef.Name, cd.Namespace)
// shutdown canary
if err := c.deployer.Scale(cd, 0); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
// update status phase
if err := c.deployer.SetStatusPhase(cd, flaggerv1.CanarySucceeded); err != nil {
if err := c.deployer.SetStatusPhase(cd, flaggerv1.CanaryPhaseFinalising); err != nil {
c.recordEventWarningf(cd, "%v", err)
return
}
c.recorder.SetStatus(cd, flaggerv1.CanarySucceeded)
c.runPostRolloutHooks(cd, flaggerv1.CanarySucceeded)
c.sendNotification(cd, "Canary analysis completed successfully, promotion finished.",
false, false)
c.recordEventInfof(cd, "Routing all traffic to primary")
return
}
}
@@ -438,13 +446,13 @@ func (c *Controller) shouldSkipAnalysis(cd *flaggerv1.Canary, meshRouter router.
}
// update status phase
if err := c.deployer.SetStatusPhase(cd, flaggerv1.CanarySucceeded); err != nil {
if err := c.deployer.SetStatusPhase(cd, flaggerv1.CanaryPhaseSucceeded); err != nil {
c.recordEventWarningf(cd, "%v", err)
return false
}
// notify
c.recorder.SetStatus(cd, flaggerv1.CanarySucceeded)
c.recorder.SetStatus(cd, flaggerv1.CanaryPhaseSucceeded)
c.recordEventInfof(cd, "Promotion completed! Canary analysis was skipped for %s.%s",
cd.Spec.TargetRef.Name, cd.Namespace)
c.sendNotification(cd, "Canary analysis was skipped, promotion finished.",
@@ -454,7 +462,11 @@ func (c *Controller) shouldSkipAnalysis(cd *flaggerv1.Canary, meshRouter router.
}
func (c *Controller) shouldAdvance(cd *flaggerv1.Canary) (bool, error) {
if cd.Status.LastAppliedSpec == "" || cd.Status.Phase == flaggerv1.CanaryProgressing {
if cd.Status.LastAppliedSpec == "" ||
cd.Status.Phase == flaggerv1.CanaryPhaseInitializing ||
cd.Status.Phase == flaggerv1.CanaryPhaseProgressing ||
cd.Status.Phase == flaggerv1.CanaryPhaseWaiting ||
cd.Status.Phase == flaggerv1.CanaryPhaseFinalising {
return true, nil
}
@@ -477,16 +489,17 @@ func (c *Controller) shouldAdvance(cd *flaggerv1.Canary) (bool, error) {
func (c *Controller) checkCanaryStatus(cd *flaggerv1.Canary, shouldAdvance bool) bool {
c.recorder.SetStatus(cd, cd.Status.Phase)
if cd.Status.Phase == flaggerv1.CanaryProgressing {
if cd.Status.Phase == flaggerv1.CanaryPhaseProgressing ||
cd.Status.Phase == flaggerv1.CanaryPhaseFinalising {
return true
}
if cd.Status.Phase == "" {
if err := c.deployer.SyncStatus(cd, flaggerv1.CanaryStatus{Phase: flaggerv1.CanaryInitialized}); err != nil {
if cd.Status.Phase == "" || cd.Status.Phase == flaggerv1.CanaryPhaseInitializing {
if err := c.deployer.SyncStatus(cd, flaggerv1.CanaryStatus{Phase: flaggerv1.CanaryPhaseInitialized}); err != nil {
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Errorf("%v", err)
return false
}
c.recorder.SetStatus(cd, flaggerv1.CanaryInitialized)
c.recorder.SetStatus(cd, flaggerv1.CanaryPhaseInitialized)
c.recordEventInfof(cd, "Initialization done! %s.%s", cd.Name, cd.Namespace)
c.sendNotification(cd, "New deployment detected, initialization completed.",
true, false)
@@ -501,18 +514,18 @@ func (c *Controller) checkCanaryStatus(cd *flaggerv1.Canary, shouldAdvance bool)
c.recordEventErrorf(cd, "%v", err)
return false
}
if err := c.deployer.SyncStatus(cd, flaggerv1.CanaryStatus{Phase: flaggerv1.CanaryProgressing}); err != nil {
if err := c.deployer.SyncStatus(cd, flaggerv1.CanaryStatus{Phase: flaggerv1.CanaryPhaseProgressing}); err != nil {
c.logger.With("canary", fmt.Sprintf("%s.%s", cd.Name, cd.Namespace)).Errorf("%v", err)
return false
}
c.recorder.SetStatus(cd, flaggerv1.CanaryProgressing)
c.recorder.SetStatus(cd, flaggerv1.CanaryPhaseProgressing)
return false
}
return false
}
func (c *Controller) hasCanaryRevisionChanged(cd *flaggerv1.Canary) bool {
if cd.Status.Phase == flaggerv1.CanaryProgressing {
if cd.Status.Phase == flaggerv1.CanaryPhaseProgressing {
if diff, _ := c.deployer.HasDeploymentChanged(cd); diff {
return true
}
@@ -523,10 +536,39 @@ func (c *Controller) hasCanaryRevisionChanged(cd *flaggerv1.Canary) bool {
return false
}
func (c *Controller) runConfirmRolloutHooks(canary *flaggerv1.Canary) bool {
for _, webhook := range canary.Spec.CanaryAnalysis.Webhooks {
if webhook.Type == flaggerv1.ConfirmRolloutHook {
err := CallWebhook(canary.Name, canary.Namespace, flaggerv1.CanaryPhaseProgressing, webhook)
if err != nil {
if canary.Status.Phase != flaggerv1.CanaryPhaseWaiting {
if err := c.deployer.SetStatusPhase(canary, flaggerv1.CanaryPhaseWaiting); err != nil {
c.logger.With("canary", fmt.Sprintf("%s.%s", canary.Name, canary.Namespace)).Errorf("%v", err)
}
c.recordEventWarningf(canary, "Halt %s.%s advancement waiting for approval %s",
canary.Name, canary.Namespace, webhook.Name)
c.sendNotification(canary, "Canary is waiting for approval.", false, false)
}
return false
} else {
if canary.Status.Phase == flaggerv1.CanaryPhaseWaiting {
if err := c.deployer.SetStatusPhase(canary, flaggerv1.CanaryPhaseProgressing); err != nil {
c.logger.With("canary", fmt.Sprintf("%s.%s", canary.Name, canary.Namespace)).Errorf("%v", err)
return false
}
c.recordEventInfof(canary, "Confirm-rollout check %s passed", webhook.Name)
return false
}
}
}
}
return true
}
func (c *Controller) runPreRolloutHooks(canary *flaggerv1.Canary) bool {
for _, webhook := range canary.Spec.CanaryAnalysis.Webhooks {
if webhook.Type == flaggerv1.PreRolloutHook {
err := CallWebhook(canary.Name, canary.Namespace, flaggerv1.CanaryProgressing, webhook)
err := CallWebhook(canary.Name, canary.Namespace, flaggerv1.CanaryPhaseProgressing, webhook)
if err != nil {
c.recordEventWarningf(canary, "Halt %s.%s advancement pre-rollout check %s failed %v",
canary.Name, canary.Namespace, webhook.Name, err)
@@ -558,7 +600,7 @@ func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool {
// run external checks
for _, webhook := range r.Spec.CanaryAnalysis.Webhooks {
if webhook.Type == "" || webhook.Type == flaggerv1.RolloutHook {
err := CallWebhook(r.Name, r.Namespace, flaggerv1.CanaryProgressing, webhook)
err := CallWebhook(r.Name, r.Namespace, flaggerv1.CanaryPhaseProgressing, webhook)
if err != nil {
c.recordEventWarningf(r, "Halt %s.%s advancement external check %s failed %v",
r.Name, r.Namespace, webhook.Name, err)
@@ -572,8 +614,8 @@ func (c *Controller) analyseCanary(r *flaggerv1.Canary) bool {
if r.Spec.Provider != "" {
metricsProvider = r.Spec.Provider
// set the metrics provider to Linkerd Prometheus when using NGINX as Linkerd Ingress
if r.Spec.Provider == "nginx" && strings.Contains(c.meshProvider, "linkerd") {
// set the metrics server to Linkerd Prometheus when Linkerd is the default mesh provider
if strings.Contains(c.meshProvider, "linkerd") {
metricsProvider = "linkerd"
}
}
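
For context on the confirm-rollout gate wired in above: Flagger keeps the canary in the `Waiting` phase while the webhook call fails and resumes the analysis once it returns HTTP 200. A toy approval endpoint, with a file-based toggle that is purely illustrative, might look like this:

```go
package main

import (
	"net/http"
	"os"
)

// A minimal confirm-rollout gate: Flagger holds the canary in the Waiting
// phase as long as this endpoint returns a non-2xx status, and resumes the
// analysis once it returns 200.
func main() {
	http.HandleFunc("/gate/podinfo", func(w http.ResponseWriter, r *http.Request) {
		// Approval is toggled here by the presence of a file; a real gate
		// could consult a ticketing system, a feature flag, or a human.
		if _, err := os.Stat("/tmp/approve-podinfo"); err != nil {
			http.Error(w, "waiting for approval", http.StatusForbidden)
			return
		}
		w.WriteHeader(http.StatusOK)
	})
	http.ListenAndServe(":9090", nil)
}
```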


@@ -47,7 +47,7 @@ func TestScheduler_Rollback(t *testing.T) {
mocks.ctrl.advanceCanary("podinfo", "default", true)
// update failed checks to max
err := mocks.deployer.SyncStatus(mocks.canary, v1alpha3.CanaryStatus{Phase: v1alpha3.CanaryProgressing, FailedChecks: 11})
err := mocks.deployer.SyncStatus(mocks.canary, v1alpha3.CanaryStatus{Phase: v1alpha3.CanaryPhaseProgressing, FailedChecks: 11})
if err != nil {
t.Fatal(err.Error())
}
@@ -60,8 +60,8 @@ func TestScheduler_Rollback(t *testing.T) {
t.Fatal(err.Error())
}
if c.Status.Phase != v1alpha3.CanaryFailed {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, v1alpha3.CanaryFailed)
if c.Status.Phase != v1alpha3.CanaryPhaseFailed {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, v1alpha3.CanaryPhaseFailed)
}
}
@@ -101,8 +101,8 @@ func TestScheduler_SkipAnalysis(t *testing.T) {
t.Errorf("Got skip analysis %v wanted %v", c.Spec.SkipAnalysis, true)
}
if c.Status.Phase != v1alpha3.CanarySucceeded {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, v1alpha3.CanarySucceeded)
if c.Status.Phase != v1alpha3.CanaryPhaseSucceeded {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, v1alpha3.CanaryPhaseSucceeded)
}
}
@@ -250,13 +250,22 @@ func TestScheduler_Promotion(t *testing.T) {
t.Errorf("Got primary secret %s wanted %s", secretPrimary.Data["apiKey"], secret2.Data["apiKey"])
}
// check finalising status
c, err := mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
if c.Status.Phase != v1alpha3.CanarySucceeded {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, v1alpha3.CanarySucceeded)
// scale canary to zero
mocks.ctrl.advanceCanary("podinfo", "default", true)
c, err = mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
if c.Status.Phase != v1alpha3.CanaryPhaseSucceeded {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, v1alpha3.CanaryPhaseSucceeded)
}
}
@@ -302,9 +311,22 @@ func TestScheduler_ABTesting(t *testing.T) {
t.Fatal(err.Error())
}
// promote
// advance
mocks.ctrl.advanceCanary("podinfo", "default", true)
// finalising
mocks.ctrl.advanceCanary("podinfo", "default", true)
// check finalising status
c, err := mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
if c.Status.Phase != v1alpha3.CanaryPhaseFinalising {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, v1alpha3.CanaryPhaseFinalising)
}
// check if the container image tag was updated
primaryDep, err := mocks.kubeClient.AppsV1().Deployments("default").Get("podinfo-primary", metav1.GetOptions{})
if err != nil {
@@ -321,13 +343,13 @@ func TestScheduler_ABTesting(t *testing.T) {
mocks.ctrl.advanceCanary("podinfo", "default", true)
// check rollout status
c, err := mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
c, err = mocks.flaggerClient.FlaggerV1alpha3().Canaries("default").Get("podinfo", metav1.GetOptions{})
if err != nil {
t.Fatal(err.Error())
}
if c.Status.Phase != v1alpha3.CanarySucceeded {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, v1alpha3.CanarySucceeded)
if c.Status.Phase != v1alpha3.CanaryPhaseSucceeded {
t.Errorf("Got canary state %v wanted %v", c.Status.Phase, v1alpha3.CanaryPhaseSucceeded)
}
}


@@ -19,7 +19,7 @@ func TestCallWebhook(t *testing.T) {
Metadata: &map[string]string{"key1": "val1"},
}
err := CallWebhook("podinfo", "default", flaggerv1.CanaryProgressing, hook)
err := CallWebhook("podinfo", "default", flaggerv1.CanaryPhaseProgressing, hook)
if err != nil {
t.Fatal(err.Error())
}
@@ -35,7 +35,7 @@ func TestCallWebhook_StatusCode(t *testing.T) {
URL: ts.URL,
}
err := CallWebhook("podinfo", "default", flaggerv1.CanaryProgressing, hook)
err := CallWebhook("podinfo", "default", flaggerv1.CanaryPhaseProgressing, hook)
if err == nil {
t.Errorf("Got no error wanted %v", http.StatusInternalServerError)
}

pkg/loadtester/gate.go (new file, 31 lines)

@@ -0,0 +1,31 @@
package loadtester
import "sync"
type GateStorage struct {
backend string
data *sync.Map
}
func NewGateStorage(backend string) *GateStorage {
return &GateStorage{
backend: backend,
data: new(sync.Map),
}
}
func (gs *GateStorage) open(key string) {
gs.data.Store(key, true)
}
func (gs *GateStorage) close(key string) {
gs.data.Store(key, false)
}
func (gs *GateStorage) isOpen(key string) (locked bool) {
val, ok := gs.data.LoadOrStore(key, false)
if ok {
return val.(bool)
}
return
}

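The gate storage above is a thin wrapper over sync.Map that backs the manual-gating endpoints added to the load tester's HTTP server below. Note the default: a gate that has never been opened reports closed, because isOpen falls back to LoadOrStore(key, false). A minimal sketch of these semantics, written as a test in the same package since the methods are unexported (the test name and key are illustrative only, not part of this change):

package loadtester

import "testing"

// Sketch only: exercises the open/close/check semantics of GateStorage.
func TestGateStorageSketch(t *testing.T) {
	gs := NewGateStorage("in-memory")
	key := "podinfo.test"

	// A key that was never stored defaults to closed.
	if gs.isOpen(key) {
		t.Error("expected a never-opened gate to be closed")
	}

	gs.open(key)
	if !gs.isOpen(key) {
		t.Error("expected an opened gate to be open")
	}

	gs.close(key)
	if gs.isOpen(key) {
		t.Error("expected a closed gate to be closed")
	}
}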

@@ -14,13 +14,101 @@ import (
)
// ListenAndServe starts a web server and waits for SIGTERM
func ListenAndServe(port string, timeout time.Duration, logger *zap.SugaredLogger, taskRunner *TaskRunner, stopCh <-chan struct{}) {
func ListenAndServe(port string, timeout time.Duration, logger *zap.SugaredLogger, taskRunner *TaskRunner, gate *GateStorage, stopCh <-chan struct{}) {
mux := http.DefaultServeMux
mux.Handle("/metrics", promhttp.Handler())
mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
w.Write([]byte("OK"))
})
mux.HandleFunc("/gate/approve", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
w.Write([]byte("OK"))
})
mux.HandleFunc("/gate/halt", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusForbidden)
w.Write([]byte("Forbidden"))
})
mux.HandleFunc("/gate/check", func(w http.ResponseWriter, r *http.Request) {
body, err := ioutil.ReadAll(r.Body)
if err != nil {
logger.Error("reading the request body failed", zap.Error(err))
w.WriteHeader(http.StatusBadRequest)
return
}
defer r.Body.Close()
canary := &flaggerv1.CanaryWebhookPayload{}
err = json.Unmarshal(body, canary)
if err != nil {
logger.Error("decoding the request body failed", zap.Error(err))
w.WriteHeader(http.StatusBadRequest)
return
}
canaryName := fmt.Sprintf("%s.%s", canary.Name, canary.Namespace)
approved := gate.isOpen(canaryName)
if approved {
w.WriteHeader(http.StatusOK)
w.Write([]byte("Approved"))
} else {
w.WriteHeader(http.StatusForbidden)
w.Write([]byte("Forbidden"))
}
logger.Infof("%s gate check: approved %v", canaryName, approved)
})
mux.HandleFunc("/gate/open", func(w http.ResponseWriter, r *http.Request) {
body, err := ioutil.ReadAll(r.Body)
if err != nil {
logger.Error("reading the request body failed", zap.Error(err))
w.WriteHeader(http.StatusBadRequest)
return
}
defer r.Body.Close()
canary := &flaggerv1.CanaryWebhookPayload{}
err = json.Unmarshal(body, canary)
if err != nil {
logger.Error("decoding the request body failed", zap.Error(err))
w.WriteHeader(http.StatusBadRequest)
return
}
canaryName := fmt.Sprintf("%s.%s", canary.Name, canary.Namespace)
gate.open(canaryName)
w.WriteHeader(http.StatusAccepted)
logger.Infof("%s gate opened", canaryName)
})
mux.HandleFunc("/gate/close", func(w http.ResponseWriter, r *http.Request) {
body, err := ioutil.ReadAll(r.Body)
if err != nil {
logger.Error("reading the request body failed", zap.Error(err))
w.WriteHeader(http.StatusBadRequest)
return
}
defer r.Body.Close()
canary := &flaggerv1.CanaryWebhookPayload{}
err = json.Unmarshal(body, canary)
if err != nil {
logger.Error("decoding the request body failed", zap.Error(err))
w.WriteHeader(http.StatusBadRequest)
return
}
canaryName := fmt.Sprintf("%s.%s", canary.Name, canary.Namespace)
gate.close(canaryName)
w.WriteHeader(http.StatusAccepted)
logger.Infof("%s gate closed", canaryName)
})
mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
body, err := ioutil.ReadAll(r.Body)
if err != nil {

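Taken together, these handlers give the load tester a small HTTP contract for manual gating: /gate/open and /gate/close record the state for a name.namespace key and reply 202 Accepted, while /gate/check replies 200 "Approved" when the gate is open and 403 "Forbidden" otherwise; only the name and namespace fields of the webhook payload are read. A hedged client sketch follows; the service URL and the podinfo/test canary are placeholders borrowed from the e2e scripts, and in a real rollout Flagger itself would query /gate/check, presumably via a confirm-rollout webhook like the one handled earlier in this comparison.

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
)

// Sketch only: the load tester address and the canary name/namespace are placeholders.
const loadTesterURL = "http://flagger-loadtester.test"

// postGate posts the payload the gate handlers unmarshal; only the name and
// namespace fields are read on the server side.
func postGate(path string) {
	payload := []byte(`{"name": "podinfo", "namespace": "test"}`)
	resp, err := http.Post(loadTesterURL+path, "application/json", bytes.NewReader(payload))
	if err != nil {
		fmt.Printf("%s -> request failed: %v\n", path, err)
		return
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Printf("%s -> %d %s\n", path, resp.StatusCode, string(body))
}

func main() {
	postGate("/gate/open")  // expected: 202 Accepted
	postGate("/gate/check") // expected: 200 Approved
	postGate("/gate/close") // expected: 202 Accepted
	postGate("/gate/check") // expected: 403 Forbidden
}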

@@ -28,6 +28,10 @@ func (factory Factory) Observer(provider string) Interface {
return &HttpObserver{
client: factory.Client,
}
case provider == "kubernetes":
return &HttpObserver{
client: factory.Client,
}
case provider == "appmesh":
return &EnvoyObserver{
client: factory.Client,


@@ -87,9 +87,9 @@ func (cr *Recorder) SetTotal(namespace string, total int) {
func (cr *Recorder) SetStatus(cd *flaggerv1.Canary, phase flaggerv1.CanaryPhase) {
status := 1
switch phase {
case flaggerv1.CanaryProgressing:
case flaggerv1.CanaryPhaseProgressing:
status = 0
case flaggerv1.CanaryFailed:
case flaggerv1.CanaryPhaseFailed:
status = 2
default:
status = 1


@@ -2,6 +2,7 @@ package router
import (
"fmt"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
flaggerv1 "github.com/weaveworks/flagger/pkg/apis/flagger/v1alpha3"
@@ -9,7 +10,6 @@ import (
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
"go.uber.org/zap"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes"
@@ -236,7 +236,7 @@ func (ir *IstioRouter) GetRoutes(canary *flaggerv1.Canary) (
) {
targetName := canary.Spec.TargetRef.Name
vs := &istiov1alpha3.VirtualService{}
vs, err = ir.istioClient.NetworkingV1alpha3().VirtualServices(canary.Namespace).Get(targetName, v1.GetOptions{})
vs, err = ir.istioClient.NetworkingV1alpha3().VirtualServices(canary.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
err = fmt.Errorf("VirtualService %s.%s not found", targetName, canary.Namespace)
@@ -283,7 +283,7 @@ func (ir *IstioRouter) SetRoutes(
primaryName := fmt.Sprintf("%s-primary", targetName)
canaryName := fmt.Sprintf("%s-canary", targetName)
vs, err := ir.istioClient.NetworkingV1alpha3().VirtualServices(canary.Namespace).Get(targetName, v1.GetOptions{})
vs, err := ir.istioClient.NetworkingV1alpha3().VirtualServices(canary.Namespace).Get(targetName, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return fmt.Errorf("VirtualService %s.%s not found", targetName, canary.Namespace)
@@ -383,12 +383,5 @@ func makeDestination(canary *flaggerv1.Canary, host string, weight int) istiov1a
Weight: weight,
}
// if port discovery is enabled then we need to explicitly set the destination port
if canary.Spec.Service.PortDiscovery {
dest.Destination.Port = &istiov1alpha3.PortSelector{
Number: uint32(canary.Spec.Service.Port),
}
}
return dest
}


@@ -1,4 +1,4 @@
package version
var VERSION = "0.17.0"
var VERSION = "0.18.2"
var REVISION = "unknown"


@@ -55,7 +55,7 @@ spec:
canaryAnalysis:
interval: 15s
threshold: 15
maxWeight: 30
maxWeight: 50
stepWeight: 10
metrics:
- name: request-success-rate


@@ -2,19 +2,17 @@
set -o errexit
GLOO_VER="0.14.2"
GLOO_VER="0.18.8"
REPO_ROOT=$(git rev-parse --show-toplevel)
export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
echo '>>> Installing Gloo'
helm repo add gloo https://storage.googleapis.com/solo-public-helm
helm upgrade -i gloo gloo/gloo --version ${GLOO_VER} \
--wait \
--namespace gloo-system \
--set gatewayProxies.gateway-proxy.service.type=NodePort
--namespace gloo-system
kubectl -n gloo-system rollout status deployment/gloo
kubectl -n gloo-system rollout status deployment/gateway-proxy
kubectl -n gloo-system rollout status deployment/gateway-proxy-v2
kubectl -n gloo-system get all
echo '>>> Installing Flagger'


@@ -2,7 +2,7 @@
set -o errexit
ISTIO_VER="1.2.2"
ISTIO_VER="1.2.3"
REPO_ROOT=$(git rev-parse --show-toplevel)
export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
@@ -26,9 +26,7 @@ echo '>>> Load Flagger image in Kind'
kind load docker-image test/flagger:latest
echo '>>> Installing Flagger'
helm upgrade -i flagger ${REPO_ROOT}/charts/flagger \
--namespace istio-system \
--set meshProvider=istio
kubectl apply -k ${REPO_ROOT}/kustomize/istio
kubectl -n istio-system set image deployment/flagger flagger=test/flagger:latest
kubectl -n istio-system rollout status deployment/flagger

test/e2e-kubernetes-tests.sh (new executable file, 98 lines)

@@ -0,0 +1,98 @@
#!/usr/bin/env bash
# This script runs e2e tests for Blue/Green initialization, analysis and promotion
# Prerequisites: Kubernetes Kind, Kustomize
set -o errexit
REPO_ROOT=$(git rev-parse --show-toplevel)
export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
echo '>>> Creating test namespace'
kubectl create namespace test
echo '>>> Installing the load tester'
kubectl apply -k ${REPO_ROOT}/kustomize/tester
kubectl -n test rollout status deployment/flagger-loadtester
echo '>>> Initialising canary'
kubectl apply -f ${REPO_ROOT}/test/e2e-workload.yaml
cat <<EOF | kubectl apply -f -
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
provider: kubernetes
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
progressDeadlineSeconds: 60
service:
port: 9898
portDiscovery: true
canaryAnalysis:
interval: 15s
threshold: 10
iterations: 5
metrics:
- name: request-success-rate
threshold: 99
interval: 1m
- name: request-duration
threshold: 500
interval: 30s
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
type: cmd
cmd: "hey -z 10m -q 10 -c 2 http://podinfo-canary.test:9898/"
logCmdOutput: "true"
EOF
echo '>>> Waiting for primary to be ready'
retries=50
count=0
ok=false
until ${ok}; do
kubectl -n test get canary/podinfo | grep 'Initialized' && ok=true || ok=false
sleep 5
count=$(($count + 1))
if [[ ${count} -eq ${retries} ]]; then
kubectl -n flagger-system logs deployment/flagger
echo "No more retries left"
exit 1
fi
done
echo '✔ Canary initialization test passed'
echo '>>> Triggering canary deployment'
kubectl -n test set image deployment/podinfo podinfod=quay.io/stefanprodan/podinfo:1.7.0
echo '>>> Waiting for canary promotion'
retries=50
count=0
ok=false
until ${ok}; do
kubectl -n test describe deployment/podinfo-primary | grep '1.7.0' && ok=true || ok=false
sleep 10
kubectl -n flagger-system logs deployment/flagger --tail 1
count=$(($count + 1))
if [[ ${count} -eq ${retries} ]]; then
kubectl -n test describe deployment/podinfo
kubectl -n test describe deployment/podinfo-primary
kubectl -n flagger-system logs deployment/flagger
echo "No more retries left"
exit 1
fi
done
echo '✔ Canary promotion test passed'
kubectl -n flagger-system logs deployment/flagger

test/e2e-kubernetes.sh (new executable file, 17 lines)

@@ -0,0 +1,17 @@
#!/usr/bin/env bash
set -o errexit
REPO_ROOT=$(git rev-parse --show-toplevel)
export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
echo '>>> Loading Flagger image'
kind load docker-image test/flagger:latest
echo '>>> Installing Flagger'
kubectl apply -k ${REPO_ROOT}/kustomize/kubernetes
kubectl -n flagger-system set image deployment/flagger flagger=test/flagger:latest
kubectl -n flagger-system rollout status deployment/flagger
kubectl -n flagger-system rollout status deployment/flagger-prometheus


@@ -2,7 +2,7 @@
set -o errexit
LINKERD_VER="edge-19.6.4"
LINKERD_VER="stable-2.4.0"
REPO_ROOT=$(git rev-parse --show-toplevel)
export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"


@@ -4,9 +4,10 @@ set -o errexit
REPO_ROOT=$(git rev-parse --show-toplevel)
export KUBECONFIG="$(kind get kubeconfig-path --name="kind")"
NGINX_VERSION=1.12.1
echo '>>> Installing NGINX Ingress'
helm upgrade -i nginx-ingress stable/nginx-ingress \
helm upgrade -i nginx-ingress stable/nginx-ingress --version=${NGINX_VERSION} \
--wait \
--namespace ingress-nginx \
--set controller.stats.enabled=true \


@@ -13,7 +13,7 @@ kubectl create namespace test
kubectl label namespace test istio-injection=enabled
echo '>>> Installing the load tester'
kubectl -n test apply -f ${REPO_ROOT}/artifacts/loadtester/
kubectl apply -k ${REPO_ROOT}/kustomize/tester
kubectl -n test rollout status deployment/flagger-loadtester
echo '>>> Initialising canary'
@@ -33,6 +33,7 @@ spec:
progressDeadlineSeconds: 60
service:
port: 9898
portDiscovery: true
headers:
request:
add:
@@ -142,6 +143,7 @@ spec:
name: podinfo
progressDeadlineSeconds: 60
service:
portDiscovery: true
port: 9898
canaryAnalysis:
interval: 10s
@@ -202,4 +204,4 @@ echo '✔ A/B testing promotion test passed'
kubectl -n istio-system logs deployment/flagger
echo '✔ All tests passed'
echo '✔ All tests passed'


@@ -20,6 +20,7 @@ spec:
metadata:
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "9797"
labels:
app: podinfo
spec:
@@ -31,9 +32,13 @@ spec:
- containerPort: 9898
name: http
protocol: TCP
- containerPort: 9797
name: http-prom
protocol: TCP
command:
- ./podinfo
- --port=9898
- --port-metrics=9797
- --level=info
- --random-delay=false
- --random-error=false