Mirror of https://github.com/fluxcd/flagger.git, synced 2026-02-15 18:40:12 +00:00
Compare commits
273 Commits
Commit SHAs:

2be6f3d678, 3d7091a56b, 1f0305949e, 1332db85c5, 1f06ec838d, 308351918c, 558a1fc6e6, bc3256e1c5, 6eaf421f98, 1271f12d3f,
4776b1d285, e4dc923299, 98ba38d436, 9d765feb38, 7e6a70bdbf, 455ec1b6e7, 3b152a370f, 8d7d5e6810, 8dc4c03258, 0082b3307b,
b1a9c33d36, 6e06cf1074, 8d61e6f893, 9c71e70a0a, 91395ea1ab, 0894304fce, 9cfa0ac43f, 1d5029d607, e6d1880c93, 6da533090a,
17efcaa6d1, 38dfda9d8f, 0abc254ef2, db427b5e54, b49d63bdfe, c84f7addff, 5d72398925, 11d16468c9, 82b61d69b7, 824391321f,
a7c242e437, 1544610203, 14ca775ed9, f1d29f5951, ad0a66ffcc, 4288fa261c, a537637dc9, 851c6701b3, bb4591106a, 7641190ecb,
02b579f128, 9cf6b407f1, c3564176f8, ae9cf57fd5, ae63b01373, c066a9163b, 38b04f2690, ee0e7b091a, e922c3e9d9, 2c31a4bf90,
7332e6b173, 968d67a7c3, 266b957fc6, 357ef86c8b, d75ade5e8c, 806b95c8ce, bf58cd763f, 52856177e3, 58c3cebaac, 1e5d05c3fc,
020129bf5c, 3ff0786e1f, a60dc55dad, ff6acae544, 09b5295c85, 9e423a6f71, 0ef05edf1e, a59901aaa9, 53be3e07d2, 2eb2ae52cd,
7bcc76eca0, 0d531e7bd1, 08851f83c7, 295f5d7b39, a828524957, 6661406b75, 8766523279, b02a6da614, 89d7cb1b04, 59d18de753,
e1d8703a15, 1ba595bc6f, 446a2b976c, 9af6ade54d, 3fbe62aa47, 4454c9b5b5, c2cf9bf4b1, 3afc7978bd, 7a0ba8b477, 0eb21a98a5,
2876092912, 3dbfa34a53, 656f81787c, 920d558fde, 638a9f1c93, f1c3ee7a82, 878f106573, 945eded6bf, f94f9c23d6, 527b73e8ef,
d4555c5919, 560bb93e3d, e7fc72e6b5, 4203232b05, a06aa05201, 8e582e9b73, 0e9fe8a446, 27b4bcc648, 614b7c74c4, 5901129ec6,
ded14345b4, dd272c6870, b31c7c6230, b0297213c3, d0fba2d111, 9924cc2152, 008a74f86c, 4ca110292f, 55b4c19670, 8349dd1cda,
402fb66b2a, f991274b97, 0d94a49b6a, 7c14225442, 2af0a050bc, 582f8d6abd, eeea3123ac, 51fe43e169, 6e6b127092, c9bacdfe05,
f56a69770c, 0196124c9f, 63756d9d5f, 8e346960ac, 1b485b3459, ee05108279, dfaa039c9c, 46579d2ee6, f372523fb8, 5e434df6ea,
d6c5bdd241, cdcd97244c, 60c4bba263, 2b73bc5e38, 03652dc631, 00155aff37, 206c3e6d7a, 8345fea812, c11dba1e05, 7d4c3c5814,
9b36794c9d, 1f34c656e9, 9982dc9c83, 780f3d2ab9, 1cb09890fb, faae6a7c3b, d4250f3248, a8ee477b62, 673b6102a7, 316de42a2c,
dfb4b35e6c, 61ab596d1b, 3345692751, dff9287c75, b5fb7cdae5, 2e79817437, 5f439adc36, 45df96ff3c, 98ee150364, d328a2146a,
4513f2e8be, 095fef1de6, 754f02a30f, 01a4e7f6a8, 6bba84422d, 26190d0c6a, 2d9098e43c, 7581b396b2, 67a6366906, 5605fab740,
b76d0001ed, 625eed0840, 37f9151de3, 20af98e4dc, 76800d0ed0, 3103bde7f7, 298d8c2d65, 5cdacf81e3, 2141d88ce1, e8a2d4be2e,
9a9baadf0e, a21e53fa31, 61f8aea7d8, e384b03d49, 0c60cf39f8, 268fa9999f, ff7d4e747c, 121fc57aa6, 991fa1cfc8, fb2961715d,
74c1c2f1ef, 4da6c1b6e4, fff03b170f, 434acbb71b, 01962c32cd, 6b0856a054, 708dbd6bbc, e3801cbff6, fc68635098, 6706ca5d65,
44c2fd57c5, a9aab3e3ac, 6478d0b6cf, 958af18dc0, 54b8257c60, e86f62744e, 0734773993, 888cc667f1, 053d0da617, 7a4e0bc80c,
7b7306584f, d6027af632, 761746af21, 510a6eaaed, 655df36913, 2e079ba7a1, 9df6bfbb5e, 2ff86fa56e, 1b2e0481b9, fe96af64e9,
77d8e4e4d3, 800b0475ee, b58e13809c, 9845578cdd, 96ccfa54fb, b8a64c79be, 4a4c261a88, 8282f86d9c, 2b6966d8e3, c667c947ad,
105b28bf42, 37a1ff5c99, d19a070faf, d908355ab3, a6d86f2e81, 9d856a4f96, a7112fafb0, 93f9e51280, 65e9a402cf, f7513b33a6,
0b3fa517d3, 507075920c, a212f032a6, eb8755249f, 73bb2a9fa2, 5d3ffa8c90, 87f143f5fd, f56b6dd6a7, 5e40340f9c, 2456737df7,
1191d708de, 4d26971fc7, 0421b32834
.circleci/config.yml

@@ -3,13 +3,16 @@ jobs:
   build-binary:
     docker:
-      - image: circleci/golang:1.12
+      - image: circleci/golang:1.13
     working_directory: ~/build
     steps:
      - checkout
      - restore_cache:
          keys:
            - go-mod-v3-{{ checksum "go.sum" }}
      - run:
          name: Run go mod download
          command: go mod download
      - run:
          name: Run go fmt
          command: make test-fmt
@@ -44,7 +47,7 @@ jobs:
   push-container:
     docker:
-      - image: circleci/golang:1.12
+      - image: circleci/golang:1.13
     steps:
      - checkout
      - setup_remote_docker:
@@ -56,7 +59,7 @@ jobs:
   push-binary:
     docker:
-      - image: circleci/golang:1.12
+      - image: circleci/golang:1.13
     working_directory: ~/build
     steps:
      - checkout
@@ -85,10 +88,21 @@ jobs:
      - attach_workspace:
          at: /tmp/bin
      - run: test/container-build.sh
-     - run: test/e2e-kind.sh
+     - run: test/e2e-kind.sh v1.17.0
      - run: test/e2e-kubernetes.sh
      - run: test/e2e-kubernetes-tests.sh

+  e2e-kubernetes-svc-testing:
+    machine: true
+    steps:
+     - checkout
+     - attach_workspace:
+         at: /tmp/bin
+     - run: test/container-build.sh
+     - run: test/e2e-kind.sh
+     - run: test/e2e-kubernetes.sh
+     - run: test/e2e-kubernetes-svc-tests.sh
+
   e2e-smi-istio-testing:
     machine: true
     steps:
@@ -100,17 +114,6 @@ jobs:
      - run: test/e2e-smi-istio.sh
      - run: test/e2e-tests.sh canary

-  e2e-supergloo-testing:
-    machine: true
-    steps:
-     - checkout
-     - attach_workspace:
-         at: /tmp/bin
-     - run: test/container-build.sh
-     - run: test/e2e-kind.sh 0.2.1
-     - run: test/e2e-supergloo.sh
-     - run: test/e2e-tests.sh canary
-
   e2e-gloo-testing:
     machine: true
     steps:
@@ -132,6 +135,9 @@ jobs:
      - run: test/e2e-kind.sh
      - run: test/e2e-nginx.sh
      - run: test/e2e-nginx-tests.sh
+     - run: test/e2e-nginx-cleanup.sh
+     - run: test/e2e-nginx-custom-annotations.sh
+     - run: test/e2e-nginx-tests.sh

   e2e-linkerd-testing:
     machine: true
@@ -144,9 +150,20 @@ jobs:
      - run: test/e2e-linkerd.sh
      - run: test/e2e-linkerd-tests.sh

+  e2e-contour-testing:
+    machine: true
+    steps:
+     - checkout
+     - attach_workspace:
+         at: /tmp/bin
+     - run: test/container-build.sh
+     - run: test/e2e-kind.sh
+     - run: test/e2e-contour.sh
+     - run: test/e2e-contour-tests.sh
+
   push-helm-charts:
     docker:
-      - image: circleci/golang:1.12
+      - image: circleci/golang:1.13
     steps:
      - checkout
      - run:
@@ -200,9 +217,6 @@ workflows:
      - e2e-kubernetes-testing:
          requires:
            - build-binary
-     # - e2e-supergloo-testing:
-     #     requires:
-     #       - build-binary
      - e2e-gloo-testing:
          requires:
            - build-binary
@@ -212,12 +226,14 @@ workflows:
      - e2e-linkerd-testing:
          requires:
            - build-binary
+     - e2e-contour-testing:
+         requires:
+           - build-binary
      - push-container:
          requires:
            - build-binary
            - e2e-istio-testing
            - e2e-kubernetes-testing
-           #- e2e-supergloo-testing
            - e2e-gloo-testing
            - e2e-nginx-testing
            - e2e-linkerd-testing
CHANGELOG.md (164 changed lines)
@@ -2,6 +2,170 @@

All notable changes to this project are documented in this file.

## 0.22.0 (2020-01-16)

Adds event dispatching through webhooks

#### Features

- Implement event dispatching webhook [#409](https://github.com/weaveworks/flagger/pull/409)
- Add general purpose event webhook [#401](https://github.com/weaveworks/flagger/pull/401)

#### Improvements

- Update Contour to v1.1 and add Linkerd header [#411](https://github.com/weaveworks/flagger/pull/411)
- Update Istio e2e to v1.4.3 [#407](https://github.com/weaveworks/flagger/pull/407)
- Update Kubernetes packages to 1.17 [#406](https://github.com/weaveworks/flagger/pull/406)
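As a sketch of what the new event dispatching looks like in practice (the `event` webhook type comes from the CRD change further down in this changeset; the receiver URL is a hypothetical placeholder, not a shipped service):

```yaml
  canaryAnalysis:
    webhooks:
      # forwards Flagger's canary events (promotions, rollbacks) to a receiver;
      # http://event-recorder.test/ is a placeholder endpoint
      - name: send-events
        type: event
        url: http://event-recorder.test/
```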
## 0.21.0 (2020-01-06)

Adds support for the Contour ingress controller

#### Features

- Add support for Contour ingress controller [#397](https://github.com/weaveworks/flagger/pull/397)
- Add support for Envoy managed by Crossover via SMI [#386](https://github.com/weaveworks/flagger/pull/386)
- Extend canary target ref to Kubernetes Service kind [#372](https://github.com/weaveworks/flagger/pull/372)

#### Improvements

- Add Prometheus operator PodMonitor template to Helm chart [#399](https://github.com/weaveworks/flagger/pull/399)
- Update e2e tests to Kubernetes v1.16 [#390](https://github.com/weaveworks/flagger/pull/390)

## 0.20.4 (2019-12-03)

Adds support for taking over a running deployment without disruption

#### Improvements

- Add initialization phase to Kubernetes router [#384](https://github.com/weaveworks/flagger/pull/384)
- Add canary controller interface and Kubernetes deployment kind implementation [#378](https://github.com/weaveworks/flagger/pull/378)

#### Fixes

- Skip primary check on skip analysis [#380](https://github.com/weaveworks/flagger/pull/380)

## 0.20.3 (2019-11-13)

Adds wrk to the load tester tools and the App Mesh gateway chart to the Flagger Helm repository

#### Improvements

- Add wrk to load tester tools [#368](https://github.com/weaveworks/flagger/pull/368)
- Add App Mesh gateway chart [#365](https://github.com/weaveworks/flagger/pull/365)

## 0.20.2 (2019-11-07)

Adds support for exposing canaries outside the cluster using App Mesh Gateway annotations

#### Improvements

- Expose canaries on public domains with App Mesh Gateway [#358](https://github.com/weaveworks/flagger/pull/358)

#### Fixes

- Use the specified replicas when scaling up the canary [#363](https://github.com/weaveworks/flagger/pull/363)

## 0.20.1 (2019-11-03)

Fixes promql execution and updates the load testing tools

#### Improvements

- Update load tester Helm tools [8349dd1](https://github.com/weaveworks/flagger/commit/8349dd1cda59a741c7bed9a0f67c0fc0fbff4635)
- e2e testing: update providers [#346](https://github.com/weaveworks/flagger/pull/346)

#### Fixes

- Fix Prometheus query escaping [#353](https://github.com/weaveworks/flagger/pull/353)
- Update the hey release link [#350](https://github.com/weaveworks/flagger/pull/350)

## 0.20.0 (2019-10-21)

Adds support for [A/B testing](https://docs.flagger.app/usage/ab-testing) and retry policies when using App Mesh

#### Features

- Implement App Mesh A/B testing based on HTTP header match conditions [#340](https://github.com/weaveworks/flagger/pull/340)
- Implement App Mesh HTTP retry policy [#338](https://github.com/weaveworks/flagger/pull/338)
- Implement metrics server override [#342](https://github.com/weaveworks/flagger/pull/342)

#### Improvements

- Add the app/name label to services and primary deployment [#333](https://github.com/weaveworks/flagger/pull/333)
- Allow setting Slack and Teams URLs with env vars [#334](https://github.com/weaveworks/flagger/pull/334)
- Refactor Gloo integration [#344](https://github.com/weaveworks/flagger/pull/344)

#### Fixes

- Generate unique names for App Mesh virtual routers and routes [#336](https://github.com/weaveworks/flagger/pull/336)

## 0.19.0 (2019-10-08)

Adds support for canary and blue/green [traffic mirroring](https://docs.flagger.app/usage/progressive-delivery#traffic-mirroring)

#### Features

- Add traffic mirroring for Istio service mesh [#311](https://github.com/weaveworks/flagger/pull/311)
- Implement canary service target port [#327](https://github.com/weaveworks/flagger/pull/327)

#### Improvements

- Allow gRPC protocol for App Mesh [#325](https://github.com/weaveworks/flagger/pull/325)
- Enforce blue/green when using Kubernetes networking [#326](https://github.com/weaveworks/flagger/pull/326)

#### Fixes

- Fix port discovery diff [#324](https://github.com/weaveworks/flagger/pull/324)
- Helm chart: enable Prometheus scraping of Flagger metrics [2141d88](https://github.com/weaveworks/flagger/commit/2141d88ce1cc6be220dab34171c215a334ecde24)

## 0.18.6 (2019-10-03)

Adds support for App Mesh conformance tests and latency metric checks

#### Improvements

- Add support for acceptance testing when using App Mesh [#322](https://github.com/weaveworks/flagger/pull/322)
- Add Kustomize installer for App Mesh [#310](https://github.com/weaveworks/flagger/pull/310)
- Update Linkerd to v2.5.0 and Prometheus to v2.12.0 [#323](https://github.com/weaveworks/flagger/pull/323)

#### Fixes

- Fix Slack/Teams notification field mapping [#318](https://github.com/weaveworks/flagger/pull/318)

## 0.18.5 (2019-10-02)

Adds support for [confirm-promotion](https://docs.flagger.app/how-it-works#webhooks) webhooks and blue/green deployments when using a service mesh

#### Features

- Implement confirm-promotion hook [#307](https://github.com/weaveworks/flagger/pull/307)
- Implement B/G for service mesh providers [#305](https://github.com/weaveworks/flagger/pull/305)

#### Improvements

- Canary promotion improvements to avoid dropping in-flight requests [#310](https://github.com/weaveworks/flagger/pull/310)
- Update end-to-end tests to Kubernetes v1.15.3 and Istio 1.3.0 [#306](https://github.com/weaveworks/flagger/pull/306)

#### Fixes

- Skip primary check for App Mesh [#315](https://github.com/weaveworks/flagger/pull/315)

## 0.18.4 (2019-09-08)

Adds support for NGINX custom annotations and Helm v3 acceptance testing

#### Features

- Add annotations prefix for NGINX ingresses [#293](https://github.com/weaveworks/flagger/pull/293)
- Add wide columns in CRD [#289](https://github.com/weaveworks/flagger/pull/289)
- loadtester: implement Helm v3 test command [#296](https://github.com/weaveworks/flagger/pull/296)
- loadtester: add gRPC health check to the load tester image [#295](https://github.com/weaveworks/flagger/pull/295)

#### Fixes

- loadtester: fix test error logging [#286](https://github.com/weaveworks/flagger/pull/286)

## 0.18.3 (2019-08-22)

Adds support for tillerless Helm tests and protobuf health checking
Dockerfile

@@ -1,4 +1,4 @@
-FROM alpine:3.9
+FROM alpine:3.10

 RUN addgroup -S flagger \
     && adduser -S -g flagger flagger \
Dockerfile.loadtester

@@ -1,33 +1,64 @@
+FROM alpine:3.10.3 as build
+
+RUN apk --no-cache add alpine-sdk perl curl
+
+RUN curl -sSLo hey "https://storage.googleapis.com/hey-release/hey_linux_amd64" && \
+    chmod +x hey && mv hey /usr/local/bin/hey
+
+RUN HELM2_VERSION=2.16.1 && \
+    curl -sSL "https://get.helm.sh/helm-v${HELM2_VERSION}-linux-amd64.tar.gz" | tar xvz && \
+    chmod +x linux-amd64/helm && mv linux-amd64/helm /usr/local/bin/helm && \
+    chmod +x linux-amd64/tiller && mv linux-amd64/tiller /usr/local/bin/tiller
+
+RUN HELM3_VERSION=3.0.1 && \
+    curl -sSL "https://get.helm.sh/helm-v${HELM3_VERSION}-linux-amd64.tar.gz" | tar xvz && \
+    chmod +x linux-amd64/helm && mv linux-amd64/helm /usr/local/bin/helmv3
+
+RUN GRPC_HEALTH_PROBE_VERSION=v0.3.1 && \
+    wget -qO /usr/local/bin/grpc_health_probe https://github.com/grpc-ecosystem/grpc-health-probe/releases/download/${GRPC_HEALTH_PROBE_VERSION}/grpc_health_probe-linux-amd64 && \
+    chmod +x /usr/local/bin/grpc_health_probe
+
+RUN GHZ_VERSION=0.39.0 && \
+    curl -sSL "https://github.com/bojand/ghz/releases/download/v${GHZ_VERSION}/ghz_${GHZ_VERSION}_Linux_x86_64.tar.gz" | tar xz -C /tmp && \
+    mv /tmp/ghz /usr/local/bin && chmod +x /usr/local/bin/ghz
+
+RUN HELM_TILLER_VERSION=0.9.3 && \
+    curl -sSL "https://github.com/rimusz/helm-tiller/archive/v${HELM_TILLER_VERSION}.tar.gz" | tar xz -C /tmp && \
+    mv /tmp/helm-tiller-${HELM_TILLER_VERSION} /tmp/helm-tiller
+
+RUN WRK_VERSION=4.0.2 && \
+    cd /tmp && git clone -b ${WRK_VERSION} https://github.com/wg/wrk
+RUN cd /tmp/wrk && make
+
 FROM bats/bats:v1.1.0

-RUN addgroup -S app \
-    && adduser -S -g app app \
-    && apk --no-cache add ca-certificates curl jq
+RUN addgroup -S app && \
+    adduser -S -g app app && \
+    apk --no-cache add ca-certificates curl jq libgcc

 WORKDIR /home/app

-RUN curl -sSLo hey "https://storage.googleapis.com/jblabs/dist/hey_linux_v0.1.2" && \
-    chmod +x hey && mv hey /usr/local/bin/hey
-
-RUN curl -sSL "https://get.helm.sh/helm-v2.14.3-linux-amd64.tar.gz" | tar xvz && \
-    chmod +x linux-amd64/helm && mv linux-amd64/helm /usr/local/bin/helm && \
-    chmod +x linux-amd64/tiller && mv linux-amd64/tiller /usr/local/bin/tiller && \
-    rm -rf linux-amd64
-
-RUN curl -sSL "https://github.com/bojand/ghz/releases/download/v0.39.0/ghz_0.39.0_Linux_x86_64.tar.gz" | tar xz -C /tmp && \
-    mv /tmp/ghz /usr/local/bin && chmod +x /usr/local/bin/ghz && rm -rf /tmp/ghz-web
-
+COPY --from=build /usr/local/bin/hey /usr/local/bin/
+COPY --from=build /tmp/wrk/wrk /usr/local/bin/
+COPY --from=build /usr/local/bin/helm /usr/local/bin/
+COPY --from=build /usr/local/bin/tiller /usr/local/bin/
+COPY --from=build /usr/local/bin/ghz /usr/local/bin/
+COPY --from=build /usr/local/bin/helmv3 /usr/local/bin/
+COPY --from=build /usr/local/bin/grpc_health_probe /usr/local/bin/
+COPY --from=build /tmp/helm-tiller /tmp/helm-tiller
 ADD https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/health/v1/health.proto /tmp/ghz/health.proto

 RUN ls /tmp

 COPY ./bin/loadtester .

 RUN chown -R app:app ./

 USER app

-RUN curl -sSL "https://github.com/rimusz/helm-tiller/archive/v0.8.3.tar.gz" | tar xvz && \
-    helm init --client-only && helm plugin install helm-tiller-0.8.3 && helm plugin list
+# test load generator tools
+RUN hey -n 1 -c 1 https://flagger.app > /dev/null && echo $? | grep 0
+RUN wrk -d 1s -c 1 -t 1 https://flagger.app > /dev/null && echo $? | grep 0
+
+# install Helm v2 plugins
+RUN helm init --client-only && helm plugin install /tmp/helm-tiller

 ENTRYPOINT ["./loadtester"]
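With wrk now baked into the image alongside hey, a load-test webhook can shell out to it through the existing `cmd` metadata type, mirroring the hey-based webhooks elsewhere in this changeset; a sketch (the target URL is a placeholder):

```yaml
webhooks:
  - name: wrk-load-test
    url: http://flagger-loadtester.test/
    timeout: 5s
    metadata:
      type: cmd
      # wrk is on the loadtester PATH after this Dockerfile change
      cmd: "wrk -d 60s -c 2 -t 2 http://podinfo-canary.test:9898/"
      logCmdOutput: "true"
```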
Makefile (15 changed lines)
@@ -7,15 +7,8 @@ LT_VERSION?=$(shell grep 'VERSION' cmd/loadtester/main.go | awk '{ print $$4 }'
 TS=$(shell date +%Y-%m-%d_%H-%M-%S)

 run:
-	GO111MODULE=on go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=istio -namespace=test \
-	-metrics-server=https://prometheus.istio.weavedx.com \
-	-enable-leader-election=true
-
-run2:
-	GO111MODULE=on go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=istio -namespace=test \
-	-metrics-server=https://prometheus.istio.weavedx.com \
-	-enable-leader-election=true \
-	-port=9092
+	GO111MODULE=on go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=istio -namespace=test-istio \
+	-metrics-server=https://prometheus.istio.flagger.dev

 run-appmesh:
 	GO111MODULE=on go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=appmesh \

@@ -38,8 +31,8 @@ run-nop:
 	-metrics-server=https://prometheus.istio.weavedx.com

 run-linkerd:
-	GO111MODULE=on go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=smi:linkerd -namespace=demo \
-	-metrics-server=https://linkerd-prometheus.istio.weavedx.com
+	GO111MODULE=on go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=linkerd -namespace=dev \
+	-metrics-server=https://prometheus.linkerd.flagger.dev

 build:
 	GIT_COMMIT=$$(git rev-list -1 HEAD) && GO111MODULE=on CGO_ENABLED=0 GOOS=linux go build -ldflags "-s -w -X github.com/weaveworks/flagger/pkg/version.REVISION=$${GIT_COMMIT}" -a -installsuffix cgo -o ./bin/flagger ./cmd/flagger/*
README.md (54 changed lines)
@@ -7,7 +7,7 @@
 [](https://github.com/weaveworks/flagger/releases)

 Flagger is a Kubernetes operator that automates the promotion of canary deployments
-using Istio, Linkerd, App Mesh, NGINX or Gloo routing for traffic shifting and Prometheus metrics for canary analysis.
+using Istio, Linkerd, App Mesh, NGINX, Contour or Gloo routing for traffic shifting and Prometheus metrics for canary analysis.
 The canary analysis can be extended with webhooks for running acceptance tests,
 load tests or any other custom validation.

@@ -39,11 +39,12 @@ Flagger documentation can be found at [docs.flagger.app](https://docs.flagger.ap
 * [FAQ](https://docs.flagger.app/faq)
 * Usage
   * [Istio canary deployments](https://docs.flagger.app/usage/progressive-delivery)
   * [Istio A/B testing](https://docs.flagger.app/usage/ab-testing)
   * [Linkerd canary deployments](https://docs.flagger.app/usage/linkerd-progressive-delivery)
   * [App Mesh canary deployments](https://docs.flagger.app/usage/appmesh-progressive-delivery)
   * [NGINX ingress controller canary deployments](https://docs.flagger.app/usage/nginx-progressive-delivery)
   * [Gloo ingress controller canary deployments](https://docs.flagger.app/usage/gloo-progressive-delivery)
+  * [Contour canary deployments](https://docs.flagger.app/usage/contour-progressive-delivery)
+  * [Crossover canary deployments](https://docs.flagger.app/usage/crossover-progressive-delivery)
   * [Blue/Green deployments](https://docs.flagger.app/usage/blue-green)
   * [Monitoring](https://docs.flagger.app/usage/monitoring)
   * [Alerting](https://docs.flagger.app/usage/alerting)

@@ -69,8 +70,7 @@ metadata:
   namespace: test
 spec:
   # service mesh provider (optional)
-  # can be: kubernetes, istio, linkerd, appmesh, nginx, gloo, supergloo
+  # use the kubernetes provider for Blue/Green style deployments
+  # can be: kubernetes, istio, linkerd, appmesh, nginx, contour, gloo, supergloo
   provider: istio
   # deployment reference
   targetRef:

@@ -86,14 +86,12 @@ spec:
     kind: HorizontalPodAutoscaler
     name: podinfo
   service:
-    # container port
+    # ClusterIP port number
     port: 9898
     # Istio gateways (optional)
     gateways:
     - public-gateway.istio-system.svc.cluster.local
     # Istio virtual service host names (optional)
     hosts:
     - podinfo.example.com
+    # container port name or number (optional)
+    targetPort: 9898
-    # port name can be http or grpc (default http)
-    portName: http
     # HTTP match conditions (optional)
     match:
     - uri:

@@ -101,10 +99,6 @@ spec:
     # HTTP rewrite (optional)
     rewrite:
       uri: /
-    # cross-origin resource sharing policy (optional)
-    corsPolicy:
-      allowOrigin:
-      - example.com
     # request timeout (optional)
     timeout: 5s
   # promote the canary without analysing it (default false)

@@ -144,7 +138,7 @@ spec:
           topic="podinfo"
         }[1m]
       )
-    # external checks (optional)
+    # testing (optional)
     webhooks:
     - name: load-test
       url: http://flagger-loadtester.test/

@@ -157,19 +151,21 @@ For more details on how the canary analysis and promotion works please [read the

 ## Features

-| Feature                                    | Istio              | Linkerd            | App Mesh           | NGINX              | Gloo               |
-| ------------------------------------------ | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ |
-| Canary deployments (weighted traffic)      | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
-| A/B testing (headers and cookies filters)  | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_minus_sign: |
-| Webhooks (acceptance/load testing)         | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
-| Request success rate check (L7 metric)     | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
-| Request duration check (L7 metric)         | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_check_mark: |
-| Custom promql checks                       | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
-| Traffic policy, CORS, retries and timeouts | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: |
+| Feature                                    | Istio              | Linkerd            | App Mesh           | NGINX              | Gloo               | Contour            | CNI                |
+| ------------------------------------------ | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ |
+| Canary deployments (weighted traffic)      | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: |
+| A/B testing (headers and cookies routing)  | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_minus_sign: |
+| Blue/Green deployments (traffic switch)    | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
+| Webhooks (acceptance/load testing)         | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
+| Manual gating (approve/pause/resume)       | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
+| Request success rate check (L7 metric)     | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: |
+| Request duration check (L7 metric)         | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: |
+| Custom promql checks                       | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
+| Traffic policy, CORS, retries and timeouts | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_minus_sign: |

 ## Roadmap

-* Integrate with other ingress controllers like Contour, HAProxy, ALB
+* Integrate with other service meshes like Consul Connect and ingress controllers like HAProxy, ALB
 * Add support for comparing the canary metrics to the primary ones and do the validation based on the deviation between the two

 ## Contributing

@@ -180,9 +176,9 @@ When submitting bug reports please include as many details as possible:

 * which Flagger version
 * which Flagger CRD version
-* which Kubernetes/Istio version
-* what configuration (canary, virtual service and workloads definitions)
-* what happened (Flagger, Istio Pilot and Proxy logs)
+* which Kubernetes version
+* what configuration (canary, ingress and workloads definitions)
+* what happened (Flagger and Proxy logs)

 ## Getting Help
(deleted file)

@@ -1,67 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: abtest
  namespace: test
  labels:
    app: abtest
spec:
  minReadySeconds: 5
  revisionHistoryLimit: 5
  progressDeadlineSeconds: 60
  strategy:
    rollingUpdate:
      maxUnavailable: 0
    type: RollingUpdate
  selector:
    matchLabels:
      app: abtest
  template:
    metadata:
      annotations:
        prometheus.io/scrape: "true"
      labels:
        app: abtest
    spec:
      containers:
        - name: podinfod
          image: quay.io/stefanprodan/podinfo:1.7.0
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 9898
              name: http
              protocol: TCP
          command:
            - ./podinfo
            - --port=9898
            - --level=info
            - --random-delay=false
            - --random-error=false
          env:
            - name: PODINFO_UI_COLOR
              value: blue
          livenessProbe:
            exec:
              command:
                - podcli
                - check
                - http
                - localhost:9898/healthz
            initialDelaySeconds: 5
            timeoutSeconds: 5
          readinessProbe:
            exec:
              command:
                - podcli
                - check
                - http
                - localhost:9898/readyz
            initialDelaySeconds: 5
            timeoutSeconds: 5
          resources:
            limits:
              cpu: 2000m
              memory: 512Mi
            requests:
              cpu: 100m
              memory: 64Mi
(deleted file)

@@ -1,19 +0,0 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
  name: abtest
  namespace: test
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: abtest
  minReplicas: 2
  maxReplicas: 4
  metrics:
    - type: Resource
      resource:
        name: cpu
        # scale up if usage is above
        # 99% of the requested CPU (100m)
        targetAverageUtilization: 99
@@ -20,8 +20,16 @@ spec:
   service:
     # container port
     port: 9898
+    # container port name (optional)
+    # can be http or grpc
+    portName: http
     # App Mesh reference
     meshName: global
+    # App Mesh retry policy (optional)
+    retries:
+      attempts: 3
+      perTryTimeout: 1s
+      retryOn: "gateway-error,client-error,stream-error"
   # define the canary analysis timing and KPIs
   canaryAnalysis:
     # schedule interval (default 60s)
@@ -41,8 +49,20 @@ spec:
       # percentage (0-100)
       threshold: 99
       interval: 1m
-    # external checks (optional)
+    - name: request-duration
+      # maximum req duration P99
+      # milliseconds
+      threshold: 500
+      interval: 30s
+    # testing (optional)
     webhooks:
+    - name: acceptance-test
+      type: pre-rollout
+      url: http://flagger-loadtester.test/
+      timeout: 30s
+      metadata:
+        type: bash
+        cmd: "curl -sd 'test' http://podinfo-canary.test:9898/token | grep token"
     - name: load-test
       url: http://flagger-loadtester.test/
       timeout: 5s
@@ -25,7 +25,7 @@ spec:
     spec:
       containers:
       - name: podinfod
-        image: quay.io/stefanprodan/podinfo:2.0.0
+        image: stefanprodan/podinfo:3.1.0
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9898
@@ -13,7 +13,7 @@ data:
     - address:
         socket_address:
           address: 0.0.0.0
-          port_value: 80
+          port_value: 8080
      filter_chains:
        - filters:
           - name: envoy.http_connection_manager
@@ -48,11 +48,15 @@ data:
        connect_timeout: 0.30s
        type: strict_dns
        lb_policy: round_robin
        http2_protocol_options: {}
-       hosts:
-         - socket_address:
-             address: podinfo.test
-             port_value: 9898
+       load_assignment:
+         cluster_name: podinfo
+         endpoints:
+           - lb_endpoints:
+               - endpoint:
+                   address:
+                     socket_address:
+                       address: podinfo.test
+                       port_value: 9898
     admin:
       access_log_path: /dev/null
       address:
@@ -91,7 +95,7 @@ spec:
       terminationGracePeriodSeconds: 30
       containers:
        - name: ingress
-         image: "envoyproxy/envoy-alpine:d920944aed67425f91fc203774aebce9609e5d9a"
+         image: "envoyproxy/envoy-alpine:v1.11.1"
         securityContext:
           capabilities:
             drop:
@@ -99,25 +103,20 @@ spec:
             add:
               - NET_BIND_SERVICE
-        command:
-          - /usr/bin/dumb-init
-          - --
-        args:
-          - /usr/local/bin/envoy
-          - --base-id 30
-          - --v2-config-only
+        args:
          - -l
          - $loglevel
          - -c
          - /config/envoy.yaml
+         - --base-id
+         - "1234"
         ports:
          - name: admin
            containerPort: 9999
            protocol: TCP
          - name: http
-           containerPort: 80
-           protocol: TCP
-         - name: https
-           containerPort: 443
+           containerPort: 8080
            protocol: TCP
         livenessProbe:
           initialDelaySeconds: 5
@@ -151,11 +150,7 @@ spec:
    - protocol: TCP
      name: http
      port: 80
-     targetPort: 80
-   - protocol: TCP
-     name: https
-     port: 443
-     targetPort: 443
+     targetPort: http
  type: LoadBalancer
---
apiVersion: appmesh.k8s.aws/v1beta1
@@ -1,14 +1,14 @@
 apiVersion: flagger.app/v1alpha3
 kind: Canary
 metadata:
-  name: abtest
+  name: podinfo
   namespace: test
 spec:
   # deployment reference
   targetRef:
     apiVersion: apps/v1
     kind: Deployment
-    name: abtest
+    name: podinfo
   # the maximum time in seconds for the canary deployment
   # to make progress before it is rolled back (default 600s)
   progressDeadlineSeconds: 60
@@ -16,7 +16,7 @@ spec:
   autoscalerRef:
     apiVersion: autoscaling/v2beta1
     kind: HorizontalPodAutoscaler
-    name: abtest
+    name: podinfo
   service:
     # container port
     port: 9898
@@ -26,7 +26,12 @@ spec:
     - mesh
     # Istio virtual service host names (optional)
     hosts:
-    - abtest.istio.weavedx.com
+    - app.example.com
+    # Istio traffic policy (optional)
+    trafficPolicy:
+      tls:
+        # use ISTIO_MUTUAL when mTLS is enabled
+        mode: DISABLE
   canaryAnalysis:
     # schedule interval (default 60s)
     interval: 10s
@@ -36,12 +41,12 @@ spec:
     iterations: 10
     # canary match condition
     match:
-    - headers:
-        user-agent:
-          regex: "^(?!.*Chrome)(?=.*\bSafari\b).*$"
     - headers:
         cookie:
           regex: "^(.*?;)?(type=insider)(;.*)?$"
+    - headers:
+        user-agent:
+          regex: "(?=.*Safari)(?!.*Chrome).*$"
     metrics:
     - name: request-success-rate
       # minimum req success rate (non 5xx responses)
@@ -37,7 +37,7 @@ spec:
     - mesh
     # Istio virtual service host names (optional)
     hosts:
-    - app.istio.weavedx.com
+    - app.example.com
     # Istio traffic policy (optional)
     trafficPolicy:
       tls:
@@ -20,12 +20,13 @@ spec:
     metadata:
       annotations:
         prometheus.io/scrape: "true"
+        prometheus.io/port: "9898"
       labels:
         app: podinfo
     spec:
       containers:
       - name: podinfod
-        image: stefanprodan/podinfo:2.0.0
+        image: stefanprodan/podinfo:3.1.0
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9898
@@ -81,6 +81,11 @@ rules:
      - virtualservices
      - gateways
    verbs: ["*"]
+  - apiGroups:
+      - projectcontour.io
+    resources:
+      - httpproxies
+    verbs: ["*"]
  - nonResourceURLs:
      - /version
    verbs:
@@ -33,6 +33,26 @@ spec:
    - name: Weight
      type: string
      JSONPath: .status.canaryWeight
+   - name: FailedChecks
+     type: string
+     JSONPath: .status.failedChecks
+     priority: 1
+   - name: Interval
+     type: string
+     JSONPath: .spec.canaryAnalysis.interval
+     priority: 1
+   - name: Mirror
+     type: boolean
+     JSONPath: .spec.canaryAnalysis.mirror
+     priority: 1
+   - name: StepWeight
+     type: string
+     JSONPath: .spec.canaryAnalysis.stepWeight
+     priority: 1
+   - name: MaxWeight
+     type: string
+     JSONPath: .spec.canaryAnalysis.maxWeight
+     priority: 1
    - name: LastTransitionTime
      type: string
      JSONPath: .status.lastTransitionTime
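Because the new columns are declared with `priority: 1`, kubectl only renders them in wide output (standard CRD printer-column behavior); assuming a canary named `podinfo` in the `test` namespace:

```console
$ kubectl -n test get canary podinfo -o wide
```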
@@ -48,13 +68,16 @@ spec:
            provider:
              description: Traffic management provider
              type: string
+           metricsServer:
+             description: Prometheus URL
+             type: string
            progressDeadlineSeconds:
              description: Deployment progress deadline
              type: number
            targetRef:
              description: Deployment selector
              type: object
-             required: ['apiVersion', 'kind', 'name']
+             required: ["apiVersion", "kind", "name"]
              properties:
                apiVersion:
                  type: string
@@ -67,7 +90,7 @@ spec:
              anyOf:
                - type: string
                - type: object
-             required: ['apiVersion', 'kind', 'name']
+             required: ["apiVersion", "kind", "name"]
              properties:
                apiVersion:
                  type: string
@@ -80,7 +103,7 @@ spec:
              anyOf:
                - type: string
                - type: object
-             required: ['apiVersion', 'kind', 'name']
+             required: ["apiVersion", "kind", "name"]
              properties:
                apiVersion:
                  type: string
@@ -90,7 +113,7 @@ spec:
                  type: string
            service:
              type: object
-             required: ['port']
+             required: ["port"]
              properties:
                port:
                  description: Container port number
@@ -98,6 +121,11 @@ spec:
                portName:
                  description: Container port name
                  type: string
+               targetPort:
+                 description: Container target port name
+                 anyOf:
+                   - type: string
+                   - type: number
                portDiscovery:
                  description: Enable port discovery
                  type: boolean
@@ -167,6 +195,9 @@ spec:
                stepWeight:
                  description: Canary incremental traffic percentage step
                  type: number
+               mirror:
+                 description: Mirror traffic to canary before shifting
+                 type: boolean
                match:
                  description: A/B testing match conditions
                  anyOf:
@@ -178,7 +209,7 @@ spec:
                  properties:
                    items:
                      type: object
-                     required: ['name', 'threshold']
+                     required: ["name", "threshold"]
                      properties:
                        name:
                          description: Name of the Prometheus metric
@@ -199,7 +230,7 @@ spec:
                  properties:
                    items:
                      type: object
-                     required: ['name', 'url', 'timeout']
+                     required: ["name", "url"]
                      properties:
                        name:
                          description: Name of the webhook
@@ -212,7 +243,9 @@ spec:
                            - confirm-rollout
                            - pre-rollout
                            - rollout
+                           - confirm-promotion
                            - post-rollout
+                           - event
                        url:
                          description: URL address of this webhook
                          type: string
@@ -237,6 +270,7 @@ spec:
                    - Initialized
                    - Waiting
                    - Progressing
                    - Promoting
+                   - Finalising
                    - Succeeded
                    - Failed
@@ -262,7 +296,7 @@ spec:
              properties:
                items:
                  type: object
-                 required: ['type', 'status', 'reason']
+                 required: ["type", "status", "reason"]
                  properties:
                    lastTransitionTime:
                      description: LastTransitionTime of this condition
@@ -22,7 +22,7 @@ spec:
       serviceAccountName: flagger
       containers:
        - name: flagger
-         image: weaveworks/flagger:0.18.3
+         image: weaveworks/flagger:0.22.0
         imagePullPolicy: IfNotPresent
         ports:
          - name: http
@@ -4,6 +4,7 @@ metadata:
   name: podinfo
   namespace: test
 spec:
+  provider: gloo
   targetRef:
     apiVersion: apps/v1
     kind: Deployment
@@ -28,9 +29,24 @@ spec:
       threshold: 500
       interval: 30s
     webhooks:
+    - name: acceptance-test
+      type: pre-rollout
+      url: http://flagger-loadtester.test/
+      timeout: 10s
+      metadata:
+        type: bash
+        cmd: "curl -sd 'test' http://podinfo-canary:9898/token | grep token"
+    - name: gloo-acceptance-test
+      type: pre-rollout
+      url: http://flagger-loadtester.test/
+      timeout: 10s
+      metadata:
+        type: bash
+        cmd: "curl -sd 'test' -H 'Host: app.example.com' http://gateway-proxy-v2.gloo-system/token | grep token"
     - name: load-test
       url: http://flagger-loadtester.test/
       timeout: 5s
       metadata:
         type: cmd
-        cmd: "hey -z 1m -q 10 -c 2 http://gloo.example.com/"
+        cmd: "hey -z 2m -q 5 -c 2 -host app.example.com http://gateway-proxy-v2.gloo-system"
+        logCmdOutput: "true"
(deleted file)

@@ -1,67 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: podinfo
  namespace: test
  labels:
    app: podinfo
spec:
  minReadySeconds: 5
  revisionHistoryLimit: 5
  progressDeadlineSeconds: 60
  strategy:
    rollingUpdate:
      maxUnavailable: 0
    type: RollingUpdate
  selector:
    matchLabels:
      app: podinfo
  template:
    metadata:
      annotations:
        prometheus.io/scrape: "true"
      labels:
        app: podinfo
    spec:
      containers:
        - name: podinfod
          image: quay.io/stefanprodan/podinfo:1.7.0
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 9898
              name: http
              protocol: TCP
          command:
            - ./podinfo
            - --port=9898
            - --level=info
            - --random-delay=false
            - --random-error=false
          env:
            - name: PODINFO_UI_COLOR
              value: blue
          livenessProbe:
            exec:
              command:
                - podcli
                - check
                - http
                - localhost:9898/healthz
            initialDelaySeconds: 5
            timeoutSeconds: 5
          readinessProbe:
            exec:
              command:
                - podcli
                - check
                - http
                - localhost:9898/readyz
            initialDelaySeconds: 5
            timeoutSeconds: 5
          resources:
            limits:
              cpu: 2000m
              memory: 512Mi
            requests:
              cpu: 100m
              memory: 64Mi
(deleted file)

@@ -1,19 +0,0 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
  name: podinfo
  namespace: test
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  minReplicas: 1
  maxReplicas: 4
  metrics:
    - type: Resource
      resource:
        name: cpu
        # scale up if usage is above
        # 99% of the requested CPU (100m)
        targetAverageUtilization: 99
@@ -7,11 +7,11 @@ spec:
   virtualHost:
     domains:
       - '*'
-    name: podinfo.default
+    name: podinfo
     routes:
       - matcher:
           prefix: /
         routeAction:
           upstreamGroup:
             name: podinfo
-            namespace: gloo
+            namespace: test
@@ -19,7 +19,7 @@ spec:
       serviceAccountName: tiller
       containers:
        - name: helmtester
-         image: weaveworks/flagger-loadtester:0.4.0
+         image: weaveworks/flagger-loadtester:0.8.0
         imagePullPolicy: IfNotPresent
         ports:
          - name: http
@@ -17,7 +17,7 @@ spec:
     spec:
       containers:
        - name: loadtester
-         image: weaveworks/flagger-loadtester:0.6.1
+         image: weaveworks/flagger-loadtester:0.12.1
         imagePullPolicy: IfNotPresent
         ports:
          - name: http
@@ -23,8 +23,10 @@ spec:
   # to make progress before it is rolled back (default 600s)
   progressDeadlineSeconds: 60
   service:
-    # container port
-    port: 9898
+    # ClusterIP port number
+    port: 80
+    # container port number or name
+    targetPort: 9898
   canaryAnalysis:
     # schedule interval (default 60s)
     interval: 10s
(deleted file)

@@ -1,69 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: podinfo
  namespace: test
  labels:
    app: podinfo
spec:
  replicas: 1
  strategy:
    rollingUpdate:
      maxUnavailable: 0
    type: RollingUpdate
  selector:
    matchLabels:
      app: podinfo
  template:
    metadata:
      annotations:
        prometheus.io/scrape: "true"
      labels:
        app: podinfo
    spec:
      containers:
        - name: podinfod
          image: quay.io/stefanprodan/podinfo:1.7.0
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 9898
              name: http
              protocol: TCP
          command:
            - ./podinfo
            - --port=9898
            - --level=info
            - --random-delay=false
            - --random-error=false
          env:
            - name: PODINFO_UI_COLOR
              value: green
          livenessProbe:
            exec:
              command:
                - podcli
                - check
                - http
                - localhost:9898/healthz
            failureThreshold: 3
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 2
          readinessProbe:
            exec:
              command:
                - podcli
                - check
                - http
                - localhost:9898/readyz
            failureThreshold: 3
            periodSeconds: 3
            successThreshold: 1
            timeoutSeconds: 2
          resources:
            limits:
              cpu: 1000m
              memory: 256Mi
            requests:
              cpu: 100m
              memory: 16Mi
(deleted file)

@@ -1,19 +0,0 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
  name: podinfo
  namespace: test
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: podinfo
  minReplicas: 2
  maxReplicas: 4
  metrics:
    - type: Resource
      resource:
        name: cpu
        # scale up if usage is above
        # 99% of the requested CPU (100m)
        targetAverageUtilization: 99
(deleted file)

@@ -1,131 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: trafficsplits.split.smi-spec.io
spec:
  additionalPrinterColumns:
    - JSONPath: .spec.service
      description: The service
      name: Service
      type: string
  group: split.smi-spec.io
  names:
    kind: TrafficSplit
    listKind: TrafficSplitList
    plural: trafficsplits
    singular: trafficsplit
  scope: Namespaced
  subresources:
    status: {}
  version: v1alpha1
  versions:
    - name: v1alpha1
      served: true
      storage: true
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: smi-adapter-istio
  namespace: istio-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: smi-adapter-istio
rules:
  - apiGroups:
      - ""
    resources:
      - pods
      - services
      - endpoints
      - persistentvolumeclaims
      - events
      - configmaps
      - secrets
    verbs:
      - '*'
  - apiGroups:
      - apps
    resources:
      - deployments
      - daemonsets
      - replicasets
      - statefulsets
    verbs:
      - '*'
  - apiGroups:
      - monitoring.coreos.com
    resources:
      - servicemonitors
    verbs:
      - get
      - create
  - apiGroups:
      - apps
    resourceNames:
      - smi-adapter-istio
    resources:
      - deployments/finalizers
    verbs:
      - update
  - apiGroups:
      - split.smi-spec.io
    resources:
      - '*'
    verbs:
      - '*'
  - apiGroups:
      - networking.istio.io
    resources:
      - '*'
    verbs:
      - '*'
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: smi-adapter-istio
subjects:
  - kind: ServiceAccount
    name: smi-adapter-istio
    namespace: istio-system
roleRef:
  kind: ClusterRole
  name: smi-adapter-istio
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: smi-adapter-istio
  namespace: istio-system
spec:
  replicas: 1
  selector:
    matchLabels:
      name: smi-adapter-istio
  template:
    metadata:
      labels:
        name: smi-adapter-istio
      annotations:
        sidecar.istio.io/inject: "false"
    spec:
      serviceAccountName: smi-adapter-istio
      containers:
        - name: smi-adapter-istio
          image: docker.io/stefanprodan/smi-adapter-istio:0.0.2-beta.1
          command:
            - smi-adapter-istio
          imagePullPolicy: Always
          env:
            - name: WATCH_NAMESPACE
              value: ""
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: OPERATOR_NAME
              value: "smi-adapter-istio"
charts/appmesh-gateway/.helmignore (new file, 21 lines)

@@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
charts/appmesh-gateway/Chart.yaml (new file, 19 lines)

@@ -0,0 +1,19 @@
apiVersion: v1
name: appmesh-gateway
description: Flagger Gateway for AWS App Mesh is an edge L7 load balancer that exposes applications outside the mesh.
version: 1.1.1
appVersion: 1.1.0
home: https://flagger.app
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/weaveworks.png
sources:
  - https://github.com/stefanprodan/appmesh-gateway
maintainers:
  - name: Stefan Prodan
    url: https://github.com/stefanprodan
    email: stefanprodan@users.noreply.github.com
keywords:
  - flagger
  - appmesh
  - envoy
  - gateway
  - ingress
87
charts/appmesh-gateway/README.md
Normal file
87
charts/appmesh-gateway/README.md
Normal file
@@ -0,0 +1,87 @@
# Flagger Gateway for App Mesh

[Flagger Gateway for App Mesh](https://github.com/stefanprodan/appmesh-gateway) is an
Envoy-powered load balancer that exposes applications outside the mesh.
The gateway facilitates canary deployments and A/B testing for user-facing web applications running on AWS App Mesh.

## Prerequisites

* Kubernetes >= 1.13
* [App Mesh controller](https://github.com/aws/eks-charts/tree/master/stable/appmesh-controller) >= 0.2.0
* [App Mesh inject](https://github.com/aws/eks-charts/tree/master/stable/appmesh-inject) >= 0.2.0

## Installing the Chart

Add the Flagger Helm repository:

```console
$ helm repo add flagger https://flagger.app
```

Create a namespace with App Mesh sidecar injection enabled:

```sh
kubectl create ns flagger-system
kubectl label namespace flagger-system appmesh.k8s.aws/sidecarInjectorWebhook=enabled
```

Install App Mesh Gateway for an existing mesh:

```sh
helm upgrade -i appmesh-gateway flagger/appmesh-gateway \
  --namespace flagger-system \
  --set mesh.name=global
```

Optionally, you can create the mesh at install time:

```sh
helm upgrade -i appmesh-gateway flagger/appmesh-gateway \
  --namespace flagger-system \
  --set mesh.name=global \
  --set mesh.create=true
```

The [configuration](#configuration) section lists the parameters that can be configured during installation.

## Uninstalling the Chart

To uninstall/delete the `appmesh-gateway` deployment:

```console
helm delete --purge appmesh-gateway
```

The command removes all the Kubernetes components associated with the chart and deletes the release.

## Configuration

The following table lists the configurable parameters of the chart and their default values.

Parameter | Description | Default
--- | --- | ---
`service.type` | When set to `LoadBalancer` it creates an AWS NLB | `LoadBalancer`
`proxy.access_log_path` | To enable access logs, set it to `/dev/stdout` | `/dev/null`
`proxy.image.repository` | Image repository | `envoyproxy/envoy`
`proxy.image.tag` | Image tag | `<VERSION>`
`proxy.image.pullPolicy` | Image pull policy | `IfNotPresent`
`controller.image.repository` | Image repository | `weaveworks/flagger-appmesh-gateway`
`controller.image.tag` | Image tag | `<VERSION>`
`controller.image.pullPolicy` | Image pull policy | `IfNotPresent`
`resources.requests/cpu` | Pod CPU request | `100m`
`resources.requests/memory` | Pod memory request | `128Mi`
`resources.limits/memory` | Pod memory limit | `2Gi`
`nodeSelector` | Node labels for pod assignment | `{}`
`tolerations` | List of node taints to tolerate | `[]`
`rbac.create` | If `true`, create and use RBAC resources | `true`
`rbac.pspEnabled` | If `true`, create and use a restricted pod security policy | `false`
`serviceAccount.create` | If `true`, create a new service account | `true`
`serviceAccount.name` | Service account to be used | None
`mesh.create` | If `true`, create the mesh custom resource | `false`
`mesh.name` | The name of the mesh to use | `global`
`mesh.discovery` | The service discovery type to use, can be `dns` or `cloudmap` | `dns`
`hpa.enabled` | If `true`, create an HPA resource, metrics-server is required | `true`
`hpa.maxReplicas` | Maximum number of replicas | `3`
`hpa.cpu` | Average total CPU usage per pod (1-100) | `99`
`hpa.memory` | Average memory usage per pod (100Mi-1Gi) | None
`discovery.optIn` | If `true`, only services with the 'expose' annotation are discoverable | `true`
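Parameters from the table above can be combined in a single `helm upgrade` command. As a hypothetical example (the parameter names are from the table, the values are illustrative only), the following enables Envoy access logs and raises the autoscaling ceiling:

```sh
helm upgrade -i appmesh-gateway flagger/appmesh-gateway \
  --namespace flagger-system \
  --set mesh.name=global \
  --set proxy.access_log_path=/dev/stdout \
  --set hpa.maxReplicas=5
```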
1
charts/appmesh-gateway/templates/NOTES.txt
Normal file
@@ -0,0 +1 @@
App Mesh Gateway installed!
56
charts/appmesh-gateway/templates/_helpers.tpl
Normal file
@@ -0,0 +1,56 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "appmesh-gateway.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "appmesh-gateway.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}

{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "appmesh-gateway.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}

{{/*
Common labels
*/}}
{{- define "appmesh-gateway.labels" -}}
app.kubernetes.io/name: {{ include "appmesh-gateway.name" . }}
helm.sh/chart: {{ include "appmesh-gateway.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}

{{/*
Create the name of the service account to use
*/}}
{{- define "appmesh-gateway.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "appmesh-gateway.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
8
charts/appmesh-gateway/templates/account.yaml
Normal file
@@ -0,0 +1,8 @@
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ template "appmesh-gateway.serviceAccountName" . }}
  labels:
{{ include "appmesh-gateway.labels" . | indent 4 }}
{{- end }}
41
charts/appmesh-gateway/templates/config.yaml
Normal file
@@ -0,0 +1,41 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "appmesh-gateway.fullname" . }}
  labels:
{{ include "appmesh-gateway.labels" . | indent 4 }}
data:
  envoy.yaml: |-
    admin:
      access_log_path: {{ .Values.proxy.access_log_path }}
      address:
        socket_address:
          address: 0.0.0.0
          port_value: 8081

    dynamic_resources:
      ads_config:
        api_type: GRPC
        grpc_services:
        - envoy_grpc:
            cluster_name: xds
      cds_config:
        ads: {}
      lds_config:
        ads: {}

    static_resources:
      clusters:
      - name: xds
        connect_timeout: 0.50s
        type: static
        http2_protocol_options: {}
        load_assignment:
          cluster_name: xds
          endpoints:
          - lb_endpoints:
            - endpoint:
                address:
                  socket_address:
                    address: 127.0.0.1
                    port_value: 18000
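Envoy's admin interface listens on port 8081 inside the pod. As a quick sanity check (hypothetical commands, assuming the release is named `appmesh-gateway` in the `flagger-system` namespace), you can port-forward the admin port and confirm that the `xds` cluster defined above is reachable:

```sh
kubectl -n flagger-system port-forward deploy/appmesh-gateway 8081:8081 &
curl -s http://localhost:8081/clusters | grep xds
```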
144
charts/appmesh-gateway/templates/deployment.yaml
Normal file
@@ -0,0 +1,144 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ template "appmesh-gateway.fullname" . }}
  labels:
{{ include "appmesh-gateway.labels" . | indent 4 }}
spec:
  replicas: {{ .Values.replicaCount }}
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ include "appmesh-gateway.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
  template:
    metadata:
      labels:
        app.kubernetes.io/name: {{ include "appmesh-gateway.name" . }}
        app.kubernetes.io/instance: {{ .Release.Name }}
        app.kubernetes.io/part-of: appmesh
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/path: "/stats/prometheus"
        prometheus.io/port: "8081"
        # exclude inbound traffic on port 8080
        appmesh.k8s.aws/ports: "444"
        # exclude egress traffic to xDS server and Kubernetes API
        appmesh.k8s.aws/egressIgnoredPorts: "18000,22,443"
        checksum/config: {{ include (print $.Template.BasePath "/config.yaml") . | sha256sum | quote }}
    spec:
      serviceAccountName: {{ include "appmesh-gateway.serviceAccountName" . }}
      terminationGracePeriodSeconds: 45
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - podAffinityTerm:
              labelSelector:
                matchLabels:
                  app.kubernetes.io/name: {{ include "appmesh-gateway.name" . }}
              topologyKey: kubernetes.io/hostname
            weight: 100
      volumes:
      - name: appmesh-gateway-config
        configMap:
          name: {{ template "appmesh-gateway.fullname" . }}
      containers:
      - name: controller
        image: "{{ .Values.controller.image.repository }}:{{ .Values.controller.image.tag }}"
        imagePullPolicy: {{ .Values.controller.image.pullPolicy }}
        securityContext:
          readOnlyRootFilesystem: true
          runAsUser: 10001
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
        command:
        - ./flagger-appmesh-gateway
        - --opt-in={{ .Values.discovery.optIn }}
        - --gateway-mesh={{ .Values.mesh.name }}
        - --gateway-name=$(POD_SERVICE_ACCOUNT)
        - --gateway-namespace=$(POD_NAMESPACE)
        env:
        - name: POD_SERVICE_ACCOUNT
          valueFrom:
            fieldRef:
              fieldPath: spec.serviceAccountName
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        ports:
        - name: grpc
          containerPort: 18000
          protocol: TCP
        livenessProbe:
          initialDelaySeconds: 5
          tcpSocket:
            port: grpc
        readinessProbe:
          initialDelaySeconds: 5
          tcpSocket:
            port: grpc
        resources:
          limits:
            memory: 1Gi
          requests:
            cpu: 10m
            memory: 32Mi
      - name: proxy
        image: "{{ .Values.proxy.image.repository }}:{{ .Values.proxy.image.tag }}"
        imagePullPolicy: {{ .Values.proxy.image.pullPolicy }}
        securityContext:
          capabilities:
            drop:
            - ALL
            add:
            - NET_BIND_SERVICE
        args:
        - -c
        - /config/envoy.yaml
        - --service-cluster $(POD_NAMESPACE)
        - --service-node $(POD_NAME)
        - --log-level info
        - --base-id 1234
        ports:
        - name: admin
          containerPort: 8081
          protocol: TCP
        - name: http
          containerPort: 8080
          protocol: TCP
        livenessProbe:
          initialDelaySeconds: 5
          tcpSocket:
            port: admin
        readinessProbe:
          initialDelaySeconds: 5
          httpGet:
            path: /ready
            port: admin
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: appmesh-gateway-config
          mountPath: /config
        resources:
{{ toYaml .Values.resources | indent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
{{ toYaml . | indent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
{{ toYaml . | indent 8 }}
      {{- end }}
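Once the deployment is ready, the NLB hostname can be retrieved from the Service status (a hypothetical check, assuming the default `LoadBalancer` service type and the `flagger-system` namespace):

```sh
kubectl -n flagger-system rollout status deployment/appmesh-gateway
kubectl -n flagger-system get svc appmesh-gateway \
  -o jsonpath='{.status.loadBalancer.ingress[0].hostname}'
```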
28
charts/appmesh-gateway/templates/hpa.yaml
Normal file
@@ -0,0 +1,28 @@
{{- if .Values.hpa.enabled }}
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
  name: {{ template "appmesh-gateway.fullname" . }}
  labels:
{{ include "appmesh-gateway.labels" . | indent 4 }}
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: {{ template "appmesh-gateway.fullname" . }}
  minReplicas: {{ .Values.replicaCount }}
  maxReplicas: {{ .Values.hpa.maxReplicas }}
  metrics:
  {{- if .Values.hpa.cpu }}
  - type: Resource
    resource:
      name: cpu
      targetAverageUtilization: {{ .Values.hpa.cpu }}
  {{- end }}
  {{- if .Values.hpa.memory }}
  - type: Resource
    resource:
      name: memory
      targetAverageValue: {{ .Values.hpa.memory }}
  {{- end }}
{{- end }}
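Both metrics are optional: CPU-based scaling is on by default, while memory-based scaling only activates when `hpa.memory` is set. A hypothetical override enabling both (the values are illustrative):

```sh
helm upgrade -i appmesh-gateway flagger/appmesh-gateway \
  --namespace flagger-system \
  --set hpa.cpu=80 \
  --set hpa.memory=512Mi
```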
12
charts/appmesh-gateway/templates/mesh.yaml
Normal file
@@ -0,0 +1,12 @@
{{- if .Values.mesh.create }}
apiVersion: appmesh.k8s.aws/v1beta1
kind: Mesh
metadata:
  name: {{ .Values.mesh.name }}
  annotations:
    helm.sh/resource-policy: keep
  labels:
{{ include "appmesh-gateway.labels" . | indent 4 }}
spec:
  serviceDiscoveryType: {{ .Values.mesh.discovery }}
{{- end }}
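Because of the `helm.sh/resource-policy: keep` annotation, the Mesh object survives `helm delete`; if the mesh is no longer needed it has to be removed explicitly (a hypothetical command, assuming the default mesh name `global`):

```sh
kubectl delete mesh global
```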
57
charts/appmesh-gateway/templates/psp.yaml
Normal file
@@ -0,0 +1,57 @@
{{- if .Values.rbac.pspEnabled }}
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: {{ template "appmesh-gateway.fullname" . }}
  labels:
{{ include "appmesh-gateway.labels" . | indent 4 }}
  annotations:
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
spec:
  privileged: false
  hostIPC: false
  hostNetwork: false
  hostPID: false
  readOnlyRootFilesystem: false
  allowPrivilegeEscalation: false
  allowedCapabilities:
  - '*'
  fsGroup:
    rule: RunAsAny
  runAsUser:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  volumes:
  - '*'
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: {{ template "appmesh-gateway.fullname" . }}-psp
  labels:
{{ include "appmesh-gateway.labels" . | indent 4 }}
rules:
- apiGroups: ['policy']
  resources: ['podsecuritypolicies']
  verbs: ['use']
  resourceNames:
  - {{ template "appmesh-gateway.fullname" . }}
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ template "appmesh-gateway.fullname" . }}-psp
  labels:
{{ include "appmesh-gateway.labels" . | indent 4 }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ template "appmesh-gateway.fullname" . }}-psp
subjects:
- kind: ServiceAccount
  name: {{ template "appmesh-gateway.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
{{- end }}
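The policy, ClusterRole and RoleBinding above are only rendered when `rbac.pspEnabled` is set, for example:

```sh
helm upgrade -i appmesh-gateway flagger/appmesh-gateway \
  --namespace flagger-system \
  --set rbac.pspEnabled=true
```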
39
charts/appmesh-gateway/templates/rbac.yaml
Normal file
@@ -0,0 +1,39 @@
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: {{ template "appmesh-gateway.fullname" . }}
  labels:
{{ include "appmesh-gateway.labels" . | indent 4 }}
rules:
- apiGroups:
  - ""
  resources:
  - services
  verbs: ["*"]
- apiGroups:
  - appmesh.k8s.aws
  resources:
  - meshes
  - meshes/status
  - virtualnodes
  - virtualnodes/status
  - virtualservices
  - virtualservices/status
  verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: {{ template "appmesh-gateway.fullname" . }}
  labels:
{{ include "appmesh-gateway.labels" . | indent 4 }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: {{ template "appmesh-gateway.fullname" . }}
subjects:
- name: {{ template "appmesh-gateway.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
  kind: ServiceAccount
{{- end }}
24
charts/appmesh-gateway/templates/service.yaml
Normal file
@@ -0,0 +1,24 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ template "appmesh-gateway.fullname" . }}
  annotations:
    gateway.appmesh.k8s.aws/expose: "false"
    {{- if eq .Values.service.type "LoadBalancer" }}
    service.beta.kubernetes.io/aws-load-balancer-type: "nlb"
    {{- end }}
  labels:
{{ include "appmesh-gateway.labels" . | indent 4 }}
spec:
  type: {{ .Values.service.type }}
  {{- if eq .Values.service.type "LoadBalancer" }}
  externalTrafficPolicy: Local
  {{- end }}
  ports:
    - port: {{ .Values.service.port }}
      targetPort: http
      protocol: TCP
      name: http
  selector:
    app.kubernetes.io/name: {{ include "appmesh-gateway.name" . }}
    app.kubernetes.io/instance: {{ .Release.Name }}
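Note that the gateway's own Service is annotated with `gateway.appmesh.k8s.aws/expose: "false"` so it is never routed to itself. With the default `discovery.optIn: true`, application Services must carry the opposite annotation to be picked up by the controller; a hypothetical example (names and ports are illustrative):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: podinfo
  namespace: test
  annotations:
    gateway.appmesh.k8s.aws/expose: "true"
spec:
  ports:
  - name: http
    port: 9898
  selector:
    app: podinfo
```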
69
charts/appmesh-gateway/values.yaml
Normal file
@@ -0,0 +1,69 @@
# Default values for appmesh-gateway.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1
discovery:
  # discovery.optIn: `true` if only services with the 'expose' annotation are discoverable
  optIn: true

proxy:
  access_log_path: /dev/null
  image:
    repository: docker.io/envoyproxy/envoy
    tag: v1.12.0
    pullPolicy: IfNotPresent

controller:
  image:
    repository: weaveworks/flagger-appmesh-gateway
    tag: v1.1.0
    pullPolicy: IfNotPresent

nameOverride: ""
fullnameOverride: ""

service:
  # service.type: When set to LoadBalancer it creates an AWS NLB
  type: LoadBalancer
  port: 80

hpa:
  # hpa.enabled: `true` if the HPA resource should be created, metrics-server is required
  enabled: true
  maxReplicas: 3
  # hpa.cpu: average total CPU usage per pod (1-100)
  cpu: 99
  # hpa.memory: average memory usage per pod (100Mi-1Gi)
  memory:

resources:
  limits:
    memory: 2Gi
  requests:
    cpu: 100m
    memory: 128Mi

nodeSelector: {}

tolerations: []

serviceAccount:
  # serviceAccount.create: Whether to create a service account or not
  create: true
  # serviceAccount.name: The name of the service account to create or use
  name: ""

rbac:
  # rbac.create: `true` if RBAC resources should be created
  create: true
  # rbac.pspEnabled: `true` if PodSecurityPolicy resources should be created
  pspEnabled: false

mesh:
  # mesh.create: `true` if the mesh resource should be created
  create: false
  # mesh.name: The name of the mesh to use
  name: "global"
  # mesh.discovery: The service discovery type to use, can be dns or cloudmap
  discovery: dns
@@ -1,21 +1,23 @@
apiVersion: v1
name: flagger
version: 0.18.3
appVersion: 0.18.3
version: 0.22.0
appVersion: 0.22.0
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio, Linkerd, App Mesh, Gloo or NGINX routing for traffic shifting and Prometheus metrics for canary analysis.
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/flagger-icon.png
description: Flagger is a progressive delivery operator for Kubernetes
home: https://flagger.app
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/weaveworks.png
sources:
- https://github.com/weaveworks/flagger
- https://github.com/weaveworks/flagger
maintainers:
- name: stefanprodan
  url: https://github.com/stefanprodan
  email: stefanprodan@users.noreply.github.com
- name: stefanprodan
  url: https://github.com/stefanprodan
  email: stefanprodan@users.noreply.github.com
keywords:
- canary
- istio
- appmesh
- linkerd
- gitops
- flagger
- istio
- appmesh
- linkerd
- gloo
- gitops
- canary

@@ -1,7 +1,8 @@
# Flagger

[Flagger](https://github.com/weaveworks/flagger) is a Kubernetes operator that automates the promotion of
canary deployments using Istio, Linkerd, App Mesh, NGINX or Gloo routing for traffic shifting and Prometheus metrics for canary analysis.
[Flagger](https://github.com/weaveworks/flagger) is a Kubernetes operator that automates the promotion of canary
deployments using Istio, Linkerd, App Mesh, NGINX or Gloo routing for traffic shifting and Prometheus metrics for canary analysis.

Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators
like HTTP requests success rate, requests average duration and pods health.
Based on the KPIs analysis a canary is promoted or aborted and the analysis result is published to Slack or MS Teams.
@@ -45,6 +46,16 @@ $ helm upgrade -i flagger flagger/flagger \
--set metricsServer=http://linkerd-prometheus:9090
```

To install the chart with the release name `flagger` for AWS App Mesh:

```console
$ helm upgrade -i flagger flagger/flagger \
--namespace=appmesh-system \
--set crd.create=false \
--set meshProvider=appmesh \
--set metricsServer=http://appmesh-prometheus:9090
```

The [configuration](#configuration) section lists the parameters that can be configured during installation.

## Uninstalling the Chart
@@ -68,12 +79,19 @@ Parameter | Description | Default
`image.pullPolicy` | image pull policy | `IfNotPresent`
`prometheus.install` | if `true`, installs Prometheus configured to scrape all pods in the cluster including the App Mesh sidecar | `false`
`metricsServer` | Prometheus URL, used when `prometheus.install` is `false` | `http://prometheus.istio-system:9090`
`selectorLabels` | list of labels that Flagger uses to create pod selectors | `app,name,app.kubernetes.io/name`
`slack.url` | Slack incoming webhook | None
`slack.channel` | Slack channel | None
`slack.user` | Slack username | `flagger`
`eventWebhook` | if set, Flagger will publish events to the given webhook | None
`msteams.url` | Microsoft Teams incoming webhook | None
`podMonitor.enabled` | if `true`, create a PodMonitor for [monitoring the metrics](https://docs.flagger.app/usage/monitoring#metrics) | `false`
`podMonitor.namespace` | the namespace where the PodMonitor is created | the same namespace
`podMonitor.interval` | interval at which metrics should be scraped | `15s`
`podMonitor.additionalLabels` | additional labels to add to the PodMonitor | `{}`
`leaderElection.enabled` | leader election must be enabled when running more than one replica | `false`
`leaderElection.replicaCount` | number of replicas | `1`
`ingressAnnotationsPrefix` | annotations prefix for ingresses | `custom.ingress.kubernetes.io`
`rbac.create` | if `true`, create and use RBAC resources | `true`
`rbac.pspEnabled` | if `true`, create and use a restricted pod security policy | `false`
`crd.create` | if `true`, create Flagger's CRDs | `true`
@@ -89,7 +107,7 @@ Specify each parameter using the `--set key=value[,key=value]` argument to `helm

```console
$ helm upgrade -i flagger flagger/flagger \
--namespace istio-system \
--namespace flagger-system \
--set crd.create=false \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general

@@ -34,6 +34,26 @@ spec:
  - name: Weight
    type: string
    JSONPath: .status.canaryWeight
  - name: FailedChecks
    type: string
    JSONPath: .status.failedChecks
    priority: 1
  - name: Interval
    type: string
    JSONPath: .spec.canaryAnalysis.interval
    priority: 1
  - name: Mirror
    type: boolean
    JSONPath: .spec.canaryAnalysis.mirror
    priority: 1
  - name: StepWeight
    type: string
    JSONPath: .spec.canaryAnalysis.stepWeight
    priority: 1
  - name: MaxWeight
    type: string
    JSONPath: .spec.canaryAnalysis.maxWeight
    priority: 1
  - name: LastTransitionTime
    type: string
    JSONPath: .status.lastTransitionTime
@@ -49,6 +69,9 @@ spec:
            provider:
              description: Traffic management provider
              type: string
            metricsServer:
              description: Prometheus URL
              type: string
            progressDeadlineSeconds:
              description: Deployment progress deadline
              type: number
@@ -99,6 +122,11 @@ spec:
                portName:
                  description: Container port name
                  type: string
                targetPort:
                  description: Container target port name or number
                  anyOf:
                  - type: string
                  - type: number
                portDiscovery:
                  description: Enable port discovery
                  type: boolean
@@ -168,6 +196,9 @@ spec:
                stepWeight:
                  description: Canary incremental traffic percentage step
                  type: number
                mirror:
                  description: Mirror traffic to canary before shifting
                  type: boolean
                match:
                  description: A/B testing match conditions
                  anyOf:
@@ -200,7 +231,7 @@ spec:
                properties:
                  items:
                    type: object
                    required: ['name', 'url', 'timeout']
                    required: ["name", "url"]
                    properties:
                      name:
                        description: Name of the webhook
@@ -213,7 +244,9 @@ spec:
                          - confirm-rollout
                          - pre-rollout
                          - rollout
                          - confirm-promotion
                          - post-rollout
                          - event
                      url:
                        description: URL address of this webhook
                        type: string
@@ -238,6 +271,7 @@ spec:
            - Initialized
            - Waiting
            - Progressing
            - Promoting
            - Finalising
            - Succeeded
            - Failed

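The CRD changes above add the `confirm-promotion` and `event` webhook types to the canary lifecycle hooks. A hypothetical `confirm-promotion` gate using Flagger's load tester gate endpoint (the name and URL are illustrative):

```yaml
canaryAnalysis:
  webhooks:
  - name: promotion-gate
    type: confirm-promotion
    url: http://flagger-loadtester.test/gate/approve
```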
@@ -20,6 +20,10 @@ spec:
      labels:
        app.kubernetes.io/name: {{ template "flagger.name" . }}
        app.kubernetes.io/instance: {{ .Release.Name }}
      annotations:
      {{- if .Values.podAnnotations }}
{{ toYaml .Values.podAnnotations | indent 8 }}
      {{- end }}
    spec:
      serviceAccountName: {{ template "flagger.serviceAccountName" . }}
      affinity:
@@ -57,6 +61,9 @@ spec:
        {{- else }}
        - -metrics-server={{ .Values.metricsServer }}
        {{- end }}
        {{- if .Values.selectorLabels }}
        - -selector-labels={{ .Values.selectorLabels }}
        {{- end }}
        {{- if .Values.namespace }}
        - -namespace={{ .Values.namespace }}
        {{- end }}
@@ -72,6 +79,12 @@ spec:
        - -enable-leader-election=true
        - -leader-election-namespace={{ .Release.Namespace }}
        {{- end }}
        {{- if .Values.ingressAnnotationsPrefix }}
        - -ingress-annotations-prefix={{ .Values.ingressAnnotationsPrefix }}
        {{- end }}
        {{- if .Values.eventWebhook }}
        - -event-webhook={{ .Values.eventWebhook }}
        {{- end }}
        livenessProbe:
          exec:
            command:
@@ -92,6 +105,10 @@ spec:
            - --spider
            - http://localhost:8080/healthz
          timeoutSeconds: 5
        {{- if .Values.env }}
        env:
{{ toYaml .Values.env | indent 12 }}
        {{- end }}
        resources:
{{ toYaml .Values.resources | indent 12 }}
      {{- with .Values.nodeSelector }}

27
charts/flagger/templates/podmonitor.yaml
Normal file
@@ -0,0 +1,27 @@
{{- if .Values.podMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: PodMonitor
metadata:
  labels:
    helm.sh/chart: {{ template "flagger.chart" . }}
    app.kubernetes.io/name: {{ template "flagger.name" . }}
    app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
    {{- range $k, $v := .Values.podMonitor.additionalLabels }}
    {{ $k }}: {{ $v | quote }}
    {{- end }}
  name: {{ include "flagger.fullname" . }}
  namespace: {{ .Values.podMonitor.namespace | default .Release.Namespace }}
spec:
  podMetricsEndpoints:
  - interval: {{ .Values.podMonitor.interval }}
    path: /metrics
    port: http
  namespaceSelector:
    matchNames:
    - {{ .Release.Namespace }}
  selector:
    matchLabels:
      app.kubernetes.io/name: {{ template "flagger.name" . }}
      app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
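The PodMonitor requires the Prometheus Operator CRDs to be present in the cluster. A hypothetical install that enables it (the interval value is illustrative):

```sh
helm upgrade -i flagger flagger/flagger \
  --namespace flagger-system \
  --set podMonitor.enabled=true \
  --set podMonitor.interval=30s
```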
@@ -133,38 +133,22 @@ data:
      scheme: https
      tls_config:
        ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        insecure_skip_verify: true
      bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
      relabel_configs:
      - source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
        action: keep
        regex: kubernetes;https

      # Scrape config for nodes
      - job_name: 'kubernetes-nodes'
        scheme: https
        tls_config:
          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
        kubernetes_sd_configs:
        - role: node
        relabel_configs:
        - action: labelmap
          regex: __meta_kubernetes_node_label_(.+)
        - target_label: __address__
          replacement: kubernetes.default.svc:443
        - source_labels: [__meta_kubernetes_node_name]
          regex: (.+)
          target_label: __metrics_path__
          replacement: /api/v1/nodes/${1}/proxy/metrics

      # scrape config for cAdvisor
      - job_name: 'kubernetes-cadvisor'
        scheme: https
        tls_config:
          ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
          insecure_skip_verify: true
        bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
        kubernetes_sd_configs:
        - role: node
        - role: node
        relabel_configs:
        - action: labelmap
          regex: __meta_kubernetes_node_label_(.+)
@@ -174,6 +158,14 @@ data:
          regex: (.+)
          target_label: __metrics_path__
          replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
        # exclude high cardinality metrics
        metric_relabel_configs:
        - source_labels: [__name__]
          regex: (container|machine)_(cpu|memory|network|fs)_(.+)
          action: keep
        - source_labels: [__name__]
          regex: container_memory_failures_total
          action: drop

      # scrape config for pods
      - job_name: kubernetes-pods
@@ -238,7 +230,7 @@ spec:
      serviceAccountName: {{ template "flagger.serviceAccountName" . }}-prometheus
      containers:
      - name: prometheus
        image: "docker.io/prom/prometheus:v2.10.0"
        image: "docker.io/prom/prometheus:v2.15.2"
        imagePullPolicy: IfNotPresent
        args:
        - '--storage.tsdb.retention=2h'

@@ -77,6 +77,11 @@ rules:
  - virtualservices
  - gateways
  verbs: ["*"]
- apiGroups:
  - projectcontour.io
  resources:
  - httpproxies
  verbs: ["*"]
- nonResourceURLs:
  - /version
  verbs:

@@ -2,28 +2,59 @@

image:
  repository: weaveworks/flagger
  tag: 0.18.3
  tag: 0.22.0
  pullPolicy: IfNotPresent
  pullSecret:

podAnnotations:
  prometheus.io/scrape: "true"
  prometheus.io/port: "8080"
  appmesh.k8s.aws/sidecarInjectorWebhook: disabled

metricsServer: "http://prometheus:9090"

# accepted values are istio, appmesh, nginx or supergloo:mesh.namespace (defaults to istio)
# accepted values are kubernetes, istio, linkerd, appmesh, nginx, gloo or supergloo:mesh.namespace (defaults to istio)
meshProvider: ""

# single namespace restriction
namespace: ""

# list of pod labels that Flagger uses to create pod selectors
# defaults to: app,name,app.kubernetes.io/name
selectorLabels: ""

slack:
  user: flagger
  channel:
  # incoming webhook https://api.slack.com/incoming-webhooks
  url:

# when specified, flagger will publish events to the provided webhook
eventWebhook: ""

msteams:
  # MS Teams incoming webhook URL
  url:

podMonitor:
  enabled: false
  namespace:
  interval: 15s
  additionalLabels: {}

#env:
#- name: SLACK_URL
#  valueFrom:
#    secretKeyRef:
#      name: slack
#      key: url
#- name: MSTEAMS_URL
#  valueFrom:
#    secretKeyRef:
#      name: msteams
#      key: url
env: []

leaderElection:
  enabled: false
  replicaCount: 1

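With `eventWebhook` set, Flagger POSTs its canary events to the given endpoint in addition to any Slack or MS Teams notifications. A hypothetical override (the URL is illustrative only):

```sh
helm upgrade -i flagger flagger/flagger \
  --namespace flagger-system \
  --set eventWebhook=https://example.com/flagger-events
```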
@@ -1,13 +1,20 @@
apiVersion: v1
name: grafana
version: 1.3.0
appVersion: 6.2.5
version: 1.4.0
appVersion: 6.5.1
description: Grafana dashboards for monitoring Flagger canary deployments
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/flagger-icon.png
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/weaveworks.png
home: https://flagger.app
sources:
- https://github.com/weaveworks/flagger
- https://github.com/weaveworks/flagger
maintainers:
- name: stefanprodan
  url: https://github.com/stefanprodan
  email: stefanprodan@users.noreply.github.com
- name: stefanprodan
  url: https://github.com/stefanprodan
  email: stefanprodan@users.noreply.github.com
keywords:
- flagger
- grafana
- canary
- istio
- appmesh

@@ -1,13 +1,12 @@
# Flagger Grafana

Grafana dashboards for monitoring progressive deployments powered by Istio, Prometheus and Flagger.
Grafana dashboards for monitoring progressive deployments powered by Flagger and Prometheus.

## Prerequisites

* Kubernetes >= 1.11
* Istio >= 1.0
* Prometheus >= 2.6

## Installing the Chart
@@ -18,14 +17,20 @@ Add Flagger Helm repository:
helm repo add flagger https://flagger.app
```

To install the chart with the release name `flagger-grafana`:
To install the chart for Istio run:

```console
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=istio-system \
--set url=http://prometheus:9090 \
--set user=admin \
--set password=admin
--set url=http://prometheus:9090
```

To install the chart for AWS App Mesh run:

```console
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=appmesh-system \
--set url=http://appmesh-prometheus:9090
```

The command deploys Grafana on the Kubernetes cluster in the default namespace.
@@ -56,10 +61,7 @@ Parameter | Description | Default
`affinity` | node/pod affinities | `node`
`nodeSelector` | node labels for pod assignment | `{}`
`service.type` | type of service | `ClusterIP`
`url` | Prometheus URL, used when Weave Cloud token is empty | `http://prometheus:9090`
`token` | Weave Cloud token | `none`
`user` | Grafana admin username | `admin`
`password` | Grafana admin password | `admin`
`url` | Prometheus URL | `http://prometheus:9090`

Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,

1226
charts/grafana/dashboards/envoy.json
Normal file
File diff suppressed because it is too large
@@ -1,4 +1,4 @@
apiVersion: apps/v1beta2
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ template "grafana.fullname" . }}
@@ -20,6 +20,9 @@ spec:
        release: {{ .Release.Name }}
      annotations:
        prometheus.io/scrape: 'false'
      {{- if .Values.podAnnotations }}
{{ toYaml .Values.podAnnotations | indent 8 }}
      {{- end }}
    spec:
      containers:
        - name: {{ .Chart.Name }}

@@ -6,9 +6,11 @@ replicaCount: 1

image:
  repository: grafana/grafana
  tag: 6.2.5
  tag: 6.5.1
  pullPolicy: IfNotPresent

podAnnotations: {}

service:
  type: ClusterIP
  port: 80

@@ -1,12 +1,12 @@
apiVersion: v1
name: loadtester
version: 0.7.0
appVersion: 0.7.0
version: 0.12.1
appVersion: 0.12.1
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger's load testing service based on rakyll/hey and bojand/ghz that generates traffic during canary analysis when configured as a webhook.
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/flagger-icon.png
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/weaveworks.png
sources:
- https://github.com/weaveworks/flagger
maintainers:
@@ -14,8 +14,10 @@ maintainers:
  url: https://github.com/stefanprodan
  email: stefanprodan@users.noreply.github.com
keywords:
- canary
- flagger
- istio
- appmesh
- linkerd
- gloo
- gitops
- load testing

@@ -18,6 +18,9 @@ spec:
        app: {{ include "loadtester.name" . }}
      annotations:
        appmesh.k8s.aws/ports: "444"
      {{- if .Values.podAnnotations }}
{{ toYaml .Values.podAnnotations | indent 8 }}
      {{- end }}
    spec:
      {{- if .Values.serviceAccountName }}
      serviceAccountName: {{ .Values.serviceAccountName }}

@@ -2,9 +2,13 @@ replicaCount: 1

image:
  repository: weaveworks/flagger-loadtester
  tag: 0.7.0
  tag: 0.12.1
  pullPolicy: IfNotPresent

podAnnotations:
  prometheus.io/scrape: "true"
  prometheus.io/port: "8080"

logLevel: info
cmd:
  timeout: 1h

@@ -1,12 +1,14 @@
apiVersion: v1
version: 3.0.0
appVersion: 2.0.0
version: 3.1.0
appVersion: 3.1.0
name: podinfo
engine: gotpl
description: Flagger canary deployment demo chart
home: https://github.com/weaveworks/flagger
maintainers:
- email: stefanprodan@users.noreply.github.com
  name: stefanprodan
description: Flagger canary deployment demo application
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/weaveworks.png
sources:
- https://github.com/weaveworks/flagger
- https://github.com/stefanprodan/podinfo
maintainers:
- name: stefanprodan
  url: https://github.com/stefanprodan
  email: stefanprodan@users.noreply.github.com

@@ -21,6 +21,9 @@ spec:
        app: {{ template "podinfo.fullname" . }}
      annotations:
        prometheus.io/scrape: 'true'
      {{- if .Values.podAnnotations }}
{{ toYaml .Values.podAnnotations | indent 8 }}
      {{- end }}
    spec:
      terminationGracePeriodSeconds: 30
      containers:
@@ -34,6 +37,9 @@ spec:
        - --random-delay={{ .Values.faults.delay }}
        - --random-error={{ .Values.faults.error }}
        - --config-path=/podinfo/config
        {{- range .Values.backends }}
        - --backend-url={{ . }}
        {{- end }}
        env:
        {{- if .Values.message }}
        - name: PODINFO_UI_MESSAGE

29
charts/podinfo/templates/tests/jwt.yaml
Normal file
@@ -0,0 +1,29 @@
apiVersion: v1
kind: Pod
metadata:
  name: {{ template "podinfo.fullname" . }}-jwt-test-{{ randAlphaNum 5 | lower }}
  labels:
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
    app: {{ template "podinfo.name" . }}
  annotations:
    "helm.sh/hook": test-success
    sidecar.istio.io/inject: "false"
    linkerd.io/inject: disabled
    appmesh.k8s.aws/sidecarInjectorWebhook: disabled
spec:
  containers:
  - name: tools
    image: giantswarm/tiny-tools
    command:
    - sh
    - -c
    - |
      TOKEN=$(curl -sd 'test' ${PODINFO_SVC}/token | jq -r .token) &&
      curl -H "Authorization: Bearer ${TOKEN}" ${PODINFO_SVC}/token/validate | grep test
    env:
    - name: PODINFO_SVC
      value: {{ template "podinfo.fullname" . }}:{{ .Values.service.port }}
  restartPolicy: Never

@@ -1,22 +0,0 @@
{{- $url := printf "%s%s.%s:%v" (include "podinfo.fullname" .) (include "podinfo.suffix" .) .Release.Namespace .Values.service.port -}}
apiVersion: v1
kind: ConfigMap
metadata:
  name: {{ template "podinfo.fullname" . }}-tests
  labels:
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
    app: {{ template "podinfo.name" . }}
data:
  run.sh: |-
    @test "HTTP POST /echo" {
      run curl --retry 3 --connect-timeout 2 -sSX POST -d 'test' {{ $url }}/echo
      [ $output = "test" ]
    }
    @test "HTTP POST /store" {
      curl --retry 3 --connect-timeout 2 -sSX POST -d 'test' {{ $url }}/store
    }
    @test "HTTP GET /" {
      curl --retry 3 --connect-timeout 2 -sS {{ $url }} | grep hostname
    }
@@ -1,43 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: {{ template "podinfo.fullname" . }}-tests-{{ randAlphaNum 5 | lower }}
  annotations:
    "helm.sh/hook": test-success
    sidecar.istio.io/inject: "false"
  labels:
    heritage: {{ .Release.Service }}
    release: {{ .Release.Name }}
    chart: {{ .Chart.Name }}-{{ .Chart.Version }}
    app: {{ template "podinfo.name" . }}
spec:
  initContainers:
  - name: "test-framework"
    image: "dduportal/bats:0.4.0"
    command:
    - "bash"
    - "-c"
    - |
      set -ex
      # copy bats to tools dir
      cp -R /usr/local/libexec/ /tools/bats/
    volumeMounts:
    - mountPath: /tools
      name: tools
  containers:
  - name: {{ .Release.Name }}-ui-test
    image: dduportal/bats:0.4.0
    command: ["/tools/bats/bats", "-t", "/tests/run.sh"]
    volumeMounts:
    - mountPath: /tests
      name: tests
      readOnly: true
    - mountPath: /tools
      name: tools
  volumes:
  - name: tests
    configMap:
      name: {{ template "podinfo.fullname" . }}-tests
  - name: tools
    emptyDir: {}
  restartPolicy: Never
@@ -1,22 +1,25 @@
# Default values for podinfo.
image:
  repository: stefanprodan/podinfo
  tag: 2.0.0
  tag: 3.1.0
  pullPolicy: IfNotPresent

podAnnotations: {}

service:
  enabled: false
  type: ClusterIP
  port: 9898

hpa:
  enabled: true
  minReplicas: 2
  maxReplicas: 2
  maxReplicas: 4
  cpu: 80
  memory: 512Mi

canary:
  enabled: true
  enabled: false
  # Istio traffic policy tls can be DISABLE or ISTIO_MUTUAL
  istioTLS: DISABLE
  istioIngress:
@@ -69,6 +72,7 @@ fullnameOverride: ""

logLevel: info
backend: #http://backend-podinfo:9898/echo
backends: []
message: #UI greetings

faults:

@@ -9,17 +9,7 @@ import (
	"strings"
	"time"

	"github.com/Masterminds/semver"
	clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
	informers "github.com/weaveworks/flagger/pkg/client/informers/externalversions"
	"github.com/weaveworks/flagger/pkg/controller"
	"github.com/weaveworks/flagger/pkg/logger"
	"github.com/weaveworks/flagger/pkg/metrics"
	"github.com/weaveworks/flagger/pkg/notifier"
	"github.com/weaveworks/flagger/pkg/router"
	"github.com/weaveworks/flagger/pkg/server"
	"github.com/weaveworks/flagger/pkg/signals"
	"github.com/weaveworks/flagger/pkg/version"
	"github.com/Masterminds/semver/v3"
	"go.uber.org/zap"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/client-go/kubernetes"
@@ -30,28 +20,42 @@ import (
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/client-go/transport"
	_ "k8s.io/code-generator/cmd/client-gen/generators"

	"github.com/weaveworks/flagger/pkg/canary"
	clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
	informers "github.com/weaveworks/flagger/pkg/client/informers/externalversions"
	"github.com/weaveworks/flagger/pkg/controller"
	"github.com/weaveworks/flagger/pkg/logger"
	"github.com/weaveworks/flagger/pkg/metrics"
	"github.com/weaveworks/flagger/pkg/notifier"
	"github.com/weaveworks/flagger/pkg/router"
	"github.com/weaveworks/flagger/pkg/server"
	"github.com/weaveworks/flagger/pkg/signals"
	"github.com/weaveworks/flagger/pkg/version"
)

var (
	masterURL               string
	kubeconfig              string
	metricsServer           string
	controlLoopInterval     time.Duration
	logLevel                string
	port                    string
	msteamsURL              string
	slackURL                string
	slackUser               string
	slackChannel            string
	threadiness             int
	zapReplaceGlobals       bool
	zapEncoding             string
	namespace               string
	meshProvider            string
	selectorLabels          string
	enableLeaderElection    bool
	leaderElectionNamespace string
	ver                     bool
	masterURL                string
	kubeconfig               string
	metricsServer            string
	controlLoopInterval      time.Duration
	logLevel                 string
	port                     string
	msteamsURL               string
	slackURL                 string
	slackUser                string
	slackChannel             string
	eventWebhook             string
	threadiness              int
	zapReplaceGlobals        bool
	zapEncoding              string
	namespace                string
	meshProvider             string
	selectorLabels           string
	ingressAnnotationsPrefix string
	enableLeaderElection     bool
	leaderElectionNamespace  string
	ver                      bool
)

func init() {
@@ -64,6 +68,7 @@ func init() {
	flag.StringVar(&slackURL, "slack-url", "", "Slack hook URL.")
	flag.StringVar(&slackUser, "slack-user", "flagger", "Slack user name.")
	flag.StringVar(&slackChannel, "slack-channel", "", "Slack channel.")
	flag.StringVar(&eventWebhook, "event-webhook", "", "Webhook for publishing flagger events")
	flag.StringVar(&msteamsURL, "msteams-url", "", "MS Teams incoming webhook URL.")
	flag.IntVar(&threadiness, "threadiness", 2, "Worker concurrency.")
	flag.BoolVar(&zapReplaceGlobals, "zap-replace-globals", false, "Whether to change the logging level of the global zap logger.")
@@ -71,6 +76,7 @@ func init() {
	flag.StringVar(&namespace, "namespace", "", "Namespace that flagger would watch canary object.")
	flag.StringVar(&meshProvider, "mesh-provider", "istio", "Service mesh provider, can be istio, linkerd, appmesh, supergloo, nginx or smi.")
	flag.StringVar(&selectorLabels, "selector-labels", "app,name,app.kubernetes.io/name", "List of pod labels that Flagger uses to create pod selectors.")
	flag.StringVar(&ingressAnnotationsPrefix, "ingress-annotations-prefix", "nginx.ingress.kubernetes.io", "Annotations prefix for ingresses.")
	flag.BoolVar(&enableLeaderElection, "enable-leader-election", false, "Enable leader election.")
	flag.StringVar(&leaderElectionNamespace, "leader-election-namespace", "kube-system", "Namespace used to create the leader election config map.")
	flag.BoolVar(&ver, "version", false, "Print version")
@@ -157,7 +163,7 @@ func main() {
		logger.Infof("Watching namespace %s", namespace)
	}

	observerFactory, err := metrics.NewFactory(metricsServer, meshProvider, 5*time.Second)
	observerFactory, err := metrics.NewFactory(metricsServer, 5*time.Second)
	if err != nil {
		logger.Fatalf("Error building prometheus client: %s", err.Error())
	}
@@ -175,7 +181,13 @@ func main() {
	// start HTTP server
	go server.ListenAndServe(port, 3*time.Second, logger, stopCh)

	routerFactory := router.NewFactory(cfg, kubeClient, flaggerClient, logger, meshClient)
	routerFactory := router.NewFactory(cfg, kubeClient, flaggerClient, ingressAnnotationsPrefix, logger, meshClient)
	configTracker := canary.ConfigTracker{
		Logger:        logger,
		KubeClient:    kubeClient,
		FlaggerClient: flaggerClient,
	}
	canaryFactory := canary.NewFactory(kubeClient, flaggerClient, configTracker, labels, logger)

	c := controller.NewController(
		kubeClient,
@@ -185,11 +197,12 @@ func main() {
		controlLoopInterval,
		logger,
		notifierClient,
		canaryFactory,
		routerFactory,
		observerFactory,
		meshProvider,
		version.VERSION,
		labels,
		eventWebhook,
	)

	flaggerInformerFactory.Start(stopCh)
@@ -284,10 +297,10 @@ func startLeaderElection(ctx context.Context, run func(), ns string, kubeClient

func initNotifier(logger *zap.SugaredLogger) (client notifier.Interface) {
	provider := "slack"
	notifierURL := slackURL
	if msteamsURL != "" {
	notifierURL := fromEnv("SLACK_URL", slackURL)
	if msteamsURL != "" || os.Getenv("MSTEAMS_URL") != "" {
		provider = "msteams"
		notifierURL = msteamsURL
		notifierURL = fromEnv("MSTEAMS_URL", msteamsURL)
	}
	notifierFactory := notifier.NewFactory(notifierURL, slackUser, slackChannel)

@@ -302,3 +315,10 @@ func initNotifier(logger *zap.SugaredLogger) (client notifier.Interface) {
	}
	return
}

func fromEnv(envVar string, defaultVal string) string {
	if os.Getenv(envVar) != "" {
		return os.Getenv(envVar)
	}
	return defaultVal
}

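The `fromEnv` override means the Slack or MS Teams webhook no longer has to be passed as a CLI flag; it can come from the environment, e.g. via the commented `env` block in the chart values. A hypothetical setup matching that block (the secret name and key are illustrative):

```sh
kubectl -n flagger-system create secret generic slack \
  --from-literal=url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK
```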
@@ -10,7 +10,7 @@ import (
	"time"
)

var VERSION = "0.7.0"
var VERSION = "0.12.1"
var (
	logLevel string
	port     string

Binary file not shown.
Before: 158 KiB | After: 30 KiB
BIN
docs/diagrams/flagger-canary-traffic-mirroring.png
Normal file
Binary file not shown.
After: 39 KiB
BIN
docs/diagrams/flagger-contour-overview.png
Normal file
Binary file not shown.
After: 40 KiB
BIN
docs/diagrams/flagger-gitops-contour.png
Normal file
Binary file not shown.
After: 37 KiB
@@ -5,7 +5,7 @@ description: Flagger is a progressive delivery Kubernetes operator
# Introduction

[Flagger](https://github.com/weaveworks/flagger) is a **Kubernetes** operator that automates the promotion of canary
deployments using **Istio**, **Linkerd**, **App Mesh**, **NGINX** or **Gloo** routing for traffic shifting and **Prometheus** metrics for canary analysis.
deployments using **Istio**, **Linkerd**, **App Mesh**, **NGINX**, **Contour** or **Gloo** routing for traffic shifting and **Prometheus** metrics for canary analysis.
The canary analysis can be extended with webhooks for running system integration/acceptance tests, load tests, or any other custom validation.

Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance
@@ -16,7 +16,7 @@ Based on analysis of the **KPIs** a canary is promoted or aborted, and the analy

Flagger can be configured with Kubernetes custom resources and is compatible with
any CI/CD solutions made for Kubernetes. Since Flagger is declarative and reacts to Kubernetes events,
it can be used in **GitOps** pipelines together with Weave Flux or JenkinsX.
it can be used in **GitOps** pipelines together with Flux CD or JenkinsX.

This project is sponsored by [Weaveworks](https://www.weave.works/)

@@ -19,6 +19,8 @@
* [App Mesh Canary Deployments](usage/appmesh-progressive-delivery.md)
* [NGINX Canary Deployments](usage/nginx-progressive-delivery.md)
* [Gloo Canary Deployments](usage/gloo-progressive-delivery.md)
* [Contour Canary Deployments](usage/contour-progressive-delivery.md)
* [Crossover Canary Deployments](usage/crossover-progressive-delivery.md)
* [Blue/Green Deployments](usage/blue-green.md)
* [Monitoring](usage/monitoring.md)
* [Alerting](usage/alerting.md)

@@ -6,11 +6,13 @@

Flagger can run automated application analysis, promotion and rollback for the following deployment strategies:
* Canary (progressive traffic shifting)
  * Istio, Linkerd, App Mesh, NGINX, Gloo
  * Istio, Linkerd, App Mesh, NGINX, Contour, Gloo
* Canary (traffic mirroring)
  * Istio
* A/B Testing (HTTP headers and cookies traffic routing)
  * Istio, NGINX
  * Istio, App Mesh, NGINX, Contour
* Blue/Green (traffic switch)
  * Kubernetes CNI
  * Kubernetes CNI, Istio, Linkerd, App Mesh, NGINX, Contour, Gloo

For Canary deployments and A/B testing you'll need a Layer 7 traffic management solution like a service mesh or an ingress controller.
For Blue/Green deployments no service mesh or ingress controller is required.
@@ -41,6 +43,36 @@ Istio example:
          regex: "^(.*?;)?(canary=always)(;.*)?$"
```

App Mesh example:

```yaml
canaryAnalysis:
  interval: 1m
  threshold: 10
  iterations: 2
  match:
    - headers:
        user-agent:
          regex: ".*Chrome.*"
```

Note that App Mesh supports a single condition.

Contour example:

```yaml
canaryAnalysis:
  interval: 1m
  threshold: 10
  iterations: 2
  match:
    - headers:
        user-agent:
          prefix: "Chrome"
```

Note that Contour does not support regex; you can use prefix, suffix or exact.

NGINX example:

```yaml
@@ -102,6 +134,42 @@ The above configuration will run an analysis for five minutes.
Flagger starts the load test for the canary service (green version) and checks the Prometheus metrics every 30 seconds.
If the analysis result is positive, Flagger will promote the canary (green version) to primary (blue version).

**When can I use traffic mirroring?**

Traffic Mirroring is a pre-stage in a Canary (progressive traffic shifting) or
Blue/Green deployment strategy. Traffic mirroring will copy each incoming
request, sending one request to the primary and one to the canary service.
The response from the primary is sent back to the user. The response from the canary
is discarded. Metrics are collected on both requests so that the deployment will
only proceed if the canary metrics are healthy.

Mirroring is supported by Istio only.

In Istio, mirrored requests have `-shadow` appended to the `Host` (HTTP) or
`Authority` (HTTP/2) header; for example, requests to `podinfo.test` that are
mirrored will be reported in telemetry with a destination host `podinfo.test-shadow`.

Mirroring must only be used for requests that are **idempotent** or capable of
being processed twice (once by the primary and once by the canary). Reads are
idempotent. Before using mirroring on requests that may be writes, you should
consider what will happen if a write is duplicated and handled by the primary
and canary.

To use mirroring, set `spec.canaryAnalysis.mirror` to `true`. Example for
traffic shifting:

```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
spec:
  provider: istio
  canaryAnalysis:
    mirror: true
    interval: 30s
    stepWeight: 20
    maxWeight: 50
```

### Kubernetes services

**How is an application exposed inside the cluster?**
@@ -120,8 +188,10 @@ spec:
    kind: Deployment
    name: podinfo
  service:
    # container port (required)
    # ClusterIP port number (required)
    port: 9898
    # container port name or number
    targetPort: http
    # port name can be http or grpc (default http)
    portName: http
```
@@ -291,6 +361,195 @@ spec:
          topologyKey: kubernetes.io/hostname
```

### Istio routing

**How does Flagger interact with Istio?**

Flagger creates an Istio Virtual Service and Destination Rules based on the Canary service spec.
The service configuration lets you expose an app inside or outside the mesh.
You can also define traffic policies, HTTP match conditions, URI rewrite rules, CORS policies, timeout and retries.

The following spec exposes the `frontend` workload inside the mesh on `frontend.test.svc.cluster.local:9898`
and outside the mesh on `frontend.example.com`. You'll have to specify an Istio ingress gateway for external hosts.

```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: frontend
  namespace: test
spec:
  service:
    # container port
    port: 9898
    # service port name (optional, will default to "http")
    portName: http-frontend
    # Istio gateways (optional)
    gateways:
    - public-gateway.istio-system.svc.cluster.local
    - mesh
    # Istio virtual service host names (optional)
    hosts:
    - frontend.example.com
    # Istio traffic policy
    trafficPolicy:
      tls:
        # use ISTIO_MUTUAL when mTLS is enabled
        mode: DISABLE
    # HTTP match conditions (optional)
    match:
    - uri:
        prefix: /
    # HTTP rewrite (optional)
    rewrite:
      uri: /
    # Istio retry policy (optional)
    retries:
      attempts: 3
      perTryTimeout: 1s
      retryOn: "gateway-error,connect-failure,refused-stream"
    # Add headers (optional)
    headers:
      request:
        add:
          x-some-header: "value"
    # cross-origin resource sharing policy (optional)
    corsPolicy:
      allowOrigin:
      - example.com
      allowMethods:
      - GET
      allowCredentials: false
      allowHeaders:
      - x-some-header
      maxAge: 24h
```

For the above spec Flagger will generate the following virtual service:
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: VirtualService
|
||||
metadata:
|
||||
name: frontend
|
||||
namespace: test
|
||||
ownerReferences:
|
||||
- apiVersion: flagger.app/v1alpha3
|
||||
blockOwnerDeletion: true
|
||||
controller: true
|
||||
kind: Canary
|
||||
name: podinfo
|
||||
uid: 3a4a40dd-3875-11e9-8e1d-42010a9c0fd1
|
||||
spec:
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
- mesh
|
||||
hosts:
|
||||
- frontend.example.com
|
||||
- frontend
|
||||
http:
|
||||
- appendHeaders:
|
||||
x-some-header: "value"
|
||||
corsPolicy:
|
||||
allowHeaders:
|
||||
- x-some-header
|
||||
allowMethods:
|
||||
- GET
|
||||
allowOrigin:
|
||||
- example.com
|
||||
maxAge: 24h
|
||||
match:
|
||||
- uri:
|
||||
prefix: /
|
||||
rewrite:
|
||||
uri: /
|
||||
route:
|
||||
- destination:
|
||||
host: podinfo-primary
|
||||
weight: 100
|
||||
- destination:
|
||||
host: podinfo-canary
|
||||
weight: 0
|
||||
retries:
|
||||
attempts: 3
|
||||
perTryTimeout: 1s
|
||||
retryOn: "gateway-error,connect-failure,refused-stream"
|
||||
```
|
||||
|
||||
For each destination in the virtual service a rule is generated:
|
||||
|
||||
```yaml
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: DestinationRule
|
||||
metadata:
|
||||
name: frontend-primary
|
||||
namespace: test
|
||||
spec:
|
||||
host: frontend-primary
|
||||
trafficPolicy:
|
||||
tls:
|
||||
mode: DISABLE
|
||||
---
|
||||
apiVersion: networking.istio.io/v1alpha3
|
||||
kind: DestinationRule
|
||||
metadata:
|
||||
name: frontend-canary
|
||||
namespace: test
|
||||
spec:
|
||||
host: frontend-canary
|
||||
trafficPolicy:
|
||||
tls:
|
||||
mode: DISABLE
|
||||
```
|
||||
|
||||
Flagger keeps in sync the virtual service and destination rules with the canary service spec.
|
||||
Any direct modification to the virtual service spec will be overwritten.
|
||||
|
||||
To expose a workload inside the mesh on `http://backend.test.svc.cluster.local:9898`,
|
||||
the service spec can contain only the container port and the traffic policy:
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1alpha3
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: backend
|
||||
namespace: test
|
||||
spec:
|
||||
service:
|
||||
port: 9898
|
||||
trafficPolicy:
|
||||
tls:
|
||||
mode: DISABLE
|
||||
```
|
||||
|
||||
Based on the above spec, Flagger will create several ClusterIP services like:
|
||||
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: backend-primary
|
||||
ownerReferences:
|
||||
- apiVersion: flagger.app/v1alpha3
|
||||
blockOwnerDeletion: true
|
||||
controller: true
|
||||
kind: Canary
|
||||
name: backend
|
||||
uid: 2ca1a9c7-2ef6-11e9-bd01-42010a9c0145
|
||||
spec:
|
||||
type: ClusterIP
|
||||
ports:
|
||||
- name: http
|
||||
port: 9898
|
||||
protocol: TCP
|
||||
targetPort: 9898
|
||||
selector:
|
||||
app: backend-primary
|
||||
```
|
||||
|
||||
Flagger works for user facing apps exposed outside the cluster via an ingress gateway
|
||||
and for backend HTTP APIs that are accessible only from inside the mesh.
|
||||
|
||||
### Istio Ingress Gateway
|
||||
|
||||
**How can I expose multiple canaries on the same external domain?**
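
One common pattern, sketched below with illustrative names, assumes Istio will route two virtual services bound to the same gateway host by URI prefix: give each Canary the same external host and gateway, and disjoint match conditions:

```yaml
# the website canary owns the root path
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: website
  namespace: test
spec:
  service:
    port: 8080
    gateways:
      - public-gateway.istio-system.svc.cluster.local
    hosts:
      - my-site.com
    match:
      - uri:
          prefix: /
---
# the API canary owns /api and strips the prefix
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: webapi
  namespace: test
spec:
  service:
    port: 8080
    gateways:
      - public-gateway.istio-system.svc.cluster.local
    hosts:
      - my-site.com
    match:
      - uri:
          prefix: /api
    rewrite:
      uri: /
```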

@@ -4,8 +4,6 @@
a horizontal pod autoscaler \(HPA\) and creates a series of objects
\(Kubernetes deployments, ClusterIP services, virtual service, traffic split or ingress\) to drive the canary analysis and promotion.

### Canary Custom Resource

For a deployment named _podinfo_, a canary promotion can be defined using Flagger's custom resource:
@@ -19,8 +17,7 @@ metadata:
spec:
  # service mesh provider (optional)
  # can be: kubernetes, istio, linkerd, appmesh, nginx, gloo, supergloo
  # use the kubernetes provider for Blue/Green style deployments
  provider: istio
  provider: linkerd
  # deployment reference
  targetRef:
    apiVersion: apps/v1
@@ -35,16 +32,15 @@ spec:
    kind: HorizontalPodAutoscaler
    name: podinfo
  service:
    # container port
    # ClusterIP port number
    port: 9898
    # service port name (optional, will default to "http")
    portName: http-podinfo
    # Istio gateways (optional)
    gateways:
    - public-gateway.istio-system.svc.cluster.local
    # Istio virtual service host names (optional)
    hosts:
    - podinfo.example.com
    # ClusterIP port name can be http or grpc (default http)
    portName: http
    # container port number or name (optional)
    targetPort: 9898
    # add all the other container ports
    # to the ClusterIP services (default false)
    portDiscovery: false
  # promote the canary without analysing it (default false)
  skipAnalysis: false
  # define the canary analysis timing and KPIs
@@ -71,15 +67,13 @@ spec:
      # milliseconds
      threshold: 500
      interval: 30s
    # external checks (optional)
    # testing (optional)
    webhooks:
    - name: integration-tests
      url: http://podinfo.test:9898/echo
      timeout: 1m
      # key-value pairs (optional)
    - name: load-test
      url: http://flagger-loadtester.test/
      timeout: 5s
      metadata:
        test: "all"
        token: "16688eb5e9f289f1991c"
        cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
```

**Note** that the target deployment must have a single label selector in the format `app: <DEPLOYMENT-NAME>`:
@@ -99,11 +93,13 @@ spec:
      app: podinfo
```

Besides `app` Flagger supports `name` and `app.kubernetes.io/name` selectors. If you use a different
convention you can specify your label with the `-selector-labels` flag.
Besides `app` Flagger supports `name` and `app.kubernetes.io/name` selectors.
If you use a different convention you can specify your label with
the `-selector-labels=my-app-label` command flag in the Flagger deployment manifest under the container args,
or by setting `--set selectorLabels=my-app-label` when installing Flagger with Helm.
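
A sketch of where that flag goes in the Flagger deployment (the `my-app-label` name is illustrative):

```yaml
# excerpt from the Flagger deployment manifest
spec:
  template:
    spec:
      containers:
        - name: flagger
          args:
            - -selector-labels=my-app-label
```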

The target deployment should expose a TCP port that will be used by Flagger to create the ClusterIP Service and
the Istio Virtual Service. The container port from the target deployment should match the `service.port` value.
The target deployment should expose a TCP port that will be used by Flagger to create the ClusterIP Services.
The container port from the target deployment should match the `service.port` or `service.targetPort`.

### Canary status

@@ -143,7 +139,7 @@ status:
```

The `Promoted` status condition can have one of the following reasons:
Initialized, Waiting, Progressing, Finalising, Succeeded or Failed.
Initialized, Waiting, Progressing, Promoting, Finalising, Succeeded or Failed.
A failed canary will have the promoted status set to `false`,
the reason set to `failed`, and the last applied spec will differ from the last promoted one.
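
For scripting, the reason can be read straight from the status condition; a sketch using kubectl's jsonpath output, with the field layout assumed from the status shown above:

```bash
kubectl get canary/podinfo \
  -o jsonpath='{.status.conditions[?(@.type=="Promoted")].reason}'
```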

@@ -153,184 +149,26 @@ Wait for a successful rollout:
kubectl wait canary/podinfo --for=condition=promoted
```

### Istio routing

Flagger creates an Istio Virtual Service and Destination Rules based on the Canary service spec.
The service configuration lets you expose an app inside or outside the mesh.
You can also define traffic policies, HTTP match conditions, URI rewrite rules, CORS policies, timeouts and retries.

The following spec exposes the `frontend` workload inside the mesh on `frontend.test.svc.cluster.local:9898`
and outside the mesh on `frontend.example.com`. You'll have to specify an Istio ingress gateway for external hosts.

```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: frontend
  namespace: test
spec:
  service:
    # container port
    port: 9898
    # service port name (optional, will default to "http")
    portName: http-frontend
    # Istio gateways (optional)
    gateways:
    - public-gateway.istio-system.svc.cluster.local
    - mesh
    # Istio virtual service host names (optional)
    hosts:
    - frontend.example.com
    # Istio traffic policy (optional)
    trafficPolicy:
      loadBalancer:
        simple: LEAST_CONN
    # HTTP match conditions (optional)
    match:
      - uri:
          prefix: /
    # HTTP rewrite (optional)
    rewrite:
      uri: /
    # Envoy timeout and retry policy (optional)
    headers:
      request:
        add:
          x-envoy-upstream-rq-timeout-ms: "15000"
          x-envoy-max-retries: "10"
          x-envoy-retry-on: "gateway-error,connect-failure,refused-stream"
    # cross-origin resource sharing policy (optional)
    corsPolicy:
      allowOrigin:
        - example.com
      allowMethods:
        - GET
      allowCredentials: false
      allowHeaders:
        - x-some-header
      maxAge: 24h
```

For the above spec Flagger will generate the following virtual service:

```yaml
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: frontend
  namespace: test
  ownerReferences:
    - apiVersion: flagger.app/v1alpha3
      blockOwnerDeletion: true
      controller: true
      kind: Canary
      name: podinfo
      uid: 3a4a40dd-3875-11e9-8e1d-42010a9c0fd1
spec:
  gateways:
    - public-gateway.istio-system.svc.cluster.local
    - mesh
  hosts:
    - frontend.example.com
    - frontend
  http:
    - appendHeaders:
        x-envoy-max-retries: "10"
        x-envoy-retry-on: gateway-error,connect-failure,refused-stream
        x-envoy-upstream-rq-timeout-ms: "15000"
      corsPolicy:
        allowHeaders:
          - x-some-header
        allowMethods:
          - GET
        allowOrigin:
          - example.com
        maxAge: 24h
      match:
        - uri:
            prefix: /
      rewrite:
        uri: /
      route:
        - destination:
            host: podinfo-primary
          weight: 100
        - destination:
            host: podinfo-canary
          weight: 0
```

For each destination in the virtual service a rule is generated:

```yaml
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
  name: frontend-primary
  namespace: test
spec:
  host: frontend-primary
  trafficPolicy:
    loadBalancer:
      simple: LEAST_CONN
---
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
  name: frontend-canary
  namespace: test
spec:
  host: frontend-canary
  trafficPolicy:
    loadBalancer:
      simple: LEAST_CONN
```

Flagger keeps the virtual service and destination rules in sync with the canary service spec.
Any direct modification to the virtual service spec will be overwritten.

To expose a workload inside the mesh on `http://backend.test.svc.cluster.local:9898`,
the service spec can contain only the container port:

```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: backend
  namespace: test
spec:
  service:
    port: 9898
```

Based on the above spec, Flagger will create several ClusterIP services like:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: backend-primary
  ownerReferences:
  - apiVersion: flagger.app/v1alpha3
    blockOwnerDeletion: true
    controller: true
    kind: Canary
    name: backend
    uid: 2ca1a9c7-2ef6-11e9-bd01-42010a9c0145
spec:
  type: ClusterIP
  ports:
  - name: http
    port: 9898
    protocol: TCP
    targetPort: 9898
  selector:
    app: backend-primary
```

Flagger works for user-facing apps exposed outside the cluster via an ingress gateway
and for backend HTTP APIs that are accessible only from inside the mesh.

CI example:

```bash
# update the container image
kubectl set image deployment/podinfo podinfod=stefanprodan/podinfo:3.0.1

# wait for Flagger to detect the change
ok=false
until ${ok}; do
    kubectl get canary/podinfo | grep 'Progressing' && ok=true || ok=false
    sleep 5
done

# wait for the canary analysis to finish
kubectl wait canary/podinfo --for=condition=promoted --timeout=5m

# check if the deployment was successful
kubectl get canary/podinfo | grep Succeeded
```

### Canary Stages

@@ -344,12 +182,13 @@ A canary deployment is triggered by changes in any of the following objects:
Gated canary promotion stages:

* scan for canary deployments
* check Istio virtual service routes are mapped to primary and canary ClusterIP services
* check primary and canary deployments status
* check primary and canary deployment status
* halt advancement if a rolling update is underway
* halt advancement if pods are unhealthy
* call pre-rollout webhooks are check results
* halt advancement if any hook returned a non HTTP 2xx result
* call confirm-rollout webhooks and check results
* halt advancement if any hook returns a non HTTP 2xx result
* call pre-rollout webhooks and check results
* halt advancement if any hook returns a non HTTP 2xx result
* increment the failed checks counter
* increase canary traffic weight percentage from 0% to 5% (step weight)
* call rollout webhooks and check results
@@ -366,8 +205,11 @@ Gated canary promotion stages:
* halt advancement if any webhook call fails
* halt advancement while canary request success rate is under the threshold
* halt advancement while canary request duration P99 is over the threshold
* halt advancement while any custom metric check fails
* halt advancement if the primary or canary deployment becomes unhealthy
* halt advancement while canary deployment is being scaled up/down by HPA
* call confirm-promotion webhooks and check results
* halt advancement if any hook returns a non HTTP 2xx result
* promote canary to primary
* copy ConfigMaps and Secrets from canary to primary
* copy canary deployment spec template over primary
@@ -377,7 +219,7 @@ Gated canary promotion stages:
* scale to zero the canary deployment
* mark rollout as finished
* call post-rollout webhooks
* post the analysis result to Slack
* post the analysis result to Slack or MS Teams
* wait for the canary deployment to be updated and start over

### Canary Analysis

@@ -464,6 +306,45 @@ interval * threshold

Make sure that the analysis threshold is lower than the number of iterations.
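
As a worked example (values assumed): with an interval of one minute and a threshold of ten, a canary that keeps failing its checks is rolled back after at most ten minutes of analysis:

```yaml
canaryAnalysis:
  interval: 1m   # one metric check per minute
  threshold: 10  # rollback after 10 failed checks, i.e. ~10 minutes
  iterations: 15 # must be greater than the threshold
```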

### Blue/Green deployments

For applications that are not deployed on a service mesh, Flagger can orchestrate blue/green style deployments
with Kubernetes L4 networking. When using Istio you have the option to mirror traffic between blue and green.

You can use the blue/green deployment strategy by replacing `stepWeight/maxWeight` with `iterations` in the `canaryAnalysis` spec:

```yaml
canaryAnalysis:
  # schedule interval (default 60s)
  interval: 1m
  # total number of iterations
  iterations: 10
  # max number of failed iterations before rollback
  threshold: 2
  # Traffic shadowing (compatible with Istio only)
  mirror: true
```

With the above configuration Flagger will run conformance and load tests on the canary pods for ten minutes.
If the metrics analysis succeeds, live traffic will be switched from the old version to the new one when the
canary is promoted.

The blue/green deployment strategy is supported by all service mesh providers.

Blue/Green rollout steps for service mesh:
* scale up the canary (green)
* run conformance tests for the canary pods
* run load tests and metric checks for the canary pods
* route traffic to canary
* promote canary spec over primary (blue)
* wait for primary rollout
* route traffic to primary
* scale down canary

After the analysis finishes, the traffic is routed to the canary (green) before triggering the primary (blue)
rolling update. This ensures a smooth transition to the new version, avoiding dropped in-flight requests during
the Kubernetes deployment rollout.

### HTTP Metrics

The canary analysis is using the following Prometheus queries:

@@ -507,7 +388,7 @@ sum(
    )
```

App Mesh query:
Envoy query (App Mesh, Contour or Gloo):

```javascript
sum(
@@ -515,7 +396,7 @@ sum(
        envoy_cluster_upstream_rq{
          kubernetes_namespace="$namespace",
          kubernetes_pod_name=~"$workload",
          response_code!~"5.*"
          envoy_response_code!~"5.*"
        }[$interval]
    )
)
@@ -560,7 +441,7 @@ histogram_quantile(0.99,
    )
```

App Mesh query:
Envoy query (App Mesh, Contour or Gloo):

```javascript
histogram_quantile(0.99,
@@ -656,15 +537,20 @@ The canary analysis can be extended with webhooks. Flagger will call each webhoo
determine from the response status code (HTTP 2xx) if the canary is failing or not.

There are several types of hooks:
* Confirm-rollout hooks are executed before scaling up the canary deployment and ca be used for manual approval.
* Confirm-rollout hooks are executed before scaling up the canary deployment and can be used for manual approval.
  The rollout is paused until the hook returns a successful HTTP status code.
* Pre-rollout hooks are executed before routing traffic to the canary.
  The canary advancement is paused if a pre-rollout hook fails and, if the number of failures reaches the
  threshold, the canary will be rolled back.
* Rollout hooks are executed during the analysis on each iteration before the metric checks.
  If a rollout hook call fails, the canary advancement is paused and eventually rolled back.
* Confirm-promotion hooks are executed before the promotion step.
  The canary promotion is paused until the hooks return HTTP 200.
  While the promotion is paused, Flagger will continue to run the metrics checks and rollout hooks.
* Post-rollout hooks are executed after the canary has been promoted or rolled back.
  If a post-rollout hook fails, the error is logged.
* Event hooks are executed every time Flagger emits a Kubernetes event. When configured,
  every action that Flagger takes during a canary deployment will be sent as JSON via an HTTP POST request.

Spec:

@@ -687,12 +573,18 @@ Spec:
      timeout: 15s
      metadata:
        cmd: "hey -z 1m -q 5 -c 2 http://podinfo-canary.test:9898/"
    - name: "promotion gate"
      type: confirm-promotion
      url: http://flagger-loadtester.test/gate/approve
    - name: "notify"
      type: post-rollout
      url: http://telegram.bot:8080/
      timeout: 5s
      metadata:
        some: "message"
    - name: "send to Slack"
      type: event
      url: http://event-recevier.notifications/slack
```

> **Note** that the sum of all rollout webhook timeouts should be lower than the analysis interval.
@@ -718,6 +610,24 @@ Response status codes:

On a non-2xx response Flagger will include the response body (if any) in the failed checks log and Kubernetes events.

Event payload (HTTP POST):

```json
{
  "name": "string (canary name)",
  "namespace": "string (canary namespace)",
  "phase": "string (canary phase)",
  "metadata": {
    "eventMessage": "string (canary event message)",
    "eventType": "string (canary event type)",
    "timestamp": "string (unix timestamp ms)"
  }
}
```

The event receiver can create alerts based on the received phase
(possible values: `Initialized`, `Waiting`, `Progressing`, `Promoting`, `Finalising`, `Succeeded` or `Failed`).

### Load Testing

For workloads that are not receiving constant traffic Flagger can be configured with a webhook,
@@ -882,6 +792,20 @@ Now you can add pre-rollout webhooks to the canary analysis spec:

When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary.
If the helm test fails, Flagger will retry until the analysis threshold is reached and the canary is rolled back.

If you are using Helm v3, you'll have to create a dedicated service account and add the release namespace to the test command:

```yaml
  canaryAnalysis:
    webhooks:
      - name: "smoke test"
        type: pre-rollout
        url: http://flagger-helmtester.kube-system/
        timeout: 3m
        metadata:
          type: "helmv3"
          cmd: "test run {{ .Release.Name }} --cleanup -n {{ .Release.Namespace }}"
```

As an alternative to Helm you can use the [Bash Automated Testing System](https://github.com/bats-core/bats-core) to run your tests.

```yaml
@@ -900,8 +824,8 @@ Note that you should create a ConfigMap with your Bats tests and mount it inside

### Manual Gating

For manual approval of a canary deployment you can use the `confirm-rollout` webhook.
The confirmation hooks are executed before the pre-rollout hooks.
For manual approval of a canary deployment you can use the `confirm-rollout` and `confirm-promotion` webhooks.
The confirmation rollout hooks are executed before the pre-rollout hooks.
Flagger will halt the canary traffic shifting and analysis until the confirm webhook returns HTTP status 200.

Manual gating with Flagger's tester:

@@ -960,3 +884,16 @@ kubectl get canary/podinfo
NAME      STATUS        WEIGHT
podinfo   Waiting       0
```

The `confirm-promotion` hook type can be used to manually approve the canary promotion.
While the promotion is paused, Flagger will continue to run the metrics checks and load tests.

```yaml
canaryAnalysis:
  webhooks:
    - name: "promotion gate"
      type: confirm-promotion
      url: http://flagger-loadtester.test/gate/halt
```

If you have notifications enabled, Flagger will post a message to Slack or MS Teams if a canary promotion is waiting for approval.
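
To flip the gate from halt to approve, one approach is to exec into the tester pod and call its gate endpoint; a sketch, with the payload shape and the `/gate/open` path assumed from the tester's gate URLs above:

```bash
kubectl -n test exec -it deploy/flagger-loadtester -- \
  curl -d '{"name": "podinfo","namespace":"test"}' http://localhost:8080/gate/open
```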

@@ -12,21 +12,12 @@ The App Mesh integration with EKS is made out of the following components:
* `virtualservice.appmesh.k8s.aws` defines the routing rules for a workload inside the mesh
* CRD controller - keeps the custom resources in sync with the App Mesh control plane
* Admission controller - injects the Envoy sidecar and assigns Kubernetes pods to App Mesh virtual nodes
* Metrics server - Prometheus instance that collects and stores Envoy's metrics
* Telemetry service - Prometheus instance that collects and stores Envoy's metrics

Prerequisites:

* jq
* homebrew
* openssl
* kubectl
* AWS CLI (default region us-west-2)

### Create a Kubernetes cluster

In order to create an EKS cluster you can use [eksctl](https://eksctl.io).
Eksctl is an open source command-line utility made by Weaveworks in collaboration with Amazon,
it’s a Kubernetes-native tool written in Go.
Eksctl is an open source command-line utility made by Weaveworks in collaboration with Amazon.

On MacOS you can install eksctl with Homebrew:

@@ -40,6 +31,8 @@ Create an EKS cluster:
```bash
eksctl create cluster --name=appmesh \
--region=us-west-2 \
--nodes 3 \
--node-volume-size=120 \
--appmesh-access
```
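
Once eksctl finishes, a quick sanity check that the nodes are ready (eksctl writes the kubeconfig for you):

```bash
kubectl get nodes -o wide
```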

@@ -86,7 +79,8 @@ Install the Horizontal Pod Autoscaler (HPA) metrics provider:

```bash
helm upgrade -i metrics-server stable/metrics-server \
--namespace kube-system
--namespace kube-system \
--set args[0]=--kubelet-preferred-address-types=InternalIP
```

After a minute, the metrics API should report CPU and memory usage for pods.

@@ -98,21 +92,39 @@ kubectl -n kube-system top pods

### Install the App Mesh components

Run the App Mesh installer:

```bash
curl -fsSL https://git.io/get-app-mesh-eks.sh | bash -
```

The installer does the following:

* creates the `appmesh-system` namespace
* generates a certificate signed by Kubernetes CA
* registers the App Mesh mutating webhook
* deploys the App Mesh webhook in `appmesh-system` namespace
* deploys the App Mesh CRDs
* deploys the App Mesh controller in `appmesh-system` namespace
* creates a mesh called `global`

Create the `appmesh-system` namespace:

```sh
kubectl create ns appmesh-system
```

Apply the App Mesh CRDs:

```sh
kubectl apply -k github.com/aws/eks-charts/stable/appmesh-controller//crds
```

Add the EKS repository to Helm:

```sh
helm repo add eks https://aws.github.io/eks-charts
```

Install the App Mesh CRD controller:

```sh
helm upgrade -i appmesh-controller eks/appmesh-controller \
--wait --namespace appmesh-system
```

Install the App Mesh admission controller and create a mesh called `global`:

```sh
helm upgrade -i appmesh-inject eks/appmesh-inject \
--wait --namespace appmesh-system \
--set mesh.create=true \
--set mesh.name=global
```

Verify that the global mesh is active:

@@ -125,6 +137,16 @@ Status:
    Type:                  MeshActive
```

In order to collect the App Mesh metrics that Flagger needs to run the canary analysis,
you'll need to set up a Prometheus instance to scrape the Envoy sidecars.

Install the App Mesh Prometheus:

```sh
helm upgrade -i appmesh-prometheus eks/appmesh-prometheus \
--wait --namespace appmesh-system
```
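
To sanity-check the install you can port-forward Prometheus and inspect the Envoy scrape targets (the service name is assumed from the chart above):

```bash
kubectl -n appmesh-system port-forward svc/appmesh-prometheus 9090:9090 &
# then open http://localhost:9090/targets in a browser
```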

### Install Flagger and Grafana

Add Flagger Helm repository:

@@ -139,27 +161,22 @@ Install Flagger's Canary CRD:
kubectl apply -f https://raw.githubusercontent.com/weaveworks/flagger/master/artifacts/flagger/crd.yaml
```

Deploy Flagger and Prometheus in the _**appmesh-system**_ namespace:
Deploy Flagger in the _**appmesh-system**_ namespace:

```bash
helm upgrade -i flagger flagger/flagger \
--namespace=appmesh-system \
--set crd.create=false \
--set meshProvider=appmesh \
--set prometheus.install=true
--set metricsServer=http://appmesh-prometheus:9090
```

In order to collect the App Mesh metrics that Flagger needs to run the canary analysis,
you'll need to setup a Prometheus instance to scrape the Envoy sidecars.

You can enable **Slack** notifications with:
You can enable Slack or MS Teams notifications with:

```bash
helm upgrade -i flagger flagger/flagger \
--reuse-values \
--namespace=appmesh-system \
--set crd.create=false \
--set meshProvider=appmesh \
--set metricsServer=http://prometheus.appmesh:9090 \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general \
--set slack.user=flagger
@@ -171,7 +188,7 @@ Deploy Grafana in the _**appmesh-system**_ namespace:
```bash
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=appmesh-system \
--set url=http://flagger-prometheus.appmesh-system:9090
--set url=http://appmesh-prometheus:9090
```

You can access Grafana using port forwarding:

@@ -186,7 +186,7 @@ Install cert-manager's CRDs:
```bash
CERT_REPO=https://raw.githubusercontent.com/jetstack/cert-manager

kubectl apply -f ${CERT_REPO}/release-0.7/deploy/manifests/00-crds.yaml
kubectl apply -f ${CERT_REPO}/release-0.10/deploy/manifests/00-crds.yaml
```

Create the cert-manager namespace and disable resource validation:
@@ -204,7 +204,7 @@ helm repo add jetstack https://charts.jetstack.io && \
helm repo update && \
helm upgrade -i cert-manager \
--namespace cert-manager \
--version v0.7.0 \
--version v0.10.0 \
jetstack/cert-manager
```

@@ -339,7 +339,7 @@ Find the GKE Istio version with:
kubectl -n istio-system get deploy istio-pilot -oyaml | grep image:
```

Install Prometheus in istio-system namespace (replace `1.0.6-gke.3` with your version):
Install Prometheus in istio-system namespace:

```bash
kubectl -n istio-system apply -f \

@@ -43,6 +43,16 @@ helm upgrade -i flagger flagger/flagger \
--set metricsServer=http://linkerd-prometheus:9090
```

Deploy Flagger for App Mesh:

```bash
helm upgrade -i flagger flagger/flagger \
--namespace=appmesh-system \
--set crd.create=false \
--set meshProvider=appmesh \
--set metricsServer=http://appmesh-prometheus:9090
```

You can install Flagger in any namespace as long as it can talk to the Prometheus service on port 9090.

Enable **Slack** notifications:
@@ -153,6 +163,14 @@ Note that you'll need kubectl 1.14 to run the above command or you can downl
kustomize build github.com/weaveworks/flagger//kustomize/istio | kubectl apply -f -
```

Install Flagger for AWS App Mesh:

```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/appmesh
```

This deploys Flagger and sets the metrics server URL to App Mesh's Prometheus instance.

Install Flagger for Linkerd:

```bash

@@ -51,7 +51,7 @@ helm upgrade -i frontend flagger/podinfo \
--namespace test \
--set nameOverride=frontend \
--set backend=http://backend.test:9898/echo \
--set canary.loadtest.enabled=true \
--set canary.enabled=true \
--set canary.istioIngress.enabled=true \
--set canary.istioIngress.gateway=public-gateway.istio-system.svc.cluster.local \
--set canary.istioIngress.host=frontend.istio.example.com
@@ -91,7 +91,7 @@ Now let's install the `backend` release without exposing it outside the mesh:
helm upgrade -i backend flagger/podinfo \
--namespace test \
--set nameOverride=backend \
--set canary.loadtest.enabled=true \
--set canary.enabled=true \
--set canary.istioIngress.enabled=false
```

@@ -138,7 +138,7 @@ helm upgrade -i frontend flagger/podinfo/ \
--reuse-values \
--set canary.loadtest.enabled=true \
--set canary.helmtest.enabled=true \
--set image.tag=2.0.1
--set image.tag=3.1.1
```

Flagger detects that the deployment revision changed and starts the canary analysis:
@@ -177,6 +177,7 @@ Now trigger a canary deployment for the `backend` app, but this time you'll chan
helm upgrade -i backend flagger/podinfo/ \
--namespace test \
--reuse-values \
--set canary.loadtest.enabled=true \
--set canary.helmtest.enabled=true \
--set httpServer.timeout=25s
```

@@ -283,7 +284,7 @@ metadata:
  namespace: test
  annotations:
    flux.weave.works/automated: "true"
    flux.weave.works/tag.chart-image: semver:~2.0
    flux.weave.works/tag.chart-image: semver:~3.1
spec:
  releaseName: frontend
  chart:
@@ -293,7 +294,7 @@ spec:
  values:
    image:
      repository: stefanprodan/podinfo
      tag: 2.0.0
      tag: 3.1.0
    backend: http://backend-podinfo:9898/echo
    canary:
      enabled: true
@@ -311,7 +312,7 @@ In the `chart` section I've defined the release source by specifying the Helm re
In the `values` section I've overwritten the defaults set in values.yaml.

With the `flux.weave.works` annotations I instruct Flux to automate this release.
When an image tag in the sem ver range of `2.0.0 - 2.0.99` is pushed to Quay,
When an image tag in the sem ver range of `3.1.0 - 3.1.99` is pushed to Docker Hub,
Flux will upgrade the Helm release and from there Flagger will pick up the change and start a canary deployment.

Install [Weave Flux](https://github.com/weaveworks/flux) and its Helm Operator by specifying your Git repo URL:

@@ -344,9 +345,9 @@ launch the `frontend` and `backend` apps.

A CI/CD pipeline for the `frontend` release could look like this:

* cut a release from the master branch of the podinfo code repo with the git tag `2.0.1`
* CI builds the image and pushes the `podinfo:2.0.1` image to the container registry
* Flux scans the registry and updates the Helm release `image.tag` to `2.0.1`
* cut a release from the master branch of the podinfo code repo with the git tag `3.1.1`
* CI builds the image and pushes the `podinfo:3.1.1` image to the container registry
* Flux scans the registry and updates the Helm release `image.tag` to `3.1.1`
* Flux commits and pushes the change to the cluster repo
* Flux applies the updated Helm release on the cluster
* Flux Helm Operator picks up the change and calls Tiller to upgrade the release
@@ -354,9 +355,9 @@ A CI/CD pipeline for the `frontend` release could look like this:
* Flagger runs the helm test before routing traffic to the canary service
* Flagger starts the load test and runs the canary analysis
* Based on the analysis result the canary deployment is promoted to production or rolled back
* Flagger sends a Slack notification with the canary result
* Flagger sends a Slack or MS Teams notification with the canary result

If the canary fails, fix the bug, do another patch release e.g. `2.0.2` and the whole process will run again.
If the canary fails, fix the bug, do another patch release e.g. `3.1.2` and the whole process will run again.

A canary deployment can fail due to any of the following reasons:

@@ -4,42 +4,16 @@ This guide shows you how to use the SMI Istio adapter and Flagger to automate ca

### Prerequisites

Flagger requires a Kubernetes cluster **v1.11** or newer with the following admission controllers enabled:

* MutatingAdmissionWebhook
* ValidatingAdmissionWebhook

* Kubernetes > 1.13
* Istio > 1.0

Flagger depends on [Istio](https://istio.io/docs/setup/kubernetes/quick-start/) **v1.0.3** or newer
with traffic management, telemetry and Prometheus enabled.

A minimal Istio installation should contain the following services:

* istio-pilot
* istio-ingressgateway
* istio-sidecar-injector
* istio-telemetry
* prometheus

### Install Istio and the SMI adapter
### Install Istio SMI adapter

Add Istio Helm repository:
Install the SMI adapter:

```bash
helm repo add istio.io https://storage.googleapis.com/istio-release/releases/1.1.5/charts
```

Install Istio CRDs:

```bash
helm upgrade -i istio-init istio.io/istio-init --wait --namespace istio-system

kubectl -n istio-system wait --for=condition=complete job/istio-init-crd-11
```

Install Istio:

```bash
helm upgrade -i istio istio.io/istio --wait --namespace istio-system
kubectl apply -f https://raw.githubusercontent.com/deislabs/smi-adapter-istio/master/deploy/crds/crds.yaml
kubectl apply -f https://raw.githubusercontent.com/deislabs/smi-adapter-istio/master/deploy/operator-and-rbac.yaml
```

Create a generic Istio gateway to expose services outside the mesh on HTTP:

@@ -74,14 +48,6 @@ Find the Gateway load balancer IP and add a DNS record for it:
kubectl -n istio-system get svc/istio-ingressgateway -ojson | jq -r .status.loadBalancer.ingress[0].ip
```

Install the SMI adapter:

```bash
REPO=https://raw.githubusercontent.com/weaveworks/flagger/master

kubectl apply -f ${REPO}/artifacts/smi/istio-adapter.yaml
```

### Install Flagger and Grafana

Add Flagger Helm repository:
@@ -95,7 +61,6 @@ Deploy Flagger in the _**istio-system**_ namespace:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set image.tag=master-12d84b2 \
--set meshProvider=smi:istio
```

@@ -119,24 +84,23 @@ kubectl -n istio-system port-forward svc/flagger-grafana 3000:80

Create a test namespace with Istio sidecar injection enabled:
Create a test namespace and enable Istio sidecar injection:

```bash
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master

kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
kubectl create ns test
kubectl label namespace test istio-injection=enabled
```

Create a deployment and a horizontal pod autoscaler:

```bash
kubectl apply -f ${REPO}/artifacts/canaries/deployment.yaml
kubectl apply -f ${REPO}/artifacts/canaries/hpa.yaml
kubectl apply -k github.com/weaveworks/flagger//kustomize/podinfo
```

Deploy the load testing service to generate traffic during the canary analysis:

```bash
kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
kubectl apply -k github.com/weaveworks/flagger//kustomize/tester
```

Create a canary custom resource (replace example.com with your own domain):

@@ -236,7 +200,7 @@ Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.7.1
podinfod=quay.io/stefanprodan/podinfo:3.1.1
```

Flagger detects that the deployment revision changed and starts a new rollout:
@@ -287,7 +251,7 @@ Create a tester pod and exec into it:

```bash
kubectl -n test run tester \
--image=quay.io/stefanprodan/podinfo:1.2.1 \
--image=quay.io/stefanprodan/podinfo:3.1.2 \
-- ./podinfo --port=9898

kubectl -n test exec -it tester-xx-xx sh
```

@@ -27,7 +27,7 @@ Your application should expose an HTTP endpoint that Kubernetes can call to determ
your app transitioned to a broken state from which it can't recover and needs to be restarted.

```yaml
readinessProbe:
livenessProbe:
  exec:
    command:
      - wget
@@ -49,7 +49,7 @@ Your application should expose an HTTP endpoint that Kubernetes can call to determ
your app is ready to receive traffic.

```yaml
livenessProbe:
readinessProbe:
  exec:
    command:
      - wget

@@ -13,23 +13,20 @@ This is particularly useful for frontend applications that require session affin
Create a test namespace with Istio sidecar injection enabled:

```bash
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master

kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
kubectl create ns test
kubectl label namespace test istio-injection=enabled
```

Create a deployment and a horizontal pod autoscaler:

```bash
kubectl apply -f ${REPO}/artifacts/ab-testing/deployment.yaml
kubectl apply -f ${REPO}/artifacts/ab-testing/hpa.yaml
kubectl apply -k github.com/weaveworks/flagger//kustomize/podinfo
```

Deploy the load testing service to generate traffic during the canary analysis:

```bash
kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
kubectl apply -k github.com/weaveworks/flagger//kustomize/tester
```

Create a canary custom resource (replace example.com with your own domain):

@@ -38,14 +35,14 @@ Create a canary custom resource (replace example.com with your own domain):
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
  name: abtest
  name: podinfo
  namespace: test
spec:
  # deployment reference
  targetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: abtest
    name: podinfo
  # the maximum time in seconds for the canary deployment
  # to make progress before it is rolled back (default 600s)
  progressDeadlineSeconds: 60
@@ -53,7 +50,7 @@ spec:
  autoscalerRef:
    apiVersion: autoscaling/v2beta1
    kind: HorizontalPodAutoscaler
    name: abtest
    name: podinfo
  service:
    # container port
    port: 9898
@@ -63,6 +60,11 @@ spec:
    # Istio virtual service host names (optional)
    hosts:
    - app.example.com
    # Istio traffic policy (optional)
    trafficPolicy:
      tls:
        # use ISTIO_MUTUAL when mTLS is enabled
        mode: DISABLE
  canaryAnalysis:
    # schedule interval (default 60s)
    interval: 1m

@@ -110,19 +112,19 @@ After a couple of seconds Flagger will create the canary objects:

```bash
# applied
deployment.apps/abtest
horizontalpodautoscaler.autoscaling/abtest
canary.flagger.app/abtest
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
canary.flagger.app/podinfo

# generated
deployment.apps/abtest-primary
horizontalpodautoscaler.autoscaling/abtest-primary
service/abtest
service/abtest-canary
service/abtest-primary
destinationrule.networking.istio.io/abtest-canary
destinationrule.networking.istio.io/abtest-primary
virtualservice.networking.istio.io/abtest
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
destinationrule.networking.istio.io/podinfo-canary
destinationrule.networking.istio.io/podinfo-primary
virtualservice.networking.istio.io/podinfo
```

### Automated canary promotion

@@ -131,7 +133,7 @@ Trigger a canary deployment by updating the container image:

```bash
kubectl -n test set image deployment/abtest \
podinfod=stefanprodan/podinfo:2.0.1
podinfod=stefanprodan/podinfo:3.1.1
```

Flagger detects that the deployment revision changed and starts a new rollout:
@@ -145,22 +147,22 @@ Status:
Events:
  Type     Reason  Age   From     Message
  ----     ------  ----  ----     -------
  Normal   Synced  3m    flagger  New revision detected abtest.test
  Normal   Synced  3m    flagger  Scaling up abtest.test
  Warning  Synced  3m    flagger  Waiting for abtest.test rollout to finish: 0 of 1 updated replicas are available
  Normal   Synced  3m    flagger  Advance abtest.test canary iteration 1/10
  Normal   Synced  3m    flagger  Advance abtest.test canary iteration 2/10
  Normal   Synced  3m    flagger  Advance abtest.test canary iteration 3/10
  Normal   Synced  2m    flagger  Advance abtest.test canary iteration 4/10
  Normal   Synced  2m    flagger  Advance abtest.test canary iteration 5/10
  Normal   Synced  1m    flagger  Advance abtest.test canary iteration 6/10
  Normal   Synced  1m    flagger  Advance abtest.test canary iteration 7/10
  Normal   Synced  55s   flagger  Advance abtest.test canary iteration 8/10
  Normal   Synced  45s   flagger  Advance abtest.test canary iteration 9/10
  Normal   Synced  35s   flagger  Advance abtest.test canary iteration 10/10
  Normal   Synced  25s   flagger  Copying abtest.test template spec to abtest-primary.test
  Normal   Synced  3m    flagger  New revision detected podinfo.test
  Normal   Synced  3m    flagger  Scaling up podinfo.test
  Warning  Synced  3m    flagger  Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
  Normal   Synced  3m    flagger  Advance podinfo.test canary iteration 1/10
  Normal   Synced  3m    flagger  Advance podinfo.test canary iteration 2/10
  Normal   Synced  3m    flagger  Advance podinfo.test canary iteration 3/10
  Normal   Synced  2m    flagger  Advance podinfo.test canary iteration 4/10
  Normal   Synced  2m    flagger  Advance podinfo.test canary iteration 5/10
  Normal   Synced  1m    flagger  Advance podinfo.test canary iteration 6/10
  Normal   Synced  1m    flagger  Advance podinfo.test canary iteration 7/10
  Normal   Synced  55s   flagger  Advance podinfo.test canary iteration 8/10
  Normal   Synced  45s   flagger  Advance podinfo.test canary iteration 9/10
  Normal   Synced  35s   flagger  Advance podinfo.test canary iteration 10/10
  Normal   Synced  25s   flagger  Copying podinfo.test template spec to abtest-primary.test
  Warning  Synced  15s   flagger  Waiting for abtest-primary.test rollout to finish: 1 of 2 updated replicas are available
  Normal   Synced  5s    flagger  Promotion completed! Scaling down abtest.test
  Normal   Synced  5s    flagger  Promotion completed! Scaling down podinfo.test
```

**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
@@ -204,12 +206,12 @@ Status:
Events:
  Type     Reason  Age  From     Message
  ----     ------  ---- ----     -------
  Normal   Synced  3m   flagger  Starting canary deployment for abtest.test
  Normal   Synced  3m   flagger  Advance abtest.test canary iteration 1/10
  Normal   Synced  3m   flagger  Advance abtest.test canary iteration 2/10
  Normal   Synced  3m   flagger  Advance abtest.test canary iteration 3/10
  Normal   Synced  3m   flagger  Halt abtest.test advancement success rate 69.17% < 99%
  Normal   Synced  2m   flagger  Halt abtest.test advancement success rate 61.39% < 99%
  Warning  Synced  2m   flagger  Rolling back abtest.test failed checks threshold reached 2
  Warning  Synced  1m   flagger  Canary failed! Scaling down abtest.test
  Normal   Synced  3m   flagger  Starting canary deployment for podinfo.test
  Normal   Synced  3m   flagger  Advance podinfo.test canary iteration 1/10
  Normal   Synced  3m   flagger  Advance podinfo.test canary iteration 2/10
  Normal   Synced  3m   flagger  Advance podinfo.test canary iteration 3/10
  Normal   Synced  3m   flagger  Halt podinfo.test advancement success rate 69.17% < 99%
  Normal   Synced  2m   flagger  Halt podinfo.test advancement success rate 61.39% < 99%
  Warning  Synced  2m   flagger  Rolling back podinfo.test failed checks threshold reached 2
  Warning  Synced  1m   flagger  Canary failed! Scaling down podinfo.test
```

@@ -53,3 +53,52 @@ Besides Slack, you can use Alertmanager to trigger alerts when a canary deployme
    description: "Workload {{ $labels.name }} namespace {{ $labels.namespace }}"
```

### Event Webhook

Flagger can be configured to send event payloads to a specified webhook:

```bash
helm upgrade -i flagger flagger/flagger \
--set eventWebhook=https://example.com/flagger-canary-event-webhook
```

When configured, every action that Flagger takes during a canary deployment will be sent as JSON via an HTTP POST
request. The JSON payload has the following schema:

```json
{
  "name": "string (canary name)",
  "namespace": "string (canary namespace)",
  "phase": "string (canary phase)",
  "metadata": {
    "eventMessage": "string (canary event message)",
    "eventType": "string (canary event type)",
    "timestamp": "string (unix timestamp ms)"
  }
}
```

Example:

```json
{
  "name": "podinfo",
  "namespace": "default",
  "phase": "Progressing",
  "metadata": {
    "eventMessage": "New revision detected! Scaling up podinfo.default",
    "eventType": "Normal",
    "timestamp": "1578607635167"
  }
}
```

The event webhook can be overwritten at canary level with:

```yaml
canaryAnalysis:
  webhooks:
    - name: "send to Slack"
      type: event
      url: http://event-recevier.notifications/slack
```

@@ -14,22 +14,33 @@ The only App Mesh object you need to create by yourself is the mesh resource.

Create a mesh called `global`:

```bash
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master

kubectl apply -f ${REPO}/artifacts/appmesh/global-mesh.yaml
cat << EOF | kubectl apply -f -
apiVersion: appmesh.k8s.aws/v1beta1
kind: Mesh
metadata:
  name: global
spec:
  serviceDiscoveryType: dns
EOF
```

Create a test namespace with App Mesh sidecar injection enabled:

```bash
kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
cat << EOF | kubectl apply -f -
apiVersion: v1
kind: Namespace
metadata:
  name: test
  labels:
    appmesh.k8s.aws/sidecarInjectorWebhook: enabled
EOF
```

Create a deployment and a horizontal pod autoscaler:

```bash
kubectl apply -f ${REPO}/artifacts/appmesh/deployment.yaml
kubectl apply -f ${REPO}/artifacts/appmesh/hpa.yaml
kubectl apply -k github.com/weaveworks/flagger//kustomize/podinfo
```

Deploy the load testing service to generate traffic during the canary analysis:

@@ -39,8 +50,7 @@ helm upgrade -i flagger-loadtester flagger/loadtester \
--namespace=test \
--set meshName=global \
--set "backends[0]=podinfo.test" \
--set "backends[1]=podinfo-canary.test" \
--set "backends[2]=podinfo-primary.test"
--set "backends[1]=podinfo-canary.test"
```

Create a canary custom resource:

@@ -68,17 +78,30 @@ spec:
  service:
    # container port
    port: 9898
    # container port name (optional)
    # can be http or grpc
    portName: http
    # App Mesh reference
    meshName: global
    # App Mesh ingress (optional)
    hosts:
    - "*"
    # App Mesh ingress timeout (optional)
    timeout: 5s
    # App Mesh egress (optional)
    backends:
    - backend.test
    # App Mesh retry policy (optional)
    retries:
      attempts: 3
      perTryTimeout: 5s
      retryOn: "gateway-error,client-error,stream-error"
  # define the canary analysis timing and KPIs
  canaryAnalysis:
    # schedule interval (default 60s)
    interval: 10s
    interval: 1m
    # max number of failed metric checks before rollback
    threshold: 10
    threshold: 5
    # max traffic percentage routed to canary
    # percentage (0-100)
    maxWeight: 50
@@ -92,13 +115,25 @@ spec:
      # percentage (0-100)
      threshold: 99
      interval: 1m
    # external checks (optional)
    - name: request-duration
      # maximum req duration P99
      # milliseconds
      threshold: 500
      interval: 30s
    # testing (optional)
    webhooks:
    - name: load-test
      url: http://flagger-loadtester.test/
      timeout: 5s
      metadata:
        cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
    - name: acceptance-test
      type: pre-rollout
      url: http://flagger-loadtester.test/
      timeout: 30s
      metadata:
        type: bash
        cmd: "curl -sd 'test' http://podinfo-canary.test:9898/token | grep token"
    - name: load-test
      url: http://flagger-loadtester.test/
      timeout: 5s
      metadata:
        cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.test:9898/"
```

Save the above resource as podinfo-canary.yaml and then apply it:
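
The apply step, with the file name taken from the sentence above:

```bash
kubectl apply -f ./podinfo-canary.yaml
```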

@@ -127,14 +162,18 @@ virtualnode.appmesh.k8s.aws/podinfo
virtualnode.appmesh.k8s.aws/podinfo-canary
virtualnode.appmesh.k8s.aws/podinfo-primary
virtualservice.appmesh.k8s.aws/podinfo.test
virtualservice.appmesh.k8s.aws/podinfo-canary.test
```

After the bootstrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test` will be routed
to the primary pods. During the canary analysis, the `podinfo-canary.test` address can be used to directly target the canary pods.
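
For example, from any pod inside the mesh you can hit the canary directly while the analysis runs (`/version` is one of podinfo's own endpoints):

```bash
curl http://podinfo-canary.test:9898/version
```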
|
||||
|
||||
The App Mesh specific settings are:
|
||||
|
||||
```yaml
|
||||
service:
|
||||
port: 9898
|
||||
meshName: global.appmesh-system
|
||||
meshName: global
|
||||
backends:
|
||||
- backend1.test
|
||||
- backend2.test
|
||||
@@ -143,42 +182,47 @@ The App Mesh specific settings are:
|
||||
App Mesh blocks all egress traffic by default. If your application needs to call another service, you have to create an
|
||||
App Mesh virtual service for it and add the virtual service name to the backend list.
|
||||
|
||||
### Setup App Mesh ingress (optional)
|
||||
### Setup App Mesh Gateway (optional)
|
||||
|
||||
In order to expose the podinfo app outside the mesh you'll be using an Envoy ingress and an AWS classic load balancer.
|
||||
The ingress binds to an internet domain and forwards the calls into the mesh through the App Mesh sidecar.
|
||||
If podinfo becomes unavailable due to a HPA downscaling or a node restart,
|
||||
the ingress will retry the calls for a short period of time.
|
||||
In order to expose the podinfo app outside the mesh you'll be using an Envoy-powered ingress gateway and an AWS network load balancer.
|
||||
The gateway binds to an internet domain and forwards the calls into the mesh through the App Mesh sidecar.
|
||||
If podinfo becomes unavailable due to a cluster downscaling or a node restart,
|
||||
the gateway will retry the calls for a short period of time.
|
||||
|
||||
Deploy the ingress and the AWS ELB service:
|
||||
Deploy the gateway behind an AWS NLB:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ${REPO}/artifacts/appmesh/ingress.yaml
|
||||
helm upgrade -i appmesh-gateway flagger/appmesh-gateway \
|
||||
--namespace test \
|
||||
--set mesh.name=global
|
||||
```
|
||||
|
||||
Find the ingress public address:
|
||||
Find the gateway public address:
|
||||
|
||||
```bash
|
||||
kubectl -n test describe svc/ingress | grep Ingress
|
||||
|
||||
LoadBalancer Ingress: yyy-xx.us-west-2.elb.amazonaws.com
|
||||
export URL="http://$(kubectl -n test get svc/appmesh-gateway -ojson | jq -r ".status.loadBalancer.ingress[].hostname")"
|
||||
echo $URL
|
||||
```
|
||||
|
||||
Wait for the ELB to become active:
|
||||
Wait for the NLB to become active:
|
||||
|
||||
```bash
|
||||
watch curl -sS ${INGRESS_URL}
|
||||
watch curl -sS $URL
|
||||
```
|
||||
|
||||
Open your browser and navigate to the ingress address to access the podinfo UI.
|
||||
|
||||
### Automated canary promotion
|
||||
|
||||
A canary deployment is triggered by changes in any of the following objects:
|
||||
* Deployment PodSpec (container image, command, ports, env, resources, etc)
|
||||
* ConfigMaps and Secrets mounted as volumes or mapped to environment variables
|
||||
|
||||
Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:2.0.1
|
||||
podinfod=stefanprodan/podinfo:3.1.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
@@ -191,26 +235,27 @@ Status:
|
||||
Failed Checks: 0
|
||||
Phase: Succeeded
|
||||
Events:
|
||||
Type Reason Age From Message
|
||||
---- ------ ---- ---- -------
|
||||
Normal Synced 3m flagger New revision detected podinfo.test
|
||||
Normal Synced 3m flagger Scaling up podinfo.test
|
||||
Warning Synced 3m flagger Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
|
||||
Normal Synced 3m flagger Advance podinfo.test canary weight 5
|
||||
Normal Synced 3m flagger Advance podinfo.test canary weight 10
|
||||
Normal Synced 3m flagger Advance podinfo.test canary weight 15
|
||||
Normal Synced 2m flagger Advance podinfo.test canary weight 20
|
||||
Normal Synced 2m flagger Advance podinfo.test canary weight 25
|
||||
Normal Synced 1m flagger Advance podinfo.test canary weight 30
|
||||
Normal Synced 1m flagger Advance podinfo.test canary weight 35
|
||||
Normal Synced 55s flagger Advance podinfo.test canary weight 40
|
||||
Normal Synced 45s flagger Advance podinfo.test canary weight 45
|
||||
Normal Synced 35s flagger Advance podinfo.test canary weight 50
|
||||
Normal Synced 25s flagger Copying podinfo.test template spec to podinfo-primary.test
|
||||
Warning Synced 15s flagger Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
|
||||
Normal Synced 5s flagger Promotion completed! Scaling down podinfo.test
|
||||
New revision detected! Scaling up podinfo.test
|
||||
Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
|
||||
Pre-rollout check acceptance-test passed
|
||||
Advance podinfo.test canary weight 5
|
||||
Advance podinfo.test canary weight 10
|
||||
Advance podinfo.test canary weight 15
|
||||
Advance podinfo.test canary weight 20
|
||||
Advance podinfo.test canary weight 25
|
||||
Advance podinfo.test canary weight 30
|
||||
Advance podinfo.test canary weight 35
|
||||
Advance podinfo.test canary weight 40
|
||||
Advance podinfo.test canary weight 45
|
||||
Advance podinfo.test canary weight 50
|
||||
Copying podinfo.test template spec to podinfo-primary.test
|
||||
Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
|
||||
Routing all traffic to primary
|
||||
Promotion completed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary.
|
||||
|
||||
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
|
||||
|
||||
During the analysis the canary’s progress can be monitored with Grafana. The App Mesh dashboard URL is
|
||||
@@ -224,9 +269,9 @@ You can monitor all canaries with:
|
||||
watch kubectl get canaries --all-namespaces
|
||||
|
||||
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
|
||||
test podinfo Progressing 15 2019-03-16T14:05:07Z
|
||||
prod frontend Succeeded 0 2019-03-15T16:15:07Z
|
||||
prod backend Failed 0 2019-03-14T17:05:07Z
|
||||
test podinfo Progressing 15 2019-10-02T14:05:07Z
|
||||
prod frontend Succeeded 0 2019-10-02T16:15:07Z
|
||||
prod backend Failed 0 2019-10-02T17:05:07Z
|
||||
```
|
||||
|
||||
If you’ve enabled the Slack notifications, you should receive the following messages:
|
||||
@@ -235,54 +280,125 @@ If you’ve enabled the Slack notifications, you should receive the following me
|
||||
|
||||
### Automated rollback
|
||||
|
||||
During the canary analysis you can generate HTTP 500 errors to test if Flagger pauses the rollout.
|
||||
During the canary analysis you can generate HTTP 500 errors or high latency to test if Flagger pauses the rollout.
|
||||
|
||||
Trigger a canary deployment:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:2.0.2
|
||||
podinfod=stefanprodan/podinfo:3.1.2
|
||||
```
|
||||
|
||||
Exec into the load tester pod with:
|
||||
|
||||
```bash
|
||||
kubectl -n test exec -it flagger-loadtester-xx-xx sh
|
||||
kubectl -n test exec -it deploy/flagger-loadtester bash
|
||||
```
|
||||
|
||||
Generate HTTP 500 errors:
|
||||
|
||||
```bash
|
||||
hey -z 1m -c 5 -q 5 http://podinfo.test:9898/status/500
|
||||
hey -z 1m -c 5 -q 5 http://podinfo-canary.test:9898/status/500
|
||||
```
|
||||
|
||||
Generate latency:
|
||||
|
||||
```bash
|
||||
watch -n 1 curl http://podinfo-canary.test:9898/delay/1
|
||||
```
|
||||
|
||||
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
|
||||
the canary is scaled to zero and the rollout is marked as failed.
|
||||
|
||||
```text
|
||||
kubectl -n test describe canary/podinfo
|
||||
kubectl -n appmesh-system logs deploy/flagger -f | jq .msg
|
||||
|
||||
Status:
|
||||
Canary Weight: 0
|
||||
Failed Checks: 10
|
||||
Phase: Failed
|
||||
Events:
|
||||
Type Reason Age From Message
|
||||
---- ------ ---- ---- -------
|
||||
Normal Synced 3m flagger Starting canary deployment for podinfo.test
|
||||
Normal Synced 3m flagger Advance podinfo.test canary weight 5
|
||||
Normal Synced 3m flagger Advance podinfo.test canary weight 10
|
||||
Normal Synced 3m flagger Advance podinfo.test canary weight 15
|
||||
Normal Synced 3m flagger Halt podinfo.test advancement success rate 69.17% < 99%
|
||||
Normal Synced 2m flagger Halt podinfo.test advancement success rate 61.39% < 99%
|
||||
Normal Synced 2m flagger Halt podinfo.test advancement success rate 55.06% < 99%
|
||||
Normal Synced 2m flagger Halt podinfo.test advancement success rate 47.00% < 99%
|
||||
Normal Synced 2m flagger (combined from similar events): Halt podinfo.test advancement success rate 38.08% < 99%
|
||||
Warning Synced 1m flagger Rolling back podinfo.test failed checks threshold reached 10
|
||||
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
|
||||
New revision detected! Starting canary analysis for podinfo.test
|
||||
Pre-rollout check acceptance-test passed
|
||||
Advance podinfo.test canary weight 5
|
||||
Advance podinfo.test canary weight 10
|
||||
Advance podinfo.test canary weight 15
|
||||
Halt podinfo.test advancement success rate 69.17% < 99%
|
||||
Halt podinfo.test advancement success rate 61.39% < 99%
|
||||
Halt podinfo.test advancement success rate 55.06% < 99%
|
||||
Halt podinfo.test advancement request duration 1.20s > 0.5s
|
||||
Halt podinfo.test advancement request duration 1.45s > 0.5s
|
||||
Rolling back podinfo.test failed checks threshold reached 5
|
||||
Canary failed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
If you’ve enabled the Slack notifications, you’ll receive a message if the progress deadline is exceeded,
|
||||
or if the analysis reached the maximum number of failed checks:
|
||||
|
||||

|
||||
|
||||
### A/B Testing
|
||||
|
||||
Besides weighted routing, Flagger can be configured to route traffic to the canary based on HTTP match conditions.
|
||||
In an A/B testing scenario, you'll be using HTTP headers or cookies to target a certain segment of your users.
|
||||
This is particularly useful for frontend applications that require session affinity.
|
||||
|
||||

|
||||
|
||||
Edit the canary analysis, remove the max/step weight and add the match conditions and iterations:
|
||||
|
||||
```yaml
|
||||
canaryAnalysis:
|
||||
interval: 1m
|
||||
threshold: 5
|
||||
iterations: 10
|
||||
match:
|
||||
- headers:
|
||||
x-canary:
|
||||
exact: "insider"
|
||||
webhooks:
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
metadata:
|
||||
cmd: "hey -z 1m -q 10 -c 2 -H 'X-Canary: insider' http://podinfo.test:9898/"
|
||||
```
|
||||
|
||||
The above configuration will run an analysis for ten minutes targeting users that have an `X-Canary: insider` header.
|
||||
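A quick way to verify the routing by hand, assuming podinfo exposes a `/version` endpoint, is to compare responses from inside the mesh with and without the header:

```bash
# /version is assumed to be exposed by podinfo;
# requests carrying the header should be served by the canary,
# requests without it should keep hitting the primary
curl -s -H 'X-Canary: insider' http://podinfo.test:9898/version
curl -s http://podinfo.test:9898/version
```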
|
||||
You can also use an HTTP cookie. To target all users with a `canary` cookie set to `insider`, the match condition should be:
|
||||
|
||||
```yaml
|
||||
match:
|
||||
- headers:
|
||||
cookie:
|
||||
regex: "^(.*?;)?(canary=insider)(;.*)?$"
|
||||
webhooks:
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
metadata:
|
||||
cmd: "hey -z 1m -q 10 -c 2 -H 'Cookie: canary=insider' http://podinfo.test:9898/"
|
||||
```
|
||||
|
||||
Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:3.1.3
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts the A/B test:
|
||||
|
||||
```text
|
||||
kubectl -n appmesh-system logs deploy/flagger -f | jq .msg
|
||||
|
||||
New revision detected! Starting canary analysis for podinfo.test
|
||||
Advance podinfo.test canary iteration 1/10
|
||||
Advance podinfo.test canary iteration 2/10
|
||||
Advance podinfo.test canary iteration 3/10
|
||||
Advance podinfo.test canary iteration 4/10
|
||||
Advance podinfo.test canary iteration 5/10
|
||||
Advance podinfo.test canary iteration 6/10
|
||||
Advance podinfo.test canary iteration 7/10
|
||||
Advance podinfo.test canary iteration 8/10
|
||||
Advance podinfo.test canary iteration 9/10
|
||||
Advance podinfo.test canary iteration 10/10
|
||||
Copying podinfo.test template spec to podinfo-primary.test
|
||||
Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
|
||||
Routing all traffic to primary
|
||||
Promotion completed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
@@ -4,6 +4,7 @@ This guide shows you how to automate Blue/Green deployments with Flagger and Kub
|
||||
|
||||
For applications that are not deployed on a service mesh, Flagger can orchestrate Blue/Green style deployments
|
||||
with Kubernetes L4 networking.
|
||||
When using a service mesh, Blue/Green can be used as specified [here](https://docs.flagger.app/how-it-works#blue-green-deployments).
|
||||
|
||||

|
||||
|
||||
@@ -80,7 +81,6 @@ metadata:
|
||||
namespace: test
|
||||
spec:
|
||||
# service mesh provider can be: kubernetes, istio, appmesh, nginx, gloo
|
||||
# use the kubernetes provider for Blue/Green style deployments
|
||||
provider: kubernetes
|
||||
# deployment reference
|
||||
targetRef:
|
||||
@@ -96,7 +96,6 @@ spec:
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
# container port
|
||||
port: 9898
|
||||
portDiscovery: true
|
||||
canaryAnalysis:
|
||||
@@ -172,7 +171,7 @@ Trigger a deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:2.0.1
|
||||
podinfod=stefanprodan/podinfo:3.1.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
@@ -297,7 +296,7 @@ Trigger a deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:2.0.3
|
||||
podinfod=stefanprodan/podinfo:3.1.3
|
||||
```
|
||||
|
||||
Generate 404s:
|
||||
|
||||
434 docs/gitbook/usage/contour-progressive-delivery.md (new file)
@@ -0,0 +1,434 @@
|
||||
# Contour Canary Deployments
|
||||
|
||||
This guide shows you how to use [Contour](https://projectcontour.io/) ingress controller and Flagger to automate canary releases and A/B testing.
|
||||
|
||||

|
||||
|
||||
### Prerequisites
|
||||
|
||||
Flagger requires a Kubernetes cluster **v1.11** or newer and Contour **v1.0** or newer.
|
||||
|
||||
Install Contour on a cluster with LoadBalancer support:
|
||||
|
||||
```bash
|
||||
kubectl apply -f https://projectcontour.io/quickstart/contour.yaml
|
||||
```
|
||||
|
||||
The above command will deploy Contour and an Envoy daemonset in the `projectcontour` namespace.
|
||||
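You can verify the installation by checking that the Contour deployment and the Envoy daemonset pods are running:

```bash
# pod names will vary; all pods should reach the Running state
kubectl -n projectcontour get pods
```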
|
||||
Install Flagger using Kustomize (kubectl 1.14) in the `projectcontour` namespace:
|
||||
|
||||
```bash
|
||||
kubectl apply -k github.com/weaveworks/flagger//kustomize/contour
|
||||
```
|
||||
|
||||
The above command will deploy Flagger and Prometheus configured to scrape Contour's Envoy instances.
|
||||
You can also enable Slack or MS Teams notifications,
|
||||
see the Kustomize install [docs](https://docs.flagger.app/install/flagger-install-on-kubernetes#install-flagger-with-kustomize).
|
||||
|
||||
Or you can install Flagger using Helm:
|
||||
|
||||
```sh
|
||||
helm repo add flagger https://flagger.app
|
||||
|
||||
helm upgrade -i flagger flagger/flagger \
|
||||
--namespace projectcontour \
|
||||
--set meshProvider=contour \
|
||||
--set prometheus.install=true \
|
||||
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
|
||||
--set slack.channel=general \
|
||||
--set slack.user=flagger
|
||||
```
|
||||
|
||||
### Bootstrap
|
||||
|
||||
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
|
||||
then creates a series of objects (Kubernetes deployments, ClusterIP services and Contour HTTPProxy).
|
||||
These objects expose the application in the cluster and drive the canary analysis and promotion.
|
||||
|
||||
Create a test namespace:
|
||||
|
||||
```bash
|
||||
kubectl create ns test
|
||||
```
|
||||
|
||||
Install the load testing service to generate traffic during the canary analysis:
|
||||
|
||||
```bash
|
||||
kubectl apply -k github.com/weaveworks/flagger//kustomize/tester
|
||||
```
|
||||
|
||||
Create a deployment and a horizontal pod autoscaler:
|
||||
|
||||
```bash
|
||||
kubectl apply -k github.com/weaveworks/flagger//kustomize/podinfo
|
||||
```
|
||||
|
||||
Create a canary custom resource (replace `app.example.com` with your own domain):
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1alpha3
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
# deployment reference
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
# HPA reference
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta1
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
# service port
|
||||
port: 80
|
||||
# container port
|
||||
targetPort: 9898
|
||||
# Contour request timeout
|
||||
timeout: 15s
|
||||
# Contour retry policy
|
||||
retries:
|
||||
attempts: 3
|
||||
perTryTimeout: 5s
|
||||
# define the canary analysis timing and KPIs
|
||||
canaryAnalysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 30s
|
||||
# max number of failed metric checks before rollback
|
||||
threshold: 5
|
||||
# max traffic percentage routed to canary
|
||||
# percentage (0-100)
|
||||
maxWeight: 50
|
||||
# canary increment step
|
||||
# percentage (0-100)
|
||||
stepWeight: 5
|
||||
# Contour Prometheus checks
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
# minimum req success rate (non 5xx responses)
|
||||
# percentage (0-100)
|
||||
threshold: 99
|
||||
interval: 1m
|
||||
- name: request-duration
|
||||
# maximum req duration P99 in milliseconds
|
||||
threshold: 500
|
||||
interval: 30s
|
||||
# testing
|
||||
webhooks:
|
||||
- name: acceptance-test
|
||||
type: pre-rollout
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 30s
|
||||
metadata:
|
||||
type: bash
|
||||
cmd: "curl -sd 'test' http://podinfo-canary.test/token | grep token"
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
type: rollout
|
||||
timeout: 5s
|
||||
metadata:
|
||||
cmd: "hey -z 1m -q 10 -c 2 -host app.example.com http://envoy.projectcontour"
|
||||
```
|
||||
|
||||
Save the above resource as podinfo-canary.yaml and then apply it:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./podinfo-canary.yaml
|
||||
```
|
||||
|
||||
The canary analysis will run for five minutes (ten 5% weight increments at a 30s interval) while validating the HTTP metrics and rollout hooks every half a minute.
|
||||
|
||||
After a couple of seconds Flagger will create the canary objects:
|
||||
|
||||
```bash
|
||||
# applied
|
||||
deployment.apps/podinfo
|
||||
horizontalpodautoscaler.autoscaling/podinfo
|
||||
canary.flagger.app/podinfo
|
||||
|
||||
# generated
|
||||
deployment.apps/podinfo-primary
|
||||
horizontalpodautoscaler.autoscaling/podinfo-primary
|
||||
service/podinfo
|
||||
service/podinfo-canary
|
||||
service/podinfo-primary
|
||||
httpproxy.projectcontour.io/podinfo
|
||||
```
|
||||
|
||||
After the bootstrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test` will be routed
to the primary pods. During the canary analysis, the `podinfo-canary.test` address can be used to target the canary pods directly.
|
||||
|
||||
### Expose the app outside the cluster
|
||||
|
||||
Find the external address of Contour's Envoy load balancer:
|
||||
|
||||
```bash
|
||||
export ADDRESS="$(kubectl -n projectcontour get svc/envoy -ojson \
|
||||
| jq -r ".status.loadBalancer.ingress[].hostname")"
|
||||
echo $ADDRESS
|
||||
```
|
||||
|
||||
Configure your DNS server with a CNAME record (AWS) or A record (GKE/AKS/DOKS) and point a domain e.g. `app.example.com` to the LB address.
|
||||
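Before creating the ingress, you can confirm that the record resolves to the load balancer, for example:

```bash
# should print the load balancer hostname or its IPs
dig +short app.example.com
```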
|
||||
Create a HTTPProxy definition and include the podinfo proxy generated by Flagger (replace `app.example.com` with your own domain):
|
||||
|
||||
```yaml
|
||||
apiVersion: projectcontour.io/v1
|
||||
kind: HTTPProxy
|
||||
metadata:
|
||||
name: podinfo-ingress
|
||||
namespace: test
|
||||
spec:
|
||||
virtualhost:
|
||||
fqdn: app.example.com
|
||||
includes:
|
||||
- name: podinfo
|
||||
namespace: test
|
||||
conditions:
|
||||
- prefix: /
|
||||
```
|
||||
|
||||
Save the above resource as podinfo-ingress.yaml and then apply it:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./podinfo-ingress.yaml
|
||||
```
|
||||
|
||||
Verify that Contour processed the proxy definition with:
|
||||
|
||||
```sh
|
||||
kubectl -n test get httpproxies
|
||||
|
||||
NAME FQDN STATUS
|
||||
podinfo valid
|
||||
podinfo-ingress app.example.com valid
|
||||
```
|
||||
|
||||
Now you can access the podinfo UI using your domain address.
|
||||
|
||||
Note that you should be using HTTPS when exposing production workloads on the internet.
You can obtain free TLS certs from Let's Encrypt; read this [guide](https://github.com/stefanprodan/eks-contour-ingress)
on how to configure cert-manager to secure Contour with TLS certificates.
|
||||
|
||||
### Automated canary promotion
|
||||
|
||||
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators
like HTTP request success rate, average request duration and pod health.
Based on the KPI analysis, a canary is promoted or aborted.
|
||||
|
||||

|
||||
|
||||
A canary deployment is triggered by changes in any of the following objects:
|
||||
* Deployment PodSpec (container image, command, ports, env, resources, etc)
|
||||
* ConfigMaps and Secrets mounted as volumes or mapped to environment variables
|
||||
|
||||
Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:3.1.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
|
||||
```text
|
||||
kubectl -n test describe canary/podinfo
|
||||
|
||||
Status:
|
||||
Canary Weight: 0
|
||||
Failed Checks: 0
|
||||
Phase: Succeeded
|
||||
Events:
|
||||
New revision detected! Scaling up podinfo.test
|
||||
Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
|
||||
Pre-rollout check acceptance-test passed
|
||||
Advance podinfo.test canary weight 5
|
||||
Advance podinfo.test canary weight 10
|
||||
Advance podinfo.test canary weight 15
|
||||
Advance podinfo.test canary weight 20
|
||||
Advance podinfo.test canary weight 25
|
||||
Advance podinfo.test canary weight 30
|
||||
Advance podinfo.test canary weight 35
|
||||
Advance podinfo.test canary weight 40
|
||||
Advance podinfo.test canary weight 45
|
||||
Advance podinfo.test canary weight 50
|
||||
Copying podinfo.test template spec to podinfo-primary.test
|
||||
Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
|
||||
Routing all traffic to primary
|
||||
Promotion completed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary.
|
||||
|
||||
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
|
||||
|
||||
You can monitor all canaries with:
|
||||
|
||||
```bash
|
||||
watch kubectl get canaries --all-namespaces
|
||||
|
||||
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
|
||||
test podinfo Progressing 15 2019-12-20T14:05:07Z
|
||||
```
|
||||
|
||||
If you’ve enabled the Slack notifications, you should receive the following messages:
|
||||
|
||||

|
||||
|
||||
### Automated rollback
|
||||
|
||||
During the canary analysis you can generate HTTP 500 errors or high latency to test if Flagger pauses the rollout.
|
||||
|
||||
Trigger a canary deployment:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:3.1.2
|
||||
```
|
||||
|
||||
Exec into the load tester pod with:
|
||||
|
||||
```bash
|
||||
kubectl -n test exec -it deploy/flagger-loadtester bash
|
||||
```
|
||||
|
||||
Generate HTTP 500 errors:
|
||||
|
||||
```bash
|
||||
hey -z 1m -c 5 -q 5 http://app.example.com/status/500
|
||||
```
|
||||
|
||||
Generate latency:
|
||||
|
||||
```bash
|
||||
watch -n 1 curl http://app.example.com/delay/1
|
||||
```
|
||||
|
||||
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
|
||||
the canary is scaled to zero and the rollout is marked as failed.
|
||||
|
||||
```text
|
||||
kubectl -n projectcontour logs deploy/flagger -f | jq .msg
|
||||
|
||||
New revision detected! Starting canary analysis for podinfo.test
|
||||
Pre-rollout check acceptance-test passed
|
||||
Advance podinfo.test canary weight 5
|
||||
Advance podinfo.test canary weight 10
|
||||
Advance podinfo.test canary weight 15
|
||||
Halt podinfo.test advancement success rate 69.17% < 99%
|
||||
Halt podinfo.test advancement success rate 61.39% < 99%
|
||||
Halt podinfo.test advancement success rate 55.06% < 99%
|
||||
Halt podinfo.test advancement request duration 1.20s > 500ms
|
||||
Halt podinfo.test advancement request duration 1.45s > 500ms
|
||||
Rolling back podinfo.test failed checks threshold reached 5
|
||||
Canary failed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
If you’ve enabled the Slack notifications, you’ll receive a message if the progress deadline is exceeded,
|
||||
or if the analysis reached the maximum number of failed checks:
|
||||
|
||||

|
||||
|
||||
### A/B Testing
|
||||
|
||||
Besides weighted routing, Flagger can be configured to route traffic to the canary based on HTTP match conditions.
|
||||
In an A/B testing scenario, you'll be using HTTP headers or cookies to target a certain segment of your users.
|
||||
This is particularly useful for frontend applications that require session affinity.
|
||||
|
||||

|
||||
|
||||
Edit the canary analysis, remove the max/step weight and add the match conditions and iterations:
|
||||
|
||||
```yaml
|
||||
canaryAnalysis:
|
||||
interval: 1m
|
||||
threshold: 5
|
||||
iterations: 10
|
||||
match:
|
||||
- headers:
|
||||
x-canary:
|
||||
exact: "insider"
|
||||
webhooks:
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
metadata:
|
||||
cmd: "hey -z 1m -q 5 -c 5 -H 'X-Canary: insider' -host app.example.com http://envoy.projectcontour"
|
||||
```
|
||||
|
||||
The above configuration will run an analysis for ten minutes targeting users that have an `X-Canary: insider` header.
|
||||
|
||||
You can also use an HTTP cookie. To target all users with a cookie set to `insider`, the match condition should be:
|
||||
|
||||
```yaml
|
||||
match:
|
||||
- headers:
|
||||
cookie:
|
||||
suffix: "insider"
|
||||
webhooks:
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
metadata:
|
||||
cmd: "hey -z 1m -q 5 -c 5 -H 'Cookie: canary=insider' -host app.example.com http://envoy.projectcontour"
|
||||
```
|
||||
|
||||
Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:3.1.3
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts the A/B test:
|
||||
|
||||
```text
|
||||
kubectl -n projectcontour logs deploy/flagger -f | jq .msg
|
||||
|
||||
New revision detected! Starting canary analysis for podinfo.test
|
||||
Advance podinfo.test canary iteration 1/10
|
||||
Advance podinfo.test canary iteration 2/10
|
||||
Advance podinfo.test canary iteration 3/10
|
||||
Advance podinfo.test canary iteration 4/10
|
||||
Advance podinfo.test canary iteration 5/10
|
||||
Advance podinfo.test canary iteration 6/10
|
||||
Advance podinfo.test canary iteration 7/10
|
||||
Advance podinfo.test canary iteration 8/10
|
||||
Advance podinfo.test canary iteration 9/10
|
||||
Advance podinfo.test canary iteration 10/10
|
||||
Copying podinfo.test template spec to podinfo-primary.test
|
||||
Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
|
||||
Routing all traffic to primary
|
||||
Promotion completed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
The web browser user agent header allows user segmentation based on device or OS.
|
||||
|
||||
For example, if you want to route all mobile users to the canary instance:
|
||||
|
||||
```yaml
|
||||
match:
|
||||
- headers:
|
||||
user-agent:
|
||||
prefix: "Mobile"
|
||||
```
|
||||
|
||||
Or if you want to target only Android users:
|
||||
|
||||
```yaml
|
||||
match:
|
||||
- headers:
|
||||
user-agent:
|
||||
prefix: "Android"
|
||||
```
|
||||
|
||||
Or a specific browser version:
|
||||
|
||||
```yaml
|
||||
match:
|
||||
- headers:
|
||||
user-agent:
|
||||
suffix: "Firefox/71.0"
|
||||
```
|
||||
319 docs/gitbook/usage/crossover-progressive-delivery.md (new file)
@@ -0,0 +1,319 @@
|
||||
# Envoy/Crossover Canary Deployments
|
||||
|
||||
This guide shows you how to use Envoy, [Crossover](https://github.com/mumoshu/crossover) and Flagger to automate canary deployments.
|
||||
|
||||
Crossover is a minimal Envoy xDS implementation that supports the [Service Mesh Interface](https://smi-spec.io/).
|
||||
|
||||
### Prerequisites
|
||||
|
||||
Flagger requires a Kubernetes cluster **v1.11** or newer and Envoy paired with [Crossover](https://github.com/mumoshu/crossover) sidecar.
|
||||
|
||||
Create a test namespace:
|
||||
|
||||
```bash
|
||||
kubectl create ns test
|
||||
```
|
||||
|
||||
Install Envoy along with the Crossover sidecar with Helm:
|
||||
|
||||
```bash
|
||||
helm repo add crossover https://mumoshu.github.io/crossover
|
||||
|
||||
helm upgrade --install envoy crossover/envoy \
|
||||
--namespace test \
|
||||
-f <(cat <<EOF
|
||||
smi:
|
||||
apiVersions:
|
||||
trafficSplits: v1alpha1
|
||||
upstreams:
|
||||
podinfo:
|
||||
smi:
|
||||
enabled: true
|
||||
backends:
|
||||
podinfo-primary:
|
||||
port: 9898
|
||||
weight: 100
|
||||
podinfo-canary:
|
||||
port: 9898
|
||||
weight: 0
|
||||
EOF
|
||||
)
|
||||
```
|
||||
|
||||
Install Flagger and the Prometheus add-on in the same namespace as Envoy:
|
||||
|
||||
```bash
|
||||
helm repo add flagger https://flagger.app
|
||||
|
||||
helm upgrade -i flagger flagger/flagger \
|
||||
--namespace test \
|
||||
--set prometheus.install=true \
|
||||
--set meshProvider=smi:crossover
|
||||
```
|
||||
|
||||
Optionally you can enable Slack notifications:
|
||||
|
||||
```bash
|
||||
helm upgrade -i flagger flagger/flagger \
|
||||
--reuse-values \
|
||||
--namespace test \
|
||||
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
|
||||
--set slack.channel=general \
|
||||
--set slack.user=flagger
|
||||
```
|
||||
|
||||
### Bootstrap
|
||||
|
||||
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
|
||||
then creates a series of objects (Kubernetes deployments, ClusterIP services, SMI traffic splits).
|
||||
These objects expose the application on the mesh and drive the canary analysis and promotion.
|
||||
There is no SMI object you need to create yourself.
|
||||
|
||||
Create a deployment and a horizontal pod autoscaler:
|
||||
|
||||
```bash
|
||||
kubectl apply -k github.com/weaveworks/flagger//kustomize/podinfo
|
||||
```
|
||||
|
||||
Deploy the load testing service to generate traffic during the canary analysis:
|
||||
|
||||
```bash
|
||||
helm upgrade -i flagger-loadtester flagger/loadtester \
|
||||
--namespace=test
|
||||
```
|
||||
|
||||
Create a canary custom resource:
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1alpha3
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
# specify mesh provider if it isn't the default one
|
||||
# provider: "smi:crossover"
|
||||
# deployment reference
|
||||
targetRef:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
name: podinfo
|
||||
# the maximum time in seconds for the canary deployment
|
||||
# to make progress before it is rolled back (default 600s)
|
||||
progressDeadlineSeconds: 60
|
||||
# HPA reference (optional)
|
||||
autoscalerRef:
|
||||
apiVersion: autoscaling/v2beta1
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
# ClusterIP port number
|
||||
port: 9898
|
||||
# container port number or name (optional)
|
||||
targetPort: 9898
|
||||
# define the canary analysis timing and KPIs
|
||||
canaryAnalysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 1m
|
||||
# max number of failed metric checks before rollback
|
||||
threshold: 5
|
||||
# max traffic percentage routed to canary
|
||||
# percentage (0-100)
|
||||
maxWeight: 50
|
||||
# canary increment step
|
||||
# percentage (0-100)
|
||||
stepWeight: 5
|
||||
# Prometheus checks
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
# minimum req success rate (non 5xx responses)
|
||||
# percentage (0-100)
|
||||
threshold: 99
|
||||
interval: 1m
|
||||
- name: request-duration
|
||||
# maximum req duration P99
|
||||
# milliseconds
|
||||
threshold: 500
|
||||
interval: 30s
|
||||
# testing (optional)
|
||||
webhooks:
|
||||
- name: acceptance-test
|
||||
type: pre-rollout
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 30s
|
||||
metadata:
|
||||
type: bash
|
||||
cmd: "curl -sd 'test' http://podinfo-canary.test:9898/token | grep token"
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
cmd: "hey -z 1m -q 10 -c 2 -H 'Host: podinfo.test' http://envoy.test:10000/"
|
||||
```
|
||||
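The load test reaches the mesh through the Envoy service on port 10000, and the `Host` header selects the upstream; you can issue an equivalent request by hand, e.g. from the load tester pod:

```bash
# the Host header tells Envoy which upstream to route to
curl -sH 'Host: podinfo.test' http://envoy.test:10000/
```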
|
||||
Save the above resource as podinfo-canary.yaml and then apply it:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ./podinfo-canary.yaml
|
||||
```
|
||||
|
||||
After a couple of seconds Flagger will create the canary objects:
|
||||
|
||||
```bash
|
||||
# applied
|
||||
deployment.apps/podinfo
|
||||
horizontalpodautoscaler.autoscaling/podinfo
|
||||
canary.flagger.app/podinfo
|
||||
|
||||
# generated
|
||||
deployment.apps/podinfo-primary
|
||||
horizontalpodautoscaler.autoscaling/podinfo-primary
|
||||
service/podinfo
|
||||
service/podinfo-canary
|
||||
service/podinfo-primary
|
||||
trafficsplits.split.smi-spec.io/podinfo
|
||||
```
|
||||
|
||||
After the bootstrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test` will be routed
to the primary pods. During the canary analysis, the `podinfo-canary.test` address can be used to target the canary pods directly.
|
||||
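You can inspect the backend weights Flagger manages during the analysis through the generated SMI object (a sketch; the exact schema depends on the TrafficSplit API version in use):

```bash
# 'podinfo' is the TrafficSplit generated by Flagger
kubectl -n test get trafficsplit podinfo -oyaml
```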
|
||||
### Automated canary promotion
|
||||
|
||||
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators like HTTP request success rate, average request duration and pod health. Based on the KPI analysis, a canary is promoted or aborted, and the result is published to Slack.
|
||||
|
||||

|
||||
|
||||
A canary deployment is triggered by changes in any of the following objects:
|
||||
|
||||
* Deployment PodSpec (container image, command, ports, env, resources, etc)
|
||||
* ConfigMaps and Secrets mounted as volumes or mapped to environment variables
|
||||
|
||||
Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:3.1.5
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
|
||||
```text
|
||||
kubectl -n test describe canary/podinfo
|
||||
|
||||
Status:
|
||||
Canary Weight: 0
|
||||
Failed Checks: 0
|
||||
Phase: Succeeded
|
||||
Events:
|
||||
New revision detected! Scaling up podinfo.test
|
||||
Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
|
||||
Pre-rollout check acceptance-test passed
|
||||
Advance podinfo.test canary weight 5
|
||||
Advance podinfo.test canary weight 10
|
||||
Advance podinfo.test canary weight 15
|
||||
Advance podinfo.test canary weight 20
|
||||
Advance podinfo.test canary weight 25
|
||||
Advance podinfo.test canary weight 30
|
||||
Advance podinfo.test canary weight 35
|
||||
Advance podinfo.test canary weight 40
|
||||
Advance podinfo.test canary weight 45
|
||||
Advance podinfo.test canary weight 50
|
||||
Copying podinfo.test template spec to podinfo-primary.test
|
||||
Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
|
||||
Routing all traffic to primary
|
||||
Promotion completed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary.
|
||||
|
||||
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
|
||||
|
||||
During the analysis the canary’s progress can be monitored with Grafana.
|
||||
|
||||
Flagger comes with a Grafana dashboard made for canary analysis. Install Grafana with Helm:
|
||||
|
||||
```bash
|
||||
helm upgrade -i flagger-grafana flagger/grafana \
|
||||
--namespace=test \
|
||||
--set url=http://flagger-prometheus:9090
|
||||
```
|
||||
|
||||
Run:
|
||||
|
||||
```bash
|
||||
kubectl port-forward --namespace test svc/flagger-grafana 3000:80
|
||||
```
|
||||
|
||||
The Envoy dashboard URL is
|
||||
http://localhost:3000/d/flagger-envoy/envoy-canary?refresh=10s&orgId=1&var-namespace=test&var-target=podinfo
|
||||
|
||||

|
||||
|
||||
You can monitor all canaries with:
|
||||
|
||||
```bash
|
||||
watch kubectl get canaries --all-namespaces
|
||||
|
||||
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
|
||||
test podinfo Progressing 15 2019-10-02T14:05:07Z
|
||||
prod frontend Succeeded 0 2019-10-02T16:15:07Z
|
||||
prod backend Failed 0 2019-10-02T17:05:07Z
|
||||
```
|
||||
|
||||
If you’ve enabled the Slack notifications, you should receive the following messages:
|
||||
|
||||

|
||||
|
||||
### Automated rollback
|
||||
|
||||
During the canary analysis you can generate HTTP 500 errors or high latency to test if Flagger pauses the rollout.
|
||||
|
||||
Trigger a canary deployment:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:3.1.2
|
||||
```
|
||||
|
||||
Exec into the load tester pod with:
|
||||
|
||||
```bash
|
||||
kubectl -n test exec -it deploy/flagger-loadtester bash
|
||||
```
|
||||
|
||||
Generate HTTP 500 errors:
|
||||
|
||||
```bash
|
||||
hey -z 1m -c 5 -q 5 -H 'Host: podinfo.test' http://envoy.test:10000/status/500
|
||||
```
|
||||
|
||||
Generate latency:
|
||||
|
||||
```bash
|
||||
watch -n 1 curl -H 'Host: podinfo.test' http://envoy.test:10000/delay/1
|
||||
```
|
||||
|
||||
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
|
||||
the canary is scaled to zero and the rollout is marked as failed.
|
||||
|
||||
```text
|
||||
kubectl -n test logs deploy/flagger -f | jq .msg
|
||||
|
||||
New revision detected! Starting canary analysis for podinfo.test
|
||||
Pre-rollout check acceptance-test passed
|
||||
Advance podinfo.test canary weight 5
|
||||
Advance podinfo.test canary weight 10
|
||||
Advance podinfo.test canary weight 15
|
||||
Halt podinfo.test advancement success rate 69.17% < 99%
|
||||
Halt podinfo.test advancement success rate 61.39% < 99%
|
||||
Halt podinfo.test advancement success rate 55.06% < 99%
|
||||
Halt podinfo.test advancement request duration 1.20s > 0.5s
|
||||
Halt podinfo.test advancement request duration 1.45s > 0.5s
|
||||
Rolling back podinfo.test failed checks threshold reached 5
|
||||
Canary failed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
If you’ve enabled the Slack notifications, you’ll receive a message if the progress deadline is exceeded,
|
||||
or if the analysis reached the maximum number of failed checks:
|
||||
|
||||

|
||||
@@ -54,15 +54,13 @@ kubectl create ns test
|
||||
Create a deployment and a horizontal pod autoscaler:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ${REPO}/artifacts/gloo/deployment.yaml
|
||||
kubectl apply -f ${REPO}/artifacts/gloo/hpa.yaml
|
||||
kubectl -n test apply -k github.com/weaveworks/flagger//kustomize/podinfo
|
||||
```
|
||||
|
||||
Deploy the load testing service to generate traffic during the canary analysis:
|
||||
|
||||
```bash
|
||||
helm upgrade -i flagger-loadtester flagger/loadtester \
|
||||
--namespace=test
|
||||
kubectl -n test apply -k github.com/weaveworks/flagger//kustomize/tester
|
||||
```
|
||||
|
||||
Create a virtual service definition that references an upstream group that will be generated by Flagger
|
||||
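A rough sketch of such a definition, assuming Gloo's `gateway.solo.io/v1` API (matcher syntax varies between Gloo releases, so treat the field names as illustrative):

```yaml
apiVersion: gateway.solo.io/v1
kind: VirtualService
metadata:
  name: podinfo
  namespace: test
spec:
  virtualHost:
    domains:
      - 'app.example.com'
    routes:
      - matcher:
          prefix: /
        routeAction:
          # upstream group generated and updated by Flagger;
          # field names assumed, verify against your Gloo version
          upstreamGroup:
            name: podinfo
            namespace: test
```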
@@ -118,8 +116,10 @@ spec:
|
||||
# to make progress before it is rolled back (default 600s)
|
||||
progressDeadlineSeconds: 60
|
||||
service:
|
||||
# container port
|
||||
# ClusterIP port number
|
||||
port: 9898
|
||||
# container port number or name (optional)
|
||||
targetPort: 9898
|
||||
canaryAnalysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 10s
|
||||
@@ -143,14 +143,21 @@ spec:
|
||||
# milliseconds
|
||||
threshold: 500
|
||||
interval: 30s
|
||||
# load testing (optional)
|
||||
# testing (optional)
|
||||
webhooks:
|
||||
- name: acceptance-test
|
||||
type: pre-rollout
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 10s
|
||||
metadata:
|
||||
type: bash
|
||||
cmd: "curl -sd 'test' http://podinfo-canary:9898/token | grep token"
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
type: cmd
|
||||
cmd: "hey -z 1m -q 10 -c 2 http://app.example.com/"
|
||||
cmd: "hey -z 2m -q 5 -c 2 -host app.example.com http://gateway-proxy-v2.gloo-system"
|
||||
```
|
||||
|
||||
Save the above resource as podinfo-canary.yaml and then apply it:
|
||||
@@ -198,7 +205,7 @@ Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:2.0.1
|
||||
podinfod=stefanprodan/podinfo:3.1.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
@@ -252,19 +259,19 @@ Trigger another canary deployment:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:2.0.2
|
||||
podinfod=stefanprodan/podinfo:3.1.2
|
||||
```
|
||||
|
||||
Generate HTTP 500 errors:
|
||||
|
||||
```bash
|
||||
watch curl http://app.example.com/status/500
|
||||
watch curl -H 'Host: app.example.com' http://gateway-proxy-v2.gloo-system/status/500
|
||||
```
|
||||
|
||||
Generate high latency:
|
||||
|
||||
```bash
|
||||
watch curl http://app.example.com/delay/2
|
||||
watch curl -H 'Host: app.example.com' http://gateway-proxy-v2.gloo-system/delay/2
|
||||
```
|
||||
|
||||
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
|
||||
@@ -335,13 +342,13 @@ Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:2.0.3
|
||||
podinfod=stefanprodan/podinfo:3.1.3
|
||||
```
|
||||
|
||||
Generate 404s:
|
||||
|
||||
```bash
|
||||
watch curl http://app.example.com/status/400
|
||||
watch curl -H 'Host: app.example.com' http://gateway-proxy-v2.gloo-system/status/400
|
||||
```
|
||||
|
||||
Watch Flagger logs:
|
||||
|
||||
@@ -67,8 +67,10 @@ spec:
|
||||
# to make progress before it is rolled back (default 600s)
|
||||
progressDeadlineSeconds: 60
|
||||
service:
|
||||
# container port
|
||||
# ClusterIP port number
|
||||
port: 9898
|
||||
# container port number or name (optional)
|
||||
targetPort: 9898
|
||||
canaryAnalysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 30s
|
||||
@@ -100,12 +102,12 @@ spec:
|
||||
timeout: 30s
|
||||
metadata:
|
||||
type: bash
|
||||
cmd: "curl -sd 'test' http://podinfo-canary:9898/token | grep token"
|
||||
cmd: "curl -sd 'test' http://podinfo-canary.test:9898/token | grep token"
|
||||
- name: load-test
|
||||
type: rollout
|
||||
url: http://flagger-loadtester.test/
|
||||
metadata:
|
||||
cmd: "hey -z 2m -q 10 -c 2 http://podinfo:9898/"
|
||||
cmd: "hey -z 2m -q 10 -c 2 http://podinfo-canary.test:9898/"
|
||||
```
|
||||
|
||||
Save the above resource as podinfo-canary.yaml and then apply it:
|
||||
@@ -150,7 +152,7 @@ Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:2.0.1
|
||||
podinfod=stefanprodan/podinfo:3.1.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
@@ -208,7 +210,7 @@ Trigger another canary deployment:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:2.0.2
|
||||
podinfod=stefanprodan/podinfo:3.1.2
|
||||
```
|
||||
|
||||
Exec into the load tester pod with:
|
||||
@@ -297,7 +299,7 @@ Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:2.0.3
|
||||
podinfod=stefanprodan/podinfo:3.1.3
|
||||
```
|
||||
|
||||
Generate 404s:
|
||||
@@ -444,7 +446,7 @@ Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:2.0.4
|
||||
podinfod=stefanprodan/podinfo:3.1.4
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts the A/B testing:
|
||||
|
||||
@@ -56,8 +56,7 @@ kubectl create ns test
|
||||
Create a deployment and a horizontal pod autoscaler:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ${REPO}/artifacts/nginx/deployment.yaml
|
||||
kubectl apply -f ${REPO}/artifacts/nginx/hpa.yaml
|
||||
kubectl apply -k github.com/weaveworks/flagger//kustomize/podinfo
|
||||
```
|
||||
|
||||
Deploy the load testing service to generate traffic during the canary analysis:
|
||||
@@ -86,7 +85,7 @@ spec:
|
||||
paths:
|
||||
- backend:
|
||||
serviceName: podinfo
|
||||
servicePort: 9898
|
||||
servicePort: 80
|
||||
```
|
||||
|
||||
Save the above resource as podinfo-ingress.yaml and then apply it:
|
||||
@@ -124,8 +123,10 @@ spec:
|
||||
# to make progress before it is rollback (default 600s)
|
||||
progressDeadlineSeconds: 60
|
||||
service:
|
||||
# container port
|
||||
port: 9898
|
||||
# ClusterIP port number
|
||||
port: 80
|
||||
# container port number or name
|
||||
targetPort: 9898
|
||||
canaryAnalysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 10s
|
||||
@@ -144,13 +145,19 @@ spec:
|
||||
# percentage (0-100)
|
||||
threshold: 99
|
||||
interval: 1m
|
||||
# load testing (optional)
|
||||
# testing (optional)
|
||||
webhooks:
|
||||
- name: acceptance-test
|
||||
type: pre-rollout
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 30s
|
||||
metadata:
|
||||
type: bash
|
||||
cmd: "curl -sd 'test' http://podinfo-canary/token | grep token"
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
type: cmd
|
||||
cmd: "hey -z 1m -q 10 -c 2 http://app.example.com/"
|
||||
```
|
||||
|
||||
@@ -190,7 +197,7 @@ Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:2.0.1
|
||||
podinfod=stefanprodan/podinfo:3.1.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
@@ -244,7 +251,7 @@ Trigger another canary deployment:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:2.0.2
|
||||
podinfod=stefanprodan/podinfo:3.1.2
|
||||
```
|
||||
|
||||
Generate HTTP 500 errors:
|
||||
@@ -314,7 +321,7 @@ Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:2.0.3
|
||||
podinfod=stefanprodan/podinfo:3.1.3
|
||||
```
|
||||
|
||||
Generate high response latency:
|
||||
@@ -373,12 +380,10 @@ Edit the canary analysis, remove the max/step weight and add the match condition
|
||||
interval: 1m
|
||||
webhooks:
|
||||
- name: load-test
|
||||
url: http://localhost:8888/
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
type: cmd
|
||||
cmd: "hey -z 1m -q 10 -c 2 -H 'Cookie: canary=always' http://app.example.com/"
|
||||
logCmdOutput: "true"
|
||||
```
|
||||
|
||||
The above configuration will run an analysis for ten minutes targeting users that have a `canary` cookie set to `always` or
|
||||
@@ -388,7 +393,7 @@ Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:2.0.4
|
||||
podinfod=stefanprodan/podinfo:3.1.4
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts the A/B testing:
|
||||
@@ -419,4 +424,3 @@ Events:
|
||||
Warning Synced 15s flagger Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
|
||||
Normal Synced 5s flagger Promotion completed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
|
||||
@@ -2,28 +2,31 @@
|
||||
|
||||
This guide shows you how to use Istio and Flagger to automate canary deployments.
|
||||
|
||||

|
||||
|
||||
### Bootstrap
|
||||
|
||||
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
|
||||
then creates a series of objects (Kubernetes deployments, ClusterIP services, Istio destination rules and virtual services).
|
||||
These objects expose the application inside the mesh and drive the canary analysis and promotion.
|
||||
|
||||
Create a test namespace with Istio sidecar injection enabled:
|
||||
|
||||
```bash
|
||||
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
|
||||
|
||||
kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
|
||||
kubectl create ns test
|
||||
kubectl label namespace test istio-injection=enabled
|
||||
```
|
||||
|
||||
Create a deployment and a horizontal pod autoscaler:
|
||||
|
||||
```bash
|
||||
kubectl apply -f ${REPO}/artifacts/canaries/deployment.yaml
|
||||
kubectl apply -f ${REPO}/artifacts/canaries/hpa.yaml
|
||||
kubectl apply -k github.com/weaveworks/flagger//kustomize/podinfo
|
||||
```
|
||||
|
||||
Deploy the load testing service to generate traffic during the canary analysis:
|
||||
|
||||
```bash
|
||||
kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
|
||||
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
|
||||
kubectl apply -k github.com/weaveworks/flagger//kustomize/tester
|
||||
```
|
||||
|
||||
Create a canary custom resource (replace example.com with your own domain):
|
||||
@@ -49,14 +52,26 @@ spec:
|
||||
kind: HorizontalPodAutoscaler
|
||||
name: podinfo
|
||||
service:
|
||||
# container port
|
||||
# service port number
|
||||
port: 9898
|
||||
# container port number or name (optional)
|
||||
targetPort: 9898
|
||||
# Istio gateways (optional)
|
||||
gateways:
|
||||
- public-gateway.istio-system.svc.cluster.local
|
||||
# Istio virtual service host names (optional)
|
||||
hosts:
|
||||
- app.example.com
|
||||
# Istio traffic policy (optional)
|
||||
trafficPolicy:
|
||||
tls:
|
||||
# use ISTIO_MUTUAL when mTLS is enabled
|
||||
mode: DISABLE
|
||||
# Istio retry policy (optional)
|
||||
retries:
|
||||
attempts: 3
|
||||
perTryTimeout: 1s
|
||||
retryOn: "gateway-error,connect-failure,refused-stream"
|
||||
canaryAnalysis:
|
||||
# schedule interval (default 60s)
|
||||
interval: 1m
|
||||
@@ -104,6 +119,8 @@ kubectl apply -f ./podinfo-canary.yaml
|
||||
When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary.
|
||||
The canary analysis will run for five minutes while validating the HTTP metrics and rollout hooks every minute.
|
||||
|
||||

|
||||
|
||||
After a couple of seconds Flagger will create the canary objects:
|
||||
|
||||
```bash
|
||||
@@ -129,7 +146,7 @@ Trigger a canary deployment by updating the container image:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:2.0.1
|
||||
podinfod=stefanprodan/podinfo:3.1.1
|
||||
```
|
||||
|
||||
Flagger detects that the deployment revision changed and starts a new rollout:
|
||||
@@ -164,6 +181,11 @@ Events:
|
||||
|
||||
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
|
||||
|
||||
A canary deployment is triggered by changes in any of the following objects:
|
||||
* Deployment PodSpec (container image, command, ports, env, resources, etc)
|
||||
* ConfigMaps mounted as volumes or mapped to environment variables
|
||||
* Secrets mounted as volumes or mapped to environment variables
|
||||
|
||||
You can monitor all canaries with:
|
||||
|
||||
```bash
|
||||
@@ -183,7 +205,7 @@ Trigger another canary deployment:
|
||||
|
||||
```bash
|
||||
kubectl -n test set image deployment/podinfo \
|
||||
podinfod=stefanprodan/podinfo:2.0.2
|
||||
podinfod=stefanprodan/podinfo:3.1.2
|
||||
```
|
||||
|
||||
Exec into the load tester pod with:
|
||||
@@ -229,3 +251,82 @@ Events:
|
||||
Warning Synced 1m flagger Rolling back podinfo.test failed checks threshold reached 10
|
||||
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
|
||||
```
|
||||
|
||||
### Traffic mirroring
|
||||
|
||||

|
||||
|
||||
For applications that perform read operations, Flagger can be configured to drive canary releases with traffic mirroring.
|
||||
Istio traffic mirroring will copy each incoming request, sending one request to the primary and one to the canary service.
|
||||
The response from the primary is sent back to the user and the response from the canary is discarded.
|
||||
Metrics are collected on both requests so that the deployment will only proceed if the canary metrics are within the threshold values.
|
||||
|
||||
Note that mirroring should be used for requests that are **idempotent** or capable of being processed twice
|
||||
(once by the primary and once by the canary).
|
||||
|
||||
You can enable mirroring by replacing `stepWeight/maxWeight` with `iterations` and
|
||||
by setting `canaryAnalysis.mirror` to `true`:
|
||||
|
||||
```yaml
|
||||
apiVersion: flagger.app/v1alpha3
|
||||
kind: Canary
|
||||
metadata:
|
||||
name: podinfo
|
||||
namespace: test
|
||||
spec:
|
||||
canaryAnalysis:
|
||||
# schedule interval
|
||||
interval: 1m
|
||||
# max number of failed metric checks before rollback
|
||||
threshold: 5
|
||||
# total number of iterations
|
||||
iterations: 10
|
||||
# enable traffic shadowing
|
||||
mirror: true
|
||||
metrics:
|
||||
- name: request-success-rate
|
||||
threshold: 99
|
||||
interval: 1m
|
||||
- name: request-duration
|
||||
threshold: 500
|
||||
interval: 1m
|
||||
webhooks:
|
||||
- name: acceptance-test
|
||||
type: pre-rollout
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 30s
|
||||
metadata:
|
||||
type: bash
|
||||
cmd: "curl -sd 'test' http://podinfo-canary:9898/token | grep token"
|
||||
- name: load-test
|
||||
url: http://flagger-loadtester.test/
|
||||
timeout: 5s
|
||||
metadata:
|
||||
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
|
||||
```
|
||||
|
||||
With the above configuration, Flagger will run a canary release with the following steps:
|
||||
* detect new revision (deployment spec, secrets or configmaps changes)
|
||||
* scale from zero the canary deployment
|
||||
* wait for the HPA to set the canary minimum replicas
|
||||
* check canary pods health
|
||||
* run the acceptance tests
|
||||
* abort the canary release if tests fail
|
||||
* start the load tests
|
||||
* mirror traffic from primary to canary
|
||||
* check request success rate and request duration every minute
|
||||
* abort the canary release if the metrics check failure threshold is reached
|
||||
* stop traffic mirroring after the number of iterations is reached
|
||||
* route live traffic to the canary pods
|
||||
* promote the canary (update the primary secrets, configmaps and deployment spec)
|
||||
* wait for the primary deployment rollout to finish
|
||||
* wait for the HPA to set the primary minimum replicas
|
||||
* check primary pods health
|
||||
* switch live traffic back to primary
|
||||
* scale to zero the canary
|
||||
* send notification with the canary analysis result
|
||||
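While an analysis with mirroring is in progress, you can check the shadow route Flagger applies to the generated virtual service (a sketch; the exact YAML path depends on the Istio version):

```bash
# 'podinfo' is the virtual service Flagger generates for the canary
kubectl -n test get virtualservice podinfo -oyaml | grep -B2 -A3 mirror
```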
|
||||
The above procedure can be extended with [custom metrics](https://docs.flagger.app/how-it-works#custom-metrics) checks,
|
||||
[webhooks](https://docs.flagger.app/how-it-works#webhooks),
|
||||
[manual promotion](https://docs.flagger.app/how-it-works#manual-gating) approval and
|
||||
[Slack or MS Teams](https://docs.flagger.app/usage/alerting) notifications.
|
||||
|
||||
BIN docs/logo/weaveworks.png (new binary file, 3.0 KiB, not shown)
73 go.mod
@@ -1,72 +1,25 @@
module github.com/weaveworks/flagger

go 1.12
go 1.13

require (
    cloud.google.com/go v0.37.4 // indirect
    github.com/Masterminds/semver v1.4.2
    github.com/beorn7/perks v1.0.0 // indirect
    github.com/bxcodec/faker v2.0.1+incompatible // indirect
    github.com/envoyproxy/go-control-plane v0.8.0 // indirect
    github.com/gogo/googleapis v1.2.0 // indirect
    github.com/gogo/protobuf v1.2.1
    github.com/golang/protobuf v1.3.1 // indirect
    github.com/golang/snappy v0.0.1 // indirect
    github.com/google/btree v1.0.0 // indirect
    github.com/Masterminds/semver/v3 v3.0.3
    github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef // indirect
    github.com/google/go-cmp v0.3.0
    github.com/hashicorp/consul v1.4.4 // indirect
    github.com/hashicorp/go-cleanhttp v0.5.1 // indirect
    github.com/hashicorp/go-retryablehttp v0.5.3 // indirect
    github.com/hashicorp/go-rootcerts v1.0.0 // indirect
    github.com/hashicorp/go-sockaddr v1.0.2 // indirect
    github.com/hashicorp/hcl v1.0.0 // indirect
    github.com/hashicorp/serf v0.8.3 // indirect
    github.com/hashicorp/vault v1.1.0 // indirect
    github.com/googleapis/gnostic v0.2.0 // indirect
    github.com/imdario/mergo v0.3.7 // indirect
    github.com/k0kubun/pp v3.0.1+incompatible // indirect
    github.com/linkerd/linkerd2 v0.0.0-20190221030352-5e47cb150a33 // indirect
    github.com/lyft/protoc-gen-validate v0.0.14 // indirect
    github.com/mattn/go-colorable v0.1.1 // indirect
    github.com/mattn/go-isatty v0.0.7 // indirect
    github.com/mitchellh/go-homedir v1.1.0 // indirect
    github.com/mitchellh/go-testing-interface v1.0.0 // indirect
    github.com/mitchellh/hashstructure v1.0.0
    github.com/pkg/errors v0.8.1
    github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829
    github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 // indirect
    github.com/prometheus/common v0.3.0 // indirect
    github.com/prometheus/procfs v0.0.0-20190416084830-8368d24ba045 // indirect
    github.com/radovskyb/watcher v1.0.6 // indirect
    github.com/ryanuber/go-glob v1.0.0 // indirect
    github.com/solo-io/gloo v0.13.17
    github.com/solo-io/go-utils v0.7.11 // indirect
    github.com/solo-io/solo-kit v0.6.3
    github.com/solo-io/supergloo v0.3.11
    go.opencensus.io v0.20.2 // indirect
    go.uber.org/zap v1.9.1
    golang.org/x/crypto v0.0.0-20190418161225-b43e412143f9 // indirect
    golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect
    github.com/prometheus/client_golang v1.0.0
    go.uber.org/atomic v1.3.2 // indirect
    go.uber.org/multierr v1.1.0 // indirect
    go.uber.org/zap v1.10.0
    gopkg.in/h2non/gock.v1 v1.0.14
    k8s.io/api v0.0.0-20190620073856-dcce3486da33
    k8s.io/apiextensions-apiserver v0.0.0-20190315093550-53c4693659ed // indirect
    k8s.io/apimachinery v0.0.0-20190620073744-d16981aedf33
    k8s.io/client-go v11.0.0+incompatible
    k8s.io/code-generator v0.0.0-20190620073620-d55040311883
    k8s.io/kube-openapi v0.0.0-20190418160015-6b3d3b2d5666 // indirect
)

replace (
    github.com/google/uuid => github.com/google/uuid v1.0.0
    golang.org/x/crypto => golang.org/x/crypto v0.0.0-20181025213731-e84da0312774
    golang.org/x/net => golang.org/x/net v0.0.0-20190206173232-65e2d4e15006
    golang.org/x/sync => golang.org/x/sync v0.0.0-20181108010431-42b317875d0f
    golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503
    golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9
    k8s.io/api => k8s.io/api v0.0.0-20190620073856-dcce3486da33
    k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20190620073744-d16981aedf33
    k8s.io/client-go => k8s.io/client-go v0.0.0-20190620074045-585a16d2e773
    k8s.io/code-generator => k8s.io/code-generator v0.0.0-20190620073620-d55040311883
    k8s.io/component-base => k8s.io/component-base v0.0.0-20190620074451-e5083e713460
    k8s.io/api v0.17.0
    k8s.io/apimachinery v0.17.1-beta.0
    k8s.io/client-go v0.17.0
    k8s.io/code-generator v0.17.0
    k8s.io/utils v0.0.0-20191114184206-e782cd3c129f
)

replace k8s.io/klog => github.com/stefanprodan/klog v0.0.0-20190418165334-9cbb78b20423
451 go.sum
@@ -1,288 +1,127 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.37.4 h1:glPeL3BQJsbF6aIIYfZizMwc5LTYz250bDMjttbBGAU=
cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc=
github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Masterminds/sprig v2.18.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
github.com/Masterminds/semver/v3 v3.0.3 h1:znjIyLfpXEDQjOIEWh+ehwpTU14UzUPub3c3sm36u14=
github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc=
github.com/Netflix/go-expect v0.0.0-20180928190340-9d1f4485533b/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/airbrake/gobrake v3.6.1+incompatible/go.mod h1:wM4gu3Cn0W0K7GUuVWnlXZU11AGBXMILnrdOU8Kn00o=
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30/go.mod h1:4AJxUpXUhv4N+ziTvIcWWXgeorXpxPZOfk9HdEVr96M=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310 h1:BUAU3CGlLvorLI26FmByPp2eC2qla6E1Tw+scpcg/to=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/avast/retry-go v2.2.0+incompatible h1:m+w7mVLWa/oKqX2xYqiEKQQkeGH8DDEXB/XnjS54Wyw=
github.com/avast/retry-go v2.2.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
github.com/bugsnag/bugsnag-go v1.4.0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
github.com/bxcodec/faker v2.0.1+incompatible h1:P0KUpUw5w6WJXwrPfv35oc91i4d8nf40Nwln+M/+faA=
github.com/bxcodec/faker v2.0.1+incompatible/go.mod h1:BNzfpVdTwnFJ6GtfYTcQu6l6rHShT+veBxNCnjCx5XM=
github.com/certifi/gocertifi v0.0.0-20190105021004-abcd57078448/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4=
github.com/chai2010/gettext-go v0.0.0-20170215093142-bf70f2a70fb1/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/coreos/etcd v3.3.12+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/cpuguy83/go-md2man v1.0.8/go.mod h1:N6JayAiVKtlHSnuTCeuLSQVs75hb8q+dYQLjr7cDsKY=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.3+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful-swagger12 v0.0.0-20170926063155-7524189396c6/go.mod h1:qr0VowGBT4CS4Q8vFF8BSeKz34PuqKGxs/L0IAQA9DQ=
github.com/emirpasic/gods v1.9.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
github.com/envoyproxy/go-control-plane v0.8.0 h1:uE6Fp4fOcAJdc1wTQXLJ+SYistkbG1dNoi6Zs1+Ybvk=
github.com/envoyproxy/go-control-plane v0.8.0/go.mod h1:GSSbY9P1neVhdY7G4wu+IK1rk/dqhiCC/4ExuWJZVuk=
github.com/envoyproxy/protoc-gen-validate v0.0.14 h1:YBW6/cKy9prEGRYLnaGa4IDhzxZhRCtKsax8srGKDnM=
github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.1.0+incompatible h1:K1MDoo4AZ4wU0GIU/fPmtZg7VpzLjCxu+UwBD1FvwOc=
github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fgrosse/zaptest v1.1.0 h1:sK9hP0/xBoNX5qfFo3KWFluDXfc809APomI1QXuYELA=
github.com/fgrosse/zaptest v1.1.0/go.mod h1:vMnRSul6kW7kIUXZgnZZcDwyTn8k49ODfAULL8nmL5w=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/zapr v0.1.1/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
github.com/go-openapi/spec v0.19.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY=
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
github.com/go-openapi/validate v0.19.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/googleapis v1.2.0 h1:Z0v3OJDotX9ZBpdz2V+AI7F4fITSZhVE5mg6GQppwMM=
github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I=
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g=
github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/goph/emperror v0.17.1/go.mod h1:+ZbQ+fUNO/6FNiUo0ujtMjhgad9Xa6fQL9KhH4LNHic=
github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f h1:ShTPMJQes6tubcjzGMODIVG5hlrCeImaBnZzKF2N8SM=
github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
github.com/hashicorp/consul v1.4.4 h1:DR1+5EGgnPsd/LIsK3c9RDvajcsV5GOkGQBSNd3dpn8=
github.com/hashicorp/consul v1.4.4/go.mod h1:mFrjN1mfidgJfYP1xrJCF+AfRhr6Eaqhb2+sfyn/OOI=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.3 h1:MWYcmct5EtKz0efYooPcL0yNkem+7kWxqXDi/UIh+8k=
github.com/hashicorp/serf v0.8.3/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k=
github.com/hashicorp/vault v1.1.0 h1:v79NUgO5xCZnXVzUkIqFOXtP8YhpnHAi1fk3eo9cuOE=
github.com/hashicorp/vault v1.1.0/go.mod h1:KfSyffbKxoVyspOdlaGVjIuwLobi07qD1bAbosPMpP0=
github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A=
github.com/hinshun/vt10x v0.0.0-20180809195222-d55458df857c/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI=
github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88 h1:uC1QfSlInpQF+M0ao65imhwqKnz3Q2z/d8PWZRMQvDM=
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
github.com/k0kubun/pp v2.3.0+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg=
github.com/k0kubun/pp v3.0.1+incompatible h1:3tqvf7QgUnZ5tXO6pNAZlrvHgl6DvifjDrd9g2S9Z40=
github.com/k0kubun/pp v3.0.1+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kevinburke/ssh_config v0.0.0-20180830205328-81db2a75821e/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/linkerd/linkerd2 v0.0.0-20190221030352-5e47cb150a33 h1:+eM/rkJK2iCSi0fDzp218TzJSAglSGeI985YYgRS/mY=
github.com/linkerd/linkerd2 v0.0.0-20190221030352-5e47cb150a33/go.mod h1:n9QnL65Uv2gAG97S0t1q2aYmP33wPQ3oAh0+DJhQSSw=
github.com/lyft/protoc-gen-validate v0.0.14 h1:xbdDVIHd0Xq5Bfzu+8JR9s7mFmJPMvNLmfGhgcHJdFU=
github.com/lyft/protoc-gen-validate v0.0.14/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.1 h1:G1f5SKeVxmagw/IyvzvtZE4Gybcc4Tr1tf7I8z0XgOg=
github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.7 h1:UvyT9uN+3r7yLEYSlJsbQGdsaB/a0DlgWP3pql6iwOc=
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mitchellh/cli v1.0.0 h1:iGBIsUe3+HZ/AD/Vd7DErOt5sU9fa8Uj7A2s1aggv1Y=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/hashstructure v1.0.0 h1:ZkRJX1CyOoTkar7p/mLS5TZU4nJ1Rn/F8u9dGS02Q3Y=
github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
@@ -296,165 +135,142 @@ github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32 h1:W6apQkHrMkS0Muv8G/TipAy
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1 h1:ccV59UEOTzVDnDUEFdT95ZzHVZ+5+158q8+SJb2QV5w=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20190104105734-b1c43a6df3ae/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.3.0 h1:taZ4h8Tkxv2kNyoSctBvfXEHmBmxrwmIidZTIaHons4=
github.com/prometheus/common v0.3.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190104112138-b1a0a9a36d74/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190416084830-8368d24ba045 h1:Raos9GP+3BlCBicScEQ+SjTLpYYac34fZMoeqj9McSM=
github.com/prometheus/procfs v0.0.0-20190416084830-8368d24ba045/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/radovskyb/watcher v1.0.6 h1:8WIQ9UxEYMZjem1OwU7dVH94DXXk9mAIE1i8eqHD+IY=
github.com/radovskyb/watcher v1.0.6/go.mod h1:78okwvY5wPdzcb1UYnip1pvrZNIVEIh/Cm+ZuvsUYIg=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
github.com/rollbar/rollbar-go v1.0.2/go.mod h1:AcFs5f0I+c71bpHlXNNDbOWJiKwjFDtISeXco0L5PKQ=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/solo-io/gloo v0.13.17 h1:rbNmO7e5+0vEq5krkO9/Rcp16PqqvepyGD0j3xPdmhg=
github.com/solo-io/gloo v0.13.17/go.mod h1:dNnxchbq5F4ITJhX/0fy5brfbsj6vlW+AwgnTlqZN0A=
github.com/solo-io/go-utils v0.7.11 h1:3Kmk50e6nYqyf7MBY1473XkH5L7qO/Nigjx+t6jEOQo=
github.com/solo-io/go-utils v0.7.11/go.mod h1:7r+dFKdqJNOjx+odeLFqg8SOwVHyVVG1P0EPt6rNLN8=
github.com/solo-io/solo-kit v0.6.3 h1:s/SxcgG7YSjW7wu7iQER5MCHSzeXg1b/lCZRazQ0IMw=
github.com/solo-io/solo-kit v0.6.3/go.mod h1:oBaQ6tOwuO97u7w+s3TeI08YLHcbiWemInx0XkDfKFw=
github.com/solo-io/supergloo v0.3.11 h1:IwnrL2xojowzb7k+V2wCG3I6WrelzXsezqJiraaVxIM=
github.com/solo-io/supergloo v0.3.11/go.mod h1:hJuUwop5IMBL9Qc2/G+f+/PfIWPt/2nGr66fDcuhrn8=
github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stefanprodan/klog v0.0.0-20190418165334-9cbb78b20423 h1:qTtUiiNM+iq4IXOwHofKW5+jzvkvnNVz0GFRxwukUlY=
github.com/stefanprodan/klog v0.0.0-20190418165334-9cbb78b20423/go.mod h1:TYstY5LQfzxFVm9MiiMg7kZ39sc5cue/6CFoY5KgXn8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/technosophos/moniker v0.0.0-20180509230615-a5dbd03a2245/go.mod h1:O1c8HleITsZqzNZDjSNzirUGsMT0oGu9LhHKoJrqO+A=
github.com/xanzy/ssh-agent v0.2.0/go.mod h1:0NyE30eGUDliuLEHJgYte/zncp2zdTStcOnWhgSqHD8=
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk=
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20181025213731-e84da0312774 h1:a4tQYYYuK9QdeO/+kEvNYyuR21S+7ve5EANok6hABhI=
golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 h1:7KByu05hhLed2MO29w7p1XfZvZ13m8mub3shuVftRs0=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495 h1:I6A9Ag9FpEKOjcKrRNjQkPHawoXIhKyTGfvvjFAiiAk=
golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/net v0.0.0-20190206173232-65e2d4e15006 h1:bfLnR+k0tq5Lqt6dflRLcZiz6UaXCMt3vhYJ1l4FQ80=
golang.org/x/net v0.0.0-20190206173232-65e2d4e15006/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA=
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 h1:7Pf/N3ln54fsGsAPsSwSfFhxXGKWHMIRUI/T5x1GP90=
golang.org/x/tools v0.0.0-20190313210603-aa82965741a9/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72 h1:bw9doJza/SFBEweII/rHQh338oozWyiFsBRHtrflcws=
golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 h1:OB/uP/Puiu5vS5QMRPrXCDWUPb+kt8f1KW8oQzFejQw=
gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0=
|
||||
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
|
||||
gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e h1:jRyg0XfpwWlhEV8mDfdNGBeSJM2fuyh9Yjrnd8kF2Ts=
|
||||
gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ=
|
||||
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20181221175505-bd9b4fb69e2f/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107 h1:xtNn7qFlagY2mQNFHMSRPjT2RkOV4OXM7P5TVy9xATo=
|
||||
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM=
|
||||
google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
gopkg.in/AlecAivazis/survey.v1 v1.8.2/go.mod h1:iBNOmqKz/NUbZx3bA+4hAGLRC7fSK7tgtVDT4tB22XA=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
@@ -463,56 +279,39 @@ gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/h2non/gock.v1 v1.0.14 h1:fTeu9fcUvSnLNacYvYI54h+1/XEteDyHvrVCZEEEYNM=
gopkg.in/h2non/gock.v1 v1.0.14/go.mod h1:sX4zAkdYX1TRGJ2JY156cFspQn4yRWn6p9EMdODlynE=
gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/square/go-jose.v2 v2.3.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/src-d/go-billy.v4 v4.2.1/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk=
gopkg.in/src-d/go-git-fixtures.v3 v3.1.1/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g=
gopkg.in/src-d/go-git.v4 v4.10.0/go.mod h1:Vtut8izDyrM8BUVQnzJ+YvmNcem2J89EmfZYCkLokZk=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
istio.io/gogo-genproto v0.0.0-20190124151557-6d926a6e6feb/go.mod h1:eIDJ6jNk/IeJz6ODSksHl5Aiczy5JUq6vFhJWI5OtiI=
k8s.io/api v0.0.0-20190620073856-dcce3486da33 h1:aC/EvF9PT1h8NeMEOVwTel8xxbZwq0SZnxXNThEROnE=
k8s.io/api v0.0.0-20190620073856-dcce3486da33/go.mod h1:ldk709UQo/iedNLOW7J06V9QSSGY5heETKeWqnPoqF8=
k8s.io/api v0.17.0 h1:H9d/lw+VkZKEVIUc8F3wgiQ+FUXTTr21M87jXLU7yqM=
k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI=
k8s.io/apiextensions-apiserver v0.0.0-20190111034747-7d26de67f177+incompatible/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE=
k8s.io/apiextensions-apiserver v0.0.0-20190315093550-53c4693659ed h1:rCteec//ELIjZMfjIGQbVtZooyaofqDJwsmWwWKItNs=
k8s.io/apiextensions-apiserver v0.0.0-20190315093550-53c4693659ed/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE=
k8s.io/apimachinery v0.0.0-20190620073744-d16981aedf33 h1:Lkd+QNFOB3DqrDyWo796aodJgFJautn/M+t9IGearPc=
k8s.io/apimachinery v0.0.0-20190620073744-d16981aedf33/go.mod h1:9q5NW/mMno/nwbRZd/Ks2TECgi2PTZ9cwarf4q+ze6Q=
k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
k8s.io/apimachinery v0.17.1-beta.0 h1:0Wl/KpAiFOMe9to5h8x2Y6JnjV+BEWJiTcUk1Vx7zdE=
k8s.io/apimachinery v0.17.1-beta.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
k8s.io/apiserver v0.0.0-20190111033246-d50e9ac5404f+incompatible/go.mod h1:6bqaTSOSJavUIXUtfaR9Os9JtTCm8ZqH2SUl2S60C4w=
k8s.io/cli-runtime v0.0.0-20190111035321-c7263d800665+incompatible/go.mod h1:qWnH3/b8sp/l7EvlDh7ulDU3UWA4P4N1NFbEEP791tM=
k8s.io/client-go v0.0.0-20190620074045-585a16d2e773 h1:XyjDnwRO9icfyrN7HRSa8o3NqdPOEQoVW8vWizuqyQQ=
k8s.io/client-go v0.0.0-20190620074045-585a16d2e773/go.mod h1:miKCC7C/WGwJqcDctyJtAnP3Gss0Y5KwURqJ7q5pfEw=
k8s.io/client-go v0.17.0 h1:8QOGvUGdqDMFrm9sD6IUFl256BcffynGoe80sxgTEDg=
k8s.io/client-go v0.17.0/go.mod h1:TYgR6EUHs6k45hb6KWjVD6jFZvJV4gHDikv/It0xz+k=
k8s.io/code-generator v0.0.0-20190620073620-d55040311883 h1:NWWNvN6IdpmQvZ43rVccCI8GPUrheK8XNdqeKycw0DI=
k8s.io/code-generator v0.0.0-20190620073620-d55040311883/go.mod h1:+a+9g9W0llgbgvx6qOb+VbeZPH5km1FrVyMQe9/jkQY=
k8s.io/code-generator v0.17.0 h1:y+KWtDWNqlJzJu/kUy8goJZO0X71PGIpAHLX8a0JYk0=
k8s.io/code-generator v0.17.0/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s=
k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6 h1:4s3/R4+OYYYUKptXPhZKjQ04WJ6EhQQVFdjOFvCazDk=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20190822140433-26a664648505 h1:ZY6yclUKVbZ+SdWnkfY+Je5vrMpKOxmGeKRbsXVmqYM=
k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/helm v2.13.0+incompatible/go.mod h1:LZzlS4LQBHfciFOurYBFkCMTaZ0D1l+p0teMg7TSULI=
k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
k8s.io/kube-openapi v0.0.0-20190401085232-94e1e7b7574c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
k8s.io/kube-openapi v0.0.0-20190418160015-6b3d3b2d5666 h1:hlzz2EvLPcefAcG/j0tOZpds4LWSElZzxpZuhxbblbc=
k8s.io/kube-openapi v0.0.0-20190418160015-6b3d3b2d5666/go.mod h1:jqYp7BKXW0Jl+F1dWXBieUmcHKMPpGHGWA0uqfpOZZ4=
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU=
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
k8s.io/kubernetes v1.13.2/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0=
k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7 h1:8r+l4bNWjRlsFYlQJnKJ2p7s1YQPj4XyXiJVqDHRx7c=
k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0=
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f h1:GiPwtSzdP43eI1hpPCbROQCCIgCuiMMNF8YUVLF3vJo=
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I=
sigs.k8s.io/controller-runtime v0.1.10/go.mod h1:HFAYoOh6XMV+jKF1UjFwrknPbowfyHEHHRdJMf2jMX8=
sigs.k8s.io/structured-merge-diff v0.0.0-20181214233322-d43a45b8663b/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
vbom.ml/util v0.0.0-20180919145318-efcd4e0f9787/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI=
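Checksum entries like the ones above are generated by the Go toolchain rather than edited by hand. A minimal sketch of refreshing them after a dependency bump, assuming a standard Go modules workflow:

```bash
# Recompute go.sum from go.mod (adds missing and drops unused entries).
go mod tidy
# Check that the module cache matches the recorded checksums.
go mod verify
```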
@@ -30,7 +30,7 @@ chmod +x ${CODEGEN_PKG}/generate-groups.sh

${CODEGEN_PKG}/generate-groups.sh all \
  github.com/weaveworks/flagger/pkg/client github.com/weaveworks/flagger/pkg/apis \
  "appmesh:v1beta1 istio:v1alpha3 flagger:v1alpha3 smi:v1alpha1" \
  "flagger:v1alpha3 appmesh:v1beta1 istio:v1alpha3 smi:v1alpha1 gloo:v1 projectcontour:v1" \
  --output-base "${TEMP_DIR}" \
  --go-header-file ${SCRIPT_ROOT}/hack/boilerplate.go.txt
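For context, regenerating the clientsets after adding API groups such as `gloo:v1` and `projectcontour:v1` is typically a single script run. A hedged sketch, assuming this hunk belongs to `hack/update-codegen.sh` and that `${CODEGEN_PKG}` resolves to a checkout of `k8s.io/code-generator`:

```bash
# Hypothetical invocation from the repo root; the script itself
# resolves SCRIPT_ROOT, TEMP_DIR and the code-generator package.
./hack/update-codegen.sh
```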
@@ -19,6 +19,14 @@ Note that you'll need kubectl 1.14 to run the above command or you can downl

kustomize build github.com/weaveworks/flagger//kustomize/istio | kubectl apply -f -
```

Install Flagger for AWS App Mesh:

```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/appmesh
```

This deploys Flagger in the `appmesh-system` namespace and sets the metrics server URL to the App Mesh Prometheus instance.
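A quick, hedged way to confirm the overlay applied the expected settings (the deployment name `flagger` is assumed from the kustomization shown later in this diff):

```bash
# Print the container args rendered by the strategic merge patch,
# e.g. -mesh-provider=appmesh and the appmesh-prometheus metrics server.
kubectl -n appmesh-system get deployment flagger \
  -o jsonpath='{.spec.template.spec.containers[0].args}'
```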
Install Flagger for Linkerd:

```bash
@@ -33,6 +41,14 @@ If you want to install a specific Flagger release, add the version number to the
kubectl apply -k github.com/weaveworks/flagger//kustomize/linkerd?ref=0.18.0
```

Install Flagger for Contour:

```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/contour
```

This deploys Flagger and Prometheus in the `projectcontour` namespace and sets Prometheus to scrape Contour's Envoy instances.
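A hedged verification step (the deployment names are assumptions based on the overlay layout):

```bash
# Both the Flagger and Prometheus deployments should be listed here.
kubectl -n projectcontour get deployments
```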
## Generic installer

Install Flagger and Prometheus:
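The command for the generic installer is cut off in this hunk; a sketch following the pattern of the other overlays, with the `kustomize/kubernetes` path being an assumption:

```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/kubernetes
```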
5 kustomize/appmesh/kustomization.yaml Normal file
@@ -0,0 +1,5 @@
namespace: appmesh-system
bases:
- ../base/flagger
patchesStrategicMerge:
- patch.yaml
29 kustomize/appmesh/patch.yaml Normal file
@@ -0,0 +1,29 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: flagger
spec:
  template:
    spec:
      containers:
      - name: flagger
        args:
        - -log-level=info
        - -mesh-provider=appmesh
        - -metrics-server=http://appmesh-prometheus:9090
        - -slack-user=flagger
        - -slack-channel=
        - -slack-url=
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: flagger
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flagger
subjects:
- kind: ServiceAccount
  name: flagger
  namespace: appmesh-system
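A hedged usage sketch for this overlay, mirroring the install commands in the docs above:

```bash
# Render the App Mesh overlay and apply it; equivalent to kubectl apply -k.
kustomize build kustomize/appmesh | kubectl apply -f -
```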
@@ -33,6 +33,26 @@ spec:
  - name: Weight
    type: string
    JSONPath: .status.canaryWeight
  - name: FailedChecks
    type: string
    JSONPath: .status.failedChecks
    priority: 1
  - name: Interval
    type: string
    JSONPath: .spec.canaryAnalysis.interval
    priority: 1
  - name: Mirror
    type: boolean
    JSONPath: .spec.canaryAnalysis.mirror
    priority: 1
  - name: StepWeight
    type: string
    JSONPath: .spec.canaryAnalysis.stepWeight
    priority: 1
  - name: MaxWeight
    type: string
    JSONPath: .spec.canaryAnalysis.maxWeight
    priority: 1
  - name: LastTransitionTime
    type: string
    JSONPath: .status.lastTransitionTime
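Columns with `priority: 1` are hidden in the default table and only rendered in wide output, so the new fields do not clutter `kubectl get`. A short illustration (the resources listed are whatever Canaries exist in the cluster):

```bash
# Default view: only priority 0 columns such as Weight are shown.
kubectl get canaries --all-namespaces
# Wide view adds FailedChecks, Interval, Mirror, StepWeight and MaxWeight.
kubectl get canaries --all-namespaces -o wide
```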
@@ -48,13 +68,16 @@ spec:
provider:
  description: Traffic management provider
  type: string
metricsServer:
  description: Prometheus URL
  type: string
progressDeadlineSeconds:
  description: Deployment progress deadline
  type: number
targetRef:
  description: Deployment selector
  type: object
  required: ['apiVersion', 'kind', 'name']
  required: ["apiVersion", "kind", "name"]
  properties:
    apiVersion:
      type: string
@@ -67,7 +90,7 @@ spec:
anyOf:
- type: string
- type: object
  required: ['apiVersion', 'kind', 'name']
  required: ["apiVersion", "kind", "name"]
  properties:
    apiVersion:
      type: string
@@ -80,7 +103,7 @@ spec:
anyOf:
- type: string
- type: object
  required: ['apiVersion', 'kind', 'name']
  required: ["apiVersion", "kind", "name"]
  properties:
    apiVersion:
      type: string
@@ -90,7 +113,7 @@ spec:
type: string
service:
  type: object
  required: ['port']
  required: ["port"]
  properties:
    port:
      description: Container port number
@@ -98,6 +121,11 @@ spec:
portName:
  description: Container port name
  type: string
targetPort:
  description: Container target port name
  anyOf:
  - type: string
  - type: number
portDiscovery:
  description: Enable port discovery
  type: boolean
@@ -167,6 +195,9 @@ spec:
stepWeight:
  description: Canary incremental traffic percentage step
  type: number
mirror:
  description: Mirror traffic to canary before shifting
  type: boolean
match:
  description: A/B testing match conditions
  anyOf:
@@ -178,7 +209,7 @@ spec:
properties:
  items:
    type: object
    required: ['name', 'threshold']
    required: ["name", "threshold"]
    properties:
      name:
        description: Name of the Prometheus metric
@@ -199,7 +230,7 @@ spec:
properties:
  items:
    type: object
    required: ['name', 'url', 'timeout']
    required: ["name", "url"]
    properties:
      name:
        description: Name of the webhook
@@ -212,7 +243,9 @@ spec:
- confirm-rollout
- pre-rollout
- rollout
- confirm-promotion
- post-rollout
- event
url:
  description: URL address of this webhook
  type: string
@@ -237,6 +270,7 @@ spec:
- Initialized
- Waiting
- Progressing
- Promoting
- Finalising
- Succeeded
- Failed
@@ -262,7 +296,7 @@ spec:
properties:
  items:
    type: object
    required: ['type', 'status', 'reason']
    required: ["type", "status", "reason"]
    properties:
      lastTransitionTime:
        description: LastTransitionTime of this condition
@@ -15,11 +15,12 @@ spec:
        app: flagger
      annotations:
        prometheus.io/scrape: "true"
        prometheus.io/port: "8080"
    spec:
      serviceAccountName: flagger
      containers:
      - name: flagger
        image: weaveworks/flagger:0.16.0
        image: weaveworks/flagger:0.21.0
        imagePullPolicy: IfNotPresent
        ports:
        - name: http
@@ -8,4 +8,4 @@ resources:
- deployment.yaml
images:
- name: weaveworks/flagger
  newTag: 0.18.3
  newTag: 0.22.0
@@ -71,6 +71,11 @@ rules:
  - virtualservices
  - gateways
  verbs: ["*"]
- apiGroups:
  - projectcontour.io
  resources:
  - httpproxies
  verbs: ["*"]
- nonResourceURLs:
  - /version
  verbs:
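One hedged way to check that the new `projectcontour.io` rule is effective, assuming Flagger runs under the `flagger` service account in the `projectcontour` namespace:

```bash
# Should print "yes" once the updated ClusterRole is bound.
kubectl auth can-i list httpproxies.projectcontour.io \
  --as=system:serviceaccount:projectcontour:flagger
```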
Some files were not shown because too many files have changed in this diff.