Compare commits

..

233 Commits

Author SHA1 Message Date
Stefan Prodan
2b6047d124 Merge pull request #782 from fluxcd/release-v1.6.1
Release v1.6.1
2021-01-19 11:37:25 +02:00
Stefan Prodan
05e832ed55 Release v1.6.1
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2021-01-19 11:10:34 +02:00
Stefan Prodan
45fddab9a9 Merge pull request #781 from kyanagimoto/secret-restart-analysis
Add missing TrackedConfig field to Canary status CRD
2021-01-19 11:01:49 +02:00
vito-laurenza-zocdoc
34e9c0da6b chore: reproduce restart analysis issue
Signed-off-by: kyanagimoto <koichiyanagimoto@gmail.com>
2021-01-19 16:11:34 +09:00
Stefan Prodan
9891375c20 Merge pull request #778 from fluxcd/e2e-updates
e2e: Update Istio to v1.8.2 and Contour to v1.11.0
2021-01-15 14:38:11 +02:00
Stefan Prodan
377f145a3f e2e: Update Istio to v1.8.2
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2021-01-15 13:47:05 +02:00
Stefan Prodan
6c7fff080f e2e: Update Contour to v1.11.0
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2021-01-15 13:39:48 +02:00
Stefan Prodan
b1a168c5f2 Merge pull request #777 from h-r-k-matsumoto/update-istio-v1alpha3
Update HTTPMatchRequest to match Istio's definitions
2021-01-15 13:34:29 +02:00
Hiroki Matsumoto
d15df9ae88 Reflect the latest Istio version v1alpha3
Signed-off-by: Hiroki Matsumoto <hiroki.matsumoto.ggg@gmail.com>
2021-01-12 21:08:29 +09:00
Stefan Prodan
c7d93d9ca7 Merge pull request #774 from fluxcd/release-v1.6.0
Release v1.6.0
2021-01-05 15:08:08 +02:00
Stefan Prodan
0ae4ccede1 Release v1.6.0
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2021-01-05 14:46:20 +02:00
Stefan Prodan
c6aa66ab94 Merge pull request #772 from fluxcd/crd-v1
Upgrade CRDs to apiextensions.k8s.io/v1
2021-01-05 13:51:40 +02:00
Stefan Prodan
2e10d8bf05 Format docs markdown
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2021-01-05 12:28:56 +02:00
Stefan Prodan
f64295bcee Update Kubernetes packages to v1.20.1
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2021-01-04 22:33:16 +02:00
Stefan Prodan
3fe8119e0c Set Kubernetes min version to 1.16
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2021-01-04 21:35:54 +02:00
Stefan Prodan
b6880213ce Upgrade CRDs to apiextensions.k8s.io/v1
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2021-01-04 17:29:45 +02:00
Stefan Prodan
aca6b2b558 Merge pull request #771 from fluxcd/update-nginx-helm
Update NGINX ingress Helm repository
2021-01-04 16:02:07 +02:00
Stefan Prodan
aa33af25fc Change branch from master to main in docs
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2021-01-04 15:32:19 +02:00
Stefan Prodan
aa3a93da98 Update NGINX ingress Helm repository
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2021-01-04 15:30:26 +02:00
Stefan Prodan
b42db67d85 Merge pull request #770 from fluxcd/fixes
Use fluxcd registry in base kustomization
2021-01-04 14:51:10 +02:00
Stefan Prodan
0d2163cd94 e2e: Expose traefik as NodePort
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2021-01-04 12:11:49 +02:00
Stefan Prodan
371e177ff3 Use fluxcd registry in base kustomization
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2021-01-04 11:11:41 +02:00
Stefan Prodan
e62668ab48 Merge pull request #765 from kdorosh/gloo_route_tables
A/B testing support for Gloo Edge ingress controller
2021-01-04 09:58:38 +02:00
Kevin Dorosh
005e3928e7 Gloo tests pass now, traefik ones have flake?
Signed-off-by: Kevin Dorosh <kcdorosh@gmail.com>
2020-12-31 17:19:41 -05:00
Kevin Dorosh
0d5b2a2277 Might have done previous commit before release finished, kick
Signed-off-by: Kevin Dorosh <kcdorosh@gmail.com>
2020-12-31 17:00:29 -05:00
Kevin Dorosh
bc8cfa91ee Update to Gloo Edge 1.6.0 now that it's released
Signed-off-by: Kevin Dorosh <kcdorosh@gmail.com>
2020-12-31 16:17:01 -05:00
Stefan Prodan
63b217faee Merge pull request #766 from fluxcd/chart-linting-off
Disable Helm chart linting
2020-12-22 18:18:42 +02:00
Stefan Prodan
376bf194b3 Disable Helm chart linting
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-22 18:16:19 +02:00
Kevin Dorosh
a69e9abf3c Remove upstream group
Signed-off-by: Kevin Dorosh <kcdorosh@gmail.com>
2020-12-22 10:20:07 -05:00
Kevin Dorosh
c22529bbd0 codegen
Signed-off-by: Kevin Dorosh <kcdorosh@gmail.com>
2020-12-22 10:18:08 -05:00
Kevin Dorosh
6fd8498f6d We are generating 404s not 400s
Signed-off-by: Kevin Dorosh <kcdorosh@gmail.com>
2020-12-22 10:09:50 -05:00
Kevin Dorosh
39cce0196f Guide is working
Signed-off-by: Kevin Dorosh <kcdorosh@gmail.com>
2020-12-22 10:09:50 -05:00
Kevin Dorosh
4e39e5608c Fix rebase
Signed-off-by: Kevin Dorosh <kcdorosh@gmail.com>
2020-12-22 10:09:20 -05:00
Kevin Dorosh
ba4d16fd76 Remove dated comment
Signed-off-by: Kevin Dorosh <kcdorosh@gmail.com>
2020-12-22 10:09:20 -05:00
Kevin Dorosh
b9f14ee57a Update to use new Gloo Edge 1.6 API
Signed-off-by: Kevin Dorosh <kcdorosh@gmail.com>
2020-12-22 10:09:20 -05:00
Kevin Dorosh
a3f791be17 First pass A/B testing
Signed-off-by: Kevin Dorosh <kcdorosh@gmail.com>
2020-12-22 10:09:20 -05:00
Kevin Dorosh
41497c73f4 Update documented flow
Signed-off-by: Kevin Dorosh <kcdorosh@gmail.com>
2020-12-22 10:09:20 -05:00
Kevin Dorosh
2e1b3fc8de Fix json naming
Signed-off-by: Kevin Dorosh <kcdorosh@gmail.com>
2020-12-22 10:05:55 -05:00
Kevin Dorosh
44cf4d08e9 Fix test
Signed-off-by: Kevin Dorosh <kcdorosh@gmail.com>
2020-12-22 10:05:55 -05:00
Kevin Dorosh
ca07b47523 Fix compile error
Signed-off-by: Kevin Dorosh <kcdorosh@gmail.com>
2020-12-22 10:05:55 -05:00
Kevin Dorosh
8fceafc017 Fix api mistakes
Signed-off-by: Kevin Dorosh <kcdorosh@gmail.com>
2020-12-22 10:05:55 -05:00
Kevin Dorosh
47dcf6a7b9 Add permissions
Signed-off-by: Kevin Dorosh <kcdorosh@gmail.com>
2020-12-22 10:05:55 -05:00
Kevin Dorosh
c63ec2d95d Update gloo logic to use route tables, cleanup
Signed-off-by: Kevin Dorosh <kcdorosh@gmail.com>
2020-12-22 10:05:55 -05:00
Kevin Dorosh
f4aeb98744 Add route table codegen
Signed-off-by: Kevin Dorosh <kcdorosh@gmail.com>
2020-12-22 10:05:29 -05:00
Kevin Dorosh
e6aefb8f4b Initial commit
Signed-off-by: Kevin Dorosh <kcdorosh@gmail.com>
2020-12-22 10:03:41 -05:00
Kevin Dorosh
6cf1f35eca Small docs fixes
Signed-off-by: Kevin Dorosh <kcdorosh@gmail.com>
2020-12-22 10:03:41 -05:00
Stefan Prodan
cff742d7c4 Merge pull request #764 from fluxcd/release-v1.5.0
Release v1.5.0
2020-12-22 16:55:29 +02:00
Stefan Prodan
67f8f414bf Merge pull request #763 from fluxcd/ghcr
Publish multi-arch image to GitHub Container Registry
2020-12-22 16:54:37 +02:00
Stefan Prodan
ecf73e967a Release v1.5.0
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-22 16:18:00 +02:00
Stefan Prodan
7f8986a06d Merge pull request #763 from fluxcd/ghcr
Publish multi-arch image to GitHub Container Registry
2020-12-22 15:16:10 +02:00
Stefan Prodan
ec6aab2c8d Publish multi-arch image to GitHub Container Registry
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-22 14:53:01 +02:00
Stefan Prodan
b8625d5e1e Merge pull request #762 from fluxcd/label-prefix
Add e2e tests for label prefix inclusion
2020-12-22 13:26:33 +02:00
Stefan Prodan
0fa4654034 Add label prefix e2e test
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-22 12:57:43 +02:00
Stefan Prodan
6349dbf5c0 Include app.kubernetes.io labels by default
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-22 12:45:03 +02:00
Stefan Prodan
c8cec8e18b Enable running tests on demand
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-22 12:41:30 +02:00
Stefan Prodan
70114e3fd3 Merge pull request #756 from fluxcd/copyright-flux
Copyright Flux authors
2020-12-21 20:09:33 +02:00
Stefan Prodan
cd75c5fa25 Copyright Flux authors
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-21 19:44:52 +02:00
Stefan Prodan
1535f7aa41 Merge pull request #755 from fluxcd/fluxcd
Rename imports to fluxcd/flagger
2020-12-21 19:24:48 +02:00
Stefan Prodan
90abb7ba5b Rename imports to fluxcd/flagger
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-21 19:02:23 +02:00
Stefan Prodan
e6739711b0 Merge pull request #754 from weaveworks/github-actions
Migrate CI to GitHub Actions
2020-12-21 18:38:28 +02:00
Stefan Prodan
333780e78b Disable CircleCI main build
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-21 17:26:05 +02:00
Stefan Prodan
38777801de Upload coverage to Codecov
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-21 17:04:31 +02:00
Stefan Prodan
3750ed850c Merge pull request #749 from Nerja/pdb
Added PodDisruptionBudget to the Flagger Chart
2020-12-21 16:05:19 +02:00
Stefan Prodan
fda53fbf80 Remove CircleCI testing framework
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-21 11:52:13 +02:00
Stefan Prodan
c8a472c01b Add Skipper e2e tests
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-21 10:05:04 +02:00
Stefan Prodan
ccd64a3df9 Add Kubernetes B/G e2e tests
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-21 01:12:53 +02:00
Stefan Prodan
2ea13cec88 Add Gloo e2e tests
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-21 00:26:36 +02:00
Stefan Prodan
5afc800b11 Cleanup Istio e2e tests
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-20 22:35:17 +02:00
Stefan Prodan
1fb898ac22 Cleanup e2e tests
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-20 22:22:39 +02:00
Stefan Prodan
73b7fc1cfc Add Traefik e2e tests
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-20 22:21:02 +02:00
Stefan Prodan
b25ff35e5b Use test workloads in e2e
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-20 22:08:43 +02:00
Stefan Prodan
4fe4053cdd Add workloads to e2e tests
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-20 21:38:07 +02:00
Stefan Prodan
ed70160583 Add NGINX Ingress e2e tests
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-20 21:20:48 +02:00
Stefan Prodan
bb00f8cabd Add Contour e2e tests
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-20 19:15:10 +02:00
Stefan Prodan
7bef999c41 Add Linkerd e2e tests
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-20 17:59:16 +02:00
Stefan Prodan
a2774d92da Add Istio e2e tests
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-20 17:24:39 +02:00
Stefan Prodan
be9b03d99b Add release workflow
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-20 17:07:32 +02:00
Stefan Prodan
b4af9e5f32 Add build workflow
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-20 17:07:18 +02:00
Stefan Prodan
3ba2762805 Add multi-arch Dockerfile
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-20 17:06:19 +02:00
Stefan Prodan
2884a80d31 Disable CircleCI
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-20 14:13:56 +02:00
Marcus Rodan
54266acfb1 Added entry to README and added default values 2020-12-10 12:11:19 +01:00
Marcus Rodan
9cb44815c4 Add pdb resource 2020-12-10 09:51:04 +01:00
Stefan Prodan
27b2616330 Merge pull request #748 from weaveworks/release-v1.4.2
Release v1.4.2
2020-12-09 14:52:43 +02:00
Stefan Prodan
8ed729cd54 Release v1.4.2
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-09 13:14:33 +02:00
Stefan Prodan
34f7bca33c Merge pull request #747 from weaveworks/update-prom-grafana
Update Prometheus and Grafana
2020-12-09 12:04:15 +02:00
Stefan Prodan
fee442ffe0 Update Prometheus and Grafana
- Prometheus 2.23.0
- Grafana 7.3.4

Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-09 11:20:23 +02:00
Stefan Prodan
eb890ef174 Merge pull request #746 from weaveworks/prom-auth-docs
Add Prometheus basic-auth config to docs
2020-12-09 11:01:54 +02:00
Stefan Prodan
24c61df388 Add Prometheus basic-auth config to docs
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-09 10:19:25 +02:00
Stefan Prodan
bfb3331457 Merge pull request #745 from Nerja/delegate
Fix for VirtualService delegation when analysis is enabled
2020-12-09 10:08:20 +02:00
Marcus Rodan
7fc6f8a04d Changed to using the old e2e test scenario 2020-12-08 18:08:44 +01:00
Marcus Rodan
3c37020260 Changed test file permissions 2020-12-08 16:54:00 +01:00
Marcus Rodan
d05b684dbe Remove log line 2020-12-08 16:14:15 +01:00
Marcus Rodan
da978254b1 Fix issue 2020-12-08 16:12:12 +01:00
Stefan Prodan
0cfeceb3c9 Merge pull request #744 from weaveworks/release-v1.4.1
Release v1.4.1
2020-12-08 15:09:17 +02:00
Stefan Prodan
814aee8f4f Release v1.4.1
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-08 14:43:52 +02:00
Stefan Prodan
48bfb062d8 Merge pull request #743 from relu/exclude-labels-for-cm-secrets
Apply label prefix rules for cm and secrets
2020-12-08 13:37:01 +02:00
Aurel Canciu
08be31f022 Apply label prefix rules for cm and secrets
Copying of Configmaps and Secrets managed through Flagger should now
follow the same label prefix filtering rules as for the workloads.

Extends: #709

Signed-off-by: Aurel Canciu <aurelcanciu@gmail.com>
2020-12-08 12:55:45 +02:00
Stefan Prodan
39380d4ce8 Merge pull request #741 from weaveworks/release-v1.4.0
Release v1.4.0
2020-12-07 11:59:49 +02:00
Stefan Prodan
1b9e575ba5 Release v1.4.0
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-07 11:39:08 +02:00
Stefan Prodan
128c883755 Update docs and examples to HPA v2beta2
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-12-07 11:24:01 +02:00
Stefan Prodan
a244e00057 Merge pull request #740 from tr-fteixeira/hpa-behavior
Add support to HPA behaviors on canaries
2020-12-07 11:13:52 +02:00
Fernando Teixeira
afc063ae9a update tests to use autoscaling/v2beta2 2020-12-06 15:23:08 -05:00
Stefan Prodan
0827622985 Merge pull request #736 from nmlc/traefik
Traefik support
2020-12-06 10:23:16 +02:00
Fernando Teixeira
83dae63989 add support to hpa behaviors on canaries 2020-12-06 00:51:20 -05:00
nmlc
578361a2b0 [traefik] Fix documentation 2020-12-02 05:22:50 +05:00
nmlc
553e1b38bc [traefik] Add documentation 2020-12-01 05:17:33 +05:00
nmlc
635bc83259 [traefik] Add CircleCI tests 2020-11-26 06:00:15 +05:00
nmlc
746507dcc9 [traefik] Remove TraefikService metadata from canary spec 2020-11-26 05:52:42 +05:00
nmlc
adeb585de1 [traefik] add e2e test 2020-11-25 07:55:05 +05:00
nmlc
9c4edc602a [traefik] Update chart: crd & rbac 2020-11-25 07:54:28 +05:00
nmlc
642d3678ec [traefik] Implement observer interface 2020-11-25 07:54:15 +05:00
nmlc
2c1d998c43 [traefik] Implement router interface 2020-11-25 07:54:00 +05:00
nmlc
a3b9ed126d [traefik] Api changes & codegen 2020-11-25 07:50:54 +05:00
Stefan Prodan
2f027de91f Merge pull request #735 from mattchrist/update_faq
fix typo in faq
2020-11-23 17:10:18 +02:00
Matt Christ
b8c9fcfb91 fix typo 2020-11-23 08:16:05 -06:00
Stefan Prodan
1b81ea5a10 Merge pull request #734 from weaveworks/releases-v1.3.0
Release v1.3.0
2020-11-23 14:52:20 +02:00
Stefan Prodan
82bf73e8da Release v1.3.0
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-11-23 13:59:43 +02:00
Stefan Prodan
58de5ab198 Merge pull request #733 from weaveworks/deps-update
Update Istio to v1.8.0
2020-11-23 13:47:16 +02:00
Stefan Prodan
6a0ab874b8 Update Istio docs for v1.8.0
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-11-23 12:58:06 +02:00
Stefan Prodan
8301a2c1ba Update Istio e2e tests to v1.8.0
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-11-23 12:40:07 +02:00
Stefan Prodan
9b5b1a1421 Merge pull request #731 from mattchrist/update_faq
Update faq with correct prometheus queries for Contour & Gloo (fixes #730)
2020-11-23 11:11:24 +02:00
Stefan Prodan
bc5150903c Merge pull request #729 from jddcarreira/supportAppMeshBackendARN
Support AWS App Mesh backends ARN
2020-11-23 11:10:45 +02:00
Matt Christ
0c017f916b Update faq with correct prometheus queries for Contour & Gloo 2020-11-20 09:30:29 -06:00
João Carreira
df6fb2251d Merge branch 'master' of github.com:jddcarreira/flagger into supportAppMeshBackendARN 2020-11-20 12:41:24 +00:00
Stefan Prodan
4c3bab7ed7 Merge pull request #726 from robq99/feat/custom-weights-in-progression
feat: custom weights in progression
2020-11-20 13:54:41 +02:00
João Carreira
74efb784a2 Update App Mesh guide with ARN usage in backends 2020-11-20 11:37:13 +00:00
João Carreira
5a856c98aa Use strings.HasPrefix instead of manual count of prefix 2020-11-20 10:43:28 +00:00
João Carreira
a9c96fa888 update the usage of App Mesh types 2020-11-20 10:34:10 +00:00
João Carreira
7ab9061899 Update AWS App Mesh types 2020-11-20 10:33:25 +00:00
João Carreira
e149125eaa validate if its an ARN 2020-11-19 16:19:16 +00:00
robq99
c53cbac22c fix: tests added, edge cases protection added 2020-11-18 12:20:42 +01:00
robq99
90bccf748b fix: rollout weights moved to canary doc 2020-11-18 10:09:04 +01:00
Robert Kwolek
1ea2e22734 fix: full weight => total weight 2020-11-17 16:30:45 +01:00
Robert Kwolek
2a0473fc9b fix: fullWeight removed, fullWeight => totalWeight 2020-11-17 09:00:21 +01:00
Robert Kwolek
67dca9c7ad Merge remote-tracking branch 'upstream/master' 2020-11-12 20:47:37 +01:00
Stefan Prodan
9667664853 Merge pull request #725 from sfrique/add-qps-and-burts-config-2
Add QPS and Burst configs for kubernetes client
2020-11-12 17:51:13 +02:00
Henrique Fernandes
4db9701c62 Add QPS and Burst configs for kubernetes client
Implemented as requested in PR723
supersedes: https://github.com/weaveworks/flagger/pull/723
fixes: https://github.com/weaveworks/flagger/issues/638
2020-11-11 17:48:27 -03:00
Stefan Prodan
4a805be5cd Merge pull request #721 from kingdonb/patch-3
Fixup some typos
2020-11-04 16:49:50 +02:00
Kingdon Barrett
3abeea43d0 Fix Typo in skipper-progressive-delivery.md
"exmaple" -> example
2020-11-03 18:13:48 -05:00
Kingdon Barrett
f51629d6b5 Fix Typo in nginx-progressive-delivery.md
"exmaple" -> example
2020-11-03 18:11:13 -05:00
Kazuki Nitta
a624a2977e Add support for Istio VirtualService delegation (#715)
Add support for Istio VirtualService delegation
2020-10-28 11:38:54 +02:00
Stefan Prodan
5ae5530c35 Merge pull request #718 from seankhliao/patch-1
fix release date
2020-10-28 10:02:11 +02:00
Sean Liao
1c58301fd7 fix release date 2020-10-27 19:47:07 +01:00
Stefan Prodan
690da0005d Merge pull request #714 from weaveworks/gitops-toolkit-roadmap
Add GitOps Toolkit integration to roadmap
2020-10-22 15:33:39 +03:00
Stefan Prodan
4d9fbc5da6 Merge pull request #709 from worldtiki/exclude-labels
Copy labels from canary to primary workloads based on prefix rules
2020-10-21 18:12:51 +03:00
Daniel Albuquerque
fbece964e0 Copy annotations to deployment and daemonset 2020-10-21 14:20:09 +01:00
Stefan Prodan
d3e855ac86 Add GitOps Toolkit integration to roadmap
Signed-off-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-10-21 15:24:48 +03:00
Daniel Albuquerque
bd536b689f Fix filtering of labels 2020-10-14 15:20:15 +01:00
Daniel Albuquerque
5ca5647fab Remove refs to jenkins 2020-10-13 22:01:49 +01:00
Daniel Albuquerque
bef02d8e1f Rename property from exclude to include 2020-10-13 22:00:31 +01:00
Daniel Albuquerque
8b87cf1757 Missing commit 2020-10-13 21:59:26 +01:00
Daniel Albuquerque
6ec377181a Change from exclude labels to include labels 2020-10-13 21:58:47 +01:00
Daniel Albuquerque
23e59168af Exclude controller labels by prefix 2020-10-11 14:10:16 +01:00
Stefan Prodan
2f58e51242 Merge pull request #704 from Brick7Face/spell-fix
fix spelling of "template" in scheduler_metrics.go
2020-10-01 18:07:53 +03:00
Nate Tranel
79f0381c52 fix spelling of template 2020-10-01 08:06:39 -06:00
Stefan Prodan
14adedba6a Merge pull request #702 from weaveworks/release-v1.2.0
Release v1.2.0
2020-09-29 09:43:46 +03:00
stefanprodan
f2608e627c Release v1.2.0 2020-09-29 09:13:12 +03:00
Stefan Prodan
17237fbb3e Merge pull request #695 from worldtiki/skip_analysis
Do not promote when not ready on skip analysis
2020-09-29 08:48:43 +03:00
Daniel Albuquerque
065c8640e7 Remove metadata tests (unrelated to skip analysis) 2020-09-19 17:39:54 +01:00
Daniel Albuquerque
1a90392400 Add set -o errexit 2020-09-19 15:15:39 +01:00
Daniel Albuquerque
3b6302640f Remove custom metrics (not needed for tests) 2020-09-18 19:51:03 +01:00
Daniel Albuquerque
26d53dcd44 diff test stucture for istio 2020-09-18 19:05:45 +01:00
Daniel Albuquerque
0eee5b7402 Revert changes in skip analysis condition 2020-09-18 18:43:27 +01:00
Daniel Albuquerque
4b098cc7a2 Better assertion for new tests 2020-09-18 18:17:50 +01:00
Daniel Albuquerque
8119acb40a Remove comment :) 2020-09-18 18:00:38 +01:00
Daniel Albuquerque
013949a9f4 Add tests for when canary analysis is skipped 2020-09-18 17:59:16 +01:00
Stefan Prodan
6d65a2c897 Merge pull request #685 from splkforrest/add-label-value
Derive the label selector value from the target matchLabels
2020-09-17 13:19:49 +03:00
Stefan Prodan
fba16aa1f5 Merge pull request #691 from fpetkovski/newrelic-provider
Add New Relic as a metrics provider
2020-09-17 13:15:00 +03:00
Daniel Albuquerque
2907526452 Do not promote when not ready on skip analysis 2020-09-14 19:46:35 +01:00
Stefan Prodan
04a8759159 Merge pull request #692 from erkannt/patch-1
Add eLife to orgs using flagger
2020-09-10 14:56:54 +03:00
Daniel Haarhoff
d62e7f678f Add eLife to orgs using flagger 2020-09-10 12:22:05 +01:00
Filip Petkovski
8b3296c065 Apply suggestions from code review
Co-authored-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-09-10 09:19:36 +02:00
Filip Petkovski
563b1cd88d Add New Relic provider to the documentation 2020-09-10 09:11:33 +02:00
Filip Petkovski
c81e19c48a Add newrelic as to the provider type enum 2020-09-09 18:12:18 +02:00
Filip Petkovski
68e4e1cc68 Apply suggestions from code review
Co-authored-by: Stefan Prodan <stefan.prodan@gmail.com>
2020-09-09 13:51:27 +02:00
Filip Petkovski
2c249e2a92 Add New Relic as a metrics provider 2020-09-09 12:10:53 +02:00
Forrest Thomas
6c35f7611b address PR review comments and remove unnecessary configuration from Canary CR in e2e tests 2020-09-04 09:35:11 -07:00
Forrest Thomas
7793f0b29d add e2e nginx tests for inconsistent naming between service name and selector 2020-09-02 12:46:02 -07:00
Forrest Thomas
930eb8919d add e2e linkerd tests for inconsistent naming between service name and selector 2020-09-02 12:46:02 -07:00
Forrest Thomas
7ade97790e update e2e istio test to query the canary service instead of the apex service 2020-09-02 12:46:02 -07:00
Forrest Thomas
29c3056940 add e2e gloo tests for inconsistent naming between service name and selector 2020-09-02 12:46:02 -07:00
Forrest Thomas
2abfec05c9 add e2e contour tests for inconsistent naming between service name and selector 2020-09-02 12:46:02 -07:00
Forrest Thomas
621150cce6 add e2e istio tests for inconsistent naming between service name and selector 2020-09-02 12:46:02 -07:00
Forrest Thomas
ef57dcf75d add a small test for verifying the label selector is named as expected for daemonsets 2020-09-02 12:46:02 -07:00
Forrest Thomas
1bd7ce4eed add a small test for verifying the label selector is named as expected for deployments 2020-09-02 12:46:02 -07:00
Forrest Thomas
364fd0db65 setup daemonset tests to allow configurable name, label and selector 2020-09-02 12:46:02 -07:00
Forrest Thomas
b378b3eb5d setup deployment tests to allow configurable name, label and selector 2020-09-02 12:46:02 -07:00
Forrest Thomas
0db82b64f7 correct formatting 2020-09-02 12:46:02 -07:00
Forrest Thomas
c9dc5c5936 fix incorrect primary label value during promotion 2020-09-02 12:46:02 -07:00
Forrest Thomas
6f372d787d fix the incorrect primary label value 2020-09-02 12:46:02 -07:00
Forrest Thomas
f70f43bb3d use the existing labelSelector value instead of using the service name as the value 2020-09-02 12:46:02 -07:00
Stefan Prodan
c6f3a87bb3 Merge pull request #684 from xichengliudui/master
add istio 1.7 install command
2020-09-02 12:01:05 +03:00
xichengliudui
8e7aa29ef1 add istio 1.7 install command 2020-09-02 01:30:53 -07:00
Stefan Prodan
fb66cd3d94 Merge pull request #681 from o11n/preservePredicates
Skipper: preserve Predicates
2020-08-29 11:34:18 +03:00
Samuel Lang
e7da8c3f35 Skipper: preserve Predicates
Current implementation did overwrite potentially existing Predicates.

We face the situation that we need to add further Predicates which we need to keep in order to have a proper route setup
2020-08-26 12:00:36 +02:00
Robert Kwolek
a6a38c6a7a fix: go fixes 2020-08-25 12:22:57 +02:00
Robert Kwolek
0ccf97bec1 fix: max weight for steps fixed 2020-08-25 10:34:59 +02:00
Robert Kwolek
ab80bcde44 doc: tutorial link added 2020-08-21 09:01:35 +02:00
Robert Kwolek
a58c0ac2c9 doc: rollout weights moved out of Linkerd 2020-08-21 08:59:44 +02:00
Robert Kwolek
c55fd94b67 doc: weighted rollout doc added 2020-08-20 21:38:11 +02:00
Robert Kwolek
16a6df59ab Merge remote-tracking branch 'upstream/master' 2020-08-20 21:03:43 +02:00
Robert Kwolek
906103daa5 feat: weighted deployments 2020-08-20 20:56:10 +02:00
Takeshi Yoneda
ce69a180d8 Merge pull request #679 from weaveworks/feature/optimized-config-disabled
pkg/canary: add unit test of configIsDisabled and its optimization
2020-08-20 21:33:05 +09:00
mathetake
87c090ad8c pkg/canary: add unit test of configIsDisabled and its optimization 2020-08-20 21:15:27 +09:00
Stefan Prodan
b6d6f32c7f Merge pull request #674 from weaveworks/prep-release-1.1.0
Release v1.1.0
2020-08-19 18:37:38 +03:00
stefanprodan
b6c98799d1 Release v1.1.0 2020-08-19 12:07:39 +03:00
stefanprodan
06dab2e137 Docs tidy up
Split feature comparison into two tables: service mesh and ingress.
2020-08-19 11:29:08 +03:00
Stefan Prodan
6494893812 Merge pull request #671 from stealthybox/per-config-tracker-disable
Support per-config configTracker disable via ConfigMap/Secret annotation
2020-08-19 10:48:09 +03:00
Stefan Prodan
11b82dbcc7 Merge pull request #670 from o11n/feature-Skipper
Skipper Ingress Controller support
2020-08-19 10:47:53 +03:00
David Hohengaßner
e09f44df77 📝 add documentation about Skipper Ingress (#15)
Skipper Ingress Controller support is added with
https://github.com/weaveworks/flagger/pull/670.

This commit add the documentation and links to mention
Skipper is now an available option.

Currently only Canary deployments are supported.
2020-08-18 17:02:53 +02:00
Samuel Lang
ad8233cf46 👷 Add high-level E2E test steps for Skipper
Add e2e-skipper* files for test setup

It does the following things:
* install Skipper ingress with Kustomize
* load Flagger image onto the local cluster
* install Flagger and Prometheus in the flagger-system namespace
2020-08-18 17:02:45 +02:00
leigh capili
dad70a6876 Support per-config configTracker disable via ConfigMap/Secret annotation
This allows a user to annotate a specific ConfigMap or Secret to be disabled/ignored via the
configTracking logic that tracks config changes makes configuration copies for the primary Deploy

Closes #435
2020-08-17 16:24:56 -06:00
Samuel Lang
39e55daa04 📈 Skipper Metrics Observer
To be able to distinguish Skipper routes we need to combine the Canary data to generate the Skipper metric label.

"request-success-rate" and "request-duration" queries are implemented and tested that provide those observations from Skipper metrics

* Takes into account how Skipper renders the paths accordingly and reformats the queries.
2020-08-17 08:23:38 +02:00
Samuel Lang
a9ad6c92a6 adding CircleCI tests 2020-08-17 08:23:38 +02:00
Samuel Lang
ca14a08f9c Skipper Router Implementation
Router implementation for zalan.do/Skipper Ingress -
An HTTP router and reverse proxy for service composition, including use cases like Kubernetes Ingress

https://github.com/zalando/skipper/

* The concept is to define routes with specific weights via the skipper specific annotation predicate of "zalando.org/backend-weights".
* A new "canary ingress" is created that has higher "weight" thus receiving all traffic, which distributes progressively
* After the canary process is finished, this ingress is disabled via the "False()" annotation predicate to route traffic again back to the apex Ingress.
There are certain Skipper principles which are taken into account:

```
Skipper Principles:
* if only one backend has a weight, only one backend will get 100% traffic
* if two of three or more backends have a weight, only those two should get traffic.
* if two backends don't have any weight, it's undefined and right now they get equal amount of traffic.
* weights can be int or float, but always treated as a ratio.

Implementation:
* apex Ingress is immutable
* new canary Ingress contains two paths for primary and canary service
* canary Ingress manages weights on primary & canary service, hence no traffic to apex service
```
2020-08-17 08:23:38 +02:00
Stefan Prodan
be16bd8768 Merge pull request #668 from timricese/master
Add securityContext parameter to loadtester chart
2020-08-17 08:33:45 +03:00
Stefan Prodan
47d00857bc Merge pull request #672 from weaveworks/kube-1.18.8
Update Kubernetes packages to v1.18.8
2020-08-15 10:17:19 +03:00
stefanprodan
7c3cb5c5a3 Install kustomize in CI 2020-08-15 09:25:27 +03:00
stefanprodan
f12fe4254a Add license to Flagger Helm chart 2020-08-15 09:16:47 +03:00
stefanprodan
bb627779d9 Update Kubernetes packages to v1.18.8 2020-08-15 09:16:11 +03:00
Tim Rice
eba066e044 Add securityContext parameter to loadtester chart
Default to `enabled: false` to avoid changing default behavior.

Allows using the chart on clusters with runAsNonRoot security policy
2020-08-13 08:11:32 +02:00
Stefan Prodan
34f0273c34 Merge pull request #667 from snahelou/master
Fix(grafana): metrics change since 1.16
2020-08-12 17:34:01 +03:00
Sebastien Nahelou
394c9545ce Fix(grafana): metrics change since 1.16 2020-08-11 11:13:58 +02:00
Stefan Prodan
a6f0481b27 Merge pull request #661 from weaveworks/e2e-test-suite-updates
Update Istio, Linkerd and Contour e2e to latest version
2020-08-06 10:22:23 +03:00
Stefan Prodan
4d2664b57e Merge pull request #663 from stealthybox/mapfix-658
Fix O(log n) bug over network in GetTargetConfigs() when using `--enable-config-tracking`
2020-08-06 08:56:16 +03:00
leigh capili
1242825c42 Fix O(log n) bug over network in GetTargetConfigs() when using --enable-config-tracking
Read for more details:
https://github.com/weaveworks/flagger/issues/658#issuecomment-669389203
2020-08-05 13:16:50 -06:00
stefanprodan
fd34614c84 Update Istio, Linkerd and Contour e2e to latest version 2020-08-05 11:47:46 +03:00
Takeshi Yoneda
68312570b6 Merge pull request #654 from weaveworks/docs-fix-typo-prometheus
fix typo in docs: promethues -> prometheus
2020-07-27 16:18:07 +09:00
Stefan Prodan
fa9de7d8f9 Merge pull request #652 from imrenagi/feature/pod-priority
Add priorityClassName to flagger and loadtester chart
2020-07-27 09:16:32 +03:00
mathetake
a04bb3d3c0 fix typo in docs: promethues -> prometheus 2020-07-27 15:14:55 +09:00
Imre Nagi
23e805965e Update readme for podPriorityClassName
Signed-off-by: Imre Nagi <imre.nagi2812@gmail.com>
2020-07-23 16:37:37 +07:00
Imre Nagi
9aa775f409 Add priorityClassName to loadtester chart
Signed-off-by: Imre Nagi <imre.nagi2812@gmail.com>
2020-07-23 07:33:29 +07:00
Imre Nagi
9655ed652f Add pod priorityClassName to flagger deployment template
Signed-off-by: Imre Nagi <imre.nagi2812@gmail.com>
2020-07-23 07:27:27 +07:00
450 changed files with 15346 additions and 6777 deletions

View File

@@ -1,259 +0,0 @@
version: 2.1
jobs:
build-binary:
docker:
- image: circleci/golang:1.14
working_directory: ~/build
steps:
- checkout
- restore_cache:
keys:
- go-mod-v3-{{ checksum "go.sum" }}
- run:
name: Run go mod download
command: go mod download
- run:
name: Check code formatting
command: go install golang.org/x/tools/cmd/goimports && make test-fmt
- run:
name: Build Flagger
command: |
CGO_ENABLED=0 GOOS=linux go build \
-ldflags "-s -w -X github.com/weaveworks/flagger/pkg/version.REVISION=${CIRCLE_SHA1}" \
-a -installsuffix cgo -o bin/flagger ./cmd/flagger/*.go
- run:
name: Build Flagger load tester
command: |
CGO_ENABLED=0 GOOS=linux go build \
-a -installsuffix cgo -o bin/loadtester ./cmd/loadtester/*.go
- run:
name: Run unit tests
command: |
go test -race -coverprofile=coverage.txt -covermode=atomic $(go list ./pkg/...)
bash <(curl -s https://codecov.io/bash)
- run:
name: Verify code gen
command: make test-codegen
- save_cache:
key: go-mod-v3-{{ checksum "go.sum" }}
paths:
- "/go/pkg/mod/"
- persist_to_workspace:
root: bin
paths:
- flagger
- loadtester
push-container:
docker:
- image: circleci/golang:1.14
steps:
- checkout
- setup_remote_docker:
docker_layer_caching: true
- attach_workspace:
at: /tmp/bin
- run: test/container-build.sh
- run: test/container-push.sh
push-binary:
docker:
- image: circleci/golang:1.14
working_directory: ~/build
steps:
- checkout
- setup_remote_docker:
docker_layer_caching: true
- restore_cache:
keys:
- go-mod-v3-{{ checksum "go.sum" }}
- run: make release-notes
- run: github-release-notes -org weaveworks -repo flagger -since-latest-release -include-author > /tmp/release.txt
- run: test/goreleaser.sh
e2e-kubernetes-testing:
machine: true
steps:
- checkout
- attach_workspace:
at: /tmp/bin
- run: test/container-build.sh
- run: test/e2e-kind.sh v1.18.2
- run: test/e2e-kubernetes.sh
- run: test/e2e-kubernetes-tests-deployment.sh
- run: test/e2e-kubernetes-cleanup.sh
- run: test/e2e-kubernetes-tests-daemonset.sh
e2e-istio-testing:
machine: true
steps:
- checkout
- attach_workspace:
at: /tmp/bin
- run: test/container-build.sh
- run: test/e2e-kind.sh v1.18.2
- run: test/e2e-istio.sh
- run: test/e2e-istio-tests.sh
e2e-gloo-testing:
machine: true
steps:
- checkout
- attach_workspace:
at: /tmp/bin
- run: test/container-build.sh
- run: test/e2e-kind.sh
- run: test/e2e-gloo.sh
- run: test/e2e-gloo-tests.sh
e2e-nginx-testing:
machine: true
steps:
- checkout
- attach_workspace:
at: /tmp/bin
- run: test/container-build.sh
- run: test/e2e-kind.sh
- run: test/e2e-nginx.sh
- run: test/e2e-nginx-tests.sh
- run: test/e2e-nginx-cleanup.sh
- run: test/e2e-nginx-custom-annotations.sh
- run: test/e2e-nginx-tests.sh
e2e-linkerd-testing:
machine: true
steps:
- checkout
- attach_workspace:
at: /tmp/bin
- run: test/container-build.sh
- run: test/e2e-kind.sh
- run: test/e2e-linkerd.sh
- run: test/e2e-linkerd-tests.sh
e2e-contour-testing:
machine: true
steps:
- checkout
- attach_workspace:
at: /tmp/bin
- run: test/container-build.sh
- run: test/e2e-kind.sh
- run: test/e2e-contour.sh
- run: test/e2e-contour-tests.sh
push-helm-charts:
docker:
- image: circleci/golang:1.14
steps:
- checkout
- run:
name: Install kubectl
command: sudo curl -L https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl && sudo chmod +x /usr/local/bin/kubectl
- run:
name: Install helm
command: sudo curl -L https://storage.googleapis.com/kubernetes-helm/helm-v2.14.2-linux-amd64.tar.gz | tar xz && sudo mv linux-amd64/helm /bin/helm && sudo rm -rf linux-amd64
- run:
name: Initialize helm
command: helm init --client-only --kubeconfig=$HOME/.kube/kubeconfig
- run:
name: Lint charts
command: |
helm lint ./charts/*
- run:
name: Package charts
command: |
mkdir $HOME/charts
helm package ./charts/* --destination $HOME/charts
- run:
name: Publish charts
command: |
if echo "${CIRCLE_TAG}" | grep v; then
REPOSITORY="https://weaveworksbot:${GITHUB_TOKEN}@github.com/weaveworks/flagger.git"
git config user.email weaveworksbot@users.noreply.github.com
git config user.name weaveworksbot
git remote set-url origin ${REPOSITORY}
git checkout gh-pages
mv -f $HOME/charts/*.tgz .
helm repo index . --url https://flagger.app
git add .
git commit -m "Publish Helm charts v${CIRCLE_TAG}"
git push origin gh-pages
else
echo "Not a release! Skip charts publish"
fi
workflows:
version: 2
build-test-push:
jobs:
- build-binary:
filters:
branches:
ignore:
- gh-pages
- /^user-.*/
- e2e-kubernetes-testing:
requires:
- build-binary
- e2e-istio-testing:
requires:
- build-binary
- e2e-gloo-testing:
requires:
- build-binary
- e2e-nginx-testing:
requires:
- build-binary
- e2e-linkerd-testing:
requires:
- build-binary
- e2e-contour-testing:
requires:
- build-binary
- push-container:
requires:
- build-binary
- e2e-kubernetes-testing
- e2e-istio-testing
- e2e-gloo-testing
- e2e-nginx-testing
- e2e-linkerd-testing
filters:
branches:
only:
- master
release:
jobs:
- build-binary:
filters:
branches:
ignore: /.*/
tags:
ignore: /^chart.*/
- push-container:
requires:
- build-binary
filters:
branches:
ignore: /.*/
tags:
ignore: /^chart.*/
- push-binary:
requires:
- push-container
filters:
branches:
ignore: /.*/
tags:
ignore: /^chart.*/
- push-helm-charts:
requires:
- push-container
filters:
branches:
ignore: /.*/
tags:
ignore: /^chart.*/

View File

@@ -10,4 +10,6 @@ redirects:
usage/contour-progressive-delivery: tutorials/contour-progressive-delivery.md
usage/gloo-progressive-delivery: tutorials/gloo-progressive-delivery.md
usage/nginx-progressive-delivery: tutorials/nginx-progressive-delivery.md
usage/skipper-progressive-delivery: tutorials/skipper-progressive-delivery.md
usage/crossover-progressive-delivery: tutorials/crossover-progressive-delivery.md
usage/traefik-progressive-delivery: tutorials/traefik-progressive-delivery.md

View File

@@ -1,17 +0,0 @@
workflow "Publish Helm charts" {
on = "push"
resolves = ["helm-push"]
}
action "helm-lint" {
uses = "stefanprodan/gh-actions/helm@master"
args = ["lint charts/*"]
}
action "helm-push" {
needs = ["helm-lint"]
uses = "stefanprodan/gh-actions/helm-gh-pages@master"
args = ["charts/*","https://flagger.app"]
secrets = ["GITHUB_TOKEN"]
}

49
.github/workflows/build.yaml vendored Normal file
View File

@@ -0,0 +1,49 @@
name: build
on:
workflow_dispatch:
pull_request:
branches:
- main
push:
branches:
- main
jobs:
container:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Restore Go cache
uses: actions/cache@v1
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Setup Go
uses: actions/setup-go@v2
with:
go-version: 1.15.x
- name: Download modules
run: |
go mod download
go install golang.org/x/tools/cmd/goimports
- name: Run linters
run: make test-fmt test-codegen
- name: Run tests
run: go test -race -coverprofile=coverage.txt -covermode=atomic $(go list ./pkg/...)
- name: Check if working tree is dirty
run: |
if [[ $(git diff --stat) != '' ]]; then
git --no-pager diff
echo 'run make test and commit changes'
exit 1
fi
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v1
with:
file: ./coverage.txt
- name: Build container image
run: docker build -t test/flagger:latest .

37
.github/workflows/e2e.yaml vendored Normal file
View File

@@ -0,0 +1,37 @@
name: e2e
on:
workflow_dispatch:
pull_request:
branches:
- main
push:
branches:
- main
jobs:
kind:
runs-on: ubuntu-latest
strategy:
matrix:
provider:
- istio
- linkerd
- contour
- nginx
- traefik
- gloo
- skipper
- kubernetes
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Setup Kubernetes
uses: engineerd/setup-kind@v0.5.0
- name: Build container image
run: |
docker build -t test/flagger:latest .
kind load docker-image test/flagger:latest
- name: Run tests
run: |
./test/${{ matrix['provider'] }}/run.sh

74
.github/workflows/release.yml vendored Normal file
View File

@@ -0,0 +1,74 @@
name: release
on:
push:
tags:
- 'v*'
jobs:
build-push:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Prepare
id: prep
run: |
VERSION=$(grep 'VERSION' pkg/version/version.go | awk '{ print $4 }' | tr -d '"')
CHANGELOG="https://github.com/fluxcd/flagger/blob/main/CHANGELOG.md#$(echo $VERSION | tr -d '.')"
echo ::set-output name=BUILD_DATE::$(date -u +'%Y-%m-%dT%H:%M:%SZ')
echo ::set-output name=VERSION::${VERSION}
echo ::set-output name=CHANGELOG::${CHANGELOG}
- name: Setup QEMU
uses: docker/setup-qemu-action@v1
with:
platforms: all
- name: Setup Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v1
with:
buildkitd-flags: "--debug"
- name: Login to GitHub Container Registry
uses: docker/login-action@v1
with:
registry: ghcr.io
username: fluxcdbot
password: ${{ secrets.GHCR_TOKEN }}
- name: Publish image
uses: docker/build-push-action@v2
with:
push: true
builder: ${{ steps.buildx.outputs.name }}
context: .
file: ./Dockerfile
platforms: linux/amd64,linux/arm64,linux/arm/v7
build-args: |
REVISON=${{ github.sha }}
tags: |
ghcr.io/fluxcd/flagger:${{ steps.prep.outputs.VERSION }}
labels: |
org.opencontainers.image.title=${{ github.event.repository.name }}
org.opencontainers.image.description=${{ github.event.repository.description }}
org.opencontainers.image.url=${{ github.event.repository.html_url }}
org.opencontainers.image.source=${{ github.event.repository.html_url }}
org.opencontainers.image.revision=${{ github.sha }}
org.opencontainers.image.version=${{ steps.prep.outputs.VERSION }}
org.opencontainers.image.created=${{ steps.prep.outputs.BUILD_DATE }}
- name: Check images
run: |
docker buildx imagetools inspect ghcr.io/fluxcd/flagger:${{ steps.prep.outputs.VERSION }}
- name: Publish Helm charts
uses: stefanprodan/helm-gh-pages@v1.3.0
with:
token: ${{ secrets.GITHUB_TOKEN }}
charts_url: https://flagger.app
linting: off
- name: Create release
uses: actions/create-release@latest
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: ${{ github.ref }}
release_name: ${{ github.ref }}
draft: false
prerelease: false
body: |
[CHANGELOG](${{ steps.prep.outputs.CHANGELOG }})

1
.gitignore vendored
View File

@@ -20,3 +20,4 @@ artifacts/gcloud/
Makefile.dev
vendor
coverage.txt

View File

@@ -1,7 +1,7 @@
builds:
- main: ./cmd/flagger
binary: flagger
ldflags: -s -w -X github.com/weaveworks/flagger/pkg/version.REVISION={{.Commit}}
ldflags: -s -w -X github.com/fluxcd/flagger/pkg/version.REVISION={{.Commit}}
goos:
- linux
goarch:
@@ -12,7 +12,3 @@ archives:
- name_template: "{{ .Binary }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}"
files:
- none*
changelog:
filters:
exclude:
- '^CircleCI'

View File

@@ -2,6 +2,171 @@
All notable changes to this project are documented in this file.
## 1.6.1
**Release date:** 2021-01-19
This release extends the support for Istio's `HTTPMatchRequest` and
comes with a regression bug fix to secrets and configmaps tracking.
#### Improvements
- Update HTTPMatchRequest to match Istio's definitions
[#777](https://github.com/fluxcd/flagger/pull/777)
- e2e: Update Istio to v1.8.2 and Contour to v1.11.0
[#778](https://github.com/fluxcd/flagger/pull/778)
#### Fixes
- Add missing TrackedConfig field to Canary status CRD
[#781](https://github.com/fluxcd/flagger/pull/781)
## 1.6.0
**Release date:** 2021-01-05
**Breaking change:** the minimum supported version of Kubernetes is v1.16.0.
This release comes with support for A/B testing using [Gloo Edge](https://docs.flagger.app/tutorials/gloo-progressive-delivery)
HTTP headers based routing.
#### Features
- A/B testing support for Gloo Edge ingress controller
[#765](https://github.com/fluxcd/flagger/pull/765)
#### Improvements
- Upgrade the Kubernetes packages to `v1.20.1` and Flagger's CRDs to `apiextensions.k8s.io/v1`
[#772](https://github.com/fluxcd/flagger/pull/772)
## 1.5.0
**Release date:** 2020-12-22
This is the first release of Flagger under [fluxcd](https://github.com/fluxcd) organization (CNCF sandbox).
Starting with this version, Flagger can be installed on multi-arch Kubernetes clusters (Linux AMD64/ARM64/ARM).
The multi-arch image is available on GitHub Container Registry
at [ghcr.io/fluxcd/flagger](https://github.com/orgs/fluxcd/packages/container/package/flagger).
#### Improvements
- Publish multi-arch image to GitHub Container Registry
[#763](https://github.com/fluxcd/flagger/pull/763)
- Migrate CI to GitHub Actions
[#754](https://github.com/fluxcd/flagger/pull/754)
- Add e2e tests for label prefix inclusion
[#762](https://github.com/fluxcd/flagger/pull/762)
- Added PodDisruptionBudget to the Flagger Helm chart
[#749](https://github.com/fluxcd/flagger/pull/749)
## v1.4.2
**Release date:** 2020-12-09
Fix Istio virtual service delegation
#### Improvements
- Add Prometheus basic-auth config to docs
[#746](https://github.com/fluxcd/flagger/pull/746)
- Update Prometheus to 2.23.0 and Grafana to 7.3.4
[#747](https://github.com/fluxcd/flagger/pull/747)
#### Fixes
- Fix for VirtualService delegation when analysis is enabled
[#745](https://github.com/fluxcd/flagger/pull/745)
## 1.4.1 (2020-12-08)
Prevent primary ConfigMaps and Secrets from being pruned by Flux
#### Improvements
- Apply label prefix rules for ConfigMaps and Secrets
[#743](https://github.com/fluxcd/flagger/pull/743)
## 1.4.0 (2020-12-07)
Add support for Traefik ingress controller
#### Features
- Add Traefik support for progressive traffic shifting with `TraefikService`
[#736](https://github.com/fluxcd/flagger/pull/736)
- Add support for HPA v2beta2 behaviors
[#740](https://github.com/fluxcd/flagger/pull/740)
## 1.3.0 (2020-11-23)
Add support for custom weights when configuring traffic shifting
#### Features
- Support AWS App Mesh backends ARN
[#715](https://github.com/fluxcd/flagger/pull/715)
- Add support for Istio VirtualService delegation
[#715](https://github.com/fluxcd/flagger/pull/715)
- Copy labels from canary to primary workloads based on prefix rules
[#709](https://github.com/fluxcd/flagger/pull/709)
#### Improvements
- Add QPS and Burst configs for kubernetes client
[#725](https://github.com/fluxcd/flagger/pull/725)
- Update Istio to v1.8.0
[#733](https://github.com/fluxcd/flagger/pull/733)
## 1.2.0 (2020-09-29)
Add support for New Relic metrics
#### Features
- Add New Relic as a metrics provider
[#691](https://github.com/fluxcd/flagger/pull/691)
#### Improvements
- Derive the label selector value from the target matchLabel
[#685](https://github.com/fluxcd/flagger/pull/685)
- Preserve Skipper predicates
[#681](https://github.com/fluxcd/flagger/pull/681)
#### Fixes
- Do not promote when not ready on skip analysis
[#695](https://github.com/fluxcd/flagger/pull/695)
## 1.1.0 (2020-08-18)
Add support for Skipper ingress controller
#### Features
- Skipper Ingress Controller support
[#670](https://github.com/fluxcd/flagger/pull/670)
- Support per-config configTracker disable via ConfigMap/Secret annotation
[#671](https://github.com/fluxcd/flagger/pull/671)
#### Improvements
- Add priorityClassName and securityContext to Helm charts
[#652](https://github.com/fluxcd/flagger/pull/652)
[#668](https://github.com/fluxcd/flagger/pull/668)
- Update Kubernetes packages to v1.18.8
[#672](https://github.com/fluxcd/flagger/pull/672)
- Update Istio, Linkerd and Contour e2e tests
[#661](https://github.com/fluxcd/flagger/pull/661)
#### Fixes
- Fix O(log n) bug over network in GetTargetConfigs
[#663](https://github.com/fluxcd/flagger/pull/663)
- Fix(grafana): metrics change since Kubernetes 1.16
[#667](https://github.com/fluxcd/flagger/pull/667)
## 1.0.1 (2020-07-18)
Add support for App Mesh Gateway GA
@@ -9,18 +174,18 @@ Add support for App Mesh Gateway GA
#### Improvements
- Update App Mesh docs to v1beta2 API
[#649](https://github.com/weaveworks/flagger/pull/649)
[#649](https://github.com/fluxcd/flagger/pull/649)
- Add threadiness to Flagger helm chart
[#643](https://github.com/weaveworks/flagger/pull/643)
[#643](https://github.com/fluxcd/flagger/pull/643)
- Add Istio virtual service to loadtester helm chart
[#643](https://github.com/weaveworks/flagger/pull/643)
[#643](https://github.com/fluxcd/flagger/pull/643)
#### Fixes
- Fix multiple paths per rule on canary ingress
[#632](https://github.com/weaveworks/flagger/pull/632)
[#632](https://github.com/fluxcd/flagger/pull/632)
- Fix installers for kustomize >= 3.6.0
[#646](https://github.com/weaveworks/flagger/pull/646)
[#646](https://github.com/fluxcd/flagger/pull/646)
## 1.0.0 (2020-06-17)
@@ -37,20 +202,20 @@ canary basis for Slack, MS Teams, Discord and Rocket.
#### Features
- Implement progressive promotion
[#593](https://github.com/weaveworks/flagger/pull/593)
[#593](https://github.com/fluxcd/flagger/pull/593)
#### Improvements
- istio: Add source labels to analysis matching rules
[#594](https://github.com/weaveworks/flagger/pull/594)
[#594](https://github.com/fluxcd/flagger/pull/594)
- istio: Add allow origins field to CORS spec
[#604](https://github.com/weaveworks/flagger/pull/604)
[#604](https://github.com/fluxcd/flagger/pull/604)
- istio: Change builtin metrics to work with Istio telemetry v2
[#623](https://github.com/weaveworks/flagger/pull/623)
[#623](https://github.com/fluxcd/flagger/pull/623)
- appmesh: Implement App Mesh v1beta2 timeout
[#611](https://github.com/weaveworks/flagger/pull/611)
[#611](https://github.com/fluxcd/flagger/pull/611)
- metrics: Check metrics server availability during canary initialization
[#592](https://github.com/weaveworks/flagger/pull/592)
[#592](https://github.com/fluxcd/flagger/pull/592)
## 1.0.0-rc.5 (2020-05-14)
@@ -61,24 +226,24 @@ The upgrade procedure from 0.x to 1.0 can be found [here](https://docs.flagger.a
#### Features
- Add support for AWS AppMesh v1beta2 API
[#584](https://github.com/weaveworks/flagger/pull/584)
[#584](https://github.com/fluxcd/flagger/pull/584)
- Add support for Contour v1.4 ingress class
[#588](https://github.com/weaveworks/flagger/pull/588)
[#588](https://github.com/fluxcd/flagger/pull/588)
- Add user-specified labels/annotations to the generated Services
[#538](https://github.com/weaveworks/flagger/pull/538)
[#538](https://github.com/fluxcd/flagger/pull/538)
#### Improvements
- Support compatible Prometheus service
[#557](https://github.com/weaveworks/flagger/pull/557)
[#557](https://github.com/fluxcd/flagger/pull/557)
- Update e2e tests and packages to Kubernetes v1.18
[#549](https://github.com/weaveworks/flagger/pull/549)
[#576](https://github.com/weaveworks/flagger/pull/576)
[#549](https://github.com/fluxcd/flagger/pull/549)
[#576](https://github.com/fluxcd/flagger/pull/576)
#### Fixes
- pkg/controller: retry canary initialization on conflict
[#586](https://github.com/weaveworks/flagger/pull/586)
[#586](https://github.com/fluxcd/flagger/pull/586)
## 1.0.0-rc.4 (2020-04-03)
@@ -91,39 +256,39 @@ The upgrade procedure from 0.x to 1.0 can be found [here](https://docs.flagger.a
#### Features
- Implement NGINX Ingress header regex matching
[#546](https://github.com/weaveworks/flagger/pull/546)
[#546](https://github.com/fluxcd/flagger/pull/546)
#### Improvements
- pkg/router: update ingress API to networking.k8s.io/v1beta1
[#534](https://github.com/weaveworks/flagger/pull/534)
[#534](https://github.com/fluxcd/flagger/pull/534)
- loadtester: add return cmd output option
[#535](https://github.com/weaveworks/flagger/pull/535)
[#535](https://github.com/fluxcd/flagger/pull/535)
- refactoring: finalizer error handling and unit testing
[#531](https://github.com/weaveworks/flagger/pull/535)
[#530](https://github.com/weaveworks/flagger/pull/530)
[#531](https://github.com/fluxcd/flagger/pull/531)
[#530](https://github.com/fluxcd/flagger/pull/530)
- chart: add finalizers to RBAC rules for OpenShift
[#537](https://github.com/weaveworks/flagger/pull/537)
[#537](https://github.com/fluxcd/flagger/pull/537)
- chart: allow security context to be disabled on OpenShift
[#543](https://github.com/weaveworks/flagger/pull/543)
[#543](https://github.com/fluxcd/flagger/pull/543)
- chart: add annotations for service account
[#521](https://github.com/weaveworks/flagger/pull/521)
[#521](https://github.com/fluxcd/flagger/pull/521)
- docs: Add Prometheus Operator tutorial
[#524](https://github.com/weaveworks/flagger/pull/524)
[#524](https://github.com/fluxcd/flagger/pull/524)
#### Fixes
- pkg/controller: avoid status conflicts on initialization
[#544](https://github.com/weaveworks/flagger/pull/544)
[#544](https://github.com/fluxcd/flagger/pull/544)
- pkg/canary: fix status retry
[#541](https://github.com/weaveworks/flagger/pull/541)
[#541](https://github.com/fluxcd/flagger/pull/541)
- loadtester: fix timeout errors
[#539](https://github.com/weaveworks/flagger/pull/539)
[#539](https://github.com/fluxcd/flagger/pull/539)
- pkg/canary/daemonset: fix readiness check
[#529](https://github.com/weaveworks/flagger/pull/529)
[#529](https://github.com/fluxcd/flagger/pull/529)
- logs: reduce log verbosity and fix typos
[#540](https://github.com/weaveworks/flagger/pull/540)
[#526](https://github.com/weaveworks/flagger/pull/526)
[#540](https://github.com/fluxcd/flagger/pull/540)
[#526](https://github.com/fluxcd/flagger/pull/526)
## 1.0.0-rc.3 (2020-03-23)
@@ -135,19 +300,19 @@ The upgrade procedure from 0.x to 1.0 can be found [here](https://docs.flagger.a
#### Features
- Add opt-in finalizers to revert Flagger's mutations on deletion of a canary
[#495](https://github.com/weaveworks/flagger/pull/495)
[#495](https://github.com/fluxcd/flagger/pull/495)
#### Improvements
- e2e: update end-to-end tests to Contour 1.3.0 and Gloo 1.3.14
[#519](https://github.com/weaveworks/flagger/pull/519)
[#519](https://github.com/fluxcd/flagger/pull/519)
- build: update Kubernetes packages to 1.17.4
[#516](https://github.com/weaveworks/flagger/pull/516)
[#516](https://github.com/fluxcd/flagger/pull/516)
#### Fixes
- Preserve node ports on service reconciliation
[#514](https://github.com/weaveworks/flagger/pull/514)
[#514](https://github.com/fluxcd/flagger/pull/514)
## 1.0.0-rc.2 (2020-03-19)
@@ -158,33 +323,33 @@ The upgrade procedure from 0.x to 1.0 can be found [here](https://docs.flagger.a
#### Features
- Make mirror percentage configurable when using Istio traffic shadowing
[#492](https://github.com/weaveworks/flagger/pull/455)
[#492](https://github.com/fluxcd/flagger/pull/492)
- Add support for running Concord tests with loadtester webhooks
[#507](https://github.com/weaveworks/flagger/pull/507)
[#507](https://github.com/fluxcd/flagger/pull/507)
#### Improvements
- docs: add Istio telemetry v2 upgrade guide
[#486](https://github.com/weaveworks/flagger/pull/486),
[#486](https://github.com/fluxcd/flagger/pull/486),
update A/B testing tutorial for Istio 1.5
[#502](https://github.com/weaveworks/flagger/pull/502),
[#502](https://github.com/fluxcd/flagger/pull/502),
add how to retry a failed release to FAQ
[#494](https://github.com/weaveworks/flagger/pull/494)
[#494](https://github.com/fluxcd/flagger/pull/494)
- e2e: update end-to-end tests to
Istio 1.5 [#447](https://github.com/weaveworks/flagger/pull/447) and
Istio 1.5 [#447](https://github.com/fluxcd/flagger/pull/447) and
NGINX Ingress 0.30
[#489](https://github.com/weaveworks/flagger/pull/489)
[#511](https://github.com/weaveworks/flagger/pull/511)
[#489](https://github.com/fluxcd/flagger/pull/489)
[#511](https://github.com/fluxcd/flagger/pull/511)
- refactoring:
error handling [#480](https://github.com/weaveworks/flagger/pull/480),
scheduler [#484](https://github.com/weaveworks/flagger/pull/484) and
unit tests [#475](https://github.com/weaveworks/flagger/pull/475)
error handling [#480](https://github.com/fluxcd/flagger/pull/480),
scheduler [#484](https://github.com/fluxcd/flagger/pull/484) and
unit tests [#475](https://github.com/fluxcd/flagger/pull/475)
- chart: add the log level configuration to Flagger helm chart
[#506](https://github.com/weaveworks/flagger/pull/506)
[#506](https://github.com/fluxcd/flagger/pull/506)
#### Fixes
- Fix nil pointer for the global notifiers [#504](https://github.com/weaveworks/flagger/pull/504)
- Fix nil pointer for the global notifiers [#504](https://github.com/fluxcd/flagger/pull/504)
## 1.0.0-rc.1 (2020-03-03)
@@ -200,28 +365,28 @@ canary basis for Slack, MS Teams, Discord and Rocket.
#### Features
- Implement metric templates for Prometheus [#419](https://github.com/weaveworks/flagger/pull/419),
Datadog [#460](https://github.com/weaveworks/flagger/pull/460) and
CloudWatch [#464](https://github.com/weaveworks/flagger/pull/464)
- Implement metric range validation [#424](https://github.com/weaveworks/flagger/pull/424)
- Add support for targeting DaemonSets [#455](https://github.com/weaveworks/flagger/pull/455)
- Implement metric templates for Prometheus [#419](https://github.com/fluxcd/flagger/pull/419),
Datadog [#460](https://github.com/fluxcd/flagger/pull/460) and
CloudWatch [#464](https://github.com/fluxcd/flagger/pull/464)
- Implement metric range validation [#424](https://github.com/fluxcd/flagger/pull/424)
- Add support for targeting DaemonSets [#455](https://github.com/fluxcd/flagger/pull/455)
- Implement canary alerts and alert providers (Slack, MS Teams, Discord and Rocket)
[#429](https://github.com/weaveworks/flagger/pull/429)
[#429](https://github.com/fluxcd/flagger/pull/429)
#### Improvements
- Add support for Istio multi-cluster
[#447](https://github.com/weaveworks/flagger/pull/447) [#450](https://github.com/weaveworks/flagger/pull/450)
- Extend Istio traffic policy [#441](https://github.com/weaveworks/flagger/pull/441),
add support for header operations [#442](https://github.com/weaveworks/flagger/pull/442) and
set ingress destination port when multiple ports are discovered [#436](https://github.com/weaveworks/flagger/pull/436)
- Add support for rollback gating [#449](https://github.com/weaveworks/flagger/pull/449)
- Allow disabling ConfigMaps and Secrets tracking [#425](https://github.com/weaveworks/flagger/pull/425)
[#447](https://github.com/fluxcd/flagger/pull/447) [#450](https://github.com/fluxcd/flagger/pull/450)
- Extend Istio traffic policy [#441](https://github.com/fluxcd/flagger/pull/441),
add support for header operations [#442](https://github.com/fluxcd/flagger/pull/442) and
set ingress destination port when multiple ports are discovered [#436](https://github.com/fluxcd/flagger/pull/436)
- Add support for rollback gating [#449](https://github.com/fluxcd/flagger/pull/449)
- Allow disabling ConfigMaps and Secrets tracking [#425](https://github.com/fluxcd/flagger/pull/425)
#### Fixes
- Fix spec changes detection [#446](https://github.com/weaveworks/flagger/pull/446)
- Track projected ConfigMaps and Secrets [#433](https://github.com/weaveworks/flagger/pull/433)
- Fix spec changes detection [#446](https://github.com/fluxcd/flagger/pull/446)
- Track projected ConfigMaps and Secrets [#433](https://github.com/fluxcd/flagger/pull/433)
## 0.23.0 (2020-02-06)
@@ -229,8 +394,8 @@ Adds support for service name configuration and rollback webhook
#### Features
- Implement service name override [#416](https://github.com/weaveworks/flagger/pull/416)
- Add support for gated rollback [#420](https://github.com/weaveworks/flagger/pull/420)
- Implement service name override [#416](https://github.com/fluxcd/flagger/pull/416)
- Add support for gated rollback [#420](https://github.com/fluxcd/flagger/pull/420)
## 0.22.0 (2020-01-16)
@@ -238,14 +403,14 @@ Adds event dispatching through webhooks
#### Features
- Implement event dispatching webhook [#409](https://github.com/weaveworks/flagger/pull/409)
- Add general purpose event webhook [#401](https://github.com/weaveworks/flagger/pull/401)
- Implement event dispatching webhook [#409](https://github.com/fluxcd/flagger/pull/409)
- Add general purpose event webhook [#401](https://github.com/fluxcd/flagger/pull/401)
#### Improvements
- Update Contour to v1.1 and add Linkerd header [#411](https://github.com/weaveworks/flagger/pull/411)
- Update Istio e2e to v1.4.3 [#407](https://github.com/weaveworks/flagger/pull/407)
- Update Kubernetes packages to 1.17 [#406](https://github.com/weaveworks/flagger/pull/406)
- Update Contour to v1.1 and add Linkerd header [#411](https://github.com/fluxcd/flagger/pull/411)
- Update Istio e2e to v1.4.3 [#407](https://github.com/fluxcd/flagger/pull/407)
- Update Kubernetes packages to 1.17 [#406](https://github.com/fluxcd/flagger/pull/406)
## 0.21.0 (2020-01-06)
@@ -253,14 +418,14 @@ Adds support for Contour ingress controller
#### Features
- Add support for Contour ingress controller [#397](https://github.com/weaveworks/flagger/pull/397)
- Add support for Envoy managed by Crossover via SMI [#386](https://github.com/weaveworks/flagger/pull/386)
- Extend canary target ref to Kubernetes Service kind [#372](https://github.com/weaveworks/flagger/pull/372)
- Add support for Contour ingress controller [#397](https://github.com/fluxcd/flagger/pull/397)
- Add support for Envoy managed by Crossover via SMI [#386](https://github.com/fluxcd/flagger/pull/386)
- Extend canary target ref to Kubernetes Service kind [#372](https://github.com/fluxcd/flagger/pull/372)
#### Improvements
- Add Prometheus operator PodMonitor template to Helm chart [#399](https://github.com/weaveworks/flagger/pull/399)
- Update e2e tests to Kubernetes v1.16 [#390](https://github.com/weaveworks/flagger/pull/390)
- Add Prometheus operator PodMonitor template to Helm chart [#399](https://github.com/fluxcd/flagger/pull/399)
- Update e2e tests to Kubernetes v1.16 [#390](https://github.com/fluxcd/flagger/pull/390)
## 0.20.4 (2019-12-03)
@@ -268,12 +433,12 @@ Adds support for taking over a running deployment without disruption
#### Improvements
- Add initialization phase to Kubernetes router [#384](https://github.com/weaveworks/flagger/pull/384)
- Add canary controller interface and Kubernetes deployment kind implementation [#378](https://github.com/weaveworks/flagger/pull/378)
- Add initialization phase to Kubernetes router [#384](https://github.com/fluxcd/flagger/pull/384)
- Add canary controller interface and Kubernetes deployment kind implementation [#378](https://github.com/fluxcd/flagger/pull/378)
#### Fixes
- Skip primary check on skip analysis [#380](https://github.com/weaveworks/flagger/pull/380)
- Skip primary check on skip analysis [#380](https://github.com/fluxcd/flagger/pull/380)
## 0.20.3 (2019-11-13)
@@ -281,8 +446,8 @@ Adds wrk to load tester tools and the App Mesh gateway chart to Flagger Helm rep
#### Improvements
- Add wrk to load tester tools [#368](https://github.com/weaveworks/flagger/pull/368)
- Add App Mesh gateway chart [#365](https://github.com/weaveworks/flagger/pull/365)
- Add wrk to load tester tools [#368](https://github.com/fluxcd/flagger/pull/368)
- Add App Mesh gateway chart [#365](https://github.com/fluxcd/flagger/pull/365)
## 0.20.2 (2019-11-07)
@@ -290,11 +455,11 @@ Adds support for exposing canaries outside the cluster using App Mesh Gateway an
#### Improvements
- Expose canaries on public domains with App Mesh Gateway [#358](https://github.com/weaveworks/flagger/pull/358)
- Expose canaries on public domains with App Mesh Gateway [#358](https://github.com/fluxcd/flagger/pull/358)
#### Fixes
- Use the specified replicas when scaling up the canary [#363](https://github.com/weaveworks/flagger/pull/363)
- Use the specified replicas when scaling up the canary [#363](https://github.com/fluxcd/flagger/pull/363)
## 0.20.1 (2019-11-03)
@@ -302,13 +467,13 @@ Fixes promql execution and updates the load testing tools
#### Improvements
- Update load tester Helm tools [#8349dd1](https://github.com/weaveworks/flagger/commit/8349dd1cda59a741c7bed9a0f67c0fc0fbff4635)
- e2e testing: update providers [#346](https://github.com/weaveworks/flagger/pull/346)
- Update load tester Helm tools [#8349dd1](https://github.com/fluxcd/flagger/commit/8349dd1cda59a741c7bed9a0f67c0fc0fbff4635)
- e2e testing: update providers [#346](https://github.com/fluxcd/flagger/pull/346)
#### Fixes
- Fix Prometheus query escape [#353](https://github.com/weaveworks/flagger/pull/353)
- Updating hey release link [#350](https://github.com/weaveworks/flagger/pull/350)
- Fix Prometheus query escape [#353](https://github.com/fluxcd/flagger/pull/353)
- Updating hey release link [#350](https://github.com/fluxcd/flagger/pull/350)
## 0.20.0 (2019-10-21)
@@ -317,19 +482,19 @@ and retry policies when using App Mesh
#### Features
- Implement App Mesh A/B testing based on HTTP headers match conditions [#340](https://github.com/weaveworks/flagger/pull/340)
- Implement App Mesh HTTP retry policy [#338](https://github.com/weaveworks/flagger/pull/338)
- Implement metrics server override [#342](https://github.com/weaveworks/flagger/pull/342)
- Implement App Mesh A/B testing based on HTTP headers match conditions [#340](https://github.com/fluxcd/flagger/pull/340)
- Implement App Mesh HTTP retry policy [#338](https://github.com/fluxcd/flagger/pull/338)
- Implement metrics server override [#342](https://github.com/fluxcd/flagger/pull/342)
#### Improvements
- Add the app/name label to services and primary deployment [#333](https://github.com/weaveworks/flagger/pull/333)
- Allow setting Slack and Teams URLs with env vars [#334](https://github.com/weaveworks/flagger/pull/334)
- Refactor Gloo integration [#344](https://github.com/weaveworks/flagger/pull/344)
- Add the app/name label to services and primary deployment [#333](https://github.com/fluxcd/flagger/pull/333)
- Allow setting Slack and Teams URLs with env vars [#334](https://github.com/fluxcd/flagger/pull/334)
- Refactor Gloo integration [#344](https://github.com/fluxcd/flagger/pull/344)
#### Fixes
- Generate unique names for App Mesh virtual routers and routes [#336](https://github.com/weaveworks/flagger/pull/336)
- Generate unique names for App Mesh virtual routers and routes [#336](https://github.com/fluxcd/flagger/pull/336)
## 0.19.0 (2019-10-08)
@@ -337,19 +502,19 @@ Adds support for canary and blue/green [traffic mirroring](https://docs.flagger.
#### Features
- Add traffic mirroring for Istio service mesh [#311](https://github.com/weaveworks/flagger/pull/311)
- Implement canary service target port [#327](https://github.com/weaveworks/flagger/pull/327)
- Add traffic mirroring for Istio service mesh [#311](https://github.com/fluxcd/flagger/pull/311)
- Implement canary service target port [#327](https://github.com/fluxcd/flagger/pull/327)
#### Improvements
- Allow gRPC protocol for App Mesh [#325](https://github.com/weaveworks/flagger/pull/325)
- Enforce blue/green when using Kubernetes networking [#326](https://github.com/weaveworks/flagger/pull/326)
- Allow gRPC protocol for App Mesh [#325](https://github.com/fluxcd/flagger/pull/325)
- Enforce blue/green when using Kubernetes networking [#326](https://github.com/fluxcd/flagger/pull/326)
#### Fixes
- Fix port discovery diff [#324](https://github.com/weaveworks/flagger/pull/324)
- Fix port discovery diff [#324](https://github.com/fluxcd/flagger/pull/324)
- Helm chart: Enable Prometheus scraping of Flagger metrics
[#2141d88](https://github.com/weaveworks/flagger/commit/2141d88ce1cc6be220dab34171c215a334ecde24)
[#2141d88](https://github.com/fluxcd/flagger/commit/2141d88ce1cc6be220dab34171c215a334ecde24)
## 0.18.6 (2019-10-03)
@@ -357,13 +522,13 @@ Adds support for App Mesh conformance tests and latency metric checks
#### Improvements
- Add support for acceptance testing when using App Mesh [#322](https://github.com/weaveworks/flagger/pull/322)
- Add Kustomize installer for App Mesh [#310](https://github.com/weaveworks/flagger/pull/310)
- Update Linkerd to v2.5.0 and Prometheus to v2.12.0 [#323](https://github.com/weaveworks/flagger/pull/323)
- Add support for acceptance testing when using App Mesh [#322](https://github.com/fluxcd/flagger/pull/322)
- Add Kustomize installer for App Mesh [#310](https://github.com/fluxcd/flagger/pull/310)
- Update Linkerd to v2.5.0 and Prometheus to v2.12.0 [#323](https://github.com/fluxcd/flagger/pull/323)
#### Fixes
- Fix slack/teams notification fields mapping [#318](https://github.com/weaveworks/flagger/pull/318)
- Fix slack/teams notification fields mapping [#318](https://github.com/fluxcd/flagger/pull/318)
## 0.18.5 (2019-10-02)
@@ -372,17 +537,17 @@ webhooks and blue/green deployments when using a service mesh
#### Features
- Implement confirm-promotion hook [#307](https://github.com/weaveworks/flagger/pull/307)
- Implement B/G for service mesh providers [#305](https://github.com/weaveworks/flagger/pull/305)
- Implement confirm-promotion hook [#307](https://github.com/fluxcd/flagger/pull/307)
- Implement B/G for service mesh providers [#305](https://github.com/fluxcd/flagger/pull/305)
#### Improvements
- Canary promotion improvements to avoid dropping in-flight requests [#310](https://github.com/weaveworks/flagger/pull/310)
- Update end-to-end tests to Kubernetes v1.15.3 and Istio 1.3.0 [#306](https://github.com/weaveworks/flagger/pull/306)
- Canary promotion improvements to avoid dropping in-flight requests [#310](https://github.com/fluxcd/flagger/pull/310)
- Update end-to-end tests to Kubernetes v1.15.3 and Istio 1.3.0 [#306](https://github.com/fluxcd/flagger/pull/306)
#### Fixes
- Skip primary check for App Mesh [#315](https://github.com/weaveworks/flagger/pull/315)
- Skip primary check for App Mesh [#315](https://github.com/fluxcd/flagger/pull/315)
## 0.18.4 (2019-09-08)
@@ -390,14 +555,14 @@ Adds support for NGINX custom annotations and Helm v3 acceptance testing
#### Features
- Add annotations prefix for NGINX ingresses [#293](https://github.com/weaveworks/flagger/pull/293)
- Add wide columns in CRD [#289](https://github.com/weaveworks/flagger/pull/289)
- loadtester: implement Helm v3 test command [#296](https://github.com/weaveworks/flagger/pull/296)
- loadtester: add gRPC health check to load tester image [#295](https://github.com/weaveworks/flagger/pull/295)
- Add annotations prefix for NGINX ingresses [#293](https://github.com/fluxcd/flagger/pull/293)
- Add wide columns in CRD [#289](https://github.com/fluxcd/flagger/pull/289)
- loadtester: implement Helm v3 test command [#296](https://github.com/fluxcd/flagger/pull/296)
- loadtester: add gRPC health check to load tester image [#295](https://github.com/fluxcd/flagger/pull/295)
#### Fixes
- loadtester: fix tests error logging [#286](https://github.com/weaveworks/flagger/pull/286)
- loadtester: fix tests error logging [#286](https://github.com/fluxcd/flagger/pull/286)
## 0.18.3 (2019-08-22)
@@ -405,17 +570,17 @@ Adds support for tillerless helm tests and protobuf health checking
#### Features
- loadtester: add support for tillerless helm [#280](https://github.com/weaveworks/flagger/pull/280)
- loadtester: add support for protobuf health checking [#280](https://github.com/weaveworks/flagger/pull/280)
- loadtester: add support for tillerless helm [#280](https://github.com/fluxcd/flagger/pull/280)
- loadtester: add support for protobuf health checking [#280](https://github.com/fluxcd/flagger/pull/280)
#### Improvements
- Set HTTP listeners for AppMesh virtual routers [#272](https://github.com/weaveworks/flagger/pull/272)
- Set HTTP listeners for AppMesh virtual routers [#272](https://github.com/fluxcd/flagger/pull/272)
#### Fixes
- Add missing fields to CRD validation spec [#271](https://github.com/weaveworks/flagger/pull/271)
- Fix App Mesh backends validation in CRD [#281](https://github.com/weaveworks/flagger/pull/281)
- Add missing fields to CRD validation spec [#271](https://github.com/fluxcd/flagger/pull/271)
- Fix App Mesh backends validation in CRD [#281](https://github.com/fluxcd/flagger/pull/281)
## 0.18.2 (2019-08-05)
@@ -423,11 +588,11 @@ Fixes multi-port support for Istio
#### Fixes
- Fix port discovery for multiple port services [#267](https://github.com/weaveworks/flagger/pull/267)
- Fix port discovery for multiple port services [#267](https://github.com/fluxcd/flagger/pull/267)
#### Improvements
- Update e2e testing to Istio v1.2.3, Gloo v0.18.8 and NGINX ingress chart v1.12.1 [#268](https://github.com/weaveworks/flagger/pull/268)
- Update e2e testing to Istio v1.2.3, Gloo v0.18.8 and NGINX ingress chart v1.12.1 [#268](https://github.com/fluxcd/flagger/pull/268)
## 0.18.1 (2019-07-30)
@@ -435,7 +600,7 @@ Fixes Blue/Green style deployments for Kubernetes and Linkerd providers
#### Fixes
- Fix Blue/Green metrics provider and add e2e tests [#261](https://github.com/weaveworks/flagger/pull/261)
- Fix Blue/Green metrics provider and add e2e tests [#261](https://github.com/fluxcd/flagger/pull/261)
## 0.18.0 (2019-07-29)
@@ -443,20 +608,20 @@ Adds support for [manual gating](https://docs.flagger.app/how-it-works#manual-ga
#### Features
- Implement confirm rollout gate, hook and API [#251](https://github.com/weaveworks/flagger/pull/251)
- Implement confirm rollout gate, hook and API [#251](https://github.com/fluxcd/flagger/pull/251)
#### Improvements
- Refactor canary change detection and status [#240](https://github.com/weaveworks/flagger/pull/240)
- Implement finalising state [#257](https://github.com/weaveworks/flagger/pull/257)
- Add gRPC load testing tool [#248](https://github.com/weaveworks/flagger/pull/248)
- Refactor canary change detection and status [#240](https://github.com/fluxcd/flagger/pull/240)
- Implement finalising state [#257](https://github.com/fluxcd/flagger/pull/257)
- Add gRPC load testing tool [#248](https://github.com/fluxcd/flagger/pull/248)
#### Breaking changes
- Due to the status sub-resource changes in [#240](https://github.com/weaveworks/flagger/pull/240),
- Due to the status sub-resource changes in [#240](https://github.com/fluxcd/flagger/pull/240),
when upgrading Flagger the canaries status phase will be reset to `Initialized`
- Upgrading Flagger with Helm will fail due to Helm poor support of CRDs,
see [workaround](https://github.com/weaveworks/flagger/issues/223)
see [workaround](https://github.com/fluxcd/flagger/issues/223)
## 0.17.0 (2019-07-08)
@@ -464,15 +629,15 @@ Adds support for Linkerd (SMI Traffic Split API), MS Teams notifications and HA
#### Features
- Add Linkerd support [#230](https://github.com/weaveworks/flagger/pull/230)
- Implement MS Teams notifications [#235](https://github.com/weaveworks/flagger/pull/235)
- Implement leader election [#236](https://github.com/weaveworks/flagger/pull/236)
- Add Linkerd support [#230](https://github.com/fluxcd/flagger/pull/230)
- Implement MS Teams notifications [#235](https://github.com/fluxcd/flagger/pull/235)
- Implement leader election [#236](https://github.com/fluxcd/flagger/pull/236)
#### Improvements
- Add [Kustomize](https://docs.flagger.app/install/flagger-install-on-kubernetes#install-flagger-with-kustomize)
installer [#232](https://github.com/weaveworks/flagger/pull/232)
- Add Pod Security Policy to Helm chart [#234](https://github.com/weaveworks/flagger/pull/234)
installer [#232](https://github.com/fluxcd/flagger/pull/232)
- Add Pod Security Policy to Helm chart [#234](https://github.com/fluxcd/flagger/pull/234)
## 0.16.0 (2019-06-23)
@@ -481,19 +646,19 @@ without a service mesh or ingress controller
#### Features
- Allow blue/green deployments without a service mesh provider [#211](https://github.com/weaveworks/flagger/pull/211)
- Add the service mesh provider to the canary spec [#217](https://github.com/weaveworks/flagger/pull/217)
- Allow multi-port services and implement port discovery [#207](https://github.com/weaveworks/flagger/pull/207)
- Allow blue/green deployments without a service mesh provider [#211](https://github.com/fluxcd/flagger/pull/211)
- Add the service mesh provider to the canary spec [#217](https://github.com/fluxcd/flagger/pull/217)
- Allow multi-port services and implement port discovery [#207](https://github.com/fluxcd/flagger/pull/207)
#### Improvements
- Add [FAQ page](https://docs.flagger.app/faq) to docs website
- Switch to go modules in CI [#218](https://github.com/weaveworks/flagger/pull/218)
- Switch to go modules in CI [#218](https://github.com/fluxcd/flagger/pull/218)
- Update e2e testing to Kubernetes Kind 0.3.0 and Istio 1.2.0
#### Fixes
- Update the primary HPA on canary promotion [#216](https://github.com/weaveworks/flagger/pull/216)
- Update the primary HPA on canary promotion [#216](https://github.com/fluxcd/flagger/pull/216)
## 0.15.0 (2019-06-12)
@@ -501,11 +666,11 @@ Adds support for customising the Istio [traffic policy](https://docs.flagger.app
#### Features
- Generate Istio destination rules and allow traffic policy customisation [#200](https://github.com/weaveworks/flagger/pull/200)
- Generate Istio destination rules and allow traffic policy customisation [#200](https://github.com/fluxcd/flagger/pull/200)
#### Improvements
- Update Kubernetes packages to 1.14 and use go modules instead of dep [#202](https://github.com/weaveworks/flagger/pull/202)
- Update Kubernetes packages to 1.14 and use go modules instead of dep [#202](https://github.com/fluxcd/flagger/pull/202)
## 0.14.1 (2019-06-05)
@@ -514,12 +679,12 @@ with Helm test or Bash Bats using pre-rollout hooks
#### Features
- Implement Helm and Bash pre-rollout hooks [#196](https://github.com/weaveworks/flagger/pull/196)
- Implement Helm and Bash pre-rollout hooks [#196](https://github.com/fluxcd/flagger/pull/196)
#### Fixes
- Fix promoting canary when max weight is not a multiple of step [#190](https://github.com/weaveworks/flagger/pull/190)
- Add ability to set Prometheus url with custom path without trailing '/' [#197](https://github.com/weaveworks/flagger/pull/197)
- Fix promoting canary when max weight is not a multiple of step [#190](https://github.com/fluxcd/flagger/pull/190)
- Add ability to set Prometheus url with custom path without trailing '/' [#197](https://github.com/fluxcd/flagger/pull/197)
## 0.14.0 (2019-05-21)
@@ -527,8 +692,8 @@ Adds support for Service Mesh Interface and [Gloo](https://docs.flagger.app/usag
#### Features
- Add support for SMI (Istio weighted traffic) [#180](https://github.com/weaveworks/flagger/pull/180)
- Add support for Gloo ingress controller (weighted traffic) [#179](https://github.com/weaveworks/flagger/pull/179)
- Add support for SMI (Istio weighted traffic) [#180](https://github.com/fluxcd/flagger/pull/180)
- Add support for Gloo ingress controller (weighted traffic) [#179](https://github.com/fluxcd/flagger/pull/179)
## 0.13.2 (2019-04-11)
@@ -536,11 +701,11 @@ Fixes for Jenkins X deployments (prevent the jx GC from removing the primary ins
#### Fixes
- Do not copy labels from canary to primary deployment [#178](https://github.com/weaveworks/flagger/pull/178)
- Do not copy labels from canary to primary deployment [#178](https://github.com/fluxcd/flagger/pull/178)
#### Improvements
- Add NGINX ingress controller e2e and unit tests [#176](https://github.com/weaveworks/flagger/pull/176)
- Add NGINX ingress controller e2e and unit tests [#176](https://github.com/fluxcd/flagger/pull/176)
## 0.13.1 (2019-04-09)
@@ -548,7 +713,7 @@ Fixes for custom metrics checks and NGINX Prometheus queries
#### Fixes
- Fix promql queries for custom checks and NGINX [#174](https://github.com/weaveworks/flagger/pull/174)
- Fix promql queries for custom checks and NGINX [#174](https://github.com/fluxcd/flagger/pull/174)
## 0.13.0 (2019-04-08)
@@ -556,13 +721,13 @@ Adds support for [NGINX](https://docs.flagger.app/usage/nginx-progressive-delive
#### Features
- Add support for nginx ingress controller (weighted traffic and A/B testing) [#170](https://github.com/weaveworks/flagger/pull/170)
- Add support for nginx ingress controller (weighted traffic and A/B testing) [#170](https://github.com/fluxcd/flagger/pull/170)
- Add Prometheus add-on to Flagger Helm chart for App Mesh and
NGINX [79b3370](https://github.com/weaveworks/flagger/pull/170/commits/79b337089294a92961bc8446fd185b38c50a32df)
NGINX [79b3370](https://github.com/fluxcd/flagger/pull/170/commits/79b337089294a92961bc8446fd185b38c50a32df)
#### Fixes
- Fix duplicate hosts Istio error when using wildcards [#162](https://github.com/weaveworks/flagger/pull/162)
- Fix duplicate hosts Istio error when using wildcards [#162](https://github.com/fluxcd/flagger/pull/162)
## 0.12.0 (2019-03-29)
@@ -570,7 +735,7 @@ Adds support for [SuperGloo](https://docs.flagger.app/install/flagger-install-wi
#### Features
- Supergloo support for canary deployment (weighted traffic) [#151](https://github.com/weaveworks/flagger/pull/151)
- Supergloo support for canary deployment (weighted traffic) [#151](https://github.com/fluxcd/flagger/pull/151)
## 0.11.1 (2019-04-18)
@@ -586,16 +751,16 @@ Adds pre/post rollout [webhooks](https://docs.flagger.app/how-it-works#webhooks)
#### Features
- Add `pre-rollout` and `post-rollout` webhook types [#147](https://github.com/weaveworks/flagger/pull/147)
- Add `pre-rollout` and `post-rollout` webhook types [#147](https://github.com/fluxcd/flagger/pull/147)
#### Improvements
- Unify App Mesh and Istio builtin metric checks [#146](https://github.com/weaveworks/flagger/pull/146)
- Make the pod selector label configurable [#148](https://github.com/weaveworks/flagger/pull/148)
- Unify App Mesh and Istio builtin metric checks [#146](https://github.com/fluxcd/flagger/pull/146)
- Make the pod selector label configurable [#148](https://github.com/fluxcd/flagger/pull/148)
#### Breaking changes
- Set default `mesh` Istio gateway only if no gateway is specified [#141](https://github.com/weaveworks/flagger/pull/141)
- Set default `mesh` Istio gateway only if no gateway is specified [#141](https://github.com/fluxcd/flagger/pull/141)
## 0.10.0 (2019-03-27)
@@ -604,17 +769,17 @@ Adds support for App Mesh
#### Features
- AWS App Mesh integration
[#107](https://github.com/weaveworks/flagger/pull/107)
[#123](https://github.com/weaveworks/flagger/pull/123)
[#107](https://github.com/fluxcd/flagger/pull/107)
[#123](https://github.com/fluxcd/flagger/pull/123)
#### Improvements
- Reconcile Kubernetes ClusterIP services [#122](https://github.com/weaveworks/flagger/pull/122)
- Reconcile Kubernetes ClusterIP services [#122](https://github.com/fluxcd/flagger/pull/122)
#### Fixes
- Preserve pod labels on canary promotion [#105](https://github.com/weaveworks/flagger/pull/105)
- Fix canary status Prometheus metric [#121](https://github.com/weaveworks/flagger/pull/121)
- Preserve pod labels on canary promotion [#105](https://github.com/fluxcd/flagger/pull/105)
- Fix canary status Prometheus metric [#121](https://github.com/fluxcd/flagger/pull/121)
## 0.9.0 (2019-03-11)
@@ -623,11 +788,11 @@ primary and canary based on HTTP headers or cookies.
#### Features
- A/B testing - canary with session affinity [#88](https://github.com/weaveworks/flagger/pull/88)
- A/B testing - canary with session affinity [#88](https://github.com/fluxcd/flagger/pull/88)
#### Fixes
- Update the analysis interval when the custom resource changes [#91](https://github.com/weaveworks/flagger/pull/91)
- Update the analysis interval when the custom resource changes [#91](https://github.com/fluxcd/flagger/pull/91)
## 0.8.0 (2019-03-06)
@@ -635,16 +800,16 @@ Adds support for CORS policy and HTTP request headers manipulation
#### Features
- CORS policy support [#83](https://github.com/weaveworks/flagger/pull/83)
- Allow headers to be appended to HTTP requests [#82](https://github.com/weaveworks/flagger/pull/82)
- CORS policy support [#83](https://github.com/fluxcd/flagger/pull/83)
- Allow headers to be appended to HTTP requests [#82](https://github.com/fluxcd/flagger/pull/82)
#### Improvements
- Refactor the routing management
[#72](https://github.com/weaveworks/flagger/pull/72)
[#80](https://github.com/weaveworks/flagger/pull/80)
- Fine-grained RBAC [#73](https://github.com/weaveworks/flagger/pull/73)
- Add option to limit Flagger to a single namespace [#78](https://github.com/weaveworks/flagger/pull/78)
[#72](https://github.com/fluxcd/flagger/pull/72)
[#80](https://github.com/fluxcd/flagger/pull/80)
- Fine-grained RBAC [#73](https://github.com/fluxcd/flagger/pull/73)
- Add option to limit Flagger to a single namespace [#78](https://github.com/fluxcd/flagger/pull/78)
## 0.7.0 (2019-02-28)
@@ -652,8 +817,8 @@ Adds support for custom metric checks, HTTP timeouts and HTTP retries
#### Features
- Allow custom promql queries in the canary analysis spec [#60](https://github.com/weaveworks/flagger/pull/60)
- Add HTTP timeout and retries to canary service spec [#62](https://github.com/weaveworks/flagger/pull/62)
- Allow custom promql queries in the canary analysis spec [#60](https://github.com/fluxcd/flagger/pull/60)
- Add HTTP timeout and retries to canary service spec [#62](https://github.com/fluxcd/flagger/pull/62)
## 0.6.0 (2019-02-25)
@@ -663,15 +828,15 @@ to be customized in the service spec of the canary custom resource.
#### Features
- Add HTTP match conditions and URI rewrite to the canary service spec [#55](https://github.com/weaveworks/flagger/pull/55)
- Add HTTP match conditions and URI rewrite to the canary service spec [#55](https://github.com/fluxcd/flagger/pull/55)
- Update virtual service when the canary service spec changes
[#54](https://github.com/weaveworks/flagger/pull/54)
[#51](https://github.com/weaveworks/flagger/pull/51)
[#54](https://github.com/fluxcd/flagger/pull/54)
[#51](https://github.com/fluxcd/flagger/pull/51)
#### Improvements
- Run e2e testing on [Kubernetes Kind](https://github.com/kubernetes-sigs/kind) for canary promotion
[#53](https://github.com/weaveworks/flagger/pull/53)
[#53](https://github.com/fluxcd/flagger/pull/53)
## 0.5.1 (2019-02-14)
@@ -679,15 +844,15 @@ Allows skipping the analysis phase to ship changes directly to production
#### Features
- Add option to skip the canary analysis [#46](https://github.com/weaveworks/flagger/pull/46)
- Add option to skip the canary analysis [#46](https://github.com/fluxcd/flagger/pull/46)
#### Fixes
- Reject deployment if the pod label selector doesn't match `app: <DEPLOYMENT_NAME>` [#43](https://github.com/weaveworks/flagger/pull/43)
- Reject deployment if the pod label selector doesn't match `app: <DEPLOYMENT_NAME>` [#43](https://github.com/fluxcd/flagger/pull/43)
## 0.5.0 (2019-01-30)
Track changes in ConfigMaps and Secrets [#37](https://github.com/weaveworks/flagger/pull/37)
Track changes in ConfigMaps and Secrets [#37](https://github.com/fluxcd/flagger/pull/37)
#### Features
@@ -703,7 +868,7 @@ Track changes in ConfigMaps and Secrets [#37](https://github.com/weaveworks/flag
## 0.4.1 (2019-01-24)
Load testing webhook [#35](https://github.com/weaveworks/flagger/pull/35)
Load testing webhook [#35](https://github.com/fluxcd/flagger/pull/35)
#### Features
@@ -717,7 +882,7 @@ Load testing webhook [#35](https://github.com/weaveworks/flagger/pull/35)
## 0.4.0 (2019-01-18)
Restart canary analysis if revision changes [#31](https://github.com/weaveworks/flagger/pull/31)
Restart canary analysis if revision changes [#31](https://github.com/fluxcd/flagger/pull/31)
#### Breaking changes
@@ -738,7 +903,7 @@ Restart canary analysis if revision changes [#31](https://github.com/weaveworks/
## 0.3.0 (2019-01-11)
Configurable canary analysis duration [#20](https://github.com/weaveworks/flagger/pull/20)
Configurable canary analysis duration [#20](https://github.com/fluxcd/flagger/pull/20)
#### Breaking changes
@@ -753,7 +918,7 @@ Configurable canary analysis duration [#20](https://github.com/weaveworks/flagge
## 0.2.0 (2019-01-04)
Webhooks [#18](https://github.com/weaveworks/flagger/pull/18)
Webhooks [#18](https://github.com/fluxcd/flagger/pull/18)
#### Features
@@ -764,7 +929,7 @@ Webhooks [#18](https://github.com/weaveworks/flagger/pull/18)
## 0.1.2 (2018-12-06)
Improve Slack notifications [#14](https://github.com/weaveworks/flagger/pull/14)
Improve Slack notifications [#14](https://github.com/fluxcd/flagger/pull/14)
#### Features
@@ -773,7 +938,7 @@ Improve Slack notifications [#14](https://github.com/weaveworks/flagger/pull/14)
## 0.1.1 (2018-11-28)
Canary progress deadline [#10](https://github.com/weaveworks/flagger/pull/10)
Canary progress deadline [#10](https://github.com/fluxcd/flagger/pull/10)
#### Features

3
CODE_OF_CONDUCT.md Normal file
View File

@@ -0,0 +1,3 @@
## Code of Conduct
Flagger follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md).

View File

@@ -14,10 +14,28 @@ Origin (DCO). This document was created by the Linux Kernel community and is a
simple statement that you, as a contributor, have the legal right to make the
contribution.
## Chat
We require all commits to be signed. By signing off with your signature, you
certify that you wrote the patch or otherwise have the right to contribute the
material by the rules of the [DCO](DCO):
`Signed-off-by: Jane Doe <jane.doe@example.com>`
The signature must contain your real name
(sorry, no pseudonyms or anonymous contributions)
If your `user.name` and `user.email` are configured in your Git config,
you can sign your commit automatically with `git commit -s`.
## Communications
The project uses Slack: To join the conversation, simply join the
[Weave community](https://slack.weave.works/) Slack workspace #flagger channel.
[CNCF](https://slack.cncf.io/) Slack workspace and use the
[#flux](https://cloud-native.slack.com/messages/flux/) channel.
The developers use a mailing list to discuss development as well.
Simply subscribe to [flux-dev on cncf.io](https://lists.cncf.io/g/cncf-flux-dev)
to join the conversation (this will also add an invitation to your
Google calendar for our [Flux
meeting](https://docs.google.com/document/d/1l_M0om0qUEN_NNiGgpqJ2tvsF2iioHkaARDeh6b70B0/edit#)).
## Getting Started
@@ -69,4 +87,3 @@ For Flagger we prefer the following rules for good commit messages:
The [following article](https://chris.beams.io/posts/git-commit/#seven-rules)
has some more helpful advice on documenting your work.
This doc is adapted from [FluxCD](https://github.com/fluxcd/flux/blob/master/CONTRIBUTING.md).

36
DCO Normal file
View File

@@ -0,0 +1,36 @@
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
660 York Street, Suite 102,
San Francisco, CA 94110 USA
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.

View File

@@ -1,9 +1,32 @@
FROM golang:1.15-alpine as builder
ARG TARGETPLATFORM
ARG REVISON
WORKDIR /workspace
# copy modules manifests
COPY go.mod go.mod
COPY go.sum go.sum
# cache modules
RUN go mod download
# copy source code
COPY cmd/ cmd/
COPY pkg/ pkg/
# build
RUN CGO_ENABLED=0 go build \
-ldflags "-s -w -X github.com/fluxcd/flagger/pkg/version.REVISION=${REVISON}" \
-a -o flagger ./cmd/flagger
FROM alpine:3.12
RUN apk --no-cache add ca-certificates
USER nobody
COPY --chown=nobody:nobody /bin/flagger .
COPY --from=builder --chown=nobody:nobody /workspace/flagger .
ENTRYPOINT ["./flagger"]

View File

@@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2018 Weaveworks. All rights reserved.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -3,14 +3,7 @@ VERSION?=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4 }' | t
LT_VERSION?=$(shell grep 'VERSION' cmd/loadtester/main.go | awk '{ print $$4 }' | tr -d '"' | head -n1)
build:
GIT_COMMIT=$$(git rev-list -1 HEAD) && CGO_ENABLED=0 GOOS=linux go build \
-ldflags "-s -w -X github.com/weaveworks/flagger/pkg/version.REVISION=$${GIT_COMMIT}" \
-a -installsuffix cgo -o ./bin/flagger ./cmd/flagger/*
docker build -t weaveworks/flagger:$(TAG) . -f Dockerfile
push:
docker tag weaveworks/flagger:$(TAG) weaveworks/flagger:$(VERSION)
docker push weaveworks/flagger:$(VERSION)
CGO_ENABLED=0 go build -a -o ./bin/flagger ./cmd/flagger
fmt:
gofmt -l -s -w ./
@@ -48,13 +41,9 @@ release:
git tag "v$(VERSION)"
git push origin "v$(VERSION)"
release-notes:
cd /tmp && GH_REL_URL="https://github.com/buchanae/github-release-notes/releases/download/0.2.0/github-release-notes-linux-amd64-0.2.0.tar.gz" && \
curl -sSL $${GH_REL_URL} | tar xz && sudo mv github-release-notes /usr/local/bin/
loadtester-build:
CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o ./bin/loadtester ./cmd/loadtester/*
docker build -t weaveworks/flagger-loadtester:$(LT_VERSION) . -f Dockerfile.loadtester
docker build -t ghcr.io/fluxcd/flagger-loadtester:$(LT_VERSION) . -f Dockerfile.loadtester
loadtester-push:
docker push weaveworks/flagger-loadtester:$(LT_VERSION)
docker push ghcr.io/fluxcd/flagger-loadtester:$(LT_VERSION)

View File

@@ -1,19 +1,18 @@
# flagger
[![build](https://img.shields.io/circleci/build/github/weaveworks/flagger/master.svg)](https://circleci.com/gh/weaveworks/flagger)
[![report](https://goreportcard.com/badge/github.com/weaveworks/flagger)](https://goreportcard.com/report/github.com/weaveworks/flagger)
[![codecov](https://codecov.io/gh/weaveworks/flagger/branch/master/graph/badge.svg)](https://codecov.io/gh/weaveworks/flagger)
[![license](https://img.shields.io/github/license/weaveworks/flagger.svg)](https://github.com/weaveworks/flagger/blob/master/LICENSE)
[![release](https://img.shields.io/github/release/weaveworks/flagger/all.svg)](https://github.com/weaveworks/flagger/releases)
[![build](https://github.com/fluxcd/flagger/workflows/build/badge.svg)](https://github.com/fluxcd/flagger/actions)
[![report](https://goreportcard.com/badge/github.com/fluxcd/flagger)](https://goreportcard.com/report/github.com/fluxcd/flagger)
[![license](https://img.shields.io/github/license/fluxcd/flagger.svg)](https://github.com/fluxcd/flagger/blob/main/LICENSE)
[![release](https://img.shields.io/github/release/fluxcd/flagger/all.svg)](https://github.com/fluxcd/flagger/releases)
Flagger is a progressive delivery tool that automates the release process for applications running on Kubernetes.
It reduces the risk of introducing a new software version in production
by gradually shifting traffic to the new version while measuring metrics and running conformance tests.
![flagger-overview](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-overview.png)
![flagger-overview](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/flagger-canary-overview.png)
Flagger implements several deployment strategies (Canary releases, A/B testing, Blue/Green mirroring)
using a service mesh (App Mesh, Istio, Linkerd) or an ingress controller (Contour, Gloo, NGINX) for traffic routing.
using a service mesh (App Mesh, Istio, Linkerd) or an ingress controller (Contour, Gloo, NGINX, Skipper, Traefik) for traffic routing.
For release analysis, Flagger can query Prometheus, Datadog or CloudWatch
and for alerting it uses Slack, MS Teams, Discord and Rocket.
@@ -37,6 +36,8 @@ Flagger documentation can be found at [docs.flagger.app](https://docs.flagger.ap
* [Contour](https://docs.flagger.app/tutorials/contour-progressive-delivery)
* [Gloo](https://docs.flagger.app/tutorials/gloo-progressive-delivery)
* [NGINX Ingress](https://docs.flagger.app/tutorials/nginx-progressive-delivery)
* [Skipper](https://docs.flagger.app/tutorials/skipper-progressive-delivery)
* [Traefik](https://docs.flagger.app/tutorials/traefik-progressive-delivery)
* [Kubernetes Blue/Green](https://docs.flagger.app/tutorials/kubernetes-blue-green)
### Who is using Flagger
@@ -49,6 +50,7 @@ List of organizations using Flagger:
* [MediaMarktSaturn](https://www.mediamarktsaturn.com)
* [Weaveworks](https://weave.works)
* [Jumia Group](https://group.jumia.com)
* [eLife](https://elifesciences.org/)
If you are using Flagger, please submit a PR to add your organization to the list!
@@ -71,7 +73,7 @@ metadata:
namespace: test
spec:
# service mesh provider (optional)
# can be: kubernetes, istio, linkerd, appmesh, nginx, contour, gloo, supergloo
# can be: kubernetes, istio, linkerd, appmesh, nginx, skipper, contour, gloo, supergloo, traefik
provider: istio
# deployment reference
targetRef:
@@ -180,24 +182,48 @@ For more details on how the canary analysis and promotion works please [read the
### Features
| Feature | Istio | Linkerd | App Mesh | NGINX | Gloo | Contour | CNI |
| -------------------------------------------- | ------------------ | ------------------ |------------------ |------------------ |------------------ |------------------ |------------------ |
| Canary deployments (weighted traffic) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: |
| A/B testing (headers and cookies routing) | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_minus_sign: |
| Blue/Green deployments (traffic switch) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Webhooks (acceptance/load testing) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Manual gating (approve/pause/resume) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Request success rate check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: |
| Request duration check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: |
| Custom metric checks | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Traffic policy, CORS, retries and timeouts | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_minus_sign: |
**Service Mesh**
| Feature | App Mesh | Istio | Linkerd | Kubernetes CNI |
| ------------------------------------------ | ------------------ | ------------------ | ------------------ | ----------------- |
| Canary deployments (weighted traffic) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: |
| A/B testing (headers and cookies routing) | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: |
| Blue/Green deployments (traffic switch) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Blue/Green deployments (traffic mirroring) | :heavy_minus_sign: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: |
| Webhooks (acceptance/load testing) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Manual gating (approve/pause/resume) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Request success rate check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: |
| Request duration check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: |
| Custom metric checks | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
**Ingress**
| Feature | Contour | Gloo | NGINX | Skipper | Traefik |
| ------------------------------------------ | ------------------ | ------------------ | ------------------ | ------------------ | ------------------ |
| Canary deployments (weighted traffic) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| A/B testing (headers and cookies routing) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: |
| Blue/Green deployments (traffic switch) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Webhooks (acceptance/load testing) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Manual gating (approve/pause/resume) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Request success rate check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_check_mark: |
| Request duration check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_check_mark: |
| Custom metric checks | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
### Roadmap
#### [GitOps Toolkit](https://github.com/fluxcd/flux2) compatibility
* Migrate Flagger to Kubernetes controller-runtime and [kubebuilder](https://github.com/kubernetes-sigs/kubebuilder)
* Make the Canary status compatible with [kstatus](https://github.com/kubernetes-sigs/cli-utils)
* Make Flagger emit Kubernetes events compatible with Flux v2 notification API
* Integrate Flagger into Flux v2 as the progressive delivery component
#### Integrations
* Add support for Kubernetes [Ingress v2](https://github.com/kubernetes-sigs/service-apis)
* Integrate with other service meshes like Consul Connect and ingress controllers like HAProxy, ALB
* Integrate with other metrics providers like InfluxDB, Stackdriver, SignalFX
* Add support for comparing the canary metrics to the primary ones and do the validation based on the deviation between the two
* Add support for SMI compatible service mesh solutions like Open Service Mesh and Consul Connect
* Add support for ingress controllers like HAProxy and ALB
* Add support for metrics providers like InfluxDB, Stackdriver, SignalFX
### Contributing
@@ -221,6 +247,6 @@ If you have any questions about Flagger and progressive delivery:
and join the [#flagger](https://weave-community.slack.com/messages/flagger/) channel.
* Join the [Weave User Group](https://www.meetup.com/pro/Weave/) and get invited to online talks,
hands-on training and meetups in your area.
* File an [issue](https://github.com/weaveworks/flagger/issues/new).
* File an [issue](https://github.com/fluxcd/flagger/issues/new).
Your feedback is always welcome!

View File

@@ -0,0 +1,51 @@
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
provider: linkerd
progressDeadlineSeconds: 600
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: podinfo
service:
name: podinfo
port: 80
targetPort: 9898
portName: http
portDiscovery: true
skipAnalysis: false
analysis:
interval: 15s
threshold: 10
stepWeights: [5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55]
metrics:
- name: request-success-rate
thresholdRange:
min: 99
interval: 1m
- name: request-duration
thresholdRange:
max: 500
interval: 30s
webhooks:
- name: conformance-test
type: pre-rollout
url: http://flagger-loadtester.test/
timeout: 15s
metadata:
type: "bash"
cmd: "curl -sd 'test' http://podinfo-canary.test/token | grep token"
- name: load-test
type: rollout
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
type: cmd
cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.test/"

View File

@@ -153,8 +153,19 @@ rules:
resources:
- upstreams
- upstreams/finalizers
- upstreamgroups
- upstreamgroups/finalizers
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- gateway.solo.io
resources:
- routetables
- routetables/finalizers
verbs:
- get
- list

File diff suppressed because it is too large Load Diff

View File

@@ -22,7 +22,7 @@ spec:
serviceAccountName: flagger
containers:
- name: flagger
image: weaveworks/flagger:1.0.1
image: ghcr.io/fluxcd/flagger:1.6.1
imagePullPolicy: IfNotPresent
ports:
- name: http

View File

@@ -1,14 +1,14 @@
apiVersion: v1
name: flagger
version: 1.0.1
appVersion: 1.0.1
kubeVersion: ">=1.11.0-0"
version: 1.6.1
appVersion: 1.6.1
kubeVersion: ">=1.16.0-0"
engine: gotpl
description: Flagger is a progressive delivery operator for Kubernetes
home: https://flagger.app
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/weaveworks.png
icon: https://raw.githubusercontent.com/fluxcd/flagger/main/docs/logo/weaveworks.png
sources:
- https://github.com/weaveworks/flagger
- https://github.com/fluxcd/flagger
maintainers:
- name: stefanprodan
url: https://github.com/stefanprodan

201
charts/flagger/LICENSE Normal file
View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -1,18 +1,19 @@
# Flagger
[Flagger](https://github.com/weaveworks/flagger) is an operator that automates the release process of applications on Kubernetes.
[Flagger](https://github.com/fluxcd/flagger) is an operator that automates the release process of applications on Kubernetes.
Flagger can run automated application analysis, testing, promotion and rollback for the following deployment strategies:
* Canary Release (progressive traffic shifting)
* A/B Testing (HTTP headers and cookies traffic routing)
* Blue/Green (traffic switching and mirroring)
Flagger works with service mesh solutions (Istio, Linkerd, AWS App Mesh) and with Kubernetes ingress controllers (NGINX, Gloo, Contour).
Flagger works with service mesh solutions (Istio, Linkerd, AWS App Mesh) and with Kubernetes ingress controllers
(NGINX, Skipper, Gloo, Contour, Traefik).
Flagger can be configured to send alerts to various chat platforms such as Slack, Microsoft Teams, Discord and Rocket.
## Prerequisites
* Kubernetes >= 1.14
* Kubernetes >= 1.16
## Installing the Chart
@@ -25,7 +26,7 @@ $ helm repo add flagger https://flagger.app
Install Flagger's custom resource definitions:
```console
$ kubectl apply -f https://raw.githubusercontent.com/weaveworks/flagger/master/artifacts/flagger/crd.yaml
$ kubectl apply -f https://raw.githubusercontent.com/fluxcd/flagger/main/artifacts/flagger/crd.yaml
```
To install Flagger for **Istio**:
@@ -83,6 +84,15 @@ $ helm upgrade -i flagger flagger/flagger \
--set prometheus.install=true
```
To install Flagger and Prometheus for **Traefik**:
```console
$ helm upgrade -i flagger flagger/flagger \
--namespace=traefik \
--set prometheus.install=true \
--set meshProvider=traefik
```
The [configuration](#configuration) section lists the parameters that can be configured during installation.
## Uninstalling the Chart
@@ -101,7 +111,7 @@ The following tables lists the configurable parameters of the Flagger chart and
Parameter | Description | Default
--- | --- | ---
`image.repository` | Image repository | `weaveworks/flagger`
`image.repository` | Image repository | `ghcr.io/fluxcd/flagger`
`image.tag` | Image tag | `<VERSION>`
`image.pullPolicy` | Image pull policy | `IfNotPresent`
`logLevel` | Log level | `info`
@@ -116,8 +126,8 @@ Parameter | Description | Default
`slack.user` | Slack username | `flagger`
`msteams.url` | Microsoft Teams incoming webhook | None
`podMonitor.enabled` | If `true`, create a PodMonitor for [monitoring the metrics](https://docs.flagger.app/usage/monitoring#metrics) | `false`
`podMonitor.namespace` | Namespace where the PodMonitor is created | the same namespace
`podMonitor.interval` | Interval at which metrics should be scraped | `15s`
`podMonitor.namespace` | Namespace where the PodMonitor is created | the same namespace
`podMonitor.interval` | Interval at which metrics should be scraped | `15s`
`podMonitor.podMonitor` | Additional labels to add to the PodMonitor | `{}`
`leaderElection.enabled` | If `true`, Flagger will run in HA mode | `false`
`leaderElection.replicaCount` | Number of replicas | `1`
@@ -125,6 +135,7 @@ Parameter | Description | Default
`serviceAccount.name` | The name of the service account to create or use. If not set and `serviceAccount.create` is `true`, a name is generated using the Flagger fullname | `""`
`serviceAccount.annotations` | Annotations for service account | `{}`
`ingressAnnotationsPrefix` | Annotations prefix for ingresses | `custom.ingress.kubernetes.io`
`includeLabelPrefix` | List of prefixes of labels that are copied when creating primary deployments or daemonsets. Use * to include all | `""`
`rbac.create` | If `true`, create and use RBAC resources | `true`
`rbac.pspEnabled` | If `true`, create and use a restricted pod security policy | `false`
`crd.create` | If `true`, create Flagger's CRDs (should be enabled for Helm v2 only) | `false`
@@ -140,6 +151,9 @@ Parameter | Description | Default
`istio.kubeconfig.key` | The name of Kubernetes secret data key that contains the Istio control plane kubeconfig | `kubeconfig`
`ingressAnnotationsPrefix` | Annotations prefix for NGINX ingresses | None
`ingressClass` | Ingress class used for annotating HTTPProxy objects, e.g. `contour` | None
`podPriorityClassName` | PriorityClass name for pod priority configuration | ""
`podDisruptionBudget.enabled` | A PodDisruptionBudget will be created if `true` | `false`
`podDisruptionBudget.minAvailable` | The minimal number of available replicas that will be set in the PodDisruptionBudget | `1`
Specify each parameter using the `--set key=value[,key=value]` argument to `helm upgrade`. For example,
@@ -159,5 +173,3 @@ $ helm upgrade -i flagger flagger/flagger \
```
> **Tip**: You can use the default [values.yaml](values.yaml)

File diff suppressed because it is too large Load Diff

View File

@@ -48,6 +48,9 @@ spec:
secret:
secretName: "{{ .Values.istio.kubeconfig.secretName }}"
{{- end }}
{{- if .Values.podPriorityClassName }}
priorityClassName: {{ .Values.podPriorityClassName }}
{{- end }}
containers:
- name: flagger
{{- if .Values.securityContext.enabled }}
@@ -103,12 +106,21 @@ spec:
{{- if .Values.ingressAnnotationsPrefix }}
- -ingress-annotations-prefix={{ .Values.ingressAnnotationsPrefix }}
{{- end }}
{{- if .Values.includeLabelPrefix }}
- -include-label-prefix={{ .Values.includeLabelPrefix }}
{{- end }}
{{- if .Values.ingressClass }}
- -ingress-class={{ .Values.ingressClass }}
{{- end }}
{{- if .Values.eventWebhook }}
- -event-webhook={{ .Values.eventWebhook }}
{{- end }}
{{- if .Values.kubeconfigQPS }}
- -kubeconfig-qps={{ .Values.kubeconfigQPS }}
{{- end }}
{{- if .Values.kubeconfigBurst }}
- -kubeconfig-burst={{ .Values.kubeconfigBurst }}
{{- end }}
{{- if .Values.istio.kubeconfig.secretName }}
- -kubeconfig-service-mesh=/tmp/istio-host/{{ .Values.istio.kubeconfig.key }}
{{- end }}

View File

@@ -0,0 +1,11 @@
{{- if .Values.podDisruptionBudget.enabled }}
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: {{ template "flagger.name" . }}
spec:
minAvailable: {{ .Values.podDisruptionBudget.minAvailable }}
selector:
matchLabels:
app.kubernetes.io/name: {{ template "flagger.name" . }}
{{- end }}

View File

@@ -149,8 +149,19 @@ rules:
resources:
- upstreams
- upstreams/finalizers
- upstreamgroups
- upstreamgroups/finalizers
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- gateway.solo.io
resources:
- routetables
- routetables/finalizers
verbs:
- get
- list
@@ -172,6 +183,18 @@ rules:
- update
- patch
- delete
- apiGroups:
- traefik.containo.us
resources:
- traefikservices
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- nonResourceURLs:
- /version
verbs:

View File

@@ -1,8 +1,8 @@
# Default values for flagger.
image:
repository: weaveworks/flagger
tag: 1.0.1
repository: ghcr.io/fluxcd/flagger
tag: 1.6.1
pullPolicy: IfNotPresent
pullSecret:
@@ -14,9 +14,12 @@ podAnnotations:
prometheus.io/port: "8080"
appmesh.k8s.aws/sidecarInjectorWebhook: disabled
# priority class name for pod priority configuration
podPriorityClassName: ""
metricsServer: "http://prometheus:9090"
# accepted values are kubernetes, istio, linkerd, appmesh, nginx, gloo or supergloo:mesh.namespace (defaults to istio)
# accepted values are kubernetes, istio, linkerd, appmesh, contour, nginx, gloo, skipper, traefik
meshProvider: ""
# single namespace restriction
@@ -121,9 +124,12 @@ tolerations: []
prometheus:
# to be used with ingress controllers
install: false
image: docker.io/prom/prometheus:v2.19.0
image: docker.io/prom/prometheus:v2.23.0
retention: 2h
kubeconfigQPS: ""
kubeconfigBurst: ""
# Istio multi-cluster service mesh (shared control plane single-network)
# https://istio.io/docs/setup/install/multicluster/shared-vpn/
istio:
@@ -132,3 +138,7 @@ istio:
secretName: ""
# istio.kubeconfig.key: The name of secret data key that contains the Istio control plane kubeconfig
key: "kubeconfig"
podDisruptionBudget:
enabled: false
minAvailable: 1

View File

@@ -1,12 +1,12 @@
apiVersion: v1
name: grafana
version: 1.4.0
appVersion: 6.5.1
version: 1.5.0
appVersion: 7.2.0
description: Grafana dashboards for monitoring Flagger canary deployments
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/weaveworks.png
icon: https://raw.githubusercontent.com/fluxcd/flagger/main/docs/logo/weaveworks.png
home: https://flagger.app
sources:
- https://github.com/weaveworks/flagger
- https://github.com/fluxcd/flagger
maintainers:
- name: stefanprodan
url: https://github.com/stefanprodan

View File

@@ -2,7 +2,7 @@
Grafana dashboards for monitoring progressive deployments powered by Flagger and Prometheus.
![flagger-grafana](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/grafana-canary-analysis.png)
![flagger-grafana](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/screens/grafana-canary-analysis.png)
## Prerequisites

View File

@@ -602,11 +602,11 @@
"steppedLine": false,
"targets": [
{
"expr": "sum(rate(container_cpu_usage_seconds_total{cpu=\"total\",namespace=\"$namespace\",pod_name=~\"$primary.*\", container_name!~\"POD|istio-proxy\"}[1m])) by (pod_name)",
"expr": "sum(rate(container_cpu_usage_seconds_total{cpu=\"total\",namespace=\"$namespace\",pod=~\"$primary.*\", container!~\"POD|istio-proxy\"}[1m])) by (pod)",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
"legendFormat": "{{ pod_name }}",
"legendFormat": "{{ pod }}",
"refId": "B"
}
],
@@ -692,11 +692,11 @@
"steppedLine": false,
"targets": [
{
"expr": "sum(rate(container_cpu_usage_seconds_total{cpu=\"total\",namespace=\"$namespace\",pod_name=~\"$canary.*\", pod_name!~\"$primary.*\", container_name!~\"POD|istio-proxy\"}[1m])) by (pod_name)",
"expr": "sum(rate(container_cpu_usage_seconds_total{cpu=\"total\",namespace=\"$namespace\",pod=~\"$canary.*\", pod!~\"$primary.*\", container!~\"POD|istio-proxy\"}[1m])) by (pod)",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
"legendFormat": "{{ pod_name }}",
"legendFormat": "{{ pod }}",
"refId": "B"
}
],
@@ -782,12 +782,12 @@
"steppedLine": false,
"targets": [
{
"expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",pod_name=~\"$primary.*\", container_name!~\"POD|istio-proxy\"}) by (pod_name)",
"expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",pod=~\"$primary.*\", container!~\"POD|istio-proxy\"}) by (pod)",
"format": "time_series",
"hide": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{ pod_name }}",
"legendFormat": "{{ pod }}",
"refId": "B"
}
],
@@ -874,12 +874,12 @@
"steppedLine": false,
"targets": [
{
"expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",pod_name=~\"$canary.*\", pod_name!~\"$primary.*\", container_name!~\"POD|istio-proxy\"}) by (pod_name)",
"expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",pod=~\"$canary.*\", pod!~\"$primary.*\", container!~\"POD|istio-proxy\"}) by (pod)",
"format": "time_series",
"hide": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{ pod_name }}",
"legendFormat": "{{ pod }}",
"refId": "B"
}
],
@@ -975,14 +975,14 @@
"steppedLine": false,
"targets": [
{
"expr": "sum(rate (container_network_receive_bytes_total{namespace=\"$namespace\",pod_name=~\"$primary.*\"}[1m])) ",
"expr": "sum(rate (container_network_receive_bytes_total{namespace=\"$namespace\",pod=~\"$primary.*\"}[1m])) ",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "received",
"refId": "A"
},
{
"expr": "-sum (rate (container_network_transmit_bytes_total{namespace=\"$namespace\",pod_name=~\"$primary.*\"}[1m]))",
"expr": "-sum (rate (container_network_transmit_bytes_total{namespace=\"$namespace\",pod=~\"$primary.*\"}[1m]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "transmitted",
@@ -1081,14 +1081,14 @@
"steppedLine": false,
"targets": [
{
"expr": "sum(rate (container_network_receive_bytes_total{namespace=\"$namespace\",pod_name=~\"$canary.*\",pod_name!~\"$primary.*\"}[1m])) ",
"expr": "sum(rate (container_network_receive_bytes_total{namespace=\"$namespace\",pod=~\"$canary.*\",pod!~\"$primary.*\"}[1m])) ",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "received",
"refId": "A"
},
{
"expr": "-sum (rate (container_network_transmit_bytes_total{namespace=\"$namespace\",pod_name=~\"$canary.*\",pod_name!~\"$primary.*\"}[1m]))",
"expr": "-sum (rate (container_network_transmit_bytes_total{namespace=\"$namespace\",pod=~\"$canary.*\",pod!~\"$primary.*\"}[1m]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "transmitted",

View File

@@ -602,11 +602,11 @@
"steppedLine": false,
"targets": [
{
"expr": "sum(rate(container_cpu_usage_seconds_total{cpu=\"total\",namespace=\"$namespace\",pod_name=~\"$target-primary.*\", container_name!~\"POD|istio-proxy\"}[1m])) by (pod_name)",
"expr": "sum(rate(container_cpu_usage_seconds_total{cpu=\"total\",namespace=\"$namespace\",pod=~\"$target-primary.*\", container!~\"POD|istio-proxy\"}[1m])) by (pod)",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
"legendFormat": "{{ pod_name }}",
"legendFormat": "{{ pod }}",
"refId": "B"
}
],
@@ -692,11 +692,11 @@
"steppedLine": false,
"targets": [
{
"expr": "sum(rate(container_cpu_usage_seconds_total{cpu=\"total\",namespace=\"$namespace\",pod_name=~\"$target.*\", pod_name!~\"$target-primary.*\", container_name!~\"POD|istio-proxy\"}[1m])) by (pod_name)",
"expr": "sum(rate(container_cpu_usage_seconds_total{cpu=\"total\",namespace=\"$namespace\",pod=~\"$target.*\", pod!~\"$target-primary.*\", container!~\"POD|istio-proxy\"}[1m])) by (pod)",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
"legendFormat": "{{ pod_name }}",
"legendFormat": "{{ pod }}",
"refId": "B"
}
],
@@ -782,12 +782,12 @@
"steppedLine": false,
"targets": [
{
"expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",pod_name=~\"$target-primary.*\", container_name!~\"POD|istio-proxy\"}) by (pod_name)",
"expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",pod=~\"$target-primary.*\", container!~\"POD|istio-proxy\"}) by (pod)",
"format": "time_series",
"hide": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{ pod_name }}",
"legendFormat": "{{ pod }}",
"refId": "B"
}
],
@@ -874,12 +874,12 @@
"steppedLine": false,
"targets": [
{
"expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",pod_name=~\"$target.*\", pod_name!~\"$target-primary.*\", container_name!~\"POD|istio-proxy\"}) by (pod_name)",
"expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",pod=~\"$target.*\", pod!~\"$target-primary.*\", container!~\"POD|istio-proxy\"}) by (pod)",
"format": "time_series",
"hide": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{ pod_name }}",
"legendFormat": "{{ pod }}",
"refId": "B"
}
],
@@ -975,14 +975,14 @@
"steppedLine": false,
"targets": [
{
"expr": "sum(rate (container_network_receive_bytes_total{namespace=\"$namespace\",pod_name=~\"$target-primary.*\"}[1m])) ",
"expr": "sum(rate (container_network_receive_bytes_total{namespace=\"$namespace\",pod=~\"$target-primary.*\"}[1m])) ",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "received",
"refId": "A"
},
{
"expr": "-sum (rate (container_network_transmit_bytes_total{namespace=\"$namespace\",pod_name=~\"$target-primary.*\"}[1m]))",
"expr": "-sum (rate (container_network_transmit_bytes_total{namespace=\"$namespace\",pod=~\"$target-primary.*\"}[1m]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "transmitted",
@@ -1081,14 +1081,14 @@
"steppedLine": false,
"targets": [
{
"expr": "sum(rate (container_network_receive_bytes_total{namespace=\"$namespace\",pod_name=~\"$target.*\",pod_name!~\"$target-primary.*\"}[1m])) ",
"expr": "sum(rate (container_network_receive_bytes_total{namespace=\"$namespace\",pod=~\"$target.*\",pod!~\"$target-primary.*\"}[1m])) ",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "received",
"refId": "A"
},
{
"expr": "-sum (rate (container_network_transmit_bytes_total{namespace=\"$namespace\",pod_name=~\"$target.*\",pod_name!~\"$target-primary.*\"}[1m]))",
"expr": "-sum (rate (container_network_transmit_bytes_total{namespace=\"$namespace\",pod=~\"$target.*\",pod!~\"$target-primary.*\"}[1m]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "transmitted",

View File

@@ -403,7 +403,7 @@
"steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.50, sum(irate(istio_request_duration_seconds_bucket{reporter=\"destination\",destination_workload=~\"$primary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le))",
"expr": "histogram_quantile(0.50, sum(irate(istio_request_duration_milliseconds_bucket{reporter=\"destination\",destination_workload=~\"$primary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -411,7 +411,7 @@
"refId": "A"
},
{
"expr": "histogram_quantile(0.90, sum(irate(istio_request_duration_seconds_bucket{reporter=\"destination\",destination_workload=~\"$primary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le))",
"expr": "histogram_quantile(0.90, sum(irate(istio_request_duration_milliseconds_bucket{reporter=\"destination\",destination_workload=~\"$primary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le))",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
@@ -419,7 +419,7 @@
"refId": "B"
},
{
"expr": "histogram_quantile(0.99, sum(irate(istio_request_duration_seconds_bucket{reporter=\"destination\",destination_workload=~\"$primary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le))",
"expr": "histogram_quantile(0.99, sum(irate(istio_request_duration_milliseconds_bucket{reporter=\"destination\",destination_workload=~\"$primary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le))",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
@@ -509,7 +509,7 @@
"steppedLine": false,
"targets": [
{
"expr": "histogram_quantile(0.50, sum(irate(istio_request_duration_seconds_bucket{reporter=\"destination\",destination_workload=~\"$canary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le))",
"expr": "histogram_quantile(0.50, sum(irate(istio_request_duration_milliseconds_bucket{reporter=\"destination\",destination_workload=~\"$canary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le))",
"format": "time_series",
"interval": "",
"intervalFactor": 1,
@@ -517,7 +517,7 @@
"refId": "A"
},
{
"expr": "histogram_quantile(0.90, sum(irate(istio_request_duration_seconds_bucket{reporter=\"destination\",destination_workload=~\"$canary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le))",
"expr": "histogram_quantile(0.90, sum(irate(istio_request_duration_milliseconds_bucket{reporter=\"destination\",destination_workload=~\"$canary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le))",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
@@ -525,7 +525,7 @@
"refId": "B"
},
{
"expr": "histogram_quantile(0.99, sum(irate(istio_request_duration_seconds_bucket{reporter=\"destination\",destination_workload=~\"$canary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le))",
"expr": "histogram_quantile(0.99, sum(irate(istio_request_duration_milliseconds_bucket{reporter=\"destination\",destination_workload=~\"$canary\", destination_workload_namespace=~\"$namespace\"}[1m])) by (le))",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
@@ -630,11 +630,11 @@
"steppedLine": false,
"targets": [
{
"expr": "sum(rate(container_cpu_usage_seconds_total{cpu=\"total\",namespace=\"$namespace\",pod_name=~\"$primary.*\", container_name!~\"POD|istio-proxy\"}[1m])) by (pod_name)",
"expr": "sum(rate(container_cpu_usage_seconds_total{cpu=\"total\",namespace=\"$namespace\",pod=~\"$primary.*\", container!~\"POD|istio-proxy\"}[1m])) by (pod)",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
"legendFormat": "{{ pod_name }}",
"legendFormat": "{{ pod }}",
"refId": "B"
}
],
@@ -720,11 +720,11 @@
"steppedLine": false,
"targets": [
{
"expr": "sum(rate(container_cpu_usage_seconds_total{cpu=\"total\",namespace=\"$namespace\",pod_name=~\"$canary.*\", pod_name!~\"$primary.*\", container_name!~\"POD|istio-proxy\"}[1m])) by (pod_name)",
"expr": "sum(rate(container_cpu_usage_seconds_total{cpu=\"total\",namespace=\"$namespace\",pod=~\"$canary.*\", pod!~\"$primary.*\", container!~\"POD|istio-proxy\"}[1m])) by (pod)",
"format": "time_series",
"hide": false,
"intervalFactor": 1,
"legendFormat": "{{ pod_name }}",
"legendFormat": "{{ pod }}",
"refId": "B"
}
],
@@ -810,12 +810,12 @@
"steppedLine": false,
"targets": [
{
"expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",pod_name=~\"$primary.*\", container_name!~\"POD|istio-proxy\"}) by (pod_name)",
"expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",pod=~\"$primary.*\", container!~\"POD|istio-proxy\"}) by (pod)",
"format": "time_series",
"hide": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{ pod_name }}",
"legendFormat": "{{ pod }}",
"refId": "B"
}
],
@@ -902,12 +902,12 @@
"steppedLine": false,
"targets": [
{
"expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",pod_name=~\"$canary.*\", pod_name!~\"$primary.*\", container_name!~\"POD|istio-proxy\"}) by (pod_name)",
"expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\",pod=~\"$canary.*\", pod!~\"$primary.*\", container!~\"POD|istio-proxy\"}) by (pod)",
"format": "time_series",
"hide": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{ pod_name }}",
"legendFormat": "{{ pod }}",
"refId": "B"
}
],
@@ -1003,14 +1003,14 @@
"steppedLine": false,
"targets": [
{
"expr": "sum(rate (container_network_receive_bytes_total{namespace=\"$namespace\",pod_name=~\"$primary.*\"}[1m])) ",
"expr": "sum(rate (container_network_receive_bytes_total{namespace=\"$namespace\",pod=~\"$primary.*\"}[1m])) ",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "received",
"refId": "A"
},
{
"expr": "-sum (rate (container_network_transmit_bytes_total{namespace=\"$namespace\",pod_name=~\"$primary.*\"}[1m]))",
"expr": "-sum (rate (container_network_transmit_bytes_total{namespace=\"$namespace\",pod=~\"$primary.*\"}[1m]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "transmitted",
@@ -1109,14 +1109,14 @@
"steppedLine": false,
"targets": [
{
"expr": "sum(rate (container_network_receive_bytes_total{namespace=\"$namespace\",pod_name=~\"$canary.*\",pod_name!~\"$primary.*\"}[1m])) ",
"expr": "sum(rate (container_network_receive_bytes_total{namespace=\"$namespace\",pod=~\"$canary.*\",pod!~\"$primary.*\"}[1m])) ",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "received",
"refId": "A"
},
{
"expr": "-sum (rate (container_network_transmit_bytes_total{namespace=\"$namespace\",pod_name=~\"$canary.*\",pod_name!~\"$primary.*\"}[1m]))",
"expr": "-sum (rate (container_network_transmit_bytes_total{namespace=\"$namespace\",pod=~\"$canary.*\",pod!~\"$primary.*\"}[1m]))",
"format": "time_series",
"intervalFactor": 1,
"legendFormat": "transmitted",

View File

@@ -6,7 +6,7 @@ replicaCount: 1
image:
repository: grafana/grafana
tag: 6.5.1
tag: 7.3.4
pullPolicy: IfNotPresent
podAnnotations: {}
@@ -32,7 +32,7 @@ affinity: {}
user: admin
password:
# Istio Prometheus instance
# Prometheus instance
url: http://prometheus:9090
# Weave Cloud instance token

View File

@@ -6,9 +6,9 @@ kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger's load testing services based on rakyll/hey and bojand/ghz that generate traffic during canary analysis when configured as a webhook.
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/weaveworks.png
icon: https://raw.githubusercontent.com/fluxcd/flagger/main/docs/logo/weaveworks.png
sources:
- https://github.com/weaveworks/flagger
- https://github.com/fluxcd/flagger
maintainers:
- name: stefanprodan
url: https://github.com/stefanprodan

View File

@@ -1,6 +1,6 @@
# Flagger load testing service
[Flagger's](https://github.com/weaveworks/flagger) load testing service is based on
[Flagger's](https://github.com/fluxcd/flagger) load testing service is based on
[rakyll/hey](https://github.com/rakyll/hey) and
[bojand/ghz](https://github.com/bojand/ghz).
It can be used to generate HTTP and gRPC traffic during canary analysis when configured as a webhook.
@@ -67,6 +67,9 @@ Parameter | Description | Default
`istio.gateway.enabled` | Create Istio gateway in namespace | `false`
`istio.tls.enabled` | Enable TLS in gateway ( TLS secrets should be in namespace ) | `false`
`istio.tls.httpsRedirect` | Redirect traffic to TLS port | `false`
`podPriorityClassName` | PriorityClass name for pod priority configuration | ""
`securityContext.enabled` | Add securityContext to container | ""
`securityContext.context` | securityContext to add | ""
Specify each parameter using the `--set key=value[,key=value]` argument to `helm upgrade`. For example,

View File

@@ -27,8 +27,15 @@ spec:
{{- else if .Values.rbac.create }}
serviceAccountName: {{ include "loadtester.fullname" . }}
{{- end }}
{{- if .Values.podPriorityClassName }}
priorityClassName: {{ .Values.podPriorityClassName }}
{{- end }}
containers:
- name: {{ .Chart.Name }}
{{- if .Values.securityContext.enabled }}
securityContext:
{{ toYaml .Values.securityContext.context | indent 12 }}
{{- end }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:

View File

@@ -1,7 +1,7 @@
replicaCount: 1
image:
repository: weaveworks/flagger-loadtester
repository: ghcr.io/fluxcd/flagger-loadtester
tag: 0.18.0
pullPolicy: IfNotPresent
@@ -9,6 +9,8 @@ podAnnotations:
prometheus.io/scrape: "true"
prometheus.io/port: "8080"
podPriorityClassName: ""
logLevel: info
cmd:
timeout: 1h
@@ -69,3 +71,11 @@ istio:
tls:
enabled: false
httpsRedirect: false
# when enabled, it will add a security context for the loadtester pod
securityContext:
enabled: false
context:
readOnlyRootFilesystem: true
runAsUser: 100
runAsGroup: 101

View File

@@ -1,11 +1,11 @@
apiVersion: v1
version: 3.1.1
appVersion: 3.1.0
version: 5.0.0
appVersion: 5.0.0
name: podinfo
engine: gotpl
description: Flagger canary deployment demo application
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/weaveworks.png
icon: https://raw.githubusercontent.com/fluxcd/flagger/main/docs/logo/weaveworks.png
sources:
- https://github.com/stefanprodan/podinfo
maintainers:

View File

@@ -1,7 +1,7 @@
# Default values for podinfo.
image:
repository: stefanprodan/podinfo
tag: 3.1.0
repository: ghcr.io/stefanprodan/podinfo
tag: 5.0.0
pullPolicy: IfNotPresent
podAnnotations: {}

View File

@@ -1,3 +1,19 @@
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
@@ -9,7 +25,8 @@ import (
"strings"
"time"
semver "github.com/Masterminds/semver/v3"
"github.com/Masterminds/semver/v3"
"github.com/go-logr/zapr"
"go.uber.org/zap"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
@@ -21,28 +38,32 @@ import (
"k8s.io/client-go/tools/leaderelection/resourcelock"
"k8s.io/client-go/transport"
_ "k8s.io/code-generator/cmd/client-gen/generators"
"k8s.io/klog/v2"
"github.com/weaveworks/flagger/pkg/canary"
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
informers "github.com/weaveworks/flagger/pkg/client/informers/externalversions"
"github.com/weaveworks/flagger/pkg/controller"
"github.com/weaveworks/flagger/pkg/logger"
"github.com/weaveworks/flagger/pkg/metrics/observers"
"github.com/weaveworks/flagger/pkg/notifier"
"github.com/weaveworks/flagger/pkg/router"
"github.com/weaveworks/flagger/pkg/server"
"github.com/weaveworks/flagger/pkg/signals"
"github.com/weaveworks/flagger/pkg/version"
"github.com/fluxcd/flagger/pkg/canary"
clientset "github.com/fluxcd/flagger/pkg/client/clientset/versioned"
informers "github.com/fluxcd/flagger/pkg/client/informers/externalversions"
"github.com/fluxcd/flagger/pkg/controller"
"github.com/fluxcd/flagger/pkg/logger"
"github.com/fluxcd/flagger/pkg/metrics/observers"
"github.com/fluxcd/flagger/pkg/notifier"
"github.com/fluxcd/flagger/pkg/router"
"github.com/fluxcd/flagger/pkg/server"
"github.com/fluxcd/flagger/pkg/signals"
"github.com/fluxcd/flagger/pkg/version"
)
var (
masterURL string
kubeconfig string
kubeconfigQPS int
kubeconfigBurst int
metricsServer string
controlLoopInterval time.Duration
logLevel string
port string
msteamsURL string
includeLabelPrefix string
slackURL string
slackUser string
slackChannel string
@@ -64,6 +85,8 @@ var (
func init() {
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
flag.IntVar(&kubeconfigQPS, "kubeconfig-qps", 100, "Set QPS for kubeconfig.")
flag.IntVar(&kubeconfigBurst, "kubeconfig-burst", 250, "Set Burst for kubeconfig.")
flag.StringVar(&masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
flag.StringVar(&metricsServer, "metrics-server", "http://prometheus:9090", "Prometheus URL.")
flag.DurationVar(&controlLoopInterval, "control-loop-interval", 10*time.Second, "Kubernetes API sync interval.")
@@ -74,11 +97,12 @@ func init() {
flag.StringVar(&slackChannel, "slack-channel", "", "Slack channel.")
flag.StringVar(&eventWebhook, "event-webhook", "", "Webhook for publishing flagger events")
flag.StringVar(&msteamsURL, "msteams-url", "", "MS Teams incoming webhook URL.")
flag.StringVar(&includeLabelPrefix, "include-label-prefix", "", "List of prefixes of labels that are copied when creating primary deployments or daemonsets. Use * to include all.")
flag.IntVar(&threadiness, "threadiness", 2, "Worker concurrency.")
flag.BoolVar(&zapReplaceGlobals, "zap-replace-globals", false, "Whether to change the logging level of the global zap logger.")
flag.StringVar(&zapEncoding, "zap-encoding", "json", "Zap logger encoding.")
flag.StringVar(&namespace, "namespace", "", "Namespace that flagger would watch canary object.")
flag.StringVar(&meshProvider, "mesh-provider", "istio", "Service mesh provider, can be istio, linkerd, appmesh, contour, gloo or nginx.")
flag.StringVar(&meshProvider, "mesh-provider", "istio", "Service mesh provider, can be istio, linkerd, appmesh, contour, gloo, nginx, skipper or traefik.")
flag.StringVar(&selectorLabels, "selector-labels", "app,name,app.kubernetes.io/name", "List of pod labels that Flagger uses to create pod selectors.")
flag.StringVar(&ingressAnnotationsPrefix, "ingress-annotations-prefix", "nginx.ingress.kubernetes.io", "Annotations prefix for NGINX ingresses.")
flag.StringVar(&ingressClass, "ingress-class", "", "Ingress class used for annotating HTTPProxy objects.")
@@ -90,6 +114,7 @@ func init() {
}
func main() {
klog.InitFlags(nil)
flag.Parse()
if ver {
@@ -105,6 +130,8 @@ func main() {
zap.ReplaceGlobals(logger.Desugar())
}
klog.SetLogger(zapr.NewLogger(logger.Desugar()))
defer logger.Sync()
stopCh := signals.SetupSignalHandler()
@@ -116,6 +143,9 @@ func main() {
logger.Fatalf("Error building kubeconfig: %v", err)
}
cfg.QPS = float32(kubeconfigQPS)
cfg.Burst = kubeconfigBurst
kubeClient, err := kubernetes.NewForConfig(cfg)
if err != nil {
logger.Fatalf("Error building kubernetes clientset: %v", err)
@@ -135,6 +165,9 @@ func main() {
logger.Fatalf("Error building host kubeconfig: %v", err)
}
cfgHost.QPS = float32(kubeconfigQPS)
cfgHost.Burst = kubeconfigBurst
meshClient, err := clientset.NewForConfig(cfgHost)
if err != nil {
logger.Fatalf("Error building mesh clientset: %v", err)
@@ -184,7 +217,9 @@ func main() {
configTracker = &canary.NopTracker{}
}
canaryFactory := canary.NewFactory(kubeClient, flaggerClient, configTracker, labels, logger)
includeLabelPrefixArray := strings.Split(includeLabelPrefix, ",")
canaryFactory := canary.NewFactory(kubeClient, flaggerClient, configTracker, labels, includeLabelPrefixArray, logger)
c := controller.NewController(
kubeClient,

View File

@@ -1,3 +1,19 @@
/*
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
@@ -5,9 +21,9 @@ import (
"log"
"time"
"github.com/weaveworks/flagger/pkg/loadtester"
"github.com/weaveworks/flagger/pkg/logger"
"github.com/weaveworks/flagger/pkg/signals"
"github.com/fluxcd/flagger/pkg/loadtester"
"github.com/fluxcd/flagger/pkg/logger"
"github.com/fluxcd/flagger/pkg/signals"
"go.uber.org/zap"
)

View File

@@ -1,73 +0,0 @@
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, gender identity and expression, level of experience,
education, socio-economic status, nationality, personal appearance, race,
religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior
may be reported by contacting stefan.prodan(at)gmail.com.
All complaints will be reviewed and investigated and will result in a response that is deemed
necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of
an incident. Further details of specific enforcement policies may be
posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org), version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html

Binary file not shown.

After

Width:  |  Height:  |  Size: 47 KiB

View File

@@ -4,29 +4,33 @@ description: Flagger is a progressive delivery Kubernetes operator
# Introduction
[Flagger](https://github.com/weaveworks/flagger) is a **Kubernetes** operator that automates the promotion of
canary deployments using **Istio**, **Linkerd**, **App Mesh**, **NGINX**, **Contour** or **Gloo** routing for
traffic shifting and **Prometheus** metrics for canary analysis. The canary analysis can be extended with webhooks for
running system integration/acceptance tests, load tests, or any other custom validation.
[Flagger](https://github.com/fluxcd/flagger) is a **Kubernetes** operator
that automates the promotion of canary deployments using
**Istio**, **Linkerd**, **App Mesh**, **NGINX**, **Skipper**, **Contour**, **Gloo** or **Traefik**
routing for traffic shifting and **Prometheus** metrics for canary analysis.
The canary analysis can be extended with webhooks for running
system integration/acceptance tests, load tests, or any other custom validation.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators
like HTTP requests success rate, requests average duration and pods health.
Based on analysis of the **KPIs** a canary is promoted or aborted, and the analysis result is published to **Slack** or **MS Teams**.
Flagger implements a control loop that gradually shifts traffic to the canary
while measuring key performance indicators like HTTP requests success rate,
requests average duration and pods health.
Based on analysis of the **KPIs** a canary is promoted or aborted,
and the analysis result is published to **Slack** or **MS Teams**.
![Flagger overview diagram](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-overview.png)
![Flagger overview diagram](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/flagger-canary-overview.png)
Flagger can be configured with Kubernetes custom resources and is compatible with any CI/CD solutions made for Kubernetes.
Since Flagger is declarative and reacts to Kubernetes events,
it can be used in **GitOps** pipelines together with Flux CD or JenkinsX.
This project is sponsored by [Weaveworks](https://www.weave.works/)
Flagger is a [Cloud Native Computing Foundation](https://cncf.io/) project.
## Getting started
To get started with Flagger, chose one of the supported routing providers
and [install](install/flagger-install-on-kubernetes.md) Flagger with Helm or Kustomize.
To get started with Flagger, choose one of the supported routing providers and
[install](install/flagger-install-on-kubernetes.md) Flagger with Helm or Kustomize.
After install Flagger, you can follow one of the tutorials:
After installing Flagger, you can follow one of these tutorials to get started:
**Service mesh tutorials**
@@ -39,6 +43,8 @@ After install Flagger, you can follow one of the tutorials:
* [Contour](tutorials/contour-progressive-delivery.md)
* [Gloo](tutorials/gloo-progressive-delivery.md)
* [NGINX Ingress](tutorials/nginx-progressive-delivery.md)
* [Skipper Ingress](tutorials/skipper-progressive-delivery.md)
* [Traefik](tutorials/traefik-progressive-delivery.md)
**Hands-on GitOps workshops**

View File

@@ -24,17 +24,19 @@
* [Istio A/B Testing](tutorials/istio-ab-testing.md)
* [Linkerd Canary Deployments](tutorials/linkerd-progressive-delivery.md)
* [App Mesh Canary Deployments](tutorials/appmesh-progressive-delivery.md)
* [NGINX Canary Deployments](tutorials/nginx-progressive-delivery.md)
* [Gloo Canary Deployments](tutorials/gloo-progressive-delivery.md)
* [Contour Canary Deployments](tutorials/contour-progressive-delivery.md)
* [Gloo Canary Deployments](tutorials/gloo-progressive-delivery.md)
* [NGINX Canary Deployments](tutorials/nginx-progressive-delivery.md)
* [Skipper Canary Deployments](tutorials/skipper-progressive-delivery.md)
* [Traefik Canary Deployments](tutorials/traefik-progressive-delivery.md)
* [Blue/Green Deployments](tutorials/kubernetes-blue-green.md)
* [Crossover Canary Deployments](tutorials/crossover-progressive-delivery.md)
* [Canary analysis with Prometheus Operator](tutorials/prometheus-operator.md)
* [Canaries with Helm charts and GitOps](tutorials/canary-helm-gitops.md)
* [Zero downtime deployments](tutorials/zero-downtime-deployments.md)
* [Rollout Weights](tutorials/rollout-weights.md)
## Dev
* [Development Guide](dev/dev-guide.md)
* [Release Guide](dev/release-guide.md)
* [Upgrade Guide](dev/upgrade-guide.md)
* [Upgrade Guide](dev/upgrade-guide.md)

View File

@@ -2,36 +2,36 @@
This document describes how to build, test and run Flagger from source.
### Setup dev environment
## Setup dev environment
Flagger is written in Go and uses Go modules for dependency management.
On your dev machine install the following tools:
* go >= 1.14
* git >= 2.20
* bash >= 5.0
* make >= 3.81
* kubectl >= 1.16
* kustomize >= 3.5
* helm >= 3.0
* docker >= 19.03
* go &gt;= 1.14
* git &gt;= 2.20
* bash &gt;= 5.0
* make &gt;= 3.81
* kubectl &gt;= 1.16
* kustomize &gt;= 3.5
* helm &gt;= 3.0
* docker &gt;= 19.03
You'll also need a Kubernetes cluster for testing Flagger.
You can use Minikube, Kind, Docker desktop or any remote cluster
(AKS/EKS/GKE/etc) Kubernetes version 1.14 or newer.
You can use Minikube, Kind, Docker desktop or any remote cluster (AKS/EKS/GKE/etc) Kubernetes version 1.16 or newer.
To start contributing to Flagger, fork the [repository](https://github.com/weaveworks/flagger) on GitHub.
To start contributing to Flagger, fork the [repository](https://github.com/fluxcd/flagger) on GitHub.
Create a dir inside your `GOPATH`:
```bash
mkdir -p $GOPATH/src/github.com/weaveworks
mkdir -p $GOPATH/src/github.com/fluxcd
```
Clone your fork:
```bash
cd $GOPATH/src/github.com/weaveworks
cd $GOPATH/src/github.com/fluxcd
git clone https://github.com/YOUR_USERNAME/flagger
cd flagger
```
@@ -39,18 +39,18 @@ cd flagger
Set Flagger repository as upstream:
```bash
git remote add upstream https://github.com/weaveworks/flagger.git
git remote add upstream https://github.com/fluxcd/flagger.git
```
Sync your fork regularly to keep it up-to-date with upstream:
```bash
git fetch upstream
git checkout master
git merge upstream/master
git checkout main
git merge upstream/main
```
### Build
## Build
Download Go modules:
@@ -58,19 +58,30 @@ Download Go modules:
go mod download
```
Build Flagger binary and container image:
Build Flagger binary:
```bash
make build
```
Build load tester binary and container image:
Build load tester binary:
```bash
make loadtester-build
```
### Code changes
## Code changes
We require all commits to be signed. By signing off with your signature, you
certify that you wrote the patch or otherwise have the right to contribute the
material by the rules of the [DCO](https://raw.githubusercontent.com/fluxcd/flagger/main/DCO).
If your `user.name` and `user.email` are configured in your Git config,
you can sign your commit automatically with:
```bash
git commit -s
```
Before submitting a PR, make sure your changes are covered by unit tests.
@@ -98,7 +109,7 @@ Run unit tests:
make test
```
### API changes
## API changes
If you made changes to `pkg/apis` regenerate the Kubernetes client sets with:
@@ -114,10 +125,11 @@ make crd
Note that any change to the CRDs must be accompanied by an update to the Open API schema.
### Manual testing
## Manual testing
Install a service mesh and/or an ingress controller on your cluster and deploy Flagger
using one of the install options [listed here](https://docs.flagger.app/install/flagger-install-on-kubernetes).
Install a service mesh and/or an ingress controller on your cluster
and deploy Flagger using one of the install options
[listed here](https://docs.flagger.app/install/flagger-install-on-kubernetes).
If you made changes to the CRDs, apply your local copy with:
@@ -150,7 +162,7 @@ Another option to manually test your changes is to build and push the image to y
```bash
make build
docker tag weaveworks/flagger:latest <YOUR-DOCKERHUB-USERNAME>/flagger:<YOUR-TAG>
docker build -t <YOUR-DOCKERHUB-USERNAME>/flagger:<YOUR-TAG> .
docker push <YOUR-DOCKERHUB-USERNAME>/flagger:<YOUR-TAG>
```
@@ -163,7 +175,7 @@ kubectl -n istio-system scale deployment/flagger --replicas=1
Now you can use one of the [tutorials](https://docs.flagger.app/) to manually test your changes.
### Integration testing
## Integration testing
Flagger end-to-end tests can be run locally with [Kubernetes Kind](https://github.com/kubernetes-sigs/kind).
@@ -173,39 +185,22 @@ Create a Kind cluster:
kind create cluster
```
Install a service mesh and/or an ingress controller in Kind.
Linkerd example:
```bash
linkerd install | kubectl apply -f -
linkerd check
```
Build Flagger container image and load it on the cluster:
```bash
make build
docker tag weaveworks/flagger:latest test/flagger:latest
docker build -t test/flagger:latest .
kind load docker-image test/flagger:latest
```
Install Flagger on the cluster and set the test image:
Run the Istio e2e tests:
```bash
kubectl apply -k ./kustomize/linkerd
kubectl -n linkerd set image deployment/flagger flagger=test/flagger:latest
kubectl -n linkerd rollout status deployment/flagger
```
Run the Linkerd e2e tests:
```bash
./test/e2e-linkerd-tests.sh
./test/istio/run.sh
```
For each service mesh and ingress controller there is a dedicated e2e test suite,
choose one that matches your changes from this [list](https://github.com/weaveworks/flagger/tree/master/test).
choose one that matches your changes from this [list](https://github.com/fluxcd/flagger/tree/main/test).
When you open a pull request on Flagger repo, the unit and integration tests will be run in CI.

View File

@@ -2,33 +2,33 @@
This document describes how to release Flagger.
### Release
## Release
To release a new Flagger version (e.g. `2.0.0`) follow these steps:
* create a branch `git checkout -b prep-2.0.0`
* set the version in code and manifests `TAG=2.0.0 make version-set`
* commit changes and merge PR
* checkout master `git checkout master && git pull`
* checkout master `git checkout main && git pull`
* tag main `make release`
### CI
## CI
After the tag has been pushed to GitHub, the CI release pipeline does the following:
* creates a GitHub release
* pushes the Flagger binary and change log to GitHub release
* pushes the Flagger container image to Docker Hub
* pushes the Helm chart to github-pages branch
* GitHub pages publishes the new chart version on the Helm repository
### Docs
## Docs
The documentation [website](https://docs.flagger.app) is built from the `docs` branch.
After a Flagger release, publish the docs with:
* `git checkout master && git pull`
* `git checkout main && git pull`
* `git checkout docs`
* `git rebase master`
* `git rebase main`
* `git push origin docs`

View File

@@ -2,9 +2,10 @@
This document describes how to upgrade Flagger.
### Upgrade canaries v1alpha3 to v1beta1
## Upgrade canaries v1alpha3 to v1beta1
Canary CRD changes in `canaries.flagger.app/v1beta1`:
* the `spec.canaryAnalysis` field has been deprecated and replaced with `spec.analysis`
* the `spec.analysis.interval` and `spec.analysis.threshold` fields are required
* the `status.lastAppliedSpec` and `status.lastPromotedSpec` hashing algorithm changed to `hash/fnv`
@@ -17,17 +18,17 @@ Canary CRD changes in `canaries.flagger.app/v1beta1`:
* the `spec.service.meshName` field has been deprecated and no longer used for `provider: appmesh:v1beta2`
Upgrade procedure:
* install the `v1beta1` CRDs
* update Flagger deployment
* replace `apiVersion: flagger.app/v1alpha3` with `apiVersion: flagger.app/v1beta1` in all canary manifests
* replace `spec.canaryAnalysis` with `spec.analysis` in all canary manifests
* update canary manifests in cluster
**Note** that after upgrading Flagger, all canaries will be triggered as the hash value used for tracking changes
is computed differently. You can set `spec.skipAnalysis: true` in all canary manifests before upgrading Flagger,
do the upgrade, wait for Flagger to finish the no-op promotions and finally set `skipAnalysis` to `false`.
**Note** that after upgrading Flagger, all canaries will be triggered as the hash value used for tracking changes is computed differently. You can set `spec.skipAnalysis: true` in all canary manifests before upgrading Flagger, do the upgrade, wait for Flagger to finish the no-op promotions and finally set `skipAnalysis` to `false`.
Update builtin metrics:
* replace `threshold` with `thresholdRange.min` for request-success-rate
* replace `threshold` with `thresholdRange.max` for request-duration
@@ -43,11 +44,9 @@ metrics:
interval: 1m
```
### Istio telemetry v2
## Istio telemetry v2
Istio 1.5 comes with a breaking change for Flagger users. In Istio telemetry v2 the metric
`istio_request_duration_seconds_bucket` has been removed and replaced with `istio_request_duration_milliseconds_bucket`
and this breaks the `request-duration` metric check.
Istio 1.5 comes with a breaking change for Flagger users. In Istio telemetry v2 the metric `istio_request_duration_seconds_bucket` has been removed and replaced with `istio_request_duration_milliseconds_bucket` and this breaks the `request-duration` metric check.
If you are using **Istio 1.4**, you can create a metric template using the old duration metric like this:
@@ -88,3 +87,4 @@ metrics:
max: 0.500
interval: 1m
```

View File

@@ -1,30 +1,33 @@
# Frequently asked questions
# FAQ
### Deployment Strategies
## Deployment Strategies
**Which deployment strategies are supported by Flagger?**
Flagger implements the following deployment strategies:
* [Canary Release](usage/deployment-strategies.md#canary-release)
* [A/B Testing](usage/deployment-strategies.md#a-b-testing)
* [Blue/Green](usage/deployment-strategies.md#blue-green-deployments)
* [Blue/Green Mirroring](usage/deployment-strategies.md#blue-green-with-traffic-mirroring)
* [A/B Testing](usage/deployment-strategies.md#ab-testing)
* [Blue/Green](usage/deployment-strategies.md#bluegreen-deployments)
* [Blue/Green Mirroring](usage/deployment-strategies.md#bluegreen-with-traffic-mirroring)
**When should I use A/B testing instead of progressive traffic shifting?**
For frontend applications that require session affinity you should use HTTP headers or cookies match conditions
to ensure a set of users will stay on the same version for the whole duration of the canary analysis.
For frontend applications that require session affinity you should use HTTP headers or
cookies match conditions to ensure a set of users will stay on the same version for
the whole duration of the canary analysis.
**Can I use Flagger to manage applications that live outside of a service mesh?**
For applications that are not deployed on a service mesh, Flagger can orchestrate Blue/Green style deployments
with Kubernetes L4 networking.
For applications that are not deployed on a service mesh,
Flagger can orchestrate Blue/Green style deployments with Kubernetes L4 networking.
**When can I use traffic mirroring?**
Traffic mirroring can be used for Blue/Green deployment strategy or a pre-stage in a Canary release.
Traffic mirroring will copy each incoming request, sending one request to the primary and one to the canary service.
Mirroring should be used for requests that are **idempotent** or capable of being processed twice (once by the primary and once by the canary).
Mirroring should be used for requests that are **idempotent**
or capable of being processed twice (once by the primary and once by the canary).
**How to retry a failed release?**
@@ -46,7 +49,7 @@ spec:
timestamp: "2020-03-10T14:24:48+0000"
```
### Kubernetes services
## Kubernetes services
**How is an application exposed inside the cluster?**
@@ -74,20 +77,26 @@ spec:
portName: http
```
If the `service.name` is not specified, then `targetRef.name` is used for the apex domain and canary/primary services name prefix.
If the `service.name` is not specified, then `targetRef.name` is used for
the apex domain and canary/primary services name prefix.
You should treat the service name as an immutable field, changing it could result in routing conflicts.
Based on the canary spec service, Flagger generates the following Kubernetes ClusterIP service:
* `<service.name>.<namespace>.svc.cluster.local`
selector `app=<name>-primary`
* `<service.name>-primary.<namespace>.svc.cluster.local`
selector `app=<name>-primary`
* `<service.name>-canary.<namespace>.svc.cluster.local`
selector `app=<name>`
This ensures that traffic coming from a namespace outside the mesh to `podinfo.test:9898`
will be routed to the latest stable release of your app.
will be routed to the latest stable release of your app.
```yaml
apiVersion: v1
@@ -133,15 +142,15 @@ spec:
targetPort: http
```
The `podinfo-canary.test:9898` address is available only during the
canary analysis and can be used for conformance testing or load testing.
The `podinfo-canary.test:9898` address is available only during the canary analysis
and can be used for conformance testing or load testing.
### Multiple ports
## Multiple ports
**My application listens on multiple ports, how can I expose them inside the cluster?**
If port discovery is enabled, Flagger scans the deployment spec and extracts the containers
ports excluding the port specified in the canary service and Envoy sidecar ports.
If port discovery is enabled, Flagger scans the deployment spec and extracts the containers ports excluding
the port specified in the canary service and Envoy sidecar ports.
These ports will be used when generating the ClusterIP services.
For a deployment that exposes two ports:
@@ -184,7 +193,7 @@ spec:
Both port `8080` and `9090` will be added to the ClusterIP services.
### Label selectors
## Label selectors
**What labels selectors are supported by Flagger?**
@@ -205,8 +214,8 @@ spec:
app: podinfo
```
Besides `app` Flagger supports `name` and `app.kubernetes.io/name` selectors. If you use a different
convention you can specify your label with the `-selector-labels` flag.
Besides `app` Flagger supports `name` and `app.kubernetes.io/name` selectors.
If you use a different convention you can specify your label with the `-selector-labels` flag.
**Is pod affinity and anti affinity supported?**
@@ -241,9 +250,9 @@ spec:
topologyKey: kubernetes.io/hostname
```
### Metrics
## Metrics
**How does Flagger measures the request success rate and duration?**
**How does Flagger measure the request success rate and duration?**
Flagger measures the request success rate and duration using Prometheus queries.
@@ -287,7 +296,7 @@ sum(
)
```
Envoy query (App Mesh, Contour or Gloo):
Envoy query (App Mesh):
```javascript
sum(
@@ -310,6 +319,27 @@ sum(
)
```
Envoy query (Contour and Gloo):
```javascript
sum(
rate(
envoy_cluster_upstream_rq{
envoy_cluster_name=~"$namespace-$workload",
envoy_response_code!~"5.*"
}[$interval]
)
)
/
sum(
rate(
envoy_cluster_upstream_rq{
envoy_cluster_name=~"$namespace-$workload",
}[$interval]
)
)
```
**HTTP requests milliseconds duration P99**
Spec:
@@ -341,7 +371,7 @@ histogram_quantile(0.99,
)
```
Envoy query (App Mesh, Contour or Gloo):
Envoy query (App Mesh, Contour and Gloo):
```javascript
histogram_quantile(0.99,
@@ -360,18 +390,18 @@ histogram_quantile(0.99,
**Can I use custom metrics?**
The analysis can be extended with metrics provided by Prometheus, Datadog and AWS CloudWatch. For more details
on how custom metrics can be used please read the [metrics docs](usage/metrics.md).
The analysis can be extended with metrics provided by Prometheus, Datadog and AWS CloudWatch.
For more details on how custom metrics can be used please read the [metrics docs](usage/metrics.md).
### Istio routing
## Istio routing
**How does Flagger interact with Istio?**
Flagger creates an Istio Virtual Service and Destination Rules based on the Canary service spec.
The service configuration lets you expose an app inside or outside the mesh.
You can also define traffic policies, HTTP match conditions, URI rewrite rules, CORS policies, timeout and retries.
Flagger creates an Istio Virtual Service and Destination Rules based on the Canary service spec.
The service configuration lets you expose an app inside or outside the mesh. You can also define traffic policies,
HTTP match conditions, URI rewrite rules, CORS policies, timeout and retries.
The following spec exposes the `frontend` workload inside the mesh on `frontend.test.svc.cluster.local:9898`
The following spec exposes the `frontend` workload inside the mesh on `frontend.test.svc.cluster.local:9898`
and outside the mesh on `frontend.example.com`. You'll have to specify an Istio ingress gateway for external hosts.
```yaml
@@ -551,10 +581,91 @@ spec:
app: backend-primary
```
Flagger works for user facing apps exposed outside the cluster via an ingress gateway
and for backend HTTP APIs that are accessible only from inside the mesh.
Flagger works for user facing apps exposed outside the cluster via an ingress gateway and for backend HTTP APIs
that are accessible only from inside the mesh.
### Istio Ingress Gateway
If `Delegation` is enabled, Flagger will generate an Istio VirtualService without hosts and gateways,
making the service compatible with Istio delegation.
```yaml
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
name: backend
namespace: test
spec:
service:
delegation: true
port: 9898
targetRef:
apiVersion: v1
kind: Deployment
name: podinfo
analysis:
interval: 15s
threshold: 15
maxWeight: 30
stepWeight: 10
```
Based on the above spec, Flagger will create the following virtual service:
```yaml
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: backend
namespace: test
ownerReferences:
- apiVersion: flagger.app/v1beta1
blockOwnerDeletion: true
controller: true
kind: Canary
name: backend
uid: 58562662-5e10-4512-b269-2b789c1b30fe
spec:
http:
- route:
- destination:
host: podinfo-primary
weight: 100
- destination:
host: podinfo-canary
weight: 0
```
Therefore, the following virtual service forwards the traffic to `/podinfo` via the above delegate VirtualService.
```yaml
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: frontend
namespace: test
spec:
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
hosts:
- frontend.example.com
- frontend
http:
- match:
- uri:
prefix: /podinfo
rewrite:
uri: /
delegate:
name: backend
namespace: test
```
Note that pilot env `PILOT_ENABLE_VIRTUAL_SERVICE_DELEGATE` must also be set.
For the use of Istio Delegation, you can refer to the documentation of
[Virtual Service](https://istio.io/latest/docs/reference/config/networking/virtual-service/#Delegate)
and [pilot environment variables](https://istio.io/latest/docs/reference/commands/pilot-discovery/#envvars).
## Istio Ingress Gateway
**How can I expose multiple canaries on the same external domain?**
@@ -597,13 +708,15 @@ spec:
uri: /
```
Based on the above configuration, Flagger will create two virtual services bounded to the same ingress gateway and external host.
Istio Pilot will [merge](https://istio.io/help/ops/traffic-management/deploy-guidelines/#multiple-virtual-services-and-destination-rules-for-the-same-host)
the two services and the website rule will be moved to the end of the list in the merged configuration.
Based on the above configuration, Flagger will create two virtual services bounded
to the same ingress gateway and external host.
Istio Pilot will
[merge](https://istio.io/help/ops/traffic-management/deploy-guidelines/#multiple-virtual-services-and-destination-rules-for-the-same-host)
the two services and the website rule will be moved to the end of the list in the merged configuration.
Note that host merging only works if the canaries are bound to an ingress gateway other than the `mesh` gateway.
### Istio Mutual TLS
## Istio Mutual TLS
**How can I enable mTLS for a canary?**
@@ -633,7 +746,8 @@ spec:
**If Flagger is outside of the mesh, how can it start the load test?**
In order for Flagger to be able to call the load tester service from outside the mesh, you need to disable mTLS on port 80:
In order for Flagger to be able to call the load tester service from outside the mesh,
you need to disable mTLS on port 80:
```yaml
apiVersion: networking.istio.io/v1alpha3

View File

@@ -62,7 +62,7 @@ helm repo add eks https://aws.github.io/eks-charts
## Enable horizontal pod auto-scaling
Install the Horizontal Pod Autoscaler \(HPA\) metrics provider:
Install the Horizontal Pod Autoscaler (HPA) metrics provider:
```bash
helm upgrade -i metrics-server stable/metrics-server \
@@ -118,7 +118,7 @@ helm repo add flagger https://flagger.app
Install Flagger's Canary CRD:
```yaml
kubectl apply -f https://raw.githubusercontent.com/weaveworks/flagger/master/artifacts/flagger/crd.yaml
kubectl apply -f https://raw.githubusercontent.com/fluxcd/flagger/main/artifacts/flagger/crd.yaml
```
Deploy Flagger in the _**appmesh-system**_ namespace:
@@ -146,6 +146,6 @@ You can access Grafana using port forwarding:
kubectl -n appmesh-system port-forward svc/appmesh-grafana 3000:3000
```
Now that you have Flagger running,
you can try the [App Mesh canary deployments tutorial](https://docs.flagger.app/usage/appmesh-progressive-delivery).
Now that you have Flagger running, you can try the
[App Mesh canary deployments tutorial](https://docs.flagger.app/usage/appmesh-progressive-delivery).

View File

@@ -2,7 +2,7 @@
This guide walks you through setting up Flagger and Istio on Google Kubernetes Engine.
![GKE Cluster Overview](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-gke-istio.png)
![GKE Cluster Overview](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/flagger-gke-istio.png)
## Prerequisites
@@ -205,12 +205,12 @@ jetstack/cert-manager
## Istio Gateway TLS setup
![Istio Let&apos;s Encrypt](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/istio-cert-manager-gke.png)
![Istio Let&apos;s Encrypt](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/istio-cert-manager-gke.png)
Create a generic Istio Gateway to expose services outside the mesh on HTTPS:
```bash
REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
REPO=https://raw.githubusercontent.com/fluxcd/flagger/main
kubectl apply -f ${REPO}/artifacts/gke/istio-gateway.yaml
```
@@ -346,7 +346,7 @@ helm repo add flagger https://flagger.app
Install Flagger's Canary CRD:
```yaml
kubectl apply -f https://raw.githubusercontent.com/weaveworks/flagger/master/artifacts/flagger/crd.yaml
kubectl apply -f https://raw.githubusercontent.com/fluxcd/flagger/main/artifacts/flagger/crd.yaml
```
Deploy Flagger in the `istio-system` namespace with Slack notifications enabled:

View File

@@ -4,7 +4,7 @@ This guide walks you through setting up Flagger on a Kubernetes cluster with Hel
## Prerequisites
Flagger requires a Kubernetes cluster **v1.14** or newer.
Flagger requires a Kubernetes cluster **v1.16** or newer.
## Install Flagger with Helm
@@ -17,7 +17,7 @@ helm repo add flagger https://flagger.app
Install Flagger's Canary CRD:
```yaml
kubectl apply -f https://raw.githubusercontent.com/weaveworks/flagger/master/artifacts/flagger/crd.yaml
kubectl apply -f https://raw.githubusercontent.com/fluxcd/flagger/main/artifacts/flagger/crd.yaml
```
Deploy Flagger for Istio:
@@ -30,11 +30,12 @@ helm upgrade -i flagger flagger/flagger \
--set metricsServer=http://prometheus:9090
```
Note that Flagger depends on Istio telemetry and Prometheus, if you're installing Istio with istioctl
then you should be using the [default profile](https://istio.io/docs/setup/additional-setup/config-profiles/).
Note that Flagger depends on Istio telemetry and Prometheus, if you're installing
Istio with istioctl then you should be using the
[default profile](https://istio.io/docs/setup/additional-setup/config-profiles/).
For Istio multi-cluster shared control plane you can install Flagger
on each remote cluster and set the Istio control plane host cluster kubeconfig:
For Istio multi-cluster shared control plane you can install Flagger on each remote cluster and set the
Istio control plane host cluster kubeconfig:
```bash
helm upgrade -i flagger flagger/flagger \
@@ -47,8 +48,8 @@ helm upgrade -i flagger flagger/flagger \
```
Note that the Istio kubeconfig must be stored in a Kubernetes secret with a data key named `kubeconfig`.
For more details on how to configure Istio multi-cluster credentials
read the [Istio docs](https://istio.io/docs/setup/install/multicluster/shared-vpn/#credentials).
For more details on how to configure Istio multi-cluster
credentials read the [Istio docs](https://istio.io/docs/setup/install/multicluster/shared-vpn/#credentials).
Deploy Flagger for Linkerd:
@@ -77,26 +78,8 @@ For ingress controllers, the install instructions are:
* [Contour](https://docs.flagger.app/tutorials/contour-progressive-delivery)
* [Gloo](https://docs.flagger.app/tutorials/gloo-progressive-delivery)
* [NGINX](https://docs.flagger.app/tutorials/nginx-progressive-delivery)
Enable **Slack** notifications:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set crd.create=false \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general \
--set slack.user=flagger
```
Enable **Microsoft Teams** notifications:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set crd.create=false \
--set msteams.url=https://outlook.office.com/webhook/YOUR/TEAMS/WEBHOOK
```
* [Skipper](https://docs.flagger.app/tutorials/skipper-progressive-delivery)
* [Traefik](https://docs.flagger.app/tutorials/traefik-progressive-delivery)
You can use the helm template command and apply the generated yaml with kubectl:
@@ -121,7 +104,7 @@ helm delete flagger
The command removes all the Kubernetes components associated with the chart and deletes the release.
> **Note** that on uninstall the Canary CRD will not be removed. Deleting the CRD will make Kubernetes
>remove all the objects owned by Flagger like Istio virtual services, Kubernetes deployments and ClusterIP services.
> remove all the objects owned by Flagger like Istio virtual services, Kubernetes deployments and ClusterIP services.
If you want to remove all the objects created by Flagger you have to delete the Canary CRD with kubectl:
@@ -171,13 +154,13 @@ As an alternative to Helm, Flagger can be installed with Kustomize **3.5.0** or
Install Flagger for Istio:
```bash
kustomize build https://github.com/weaveworks/flagger/kustomize/istio | kubectl apply -f -
kustomize build https://github.com/fluxcd/flagger/kustomize/istio?ref=main | kubectl apply -f -
```
Install Flagger for AWS App Mesh:
```bash
kustomize build https://github.com/weaveworks/flagger/kustomize/appmesh | kubectl apply -f -
kustomize build https://github.com/fluxcd/flagger/kustomize/appmesh?ref=main | kubectl apply -f -
```
This deploys Flagger and sets the metrics server URL to App Mesh's Prometheus instance.
@@ -185,7 +168,7 @@ This deploys Flagger and sets the metrics server URL to App Mesh's Prometheus in
Install Flagger for Linkerd:
```bash
kustomize build https://github.com/weaveworks/flagger/kustomize/linkerd | kubectl apply -f -
kustomize build https://github.com/fluxcd/flagger/kustomize/linkerd?ref=main | kubectl apply -f -
```
This deploys Flagger in the `linkerd` namespace and sets the metrics server URL to Linkerd's Prometheus instance.
@@ -193,19 +176,19 @@ This deploys Flagger in the `linkerd` namespace and sets the metrics server URL
If you want to install a specific Flagger release, add the version number to the URL:
```bash
kustomize build https://github.com/weaveworks/flagger/kustomize/linkerd?ref=v1.0.0 | kubectl apply -f -
kustomize build https://github.com/fluxcd/flagger/kustomize/linkerd?ref=v1.0.0 | kubectl apply -f -
```
**Generic installer**
Install Flagger and Prometheus for Contour, Gloo or NGINX ingress:
Install Flagger and Prometheus for Contour, Gloo, NGINX, Skipper, or Traefik ingress:
```bash
kustomize build https://github.com/weaveworks/flagger/kustomize/kubernetes | kubectl apply -f -
kustomize build https://github.com/fluxcd/flagger/kustomize/kubernetes?ref=main | kubectl apply -f -
```
This deploys Flagger and Prometheus in the `flagger-system` namespace, sets the metrics server URL
to `http://flagger-prometheus.flagger-system:9090` and the mesh provider to `kubernetes`.
This deploys Flagger and Prometheus in the `flagger-system` namespace,
sets the metrics server URL to `http://flagger-prometheus.flagger-system:9090` and the mesh provider to `kubernetes`.
The Prometheus instance has a two hours data retention and is configured to scrape all pods in your cluster
that have the `prometheus.io/scrape: "true"` annotation.
@@ -219,7 +202,7 @@ metadata:
name: app
namespace: test
spec:
# can be: kubernetes, istio, linkerd, appmesh, nginx, gloo
# can be: kubernetes, istio, linkerd, appmesh, nginx, skipper, gloo, traefik
# use the kubernetes provider for Blue/Green style deployments
provider: nginx
```
@@ -232,7 +215,7 @@ Create a kustomization file using Flagger as base and patch the container args:
cat > kustomization.yaml <<EOF
namespace: istio-system
bases:
- github.com/weaveworks/flagger/kustomize/base/flagger
- https://github.com/fluxcd/flagger/kustomize/kubernetes?ref=main
patches:
- target:
kind: Deployment
@@ -250,19 +233,6 @@ patches:
args:
- -mesh-provider=istio
- -metrics-server=http://prometheus.istio-system:9090
- -slack-user=flagger
- -slack-channel=alerts
- -slack-url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK
- -include-label-prefix=app.kubernetes.io
EOF
```
Install Flagger for Istio with Slack notifications:
```bash
kustomize build . | kubectl apply -f -
```
If you want to use MS Teams instead of Slack, replace `-slack-url` with `-msteams-url` and set the webhook address
to `https://outlook.office.com/webhook/YOUR/TEAMS/WEBHOOK`.

View File

@@ -1,188 +0,0 @@
# Flagger Install with SuperGloo
This guide walks you through setting up Flagger on a Kubernetes cluster using [SuperGloo](https://github.com/solo-io/supergloo).
SuperGloo by [Solo.io](https://solo.io) is an opinionated abstraction layer that simplifies the installation, management, and operation of your service mesh. It supports running multiple ingresses with multiple meshes \(Istio, App Mesh, Consul Connect and Linkerd 2\) in the same cluster.
## Prerequisites
Flagger requires a Kubernetes cluster **v1.11** or newer with the following admission controllers enabled:
* MutatingAdmissionWebhook
* ValidatingAdmissionWebhook
## Install Istio with SuperGloo
### Install SuperGloo command line interface helper
SuperGloo includes a command line helper \(CLI\) that makes operation of SuperGloo easier. The CLI is not required for SuperGloo to function correctly.
If you use [Homebrew](https://brew.sh) package manager run the following commands to install the SuperGloo CLI.
```bash
brew tap solo-io/tap
brew solo-io/tap/supergloo
```
Or you can download SuperGloo CLI and add it to your path:
```bash
curl -sL https://run.solo.io/supergloo/install | sh
export PATH=$HOME/.supergloo/bin:$PATH
```
### Install SuperGloo controller
Deploy the SuperGloo controller in the `supergloo-system` namespace:
```bash
supergloo init
```
This is equivalent to installing SuperGloo using its Helm chart
```bash
helm repo add supergloo http://storage.googleapis.com/supergloo-helm
helm upgrade --install supergloo supergloo/supergloo --namespace supergloo-system
```
### Install Istio using SuperGloo
Create the `istio-system` namespace and install Istio with traffic management, telemetry and Prometheus enabled:
```bash
ISTIO_VER="1.0.6"
kubectl create namespace istio-system
supergloo install istio --name istio \
--namespace=supergloo-system \
--auto-inject=true \
--installation-namespace=istio-system \
--mtls=false \
--prometheus=true \
--version=${ISTIO_VER}
```
This creates a Kubernetes Custom Resource \(CRD\) like the following.
```yaml
apiVersion: supergloo.solo.io/v1
kind: Install
metadata:
name: istio
namespace: supergloo-system
spec:
installationNamespace: istio-system
mesh:
installedMesh:
name: istio
namespace: supergloo-system
istioMesh:
enableAutoInject: true
enableMtls: false
installGrafana: false
installJaeger: false
installPrometheus: true
istioVersion: 1.0.6
```
### Allow Flagger to manipulate SuperGloo
Create a cluster role binding so that Flagger can manipulate SuperGloo custom resources:
```bash
kubectl create clusterrolebinding flagger-supergloo \
--clusterrole=mesh-discovery \
--serviceaccount=istio-system:flagger
```
Wait for the Istio control plane to become available:
```bash
kubectl --namespace istio-system rollout status deployment/istio-sidecar-injector
kubectl --namespace istio-system rollout status deployment/prometheus
```
## Install Flagger
Add Flagger Helm repository:
```bash
helm repo add flagger https://flagger.app
```
Install Flagger's Canary CRD:
```yaml
kubectl apply -f https://raw.githubusercontent.com/weaveworks/flagger/master/artifacts/flagger/crd.yaml
```
Deploy Flagger in the _**istio-system**_ namespace and set the service mesh provider to SuperGloo:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set crd.create=false \
--set metricsServer=http://prometheus.istio-system:9090 \
--set meshProvider=supergloo:istio.supergloo-system
```
When using SuperGloo the mesh provider format is `supergloo:<MESH-NAME>.<SUPERGLOO-NAMESPACE>`.
Optionally you can enable **Slack** notifications:
```bash
helm upgrade -i flagger flagger/flagger \
--reuse-values \
--namespace=istio-system \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general \
--set slack.user=flagger
```
## Install Grafana
Flagger comes with a Grafana dashboard made for monitoring the canary analysis.
Deploy Grafana in the _**istio-system**_ namespace:
```bash
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=istio-system \
--set url=http://prometheus.istio-system:9090
```
You can access Grafana using port forwarding:
```bash
kubectl -n istio-system port-forward svc/flagger-grafana 3000:80
```
## Install Load Tester
Flagger comes with an optional load testing service that generates traffic during canary analysis when configured as a webhook.
Deploy the load test runner with Helm:
```bash
helm upgrade -i flagger-loadtester flagger/loadtester \
--namespace=test \
--set cmd.timeout=1h
```
Deploy with kubectl:
```bash
helm fetch --untar --untardir . flagger/loadtester &&
helm template loadtester \
--name flagger-loadtester \
--namespace=test
> $HOME/flagger-loadtester.yaml
# apply
kubectl apply -f $HOME/flagger-loadtester.yaml
```
> **Note** that the load tester should be deployed in a namespace with Istio sidecar injection enabled.

View File

@@ -1,13 +1,14 @@
# App Mesh Canary Deployments
This guide shows you how to use App Mesh and Flagger to automate canary deployments.
You'll need an EKS cluster configured with App Mesh,
you can find the installion guide [here](https://docs.flagger.app/install/flagger-install-on-eks-appmesh).
You'll need an EKS cluster (Kubernetes >= 1.16) configured with App Mesh,
you can find the installation guide [here](https://docs.flagger.app/install/flagger-install-on-eks-appmesh).
## Bootstrap
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler \(HPA\),
then creates a series of objects \(Kubernetes deployments, ClusterIP services, App Mesh virtual nodes and services\).
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
then creates a series of objects (Kubernetes deployments, ClusterIP services,
App Mesh virtual nodes and services).
These objects expose the application on the mesh and drive the canary analysis and promotion.
The only App Mesh object you need to create by yourself is the mesh resource.
@@ -42,7 +43,7 @@ EOF
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/podinfo
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/podinfo?ref=main
```
Deploy the load testing service to generate traffic during the canary analysis:
@@ -76,7 +77,7 @@ spec:
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
name: podinfo
service:
@@ -167,20 +168,20 @@ virtualservice.appmesh.k8s.aws/podinfo
virtualservice.appmesh.k8s.aws/podinfo-canary
```
After the boostrap, the podinfo deployment will be scaled to zero and the traffic to
`podinfo.test` will be routed to the primary pods.
After the bootstrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test`
will be routed to the primary pods.
During the canary analysis, the `podinfo-canary.test` address can be used to target directly the canary pods.
App Mesh blocks all egress traffic by default.
If your application needs to call another service, you have to
create an App Mesh virtual service for it and add the virtual service name to the backend list.
If your application needs to call another service, you have to create an App Mesh virtual service for it
and add the virtual service name to the backend list.
```yaml
service:
port: 9898
backends:
- backend1
- backend2
- arn:aws:appmesh:eu-west-1:12345678910:mesh/my-mesh/virtualService/backend2
```
## Setup App Mesh Gateway (optional)
@@ -234,7 +235,7 @@ Open your browser and navigate to the ingress address to access podinfo UI.
A canary deployment is triggered by changes in any of the following objects:
* Deployment PodSpec \(container image, command, ports, env, resources, etc\)
* Deployment PodSpec (container image, command, ports, env, resources, etc)
* ConfigMaps and Secrets mounted as volumes or mapped to environment variables
Trigger a canary deployment by updating the container image:
@@ -281,7 +282,7 @@ During the analysis the canarys progress can be monitored with Grafana.
The App Mesh dashboard URL is
[http://localhost:3000/d/flagger-appmesh/appmesh-canary?refresh=10s&orgId=1&var-namespace=test&var-primary=podinfo-primary&var-canary=podinfo](http://localhost:3000/d/flagger-appmesh/appmesh-canary?refresh=10s&orgId=1&var-namespace=test&var-primary=podinfo-primary&var-canary=podinfo).
![App Mesh Canary Dashboard](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/flagger-grafana-appmesh.png)
![App Mesh Canary Dashboard](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/screens/flagger-grafana-appmesh.png)
You can monitor all canaries with:
@@ -296,7 +297,7 @@ prod backend Failed 0
If you've enabled the Slack notifications, you should receive the following messages:
![Flagger Slack Notifications](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/slack-canary-notifications.png)
![Flagger Slack Notifications](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/screens/slack-canary-notifications.png)
## Automated rollback
@@ -350,7 +351,7 @@ Canary failed! Scaling down podinfo.test
If you've enabled the Slack notifications, you'll receive a message if the progress deadline is exceeded,
or if the analysis reached the maximum number of failed checks:
![Flagger Slack Notifications](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/slack-canary-failed.png)
![Flagger Slack Notifications](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/screens/slack-canary-failed.png)
## A/B Testing
@@ -358,7 +359,7 @@ Besides weighted routing, Flagger can be configured to route traffic to the cana
In an A/B testing scenario, you'll be using HTTP headers or cookies to target a certain segment of your users.
This is particularly useful for frontend applications that require session affinity.
![Flagger A/B Testing Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-abtest-steps.png)
![Flagger A/B Testing Stages](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/flagger-abtest-steps.png)
Edit the canary analysis, remove the max/step weight and add the match conditions and iterations:
@@ -423,4 +424,8 @@ Routing all traffic to primary
Promotion completed! Scaling down podinfo.test
```
For an in-depth look at the analysis process read the [usage docs](../usage/how-it-works.md).
The above procedure can be extended with
[custom metrics](../usage/metrics.md) checks,
[webhooks](../usage/webhooks.md),
[manual promotion](../usage/webhooks.md#manual-gating) approval and
[Slack or MS Teams](../usage/alerting.md) notifications.

View File

@@ -30,7 +30,7 @@ You can find the chart source [here](https://github.com/stefanprodan/flagger/tre
Create a test namespace with Istio sidecar injection enabled:
```bash
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
export REPO=https://raw.githubusercontent.com/fluxcd/flagger/main
kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
```
@@ -77,7 +77,7 @@ When the `frontend-primary` deployment comes online, Flagger will route all traf
Open your browser and navigate to the frontend URL:
![Podinfo Frontend](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/demo-frontend.png)
![Podinfo Frontend](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/screens/demo-frontend.png)
Now let's install the `backend` release without exposing it outside the mesh:
@@ -101,7 +101,7 @@ frontend Initialized 0 2019-02-12T17:50:50Z
Click on the ping button in the `frontend` UI to trigger a HTTP POST request that will reach the `backend` app:
![Jaeger Tracing](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/demo-frontend-jaeger.png)
![Jaeger Tracing](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/screens/demo-frontend-jaeger.png)
We'll use the `/echo` endpoint \(same as the one the ping button calls\) to generate load on both apps during a canary deployment.
@@ -159,7 +159,7 @@ Promotion completed! Scaling down frontend.test
You can monitor the canary deployment with Grafana. Open the Flagger dashboard, select `test` from the namespace dropdown, `frontend-primary` from the primary dropdown and `frontend` from the canary dropdown.
![Flagger Grafana Dashboard](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/demo-frontend-dashboard.png)
![Flagger Grafana Dashboard](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/screens/demo-frontend-dashboard.png)
Now trigger a canary deployment for the `backend` app, but this time you'll change a value in the configmap:
@@ -217,7 +217,7 @@ Copying backend.test template spec to backend-primary.test
Promotion completed! Scaling down backend.test
```
![Flagger Grafana Dashboard](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/demo-backend-dashboard.png)
![Flagger Grafana Dashboard](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/screens/demo-backend-dashboard.png)
If the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary, the canary is scaled to zero and the rollout is marked as failed.
@@ -235,7 +235,7 @@ If you've enabled the Slack notifications, you'll receive an alert with the reas
Instead of using Helm CLI from a CI tool to perform the install and upgrade, you could use a Git based approach. GitOps is a way to do Continuous Delivery, it works by using Git as a source of truth for declarative infrastructure and workloads. In the [GitOps model](https://www.weave.works/technologies/gitops/), any change to production must be committed in source control prior to being applied on the cluster. This way rollback and audit logs are provided by Git.
![Helm GitOps Canary Deployment](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-flux-gitops.png)
![Helm GitOps Canary Deployment](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/flagger-flux-gitops.png)
In order to apply the GitOps pipeline model to Flagger canary deployments you'll need a Git repository with your workloads definitions in YAML format, a container registry where your CI system pushes immutable images and an operator that synchronizes the Git repo with the cluster state.
@@ -286,15 +286,11 @@ spec:
enabled: true
```
In the `chart` section I've defined the release source by specifying the Helm repository (hosted on GitHub Pages),
chart name and version. In the `values` section I've overwritten the defaults set in values.yaml.
In the `chart` section I've defined the release source by specifying the Helm repository \(hosted on GitHub Pages\), chart name and version. In the `values` section I've overwritten the defaults set in values.yaml.
With the `fluxcd.io` annotations I instruct Flux to automate this release.
When an image tag in the sem ver range of `3.1.0 - 3.1.99` is pushed to Docker Hub,
Flux will upgrade the Helm release and from there Flagger will pick up the change and start a canary deployment.
With the `fluxcd.io` annotations I instruct Flux to automate this release. When an image tag in the sem ver range of `3.1.0 - 3.1.99` is pushed to Docker Hub, Flux will upgrade the Helm release and from there Flagger will pick up the change and start a canary deployment.
Install [Flux](https://github.com/fluxcd/flux) and its
[Helm Operator](https://github.com/fluxcd/helm-operator) by specifying your Git repo URL:
Install [Flux](https://github.com/fluxcd/flux) and its [Helm Operator](https://github.com/fluxcd/helm-operator) by specifying your Git repo URL:
```bash
helm repo add fluxcd https://charts.fluxcd.io
@@ -315,14 +311,11 @@ At startup Flux generates a SSH key and logs the public key. Find the SSH public
kubectl -n fluxcd logs deployment/flux | grep identity.pub | cut -d '"' -f2
```
In order to sync your cluster state with Git you need to copy the public key
and create a deploy key with write access on your GitHub repository.
In order to sync your cluster state with Git you need to copy the public key and create a deploy key with write access on your GitHub repository.
Open GitHub, navigate to your fork, go to _Setting &gt; Deploy keys_ click on _Add deploy key_, check _Allow write access_,
paste the Flux public key and click _Add key_.
Open GitHub, navigate to your fork, go to _Settings &gt; Deploy keys_, click on _Add deploy key_, check _Allow write access_, paste the Flux public key and click _Add key_.
After a couple of seconds Flux will apply the Kubernetes resources from Git and
Flagger will launch the `frontend` and `backend` apps.
After a couple of seconds Flux will apply the Kubernetes resources from Git and Flagger will launch the `frontend` and `backend` apps.
A CI/CD pipeline for the `frontend` release could look like this:
@@ -343,14 +336,12 @@ If the canary fails, fix the bug, do another patch release eg `3.1.2` and the wh
A canary deployment can fail due to any of the following reasons:
* the container image can't be downloaded
* the deployment replica set is stuck for more then ten minutes (eg. due to a container crash loop)
* the webooks (acceptance tests, helm tests, load tests, etc) are returning a non 2xx response
* the HTTP success rate (non 5xx responses) metric drops under the threshold
* the deployment replica set is stuck for more than ten minutes \(eg. due to a container crash loop\)
* the webhooks \(acceptance tests, helm tests, load tests, etc\) are returning a non 2xx response
* the HTTP success rate \(non 5xx responses\) metric drops under the threshold
* the HTTP average duration metric goes over the threshold
* the Istio telemetry service is unable to collect traffic metrics
* the metrics server (Prometheus) can't be reached
* the metrics server \(Prometheus\) can't be reached
If you want to find out more about managing Helm releases with Flux here are two in-depth guides:
[gitops-helm](https://github.com/stefanprodan/gitops-helm)
and [gitops-istio](https://github.com/stefanprodan/gitops-istio).
If you want to find out more about managing Helm releases with Flux here are two in-depth guides: [gitops-helm](https://github.com/stefanprodan/gitops-helm) and [gitops-istio](https://github.com/stefanprodan/gitops-istio).

View File

@@ -2,11 +2,11 @@
This guide shows you how to use [Contour](https://projectcontour.io/) ingress controller and Flagger to automate canary releases and A/B testing.
![Flagger Contour Overview](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-contour-overview.png)
![Flagger Contour Overview](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/flagger-contour-overview.png)
## Prerequisites
Flagger requires a Kubernetes cluster **v1.11** or newer and Contour **v1.0** or newer.
Flagger requires a Kubernetes cluster **v1.16** or newer and Contour **v1.0** or newer.
Install Contour on a cluster with LoadBalancer support:
@@ -19,7 +19,7 @@ The above command will deploy Contour and an Envoy daemonset in the `projectcont
Install Flagger using Kustomize (kubectl 1.14) in the `projectcontour` namespace:
```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/contour
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/contour?ref=main
```
The above command will deploy Flagger and Prometheus configured to scrape the Contour's Envoy instances.
@@ -36,14 +36,11 @@ helm upgrade -i flagger flagger/flagger \
--set prometheus.install=true
```
You can also enable Slack, Discord, Rocket or MS Teams notifications,
see the alerting [docs](../usage/alerting.md).
You can also enable Slack, Discord, Rocket or MS Teams notifications, see the alerting [docs](../usage/alerting.md).
## Bootstrap
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
then creates a series of objects (Kubernetes deployments, ClusterIP services and Contour HTTPProxy).
These objects expose the application in the cluster and drive the canary analysis and promotion.
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler \(HPA\), then creates a series of objects \(Kubernetes deployments, ClusterIP services and Contour HTTPProxy\). These objects expose the application in the cluster and drive the canary analysis and promotion.
Create a test namespace:
@@ -54,16 +51,16 @@ kubectl create ns test
Install the load testing service to generate traffic during the canary analysis:
```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/tester
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/tester?ref=main
```
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/podinfo
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/podinfo?ref=main
```
Create a canary custom resource (replace `app.example.com` with your own domain):
Create a canary custom resource \(replace `app.example.com` with your own domain\):
```yaml
apiVersion: flagger.app/v1beta1
@@ -79,7 +76,7 @@ spec:
name: podinfo
# HPA reference
autoscalerRef:
apiVersion: autoscaling/v2beta1
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
name: podinfo
service:
@@ -160,9 +157,7 @@ service/podinfo-primary
httpproxy.projectcontour.io/podinfo
```
After the boostrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test`
will be routed to the primary pods.
During the canary analysis, the `podinfo-canary.test` address can be used to target directly the canary pods.
After the bootstrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test` will be routed to the primary pods. During the canary analysis, the `podinfo-canary.test` address can be used to target directly the canary pods.
## Expose the app outside the cluster
@@ -174,11 +169,9 @@ export ADDRESS="$(kubectl -n projectcontour get svc/envoy -ojson \
echo $ADDRESS
```
Configure your DNS server with a CNAME record \(AWS\) or A record (GKE/AKS/DOKS)
and point a domain e.g. `app.example.com` to the LB address.
Configure your DNS server with a CNAME record \(AWS\) or A record \(GKE/AKS/DOKS\) and point a domain e.g. `app.example.com` to the LB address.
Create a HTTPProxy definition and include the podinfo proxy generated by Flagger
(replace `app.example.com` with your own domain):
Create a HTTPProxy definition and include the podinfo proxy generated by Flagger \(replace `app.example.com` with your own domain\):
```yaml
apiVersion: projectcontour.io/v1
@@ -214,21 +207,17 @@ podinfo-ingress app.example.com valid
Now you can access podinfo UI using your domain address.
Note that you should be using HTTPS when exposing production workloads on internet.
You can obtain free TLS certs from Let's Encrypt, read this [guide](https://github.com/stefanprodan/eks-contour-ingress)
on how to configure cert-manager to secure Contour with TLS certificates.
Note that you should be using HTTPS when exposing production workloads on the internet. You can obtain free TLS certs from Let's Encrypt, read this [guide](https://github.com/stefanprodan/eks-contour-ingress) on how to configure cert-manager to secure Contour with TLS certificates.
## Automated canary promotion
Flagger implements a control loop that gradually shifts traffic to the canary while measuring
key performance indicators like HTTP requests success rate, requests average duration and pod health.
Based on analysis of the KPIs a canary is promoted or aborted.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators like HTTP requests success rate, requests average duration and pod health. Based on analysis of the KPIs a canary is promoted or aborted.
![Flagger Canary Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-steps.png)
![Flagger Canary Stages](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/flagger-canary-steps.png)
A canary deployment is triggered by changes in any of the following objects:
* Deployment PodSpec (container image, command, ports, env, resources, etc)
* Deployment PodSpec \(container image, command, ports, env, resources, etc\)
* ConfigMaps and Secrets mounted as volumes or mapped to environment variables
Trigger a canary deployment by updating the container image:
@@ -282,7 +271,7 @@ test podinfo Progressing 15 2019-12-20T14:05:07Z
If you've enabled the Slack notifications, you should receive the following messages:
![Flagger Slack Notifications](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/slack-canary-notifications.png)
![Flagger Slack Notifications](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/screens/slack-canary-notifications.png)
## Automated rollback
@@ -313,8 +302,7 @@ Generate latency:
watch -n 1 curl http://app.example.com/delay/1
```
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
the canary is scaled to zero and the rollout is marked as failed.
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary, the canary is scaled to zero and the rollout is marked as failed.
```text
kubectl -n projectcontour logs deploy/flagger -f | jq .msg
@@ -333,18 +321,15 @@ Rolling back podinfo.test failed checks threshold reached 5
Canary failed! Scaling down podinfo.test
```
If youve enabled the Slack notifications, youll receive a message if the progress deadline is exceeded,
or if the analysis reached the maximum number of failed checks:
If you've enabled the Slack notifications, you'll receive a message if the progress deadline is exceeded, or if the analysis reached the maximum number of failed checks:
![Flagger Slack Notifications](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/slack-canary-failed.png)
![Flagger Slack Notifications](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/screens/slack-canary-failed.png)
## A/B Testing
Besides weighted routing, Flagger can be configured to route traffic to the canary based on HTTP match conditions.
In an A/B testing scenario, you'll be using HTTP headers or cookies to target a certain segment of your users.
This is particularly useful for frontend applications that require session affinity.
Besides weighted routing, Flagger can be configured to route traffic to the canary based on HTTP match conditions. In an A/B testing scenario, you'll be using HTTP headers or cookies to target a certain segment of your users. This is particularly useful for frontend applications that require session affinity.
![Flagger A/B Testing Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-abtest-steps.png)
![Flagger A/B Testing Stages](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/flagger-abtest-steps.png)
Edit the canary analysis, remove the max/step weight and add the match conditions and iterations:
@@ -439,3 +424,4 @@ match:
```
For an in-depth look at the analysis process read the [usage docs](../usage/how-it-works.md).

View File

@@ -1,358 +0,0 @@
# Crossover Canary Deployments
This guide shows you how to use Envoy, [Crossover](https://github.com/mumoshu/crossover) and Flagger to automate canary deployments.
Crossover is a minimal Envoy xDS implementation that supports the [Service Mesh Interface](https://smi-spec.io/).
## Prerequisites
Flagger requires a Kubernetes cluster **v1.11** or newer and Envoy paired with [Crossover](https://github.com/mumoshu/crossover) sidecar.
Create a test namespace:
```bash
kubectl create ns test
```
Install Envoy along with the Crossover sidecar with Helm:
```bash
helm repo add crossover https://mumoshu.github.io/crossover
helm upgrade --install envoy crossover/envoy \
--namespace test \
-f <(cat <<EOF
smi:
apiVersions:
trafficSplits: v1alpha1
upstreams:
podinfo:
smi:
enabled: true
backends:
podinfo-primary:
port: 9898
weight: 100
podinfo-canary:
port: 9898
weight: 0
EOF
)
```
Install Flagger and the Prometheus add-on in the same namespace as Envoy:
```bash
helm repo add flagger https://flagger.app
helm upgrade -i flagger flagger/flagger \
--namespace test \
--set prometheus.install=true \
--set meshProvider=smi:crossover
```
## Bootstrap
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
then creates a series of objects (Kubernetes deployments, ClusterIP services, SMI traffic splits).
These objects expose the application on the mesh and drive the canary analysis and promotion.
There's no SMI object you need to create by yourself.
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/podinfo
```
Deploy the load testing service to generate traffic during the canary analysis:
```bash
helm upgrade -i flagger-loadtester flagger/loadtester \
--namespace=test
```
Create a metric template to measure the HTTP requests error rate:
```yaml
apiVersion: flagger.app/v1beta1
kind: MetricTemplate
metadata:
name: error-rate
namespace: test
spec:
provider:
address: http://flagger-prometheus:9090
type: prometheus
query: |
100 - rate(
envoy_cluster_upstream_rq{
kubernetes_namespace="{{ namespace }}",
envoy_cluster_name="{{ target }}-canary",
envoy_response_code!~"5.*"
}[{{ interval }}])
/
rate(
envoy_cluster_upstream_rq{
kubernetes_namespace="{{ namespace }}",
envoy_cluster_name="{{ target }}-canary"
}[{{ interval }}]
) * 100
```
Create a metric template to measure the HTTP requests average duration:
```yaml
apiVersion: flagger.app/v1beta1
kind: MetricTemplate
metadata:
name: latency
namespace: test
spec:
provider:
address: http://flagger-prometheus:9090
type: prometheus
query: |
histogram_quantile(0.99,
sum(
rate(
envoy_cluster_upstream_rq_time_bucket{
kubernetes_namespace="{{ namespace }}",
envoy_cluster_name="{{ target }}-canary"
}[{{ interval }}]
)
) by (le)
)
```
Create a canary custom resource:
```yaml
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
provider: "smi:crossover"
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: podinfo
service:
port: 9898
# define the canary analysis timing and KPIs
analysis:
# schedule interval (default 60s)
interval: 1m
# max number of failed metric checks before rollback
threshold: 5
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 5
metrics:
- name: error-rate
templateRef:
name: error-rate
thresholdRange:
max: 1
interval: 30s
- name: latency
templateRef:
name: latency
thresholdRange:
max: 0.5
interval: 30s
webhooks:
- name: acceptance-test
type: pre-rollout
url: http://flagger-loadtester.test/
timeout: 30s
metadata:
type: bash
cmd: "curl -sd 'test' http://podinfo-canary.test:9898/token | grep token"
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 1m -q 10 -c 2 -H 'Host: podinfo.test' http://envoy.test:10000/"
```
Save the above resource as podinfo-canary.yaml and then apply it:
```bash
kubectl apply -f ./podinfo-canary.yaml
```
After a couple of seconds Flagger will create the canary objects:
```bash
# applied
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
canary.flagger.app/podinfo
# generated
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
trafficsplits.split.smi-spec.io/podinfo
```
After the bootstrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test`
will be routed to the primary pods. During the canary analysis,
the `podinfo-canary.test` address can be used to target directly the canary pods.
## Automated canary promotion
Flagger implements a control loop that gradually shifts traffic to the canary while measuring
key performance indicators like HTTP requests success rate, requests average duration and pod health.
Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack.
![Flagger Canary Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-steps.png)
A canary deployment is triggered by changes in any of the following objects:
* Deployment PodSpec (container image, command, ports, env, resources, etc)
* ConfigMaps and Secrets mounted as volumes or mapped to environment variables
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:3.1.5
```
Flagger detects that the deployment revision changed and starts a new rollout:
```text
kubectl -n test describe canary/podinfo
Status:
Canary Weight: 0
Failed Checks: 0
Phase: Succeeded
Events:
New revision detected! Scaling up podinfo.test
Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
Pre-rollout check acceptance-test passed
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Advance podinfo.test canary weight 20
Advance podinfo.test canary weight 25
Advance podinfo.test canary weight 30
Advance podinfo.test canary weight 35
Advance podinfo.test canary weight 40
Advance podinfo.test canary weight 45
Advance podinfo.test canary weight 50
Copying podinfo.test template spec to podinfo-primary.test
Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
Routing all traffic to primary
Promotion completed! Scaling down podinfo.test
```
When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary.
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
During the analysis the canary's progress can be monitored with Grafana.
Flagger comes with a Grafana dashboard made for canary analysis. Install Grafana with Helm:
```bash
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=test \
--set url=http://flagger-prometheus:9090
```
Run:
```bash
kubectl port-forward --namespace test svc/flagger-grafana 3000:80
```
The Envoy dashboard URL is [http://localhost:3000/d/flagger-envoy/envoy-canary?refresh=10s&orgId=1&var-namespace=test&var-target=podinfo](http://localhost:3000/d/flagger-envoy/envoy-canary?refresh=10s&orgId=1&var-namespace=test&var-target=podinfo)
![Envoy Canary Dashboard](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/flagger-grafana-appmesh.png)
You can monitor all canaries with:
```bash
watch kubectl get canaries --all-namespaces
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
test podinfo Progressing 15 2019-10-02T14:05:07Z
prod frontend Succeeded 0 2019-10-02T16:15:07Z
prod backend Failed 0 2019-10-02T17:05:07Z
```
If you've enabled the Slack notifications, you should receive the following messages:
![Flagger Slack Notifications](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/slack-canary-notifications.png)
## Automated rollback
During the canary analysis you can generate HTTP 500 errors or high latency to test if Flagger pauses the rollout.
Trigger a canary deployment:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:3.1.2
```
Exec into the load tester pod with:
```bash
kubectl -n test exec -it deploy/flagger-loadtester bash
```
Generate HTTP 500 errors:
```bash
hey -z 1m -c 5 -q 5 -H 'Host: podinfo.test' http://envoy.test:10000/status/500
```
Generate latency:
```bash
watch -n 1 curl -H 'Host: podinfo.test' http://envoy.test:10000/delay/1
```
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
the canary is scaled to zero and the rollout is marked as failed.
```text
kubectl -n test logs deploy/flagger -f | jq .msg
New revision detected! progressing canary analysis for podinfo.test
Pre-rollout check acceptance-test passed
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Halt podinfo.test advancement success rate 69.17% < 99%
Halt podinfo.test advancement success rate 61.39% < 99%
Halt podinfo.test advancement success rate 55.06% < 99%
Halt podinfo.test advancement request duration 1.20s > 0.5s
Halt podinfo.test advancement request duration 1.45s > 0.5s
Rolling back podinfo.test failed checks threshold reached 5
Canary failed! Scaling down podinfo.test
```
If you've enabled the Slack notifications, you'll receive a message if the progress deadline is exceeded,
or if the analysis reached the maximum number of failed checks:
![Flagger Slack Notifications](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/slack-canary-failed.png)

View File

@@ -1,295 +0,0 @@
# SMI Istio Canary Deployments
This guide shows you how to use the SMI Istio adapter and Flagger to automate canary deployments.
## Prerequisites
* Kubernetes > 1.13
* Istio > 1.0
## Install Istio SMI adapter
Install the SMI adapter:
```bash
kubectl apply -f https://raw.githubusercontent.com/deislabs/smi-adapter-istio/master/deploy/crds/crds.yaml
kubectl apply -f https://raw.githubusercontent.com/deislabs/smi-adapter-istio/master/deploy/operator-and-rbac.yaml
```
Create a generic Istio gateway to expose services outside the mesh on HTTP:
```yaml
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
name: public-gateway
namespace: istio-system
spec:
selector:
istio: ingressgateway
servers:
- port:
number: 80
name: http
protocol: HTTP
hosts:
- "*"
```
Save the above resource as public-gateway.yaml and then apply it:
```bash
kubectl apply -f ./public-gateway.yaml
```
Find the Gateway load balancer IP and add a DNS record for it:
```bash
kubectl -n istio-system get svc/istio-ingressgateway -ojson | jq -r .status.loadBalancer.ingress[0].ip
```
## Install Flagger and Grafana
Add Flagger Helm repository:
```bash
helm repo add flagger https://flagger.app
```
Deploy Flagger in the _**istio-system**_ namespace:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set meshProvider=smi:istio
```
Flagger comes with a Grafana dashboard made for monitoring the canary deployments.
Deploy Grafana in the _**istio-system**_ namespace:
```bash
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=istio-system \
--set url=http://prometheus.istio-system:9090
```
You can access Grafana using port forwarding:
```bash
kubectl -n istio-system port-forward svc/flagger-grafana 3000:80
```
## Workloads bootstrap
Create a test namespace with Istio sidecar injection enabled:
```bash
kubectl create ns test
kubectl label namespace test istio-injection=enabled
```
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/podinfo
```
Deploy the load testing service to generate traffic during the canary analysis:
```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/tester
```
Create a canary custom resource \(replace example.com with your own domain\):
```yaml
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
# the maximum time in seconds for the canary deployment
# to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: podinfo
service:
# container port
port: 9898
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
# Istio virtual service host names (optional)
hosts:
- app.example.com
analysis:
# schedule interval (default 60s)
interval: 10s
# max number of failed metric checks before rollback
threshold: 5
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 10
metrics:
- name: request-success-rate
# minimum req success rate (non 5xx responses)
# percentage (0-100)
thresholdRange:
min: 99
interval: 1m
- name: request-duration
# maximum req duration P99
# milliseconds
thresholdRange:
max: 500
interval: 30s
# generate traffic during analysis
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
```
Save the above resource as podinfo-canary.yaml and then apply it:
```bash
kubectl apply -f ./podinfo-canary.yaml
```
After a couple of seconds Flagger will create the canary objects:
```bash
# applied
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
canary.flagger.app/podinfo
# generated
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
trafficsplits.split.smi-spec.io/podinfo
```
## Automated canary promotion
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators like HTTP requests success rate, requests average duration and pod health. Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack.
![Flagger Canary Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-steps.png)
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:3.1.1
```
Flagger detects that the deployment revision changed and starts a new rollout:
```text
kubectl -n istio-system logs deployment/flagger -f | jq .msg
New revision detected podinfo.test
Scaling up podinfo.test
Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Advance podinfo.test canary weight 20
Advance podinfo.test canary weight 25
Advance podinfo.test canary weight 30
Advance podinfo.test canary weight 35
Advance podinfo.test canary weight 40
Advance podinfo.test canary weight 45
Advance podinfo.test canary weight 50
Copying podinfo.test template spec to podinfo-primary.test
Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
Promotion completed! Scaling down podinfo.test
```
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
During the analysis the canary's progress can be monitored with Grafana. The Istio dashboard URL is [http://localhost:3000/d/flagger-istio/istio-canary?refresh=10s&orgId=1&var-namespace=test&var-primary=podinfo-primary&var-canary=podinfo](http://localhost:3000/d/flagger-istio/istio-canary?refresh=10s&orgId=1&var-namespace=test&var-primary=podinfo-primary&var-canary=podinfo)
You can monitor all canaries with:
```bash
watch kubectl get canaries --all-namespaces
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
test podinfo Progressing 15 2019-05-16T14:05:07Z
prod frontend Succeeded 0 2019-05-15T16:15:07Z
prod backend Failed 0 2019-05-14T17:05:07Z
```
## Automated rollback
During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses the rollout.
Create a tester pod and exec into it:
```bash
kubectl -n test run tester \
--image=quay.io/stefanprodan/podinfo:3.1.2 \
-- ./podinfo --port=9898
kubectl -n test exec -it tester-xx-xx sh
```
Generate HTTP 500 errors:
```bash
watch curl http://podinfo-canary:9898/status/500
```
Generate latency:
```bash
watch curl http://podinfo-canary:9898/delay/1
```
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary, the canary is scaled to zero and the rollout is marked as failed.
```text
kubectl -n test describe canary/podinfo
Status:
Canary Weight: 0
Failed Checks: 10
Phase: Failed
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger Starting canary deployment for podinfo.test
Normal Synced 3m flagger Advance podinfo.test canary weight 5
Normal Synced 3m flagger Advance podinfo.test canary weight 10
Normal Synced 3m flagger Advance podinfo.test canary weight 15
Normal Synced 3m flagger Halt podinfo.test advancement success rate 69.17% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 61.39% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 55.06% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 47.00% < 99%
Normal Synced 2m flagger (combined from similar events): Halt podinfo.test advancement success rate 38.08% < 99%
Warning Synced 1m flagger Rolling back podinfo.test failed checks threshold reached 10
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
```

View File

@@ -1,12 +1,17 @@
# Gloo Canary Deployments
This guide shows you how to use the [Gloo](https://gloo.solo.io/) ingress controller and Flagger to automate canary deployments.
This guide shows you how to use the [Gloo Edge](https://gloo.solo.io/) ingress controller
and Flagger to automate canary releases and A/B testing.
![Flagger Gloo Ingress Controller](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-gloo-overview.png)
![Flagger Gloo Ingress Controller](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/flagger-gloo-overview.png)
## Prerequisites
Flagger requires a Kubernetes cluster **v1.11** or newer and Gloo ingress **1.3.5** or newer.
Flagger requires a Kubernetes cluster **v1.16** or newer and Gloo Edge ingress **1.6.0** or newer.
This guide was written for Flagger version **1.6.0** or higher. Prior versions of Flagger
used Gloo upstream groups to handle canaries, but newer versions of Flagger use Gloo
route tables to handle canaries as well as A/B testing.
Install Gloo with Helm v3:
@@ -31,7 +36,7 @@ helm upgrade -i flagger flagger/flagger \
## Bootstrap
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
then creates a series of objects (Kubernetes deployments, ClusterIP services and Gloo upstream groups).
then creates a series of objects (Kubernetes deployments, ClusterIP services and Gloo route tables groups).
These objects expose the application outside the cluster and drive the canary analysis and promotion.
Create a test namespace:
@@ -43,16 +48,16 @@ kubectl create ns test
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl -n test apply -k github.com/weaveworks/flagger//kustomize/podinfo
kubectl -n test apply -k https://github.com/fluxcd/flagger//kustomize/podinfo?ref=main
```
Deploy the load testing service to generate traffic during the canary analysis:
```bash
kubectl -n test apply -k github.com/weaveworks/flagger//kustomize/tester
kubectl -n test apply -k https://github.com/fluxcd/flagger//kustomize/tester?ref=main
```
Create an virtual service definition that references an upstream group that will be generated by Flagger
Create a virtual service definition that references a route table that will be generated by Flagger
(replace `app.example.com` with your own domain):
```yaml
@@ -68,8 +73,8 @@ spec:
routes:
- matchers:
- prefix: /
routeAction:
upstreamGroup:
delegateAction:
ref:
name: podinfo
namespace: test
```
@@ -97,7 +102,7 @@ spec:
name: podinfo
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
name: podinfo
service:
@@ -168,7 +173,7 @@ horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
upstreamgroups.gloo.solo.io/podinfo
routetables.gateway.solo.io/podinfo
```
When the bootstrap finishes Flagger will set the canary status to initialized:
@@ -186,7 +191,7 @@ Flagger implements a control loop that gradually shifts traffic to the canary wh
key performance indicators like HTTP requests success rate, requests average duration and pod health.
Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack.
![Flagger Canary Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-steps.png)
![Flagger Canary Stages](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/flagger-canary-steps.png)
Trigger a canary deployment by updating the container image:
@@ -240,7 +245,8 @@ prod backend Failed 0 2019-05-17T17:05:07Z
## Automated rollback
During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses and rolls back the faulted version.
During the canary analysis you can generate HTTP 500 errors and high latency to test if
Flagger pauses and rolls back the faulted version.
Trigger another canary deployment:
@@ -252,13 +258,13 @@ podinfod=stefanprodan/podinfo:3.1.2
Generate HTTP 500 errors:
```bash
watch curl -H 'Host: app.example.com' http://gateway-proxy-v2.gloo-system/status/500
watch curl -H 'Host: app.example.com' http://gateway-proxy.gloo-system/status/500
```
Generate high latency:
```bash
watch curl -H 'Host: app.example.com' http://gateway-proxy-v2.gloo-system/delay/2
watch curl -H 'Host: app.example.com' http://gateway-proxy.gloo-system/delay/2
```
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
@@ -291,8 +297,8 @@ Events:
The canary analysis can be extended with Prometheus queries.
The demo app is instrumented with Prometheus so you can create a custom check that will use
the HTTP request duration histogram to validate the canary.
The demo app is instrumented with Prometheus so you can create a custom check that will use the HTTP request
duration histogram to validate the canary.
Create a metric template and apply it on the cluster:
@@ -305,7 +311,7 @@ metadata:
spec:
provider:
type: prometheus
address: http://flagger-promethues.gloo-system:9090
address: http://flagger-prometheus.gloo-system:9090
query: |
100 - sum(
rate(
@@ -340,8 +346,8 @@ Edit the canary analysis and add the following metric:
interval: 1m
```
The above configuration validates the canary by checking if the HTTP 404 req/sec percentage is
below 5 percent of the total traffic. If the 404s rate reaches the 5% threshold, then the canary fails.
The above configuration validates the canary by checking if the HTTP 404 req/sec percentage
is below 5 percent of the total traffic. If the 404s rate reaches the 5% threshold, then the canary fails.
Trigger a canary deployment by updating the container image:
@@ -353,7 +359,7 @@ podinfod=stefanprodan/podinfo:3.1.3
Generate 404s:
```bash
watch curl -H 'Host: app.example.com' http://gateway-proxy.gloo-system/status/400
watch curl -H 'Host: app.example.com' http://gateway-proxy.gloo-system/status/404
```
Watch Flagger logs:
@@ -377,5 +383,90 @@ Canary failed! Scaling down podinfo.test
If you have [alerting](../usage/alerting.md) configured,
Flagger will send a notification with the reason why the canary failed.
For an in-depth look at the analysis process read the [usage docs](../usage/how-it-works.md).
## A/B Testing
Besides weighted routing, Flagger can be configured to route traffic to the canary based on HTTP match conditions.
In an A/B testing scenario, you'll be using HTTP headers or cookies to target a certain segment of your users.
This is particularly useful for frontend applications that require session affinity.
![Flagger A/B Testing Stages](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/flagger-abtest-steps.png)
Edit the canary analysis, remove the max/step weight and add the match conditions and iterations:
```yaml
analysis:
interval: 1m
threshold: 5
iterations: 10
match:
- headers:
x-canary:
exact: "insider"
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
metadata:
cmd: "hey -z 1m -q 5 -c 5 -H 'X-Canary: insider' -host app.example.com http://gateway-proxy.gloo-system"
```
The above configuration will run an analysis for ten minutes targeting users that have a `X-Canary: insider` header.
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:3.1.4
```
Flagger detects that the deployment revision changed and starts the A/B test:
```text
kubectl -n gloo-system logs deploy/flagger -f | jq .msg
New revision detected! Progressing canary analysis for podinfo.test
Advance podinfo.test canary iteration 1/10
Advance podinfo.test canary iteration 2/10
Advance podinfo.test canary iteration 3/10
Advance podinfo.test canary iteration 4/10
Advance podinfo.test canary iteration 5/10
Advance podinfo.test canary iteration 6/10
Advance podinfo.test canary iteration 7/10
Advance podinfo.test canary iteration 8/10
Advance podinfo.test canary iteration 9/10
Advance podinfo.test canary iteration 10/10
Copying podinfo.test template spec to podinfo-primary.test
Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
Routing all traffic to primary
Promotion completed! Scaling down podinfo.test
```
The web browser user agent header allows user segmentation based on device or OS.
For example, if you want to route all mobile users to the canary instance:
```yaml
match:
- headers:
user-agent:
regex: ".*Mobile.*"
```
Or if you want to target only Android users:
```yaml
match:
- headers:
user-agent:
regex: ".*Android.*"
```
Or a specific browser version:
```yaml
match:
- headers:
user-agent:
regex: ".*Firefox.*"
```
For an in-depth look at the analysis process read the [usage docs](../usage/how-it-works.md).

View File

@@ -2,26 +2,26 @@
This guide shows you how to automate A/B testing with Istio and Flagger.
Besides weighted routing, Flagger can be configured to route traffic to the canary based on HTTP match conditions.
In an A/B testing scenario, you'll be using HTTP headers or cookies to target a certain segment of your users.
This is particularly useful for frontend applications that require session affinity.
Besides weighted routing, Flagger can be configured to route traffic to the canary based on HTTP match conditions. In an A/B testing scenario, you'll be using HTTP headers or cookies to target a certain segment of your users. This is particularly useful for frontend applications that require session affinity.
![Flagger A/B Testing Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-abtest-steps.png)
![Flagger A/B Testing Stages](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/flagger-abtest-steps.png)
## Prerequisites
Flagger requires a Kubernetes cluster **v1.11** or newer and Istio **v1.0** or newer.
Flagger requires a Kubernetes cluster **v1.16** or newer and Istio **v1.0** or newer.
Install Istio with telemetry support and Prometheus:
```bash
istioctl manifest apply --set profile=default
istioctl manifest install --set profile=default
kubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.8/samples/addons/prometheus.yaml
```
Install Flagger using Kustomize (kubectl 1.14) in the `istio-system` namespace:
Install Flagger in the `istio-system` namespace:
```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/istio
kubectl apply -k github.com/fluxcd/flagger//kustomize/istio
```
Create an ingress gateway to expose the demo app outside of the mesh:
@@ -56,16 +56,16 @@ kubectl label namespace test istio-injection=enabled
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/podinfo
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/podinfo?ref=main
```
Deploy the load testing service to generate traffic during the canary analysis:
```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/tester
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/tester?ref=main
```
Create a canary custom resource (replace example.com with your own domain):
Create a canary custom resource \(replace example.com with your own domain\):
```yaml
apiVersion: flagger.app/v1beta1
@@ -84,7 +84,7 @@ spec:
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
name: podinfo
service:
@@ -138,8 +138,7 @@ spec:
cmd: "hey -z 1m -q 10 -c 2 -H 'Cookie: type=insider' http://podinfo.test:9898/"
```
**Note** that when using Istio 1.5 you have to replace the `request-duration`
with a [metric template](https://docs.flagger.app/dev/upgrade-guide#istio-telemetry-v2).
**Note** that when using Istio 1.5 you have to replace the `request-duration` with a [metric template](https://docs.flagger.app/dev/upgrade-guide#istio-telemetry-v2).
The above configuration will run an analysis for ten minutes targeting Firefox users and those that have an insider cookie.
@@ -235,8 +234,7 @@ Generate latency:
watch curl -b 'type=insider' http://app.example.com/delay/1
```
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
the canary is scaled to zero and the rollout is marked as failed.
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary, the canary is scaled to zero and the rollout is marked as failed.
```text
kubectl -n test describe canary/podinfo
@@ -257,7 +255,5 @@ Events:
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
```
The above procedure can be extended with [custom metrics](../usage/metrics.md) checks,
[webhooks](../usage/webhooks.md),
[manual promotion](../usage/webhooks.md#manual-gating) approval and
[Slack or MS Teams](../usage/alerting.md) notifications.
The above procedure can be extended with [custom metrics](../usage/metrics.md) checks, [webhooks](../usage/webhooks.md), [manual promotion](../usage/webhooks.md#manual-gating) approval and [Slack or MS Teams](../usage/alerting.md) notifications.

View File

@@ -2,22 +2,24 @@
This guide shows you how to use Istio and Flagger to automate canary deployments.
![Flagger Canary Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-steps.png)
![Flagger Canary Stages](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/flagger-canary-steps.png)
## Prerequisites
Flagger requires a Kubernetes cluster **v1.11** or newer and Istio **v1.5** or newer.
Flagger requires a Kubernetes cluster **v1.16** or newer and Istio **v1.5** or newer.
Install Istio with telemetry support and Prometheus:
```bash
istioctl manifest apply --set profile=default
istioctl manifest install --set profile=default
kubectl apply -f https://raw.githubusercontent.com/istio/istio/release-1.8/samples/addons/prometheus.yaml
```
Install Flagger using Kustomize (kubectl >= 1.14) in the `istio-system` namespace:
Install Flagger in the `istio-system` namespace:
```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/istio
kubectl apply -k github.com/fluxcd/flagger//kustomize/istio
```
Create an ingress gateway to expose the demo app outside of the mesh:
@@ -42,10 +44,7 @@ spec:
## Bootstrap
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
then creates a series of objects (Kubernetes deployments, ClusterIP services,
Istio destination rules and virtual services).
These objects expose the application inside the mesh and drive the canary analysis and promotion.
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler \(HPA\), then creates a series of objects \(Kubernetes deployments, ClusterIP services, Istio destination rules and virtual services\). These objects expose the application inside the mesh and drive the canary analysis and promotion.
Create a test namespace with Istio sidecar injection enabled:
@@ -57,16 +56,16 @@ kubectl label namespace test istio-injection=enabled
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/podinfo
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/podinfo?ref=main
```
Deploy the load testing service to generate traffic during the canary analysis:
```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/tester
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/tester?ref=main
```
Create a canary custom resource (replace example.com with your own domain):
Create a canary custom resource \(replace example.com with your own domain\):
```yaml
apiVersion: flagger.app/v1beta1
@@ -85,7 +84,7 @@ spec:
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
name: podinfo
service:
@@ -149,8 +148,7 @@ spec:
cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.test:9898/"
```
**Note** that when using Istio 1.4 you have to replace the `request-duration`
with a [metric template](https://docs.flagger.app/dev/upgrade-guide#istio-telemetry-v2).
**Note** that when using Istio 1.4 you have to replace the `request-duration` with a [metric template](https://docs.flagger.app/dev/upgrade-guide#istio-telemetry-v2).
Save the above resource as podinfo-canary.yaml and then apply it:
@@ -158,10 +156,9 @@ Save the above resource as podinfo-canary.yaml and then apply it:
kubectl apply -f ./podinfo-canary.yaml
```
When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary.
The canary analysis will run for five minutes while validating the HTTP metrics and rollout hooks every minute.
When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary. The canary analysis will run for five minutes while validating the HTTP metrics and rollout hooks every minute.
![Flagger Canary Process](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-hpa.png)
![Flagger Canary Process](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/flagger-canary-hpa.png)
After a couple of seconds Flagger will create the canary objects:
@@ -269,8 +266,7 @@ Generate latency:
watch curl http://podinfo-canary:9898/delay/1
```
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
the canary is scaled to zero and the rollout is marked as failed.
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary, the canary is scaled to zero and the rollout is marked as failed.
```text
kubectl -n test describe canary/podinfo
@@ -297,14 +293,11 @@ Events:
## Traffic mirroring
![Flagger Canary Traffic Shadowing](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-traffic-mirroring.png)
![Flagger Canary Traffic Shadowing](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/flagger-canary-traffic-mirroring.png)
For applications that perform read operations, Flagger can be configured to drive canary releases with traffic mirroring.
Istio traffic mirroring will copy each incoming request, sending one request to the primary and one to the canary service.
The response from the primary is sent back to the user and the response from the canary is discarded.
Metrics are collected on both requests so that the deployment will only proceed if the canary metrics are within the threshold values.
For applications that perform read operations, Flagger can be configured to drive canary releases with traffic mirroring. Istio traffic mirroring will copy each incoming request, sending one request to the primary and one to the canary service. The response from the primary is sent back to the user and the response from the canary is discarded. Metrics are collected on both requests so that the deployment will only proceed if the canary metrics are within the threshold values.
Note that mirroring should be used for requests that are **idempotent** or capable of being processed twice (once by the primary and once by the canary).
Note that mirroring should be used for requests that are **idempotent** or capable of being processed twice \(once by the primary and once by the canary\).
You can enable mirroring by replacing `stepWeight/maxWeight` with `iterations` and by setting `analysis.mirror` to `true`:
@@ -352,7 +345,7 @@ spec:
With the above configuration, Flagger will run a canary release with the following steps:
* detect new revision (deployment spec, secrets or configmaps changes)
* detect new revision \(deployment spec, secrets or configmaps changes\)
* scale from zero the canary deployment
* wait for the HPA to set the canary minimum replicas
* check canary pods health
@@ -364,7 +357,7 @@ With the above configuration, Flagger will run a canary release with the followi
* abort the canary release if the metrics check failure threshold is reached
* stop traffic mirroring after the number of iterations is reached
* route live traffic to the canary pods
* promote the canary (update the primary secrets, configmaps and deployment spec)
* promote the canary \(update the primary secrets, configmaps and deployment spec\)
* wait for the primary deployment rollout to finish
* wait for the HPA to set the primary minimum replicas
* check primary pods health
@@ -372,7 +365,5 @@ With the above configuration, Flagger will run a canary release with the followi
* scale to zero the canary
* send notification with the canary analysis result
The above procedure can be extended with [custom metrics](../usage/metrics.md) checks,
[webhooks](../usage/webhooks.md),
[manual promotion](../usage/webhooks.md#manual-gating) approval and
[Slack or MS Teams](../usage/alerting.md) notifications.
The above procedure can be extended with [custom metrics](../usage/metrics.md) checks, [webhooks](../usage/webhooks.md), [manual promotion](../usage/webhooks.md#manual-gating) approval and [Slack or MS Teams](../usage/alerting.md) notifications.

View File

@@ -2,15 +2,13 @@
This guide shows you how to automate Blue/Green deployments with Flagger and Kubernetes.
For applications that are not deployed on a service mesh, Flagger can orchestrate Blue/Green style deployments
with Kubernetes L4 networking. When using a service mesh blue/green can be used as
specified [here](../usage/deployment-strategies.md).
For applications that are not deployed on a service mesh, Flagger can orchestrate Blue/Green style deployments with Kubernetes L4 networking. When using a service mesh blue/green can be used as specified [here](../usage/deployment-strategies.md).
![Flagger Blue/Green Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-bluegreen-steps.png)
![Flagger Blue/Green Stages](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/flagger-bluegreen-steps.png)
## Prerequisites
Flagger requires a Kubernetes cluster **v1.11** or newer.
Flagger requires a Kubernetes cluster **v1.16** or newer.
Install Flagger and the Prometheus add-on:
@@ -44,9 +42,7 @@ helm upgrade -i flagger flagger/flagger \
## Bootstrap
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
then creates a series of objects (Kubernetes deployment and ClusterIP services).
These objects expose the application inside the cluster and drive the canary analysis and Blue/Green promotion.
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler \(HPA\), then creates a series of objects \(Kubernetes deployment and ClusterIP services\). These objects expose the application inside the cluster and drive the canary analysis and Blue/Green promotion.
Create a test namespace:
@@ -57,13 +53,13 @@ kubectl create ns test
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/podinfo
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/podinfo?ref=main
```
Deploy the load testing service to generate traffic during the analysis:
```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/tester
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/tester?ref=main
```
Create a canary custom resource:
@@ -87,7 +83,7 @@ spec:
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
name: podinfo
service:
@@ -158,10 +154,14 @@ service/podinfo-primary
Blue/Green scenario:
* on bootstrap, Flagger will create three ClusterIP services (`app-primary`,`app-canary`, `app`)
and a shadow deployment named `app-primary` that represents the blue version
* on bootstrap, Flagger will create three ClusterIP services \(`app-primary`,`app-canary`, `app`\)
and a shadow deployment named `app-primary` that represents the blue version
* when a new version is detected, Flagger would scale up the green version and run the conformance tests
(the tests should target the `app-canary` ClusterIP service to reach the green version)
\(the tests should target the `app-canary` ClusterIP service to reach the green version\)
* if the conformance tests are passing, Flagger would start the load tests and validate them with custom Prometheus queries
* if the load test analysis is successful, Flagger will promote the new version to `app-primary` and scale down the green version
@@ -257,9 +257,7 @@ Events:
## Custom metrics
The analysis can be extended with Prometheus queries. The demo app is instrumented with Prometheus so you can
create a custom check that will use the HTTP request duration histogram to validate the canary (green version).
The analysis can be extended with Prometheus queries. The demo app is instrumented with Prometheus so you can create a custom check that will use the HTTP request duration histogram to validate the canary \(green version\).
Create a metric template and apply it on the cluster:
@@ -272,7 +270,7 @@ metadata:
spec:
provider:
type: prometheus
address: http://flagger-promethues.flagger:9090
address: http://flagger-prometheus.flagger:9090
query: |
100 - sum(
rate(
@@ -307,8 +305,7 @@ Edit the canary analysis and add the following metric:
interval: 1m
```
The above configuration validates the canary (green version) by checking if the HTTP 404 req/sec percentage is
below 5 percent of the total traffic. If the 404s rate reaches the 5% threshold, then the rollout is rolled back.
The above configuration validates the canary \(green version\) by checking if the HTTP 404 req/sec percentage is below 5 percent of the total traffic. If the 404s rate reaches the 5% threshold, then the rollout is rolled back.
Trigger a deployment by updating the container image:
@@ -337,8 +334,7 @@ Rolling back podinfo.test failed checks threshold reached 2
Canary failed! Scaling down podinfo.test
```
If you have [alerting](../usage/alerting.md) configured,
Flagger will send a notification with the reason why the canary failed.
If you have [alerting](../usage/alerting.md) configured, Flagger will send a notification with the reason why the canary failed.
## Conformance Testing with Helm
@@ -370,8 +366,7 @@ Add a helm test pre-rollout hook to your chart:
cmd: "test {{ .Release.Name }} --cleanup"
```
When the canary analysis starts, Flagger will call the pre-rollout webhooks.
If the helm test fails, Flagger will retry until the analysis threshold is reached and the canary is rolled back.
When the canary analysis starts, Flagger will call the pre-rollout webhooks. If the helm test fails, Flagger will retry until the analysis threshold is reached and the canary is rolled back.
For an in-depth look at the analysis process read the [usage docs](../usage/how-it-works.md).

View File

@@ -2,25 +2,23 @@
This guide shows you how to use Linkerd and Flagger to automate canary deployments.
![Flagger Linkerd Traffic Split](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-linkerd-traffic-split.png)
![Flagger Linkerd Traffic Split](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/flagger-linkerd-traffic-split.png)
## Prerequisites
Flagger requires a Kubernetes cluster **v1.11** or newer and Linkerd **2.4** or newer.
Flagger requires a Kubernetes cluster **v1.16** or newer and Linkerd **2.4** or newer.
Install Flagger in the linkerd namespace:
```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/linkerd
kubectl apply -k github.com/fluxcd/flagger//kustomize/linkerd
```
Note that you'll need kubectl 1.14 or newer to run the above command.
## Bootstrap
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
then creates a series of objects (Kubernetes deployments, ClusterIP services and SMI traffic split).
These objects expose the application inside the mesh and drive the canary analysis and promotion.
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler \(HPA\), then creates a series of objects \(Kubernetes deployments, ClusterIP services and SMI traffic split\). These objects expose the application inside the mesh and drive the canary analysis and promotion.
Create a test namespace and enable Linkerd proxy injection:
@@ -32,13 +30,13 @@ kubectl annotate namespace test linkerd.io/inject=enabled
Install the load testing service to generate traffic during the canary analysis:
```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/tester
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/tester?ref=main
```
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/podinfo
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/podinfo?ref=main
```
Create a canary custom resource for the podinfo deployment:
@@ -57,7 +55,7 @@ spec:
name: podinfo
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
name: podinfo
# the maximum time in seconds for the canary deployment
@@ -115,8 +113,7 @@ Save the above resource as podinfo-canary.yaml and then apply it:
kubectl apply -f ./podinfo-canary.yaml
```
When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary.
The canary analysis will run for five minutes while validating the HTTP metrics and rollout hooks every half a minute.
When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary. The canary analysis will run for five minutes while validating the HTTP metrics and rollout hooks every half a minute.
After a couple of seconds Flagger will create the canary objects:
@@ -136,17 +133,13 @@ service/podinfo-primary
trafficsplits.split.smi-spec.io/podinfo
```
After the boostrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test`
will be routed to the primary pods.
During the canary analysis, the `podinfo-canary.test` address can be used to target directly the canary pods.
After the bootstrap, the podinfo deployment will be scaled to zero and the traffic to `podinfo.test` will be routed to the primary pods. During the canary analysis, the `podinfo-canary.test` address can be used to target directly the canary pods.
## Automated canary promotion
Flagger implements a control loop that gradually shifts traffic to the canary while measuring
key performance indicators like HTTP requests success rate, requests average duration and pod health.
Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators like HTTP requests success rate, requests average duration and pod health. Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack.
![Flagger Canary Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-steps.png)
![Flagger Canary Stages](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/flagger-canary-steps.png)
Trigger a canary deployment by updating the container image:
@@ -205,8 +198,7 @@ prod backend Failed 0 2019-06-30T17:05:07Z
## Automated rollback
During the canary analysis you can generate HTTP 500 errors and high latency to
test if Flagger pauses and rolls back the faulted version.
During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses and rolls back the faulted version.
Trigger another canary deployment:
@@ -233,8 +225,7 @@ Generate latency:
watch -n 1 curl http://podinfo-canary.test:9898/delay/1
```
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
the canary is scaled to zero and the rollout is marked as failed.
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary, the canary is scaled to zero and the rollout is marked as failed.
```text
kubectl -n test describe canary/podinfo
@@ -293,9 +284,7 @@ Let's a define a check for not found errors. Edit the canary analysis and add th
* 100
```
The above configuration validates the canary version by checking if the HTTP 404 req/sec percentage
is below three percent of the total traffic.
If the 404s rate reaches the 3% threshold, then the analysis is aborted and the canary is marked as failed.
The above configuration validates the canary version by checking if the HTTP 404 req/sec percentage is below three percent of the total traffic. If the 404s rate reaches the 3% threshold, then the analysis is aborted and the canary is marked as failed.
Trigger a canary deployment by updating the container image:
@@ -340,8 +329,7 @@ helm upgrade -i nginx-ingress stable/nginx-ingress \
--namespace ingress-nginx
```
Create an ingress definition for podinfo that rewrites the incoming header
to the internal service name (required by Linkerd):
Create an ingress definition for podinfo that rewrites the incoming header to the internal service name \(required by Linkerd\):
```yaml
apiVersion: extensions/v1beta1
@@ -367,20 +355,15 @@ spec:
servicePort: 9898
```
When using an ingress controller, the Linkerd traffic split does not apply to incoming traffic
since NGINX in running outside of the mesh. In order to run a canary analysis for a frontend app,
Flagger creates a shadow ingress and sets the NGINX specific annotations.
When using an ingress controller, the Linkerd traffic split does not apply to incoming traffic since NGINX is running outside of the mesh. In order to run a canary analysis for a frontend app, Flagger creates a shadow ingress and sets the NGINX specific annotations.
## A/B Testing
Besides weighted routing, Flagger can be configured to route traffic to the canary based on HTTP match conditions.
In an A/B testing scenario, you'll be using HTTP headers or cookies to target a certain segment of your users.
This is particularly useful for frontend applications that require session affinity.
Besides weighted routing, Flagger can be configured to route traffic to the canary based on HTTP match conditions. In an A/B testing scenario, you'll be using HTTP headers or cookies to target a certain segment of your users. This is particularly useful for frontend applications that require session affinity.
![Flagger Linkerd Ingress](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-nginx-linkerd.png)
![Flagger Linkerd Ingress](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/flagger-nginx-linkerd.png)
Edit podinfo canary analysis, set the provider to `nginx`, add the ingress reference,
remove the max/step weight and add the match conditions and iterations:
Edit podinfo canary analysis, set the provider to `nginx`, add the ingress reference, remove the max/step weight and add the match conditions and iterations:
```yaml
apiVersion: flagger.app/v1beta1
@@ -400,7 +383,7 @@ spec:
kind: Deployment
name: podinfo
autoscalerRef:
apiVersion: autoscaling/v2beta1
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
name: podinfo
service:
@@ -444,8 +427,7 @@ spec:
cmd: "hey -z 2m -q 10 -c 2 -H 'Cookie: canary=always' http://app.example.com"
```
The above configuration will run an analysis for ten minutes targeting users that have
a `canary` cookie set to `always` or those that call the service using the `X-Canary: always` header.
The above configuration will run an analysis for ten minutes targeting users that have a `canary` cookie set to `always` or those that call the service using the `X-Canary: always` header.
**Note** that the load test now targets the external address and uses the canary cookie.
@@ -479,7 +461,5 @@ Events:
Promotion completed! Scaling down podinfo.test
```
The above procedure can be extended with [custom metrics](../usage/metrics.md) checks,
[webhooks](../usage/webhooks.md),
[manual promotion](../usage/webhooks.md#manual-gating) approval and
[Slack or MS Teams](../usage/alerting.md) notifications.
The above procedure can be extended with [custom metrics](../usage/metrics.md) checks, [webhooks](../usage/webhooks.md), [manual promotion](../usage/webhooks.md#manual-gating) approval and [Slack or MS Teams](../usage/alerting.md) notifications.

View File

@@ -2,24 +2,25 @@
This guide shows you how to use the NGINX ingress controller and Flagger to automate canary deployments and A/B testing.
![Flagger NGINX Ingress Controller](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-nginx-overview.png)
![Flagger NGINX Ingress Controller](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/flagger-nginx-overview.png)
## Prerequisites
Flagger requires a Kubernetes cluster **v1.14** or newer and NGINX ingress **0.24** or newer.
Flagger requires a Kubernetes cluster **v1.16** or newer and NGINX ingress **v0.41** or newer.
Install NGINX with Helm v3:
Install the NGINX ingress controller with Helm v3:
```bash
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
kubectl create ns ingress-nginx
helm upgrade -i nginx-ingress stable/nginx-ingress \
helm upgrade -i ingress-nginx ingress-nginx/ingress-nginx \
--namespace ingress-nginx \
--set controller.metrics.enabled=true \
--set controller.podAnnotations."prometheus\.io/scrape"=true \
--set controller.podAnnotations."prometheus\.io/port"=10254
```
Install Flagger and the Prometheus add-on in the same namespace as NGINX:
Install Flagger and the Prometheus add-on in the same namespace as the ingress controller:
```bash
helm repo add flagger https://flagger.app
@@ -30,17 +31,6 @@ helm upgrade -i flagger flagger/flagger \
--set meshProvider=nginx
```
Optionally you can enable Slack notifications:
```bash
helm upgrade -i flagger flagger/flagger \
--reuse-values \
--namespace ingress-nginx \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general \
--set slack.user=flagger
```
## Bootstrap
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
@@ -56,7 +46,7 @@ kubectl create ns test
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/podinfo
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/podinfo?ref=main
```
Deploy the load testing service to generate traffic during the canary analysis:
@@ -66,7 +56,7 @@ helm upgrade -i flagger-loadtester flagger/loadtester \
--namespace=test
```
Create an ingress definition \(replace `app.example.com` with your own domain\):
Create an ingress definition (replace `app.example.com` with your own domain):
```yaml
apiVersion: networking.k8s.io/v1beta1
@@ -94,7 +84,7 @@ Save the above resource as podinfo-ingress.yaml and then apply it:
kubectl apply -f ./podinfo-ingress.yaml
```
Create a canary custom resource \(replace `app.example.com` with your own domain\):
Create a canary custom resource (replace `app.example.com` with your own domain):
```yaml
apiVersion: flagger.app/v1beta1
@@ -116,7 +106,7 @@ spec:
name: podinfo
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
name: podinfo
# the maximum time in seconds for the canary deployment
@@ -188,11 +178,11 @@ ingresses.extensions/podinfo-canary
## Automated canary promotion
Flagger implements a control loop that gradually shifts traffic to the canary while measuring
key performance indicators like HTTP requests success rate, requests average duration and pod health.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance
indicators like HTTP requests success rate, requests average duration and pod health.
Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack or MS Teams.
![Flagger Canary Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-steps.png)
![Flagger Canary Stages](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/flagger-canary-steps.png)
Trigger a canary deployment by updating the container image:
@@ -261,7 +251,8 @@ Generate HTTP 500 errors:
watch curl http://app.example.com/status/500
```
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
When the number of failed checks reaches the canary analysis threshold,
the traffic is routed back to the primary,
the canary is scaled to zero and the rollout is marked as failed.
```text
@@ -291,8 +282,8 @@ Events:
The canary analysis can be extended with Prometheus queries.
The demo app is instrumented with Prometheus so you can create a custom check that will use the
HTTP request duration histogram to validate the canary.
The demo app is instrumented with Prometheus so you can create a custom check
that will use the HTTP request duration histogram to validate the canary.
Create a metric template and apply it on the cluster:
@@ -305,7 +296,7 @@ metadata:
spec:
provider:
type: prometheus
address: http://flagger-promethues.ingress-nginx:9090
address: http://flagger-prometheus.ingress-nginx:9090
query: |
histogram_quantile(0.99,
sum(
@@ -332,8 +323,8 @@ Edit the canary analysis and add the latency check:
interval: 1m
```
The threshold is set to 500ms so if the average request duration in the last minute goes over half a second
then the analysis will fail and the canary will not be promoted.
The threshold is set to 500ms so if the average request duration in the last minute goes over
half a second then the analysis will fail and the canary will not be promoted.
Trigger a canary deployment by updating the container image:
@@ -345,7 +336,7 @@ podinfod=stefanprodan/podinfo:3.1.3
Generate high response latency:
```bash
watch curl http://app.exmaple.com/delay/2
watch curl http://app.example.com/delay/2
```
Watch Flagger logs:
@@ -374,7 +365,7 @@ Besides weighted routing, Flagger can be configured to route traffic to the cana
In an A/B testing scenario, you'll be using HTTP headers or cookies to target a certain segment of your users.
This is particularly useful for frontend applications that require session affinity.
![Flagger A/B Testing Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-abtest-steps.png)
![Flagger A/B Testing Stages](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/flagger-abtest-steps.png)
Edit the canary analysis, remove the max/step weight and add the match conditions and iterations:
@@ -405,8 +396,8 @@ Edit the canary analysis, remove the max/step weight and add the match condition
cmd: "hey -z 1m -q 10 -c 2 -H 'Cookie: canary=always' http://app.example.com/"
```
The above configuration will run an analysis for ten minutes targeting users that have a `canary` cookie
set to `always` or those that call the service using the `X-Canary: insider` header.
The above configuration will run an analysis for ten minutes targeting users that have
a `canary` cookie set to `always` or those that call the service using the `X-Canary: insider` header.
Trigger a canary deployment by updating the container image:
@@ -444,7 +435,8 @@ Events:
Normal Synced 5s flagger Promotion completed! Scaling down podinfo.test
```
The above procedure can be extended with [custom metrics](../usage/metrics.md) checks,
The above procedure can be extended with
[custom metrics](../usage/metrics.md) checks,
[webhooks](../usage/webhooks.md),
[manual promotion](../usage/webhooks.md#manual-gating) approval and
[Slack or MS Teams](../usage/alerting.md) notifications.

View File

@@ -1,23 +1,26 @@
# Canary analysis with Prometheus Operator
This guide show you how to use Prometheus Operator for canary analysis.
This guide shows you how to use
[Prometheus Operator](https://github.com/prometheus-operator/prometheus-operator) for canary analysis.
## Prerequisites
Flagger requires a Kubernetes cluster **v1.16** or newer and Prometheus Operator **v0.40** or newer.
Install Prometheus Operator with Helm v3:
```bash
helm repo add stable https://kubernetes-charts.storage.googleapis.com
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
kubectl create ns monitoring
helm upgrade -i prometheus stable/prometheus-operator \
helm upgrade -i prometheus prometheus-community/kube-prometheus-stack \
--namespace monitoring \
--set prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues=false \
--set fullnameOverride=prometheus
```
The `prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues=false`
option allows Prometheus operator to watch serviceMonitors outside of his namespace.
option allows Prometheus Operator to watch serviceMonitors outside of its namespace.
Install Flagger by setting the metrics server to Prometheus:
@@ -38,7 +41,7 @@ helm upgrade -i loadtester flagger/loadtester \
--namespace flagger-system
```
Install podinfo demo app:
Install [podinfo](https://github.com/stefanprodan/podinfo) demo app:
```bash
helm repo add podinfo https://stefanprodan.github.io/podinfo
@@ -51,23 +54,8 @@ helm upgrade -i podinfo podinfo/podinfo \
## Service monitors
The demo app is instrumented with Prometheus so you can create service monitors to scrape podinfo's metrics endpoint:
```yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: podinfo-primary
namespace: test
spec:
endpoints:
- path: /metrics
port: http
interval: 5s
selector:
matchLabels:
app: podinfo
```
The demo app is instrumented with Prometheus,
so you can create `ServiceMonitor` objects to scrape podinfo's metrics endpoint:
```yaml
apiVersion: monitoring.coreos.com/v1
@@ -83,10 +71,24 @@ spec:
selector:
matchLabels:
app: podinfo-canary
---
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: podinfo-primary
namespace: test
spec:
endpoints:
- path: /metrics
port: http
interval: 5s
selector:
matchLabels:
app: podinfo
```
We are setting `interval: 5s` to have a more aggressive scraping.
If you do not define it, you must to use a longer interval in the Canary object.
If you do not define it, you should use a longer interval in the Canary object.
## Metric templates
@@ -191,7 +193,7 @@ spec:
cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.test/"
```
Based on the above specification, Flagger creates the primary and canary Kubernetes ClusterIP service.
Based on the above specification, Flagger creates the primary and canary Kubernetes ClusterIP service.
During the canary analysis, Prometheus will scrape the canary service and Flagger will use the HTTP error rate and
latency queries to determine if the release should be promoted or rolled back.
During the canary analysis, Prometheus will scrape the canary service and Flagger will use the HTTP error rate
and latency queries to determine if the release should be promoted or rolled back.

View File

@@ -0,0 +1,370 @@
# Skipper Canary Deployments
This guide shows you how to use the [Skipper ingress controller](https://opensource.zalando.com/skipper/kubernetes/ingress-controller/) and Flagger to automate canary deployments.
![Flagger Skipper Ingress Controller](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/flagger-skipper-overview.png)
## Prerequisites
Flagger requires a Kubernetes cluster **v1.16** or newer and Skipper ingress **0.11.40** or newer.
Install Skipper ingress-controller using [upstream definition](https://opensource.zalando.com/skipper/kubernetes/ingress-controller/#install-skipper-as-ingress-controller).
Certain arguments are relevant:
```yaml
- -enable-connection-metrics
- -histogram-metric-buckets=.01,1,10,100
- -kubernetes
- -kubernetes-in-cluster
- -kubernetes-path-mode=path-prefix
- -metrics-exp-decay-sample
- -metrics-flavour=prometheus
- -route-backend-metrics
- -route-backend-error-counters
- -route-response-metrics
- -serve-host-metrics
- -serve-route-metrics
- -whitelisted-healthcheck-cidr=0.0.0.0/0 # permit Kind source health checks
```
Install Flagger using kustomize:
```bash
kustomize build https://github.com/fluxcd/flagger/kustomize/kubernetes | kubectl apply -f -
```
## Bootstrap
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler \(HPA\), then creates a series of objects \(Kubernetes deployments, ClusterIP services and canary ingress\). These objects expose the application outside the cluster and drive the canary analysis and promotion.
Create a test namespace:
```bash
kubectl create ns test
```
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/podinfo?ref=main
```
Deploy the load testing service to generate traffic during the canary analysis:
```bash
helm upgrade -i flagger-loadtester flagger/loadtester \
--namespace=test
```
Create an ingress definition \(replace `app.example.com` with your own domain\):
```yaml
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: podinfo
namespace: test
labels:
app: podinfo
annotations:
kubernetes.io/ingress.class: "skipper"
spec:
rules:
- host: app.example.com
http:
paths:
- backend:
serviceName: podinfo
servicePort: 80
```
Save the above resource as podinfo-ingress.yaml and then apply it:
```bash
kubectl apply -f ./podinfo-ingress.yaml
```
Create a canary custom resource \(replace `app.example.com` with your own domain\):
```yaml
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
provider: skipper
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
# ingress reference
ingressRef:
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
name: podinfo
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
name: podinfo
# the maximum time in seconds for the canary deployment
  # to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
service:
# ClusterIP port number
port: 80
# container port number or name
targetPort: 9898
analysis:
# schedule interval (default 60s)
interval: 10s
# max number of failed metric checks before rollback
threshold: 10
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 5
# Skipper Prometheus checks
metrics:
- name: request-success-rate
interval: 1m
# minimum req success rate (non 5xx responses)
# percentage (0-100)
thresholdRange:
min: 99
- name: request-duration
interval: 1m
# maximum req duration P99
# milliseconds
thresholdRange:
max: 500
webhooks:
- name: gate
type: confirm-rollout
url: http://flagger-loadtester.test/gate/approve
- name: acceptance-test
type: pre-rollout
url: http://flagger-loadtester.test/
timeout: 10s
metadata:
type: bash
cmd: "curl -sd 'test' http://podinfo-canary/token | grep token"
- name: load-test
type: rollout
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
type: cmd
cmd: "hey -z 10m -q 10 -c 2 -host app.example.com http://skipper-ingress.kube-system"
logCmdOutput: "true"
```
Save the above resource as podinfo-canary.yaml and then apply it:
```bash
kubectl apply -f ./podinfo-canary.yaml
```
After a couple of seconds Flagger will create the canary objects:
```bash
# applied
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
ingress.networking.k8s.io/podinfo-ingress
canary.flagger.app/podinfo
# generated
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
ingress.networking.k8s.io/podinfo-canary
```
## Automated canary promotion
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators like HTTP requests success rate, requests average duration and pod health. Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack or MS Teams.
![Flagger Canary Stages](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/flagger-canary-steps.png)
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:4.0.6
```
Flagger detects that the deployment revision changed and starts a new rollout:
```text
kubectl -n test describe canary/podinfo
Status:
Canary Weight: 0
Failed Checks: 0
Phase: Succeeded
Events:
New revision detected! Scaling up podinfo.test
Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
Pre-rollout check acceptance-test passed
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Advance podinfo.test canary weight 20
Advance podinfo.test canary weight 25
Advance podinfo.test canary weight 30
Advance podinfo.test canary weight 35
Advance podinfo.test canary weight 40
Advance podinfo.test canary weight 45
Advance podinfo.test canary weight 50
Copying podinfo.test template spec to podinfo-primary.test
Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
Routing all traffic to primary
Promotion completed! Scaling down podinfo.test
```
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
You can monitor all canaries with:
```bash
watch kubectl get canaries --all-namespaces
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
test podinfo-2 Progressing 30 2020-08-14T12:32:12Z
test podinfo Succeeded 0 2020-08-14T11:23:38Z
```
## Automated rollback
During the canary analysis you can generate HTTP 500 errors to test if Flagger pauses and rolls back the faulted version.
Trigger another canary deployment:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:4.0.6
```
Exec into the load tester pod with:
```bash
kubectl -n test exec -it deploy/flagger-loadtester bash
```
Generate HTTP 500 errors:
```bash
hey -z 1m -c 5 -q 5 http://app.example.com/status/500
```
Generate latency:
```bash
watch -n 1 curl http://app.example.com/delay/1
```
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary, the canary is scaled to zero and the rollout is marked as failed.
```text
kubectl -n flagger-system logs deploy/flagger -f | jq .msg
New revision detected! Scaling up podinfo.test
Canary deployment podinfo.test not ready: waiting for rollout to finish: 0 of 1 updated replicas are available
Starting canary analysis for podinfo.test
Pre-rollout check acceptance-test passed
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Advance podinfo.test canary weight 20
Halt podinfo.test advancement success rate 53.42% < 99%
Halt podinfo.test advancement success rate 53.19% < 99%
Halt podinfo.test advancement success rate 48.05% < 99%
Rolling back podinfo.test failed checks threshold reached 3
Canary failed! Scaling down podinfo.test
```
## Custom metrics
The canary analysis can be extended with Prometheus queries.
Create a metric template and apply it on the cluster:
```yaml
apiVersion: flagger.app/v1beta1
kind: MetricTemplate
metadata:
name: latency
namespace: test
spec:
provider:
type: prometheus
address: http://flagger-prometheus.flagger-system:9090
query: |
histogram_quantile(0.99,
sum(
rate(
skipper_serve_route_duration_seconds_bucket{
route=~"{{ printf "kube(ew)?_%s__%s_canary__.*__%s_canary(_[0-9]+)?" namespace ingress service }}",
le="+Inf"
}[1m]
)
) by (le)
)
```
Edit the canary analysis and add the latency check:
```yaml
analysis:
metrics:
- name: "latency"
templateRef:
name: latency
thresholdRange:
max: 0.5
interval: 1m
```
The threshold is set to 500ms so if the average request duration in the last minute goes over half a second then the analysis will fail and the canary will not be promoted.
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:4.0.6
```
Generate high response latency:
```bash
watch curl http://app.example.com/delay/2
```
Watch Flagger logs:
```text
kubectl -n flagger-system logs deployment/flagger -f | jq .msg
Starting canary deployment for podinfo.test
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Halt podinfo.test advancement latency 1.20 > 0.5
Halt podinfo.test advancement latency 1.45 > 0.5
Halt podinfo.test advancement latency 1.60 > 0.5
Halt podinfo.test advancement latency 1.69 > 0.5
Halt podinfo.test advancement latency 1.70 > 0.5
Rolling back podinfo.test failed checks threshold reached 5
Canary failed! Scaling down podinfo.test
```
If you have alerting configured, Flagger will send a notification with the reason why the canary failed.

View File

@@ -0,0 +1,360 @@
# Traefik Canary Deployments
This guide shows you how to use the [Traefik](https://doc.traefik.io/traefik/) and Flagger to automate canary deployments.
## Prerequisites
Flagger requires a Kubernetes cluster **v1.16** or newer and Traefik **v2.3** or newer.
Install Traefik with Helm v3:
```bash
helm repo add traefik https://helm.traefik.io/traefik
kubectl create ns traefik
helm upgrade -i traefik traefik/traefik \
--namespace traefik \
--set additionalArguments="--metrics.prometheus=true"
```
Install Flagger and the Prometheus add-on in the same namespace as Traefik:
```bash
helm repo add flagger https://flagger.app
helm upgrade -i flagger flagger/flagger \
--namespace traefik \
--set prometheus.install=true \
--set meshProvider=traefik
```
## Bootstrap
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler \(HPA\), then creates a series of objects \(Kubernetes deployments, ClusterIP services and TraefikService\). These objects expose the application outside the cluster and drive the canary analysis and promotion.
Create a test namespace:
```bash
kubectl create ns test
```
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/podinfo?ref=main
```
Deploy the load testing service to generate traffic during the canary analysis:
```bash
helm upgrade -i flagger-loadtester flagger/loadtester \
--namespace=test
```
Create Traefik IngressRoute that references TraefikService generated by Flagger \(replace `app.example.com` with your own domain\):
```yaml
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: podinfo
namespace: test
spec:
entryPoints:
- web
routes:
- match: Host(`app.example.com`)
kind: Rule
services:
- name: podinfo
kind: TraefikService
port: 80
```
Save the above resource as podinfo-ingressroute.yaml and then apply it:
```bash
kubectl apply -f ./podinfo-ingressroute.yaml
```
Create a canary custom resource \(replace `app.example.com` with your own domain\):
```yaml
apiVersion: flagger.app/v1beta1
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
provider: traefik
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
name: podinfo
# the maximum time in seconds for the canary deployment
# to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
service:
# ClusterIP port number
port: 80
# container port number or name
targetPort: 9898
analysis:
# schedule interval (default 60s)
interval: 10s
# max number of failed metric checks before rollback
threshold: 10
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 5
# Traefik Prometheus checks
metrics:
- name: request-success-rate
interval: 1m
# minimum req success rate (non 5xx responses)
# percentage (0-100)
thresholdRange:
min: 99
- name: request-duration
interval: 1m
# maximum req duration P99
# milliseconds
thresholdRange:
max: 500
webhooks:
- name: acceptance-test
type: pre-rollout
url: http://flagger-loadtester.test/
timeout: 10s
metadata:
type: bash
cmd: "curl -sd 'test' http://podinfo-canary.test/token | grep token"
- name: load-test
type: rollout
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
type: cmd
cmd: "hey -z 10m -q 10 -c 2 -host app.example.com http://traefik.traefik"
logCmdOutput: "true"
```
Save the above resource as podinfo-canary.yaml and then apply it:
```bash
kubectl apply -f ./podinfo-canary.yaml
```
After a couple of seconds Flagger will create the canary objects:
```bash
# applied
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
canary.flagger.app/podinfo
# generated
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
traefikservice.traefik.containo.us/podinfo
```
## Automated canary promotion
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators like HTTP requests success rate, requests average duration and pod health. Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack or MS Teams.
![Flagger Canary Stages](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/flagger-canary-steps.png)
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:4.0.6
```
Flagger detects that the deployment revision changed and starts a new rollout:
```text
kubectl -n test describe canary/podinfo
Status:
Canary Weight: 0
Failed Checks: 0
Phase: Succeeded
Events:
New revision detected! Scaling up podinfo.test
Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
Pre-rollout check acceptance-test passed
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Advance podinfo.test canary weight 20
Advance podinfo.test canary weight 25
Advance podinfo.test canary weight 30
Advance podinfo.test canary weight 35
Advance podinfo.test canary weight 40
Advance podinfo.test canary weight 45
Advance podinfo.test canary weight 50
Copying podinfo.test template spec to podinfo-primary.test
Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
Routing all traffic to primary
Promotion completed! Scaling down podinfo.test
```
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
You can monitor all canaries with:
```bash
watch kubectl get canaries --all-namespaces
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
test podinfo-2 Progressing 30 2020-08-14T12:32:12Z
test        podinfo     Succeeded     0        2020-08-14T11:23:38Z
```
## Automated rollback
During the canary analysis you can generate HTTP 500 errors to test if Flagger pauses and rolls back the faulted version.
Trigger another canary deployment:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:4.0.6
```
Exec into the load tester pod with:
```bash
kubectl -n test exec -it deploy/flagger-loadtester bash
```
Generate HTTP 500 errors:
```bash
hey -z 1m -c 5 -q 5 http://app.example.com/status/500
```
Generate latency:
```bash
watch -n 1 curl http://app.example.com/delay/1
```
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary, the canary is scaled to zero and the rollout is marked as failed.
```text
kubectl -n traefik logs deploy/flagger -f | jq .msg
New revision detected! Scaling up podinfo.test
Canary deployment podinfo.test not ready: waiting for rollout to finish: 0 of 1 updated replicas are available
Starting canary analysis for podinfo.test
Pre-rollout check acceptance-test passed
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Advance podinfo.test canary weight 20
Halt podinfo.test advancement success rate 53.42% < 99%
Halt podinfo.test advancement success rate 53.19% < 99%
Halt podinfo.test advancement success rate 48.05% < 99%
Rolling back podinfo.test failed checks threshold reached 3
Canary failed! Scaling down podinfo.test
```
## Custom metrics
The canary analysis can be extended with Prometheus queries.
Create a metric template and apply it on the cluster:
```yaml
apiVersion: flagger.app/v1beta1
kind: MetricTemplate
metadata:
name: not-found-percentage
namespace: test
spec:
provider:
type: prometheus
address: http://flagger-prometheus.traefik:9090
query: |
sum(
rate(
traefik_service_request_duration_seconds_bucket{
service=~"{{ namespace }}-{{ target }}-canary-[0-9a-zA-Z-]+@kubernetescrd",
code="404",
}[{{ interval }}]
)
)
/
sum(
rate(
traefik_service_request_duration_seconds_bucket{
service=~"{{ namespace }}-{{ target }}-canary-[0-9a-zA-Z-]+@kubernetescrd",
}[{{ interval }}]
)
) * 100
```
Edit the canary analysis and add the not found error rate check:
```yaml
analysis:
metrics:
- name: "404s percentage"
templateRef:
name: not-found-percentage
thresholdRange:
max: 5
interval: 1m
```
The above configuration validates the canary by checking if the HTTP 404 req/sec percentage is below 5 percent of the total traffic. If the 404s rate reaches the 5% threshold, then the canary fails.
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=stefanprodan/podinfo:4.0.6
```
Generate 404s:
```bash
watch curl http://app.example.com/status/404
```
Watch Flagger logs:
```text
kubectl -n traefik logs deployment/flagger -f | jq .msg
Starting canary deployment for podinfo.test
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Halt podinfo.test advancement 404s percentage 6.20 > 5
Halt podinfo.test advancement 404s percentage 6.45 > 5
Halt podinfo.test advancement 404s percentage 7.60 > 5
Halt podinfo.test advancement 404s percentage 8.69 > 5
Halt podinfo.test advancement 404s percentage 9.70 > 5
Rolling back podinfo.test failed checks threshold reached 5
Canary failed! Scaling down podinfo.test
```
If you have [alerting](../usage/alerting.md) configured, Flagger will send a notification with the reason why the canary failed.
For an in-depth look at the analysis process read the [usage docs](../usage/how-it-works.md).

View File

@@ -139,7 +139,7 @@ Note that without resource requests the horizontal pod autoscaler can't determin
A production environment should be able to handle traffic bursts without impacting the quality of service. This can be achieved with Kubernetes autoscaling capabilities. Autoscaling in Kubernetes has two dimensions: the Cluster Autoscaler that deals with node scaling operations and the Horizontal Pod Autoscaler that automatically scales the number of pods in a deployment.
```yaml
apiVersion: autoscaling/v2beta1
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
spec:
scaleTargetRef:

View File

@@ -1,9 +1,9 @@
# Alerting
Flagger can be configured to send alerts to various chat platforms. You can define a global alert provider at
install time or configure alerts on a per canary basis.
Flagger can be configured to send alerts to various chat platforms.
You can define a global alert provider at install time or configure alerts on a per canary basis.
### Global configuration
## Global configuration
Flagger can be configured to send Slack notifications:
@@ -14,15 +14,16 @@ helm upgrade -i flagger flagger/flagger \
--set slack.user=flagger
```
Once configured with a Slack incoming **webhook**, Flagger will post messages when a canary deployment
has been initialised, when a new revision has been detected and if the canary analysis failed or succeeded.
Once configured with a Slack incoming **webhook**,
Flagger will post messages when a canary deployment has been initialised,
when a new revision has been detected and if the canary analysis failed or succeeded.
![Slack Notifications](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/slack-canary-notifications.png)
![Slack Notifications](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/screens/slack-canary-notifications.png)
A canary deployment will be rolled back if the progress deadline exceeded or if the analysis reached the
maximum number of failed checks:
A canary deployment will be rolled back if the progress deadline is exceeded
or if the analysis reaches the maximum number of failed checks:
![Slack Notifications](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/slack-canary-failed.png)
![Slack Notifications](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/screens/slack-canary-failed.png)
Flagger can be configured to send notifications to Microsoft Teams:
@@ -33,18 +34,16 @@ helm upgrade -i flagger flagger/flagger \
Similar to Slack, Flagger alerts on canary analysis events:
![MS Teams Notifications](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/flagger-ms-teams-notifications.png)
![MS Teams Notifications](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/screens/flagger-ms-teams-notifications.png)
![MS Teams Notifications](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/flagger-ms-teams-failed.png)
![MS Teams Notifications](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/screens/flagger-ms-teams-failed.png)
### Canary configuration
## Canary configuration
Configuring alerting globally has several limitations as it's not possible to specify different channels
or configure the verbosity on a per canary basis.
To make the alerting more flexible, the canary analysis can be extended
with a list of alerts that reference an alert provider.
For each alert, users can configure the severity level.
The alerts section overrides the global setting.
or configure the verbosity on a per canary basis. To make the alerting more flexible,
the canary analysis can be extended with a list of alerts that reference an alert provider.
For each alert, users can configure the severity level. The alerts section overrides the global setting.
Slack example:
@@ -103,15 +102,16 @@ The canary analysis can have a list of alerts, each alert referencing an alert p
```
Alert fields:
* **name** (required)
* **name** \(required\)
* **severity** levels: `info`, `warn`, `error` (default info)
* **providerRef.name** alert provider name (required)
* **providerRef.namespace** alert provider namespace (defaults to the canary namespace)
When the severity is set to `warn`, Flagger will alert when waiting on manual confirmation or if the analysis fails.
When the severity is set to `warn`, Flagger will alert when waiting on manual confirmation or if the analysis fails.
When the severity is set to `error`, Flagger will alert only if the canary analysis fails.
### Prometheus Alert Manager
## Prometheus Alert Manager
You can use Alertmanager to trigger alerts when a canary deployment failed:
@@ -125,4 +125,3 @@ You can use Alertmanager to trigger alerts when a canary deployment failed:
summary: "Canary failed"
description: "Workload {{ $labels.name }} namespace {{ $labels.namespace }}"
```

View File

@@ -1,33 +1,34 @@
# Deployment Strategies
Flagger can run automated application analysis, promotion and rollback for the following deployment strategies:
* **Canary Release** (progressive traffic shifting)
* Istio, Linkerd, App Mesh, NGINX, Contour, Gloo
* **A/B Testing** (HTTP headers and cookies traffic routing)
* Istio, App Mesh, NGINX, Contour
* **Blue/Green** (traffic switching)
* Kubernetes CNI, Istio, Linkerd, App Mesh, NGINX, Contour, Gloo
* **Blue/Green Mirroring** (traffic shadowing)
* Istio
For Canary releases and A/B testing you'll need a Layer 7 traffic management solution like a service mesh or an ingress controller.
For Blue/Green deployments no service mesh or ingress controller is required.
* **Canary Release** \(progressive traffic shifting\)
* Istio, Linkerd, App Mesh, NGINX, Skipper, Contour, Gloo Edge, Traefik
* **A/B Testing** \(HTTP headers and cookies traffic routing\)
* Istio, App Mesh, NGINX, Contour, Gloo Edge
* **Blue/Green** \(traffic switching\)
* Kubernetes CNI, Istio, Linkerd, App Mesh, NGINX, Contour, Gloo Edge
* **Blue/Green Mirroring** \(traffic shadowing\)
* Istio
For Canary releases and A/B testing you'll need a Layer 7 traffic management solution like
a service mesh or an ingress controller. For Blue/Green deployments no service mesh or ingress controller is required.
A canary analysis is triggered by changes in any of the following objects:
* Deployment PodSpec (container image, command, ports, env, resources, etc)
* Deployment PodSpec \(container image, command, ports, env, resources, etc\)
* ConfigMaps mounted as volumes or mapped to environment variables
* Secrets mounted as volumes or mapped to environment variables
### Canary Release
## Canary Release
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance
indicators like HTTP requests success rate, requests average duration and pod health.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring
key performance indicators like HTTP requests success rate, requests average duration and pod health.
Based on analysis of the KPIs a canary is promoted or aborted.
![Flagger Canary Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-steps.png)
![Flagger Canary Stages](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/flagger-canary-steps.png)
The canary analysis runs periodically until it reaches the maximum traffic weight or the failed checks threshold.
The canary analysis runs periodically until it reaches the maximum traffic weight or the failed checks threshold.
Spec:
@@ -54,61 +55,60 @@ Spec:
The above analysis, if it succeeds, will run for 25 minutes while validating the HTTP metrics and webhooks every minute.
You can determine the minimum time it takes to validate and promote a canary deployment using this formula:
```
```text
interval * (maxWeight / stepWeight)
```
And the time it takes for a canary to be rolled back when the metrics or webhook checks are failing:
```
interval * threshold
```text
interval * threshold
```
When `stepWeightPromotion` is specified, the promotion phase happens in stages,
the traffic is routed back to the primary pods in a progressive manner,
the primary weight is increased until it reaches 100%.
When `stepWeightPromotion` is specified, the promotion phase happens in stages, the traffic is routed back
to the primary pods in a progressive manner, the primary weight is increased until it reaches 100%.
In emergency cases, you may want to skip the analysis phase and ship changes directly to production.
At any time you can set the `spec.skipAnalysis: true`.
When skip analysis is enabled, Flagger checks if the canary deployment is healthy and
promotes it without analysing it. If an analysis is underway, Flagger cancels it and runs the promotion.
In emergency cases, you may want to skip the analysis phase and ship changes directly to production.
At any time you can set the `spec.skipAnalysis: true`. When skip analysis is enabled,
Flagger checks if the canary deployment is healthy and promotes it without analysing it.
If an analysis is underway, Flagger cancels it and runs the promotion.
Gated canary promotion stages:
* scan for canary deployments
* check primary and canary deployment status
* halt advancement if a rolling update is underway
* halt advancement if pods are unhealthy
* halt advancement if a rolling update is underway
* halt advancement if pods are unhealthy
* call confirm-rollout webhooks and check results
* halt advancement if any hook returns a non HTTP 2xx result
* halt advancement if any hook returns a non HTTP 2xx result
* call pre-rollout webhooks and check results
* halt advancement if any hook returns a non HTTP 2xx result
* increment the failed checks counter
* increase canary traffic weight percentage from 0% to 2% (step weight)
* halt advancement if any hook returns a non HTTP 2xx result
* increment the failed checks counter
* increase canary traffic weight percentage from 0% to 2% \(step weight\)
* call rollout webhooks and check results
* check canary HTTP request success rate and latency
* halt advancement if any metric is under the specified threshold
* increment the failed checks counter
* halt advancement if any metric is under the specified threshold
* increment the failed checks counter
* check if the number of failed checks reached the threshold
* route all traffic to primary
* scale to zero the canary deployment and mark it as failed
* call post-rollout webhooks
* post the analysis result to Slack
* wait for the canary deployment to be updated and start over
* increase canary traffic weight by 2% (step weight) till it reaches 50% (max weight)
* halt advancement if any webhook call fails
* halt advancement while canary request success rate is under the threshold
* halt advancement while canary request duration P99 is over the threshold
* halt advancement while any custom metric check fails
* halt advancement if the primary or canary deployment becomes unhealthy
* halt advancement while canary deployment is being scaled up/down by HPA
* route all traffic to primary
* scale to zero the canary deployment and mark it as failed
* call post-rollout webhooks
* post the analysis result to Slack
* wait for the canary deployment to be updated and start over
* increase canary traffic weight by 2% \(step weight\) till it reaches 50% \(max weight\)
* halt advancement if any webhook call fails
* halt advancement while canary request success rate is under the threshold
* halt advancement while canary request duration P99 is over the threshold
* halt advancement while any custom metric check fails
* halt advancement if the primary or canary deployment becomes unhealthy
* halt advancement while canary deployment is being scaled up/down by HPA
* call confirm-promotion webhooks and check results
* halt advancement if any hook returns a non HTTP 2xx result
* halt advancement if any hook returns a non HTTP 2xx result
* promote canary to primary
* copy ConfigMaps and Secrets from canary to primary
* copy canary deployment spec template over primary
* copy ConfigMaps and Secrets from canary to primary
* copy canary deployment spec template over primary
* wait for primary rolling update to finish
* halt advancement if pods are unhealthy
* halt advancement if pods are unhealthy
* route all traffic to primary
* scale to zero the canary deployment
* mark rollout as finished
@@ -116,12 +116,58 @@ Gated canary promotion stages:
* send notification with the canary analysis result
* wait for the canary deployment to be updated and start over
### A/B Testing
### Rollout Weights
For frontend applications that require session affinity you should use HTTP headers or cookies match conditions
to ensure a set of users will stay on the same version for the whole duration of the canary analysis.
By default Flagger uses linear weight values for the promotion, with the start value,
the step and the maximum weight value in 0 to 100 range.
![Flagger A/B Testing Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-abtest-steps.png)
Example:
```yaml
canary:
analysis:
promotion:
maxWeight: 50
stepWeight: 20
```
This configuration performs analysis starting from 20, increasing by 20 until weight goes above 50.
We would have steps (canary weight : primary weight):
* 20 (20 : 80)
* 40 (40 : 60)
* 60 (60 : 40)
* promotion
In order to enable non-linear promotion a new parameter was introduced:
* `stepWeights` - determines the ordered array of weights, which shall be used during canary promotion.
Example:
```yaml
canary:
analysis:
promotion:
stepWeights: [1, 2, 10, 80]
```
This configuration performs analysis starting from 1, going through `stepWeights` values till 80.
We would have steps (canary weight : primary weight):
* 1 (1 : 99)
* 2 (2 : 98)
* 10 (10 : 90)
* 80 (80 : 20)
* promotion
## A/B Testing
For frontend applications that require session affinity you should use
HTTP headers or cookies match conditions to ensure a set of users
will stay on the same version for the whole duration of the canary analysis.
![Flagger A/B Testing Stages](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/flagger-abtest-steps.png)
You can enable A/B testing by specifying the HTTP match conditions and the number of iterations.
If Flagger finds a HTTP match condition, it will ignore the `maxWeight` and `stepWeight` settings.
@@ -149,14 +195,14 @@ Istio example:
The above configuration will run an analysis for ten minutes targeting the Safari users and those that have a test cookie.
You can determine the minimum time that it takes to validate and promote a canary deployment using this formula:
```
```text
interval * iterations
```
And the time it takes for a canary to be rolled back when the metrics or webhook checks are failing:
```
interval * threshold
```text
interval * threshold
```
Istio example:
@@ -179,13 +225,14 @@ Istio example:
The header keys must be lowercase and use hyphen as the separator.
Header values are case-sensitive and formatted as follows:
- `exact: "value"` for exact string match
- `prefix: "value"` for prefix-based match
- `suffix: "value"` for suffix-based match
- `regex: "value"` for [RE2](https://github.com/google/re2/wiki/Syntax) style regex-based match
Note that the `sourceLabels` match conditions are applicable only when the `mesh` gateway
is included in the `canary.service.gateways` list.
* `exact: "value"` for exact string match
* `prefix: "value"` for prefix-based match
* `suffix: "value"` for suffix-based match
* `regex: "value"` for [RE2](https://github.com/google/re2/wiki/Syntax) style regex-based match
Note that the `sourceLabels` match conditions are applicable only when
the `mesh` gateway is included in the `canary.service.gateways` list.
App Mesh example:
@@ -233,24 +280,28 @@ NGINX example:
exact: "canary"
```
Note that the NGINX ingress controller supports only exact matching for cookies names where the value must be set to `always`.
Note that the NGINX ingress controller supports only exact matching for
cookies names where the value must be set to `always`.
Starting with NGINX ingress v0.31, regex matching is supported for header values.
The above configurations will route users with the x-canary header or canary cookie to the canary instance during analysis:
The above configurations will route users with the x-canary header
or canary cookie to the canary instance during analysis:
```bash
curl -H 'X-Canary: insider' http://app.example.com
curl -b 'canary=always' http://app.example.com
```
### Blue/Green Deployments
## Blue/Green Deployments
For applications that are not deployed on a service mesh, Flagger can orchestrate blue/green style deployments
with Kubernetes L4 networking. When using Istio you have the option to mirror traffic between blue and green.
For applications that are not deployed on a service mesh,
Flagger can orchestrate blue/green style deployments with Kubernetes L4 networking.
When using Istio you have the option to mirror traffic between blue and green.
![Flagger Blue/Green Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-bluegreen-steps.png)
![Flagger Blue/Green Stages](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/flagger-bluegreen-steps.png)
You can use the blue/green deployment strategy by replacing `stepWeight/maxWeight` with `iterations` in the `analysis` spec:
You can use the blue/green deployment strategy by replacing
`stepWeight/maxWeight` with `iterations` in the `analysis` spec:
```yaml
analysis:
@@ -262,13 +313,14 @@ You can use the blue/green deployment strategy by replacing `stepWeight/maxWeigh
threshold: 2
```
With the above configuration Flagger will run conformance and load tests on the canary pods for ten minutes.
If the metrics analysis succeeds, live traffic will be switched from the old version to the new one when the
canary is promoted.
With the above configuration Flagger will run conformance and load tests on the canary pods for ten minutes.
If the metrics analysis succeeds, live traffic will be switched from
the old version to the new one when the canary is promoted.
The blue/green deployment strategy is supported for all service mesh providers.
Blue/Green rollout steps for service mesh:
* detect new revision (deployment spec, secrets or configmaps changes)
* scale up the canary (green)
* run conformance tests for the canary pods
@@ -280,24 +332,22 @@ Blue/Green rollout steps for service mesh:
* route traffic to primary
* scale down canary
After the analysis finishes, the traffic is routed to the canary (green) before triggering the primary (blue)
rolling update, this ensures a smooth transition to the new version avoiding dropping in-flight requests during
the Kubernetes deployment rollout.
After the analysis finishes, the traffic is routed to the canary (green) before
triggering the primary (blue) rolling update,
this ensures a smooth transition to the new version avoiding dropping
in-flight requests during the Kubernetes deployment rollout.
### Blue/Green with Traffic Mirroring
## Blue/Green with Traffic Mirroring
Traffic Mirroring is a pre-stage in a Canary (progressive traffic shifting) or
Blue/Green deployment strategy. Traffic mirroring will copy each incoming
request, sending one request to the primary and one to the canary service.
The response from the primary is sent back to the user. The response from the canary
is discarded. Metrics are collected on both requests so that the deployment will
only proceed if the canary metrics are healthy.
Traffic Mirroring is a pre-stage in a Canary (progressive traffic shifting) or Blue/Green deployment strategy.
Traffic mirroring will copy each incoming request, sending one request to the primary and one to the canary service.
The response from the primary is sent back to the user. The response from the canary is discarded.
Metrics are collected on both requests so that the deployment will only proceed if the canary metrics are healthy.
Mirroring should be used for requests that are **idempotent** or capable of
being processed twice (once by the primary and once by the canary). Reads are
idempotent. Before using mirroring on requests that may be writes, you should
consider what will happen if a write is duplicated and handled by the primary
and canary.
Mirroring should be used for requests that are **idempotent** or capable of being processed
twice (once by the primary and once by the canary).
Reads are idempotent. Before using mirroring on requests that may be writes,
you should consider what will happen if a write is duplicated and handled by the primary and canary.
To use mirroring, set `spec.analysis.mirror` to `true`.
@@ -318,6 +368,7 @@ Istio example:
```
Mirroring rollout steps for service mesh:
* detect new revision (deployment spec, secrets or configmaps changes)
* scale from zero the canary deployment
* wait for the HPA to set the canary minimum replicas
@@ -330,7 +381,7 @@ Mirroring rollout steps for service mesh:
* abort the canary release if the failure threshold is reached
* stop traffic mirroring after the number of iterations is reached
* route live traffic to the canary pods
* promote the canary (update the primary secrets, configmaps and deployment spec)
* promote the canary \(update the primary secrets, configmaps and deployment spec\)
* wait for the primary deployment rollout to finish
* wait for the HPA to set the primary minimum replicas
* check primary pods health
@@ -338,6 +389,7 @@ Mirroring rollout steps for service mesh:
* scale to zero the canary
* send notification with the canary analysis result
After the analysis finishes, the traffic is routed to the canary (green) before triggering the primary (blue)
rolling update, this ensures a smooth transition to the new version avoiding dropping in-flight requests during
the Kubernetes deployment rollout.
After the analysis finishes, the traffic is routed to the canary (green) before
triggering the primary (blue) rolling update, this ensures a smooth transition
to the new version avoiding dropping in-flight requests during the Kubernetes deployment rollout.

View File

@@ -1,12 +1,12 @@
# How it works
[Flagger](https://github.com/weaveworks/flagger) can be configured to automate the release process
for Kubernetes workloads with a custom resource named canary.
[Flagger](https://github.com/fluxcd/flagger) can be configured to automate the release process for
Kubernetes workloads with a custom resource named canary.
### Canary resource
## Canary resource
The canary custom resource defines the release process of an application running on Kubernetes
and is portable across clusters, service meshes and ingress providers.
The canary custom resource defines the release process of an application running on Kubernetes and is
portable across clusters, service meshes and ingress providers.
For a deployment named _podinfo_, a canary release with progressive traffic shifting can be defined as:
@@ -43,15 +43,15 @@ spec:
cmd: "hey -z 1m -q 10 -c 2 http://podinfo-canary.test:9898/"
```
When you deploy a new version of an app, Flagger gradually shifts traffic to the canary,
and at the same time, measures the requests success rate as well as the average response duration.
You can extend the canary analysis with custom metrics, acceptance and load testing
to harden the validation process of your app release process.
When you deploy a new version of an app, Flagger gradually shifts traffic to the canary, and at the same time,
measures the requests success rate as well as the average response duration.
You can extend the canary analysis with custom metrics,
acceptance and load testing to harden the validation process of your app release process.
If you are running multiple service meshes or ingress controllers in the same cluster,
you can override the global provider for a specific canary with `spec.provider`.
### Canary target
## Canary target
A canary resource can target a Kubernetes Deployment or DaemonSet.
@@ -65,7 +65,7 @@ spec:
kind: Deployment
name: podinfo
autoscalerRef:
apiVersion: autoscaling/v2beta1
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
name: podinfo
```
@@ -75,15 +75,10 @@ Based on the above configuration, Flagger generates the following Kubernetes obj
* `deployment/<targetRef.name>-primary`
* `hpa/<autoscalerRef.name>-primary`
The primary deployment is considered the stable release of your app, by default all traffic is routed to this version
and the target deployment is scaled to zero.
Flagger will detect changes to the target deployment (including secrets and configmaps) and will perform a
canary analysis before promoting the new version as primary.
If the target deployment uses secrets and/or configmaps, Flagger will create a copy of each object using the `-primary`
prefix and will reference these objects in the primary deployment. You can disable the secrets/configmaps tracking
with the `-enable-config-tracking=false` command flag in the Flagger deployment manifest under containers args
or by setting `--set configTracking.enabled=false` when installing Flagger with Helm.
The primary deployment is considered the stable release of your app,
by default all traffic is routed to this version and the target deployment is scaled to zero.
Flagger will detect changes to the target deployment (including secrets and configmaps)
and will perform a canary analysis before promoting the new version as primary.
**Note** that the target deployment must have a single label selector in the format `app: <DEPLOYMENT-NAME>`:
@@ -102,18 +97,29 @@ spec:
app: podinfo
```
Besides `app` Flagger supports `name` and `app.kubernetes.io/name` selectors.
If you use a different convention you can specify your label with
the `-selector-labels=my-app-label` command flag in the Flagger deployment manifest under containers args
In addition to `app`, Flagger supports `name` and `app.kubernetes.io/name` selectors.
If you use a different convention you can specify your label with the `-selector-labels=my-app-label`
command flag in the Flagger deployment manifest under containers args
or by setting `--set selectorLabels=my-app-label` when installing Flagger with Helm.
The autoscaler reference is optional, when specified, Flagger will pause the traffic increase while the
target and primary deployments are scaled up or down. HPA can help reduce the resource usage during the canary analysis.
If the target deployment uses secrets and/or configmaps,
Flagger will create a copy of each object using the `-primary` suffix
and will reference these objects in the primary deployment.
If you annotate your ConfigMap or Secret with `flagger.app/config-tracking: disabled`,
Flagger will use the same object for the primary deployment instead of making a primary copy.
You can disable the secrets/configmaps tracking globally with the `-enable-config-tracking=false`
command flag in the Flagger deployment manifest under containers args
or by setting `--set configTracking.enabled=false` when installing Flagger with Helm,
but disabling config-tracking using the per Secret/ConfigMap annotation may fit your use-case better.
The progress deadline represents the maximum time in seconds for the canary deployment to make progress
before it is rolled back, defaults to ten minutes.
The autoscaler reference is optional, when specified,
Flagger will pause the traffic increase while the target and primary deployments are scaled up or down.
HPA can help reduce the resource usage during the canary analysis.
### Canary service
The progress deadline represents the maximum time in seconds for the canary deployment to
make progress before it is rolled back, defaults to ten minutes.
## Canary service
A canary resource dictates how the target workload is exposed inside the cluster.
The canary target should expose a TCP port that will be used by Flagger to create the ClusterIP Services.
@@ -133,22 +139,27 @@ The `service.name` is optional, defaults to `spec.targetRef.name`.
The `service.targetPort` can be a container port number or name.
The `service.portName` is optional (defaults to `http`), if your workload uses gRPC then set the port name to `grpc`.
If port discovery is enabled, Flagger scans the target workload and extracts the containers
ports excluding the port specified in the canary service and service mesh sidecar ports.
If port discovery is enabled, Flagger scans the target workload and extracts the containers ports
excluding the port specified in the canary service and service mesh sidecar ports.
These ports will be used when generating the ClusterIP services.
Based on the canary spec service, Flagger creates the following Kubernetes ClusterIP service:
* `<service.name>.<namespace>.svc.cluster.local`
selector `app=<name>-primary`
* `<service.name>-primary.<namespace>.svc.cluster.local`
selector `app=<name>-primary`
* `<service.name>-canary.<namespace>.svc.cluster.local`
selector `app=<name>`
This ensures that traffic to `podinfo.test:9898` will be routed to the latest stable release of your app.
The `podinfo-canary.test:9898` address is available only during the
canary analysis and can be used for conformance testing or load testing.
This ensures that traffic to `podinfo.test:9898` will be routed to the latest stable release of your app.
The `podinfo-canary.test:9898` address is available only during the canary analysis
and can be used for conformance testing or load testing.
You can configure Flagger to set annotations and labels for the generated services with:
@@ -173,8 +184,8 @@ spec:
test: "test"
```
Besides port mapping and metadata, the service specification can contain URI match and rewrite rules,
timeout and retry policies:
Besides port mapping and metadata, the service specification can
contain URI match and rewrite rules, timeout and retry policies:
```yaml
spec:
@@ -191,13 +202,13 @@ spec:
timeout: 5s
```
When using **Istio** as the mesh provider, you can also specify
HTTP header operations, CORS and traffic policies, Istio gateways and hosts.
When using **Istio** as the mesh provider, you can also specify HTTP header operations,
CORS and traffic policies, Istio gateways and hosts.
The Istio routing configuration can be found [here](../faq.md#istio-routing).
### Canary status
You can use kubectl to get the current status of canary deployments cluster wide:
## Canary status
You can use kubectl to get the current status of canary deployments cluster wide:
```bash
kubectl get canaries --all-namespaces
@@ -263,14 +274,15 @@ kubectl wait canary/podinfo --for=condition=promoted --timeout=5m
kubectl get canary/podinfo | grep Succeeded
```
### Canary finalizers
## Canary finalizers
The default behavior of Flagger on canary deletion is to leave resources that aren't owned by the controller
in their current state. This simplifies the deletion action and avoids possible deadlocks during resource
finalization. In the event the canary was introduced with existing resource(s) (i.e. service, virtual service, etc.),
they would be mutated during the initialization phase and no longer reflect their initial state. If the desired
functionality upon deletion is to revert the resources to their initial state, the `revertOnDeletion` attribute
can be enabled.
The default behavior of Flagger on canary deletion is to leave resources that aren't owned
by the controller in their current state.
This simplifies the deletion action and avoids possible deadlocks during resource finalization.
In the event the canary was introduced with existing resource(s) (i.e. service, virtual service, etc.),
they would be mutated during the initialization phase and no longer reflect their initial state.
If the desired functionality upon deletion is to revert the resources to their initial state,
the `revertOnDeletion` attribute can be enabled.
```yaml
spec:
@@ -279,19 +291,21 @@ spec:
When a deletion action is submitted to the cluster, Flagger will attempt to revert the following resources:
* [Canary target](#canary-target) replicas will be updated to the primary replica count
* [Canary service](#canary-service) selector will be reverted
* [Canary target](how-it-works.md#canary-target) replicas will be updated to the primary replica count
* [Canary service](how-it-works.md#canary-service) selector will be reverted
* Mesh/Ingress traffic routed to the target
The recommended approach to disable canary analysis would be utilization of the `skipAnalysis`
attribute, which limits the need for resource reconciliation. Utilizing the `revertOnDeletion` attribute should be
enabled when you no longer plan to rely on Flagger for deployment management.
The recommended approach to disable canary analysis would be utilization of the `skipAnalysis` attribute,
which limits the need for resource reconciliation.
Utilizing the `revertOnDeletion` attribute should be enabled when
you no longer plan to rely on Flagger for deployment management.
**Note** When this feature is enabled expect a delay in the delete action due to the reconciliation.
**Note** When this feature is enabled expect a delay in the delete action due to the reconciliation.
### Canary analysis
## Canary analysis
The canary analysis defines:
* the type of [deployment strategy](deployment-strategies.md)
* the [metrics](metrics.md) used to validate the canary version
* the [webhooks](webhooks.md) used for conformance testing, load testing and manual gating
@@ -333,5 +347,7 @@ Spec:
```
The canary analysis runs periodically until it reaches the maximum traffic weight or the number of iterations.
On each run, Flagger calls the webhooks, checks the metrics and if the failed checks threshold is reached, stops the
analysis and rolls back the canary. If alerting is configured, Flagger will post the analysis result using the alert providers.
On each run, Flagger calls the webhooks, checks the metrics and if the failed checks threshold is reached,
stops the analysis and rolls back the canary.
If alerting is configured, Flagger will post the analysis result using the alert providers.

View File

@@ -1,11 +1,12 @@
# Metrics Analysis
As part of the analysis process, Flagger can validate service level objectives (SLOs) like
availability, error rate percentage, average response time and any other objective based on app specific metrics.
As part of the analysis process, Flagger can validate service level objectives
(SLOs) like availability, error rate percentage, average response time and any other objective
based on app specific metrics.
If a drop in performance is noticed during the SLOs analysis,
the release will be automatically rolled back with minimum impact to end-users.
### Builtin metrics
## Builtin metrics
Flagger comes with two builtin metric checks: HTTP request success rate and duration.
@@ -26,15 +27,16 @@ Flagger comes with two builtin metric checks: HTTP request success rate and dura
max: 500
```
For each metric you can specify a range of accepted values with `thresholdRange`
and the window size or the time series with `interval`.
The builtin checks are available for every service mesh / ingress controller
For each metric you can specify a range of accepted values with `thresholdRange` and
the window size or the time series with `interval`.
The builtin checks are available for every service mesh / ingress controller
and are implemented with [Prometheus queries](../faq.md#metrics).
### Custom metrics
## Custom metrics
The canary analysis can be extended with custom metric checks. Using a `MetricTemplate` custom resource, you
configure Flagger to connect to a metric provider and run a query that returns a `float64` value.
The canary analysis can be extended with custom metric checks.
Using a `MetricTemplate` custom resource,
you configure Flagger to connect to a metric provider and run a query that returns a `float64` value.
The query result is used to validate the canary based on the specified threshold range.
```yaml
@@ -53,12 +55,12 @@ spec:
The following variables are available in query templates:
- `name` (canary.metadata.name)
- `namespace` (canary.metadata.namespace)
- `target` (canary.spec.targetRef.name)
- `service` (canary.spec.service.name)
- `ingress` (canary.spec.ingressRef.name)
- `interval` (canary.spec.analysis.metrics[].interval)
* `name` (canary.metadata.name)
* `namespace` (canary.metadata.namespace)
* `target` (canary.spec.targetRef.name)
* `service` (canary.spec.service.name)
* `ingress` (canary.spec.ingressRef.name)
* `interval` (canary.spec.analysis.metrics[].interval)
A canary analysis metric can reference a template with `templateRef`:
@@ -79,10 +81,10 @@ A canary analysis metric can reference a template with `templateRef`:
interval: 1m
```
### Prometheus
## Prometheus
You can create custom metric checks targeting a Prometheus server
by setting the provider type to `prometheus` and writing the query in PromQL.
You can create custom metric checks targeting a Prometheus server by
setting the provider type to `prometheus` and writing the query in PromQL.
Prometheus template example:
@@ -95,7 +97,7 @@ metadata:
spec:
provider:
type: prometheus
address: http://promethues.istio-system:9090
address: http://prometheus.istio-system:9090
query: |
100 - sum(
rate(
@@ -133,9 +135,8 @@ Reference the template in the canary analysis:
interval: 1m
```
The above configuration validates the canary by checking
if the HTTP 404 req/sec percentage is below 5 percent of the total traffic.
If the 404s rate reaches the 5% threshold, then the canary fails.
The above configuration validates the canary by checking if the HTTP 404 req/sec percentage
is below 5 percent of the total traffic. If the 404s rate reaches the 5% threshold, then the canary fails.
Prometheus gRPC error rate example:
@@ -148,7 +149,7 @@ metadata:
spec:
provider:
type: prometheus
address: http://flagger-promethues.flagger-system:9090
address: http://flagger-prometheus.flagger-system:9090
query: |
100 - sum(
rate(
@@ -170,9 +171,42 @@ spec:
) * 100
```
The above template is for gRPC services instrumented with [go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus).
The above template is for gRPC services instrumented with
[go-grpc-prometheus](https://github.com/grpc-ecosystem/go-grpc-prometheus).
### Datadog
## Prometheus authentication
If your Prometheus API requires basic authentication, you can create a secret in the same namespace
as the `MetricTemplate` with the basic-auth credentials:
```yaml
apiVersion: v1
kind: Secret
metadata:
name: prom-basic-auth
namespace: flagger
data:
username: your-user
password: your-password
```
Then reference the secret in the `MetricTemplate`:
```yaml
apiVersion: flagger.app/v1beta1
kind: MetricTemplate
metadata:
name: my-metric
namespace: flagger
spec:
provider:
type: prometheus
address: http://prometheus.monitoring:9090
secretRef:
name: prom-basic-auth
```
## Datadog
You can create custom metric checks using the Datadog provider.
@@ -234,8 +268,7 @@ Reference the template in the canary analysis:
interval: 1m
```
### Amazon CloudWatch
## Amazon CloudWatch
You can create custom metric checks using the CloudWatch metrics provider.
@@ -298,7 +331,8 @@ spec:
]
```
The query format documentation can be found [here](https://aws.amazon.com/premiumsupport/knowledge-center/cloudwatch-getmetricdata-api/).
The query format documentation can be found
[here](https://aws.amazon.com/premiumsupport/knowledge-center/cloudwatch-getmetricdata-api/).
Reference the template in the canary analysis:
@@ -314,3 +348,56 @@ Reference the template in the canary analysis:
```
**Note** that Flagger needs the AWS IAM permission to perform `cloudwatch:GetMetricData` to use this provider.
## New Relic
You can create custom metric checks using the New Relic provider.
Create a secret with your New Relic Insights credentials:
```yaml
apiVersion: v1
kind: Secret
metadata:
name: newrelic
namespace: istio-system
data:
newrelic_account_id: your-account-id
newrelic_query_key: your-insights-query-key
```
New Relic template example:
```yaml
apiVersion: flagger.app/v1beta1
kind: MetricTemplate
metadata:
name: newrelic-error-rate
namespace: ingress-nginx
spec:
provider:
type: newrelic
secretRef:
name: newrelic
query: |
SELECT
filter(sum(nginx_ingress_controller_requests), WHERE status >= '500') /
sum(nginx_ingress_controller_requests) * 100
FROM Metric
WHERE metricName = 'nginx_ingress_controller_requests'
AND ingress = '{{ ingress }}' AND namespace = '{{ namespace }}'
```
Reference the template in the canary analysis:
```yaml
analysis:
metrics:
- name: "error rate"
templateRef:
name: newrelic-error-rate
namespace: ingress-nginx
thresholdRange:
max: 5
interval: 1m
```

View File

@@ -6,13 +6,12 @@ Flagger comes with a Grafana dashboard made for canary analysis. Install Grafana
```bash
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=istio-system \ # or appmesh-system
--set url=http://prometheus:9090
```
The dashboard shows the RED and USE metrics for the primary and canary workloads:
![Canary Dashboard](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/grafana-canary-analysis.png)
![Canary Dashboard](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/screens/grafana-canary-analysis.png)
## Logging
@@ -49,9 +48,11 @@ helm upgrade -i flagger flagger/flagger \
--set eventWebhook=https://example.com/flagger-canary-event-webhook
```
The environment variable _EVENT\_WEBHOOK\_URL_ can be used for activating the event-webhook, too. This is handy for using a secret to store a sensitive value that could contain API keys, for example.
The environment variable _EVENT\_WEBHOOK\_URL_ can be used for activating the event-webhook, too.
This is handy for using a secret to store a sensitive value that could contain API keys, for example.
When configured, every action that Flagger takes during a canary deployment will be sent as JSON via an HTTP POST request. The JSON payload has the following schema:
When configured, every action that Flagger takes during a canary deployment
will be sent as JSON via an HTTP POST request. The JSON payload has the following schema:
```javascript
{
@@ -93,7 +94,8 @@ The event webhook can be overwritten at canary level with:
## Metrics
Flagger exposes Prometheus metrics that can be used to determine the canary analysis status and the destination weight values:
Flagger exposes Prometheus metrics that can be used to determine
the canary analysis status and the destination weight values:
```bash
# Flagger version and mesh provider gauge
@@ -116,4 +118,3 @@ flagger_canary_duration_seconds_bucket{name="podinfo",namespace="test",le="+Inf"
flagger_canary_duration_seconds_sum{name="podinfo",namespace="test"} 17.3561329
flagger_canary_duration_seconds_count{name="podinfo",namespace="test"} 6
```

View File

@@ -1,26 +1,34 @@
# Webhooks
The canary analysis can be extended with webhooks. Flagger will call each webhook URL and
determine from the response status code (HTTP 2xx) if the canary is failing or not.
The canary analysis can be extended with webhooks.
Flagger will call each webhook URL and determine from the response status code
(HTTP 2xx) if the canary is failing or not.
There are several types of hooks:
* **confirm-rollout** hooks are executed before scaling up the canary deployment and can be used for manual approval.
The rollout is paused until the hook returns a successful HTTP status code.
* **pre-rollout** hooks are executed before routing traffic to canary.
The canary advancement is paused if a pre-rollout hook fails and if the number of failures reaches the
threshold, the canary will be rolled back.
* **rollout** hooks are executed during the analysis on each iteration before the metric checks.
If a rollout hook call fails the canary advancement is paused and eventually rolled back.
The rollout is paused until the hook returns a successful HTTP status code.
* **pre-rollout** hooks are executed before routing traffic to canary.
The canary advancement is paused if a pre-rollout hook fails and if the number of failures reaches the
threshold, the canary will be rolled back.
* **rollout** hooks are executed during the analysis on each iteration before the metric checks.
If a rollout hook call fails the canary advancement is paused and eventually rolled back.
* **confirm-promotion** hooks are executed before the promotion step.
The canary promotion is paused until the hooks return HTTP 200.
While the promotion is paused, Flagger will continue to run the metrics checks and rollout hooks.
* **post-rollout** hooks are executed after the canary has been promoted or rolled back.
If a post rollout hook fails the error is logged.
The canary promotion is paused until the hooks return HTTP 200.
While the promotion is paused, Flagger will continue to run the metrics checks and rollout hooks.
* **post-rollout** hooks are executed after the canary has been promoted or rolled back.
If a post rollout hook fails the error is logged.
* **rollback** hooks are executed while a canary deployment is in either Progressing or Waiting status.
This provides the ability to rollback during analysis or while waiting for a confirmation. If a rollback hook
returns a successful HTTP status code, Flagger will stop the analysis and mark the canary release as failed.
This provides the ability to rollback during analysis or while waiting for a confirmation. If a rollback hook
returns a successful HTTP status code, Flagger will stop the analysis and mark the canary release as failed.
* **event** hooks are executed every time Flagger emits a Kubernetes event. When configured,
every action that Flagger takes during a canary deployment will be sent as JSON via an HTTP POST request.
every action that Flagger takes during a canary deployment will be sent as JSON via an HTTP POST request.
Spec:
@@ -60,11 +68,11 @@ Spec:
url: http://event-receiver.notifications/slack
```
> **Note** that the sum of all rollout webhooks timeouts should be lower than the analysis interval.
> **Note** that the sum of all rollout webhooks timeouts should be lower than the analysis interval.
Webhook payload (HTTP POST):
```json
```javascript
{
"name": "podinfo",
"namespace": "test",
@@ -85,7 +93,7 @@ On a non-2xx response Flagger will include the response body (if any) in the fai
Event payload (HTTP POST):
```json
```javascript
{
"name": "string (canary name)",
"namespace": "string (canary namespace)",
@@ -98,25 +106,25 @@ Event payload (HTTP POST):
}
```
The event receiver can create alerts based on the received phase
(possible values: ` Initialized`, `Waiting`, `Progressing`, `Promoting`, `Finalising`, `Succeeded` or `Failed`).
The event receiver can create alerts based on the received phase
(possible values: `Initialized`, `Waiting`, `Progressing`, `Promoting`, `Finalising`, `Succeeded` or `Failed`).
### Load Testing
## Load Testing
For workloads that are not receiving constant traffic Flagger can be configured with a webhook,
For workloads that are not receiving constant traffic Flagger can be configured with a webhook,
that when called, will start a load test for the target workload.
If the target workload doesn't receive any traffic during the canary analysis,
If the target workload doesn't receive any traffic during the canary analysis,
Flagger metric checks will fail with "no values found for metric request-success-rate".
Flagger comes with a load testing service based on [rakyll/hey](https://github.com/rakyll/hey)
Flagger comes with a load testing service based on [rakyll/hey](https://github.com/rakyll/hey)
that generates traffic during analysis when configured as a webhook.
![Flagger Load Testing Webhook](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-load-testing.png)
![Flagger Load Testing Webhook](https://raw.githubusercontent.com/fluxcd/flagger/main/docs/diagrams/flagger-load-testing.png)
First you need to deploy the load test runner in a namespace with sidecar injection enabled:
```bash
kubectl apply -k github.com/weaveworks/flagger//kustomize/tester
kubectl apply -k https://github.com/fluxcd/flagger//kustomize/tester?ref=main
```
Or by using Helm:
@@ -129,7 +137,7 @@ helm upgrade -i flagger-loadtester flagger/loadtester \
--set cmd.timeout=1h
```
When deployed the load tester API will be available at `http://flagger-loadtester.test/`.
When deployed the load tester API will be available at `http://flagger-loadtester.test/`.
Now you can add webhooks to the canary analysis spec:
@@ -149,12 +157,12 @@ webhooks:
cmd: "hey -z 1m -q 10 -c 2 -m POST -d '{test: 2}' http://podinfo-canary.test:9898/echo"
```
When the canary analysis starts, Flagger will call the webhooks and the load tester will run the `hey` commands
in the background, if they are not already running. This will ensure that during the
analysis, the `podinfo-canary.test` service will receive a steady stream of GET and POST requests.
When the canary analysis starts, Flagger will call the webhooks and the load tester will
run the `hey` commands in the background, if they are not already running.
This will ensure that during the analysis, the `podinfo-canary.test`
service will receive a steady stream of GET and POST requests.
If your workload is exposed outside the mesh you can point `hey` to the
public URL and use HTTP2.
If your workload is exposed outside the mesh you can point `hey` to the public URL and use HTTP2.
```yaml
webhooks:
@@ -178,7 +186,11 @@ webhooks:
cmd: "ghz -z 1m -q 10 -c 2 --insecure podinfo.test:9898"
```
`ghz` uses reflection to identify which gRPC method to call. If you do not wish to enable reflection for your gRPC service you can implement a standardized health check from the [grpc-proto](https://github.com/grpc/grpc-proto) library. To use this [health check schema](https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto) without reflection you can pass a parameter to `ghz` like this
`ghz` uses reflection to identify which gRPC method to call.
If you do not wish to enable reflection for your gRPC service you can implement a standardized
health check from the [grpc-proto](https://github.com/grpc/grpc-proto) library.
To use this [health check schema](https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto)
without reflection you can pass a parameter to `ghz` like this
```yaml
webhooks:
@@ -191,19 +203,19 @@ webhooks:
```
The load tester can run arbitrary commands as long as the binary is present in the container image.
For example if you want to replace `hey` with another CLI, you can create your own Docker image:
For example if you want to replace `hey` with another CLI, you can create your own Docker image:
```dockerfile
```text
FROM weaveworks/flagger-loadtester:<VER>
RUN curl -Lo /usr/local/bin/my-cli https://github.com/user/repo/releases/download/ver/my-cli \
&& chmod +x /usr/local/bin/my-cli
```
### Load Testing Delegation
## Load Testing Delegation
The load tester can also forward testing tasks to external tools, by now [nGrinder](https://github.com/naver/ngrinder)
is supported.
The load tester can also forward testing tasks to external tools,
by now [nGrinder](https://github.com/naver/ngrinder) is supported.
To use this feature, add a load test task of type 'ngrinder' to the canary analysis spec:
@@ -225,11 +237,14 @@ webhooks:
# the interval between nGrinder test status polling, defaults to 1s
pollInterval: 5s
```
When the canary analysis starts, the load tester will initiate a [clone_and_start request](https://github.com/naver/ngrinder/wiki/REST-API-PerfTest)
to the nGrinder server and start a new performance test. The load tester will periodically poll the nGrinder server
for the status of the test, and prevent duplicate requests from being sent in subsequent analysis loops.
### Integration Testing
When the canary analysis starts, the load tester will initiate a
[clone_and_start request](https://github.com/naver/ngrinder/wiki/REST-API-PerfTest)
to the nGrinder server and start a new performance test. The load tester will periodically
poll the nGrinder server for the status of the test,
and prevent duplicate requests from being sent in subsequent analysis loops.
## Integration Testing
Flagger comes with a testing service that can run Helm tests, Bats tests or Concord tests when configured as a webhook.
@@ -243,7 +258,7 @@ helm upgrade -i flagger-helmtester flagger/loadtester \
--set serviceAccountName=tiller
```
When deployed the Helm tester API will be available at `http://flagger-helmtester.kube-system/`.
When deployed the Helm tester API will be available at `http://flagger-helmtester.kube-system/`.
Now you can add pre-rollout webhooks to the canary analysis spec:
@@ -262,7 +277,8 @@ Now you can add pre-rollout webhooks to the canary analysis spec:
When the canary analysis starts, Flagger will call the pre-rollout webhooks before routing traffic to the canary.
If the helm test fails, Flagger will retry until the analysis threshold is reached and the canary is rolled back.
If you are using Helm v3, you'll have to create a dedicated service account and add the release namespace to the test command:
If you are using Helm v3,
you'll have to create a dedicated service account and add the release namespace to the test command:
```yaml
analysis:
@@ -277,9 +293,10 @@ If you are using Helm v3, you'll have to create a dedicated service account and
```
If the test hangs or logs error messages hinting to insufficient permissions it can be related to RBAC,
check the [Troubleshooting](#Troubleshooting) section for an example configuration.
check the [Troubleshooting](webhooks.md#Troubleshooting) section for an example configuration.
As an alternative to Helm you can use the [Bash Automated Testing System](https://github.com/bats-core/bats-core) to run your tests.
As an alternative to Helm you can use the
[Bash Automated Testing System](https://github.com/bats-core/bats-core) to run your tests.
```yaml
analysis:
@@ -316,21 +333,24 @@ You can also configure the test runner to start a [Concord](https://concord.walm
pollTimeout: "60"
```
`org`, `project`, `repo` and `entrypoint` represent where your test process runs in Concord.
In order to authenticate to Concord, you need to set `apiKeyPath` to a path of a file containing a valid Concord API key
on the `flagger-helmtester` container. This can be done via mounting a Kubernetes secret in the tester's Deployment.
`pollInterval` represents the interval in seconds the web-hook will call Concord to see if the process has finished (Default is 5s).
`pollTimeout` represents the time in seconds the web-hook will try to call Concord before timing out (Default is 30s).
`org`, `project`, `repo` and `entrypoint` represent where your test process runs in Concord.
In order to authenticate to Concord, you need to set `apiKeyPath`
to a path of a file containing a valid Concord API key on the `flagger-helmtester` container.
This can be done via mounting a Kubernetes secret in the tester's Deployment.
`pollInterval` represents the interval in seconds the web-hook will call Concord
to see if the process has finished (Default is 5s). `pollTimeout` represents the time in seconds
the web-hook will try to call Concord before timing out (Default is 30s).
### Manual Gating
## Manual Gating
For manual approval of a canary deployment you can use the `confirm-rollout` and `confirm-promotion` webhooks.
The confirmation rollout hooks are executed before the pre-rollout hooks.
For manual approval of a canary deployment you can use the `confirm-rollout` and `confirm-promotion` webhooks.
The confirmation rollout hooks are executed before the pre-rollout hooks.
Flagger will halt the canary traffic shifting and analysis until the confirm webhook returns HTTP status 200.
For manual rollback of a canary deployment you can use the `rollback` webhook. The rollback hook will be called
during the analysis and confirmation states. If a rollback webhook returns a successful HTTP status code, Flagger
will shift all traffic back to the primary instance and fail the canary.
For manual rollback of a canary deployment you can use the `rollback` webhook.
The rollback hook will be called during the analysis and confirmation states.
If a rollback webhook returns a successful HTTP status code,
Flagger will shift all traffic back to the primary instance and fail the canary.
Manual gating with Flagger's tester:
@@ -342,9 +362,10 @@ Manual gating with Flagger's tester:
url: http://flagger-loadtester.test/gate/halt
```
The `/gate/halt` returns HTTP 403 thus blocking the rollout.
The `/gate/halt` returns HTTP 403 thus blocking the rollout.
If you have notifications enabled, Flagger will post a message to Slack or MS Teams if a canary rollout is waiting for approval.
If you have notifications enabled, Flagger will post a message to
Slack or MS Teams if a canary rollout is waiting for approval.
Change the URL to `/gate/approve` to start the canary analysis:
@@ -371,13 +392,13 @@ By default the gate is closed, you can start or resume the canary rollout with:
```bash
kubectl -n test exec -it flagger-loadtester-xxxx-xxxx sh
curl -d '{"name": "podinfo","namespace":"test"}' http://localhost:8080/gate/open
curl -d '{"name": "podinfo","namespace":"test"}' http://localhost:8080/gate/open
```
You can pause the rollout at any time with:
```bash
curl -d '{"name": "podinfo","namespace":"test"}' http://localhost:8080/gate/close
curl -d '{"name": "podinfo","namespace":"test"}' http://localhost:8080/gate/close
```
If a canary analysis is paused the status will change to waiting:
@@ -400,8 +421,8 @@ While the promotion is paused, Flagger will continue to run the metrics checks a
url: http://flagger-loadtester.test/gate/halt
```
The `rollback` hook type can be used to manually roll back the canary promotion. As with gating, rollbacks can be driven
with Flagger's tester API by setting the rollback URL to `/rollback/check`
The `rollback` hook type can be used to manually roll back the canary promotion.
As with gating, rollbacks can be driven with Flagger's tester API by setting the rollback URL to `/rollback/check`
```yaml
analysis:
@@ -409,35 +430,36 @@ with Flagger's tester API by setting the rollback URL to `/rollback/check`
- name: "rollback"
type: rollback
url: http://flagger-loadtester.test/rollback/check
```
```
By default rollback is closed, you can rollback a canary rollout with:
By default, rollback is closed; you can roll back a canary rollout with:
```bash
kubectl -n test exec -it flagger-loadtester-xxxx-xxxx sh
curl -d '{"name": "podinfo","namespace":"test"}' http://localhost:8080/rollback/open
curl -d '{"name": "podinfo","namespace":"test"}' http://localhost:8080/rollback/open
```
You can close the rollback with:
```bash
curl -d '{"name": "podinfo","namespace":"test"}' http://localhost:8080/rollback/close
curl -d '{"name": "podinfo","namespace":"test"}' http://localhost:8080/rollback/close
```
If you have notifications enabled, Flagger will post a message to Slack or MS Teams if a canary has been rolled back.
### Troubleshooting
## Troubleshooting
#### Manually check if helm test is running
### Manually check if helm test is running
To debug in depth any issues with helm tests, you can execute commands on the flagger-loadtester pod.
```bash
kubectl exec -it deploy/flagger-loadtester -- bash
helmv3 test <release> -n <namespace> --debug
```
#### Helm tests hang during canary deployment
### Helm tests hang during canary deployment
If test execution hangs or displays insufficient permissions, check your RBAC settings.

20
go.mod
View File

@@ -1,20 +1,20 @@
module github.com/weaveworks/flagger
module github.com/fluxcd/flagger
go 1.14
require (
github.com/Masterminds/semver/v3 v3.0.3
github.com/aws/aws-sdk-go v1.30.19
github.com/aws/aws-sdk-go v1.36.20
github.com/davecgh/go-spew v1.1.1
github.com/google/go-cmp v0.4.0
github.com/go-logr/zapr v0.3.0
github.com/google/go-cmp v0.5.2
github.com/prometheus/client_golang v1.5.1
github.com/stretchr/testify v1.5.1
github.com/stretchr/testify v1.6.1
go.uber.org/zap v1.14.1
gopkg.in/h2non/gock.v1 v1.0.15
k8s.io/api v0.18.2
k8s.io/apimachinery v0.18.2
k8s.io/client-go v0.18.2
k8s.io/code-generator v0.18.2
k8s.io/api v0.20.1
k8s.io/apimachinery v0.20.1
k8s.io/client-go v0.20.1
k8s.io/code-generator v0.20.1
k8s.io/klog/v2 v2.4.0
)
replace k8s.io/klog => github.com/stefanprodan/klog v0.0.0-20190418165334-9cbb78b20423

351
go.sum
View File

@@ -2,49 +2,85 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0 h1:3ithwDMr7/3vpAMXiH+ZQnYbuIsh+OPhUPMFC9enmn0=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Masterminds/semver/v3 v3.0.3 h1:znjIyLfpXEDQjOIEWh+ehwpTU14UzUPub3c3sm36u14=
github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/aws/aws-sdk-go v1.30.19 h1:vRwsYgbUvC25Cb3oKXTyTYk3R5n1LRVk8zbvL4inWsc=
github.com/aws/aws-sdk-go v1.30.19/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-sdk-go v1.36.20 h1:IQr81xegCd40Xq21ZjFToKw9llaCzO1LRE75CgnvJ1Q=
github.com/aws/aws-sdk-go v1.36.20/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk=
@@ -53,34 +89,48 @@ github.com/go-logfmt/logfmt v0.3.0 h1:8HUsc87TaSWLKwrnumgC8/YconD2fJQsRJAsWaPg2i
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/zapr v0.3.0 h1:iyiCRZ29uPmbO7mWIjOEiYMXrTxZWTyK4tCatLyGpUY=
github.com/go-logr/zapr v0.3.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@@ -89,21 +139,28 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.1.0 h1:rVsPeBmXbYv4If/cumu1AzZPwV58q433hvONV1UEZoI=
github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542 h1:2VTzZjLZBgl62/EtslCrtky5vbi9dd7HrQPQIx6wqiw=
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
@@ -112,17 +169,21 @@ github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
@@ -130,16 +191,18 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -174,6 +237,7 @@ github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3O
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
@@ -187,13 +251,12 @@ github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLk
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stefanprodan/klog v0.0.0-20190418165334-9cbb78b20423 h1:qTtUiiNM+iq4IXOwHofKW5+jzvkvnNVz0GFRxwukUlY=
github.com/stefanprodan/klog v0.0.0-20190418165334-9cbb78b20423/go.mod h1:TYstY5LQfzxFVm9MiiMg7kZ39sc5cue/6CFoY5KgXn8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
@@ -203,32 +266,67 @@ github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo=
go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -237,18 +335,32 @@ golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73r
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -256,55 +368,147 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503 h1:5SvYFrOM3W8Mexn9/oA44Ji7vhXAZQ9hiP+1Q/DMrWg=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY=
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72 h1:bw9doJza/SFBEweII/rHQh338oozWyiFsBRHtrflcws=
golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200616133436-c1934b75d054 h1:HHeAlu5H9b71C+Fx0K+1dGgVFN1DM1/wz4aoGOA5qS8=
golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
@@ -329,29 +533,40 @@ gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
k8s.io/api v0.18.2 h1:wG5g5ZmSVgm5B+eHMIbI9EGATS2L8Z72rda19RIEgY8=
k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78=
k8s.io/apimachinery v0.18.2 h1:44CmtbmkzVDAhCpRVSiP2R5PPrC2RtlIv/MoB8xpdRA=
k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
k8s.io/client-go v0.18.2 h1:aLB0iaD4nmwh7arT2wIn+lMnAq7OswjaejkQ8p9bBYE=
k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU=
k8s.io/code-generator v0.18.2 h1:C1Nn2JiMf244CvBDKVPX0W2mZFJkVBg54T8OV7/Imso=
k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6 h1:4s3/R4+OYYYUKptXPhZKjQ04WJ6EhQQVFdjOFvCazDk=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20200114144118-36b2048a9120 h1:RPscN6KhmG54S33L+lr3GS+oD1jmchIU0ll519K6FA4=
k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM=
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU=
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.20.1 h1:ud1c3W3YNzGd6ABJlbFfKXBKXO+1KdGfcgGGNgFR03E=
k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
k8s.io/apimachinery v0.20.1 h1:LAhz8pKbgR8tUwn7boK+b2HZdt7MiTu2mkYtFMUjTRQ=
k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/client-go v0.20.1 h1:Qquik0xNFbK9aUG92pxHYsyfea5/RPO9o9bSywNor+M=
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
k8s.io/code-generator v0.20.1 h1:kre3GNich5gbO3d1FyTT8fHI4ZJezZV217yFdWlQaRQ=
k8s.io/code-generator v0.20.1/go.mod h1:UsqdF+VX4PU2g46NC2JRs4gc+IfrctnwHb76RNbWHJg=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20201113003025-83324d819ded h1:JApXBKYyB7l9xx+DK7/+mFjC7A9Bt5A93FPvFD0HIFE=
k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd h1:sOHNzJIkytDF6qadMNKhhDRpc6ODik8lVC6nOur7B2c=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=

View File

@@ -1,5 +1,5 @@
/*
Copyright The Flagger Authors.
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -29,10 +29,10 @@ echo ">> Temporary output directory ${TEMP_DIR}"
chmod +x ${CODEGEN_PKG}/generate-groups.sh
${CODEGEN_PKG}/generate-groups.sh all \
github.com/weaveworks/flagger/pkg/client github.com/weaveworks/flagger/pkg/apis \
"flagger:v1beta1 appmesh:v1beta2 appmesh:v1beta1 istio:v1alpha3 smi:v1alpha1 smi:v1alpha2 gloo:v1 projectcontour:v1" \
github.com/fluxcd/flagger/pkg/client github.com/fluxcd/flagger/pkg/apis \
"flagger:v1beta1 appmesh:v1beta2 appmesh:v1beta1 istio:v1alpha3 smi:v1alpha1 smi:v1alpha2 gloo:v1 projectcontour:v1 traefik:v1alpha1" \
--output-base "${TEMP_DIR}" \
--go-header-file ${SCRIPT_ROOT}/hack/boilerplate.go.txt
# Copy everything back.
cp -r "${TEMP_DIR}/github.com/weaveworks/flagger/." "${SCRIPT_ROOT}/"
cp -r "${TEMP_DIR}/github.com/fluxcd/flagger/." "${SCRIPT_ROOT}/"

View File

@@ -12,7 +12,7 @@ As an alternative to Helm, Flagger can be installed with [Kustomize](https://kus
Install Flagger for Istio:
```bash
kustomize build https://github.com/weaveworks/flagger/kustomize/istio | kubectl apply -f -
kustomize build https://github.com/fluxcd/flagger/kustomize/istio?ref=main | kubectl apply -f -
```
This deploys Flagger in the `istio-system` namespace and sets the metrics server URL to Istio's Prometheus instance.
@@ -20,7 +20,7 @@ This deploys Flagger in the `istio-system` namespace and sets the metrics server
Install Flagger for AWS App Mesh:
```bash
kustomize build https://github.com/weaveworks/flagger/kustomize/appmesh | kubectl apply -f -
kustomize build https://github.com/fluxcd/flagger/kustomize/appmesh?ref=main | kubectl apply -f -
```
This deploys Flagger in the `appmesh-system` namespace and sets the metrics server URL to App Mesh Prometheus instance.
@@ -28,7 +28,7 @@ This deploys Flagger in the `appmesh-system` namespace and sets the metrics serv
Install Flagger for Linkerd:
```bash
kustomize build https://github.com/weaveworks/flagger/kustomize/linkerd | kubectl apply -f -
kustomize build https://github.com/fluxcd/flagger/kustomize/linkerd?ref=main | kubectl apply -f -
```
This deploys Flagger in the `linkerd` namespace and sets the metrics server URL to Linkerd's Prometheus instance.
@@ -36,13 +36,13 @@ This deploys Flagger in the `linkerd` namespace and sets the metrics server URL
If you want to install a specific Flagger release, add the version number to the URL:
```bash
kustomize build https://github.com/weaveworks/flagger/kustomize/linkerd?ref=v1.0.0 | kubectl apply -f -
kustomize build https://github.com/fluxcd/flagger/kustomize/linkerd?ref=v1.0.0 | kubectl apply -f -
```
Install Flagger for Contour:
```bash
kustomize build https://github.com/weaveworks/flagger/kustomize/contour | kubectl apply -f -
kustomize build https://github.com/fluxcd/flagger/kustomize/contour?ref=main | kubectl apply -f -
```
This deploys Flagger and Prometheus in the `projectcontour` namespace and sets Prometheus to scrape Contour's Envoy instances.
@@ -52,7 +52,7 @@ This deploys Flagger and Prometheus in the `projectcontour` namespace and sets P
Install Flagger and Prometheus:
```bash
kustomize build https://github.com/weaveworks/flagger/kustomize/kubernetes | kubectl apply -f -
kustomize build https://github.com/fluxcd/flagger/kustomize/kubernetes?ref=main | kubectl apply -f -
```
This deploys Flagger and Prometheus in the `flagger-system` namespace,
@@ -67,12 +67,12 @@ metadata:
name: app
namespace: test
spec:
# can be: kubernetes, istio, linkerd, appmesh, nginx, gloo
# can be: kubernetes, istio, linkerd, appmesh, nginx, skipper, gloo
# use the kubernetes provider for Blue/Green style deployments
provider: nginx
```
You'll need Prometheus when using Flagger with AWS App Mesh, Gloo or NGINX ingress controller.
You'll need Prometheus when using Flagger with AWS App Mesh, Gloo, NGINX or Skipper ingress controller.
The Prometheus instance has a two hours data retention and is configured to scrape all pods in your cluster that
have the `prometheus.io/scrape: "true"` annotation.
@@ -84,7 +84,7 @@ Create a kustomization file using Flagger as base and patch the container args:
cat > kustomization.yaml <<EOF
namespace: istio-system
bases:
- github.com/weaveworks/flagger/kustomize/base/flagger
- github.com/fluxcd/flagger/kustomize/base/flagger
patches:
- target:
kind: Deployment

View File

@@ -9,11 +9,9 @@ spec:
- name: flagger
args:
- -log-level=info
- -include-label-prefix=app.kubernetes.io
- -mesh-provider=appmesh
- -metrics-server=http://appmesh-prometheus:9090
- -slack-user=flagger
- -slack-channel=
- -slack-url=
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding

File diff suppressed because it is too large Load Diff

View File

@@ -20,7 +20,7 @@ spec:
serviceAccountName: flagger
containers:
- name: flagger
image: weaveworks/flagger:1.0.0
image: ghcr.io/fluxcd/flagger:1.0.0
imagePullPolicy: IfNotPresent
ports:
- name: http

View File

@@ -7,5 +7,6 @@ resources:
- crd.yaml
- deployment.yaml
images:
- name: weaveworks/flagger
newTag: 1.0.1
- name: ghcr.io/fluxcd/flagger
newName: ghcr.io/fluxcd/flagger
newTag: 1.6.1

View File

@@ -143,8 +143,19 @@ rules:
resources:
- upstreams
- upstreams/finalizers
- upstreamgroups
- upstreamgroups/finalizers
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- gateway.solo.io
resources:
- routetables
- routetables/finalizers
verbs:
- get
- list

View File

@@ -19,7 +19,7 @@ spec:
serviceAccountName: flagger-prometheus
containers:
- name: prometheus
image: prom/prometheus:v2.19.0
image: prom/prometheus:v2.23.0
imagePullPolicy: IfNotPresent
args:
- '--storage.tsdb.retention=2h'

View File

@@ -9,9 +9,7 @@ spec:
- name: flagger
args:
- -log-level=info
- -include-label-prefix=app.kubernetes.io
- -mesh-provider=contour
- -ingress-class=contour
- -metrics-server=http://flagger-prometheus:9090
- -slack-user=flagger
- -slack-channel=
- -slack-url=

View File

@@ -9,12 +9,9 @@ spec:
- name: flagger
args:
- -log-level=info
- -include-label-prefix=app.kubernetes.io
- -mesh-provider=istio
- -metrics-server=http://prometheus:9090
- -slack-user=flagger
- -slack-channel=
- -slack-url=
---
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding

View File

@@ -9,8 +9,6 @@ spec:
- name: flagger
args:
- -log-level=info
- -include-label-prefix=app.kubernetes.io
- -mesh-provider=kubernetes
- -metrics-server=http://flagger-prometheus:9090
- -slack-user=flagger
- -slack-channel=
- -slack-url=

View File

@@ -9,11 +9,9 @@ spec:
- name: flagger
args:
- -log-level=info
- -include-label-prefix=app.kubernetes.io
- -mesh-provider=linkerd
- -metrics-server=http://linkerd-prometheus:9090
- -slack-user=flagger
- -slack-channel=
- -slack-url=
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding

View File

@@ -1,4 +1,4 @@
apiVersion: autoscaling/v2beta1
apiVersion: autoscaling/v2beta2
kind: HorizontalPodAutoscaler
metadata:
name: podinfo
@@ -10,9 +10,11 @@ spec:
minReplicas: 2
maxReplicas: 4
metrics:
- type: Resource
resource:
name: cpu
# scale up if usage is above
# 99% of the requested CPU (100m)
targetAverageUtilization: 99
- type: Resource
resource:
name: cpu
target:
type: Utilization
# scale up if usage is above
# 99% of the requested CPU (100m)
averageUtilization: 99

View File

@@ -18,7 +18,7 @@ spec:
spec:
containers:
- name: loadtester
image: weaveworks/flagger-loadtester:0.18.0
image: ghcr.io/fluxcd/flagger-loadtester:0.18.0
imagePullPolicy: IfNotPresent
ports:
- name: http

View File

@@ -5,7 +5,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/weaveworks/flagger/pkg/apis/appmesh"
"github.com/fluxcd/flagger/pkg/apis/appmesh"
)
// SchemeGroupVersion is group version used to register these objects

View File

@@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
Copyright The Flagger Authors.
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@@ -5,7 +5,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/weaveworks/flagger/pkg/apis/appmesh"
"github.com/fluxcd/flagger/pkg/apis/appmesh"
)
// SchemeGroupVersion is group version used to register these objects

View File

@@ -113,3 +113,36 @@ type TCPTimeout struct {
// +optional
Idle *Duration `json:"idle,omitempty"`
}
type TCPConnectionPool struct {
// Represents the maximum number of outbound TCP connections
// the envoy can establish concurrently with all the hosts in the upstream cluster.
// +kubebuilder:validation:Minimum=1
MaxConnections int64 `json:"maxConnections"`
}
type HTTPConnectionPool struct {
// Represents the maximum number of outbound TCP connections
// the envoy can establish concurrently with all the hosts in the upstream cluster.
// +kubebuilder:validation:Minimum=1
MaxConnections int64 `json:"maxConnections"`
// Represents the number of overflowing requests after max_connections
// that an envoy will queue to an upstream cluster.
// +kubebuilder:validation:Minimum=1
// +optional
MaxPendingRequests *int64 `json:"maxPendingRequests,omitempty"`
}
type HTTP2ConnectionPool struct {
// Represents the maximum number of inflight requests that an envoy
// can concurrently support across all the hosts in the upstream cluster
// +kubebuilder:validation:Minimum=1
MaxRequests int64 `json:"maxRequests"`
}
type GRPCConnectionPool struct {
// Represents the maximum number of inflight requests that an envoy
// can concurrently support across all the hosts in the upstream cluster
// +kubebuilder:validation:Minimum=1
MaxRequests int64 `json:"maxRequests"`
}

View File

@@ -1,3 +1,19 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
import (
@@ -59,8 +75,12 @@ type ClientPolicy struct {
// VirtualServiceBackend refers to https://docs.aws.amazon.com/app-mesh/latest/APIReference/API_VirtualServiceBackend.html
type VirtualServiceBackend struct {
// The VirtualService that is acting as a virtual node backend.
VirtualServiceRef VirtualServiceReference `json:"virtualServiceRef"`
// Reference to Kubernetes VirtualService CR in cluster that is acting as a virtual node backend. Exactly one of 'virtualServiceRef' or 'virtualServiceARN' must be specified.
// +optional
VirtualServiceRef *VirtualServiceReference `json:"virtualServiceRef,omitempty"`
// Amazon Resource Name to AppMesh VirtualService object that is acting as a virtual node backend. Exactly one of 'virtualServiceRef' or 'virtualServiceARN' must be specified.
// +optional
VirtualServiceARN *string `json:"virtualServiceARN,omitempty"`
// A reference to an object that represents the client policy for a backend.
// +optional
ClientPolicy *ClientPolicy `json:"clientPolicy,omitempty"`
@@ -108,6 +128,25 @@ type HealthCheckPolicy struct {
UnhealthyThreshold int64 `json:"unhealthyThreshold"`
}
// OutlierDetection defines the health check policy that temporarily ejects an endpoint/host of a VirtualNode
// from the load balancing set when it meets failure threshold
type OutlierDetection struct {
// The threshold for the number of server errors returned by a given host during an outlier detection interval.
// If the server error count meets/exceeds this threshold the host is ejected.
// A server error is defined as any HTTP 5xx response (or the equivalent for gRPC and TCP connections)
// +kubebuilder:validation:Minimum=1
MaxServerErrors int64 `json:"maxServerErrors"`
// The time interval between ejection analysis sweeps. This can result in both new ejections as well as hosts being returned to service
Interval Duration `json:"interval"`
// The base time that a host is ejected for. The real time is equal to the base time multiplied by the number of times the host has been ejected
BaseEjectionDuration Duration `json:"baseEjectionDuration"`
// The threshold for the max percentage of outlier hosts that can be ejected from the load balancing set.
// maxEjectionPercent=100 means outlier detection can potentially eject all of the hosts from the upstream service if they are all considered outliers, leaving the load balancing set with zero hosts
// +kubebuilder:validation:Minimum=0
// +kubebuilder:validation:Maximum=100
MaxEjectionPercent int64 `json:"maxEjectionPercent"`
}
// ListenerTLSACMCertificate refers to https://docs.aws.amazon.com/app-mesh/latest/APIReference/API_ListenerTlsAcmCertificate.html
type ListenerTLSACMCertificate struct {
// The Amazon Resource Name (ARN) for the certificate.
@@ -169,6 +208,25 @@ type ListenerTimeout struct {
GRPC *GRPCTimeout `json:"grpc,omitempty"`
}
// VirtualNodeConnectionPool refers to the connection pools settings for Virtual Node.
// Connection pool limits the number of connections that an Envoy can concurrently establish with
// all the hosts in the upstream cluster. Currently connection pool is supported only at the listener
// level and it is intended protect your local application from being overwhelmed with connections.
type VirtualNodeConnectionPool struct {
// Specifies tcp connection pool settings for the virtual node listener
// +optional
TCP *TCPConnectionPool `json:"tcp,omitempty"`
// Specifies http connection pool settings for the virtual node listener
// +optional
HTTP *HTTPConnectionPool `json:"http,omitempty"`
// Specifies http2 connection pool settings for the virtual node listener
// +optional
HTTP2 *HTTP2ConnectionPool `json:"http2,omitempty"`
// Specifies grpc connection pool settings for the virtual node listener
// +optional
GRPC *GRPCConnectionPool `json:"grpc,omitempty"`
}
// Listener refers to https://docs.aws.amazon.com/app-mesh/latest/APIReference/API_Listener.html
type Listener struct {
// The port mapping information for the listener.
@@ -176,6 +234,12 @@ type Listener struct {
// The health check information for the listener.
// +optional
HealthCheck *HealthCheckPolicy `json:"healthCheck,omitempty"`
// The outlier detection for the listener
// +optional
OutlierDetection *OutlierDetection `json:"outlierDetection,omitempty"`
// The connection pool settings for the listener
// +optional
ConnectionPool *VirtualNodeConnectionPool `json:"connectionPool,omitempty"`
// A reference to an object that represents the Transport Layer Security (TLS) properties for a listener.
// +optional
TLS *ListenerTLS `json:"tls,omitempty"`
@@ -273,7 +337,7 @@ type VirtualNodeCondition struct {
}
// VirtualNodeSpec defines the desired state of VirtualNode
// refers to https://docs.aws.amazon.com/app-mesh/latest/APIReference/API_VirtualServiceSpec.html
// refers to https://docs.aws.amazon.com/app-mesh/latest/APIReference/API_VirtualNodeSpec.html
type VirtualNodeSpec struct {
// AWSName is the AppMesh VirtualNode object's name.
// If unspecified or empty, it defaults to be "${name}_${namespace}" of k8s VirtualNode
@@ -290,7 +354,8 @@ type VirtualNodeSpec struct {
// +kubebuilder:validation:MaxItems=1
// +optional
Listeners []Listener `json:"listeners,omitempty"`
// The service discovery information for the virtual node.
// The service discovery information for the virtual node. Optional if there is no
// inbound traffic(no listeners). Mandatory if a listener is specified.
// +optional
ServiceDiscovery *ServiceDiscovery `json:"serviceDiscovery,omitempty"`
// The backends that the virtual node is expected to send outbound traffic to.

View File

@@ -1,3 +1,19 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta2
import (
@@ -13,8 +29,12 @@ type VirtualRouterListener struct {
// WeightedTarget refers to https://docs.aws.amazon.com/app-mesh/latest/APIReference/API_WeightedTarget.html
type WeightedTarget struct {
// The virtual node to associate with the weighted target.
VirtualNodeRef VirtualNodeReference `json:"virtualNodeRef"`
// Reference to Kubernetes VirtualNode CR in cluster to associate with the weighted target. Exactly one of 'virtualNodeRef' or 'virtualNodeARN' must be specified.
// +optional
VirtualNodeRef *VirtualNodeReference `json:"virtualNodeRef,omitempty"`
// Amazon Resource Name to AppMesh VirtualNode object to associate with the weighted target. Exactly one of 'virtualNodeRef' or 'virtualNodeARN' must be specified.
// +optional
VirtualNodeARN *string `json:"virtualNodeARN,omitempty"`
// The relative weight of the weighted target.
// +kubebuilder:validation:Minimum=0
// +kubebuilder:validation:Maximum=100

View File

@@ -23,14 +23,22 @@ import (
// VirtualNodeServiceProvider refers to https://docs.aws.amazon.com/app-mesh/latest/APIReference/API_VirtualNodeServiceProvider.html
type VirtualNodeServiceProvider struct {
// The virtual node that is acting as a service provider.
VirtualNodeRef VirtualNodeReference `json:"virtualNodeRef"`
// Reference to Kubernetes VirtualNode CR in cluster that is acting as a service provider. Exactly one of 'virtualNodeRef' or 'virtualNodeARN' must be specified.
// +optional
VirtualNodeRef *VirtualNodeReference `json:"virtualNodeRef,omitempty"`
// Amazon Resource Name to AppMesh VirtualNode object that is acting as a service provider. Exactly one of 'virtualNodeRef' or 'virtualNodeARN' must be specified.
// +optional
VirtualNodeARN *string `json:"virtualNodeARN,omitempty"`
}
// VirtualRouterServiceProvider refers to https://docs.aws.amazon.com/app-mesh/latest/APIReference/API_VirtualRouterServiceProvider.html
type VirtualRouterServiceProvider struct {
// The virtual router that is acting as a service provider.
VirtualRouterRef VirtualRouterReference `json:"virtualRouterRef"`
// Reference to Kubernetes VirtualRouter CR in cluster that is acting as a service provider. Exactly one of 'virtualRouterRef' or 'virtualRouterARN' must be specified.
// +optional
VirtualRouterRef *VirtualRouterReference `json:"virtualRouterRef,omitempty"`
// Amazon Resource Name to AppMesh VirtualRouter object that is acting as a service provider. Exactly one of 'virtualRouterRef' or 'virtualRouterARN' must be specified.
// +optional
VirtualRouterARN *string `json:"virtualRouterARN,omitempty"`
}
// VirtualServiceProvider refers to https://docs.aws.amazon.com/app-mesh/latest/APIReference/API_VirtualServiceProvider.html
@@ -67,6 +75,7 @@ type VirtualServiceCondition struct {
}
// VirtualServiceSpec defines the desired state of VirtualService
// refers to https://docs.aws.amazon.com/app-mesh/latest/APIReference/API_VirtualServiceSpec.html
type VirtualServiceSpec struct {
// AWSName is the AppMesh VirtualService object's name.
// If unspecified or empty, it defaults to be "${name}.${namespace}" of k8s VirtualService
@@ -94,6 +103,10 @@ type VirtualServiceStatus struct {
// The current VirtualService status.
// +optional
Conditions []VirtualServiceCondition `json:"conditions,omitempty"`
// The generation observed by the VirtualService controller.
// +optional
ObservedGeneration *int64 `json:"observedGeneration,omitempty"`
}
// +genclient

View File

@@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
Copyright The Flagger Authors.
Copyright 2020 The Flux authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -217,6 +217,22 @@ func (in *FileAccessLog) DeepCopy() *FileAccessLog {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GRPCConnectionPool) DeepCopyInto(out *GRPCConnectionPool) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GRPCConnectionPool.
func (in *GRPCConnectionPool) DeepCopy() *GRPCConnectionPool {
if in == nil {
return nil
}
out := new(GRPCConnectionPool)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GRPCRetryPolicy) DeepCopyInto(out *GRPCRetryPolicy) {
*out = *in
@@ -426,6 +442,43 @@ func (in *GRPCTimeout) DeepCopy() *GRPCTimeout {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTP2ConnectionPool) DeepCopyInto(out *HTTP2ConnectionPool) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTP2ConnectionPool.
func (in *HTTP2ConnectionPool) DeepCopy() *HTTP2ConnectionPool {
if in == nil {
return nil
}
out := new(HTTP2ConnectionPool)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPConnectionPool) DeepCopyInto(out *HTTPConnectionPool) {
*out = *in
if in.MaxPendingRequests != nil {
in, out := &in.MaxPendingRequests, &out.MaxPendingRequests
*out = new(int64)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPConnectionPool.
func (in *HTTPConnectionPool) DeepCopy() *HTTPConnectionPool {
if in == nil {
return nil
}
out := new(HTTPConnectionPool)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPRetryPolicy) DeepCopyInto(out *HTTPRetryPolicy) {
*out = *in
@@ -665,6 +718,16 @@ func (in *Listener) DeepCopyInto(out *Listener) {
*out = new(HealthCheckPolicy)
(*in).DeepCopyInto(*out)
}
if in.OutlierDetection != nil {
in, out := &in.OutlierDetection, &out.OutlierDetection
*out = new(OutlierDetection)
**out = **in
}
if in.ConnectionPool != nil {
in, out := &in.ConnectionPool, &out.ConnectionPool
*out = new(VirtualNodeConnectionPool)
(*in).DeepCopyInto(*out)
}
if in.TLS != nil {
in, out := &in.TLS, &out.TLS
*out = new(ListenerTLS)
@@ -852,6 +915,24 @@ func (in *MeshReference) DeepCopy() *MeshReference {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OutlierDetection) DeepCopyInto(out *OutlierDetection) {
*out = *in
out.Interval = in.Interval
out.BaseEjectionDuration = in.BaseEjectionDuration
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutlierDetection.
func (in *OutlierDetection) DeepCopy() *OutlierDetection {
if in == nil {
return nil
}
out := new(OutlierDetection)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PortMapping) DeepCopyInto(out *PortMapping) {
*out = *in
@@ -935,6 +1016,22 @@ func (in *ServiceDiscovery) DeepCopy() *ServiceDiscovery {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TCPConnectionPool) DeepCopyInto(out *TCPConnectionPool) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPConnectionPool.
func (in *TCPConnectionPool) DeepCopy() *TCPConnectionPool {
if in == nil {
return nil
}
out := new(TCPConnectionPool)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TCPRoute) DeepCopyInto(out *TCPRoute) {
*out = *in
@@ -1160,6 +1257,42 @@ func (in *VirtualNodeCondition) DeepCopy() *VirtualNodeCondition {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualNodeConnectionPool) DeepCopyInto(out *VirtualNodeConnectionPool) {
*out = *in
if in.TCP != nil {
in, out := &in.TCP, &out.TCP
*out = new(TCPConnectionPool)
**out = **in
}
if in.HTTP != nil {
in, out := &in.HTTP, &out.HTTP
*out = new(HTTPConnectionPool)
(*in).DeepCopyInto(*out)
}
if in.HTTP2 != nil {
in, out := &in.HTTP2, &out.HTTP2
*out = new(HTTP2ConnectionPool)
**out = **in
}
if in.GRPC != nil {
in, out := &in.GRPC, &out.GRPC
*out = new(GRPCConnectionPool)
**out = **in
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNodeConnectionPool.
func (in *VirtualNodeConnectionPool) DeepCopy() *VirtualNodeConnectionPool {
if in == nil {
return nil
}
out := new(VirtualNodeConnectionPool)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualNodeList) DeepCopyInto(out *VirtualNodeList) {
*out = *in
@@ -1217,7 +1350,16 @@ func (in *VirtualNodeReference) DeepCopy() *VirtualNodeReference {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualNodeServiceProvider) DeepCopyInto(out *VirtualNodeServiceProvider) {
*out = *in
in.VirtualNodeRef.DeepCopyInto(&out.VirtualNodeRef)
if in.VirtualNodeRef != nil {
in, out := &in.VirtualNodeRef, &out.VirtualNodeRef
*out = new(VirtualNodeReference)
(*in).DeepCopyInto(*out)
}
if in.VirtualNodeARN != nil {
in, out := &in.VirtualNodeARN, &out.VirtualNodeARN
*out = new(string)
**out = **in
}
return
}
@@ -1456,7 +1598,16 @@ func (in *VirtualRouterReference) DeepCopy() *VirtualRouterReference {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualRouterServiceProvider) DeepCopyInto(out *VirtualRouterServiceProvider) {
*out = *in
in.VirtualRouterRef.DeepCopyInto(&out.VirtualRouterRef)
if in.VirtualRouterRef != nil {
in, out := &in.VirtualRouterRef, &out.VirtualRouterRef
*out = new(VirtualRouterReference)
(*in).DeepCopyInto(*out)
}
if in.VirtualRouterARN != nil {
in, out := &in.VirtualRouterARN, &out.VirtualRouterARN
*out = new(string)
**out = **in
}
return
}
@@ -1579,7 +1730,16 @@ func (in *VirtualService) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualServiceBackend) DeepCopyInto(out *VirtualServiceBackend) {
*out = *in
in.VirtualServiceRef.DeepCopyInto(&out.VirtualServiceRef)
if in.VirtualServiceRef != nil {
in, out := &in.VirtualServiceRef, &out.VirtualServiceRef
*out = new(VirtualServiceReference)
(*in).DeepCopyInto(*out)
}
if in.VirtualServiceARN != nil {
in, out := &in.VirtualServiceARN, &out.VirtualServiceARN
*out = new(string)
**out = **in
}
if in.ClientPolicy != nil {
in, out := &in.ClientPolicy, &out.ClientPolicy
*out = new(ClientPolicy)
@@ -1754,6 +1914,11 @@ func (in *VirtualServiceStatus) DeepCopyInto(out *VirtualServiceStatus) {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
if in.ObservedGeneration != nil {
in, out := &in.ObservedGeneration, &out.ObservedGeneration
*out = new(int64)
**out = **in
}
return
}
@@ -1770,7 +1935,16 @@ func (in *VirtualServiceStatus) DeepCopy() *VirtualServiceStatus {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WeightedTarget) DeepCopyInto(out *WeightedTarget) {
*out = *in
in.VirtualNodeRef.DeepCopyInto(&out.VirtualNodeRef)
if in.VirtualNodeRef != nil {
in, out := &in.VirtualNodeRef, &out.VirtualNodeRef
*out = new(VirtualNodeReference)
(*in).DeepCopyInto(*out)
}
if in.VirtualNodeARN != nil {
in, out := &in.VirtualNodeARN, &out.VirtualNodeARN
*out = new(string)
**out = **in
}
return
}

Some files were not shown because too many files have changed in this diff Show More