Compare commits

...

321 Commits

Author SHA1 Message Date
stefanprodan
bb620ad94a Release v0.14.0 changelog 2019-05-21 13:54:54 +02:00
stefanprodan
7c6d1c48a3 Release v0.14.0 2019-05-21 13:54:15 +02:00
Stefan Prodan
bd5d884c8b Merge pull request #187 from weaveworks/docs-smi
Flagger docs SMI
2019-05-21 13:34:19 +02:00
Stefan Prodan
1c06721c9a Merge pull request #185 from weaveworks/docs-gloo
Add Gloo ingress controller docs
2019-05-21 13:24:19 +02:00
stefanprodan
1e29e2c4eb Fix Grafana Prometheus URL 2019-05-19 10:34:36 +03:00
stefanprodan
88c39d7379 Add Gloo canary deployment docs and diagram 2019-05-17 15:07:43 +03:00
stefanprodan
da43a152ba Add Gloo canary deployment example 2019-05-17 13:15:53 +03:00
stefanprodan
ec63aa9999 Add Gloo custom resources to RBAC 2019-05-17 11:55:15 +03:00
Stefan Prodan
7b9df746ad Merge pull request #179 from yuval-k/gloo2
Add support for Gloo
2019-05-17 11:17:27 +03:00
Yuval Kohavi
52d93ddda2 fix router tests 2019-05-16 13:08:53 -04:00
Yuval Kohavi
eb0331f2bf fix tests 2019-05-16 12:48:03 -04:00
Yuval Kohavi
6a66a87a44 PR updates 2019-05-16 07:28:22 -04:00
stefanprodan
f3cc810948 Update Flagger image tag (fix latency check) 2019-05-15 20:31:25 +03:00
Stefan Prodan
12d84b2e24 Merge pull request #183 from weaveworks/metrics-fix
Fix Istio latency check
2019-05-15 20:24:15 +03:00
stefanprodan
58bde24ece Fix Istio request duration test 2019-05-15 20:10:27 +03:00
stefanprodan
5b3fd0efca Set Istio request duration to milliseconds 2019-05-15 20:01:27 +03:00
stefanprodan
ee6e39afa6 Add SMI tutorial 2019-05-15 17:37:29 +03:00
Yuval Kohavi
677b9d9197 gloo metrics 2019-05-14 17:48:13 -04:00
Yuval Kohavi
786c5aa93a Merge remote-tracking branch 'upstream/master' into gloo2 2019-05-14 10:26:57 -04:00
Stefan Prodan
fd44f1fabf Merge pull request #182 from weaveworks/linkerd-metrics
Fix Linkerd promql queries
2019-05-14 15:23:37 +03:00
Stefan Prodan
b20e0178e1 Merge pull request #180 from weaveworks/smi
Add support for SMI
2019-05-14 13:24:29 +03:00
stefanprodan
5a490abfdd Remove the mesh gateway from docs examples 2019-05-14 13:06:52 +03:00
stefanprodan
674c79da94 Fix Linkerd promql queries
- include all inbound traffic stats
2019-05-14 12:14:47 +03:00
stefanprodan
23ebb4235d merge metrics-v2 into smi 2019-05-14 09:53:42 +03:00
Stefan Prodan
b2500d0ccb Merge pull request #181 from weaveworks/metrics-v2
Refactor the metrics package
2019-05-14 09:49:24 +03:00
stefanprodan
ee500d83ac Add Linkerd observer implementation 2019-05-13 17:51:39 +03:00
stefanprodan
0032c14a78 Refactor metrics
- add observer interface with builtin metrics functions
- add metrics observer factory
- add prometheus client
- implement the observer interface for istio, envoy and nginx
- remove deprecated istio and app mesh metric aliases (istio_requests_total, istio_request_duration_seconds_bucket, envoy_cluster_upstream_rq, envoy_cluster_upstream_rq_time_bucket)
2019-05-13 17:34:08 +03:00
stefanprodan
8fd3e927b8 Merge branch 'master' into smi 2019-05-12 13:58:37 +03:00
stefanprodan
1902884b56 Release v0.13.2 2019-05-11 15:16:31 +03:00
Stefan Prodan
98d2805267 Merge pull request #178 from carlossg/issue-177
Fix #177 Do not copy labels from canary to primary deployment
2019-05-11 14:56:22 +03:00
Carlos Sanchez
24a74d3589 Fix #177 Do not copy labels from canary to primary deployment 2019-05-11 13:42:08 +02:00
stefanprodan
7fe273a21d Fix SMI cluster role binding 2019-05-11 14:08:58 +03:00
stefanprodan
bd817cc520 Run SMI Istio e2e tests 2019-05-11 14:00:53 +03:00
stefanprodan
eb856fda13 Add SMI Istio e2e tests 2019-05-11 13:46:24 +03:00
stefanprodan
d63f05c92e Add SMI group to RBAC 2019-05-11 13:45:32 +03:00
stefanprodan
8fde6bdb8a Add SMI Istio adapter deployment 2019-05-11 13:35:36 +03:00
stefanprodan
8148120421 Enable Istio checks for SMI-Istio adapter 2019-05-11 13:06:06 +03:00
stefanprodan
95b8840bf2 Add SMI traffic split to router 2019-05-11 13:05:19 +03:00
stefanprodan
0e8b1ef20f Generate the SMI TrafficSplit clientset 2019-05-11 12:49:23 +03:00
Yuval Kohavi
0fbf4dcdb2 add canary promotion 2019-05-10 20:16:21 -04:00
Yuval Kohavi
7aca9468ac re-enable helm 2019-05-10 19:48:22 -04:00
Yuval Kohavi
a6c0f08fcc add gloo to circle 2019-05-10 19:44:46 -04:00
Yuval Kohavi
9c1bcc08bb float -> percent 2019-05-10 19:21:08 -04:00
Yuval Kohavi
87e9dfe3d3 e2e test 2019-05-10 19:16:16 -04:00
Yuval Kohavi
d7be66743e Merge remote-tracking branch 'upstream/master' into gloo2 2019-05-10 10:38:14 -04:00
Stefan Prodan
15463456ec Merge pull request #176 from weaveworks/nginx-tests
Add nginx e2e and unit tests
2019-05-10 12:09:40 +03:00
stefanprodan
752eceed4b Add tests for ingress weight changes 2019-05-10 11:53:12 +03:00
stefanprodan
eadce34d6f Add ingress router unit tests 2019-05-10 11:39:52 +03:00
stefanprodan
11ccf34bbc Document the nginx e2e tests 2019-05-10 10:50:24 +03:00
stefanprodan
e308678ed5 Deploy ingress for nginx e2e tests 2019-05-10 10:40:38 +03:00
stefanprodan
cbe72f0aa2 Add ingress target to nginx e2e tests 2019-05-10 10:29:09 +03:00
stefanprodan
bc84e1c154 Fix typos 2019-05-10 10:24:47 +03:00
stefanprodan
344bd45a0e Add nginx e2e tests 2019-05-10 10:24:35 +03:00
stefanprodan
72014f736f Release v0.13.1 2019-05-09 14:29:42 +03:00
Stefan Prodan
0a2949b6ad Merge pull request #174 from weaveworks/fix-metrics
Fix NGINX promql and custom metrics checks
2019-05-09 14:22:30 +03:00
stefanprodan
2ff695ecfe Fix nginx metrics tests 2019-05-09 14:00:15 +03:00
stefanprodan
8d0b54e059 Add custom metrics to nginx docs 2019-05-09 13:51:37 +03:00
stefanprodan
121a65fad0 Fix nginx promql namespace selector 2019-05-09 13:50:47 +03:00
stefanprodan
ecaa203091 Fix custom metric checks
- escape the prom query before encoding it
2019-05-09 13:49:48 +03:00
Stefan Prodan
6d0e3c6468 Merge pull request #173 from weaveworks/release-v0.13.0
Prepare release v0.13.0
2019-05-08 20:52:18 +03:00
stefanprodan
c933476fff Bump Grafana chart version 2019-05-08 20:26:40 +03:00
stefanprodan
1335210cf5 Add the Prometheus add-on to App Mesh docs 2019-05-08 19:03:53 +03:00
stefanprodan
9d12794600 Add NGINX to readme 2019-05-08 18:30:00 +03:00
stefanprodan
d57fc7d03e Add v0.13.0 change log 2019-05-08 18:05:58 +03:00
stefanprodan
1f9f6fb55a Release v0.13.0 2019-05-08 18:05:47 +03:00
Stefan Prodan
948df55de3 Merge pull request #170 from weaveworks/nginx
Add support for nginx ingress controller
2019-05-08 17:44:29 +03:00
stefanprodan
8914f26754 Add nginx docs 2019-05-08 17:03:36 +03:00
stefanprodan
79b3370892 Add Prometheus add-on to Flagger chart 2019-05-08 15:44:28 +03:00
stefanprodan
a233b99f0b Add HPA to nginx demo 2019-05-07 11:12:36 +03:00
stefanprodan
0d94c01678 Toggle canary annotation based on weight 2019-05-07 11:10:19 +03:00
stefanprodan
00151e92fe Implement A/B testing for nginx ingress 2019-05-07 10:33:40 +03:00
stefanprodan
f7db0210ea Add nginx ingress controller checks 2019-05-06 18:43:02 +03:00
stefanprodan
cf3ba35fb9 Add nginx ingress controller metrics 2019-05-06 18:42:31 +03:00
stefanprodan
177dc824e3 Implement nginx ingress router 2019-05-06 18:42:02 +03:00
stefanprodan
5f544b90d6 Log mesh provider at startup 2019-05-06 18:41:04 +03:00
stefanprodan
921ac00383 Add ingress ref to CRD and RBAC 2019-05-06 18:33:00 +03:00
Stefan Prodan
7df7218978 Merge pull request #168 from scranton/supergloo
Fix and clarify SuperGloo installation docs
2019-05-06 11:33:40 +03:00
Scott Cranton
e4c6903a01 Fix and clarify SuperGloo installation docs
Added missing `=` for --version, and added brew and helm install options
2019-05-05 15:42:06 -04:00
Stefan Prodan
027342dc72 Merge pull request #167 from weaveworks/grafana-fix
Change dashboard selector to destination workload
2019-05-04 09:03:57 +03:00
stefanprodan
e17a747785 Change dashboard selector to destination workload 2019-05-03 19:32:29 +03:00
Stefan Prodan
e477b37bd0 Merge pull request #162 from weaveworks/fix-vs
Fix duplicate hosts error when using wildcard
2019-05-02 19:17:52 +03:00
Stefan Prodan
ad25068375 Merge pull request #160 from aackerman/patch-1
Update default image repo in flagger chart readme to be weaveworks
2019-05-02 19:17:38 +03:00
stefanprodan
c92230c109 Fix duplicate hosts error when using wildcard 2019-05-02 19:05:54 +03:00
Stefan Prodan
9e082d9ee3 Update charts/flagger/README.md
Co-Authored-By: aackerman <theron17@gmail.com>
2019-05-02 11:05:43 -05:00
Aaron Ackerman
cfd610ac55 Update default image repo in flagger chart readme to be weaveworks 2019-05-02 07:18:00 -05:00
stefanprodan
82067f13bf Add GitOps diagram 2019-05-01 13:09:18 +03:00
Stefan Prodan
242d79e49d Merge pull request #159 from weaveworks/release-v0.12.0
Prepare release v0.12.0
2019-04-29 17:08:16 +03:00
stefanprodan
4f01ecde5a Update changelog 2019-04-29 16:41:26 +03:00
stefanprodan
61141c7479 Release v0.12.0 2019-04-29 16:37:48 +03:00
Stefan Prodan
62429ff710 Merge pull request #158 from weaveworks/docs-supergloo
Add SuperGloo install docs
2019-04-29 16:35:58 +03:00
stefanprodan
82a1f45cc1 Fix load tester image repo 2019-04-29 11:17:19 +03:00
stefanprodan
1a95fc2a9c Add SuperGloo install docs 2019-04-26 19:51:09 +03:00
Stefan Prodan
13816eeafa Merge pull request #151 from yuval-k/supergloo
Supergloo Support
2019-04-25 23:18:22 +03:00
Yuval Kohavi
5279f73c17 use name.namespace instead of namespace.name 2019-04-25 11:10:23 -04:00
Yuval Kohavi
d196bb2856 e2e test 2019-04-24 16:00:55 -04:00
Yuval Kohavi
3f8f634a1b add e2e tests 2019-04-23 18:06:46 -04:00
Yuval Kohavi
350efb2bfe gloo upstream group support 2019-04-23 07:47:50 -04:00
Yuval Kohavi
5ba27c898e remove todo 2019-04-23 07:42:52 -04:00
Stefan Prodan
57f1b63fa1 Merge pull request #156 from weaveworks/docs-fix
Fix Tiller-less install command
2019-04-22 20:15:11 +03:00
stefanprodan
d69e203479 Fix Tiller-less install command 2019-04-22 20:08:56 +03:00
Yuval Kohavi
4d7fae39a8 add retries and cors 2019-04-19 14:41:50 -04:00
Yuval Kohavi
2dc554c92a dep ensure twice 2019-04-19 11:23:32 -04:00
Yuval Kohavi
21c394ef7f pin supergloo\solo-kit 2019-04-19 11:20:28 -04:00
Yuval Kohavi
2173bfc1a0 Merge remote-tracking branch 'origin/master' into supergloo-updated 2019-04-19 11:17:37 -04:00
Yuval Kohavi
a19d016e14 more rules 2019-04-19 10:59:04 -04:00
Stefan Prodan
8f1b5df9e2 Merge pull request #154 from weaveworks/dep-update
Disable bats in load tester artifacts
2019-04-19 13:03:39 +03:00
stefanprodan
2d6b8ecfdf Disable bats in load tester artifacts 2019-04-19 13:02:20 +03:00
Stefan Prodan
8093612011 Merge pull request #153 from weaveworks/dep-update
Update Kubernetes packages to 1.13.1
2019-04-18 20:07:33 +03:00
stefanprodan
39dc761e32 Make codegen work with the klog shim 2019-04-18 19:56:48 +03:00
stefanprodan
0c68983c62 Update deps to Kubernetes 1.13.1 2019-04-18 19:30:55 +03:00
Stefan Prodan
c7539f6e4b Merge pull request #152 from weaveworks/release-v0.11.1
Prepare release v0.11.1
2019-04-18 16:14:55 +03:00
stefanprodan
8cebc0acee Update changelog for v0.11.1 2019-04-18 15:40:48 +03:00
stefanprodan
f60c4d60cf Release v0.11.1 2019-04-18 14:50:26 +03:00
stefanprodan
662f9cba2e Add bats tests to load tester artifacts 2019-04-18 14:34:25 +03:00
stefanprodan
4a82e1e223 Use the builtin metrics in docs 2019-04-18 14:25:55 +03:00
stefanprodan
b60b912bf8 Use the builtin metrics in artifacts 2019-04-18 13:53:13 +03:00
stefanprodan
093348bc60 Release loadtester 0.3.0 with bats support 2019-04-18 13:45:32 +03:00
Yuval Kohavi
37ebbf14f9 fix compile 2019-04-17 18:44:33 -04:00
Yuval Kohavi
156488c8d5 Merge remote-tracking branch 'origin/master' into supergloo-updated 2019-04-17 18:24:41 -04:00
Yuval Kohavi
68d1f583cc more tests 2019-04-17 13:04:02 -04:00
Stefan Prodan
3492b07d9a Merge pull request #150 from weaveworks/release-v0.11.0
Release v0.11.0
2019-04-17 11:25:16 +03:00
stefanprodan
d0b582048f Add change log for v0.11.0 2019-04-17 11:22:02 +03:00
stefanprodan
a82eb7b01f Release v0.11.0 2019-04-17 11:13:31 +03:00
stefanprodan
cd08afcbeb Add bash bats task runner
- run bats tests (blocking requests)
2019-04-17 11:05:10 +03:00
stefanprodan
331942a4ed Switch to Docker Hub from Quay 2019-04-17 11:03:38 +03:00
Yuval Kohavi
aa24d6ff7e minor change 2019-04-16 19:16:49 -04:00
Yuval Kohavi
58c2c19f1e re generate code 2019-04-16 19:16:37 -04:00
Yuval Kohavi
2a91149211 starting adding tests 2019-04-16 19:16:15 -04:00
Yuval Kohavi
868482c240 basics seem working! 2019-04-16 15:10:08 -04:00
Stefan Prodan
4e387fa943 Merge pull request #149 from weaveworks/e2e
Add end-to-end tests for A/B testing and pre/post hooks
2019-04-16 11:11:01 +03:00
stefanprodan
15484363d6 Add A/B testing and hooks to e2e readme 2019-04-16 11:00:03 +03:00
stefanprodan
50b7b74480 Speed up e2e tests by reducing the number of iterations 2019-04-16 10:41:35 +03:00
stefanprodan
adb53c63dd Add e2e tests for A/B testing and pre/post hooks 2019-04-16 10:26:00 +03:00
Stefan Prodan
bdc3a32e96 Merge pull request #148 from weaveworks/selectors
Make the pod selector label configurable
2019-04-16 09:51:05 +03:00
stefanprodan
65f716182b Add default selectors to docs 2019-04-15 13:30:58 +03:00
stefanprodan
6ef72e2550 Make the pod selector configurable
- default labels: app, name and app.kubernetes.io/name
2019-04-15 12:57:25 +03:00
stefanprodan
60f51ad7d5 Move deployer and config tracker to canary package 2019-04-15 11:27:08 +03:00
stefanprodan
a09dc2cbd8 Rename logging package 2019-04-15 11:25:45 +03:00
Stefan Prodan
825d07aa54 Merge pull request #147 from weaveworks/pre-post-rollout-hooks
Add pre/post-rollout webhooks
2019-04-14 19:52:19 +03:00
stefanprodan
f46882c778 Update cert-manager to v0.7 in GKE docs 2019-04-14 12:24:35 +03:00
stefanprodan
663fa08cc1 Add hook type and status to CRD schema validation 2019-04-13 21:21:51 +03:00
stefanprodan
19e625d38e Add pre/post rollout webhooks to docs 2019-04-13 20:30:19 +03:00
stefanprodan
edcff9cd15 Execute pre/post rollout webhooks
- halt the canary advancement if pre-rollout hooks are failing
- include the canary status (Succeeded/Failed) in the post-rollout webhook payload
- ignore post-rollout webhook failures
- log pre/post rollout webhook response result
2019-04-13 15:43:23 +03:00
stefanprodan
e0fc5ecb39 Add hook type to CRD
- pre-rollout execute webhook before routing traffic to canary
- rollout execute webhook during the canary analysis on each iteration
- post-rollout execute webhook after the canary has been promoted or rolled back
Add canary phase to webhook payload
2019-04-13 15:37:41 +03:00
stefanprodan
4ac6629969 Exclude docs branches from CI 2019-04-13 15:33:03 +03:00
Stefan Prodan
68d8dad7c8 Merge pull request #146 from weaveworks/metrics
Unify App Mesh and Istio builtin metric checks
2019-04-13 00:47:35 +03:00
stefanprodan
4ab9ceafc1 Use metrics alias in e2e tests 2019-04-12 17:43:47 +03:00
stefanprodan
352ed898d4 Add request success rate and duration metrics alias 2019-04-12 17:00:04 +03:00
stefanprodan
e091d6a50d Set Envoy request duration to ms 2019-04-12 16:25:34 +03:00
stefanprodan
c651ef00c9 Add Envoy request duration test 2019-04-12 16:09:00 +03:00
stefanprodan
4b17788a77 Add Envoy request duration P99 query 2019-04-12 16:02:23 +03:00
Yuval Kohavi
e5612bca50 dep ensure 2019-04-10 20:20:10 -04:00
Yuval Kohavi
d21fb1afe8 initial supergloo code 2019-04-10 20:19:49 -04:00
Yuval Kohavi
89d0a533e2 dep + vendor 2019-04-10 20:03:53 -04:00
Stefan Prodan
db673dddd9 Merge pull request #141 from weaveworks/default-mesh
Set default mesh gateway only if no gateway is specified
2019-04-08 10:26:33 +03:00
stefanprodan
88ad457e87 Add default mesh gateway to docs and examples 2019-04-06 12:26:11 +03:00
stefanprodan
126b68559e Set default mesh gateway if no gateway is specified 2019-04-06 12:21:30 +03:00
stefanprodan
2cd3fe47e6 Remove FAQ from docs site index 2019-04-03 19:33:37 +03:00
Stefan Prodan
15eb7cce55 Merge pull request #139 from dholbach/add-faq-stub
Add faq stub
2019-04-03 18:33:16 +03:00
Daniel Holbach
13f923aabf remove FAQ from main page for now, as requested by Stefan 2019-04-03 17:25:45 +02:00
Daniel Holbach
0ffb112063 move FAQ into separate file, add links 2019-04-03 14:49:21 +02:00
Daniel Holbach
b4ea6af110 add FAQ stub
closes: #132
2019-04-03 11:10:55 +02:00
Daniel Holbach
611c8f7374 make markdownlint happy
Signed-off-by: Daniel Holbach <daniel@weave.works>
2019-04-03 10:56:04 +02:00
Daniel Holbach
1cc73f37e7 go OCD on the feature table
Signed-off-by: Daniel Holbach <daniel@weave.works>
2019-04-03 10:52:40 +02:00
Stefan Prodan
ca37fc0eb5 Merge pull request #136 from weaveworks/feature-list
Add Istio vs App Mesh feature list
2019-04-02 13:37:20 +03:00
stefanprodan
5380624da9 Add CORS, retries and timeouts to feature list 2019-04-02 13:27:32 +03:00
stefanprodan
aaece0bd44 Add acceptance tests to feature list 2019-04-02 13:05:01 +03:00
stefanprodan
de7cc17f5d Add Istio vs App Mesh feature list 2019-04-02 12:57:15 +03:00
Stefan Prodan
66efa39d27 Merge pull request #134 from dholbach/add-features-section
Seed a features section
2019-04-02 12:44:17 +03:00
Stefan Prodan
ff7c0a105d Merge pull request #130 from weaveworks/metrics-refactor
Refactor the metrics observer
2019-04-02 12:41:59 +03:00
Daniel Holbach
7b29253df4 use white-check-mark instead - hope this works this time 2019-04-01 18:51:44 +02:00
Daniel Holbach
7ef63b341e use white-heavy-check-mark instead 2019-04-01 18:47:10 +02:00
Daniel Holbach
e7bfaa4f1a factor in Stefan's feedback - use a table instead 2019-04-01 18:44:51 +02:00
Daniel Holbach
3a9a408941 Seed a features section
Closes: #133

Signed-off-by: Daniel Holbach <daniel@weave.works>
2019-04-01 18:09:57 +02:00
stefanprodan
3e43963daa Wait for Istio pods to be ready 2019-04-01 14:52:03 +03:00
stefanprodan
69a6e260f5 Bring down the Istio e2e CPU requests 2019-04-01 14:40:11 +03:00
stefanprodan
664e7ad555 Debug e2e load test 2019-04-01 14:33:47 +03:00
stefanprodan
ee4a009a06 Print Istio e2e status 2019-04-01 14:09:19 +03:00
stefanprodan
36dfd4dd35 Bring down Istio Pilot memory requests 2019-04-01 13:09:52 +03:00
stefanprodan
dbf36082b2 Revert Istio e2e to 1.1.0 2019-04-01 12:55:00 +03:00
stefanprodan
3a1018cff6 Change Istio e2e limits 2019-03-31 18:00:03 +03:00
stefanprodan
fc10745a1a Upgrade Istio e2e to v1.1.1 2019-03-31 14:43:08 +03:00
stefanprodan
347cfd06de Upgrade Kubernetes Kind to v0.2.1 2019-03-31 14:30:27 +03:00
stefanprodan
ec759ce467 Add Envoy success rate test 2019-03-31 14:17:39 +03:00
stefanprodan
f211e0fe31 Use go templates to render the builtin promql queries 2019-03-31 13:55:14 +03:00
stefanprodan
c91a128b65 Fix observer mock init 2019-03-30 11:55:41 +02:00
stefanprodan
6a080f3032 Rename observer and recorder 2019-03-30 11:49:43 +02:00
stefanprodan
b2c12c1131 Move observer to metrics package 2019-03-30 11:45:39 +02:00
Stefan Prodan
b945b37089 Merge pull request #127 from weaveworks/metrics
Refactor Prometheus recorder
2019-03-28 15:49:08 +02:00
stefanprodan
9a5529a0aa Add flagger_info metric to docs 2019-03-28 12:11:25 +02:00
stefanprodan
025785389d Refactor Prometheus recorder
- add flagger_info gauge metric
- expose the version and mesh provider as labels
- move the recorder to the metrics package
2019-03-28 11:58:19 +02:00
stefanprodan
48d9a0dede Ensure the status metric is set after a restart 2019-03-28 11:52:13 +02:00
Stefan Prodan
fbdf38e990 Merge pull request #124 from weaveworks/release-v0.10.0
Release v0.10.0 (AWS App Mesh edition)
2019-03-27 14:20:50 +02:00
stefanprodan
ef5bf70386 Update go to 1.12 2019-03-27 13:02:30 +02:00
stefanprodan
274c1469b4 Update changelog v0.10.0 2019-03-27 09:46:06 +02:00
stefanprodan
960d506360 Upgrade mesh definition to v1beta1 2019-03-27 09:40:11 +02:00
stefanprodan
79a6421178 Move load tester to Weaveworks Quay 2019-03-27 09:35:34 +02:00
Stefan Prodan
8b5c004860 Merge pull request #123 from weaveworks/appmesh-v1beta1
Update App Mesh to v1beta1
2019-03-26 21:53:34 +02:00
stefanprodan
f54768772e Fix App Mesh success rate graph 2019-03-26 21:30:18 +02:00
stefanprodan
b9075dc6f9 Update App Mesh to v1beta1 2019-03-26 20:29:40 +02:00
Stefan Prodan
107596ad54 Merge pull request #122 from weaveworks/prep-0.10.0
Reconcile ClusterIP services and prep v0.10.0
2019-03-26 17:25:57 +02:00
stefanprodan
3c6a2b1508 Update changelog for v0.10.0 2019-03-26 17:12:46 +02:00
stefanprodan
f996cba354 Set Grafana dashboards readable IDs 2019-03-26 17:12:46 +02:00
stefanprodan
bdd864fbdd Add port and mesh name to CRD validation 2019-03-26 17:12:46 +02:00
stefanprodan
ca074ef13f Rename router sync to reconcile 2019-03-26 17:12:46 +02:00
stefanprodan
ddd3a8251e Reconcile ClusterIP services
- add svc update tests fix: #114
2019-03-26 17:12:46 +02:00
stefanprodan
f5b862dc1b Add App Mesh docs to readme 2019-03-26 17:12:45 +02:00
stefanprodan
d45d475f61 Add Grafana dashboard link and update screen 2019-03-26 17:12:45 +02:00
stefanprodan
d0f72ea3fa Bump Flagger version to 0.10.0 2019-03-26 17:12:45 +02:00
stefanprodan
5ed5d1e5b6 Fix load tester install command for zsh 2019-03-26 11:57:01 +02:00
stefanprodan
311b14026e Release loadtester chart v0.2.0 2019-03-26 11:33:07 +02:00
stefanprodan
67cd722b54 Release Grafana chart v1.1.0 2019-03-26 11:16:42 +02:00
stefanprodan
7f6247eb7b Add jq requirement to App Mesh installer 2019-03-26 10:14:44 +02:00
Stefan Prodan
f3fd515521 Merge pull request #121 from weaveworks/stats
Fix canary status prom metric
2019-03-26 09:58:54 +02:00
stefanprodan
9db5dd0d7f Update changelog 2019-03-26 09:58:40 +02:00
stefanprodan
d07925d79d Fix canary status prom metrics 2019-03-25 17:26:22 +02:00
Stefan Prodan
662d0f31ff Merge pull request #119 from weaveworks/appmesh-ref
App Mesh docs
2019-03-25 15:13:35 +02:00
stefanprodan
2c5ad0bf8f Disable App Mesh ingress for load tester 2019-03-25 15:00:57 +02:00
stefanprodan
0ea76b986a Prevent the CRD from being removed by Helm 2019-03-25 15:00:23 +02:00
stefanprodan
3c4253c336 Docs fixes 2019-03-25 14:59:55 +02:00
stefanprodan
77ba28e91c Use App Mesh install script 2019-03-25 09:41:56 +02:00
stefanprodan
6399e7586c Update overview diagram 2019-03-24 15:10:13 +02:00
Stefan Prodan
1caa62adc8 Merge pull request #118 from weaveworks/appmesh
App Mesh refactoring + docs
2019-03-24 13:53:40 +02:00
stefanprodan
8fa558f124 Add intro to App Mesh tutorial 2019-03-24 13:38:00 +02:00
stefanprodan
191228633b Add App Mesh backend example 2019-03-24 13:11:03 +02:00
stefanprodan
8a981f935a Add App Mesh GitOps diagram 2019-03-24 12:26:35 +02:00
stefanprodan
a8ea9adbcc Add the Slack notifications to App Mesh tutorial 2019-03-23 17:27:53 +02:00
stefanprodan
685d94c44b Add Grafana screen to App Mesh docs 2019-03-23 16:26:24 +02:00
stefanprodan
153ed1b044 Add the App Mesh ingress to docs 2019-03-23 16:09:30 +02:00
stefanprodan
7788f3a1ba Add virtual node for App Mesh ingress 2019-03-23 15:56:52 +02:00
stefanprodan
cd99225f9b Add App Mesh canary deployments tutorial 2019-03-23 15:41:04 +02:00
stefanprodan
33ba3b8d4a Add App Mesh canary demo definitions 2019-03-23 15:40:27 +02:00
stefanprodan
d222dd1069 Change GitHub raw URLs to Weaveworks org 2019-03-23 13:38:57 +02:00
stefanprodan
39fd3d46ba Add App Mesh ingress gateway deployment 2019-03-23 13:22:33 +02:00
stefanprodan
419c1804b6 Add App Mesh telemetry deployment 2019-03-23 13:21:52 +02:00
stefanprodan
ae0351ddad Exclude the namespace from AppMesh object names
ref: https://github.com/aws/aws-app-mesh-controller-for-k8s/issues/14
2019-03-23 11:25:39 +02:00
stefanprodan
941be15762 Fix typo in comments 2019-03-23 11:25:31 +02:00
stefanprodan
578ebcf6ed Use pod name filter in Envoy metrics query
Add support for Weave Cloud Prometheus agent
2019-03-23 11:20:51 +02:00
Stefan Prodan
27ab4b08f9 Merge pull request #112 from weaveworks/appmesh-grafana
Add AppMesh Grafana dashboard
2019-03-21 16:19:05 +02:00
stefanprodan
428b2208ba Add Flagger logo svg format (CNCF landscape) 2019-03-21 16:05:51 +02:00
Stefan Prodan
438c553d60 Merge pull request #113 from tanordheim/service-port-name
Allow setting name of ports in generated services
2019-03-21 15:38:21 +02:00
Trond Nordheim
90cb293182 Add GRPC canary analysis custom query example 2019-03-21 14:27:11 +01:00
Trond Nordheim
1f9f93ebe4 Add portName information to how it works-guide 2019-03-21 12:41:44 +01:00
Trond Nordheim
f5b97fbb74 Add support for naming generated service ports 2019-03-21 12:37:02 +01:00
stefanprodan
ce79244126 Add maintainers 2019-03-21 12:34:28 +02:00
stefanprodan
3af5d767d8 Add AppMesh dashboard screen 2019-03-21 12:19:04 +02:00
stefanprodan
3ce3efd2f2 Fix Grafana port forward command 2019-03-21 12:11:11 +02:00
stefanprodan
8108edea31 Add AppMesh Grafana dashboard 2019-03-21 12:06:50 +02:00
Stefan Prodan
5d80087ab3 Merge pull request #109 from weaveworks/appmesh-docs
Add EKS App Mesh install docs
2019-03-21 11:02:35 +02:00
stefanprodan
6593be584d Remove namespace from Prometheus URL 2019-03-21 10:52:21 +02:00
stefanprodan
a0f63f858f Update changelog and roadmap 2019-03-21 09:57:49 +02:00
stefanprodan
49914f3bd5 Add EKS App Mesh install link to readme 2019-03-20 23:46:14 +02:00
stefanprodan
71988a8b98 Add EKS App Mesh install guide 2019-03-20 23:40:09 +02:00
stefanprodan
d65be6ef58 Add AppMesh virtual node to load tester chart 2019-03-20 23:39:34 +02:00
Stefan Prodan
38c40d02e7 Merge pull request #108 from weaveworks/ww
Move to weaveworks org
2019-03-20 19:10:56 +02:00
stefanprodan
9e071b9d60 Move to weaveworks Quay 2019-03-20 18:52:27 +02:00
stefanprodan
b4ae060122 Move to weaveworks org 2019-03-20 18:26:04 +02:00
Stefan Prodan
436656e81b Merge pull request #107 from stefanprodan/appmesh
AWS App Mesh integration
2019-03-20 17:57:25 +02:00
stefanprodan
d7e111b7d4 Add mesh provider option to Helm chart 2019-03-20 12:59:35 +02:00
stefanprodan
4b6126dd1a Add Envoy HTTP success rate metric check 2019-03-19 15:52:26 +02:00
stefanprodan
89faa70196 Fix canary virtual node DNS discovery 2019-03-19 15:51:17 +02:00
stefanprodan
6ed9d4a1db Add AppMesh CRDs to Flagger's RBAC 2019-03-18 10:13:46 +02:00
stefanprodan
9d0e38c2e1 Disable primary readiness check in tests 2019-03-17 12:50:48 +02:00
stefanprodan
8b758fd616 Set primary min replicas 2019-03-17 12:30:11 +02:00
stefanprodan
14369d8be3 Fix virtual node backends 2019-03-17 12:29:04 +02:00
stefanprodan
7b4153113e Fix router tests 2019-03-17 11:14:47 +02:00
stefanprodan
7d340c5e61 Change mesh providers based on cmd flag 2019-03-17 10:52:52 +02:00
stefanprodan
337c94376d Add AppMesh routing tests 2019-03-17 10:29:40 +02:00
stefanprodan
0ef1d0b2f1 Implement AppMesh routing ops 2019-03-17 10:29:21 +02:00
stefanprodan
5cf67bd4e0 Add AppMesh router sync tests 2019-03-16 14:14:24 +02:00
stefanprodan
f22be17852 Add AppMesh router sync implementation
Sync virtual nodes and virtual services
2019-03-16 14:13:58 +02:00
stefanprodan
48e79d5dd4 Add mesh provider flag 2019-03-16 14:12:23 +02:00
stefanprodan
59f5a0654a Add AppMesh fields to Canary CRD 2019-03-16 14:11:24 +02:00
stefanprodan
6da2e11683 Add AppMesh CRDs and Kubernetes client 2019-03-16 14:10:09 +02:00
stefanprodan
802c087a4b Fix Istio Gateway certificate
fix: #102
2019-03-14 17:52:58 +02:00
Stefan Prodan
ed2048e9f3 Merge pull request #105 from stefanprodan/svc
Copy pod labels from canary to primary
2019-03-14 01:59:20 +02:00
stefanprodan
437b1d30c0 Copy labels from canary to primary 2019-03-14 01:49:07 +02:00
stefanprodan
ba1788cbc5 Change default ClusterIP to point to primary
- ensures that the routing works without a service mesh
2019-03-14 01:48:10 +02:00
Stefan Prodan
773094a20d Merge pull request #99 from stefanprodan/loadtester-v0.2.0
Release loadtester v0.2.0
2019-03-12 12:07:47 +02:00
stefanprodan
5aa39106a0 Update loadtester cmd for e2e testing 2019-03-12 11:58:16 +02:00
stefanprodan
a9167801ba Release loadtester v0.2.0 2019-03-12 11:55:31 +02:00
Stefan Prodan
62f4a6cb96 Merge pull request #90 from cloudang/ngrinder
Support delegation to external load testing tools
2019-03-12 11:47:30 +02:00
Stefan Prodan
ea2b41e96e Merge pull request #98 from peterj/master
Upgrade Alpine to 3.9
2019-03-12 10:18:16 +02:00
Alex Wong
d28ce650e9 fix typo 2019-03-12 15:40:20 +08:00
Alex Wong
1bfcdba499 update vendor 2019-03-12 15:00:55 +08:00
Alex Wong
e48faa9144 add docs for ngrinder load testing 2019-03-12 14:59:55 +08:00
Alex Wong
33fbe99561 remove logCmdOutput from docs and k8s resources definition 2019-03-12 14:35:39 +08:00
Alex Wong
989925b484 update canary spec example, cmd flag logCmdOutput moved here 2019-03-12 14:33:23 +08:00
Alex Wong
7dd66559e7 add metadata field 'cmd' 2019-03-12 14:31:30 +08:00
Alex Wong
2ef1c5608e remove logCmdOutput flag 2019-03-12 14:31:00 +08:00
Alex Wong
b5932e8905 support time duration literal 2019-03-12 14:29:50 +08:00
Peter Jausovec
37999d3250 Upgrade Alpine to 3.9. Fixes #89 2019-03-11 20:17:15 -07:00
Stefan Prodan
83985ae482 Merge pull request #93 from stefanprodan/release-v0.9.0
Release v0.9.0
2019-03-11 15:26:00 +02:00
Stefan Prodan
3adfcc837e Merge pull request #94 from stefanprodan/fix-abtest-routing
Fix A/B Testing HTTP URI match conditions
2019-03-11 15:15:42 +02:00
stefanprodan
c720fee3ab Target the canary header in the load test 2019-03-11 15:04:01 +02:00
stefanprodan
881387e522 Fix HTTP URI match conditions 2019-03-11 14:54:17 +02:00
stefanprodan
d9f3378e29 Add change log for v0.9.0 2019-03-11 14:03:55 +02:00
stefanprodan
ba87620225 Release v0.9.0 2019-03-11 13:57:10 +02:00
Stefan Prodan
1cd0c49872 Merge pull request #88 from stefanprodan/ab-testing
A/B testing - canary with session affinity
2019-03-11 13:55:06 +02:00
stefanprodan
12ac96deeb Document how to enable A/B testing 2019-03-11 12:58:33 +02:00
Alex Wong
17e6f35785 add gock.v1 dependency 2019-03-11 10:07:50 +08:00
Stefan Prodan
bd115633a3 Merge pull request #91 from huydinhle/update-analysis-interval
Sync job when canary's interval changes
fix #86
2019-03-09 19:58:34 +02:00
stefanprodan
86ea172380 Fix weight metric report 2019-03-08 23:28:45 +02:00
stefanprodan
d87bbbbc1e Add A/B testing tutorial 2019-03-08 21:26:52 +02:00
Huy Le
6196f69f4d Create New Job when Canary's Interval changes
- Currently whenever the Canary analysis interval changes, flagger does
not reflect this into canary's job.
- This change will make sure the canary analysis interval got updated whenever
the Canary object's interval changes
2019-03-08 10:27:34 -08:00
Alex Wong
be31bcf22f mocked test 2019-03-08 22:20:29 +08:00
Alex Wong
cba2135c69 add comments 2019-03-08 22:20:16 +08:00
Alex Wong
2e52573499 add gock dep 2019-03-08 22:20:02 +08:00
Alex Wong
b2ce1ed1fb test for ngrinder task 2019-03-08 21:30:26 +08:00
Alex Wong
77a485af74 poll ngrinder task status 2019-03-08 21:29:58 +08:00
stefanprodan
d8b847a973 Mention session affinity in docs 2019-03-08 15:05:44 +02:00
stefanprodan
e80a3d3232 Add A/B testing scheduling unit tests 2019-03-08 13:06:39 +02:00
stefanprodan
780ba82385 Log namespace restriction if one exists 2019-03-08 13:05:25 +02:00
stefanprodan
6ba69dce0a Add iterations field to CRD validation 2019-03-08 12:31:35 +02:00
stefanprodan
3c7a561db8 Add Istio routes A/B testing unit tests 2019-03-08 12:24:43 +02:00
stefanprodan
49c942bea0 Add A/B testing examples 2019-03-08 11:55:04 +02:00
stefanprodan
bf1ca293dc Implement fix routing for canary analysis
Allow A/B testing scenarios where instead of weighted routing the traffic is split between the primary and canary based on HTTP headers or cookies.
2019-03-08 11:54:41 +02:00
stefanprodan
62b906d30b Add canary HTTP match conditions and iterations 2019-03-08 11:49:32 +02:00
Alex Wong
65bf048189 add ngrinder support 2019-03-08 15:50:44 +08:00
Alex Wong
a498ed8200 move original cmd tester to standalone source 2019-03-08 15:50:26 +08:00
Alex Wong
9f12bbcd98 refactoring loadtester to support external testing platform 2019-03-08 15:49:35 +08:00
1660 changed files with 213164 additions and 25401 deletions

View File

@@ -1,6 +1,6 @@
version: 2.1
jobs:
e2e-testing:
e2e-istio-testing:
machine: true
steps:
- checkout
@@ -9,14 +9,78 @@ jobs:
- run: test/e2e-build.sh
- run: test/e2e-tests.sh
e2e-smi-istio-testing:
machine: true
steps:
- checkout
- run: test/e2e-kind.sh
- run: test/e2e-istio.sh
- run: test/e2e-smi-istio-build.sh
- run: test/e2e-tests.sh canary
e2e-supergloo-testing:
machine: true
steps:
- checkout
- run: test/e2e-kind.sh
- run: test/e2e-supergloo.sh
- run: test/e2e-build.sh supergloo:test.supergloo-system
- run: test/e2e-tests.sh canary
e2e-gloo-testing:
machine: true
steps:
- checkout
- run: test/e2e-kind.sh
- run: test/e2e-gloo.sh
- run: test/e2e-gloo-build.sh
- run: test/e2e-gloo-tests.sh
e2e-nginx-testing:
machine: true
steps:
- checkout
- run: test/e2e-kind.sh
- run: test/e2e-nginx.sh
- run: test/e2e-nginx-build.sh
- run: test/e2e-nginx-tests.sh
workflows:
version: 2
build-and-test:
jobs:
- e2e-testing:
- e2e-istio-testing:
filters:
branches:
ignore:
- gh-pages
- /gh-pages.*/
- /docs-.*/
- /release-.*/
- e2e-smi-istio-testing:
filters:
branches:
ignore:
- /gh-pages.*/
- /docs-.*/
- /release-.*/
- e2e-supergloo-testing:
filters:
branches:
ignore:
- /gh-pages.*/
- /docs-.*/
- /release-.*/
- e2e-nginx-testing:
filters:
branches:
ignore:
- /gh-pages.*/
- /docs-.*/
- /release-.*/
- e2e-gloo-testing:
filters:
branches:
ignore:
- /gh-pages.*/
- /docs-.*/
- /release-.*/

1
.github/CODEOWNERS vendored Normal file
View File

@@ -0,0 +1 @@
* @stefanprodan

View File

@@ -1,7 +1,7 @@
builds:
- main: ./cmd/flagger
binary: flagger
ldflags: -s -w -X github.com/stefanprodan/flagger/pkg/version.REVISION={{.Commit}}
ldflags: -s -w -X github.com/weaveworks/flagger/pkg/version.REVISION={{.Commit}}
goos:
- linux
goarch:

View File

@@ -1,8 +1,13 @@
sudo: required
language: go
branches:
except:
- /gh-pages.*/
- /docs-.*/
go:
- 1.11.x
- 1.12.x
services:
- docker
@@ -12,13 +17,7 @@ addons:
packages:
- docker-ce
#before_script:
# - go get -u sigs.k8s.io/kind
# - curl https://raw.githubusercontent.com/kubernetes/helm/master/scripts/get | bash
# - curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/
script:
- set -e
- make test-fmt
- make test-codegen
- go test -race -coverprofile=coverage.txt -covermode=atomic $(go list ./pkg/...)
@@ -29,16 +28,16 @@ after_success:
echo "PR build, skipping image push";
else
BRANCH_COMMIT=${TRAVIS_BRANCH}-$(echo ${TRAVIS_COMMIT} | head -c7);
docker tag stefanprodan/flagger:latest quay.io/stefanprodan/flagger:${BRANCH_COMMIT};
echo $DOCKER_PASS | docker login -u=$DOCKER_USER --password-stdin quay.io;
docker push quay.io/stefanprodan/flagger:${BRANCH_COMMIT};
docker tag weaveworks/flagger:latest weaveworks/flagger:${BRANCH_COMMIT};
echo $DOCKER_PASS | docker login -u=$DOCKER_USER --password-stdin;
docker push weaveworks/flagger:${BRANCH_COMMIT};
fi
- if [ -z "$TRAVIS_TAG" ]; then
echo "Not a release, skipping image push";
else
docker tag stefanprodan/flagger:latest quay.io/stefanprodan/flagger:${TRAVIS_TAG};
echo $DOCKER_PASS | docker login -u=$DOCKER_USER --password-stdin quay.io;
docker push quay.io/stefanprodan/flagger:$TRAVIS_TAG;
docker tag weaveworks/flagger:latest weaveworks/flagger:${TRAVIS_TAG};
echo $DOCKER_PASS | docker login -u=$DOCKER_USER --password-stdin;
docker push weaveworks/flagger:$TRAVIS_TAG;
fi
- bash <(curl -s https://codecov.io/bash)
- rm coverage.txt

View File

@@ -2,22 +2,129 @@
All notable changes to this project are documented in this file.
## 0.14.0 (2019-05-21)
Adds support for Service Mesh Interface and [Gloo](https://docs.flagger.app/usage/gloo-progressive-delivery) ingress controller
#### Features
- Add support for SMI (Istio weighted traffic) [#180](https://github.com/weaveworks/flagger/pull/180)
- Add support for Gloo ingress controller (weighted traffic) [#179](https://github.com/weaveworks/flagger/pull/179)
## 0.13.2 (2019-04-11)
Fixes for Jenkins X deployments (prevent the jx GC from removing the primary instance)
#### Fixes
- Do not copy labels from canary to primary deployment [#178](https://github.com/weaveworks/flagger/pull/178)
#### Improvements
- Add NGINX ingress controller e2e and unit tests [#176](https://github.com/weaveworks/flagger/pull/176)
## 0.13.1 (2019-04-09)
Fixes for custom metrics checks and NGINX Prometheus queries
#### Fixes
- Fix promql queries for custom checks and NGINX [#174](https://github.com/weaveworks/flagger/pull/174)
## 0.13.0 (2019-04-08)
Adds support for [NGINX](https://docs.flagger.app/usage/nginx-progressive-delivery) ingress controller
#### Features
- Add support for nginx ingress controller (weighted traffic and A/B testing) [#170](https://github.com/weaveworks/flagger/pull/170)
- Add Prometheus add-on to Flagger Helm chart for App Mesh and NGINX [79b3370](https://github.com/weaveworks/flagger/pull/170/commits/79b337089294a92961bc8446fd185b38c50a32df)
#### Fixes
- Fix duplicate hosts Istio error when using wildcards [#162](https://github.com/weaveworks/flagger/pull/162)
## 0.12.0 (2019-03-29)
Adds support for [SuperGloo](https://docs.flagger.app/install/flagger-install-with-supergloo)
#### Features
- Supergloo support for canary deployment (weighted traffic) [#151](https://github.com/weaveworks/flagger/pull/151)
## 0.11.1 (2019-04-18)
Move Flagger and the load tester container images to Docker Hub
#### Features
- Add Bash Automated Testing System support to Flagger tester for running acceptance tests as pre-rollout hooks
## 0.11.0 (2019-04-17)
Adds pre/post rollout [webhooks](https://docs.flagger.app/how-it-works#webhooks)
#### Features
- Add `pre-rollout` and `post-rollout` webhook types [#147](https://github.com/weaveworks/flagger/pull/147)
#### Improvements
- Unify App Mesh and Istio builtin metric checks [#146](https://github.com/weaveworks/flagger/pull/146)
- Make the pod selector label configurable [#148](https://github.com/weaveworks/flagger/pull/148)
#### Breaking changes
- Set default `mesh` Istio gateway only if no gateway is specified [#141](https://github.com/weaveworks/flagger/pull/141)
## 0.10.0 (2019-03-27)
Adds support for App Mesh
#### Features
- AWS App Mesh integration
[#107](https://github.com/weaveworks/flagger/pull/107)
[#123](https://github.com/weaveworks/flagger/pull/123)
#### Improvements
- Reconcile Kubernetes ClusterIP services [#122](https://github.com/weaveworks/flagger/pull/122)
#### Fixes
- Preserve pod labels on canary promotion [#105](https://github.com/weaveworks/flagger/pull/105)
- Fix canary status Prometheus metric [#121](https://github.com/weaveworks/flagger/pull/121)
## 0.9.0 (2019-03-11)
Allows A/B testing scenarios where instead of weighted routing, the traffic is split between the
primary and canary based on HTTP headers or cookies.
#### Features
- A/B testing - canary with session affinity [#88](https://github.com/weaveworks/flagger/pull/88)
#### Fixes
- Update the analysis interval when the custom resource changes [#91](https://github.com/weaveworks/flagger/pull/91)
## 0.8.0 (2019-03-06)
Adds support for CORS policy and HTTP request headers manipulation
#### Features
- CORS policy support [#83](https://github.com/stefanprodan/flagger/pull/83)
- Allow headers to be appended to HTTP requests [#82](https://github.com/stefanprodan/flagger/pull/82)
- CORS policy support [#83](https://github.com/weaveworks/flagger/pull/83)
- Allow headers to be appended to HTTP requests [#82](https://github.com/weaveworks/flagger/pull/82)
#### Improvements
- Refactor the routing management
[#72](https://github.com/stefanprodan/flagger/pull/72)
[#80](https://github.com/stefanprodan/flagger/pull/80)
- Fine-grained RBAC [#73](https://github.com/stefanprodan/flagger/pull/73)
- Add option to limit Flagger to a single namespace [#78](https://github.com/stefanprodan/flagger/pull/78)
[#72](https://github.com/weaveworks/flagger/pull/72)
[#80](https://github.com/weaveworks/flagger/pull/80)
- Fine-grained RBAC [#73](https://github.com/weaveworks/flagger/pull/73)
- Add option to limit Flagger to a single namespace [#78](https://github.com/weaveworks/flagger/pull/78)
## 0.7.0 (2019-02-28)
@@ -25,8 +132,8 @@ Adds support for custom metric checks, HTTP timeouts and HTTP retries
#### Features
- Allow custom promql queries in the canary analysis spec [#60](https://github.com/stefanprodan/flagger/pull/60)
- Add HTTP timeout and retries to canary service spec [#62](https://github.com/stefanprodan/flagger/pull/62)
- Allow custom promql queries in the canary analysis spec [#60](https://github.com/weaveworks/flagger/pull/60)
- Add HTTP timeout and retries to canary service spec [#62](https://github.com/weaveworks/flagger/pull/62)
## 0.6.0 (2019-02-25)
@@ -36,15 +143,15 @@ to be customized in the service spec of the canary custom resource.
#### Features
- Add HTTP match conditions and URI rewrite to the canary service spec [#55](https://github.com/stefanprodan/flagger/pull/55)
- Add HTTP match conditions and URI rewrite to the canary service spec [#55](https://github.com/weaveworks/flagger/pull/55)
- Update virtual service when the canary service spec changes
[#54](https://github.com/stefanprodan/flagger/pull/54)
[#51](https://github.com/stefanprodan/flagger/pull/51)
[#54](https://github.com/weaveworks/flagger/pull/54)
[#51](https://github.com/weaveworks/flagger/pull/51)
#### Improvements
- Run e2e testing on [Kubernetes Kind](https://github.com/kubernetes-sigs/kind) for canary promotion
[#53](https://github.com/stefanprodan/flagger/pull/53)
[#53](https://github.com/weaveworks/flagger/pull/53)
## 0.5.1 (2019-02-14)
@@ -52,15 +159,15 @@ Allows skipping the analysis phase to ship changes directly to production
#### Features
- Add option to skip the canary analysis [#46](https://github.com/stefanprodan/flagger/pull/46)
- Add option to skip the canary analysis [#46](https://github.com/weaveworks/flagger/pull/46)
#### Fixes
- Reject deployment if the pod label selector doesn't match `app: <DEPLOYMENT_NAME>` [#43](https://github.com/stefanprodan/flagger/pull/43)
- Reject deployment if the pod label selector doesn't match `app: <DEPLOYMENT_NAME>` [#43](https://github.com/weaveworks/flagger/pull/43)
## 0.5.0 (2019-01-30)
Track changes in ConfigMaps and Secrets [#37](https://github.com/stefanprodan/flagger/pull/37)
Track changes in ConfigMaps and Secrets [#37](https://github.com/weaveworks/flagger/pull/37)
#### Features
@@ -76,7 +183,7 @@ Track changes in ConfigMaps and Secrets [#37](https://github.com/stefanprodan/fl
## 0.4.1 (2019-01-24)
Load testing webhook [#35](https://github.com/stefanprodan/flagger/pull/35)
Load testing webhook [#35](https://github.com/weaveworks/flagger/pull/35)
#### Features
@@ -90,7 +197,7 @@ Load testing webhook [#35](https://github.com/stefanprodan/flagger/pull/35)
## 0.4.0 (2019-01-18)
Restart canary analysis if revision changes [#31](https://github.com/stefanprodan/flagger/pull/31)
Restart canary analysis if revision changes [#31](https://github.com/weaveworks/flagger/pull/31)
#### Breaking changes
@@ -111,7 +218,7 @@ Restart canary analysis if revision changes [#31](https://github.com/stefanproda
## 0.3.0 (2019-01-11)
Configurable canary analysis duration [#20](https://github.com/stefanprodan/flagger/pull/20)
Configurable canary analysis duration [#20](https://github.com/weaveworks/flagger/pull/20)
#### Breaking changes
@@ -126,7 +233,7 @@ Configurable canary analysis duration [#20](https://github.com/stefanprodan/flag
## 0.2.0 (2019-01-04)
Webhooks [#18](https://github.com/stefanprodan/flagger/pull/18)
Webhooks [#18](https://github.com/weaveworks/flagger/pull/18)
#### Features
@@ -137,7 +244,7 @@ Webhooks [#18](https://github.com/stefanprodan/flagger/pull/18)
## 0.1.2 (2018-12-06)
Improve Slack notifications [#14](https://github.com/stefanprodan/flagger/pull/14)
Improve Slack notifications [#14](https://github.com/weaveworks/flagger/pull/14)
#### Features
@@ -146,7 +253,7 @@ Improve Slack notifications [#14](https://github.com/stefanprodan/flagger/pull/1
## 0.1.1 (2018-11-28)
Canary progress deadline [#10](https://github.com/stefanprodan/flagger/pull/10)
Canary progress deadline [#10](https://github.com/weaveworks/flagger/pull/10)
#### Features

View File

@@ -1,17 +1,17 @@
FROM golang:1.11
FROM golang:1.12
RUN mkdir -p /go/src/github.com/stefanprodan/flagger/
RUN mkdir -p /go/src/github.com/weaveworks/flagger/
WORKDIR /go/src/github.com/stefanprodan/flagger
WORKDIR /go/src/github.com/weaveworks/flagger
COPY . .
RUN GIT_COMMIT=$(git rev-list -1 HEAD) && \
CGO_ENABLED=0 GOOS=linux go build -ldflags "-s -w \
-X github.com/stefanprodan/flagger/pkg/version.REVISION=${GIT_COMMIT}" \
-X github.com/weaveworks/flagger/pkg/version.REVISION=${GIT_COMMIT}" \
-a -installsuffix cgo -o flagger ./cmd/flagger/*
FROM alpine:3.8
FROM alpine:3.9
RUN addgroup -S flagger \
&& adduser -S -g flagger flagger \
@@ -19,7 +19,7 @@ RUN addgroup -S flagger \
WORKDIR /home/flagger
COPY --from=0 /go/src/github.com/stefanprodan/flagger/flagger .
COPY --from=0 /go/src/github.com/weaveworks/flagger/flagger .
RUN chown -R flagger:flagger ./

View File

@@ -1,24 +1,8 @@
FROM golang:1.11 AS hey-builder
FROM golang:1.12 AS builder
RUN mkdir -p /go/src/github.com/rakyll/hey/
RUN mkdir -p /go/src/github.com/weaveworks/flagger/
WORKDIR /go/src/github.com/rakyll/hey
ADD https://github.com/rakyll/hey/archive/v0.1.1.tar.gz .
RUN tar xzf v0.1.1.tar.gz --strip 1
RUN go get ./...
RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 \
go install -ldflags '-w -extldflags "-static"' \
/go/src/github.com/rakyll/hey
FROM golang:1.11 AS builder
RUN mkdir -p /go/src/github.com/stefanprodan/flagger/
WORKDIR /go/src/github.com/stefanprodan/flagger
WORKDIR /go/src/github.com/weaveworks/flagger
COPY . .
@@ -26,16 +10,18 @@ RUN go test -race ./pkg/loadtester/
RUN CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o loadtester ./cmd/loadtester/*
FROM alpine:3.8
FROM bats/bats:v1.1.0
RUN addgroup -S app \
&& adduser -S -g app app \
&& apk --no-cache add ca-certificates curl
&& apk --no-cache add ca-certificates curl jq
WORKDIR /home/app
COPY --from=hey-builder /go/bin/hey /usr/local/bin/hey
COPY --from=builder /go/src/github.com/stefanprodan/flagger/loadtester .
RUN curl -sSLo hey "https://storage.googleapis.com/jblabs/dist/hey_linux_v0.1.2" && \
chmod +x hey && mv hey /usr/local/bin/hey
COPY --from=builder /go/src/github.com/weaveworks/flagger/loadtester .
RUN chown -R app:app ./

626
Gopkg.lock generated
View File

@@ -6,16 +6,32 @@
name = "cloud.google.com/go"
packages = ["compute/metadata"]
pruneopts = "NUT"
revision = "c9474f2f8deb81759839474b6bd1726bbfe1c1c4"
version = "v0.36.0"
revision = "fcb9a2d5f791d07be64506ab54434de65989d370"
version = "v0.37.4"
[[projects]]
branch = "master"
digest = "1:f12358576cd79bba0ae626530d23cde63416744f486c8bc817802c6907eaadd7"
name = "github.com/armon/go-metrics"
packages = ["."]
pruneopts = "NUT"
revision = "f0300d1749da6fa982027e449ec0c7a145510c3c"
[[projects]]
digest = "1:13d5750ba049ce46bf931792803f1d5584b04026df9badea5931e33c22aa34ee"
name = "github.com/avast/retry-go"
packages = ["."]
pruneopts = "NUT"
revision = "08d411bf8302219fe47ca04dbdf9de892010c5e5"
version = "v2.2.0"
[[projects]]
digest = "1:707ebe952a8b3d00b343c01536c79c73771d100f63ec6babeaed5c79e2b8a8dd"
name = "github.com/beorn7/perks"
packages = ["quantile"]
pruneopts = "NUT"
revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
revision = "4b2b341e8d7715fae06375aa633dbb6e91b3fb46"
version = "v1.0.0"
[[projects]]
digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
@@ -25,6 +41,14 @@
revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
version = "v1.1.1"
[[projects]]
digest = "1:32598368f409bbee79deb9d43569fcd92b9fb27f39155f5e166b3371217f051f"
name = "github.com/evanphx/json-patch"
packages = ["."]
pruneopts = "NUT"
revision = "72bf35d0ff611848c1dc9df0f976c81192392fa5"
version = "v4.1.0"
[[projects]]
digest = "1:81466b4218bf6adddac2572a30ac733a9255919bc2f470b4827a317bd4ee1756"
name = "github.com/ghodss/yaml"
@@ -34,25 +58,20 @@
version = "v1.0.0"
[[projects]]
digest = "1:a1b2a5e38f79688ee8250942d5fa960525fceb1024c855c7bc76fa77b0f3cca2"
digest = "1:895d2773c9e78e595dd5f946a25383d579d3094a9d8d9306dba27359f190f275"
name = "github.com/gogo/protobuf"
packages = [
"gogoproto",
"jsonpb",
"proto",
"protoc-gen-gogo/descriptor",
"sortkeys",
"types",
]
pruneopts = "NUT"
revision = "ba06b47c162d49f2af050fb4c75bcbc86a159d5c"
version = "v1.2.1"
[[projects]]
branch = "master"
digest = "1:e0f096f9332ad5f84341de82db69fd098864b17c668333a1fbbffd1b846dcc2b"
name = "github.com/golang/glog"
packages = ["."]
pruneopts = "NUT"
revision = "2cc4b790554d1a0c48fcc3aeb891e3de70cf8de0"
source = "github.com/istio/glog"
[[projects]]
branch = "master"
digest = "1:b7cb6054d3dff43b38ad2e92492f220f57ae6087ee797dca298139776749ace8"
@@ -62,26 +81,35 @@
revision = "5b532d6fd5efaf7fa130d4e859a2fde0fc3a9e1b"
[[projects]]
digest = "1:2d0636a8c490d2272dd725db26f74a537111b99b9dbdda0d8b98febe63702aa4"
digest = "1:a98a0b00720dc3149bf3d0c8d5726188899e5bab2f5072b9a7ef82958fbc98b2"
name = "github.com/golang/protobuf"
packages = [
"proto",
"protoc-gen-go/descriptor",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/timestamp",
]
pruneopts = "NUT"
revision = "c823c79ea1570fb5ff454033735a8e68575d1d0f"
version = "v1.3.0"
revision = "b5d812f8a3706043e23a9cd5babf2e5423744d30"
version = "v1.3.1"
[[projects]]
digest = "1:7f114b78210bf5b75f307fc97cff293633c835bab1e0ea8a744a44b39c042dfe"
name = "github.com/golang/snappy"
packages = ["."]
pruneopts = "NUT"
revision = "2a8bb927dd31d8daada140a5d09578521ce5c36a"
version = "v0.0.1"
[[projects]]
branch = "master"
digest = "1:05f95ffdfcf651bdb0f05b40b69e7f5663047f8da75c72d58728acb59b5cc107"
name = "github.com/google/btree"
packages = ["."]
pruneopts = "NUT"
revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306"
version = "v1.0.0"
[[projects]]
digest = "1:d2754cafcab0d22c13541618a8029a70a8959eb3525ff201fe971637e2274cd0"
@@ -98,12 +126,12 @@
version = "v0.2.0"
[[projects]]
branch = "master"
digest = "1:52c5834e2bebac9030c97cc0798ac11c3aa8a39f098aeb419f142533da6cd3cc"
name = "github.com/google/gofuzz"
packages = ["."]
pruneopts = "NUT"
revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
revision = "f140a6486e521aad38f5917de355cbf147cc0496"
version = "v1.0.0"
[[projects]]
digest = "1:06a7dadb7b760767341ffb6c8d377238d68a1226f2b21b5d497d2e3f6ecf6b4e"
@@ -128,6 +156,78 @@
pruneopts = "NUT"
revision = "3befbb6ad0cc97d4c25d851e9528915809e1a22f"
[[projects]]
digest = "1:7313d6b9095eb86581402557bbf3871620cf82adf41853c5b9bee04b894290c7"
name = "github.com/h2non/parth"
packages = ["."]
pruneopts = "NUT"
revision = "b4df798d65426f8c8ab5ca5f9987aec5575d26c9"
version = "v2.0.1"
[[projects]]
digest = "1:adf097b949dbc1e452fbad15322c78651f6e7accb4661dffa38fed30273c5966"
name = "github.com/hashicorp/consul"
packages = ["api"]
pruneopts = "NUT"
revision = "ea5210a30e154f4da9a4c8e729b45b8ce7b9b92c"
version = "v1.4.4"
[[projects]]
digest = "1:f0d9d74edbd40fdeada436d5ac9cb5197407899af3fef85ff0137077ffe8ae19"
name = "github.com/hashicorp/errwrap"
packages = ["."]
pruneopts = "NUT"
revision = "8a6fb523712970c966eefc6b39ed2c5e74880354"
version = "v1.0.0"
[[projects]]
digest = "1:fff05cb0c34d2decaeb27bb6ab6b73a6947c3009d725160070da55f9511fd410"
name = "github.com/hashicorp/go-cleanhttp"
packages = ["."]
pruneopts = "NUT"
revision = "eda1e5db218aad1db63ca4642c8906b26bcf2744"
version = "v0.5.1"
[[projects]]
digest = "1:1cf16b098a70d6c02899608abbb567296d11c7b830635014dfe6124a02dc1369"
name = "github.com/hashicorp/go-immutable-radix"
packages = ["."]
pruneopts = "NUT"
revision = "27df80928bb34bb1b0d6d0e01b9e679902e7a6b5"
version = "v1.0.0"
[[projects]]
digest = "1:2ed138049ab373f696db2081ca48f15c5abdf20893803612a284f2bdce2bf443"
name = "github.com/hashicorp/go-multierror"
packages = ["."]
pruneopts = "NUT"
revision = "886a7fbe3eb1c874d46f623bfa70af45f425b3d1"
version = "v1.0.0"
[[projects]]
digest = "1:b3496707ba69dd873a870238644aa8ac259ee67fc4fd05caf37b608e7053e1f7"
name = "github.com/hashicorp/go-retryablehttp"
packages = ["."]
pruneopts = "NUT"
revision = "357460732517ec3b57c05c51443296bdd6df1874"
version = "v0.5.3"
[[projects]]
digest = "1:cdb5ce76cd7af19e3d2d5ba9b6458a2ee804f0d376711215dd3df5f51100d423"
name = "github.com/hashicorp/go-rootcerts"
packages = ["."]
pruneopts = "NUT"
revision = "63503fb4e1eca22f9ae0f90b49c5d5538a0e87eb"
version = "v1.0.0"
[[projects]]
digest = "1:6c69626c7aacae1e573084cdb6ed55713094ba56263f687e5d1750053bd08598"
name = "github.com/hashicorp/go-sockaddr"
packages = ["."]
pruneopts = "NUT"
revision = "c7188e74f6acae5a989bdc959aa779f8b9f42faf"
version = "v1.0.2"
[[projects]]
digest = "1:52094d0f8bdf831d1a2401e9b6fee5795fdc0b2a2d1f8bb1980834c289e79129"
name = "github.com/hashicorp/golang-lru"
@@ -139,6 +239,48 @@
revision = "7087cb70de9f7a8bc0a10c375cb0d2280a8edf9c"
version = "v0.5.1"
[[projects]]
digest = "1:39f543569bf189e228c84a294c50aca8ea56c82b3d9df5c9b788249907d7049a"
name = "github.com/hashicorp/hcl"
packages = [
".",
"hcl/ast",
"hcl/parser",
"hcl/scanner",
"hcl/strconv",
"hcl/token",
"json/parser",
"json/scanner",
"json/token",
]
pruneopts = "NUT"
revision = "8cb6e5b959231cc1119e43259c4a608f9c51a241"
version = "v1.0.0"
[[projects]]
digest = "1:acc81e4e4289587b257ccdfccbc6eaf16d4c2fb57dda73c6bb349bf50f02501f"
name = "github.com/hashicorp/serf"
packages = ["coordinate"]
pruneopts = "NUT"
revision = "15cfd05de3dffb3664aa37b06e91f970b825e380"
version = "v0.8.3"
[[projects]]
digest = "1:cded54cacfb6fdc86b916031e4113cbc50dfb55e92535651733604f1e3a8ce59"
name = "github.com/hashicorp/vault"
packages = [
"api",
"helper/compressutil",
"helper/consts",
"helper/hclutil",
"helper/jsonutil",
"helper/parseutil",
"helper/strutil",
]
pruneopts = "NUT"
revision = "36aa8c8dd1936e10ebd7a4c1d412ae0e6f7900bd"
version = "v1.1.0"
[[projects]]
digest = "1:aaa38889f11896ee3644d77e17dc7764cc47f5f3d3b488268df2af2b52541c5f"
name = "github.com/imdario/mergo"
@@ -148,19 +290,55 @@
version = "v0.3.7"
[[projects]]
branch = "master"
digest = "1:e0f096f9332ad5f84341de82db69fd098864b17c668333a1fbbffd1b846dcc2b"
name = "github.com/istio/glog"
packages = ["."]
pruneopts = "NUT"
revision = "2cc4b790554d1a0c48fcc3aeb891e3de70cf8de0"
[[projects]]
digest = "1:0243cffa4a3410f161ee613dfdd903a636d07e838a42d341da95d81f42cd1d41"
digest = "1:4e903242fe176238aaa469f59d7035f5abf2aa9acfefb8964ddd203651b574e9"
name = "github.com/json-iterator/go"
packages = ["."]
pruneopts = "NUT"
revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682"
revision = "0ff49de124c6f76f8494e194af75bde0f1a49a29"
version = "v1.1.6"
[[projects]]
digest = "1:2760a8fe9b7bcc95c397bc85b69bc7a11eed03c644b45e8c00c581c114486d3f"
name = "github.com/k0kubun/pp"
packages = ["."]
pruneopts = "NUT"
revision = "3d73dea227e0711e38b911ffa6fbafc8ff6b2991"
version = "v3.0.1"
[[projects]]
digest = "1:493282a1185f77368678d3886b7e999e37e920d22f69669545f1ee5ae10743a2"
name = "github.com/linkerd/linkerd2"
packages = [
"controller/gen/apis/serviceprofile",
"controller/gen/apis/serviceprofile/v1alpha1",
]
pruneopts = "NUT"
revision = "5e47cb150a33150e5aeddc6672d8a64701a970de"
version = "stable-2.2.1"
[[projects]]
digest = "1:0dbba7d4d4f3eeb01acd81af338ff4a3c4b0bb814d87368ea536e616f383240d"
name = "github.com/lyft/protoc-gen-validate"
packages = ["validate"]
pruneopts = "NUT"
revision = "ff6f7a9bc2e5fe006509b9f8c7594c41a953d50f"
version = "v0.0.14"
[[projects]]
digest = "1:9785a54031460a402fab4e4bbb3124c8dd9e9f7b1982109fef605cb91632d480"
name = "github.com/mattn/go-colorable"
packages = ["."]
pruneopts = "NUT"
revision = "3a70a971f94a22f2fa562ffcc7a0eb45f5daf045"
version = "v0.1.1"
[[projects]]
digest = "1:85edcc76fa95b8b312642905b56284f4fe5c42d8becb219481adba7e97d4f5c5"
name = "github.com/mattn/go-isatty"
packages = ["."]
pruneopts = "NUT"
revision = "c2a7a6ca930a4cd0bc33a3f298eb71960732a3a7"
version = "v0.0.7"
[[projects]]
digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6"
@@ -170,6 +348,30 @@
revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
version = "v1.0.1"
[[projects]]
digest = "1:f9f72e583aaacf1d1ac5d6121abd4afd3c690baa9e14e1d009df26bf831ba347"
name = "github.com/mitchellh/go-homedir"
packages = ["."]
pruneopts = "NUT"
revision = "af06845cf3004701891bf4fdb884bfe4920b3727"
version = "v1.1.0"
[[projects]]
digest = "1:e34decedbcec12332c5836d16a6838f864e0b43c5b4f9aa9d9a85101015f87c2"
name = "github.com/mitchellh/hashstructure"
packages = ["."]
pruneopts = "NUT"
revision = "a38c50148365edc8df43c1580c48fb2b3a1e9cd7"
version = "v1.0.0"
[[projects]]
digest = "1:a45ae66dea4c899d79fceb116accfa1892105c251f0dcd9a217ddc276b42ec68"
name = "github.com/mitchellh/mapstructure"
packages = ["."]
pruneopts = "NUT"
revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe"
version = "v1.1.2"
[[projects]]
digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f"
name = "github.com/modern-go/concurrent"
@@ -202,6 +404,25 @@
revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
version = "v2.0.1"
[[projects]]
digest = "1:122724025b9505074138089f78f543f643ae3a8fab6d5b9edf72cce4dd49cc91"
name = "github.com/pierrec/lz4"
packages = [
".",
"internal/xxh32",
]
pruneopts = "NUT"
revision = "315a67e90e415bcdaff33057da191569bf4d8479"
version = "v2.1.1"
[[projects]]
digest = "1:14715f705ff5dfe0ffd6571d7d201dd8e921030f8070321a79380d8ca4ec1a24"
name = "github.com/pkg/errors"
packages = ["."]
pruneopts = "NUT"
revision = "ba968bfe8b2f7e042a574c888954fccecfa385b4"
version = "v0.8.1"
[[projects]]
digest = "1:03bca087b180bf24c4f9060775f137775550a0834e18f0bca0520a868679dbd7"
name = "github.com/prometheus/client_golang"
@@ -230,22 +451,118 @@
"model",
]
pruneopts = "NUT"
revision = "cfeb6f9992ffa54aaa4f2170ade4067ee478b250"
version = "v0.2.0"
revision = "a82f4c12f983cc2649298185f296632953e50d3e"
version = "v0.3.0"
[[projects]]
branch = "master"
digest = "1:0a2e604afa3cbf53a1ddade2f240ee8472eded98856dd8c7cfbfea392ddbbfc7"
digest = "1:7813f698f171bd7132b123364433e1b0362f7fdb4ed7f4a20df595a4c2410f8a"
name = "github.com/prometheus/procfs"
packages = ["."]
pruneopts = "NUT"
revision = "8368d24ba045f26503eb745b624d930cbe214c79"
[[projects]]
digest = "1:38969f56c08bdf302a73a7c8adb0520dc9cb4cd54206cbe7c8a147da52cc0890"
name = "github.com/radovskyb/watcher"
packages = ["."]
pruneopts = "NUT"
revision = "d8b41ca2397a9b5cfc26adb10edbbcde40187a87"
version = "v1.0.6"
[[projects]]
digest = "1:09d61699d553a4e6ec998ad29816177b1f3d3ed0c18fe923d2c174ec065c99c8"
name = "github.com/ryanuber/go-glob"
packages = ["."]
pruneopts = "NUT"
revision = "51a8f68e6c24dc43f1e371749c89a267de4ebc53"
version = "v1.0.0"
[[projects]]
digest = "1:cad7db5ed31bef1f9e4429ad0927b40dbf31535167ec4c768fd5985565111ea5"
name = "github.com/solo-io/gloo"
packages = [
".",
"internal/util",
"iostats",
"nfs",
"xfs",
"projects/gloo/pkg/api/v1",
"projects/gloo/pkg/api/v1/plugins",
"projects/gloo/pkg/api/v1/plugins/aws",
"projects/gloo/pkg/api/v1/plugins/azure",
"projects/gloo/pkg/api/v1/plugins/consul",
"projects/gloo/pkg/api/v1/plugins/faultinjection",
"projects/gloo/pkg/api/v1/plugins/grpc",
"projects/gloo/pkg/api/v1/plugins/grpc_web",
"projects/gloo/pkg/api/v1/plugins/hcm",
"projects/gloo/pkg/api/v1/plugins/kubernetes",
"projects/gloo/pkg/api/v1/plugins/rest",
"projects/gloo/pkg/api/v1/plugins/retries",
"projects/gloo/pkg/api/v1/plugins/static",
"projects/gloo/pkg/api/v1/plugins/transformation",
]
pruneopts = "NUT"
revision = "bbced9601137e764853b2fad7ec3e2dc4c504e02"
revision = "f767e64f7ee60139ff79e2abb547cd149067da04"
version = "v0.13.17"
[[projects]]
digest = "1:4a5b267a6929e4c3980066d745f87993c118f7797373a43537fdc69b3ee2d37e"
name = "github.com/solo-io/go-utils"
packages = [
"contextutils",
"errors",
"kubeutils",
]
pruneopts = "NUT"
revision = "a27432d89f419897df796a17456410e49a9727c3"
version = "v0.7.11"
[[projects]]
digest = "1:1cc9a8be450b7d9e77f100c7662133bf7bc8f0832f4eed3542599d3f35d28c46"
name = "github.com/solo-io/solo-kit"
packages = [
"pkg/api/v1/clients",
"pkg/api/v1/clients/configmap",
"pkg/api/v1/clients/consul",
"pkg/api/v1/clients/factory",
"pkg/api/v1/clients/file",
"pkg/api/v1/clients/kube",
"pkg/api/v1/clients/kube/cache",
"pkg/api/v1/clients/kube/controller",
"pkg/api/v1/clients/kube/crd",
"pkg/api/v1/clients/kube/crd/client/clientset/versioned",
"pkg/api/v1/clients/kube/crd/client/clientset/versioned/scheme",
"pkg/api/v1/clients/kube/crd/client/clientset/versioned/typed/solo.io/v1",
"pkg/api/v1/clients/kube/crd/solo.io/v1",
"pkg/api/v1/clients/kubesecret",
"pkg/api/v1/clients/memory",
"pkg/api/v1/clients/vault",
"pkg/api/v1/eventloop",
"pkg/api/v1/reconcile",
"pkg/api/v1/resources",
"pkg/api/v1/resources/core",
"pkg/errors",
"pkg/utils/errutils",
"pkg/utils/fileutils",
"pkg/utils/hashutils",
"pkg/utils/kubeutils",
"pkg/utils/log",
"pkg/utils/protoutils",
"pkg/utils/stringutils",
]
pruneopts = "NUT"
revision = "ab46647c2845a4830d09db3690b3ace1b06845cd"
version = "v0.6.3"
[[projects]]
digest = "1:ba6f00e510774b2f1099d2f39a2ae36796ddbe406b02703b3395f26deb8d0f2c"
name = "github.com/solo-io/supergloo"
packages = [
"api/custom/kubepod",
"api/custom/linkerd",
"pkg/api/external/istio/authorization/v1alpha1",
"pkg/api/external/istio/networking/v1alpha3",
"pkg/api/v1",
]
pruneopts = "NUT"
revision = "cb84ba5d7bd1099c5e52c09fc9229d1ee0fed9f9"
version = "v0.3.11"
[[projects]]
digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779"
@@ -255,6 +572,36 @@
revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
version = "v1.0.3"
[[projects]]
branch = "master"
digest = "1:755d83f10748295646cf74cd19611ebffad37807e49632feb8e3f47d43210c3d"
name = "github.com/stefanprodan/klog"
packages = ["."]
pruneopts = "NUT"
revision = "9cbb78b20423182f9e5b2a214dd255f5e117d2d1"
[[projects]]
digest = "1:1349e632a9915b7075f74c13474bfcae2594750c390d3c0b236e48bf6bce3fa2"
name = "go.opencensus.io"
packages = [
".",
"internal",
"internal/tagencoding",
"metric/metricdata",
"metric/metricproducer",
"resource",
"stats",
"stats/internal",
"stats/view",
"tag",
"trace",
"trace/internal",
"trace/tracestate",
]
pruneopts = "NUT"
revision = "75c0cca22312e51bfd4fafdbe9197ae399e18b38"
version = "v0.20.2"
[[projects]]
digest = "1:22f696cee54865fb8e9ff91df7b633f6b8f22037a8015253c6b6a71ca82219c7"
name = "go.uber.org/atomic"
@@ -288,15 +635,15 @@
[[projects]]
branch = "master"
digest = "1:058e9504b9a79bfe86092974d05bb3298d2aa0c312d266d43148de289a5065d9"
digest = "1:bbe51412d9915d64ffaa96b51d409e070665efc5194fcf145c4a27d4133107a4"
name = "golang.org/x/crypto"
packages = ["ssh/terminal"]
pruneopts = "NUT"
revision = "8dd112bcdc25174059e45e07517d9fc663123347"
revision = "b43e412143f90fca62516c457cae5a8dc1595586"
[[projects]]
branch = "master"
digest = "1:e3477b53a5c2fb71a7c9688e9b3d58be702807a5a88def8b9a327259d46e4979"
digest = "1:c86c292c268416012ab237c22c2c69fdd04cb891815b40343e75210472198455"
name = "golang.org/x/net"
packages = [
"context",
@@ -307,11 +654,11 @@
"idna",
]
pruneopts = "NUT"
revision = "16b79f2e4e95ea23b2bf9903c9809ff7b013ce85"
revision = "1da14a5a36f220ea3f03470682b737b1dfd5de22"
[[projects]]
branch = "master"
digest = "1:17ee74a4d9b6078611784b873cdbfe91892d2c73052c430724e66fcc015b6c7b"
digest = "1:3121d742fbe48670a16d98b6da4693501fc33cd76d69ed6f35850c564f255c65"
name = "golang.org/x/oauth2"
packages = [
".",
@@ -321,18 +668,18 @@
"jwt",
]
pruneopts = "NUT"
revision = "e64efc72b421e893cbf63f17ba2221e7d6d0b0f3"
revision = "9f3314589c9a9136388751d9adae6b0ed400978a"
[[projects]]
branch = "master"
digest = "1:a0d91ab4d23badd4e64e115c6e6ba7dd56bd3cde5d287845822fb2599ac10236"
digest = "1:bd7da85408c51d6ab079e1acc5a2872fdfbea42e845b8bbb538c3fac6ef43d2a"
name = "golang.org/x/sys"
packages = [
"unix",
"windows",
]
pruneopts = "NUT"
revision = "30e92a19ae4a77dde818b8c3d41d51e4850cba12"
revision = "f0ce4c0180bef7e9c51babed693a6e47fdd8962f"
[[projects]]
digest = "1:e7071ed636b5422cc51c0e3a6cebc229d6c9fffc528814b519a980641422d619"
@@ -363,16 +710,15 @@
name = "golang.org/x/time"
packages = ["rate"]
pruneopts = "NUT"
revision = "85acf8d2951cb2a3bde7632f9ff273ef0379bcbd"
revision = "9d24e82272b4f38b78bc8cff74fa936d31ccd8ef"
[[projects]]
branch = "master"
digest = "1:e46d8e20161401a9cf8765dfa428494a3492a0b56fe114156b7da792bf41ba78"
digest = "1:be1ab6d2b333b1d487c01f1328aef9dc76cee4ff4f780775a552d2a1653f0207"
name = "golang.org/x/tools"
packages = [
"go/ast/astutil",
"go/gcexportdata",
"go/internal/cgo",
"go/internal/gcimporter",
"go/internal/packagesdriver",
"go/packages",
@@ -384,10 +730,10 @@
"internal/semver",
]
pruneopts = "NUT"
revision = "f8c04913dfb7b2339a756441456bdbe0af6eb508"
revision = "6732636ccdfd99c4301d1d1ac2307f091331f767"
[[projects]]
digest = "1:d395d49d784dd3a11938a3e85091b6570664aa90ff2767a626565c6c130fa7e9"
digest = "1:a4824d8df1fd1f63c6b3690bf4801d6ff1722adcb3e13c0489196a7e248d868a"
name = "google.golang.org/appengine"
packages = [
".",
@@ -402,8 +748,16 @@
"urlfetch",
]
pruneopts = "NUT"
revision = "e9657d882bb81064595ca3b56cbe2546bbabf7b1"
version = "v1.4.0"
revision = "54a98f90d1c46b7731eb8fb305d2a321c30ef610"
version = "v1.5.0"
[[projects]]
digest = "1:fe9eb931d7b59027c4a3467f7edc16cc8552dac5328039bec05045143c18e1ce"
name = "gopkg.in/h2non/gock.v1"
packages = ["."]
pruneopts = "NUT"
revision = "ba88c4862a27596539531ce469478a91bc5a0511"
version = "v1.0.14"
[[projects]]
digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a"
@@ -422,7 +776,7 @@
version = "v2.2.2"
[[projects]]
digest = "1:8960ef753a87391086a307122d23cd5007cee93c28189437e4f1b6ed72bffc50"
digest = "1:c453ddc26bdab1e4267683a588ad9046e48d803a73f124fe2927adbab6ff02a5"
name = "k8s.io/api"
packages = [
"admissionregistration/v1alpha1",
@@ -430,16 +784,19 @@
"apps/v1",
"apps/v1beta1",
"apps/v1beta2",
"auditregistration/v1alpha1",
"authentication/v1",
"authentication/v1beta1",
"authorization/v1",
"authorization/v1beta1",
"autoscaling/v1",
"autoscaling/v2beta1",
"autoscaling/v2beta2",
"batch/v1",
"batch/v1beta1",
"batch/v2alpha1",
"certificates/v1beta1",
"coordination/v1beta1",
"core/v1",
"events/v1beta1",
"extensions/v1beta1",
@@ -456,11 +813,25 @@
"storage/v1beta1",
]
pruneopts = "NUT"
revision = "072894a440bdee3a891dea811fe42902311cd2a3"
version = "kubernetes-1.11.0"
revision = "05914d821849570fba9eacfb29466f2d8d3cd229"
version = "kubernetes-1.13.1"
[[projects]]
digest = "1:83b01e3d6f85c4e911de84febd69a2d3ece614c5a4a518fbc2b5d59000645980"
digest = "1:501a73762f1b2c4530206ffb657b39d8b58a9b40280d30e4509ae1232767962c"
name = "k8s.io/apiextensions-apiserver"
packages = [
"pkg/apis/apiextensions",
"pkg/apis/apiextensions/v1beta1",
"pkg/client/clientset/clientset",
"pkg/client/clientset/clientset/scheme",
"pkg/client/clientset/clientset/typed/apiextensions/v1beta1",
]
pruneopts = "NUT"
revision = "0fe22c71c47604641d9aa352c785b7912c200562"
version = "kubernetes-1.13.1"
[[projects]]
digest = "1:5ac33dce66ac11d4f41c157be7f13ba30c968c74d25a3a3a0a1eddf44b6b2176"
name = "k8s.io/apimachinery"
packages = [
"pkg/api/errors",
@@ -492,6 +863,7 @@
"pkg/util/intstr",
"pkg/util/json",
"pkg/util/mergepatch",
"pkg/util/naming",
"pkg/util/net",
"pkg/util/runtime",
"pkg/util/sets",
@@ -507,15 +879,61 @@
"third_party/forked/golang/reflect",
]
pruneopts = "NUT"
revision = "103fd098999dc9c0c88536f5c9ad2e5da39373ae"
version = "kubernetes-1.11.0"
revision = "2b1284ed4c93a43499e781493253e2ac5959c4fd"
version = "kubernetes-1.13.1"
[[projects]]
digest = "1:c7d6cf5e28c377ab4000b94b6b9ff562c4b13e7e8b948ad943f133c5104be011"
digest = "1:ef9bda0e29102ac26750517500a2cb0bd7be69ba21ad267ab89a0b35d035328b"
name = "k8s.io/client-go"
packages = [
"discovery",
"discovery/fake",
"informers",
"informers/admissionregistration",
"informers/admissionregistration/v1alpha1",
"informers/admissionregistration/v1beta1",
"informers/apps",
"informers/apps/v1",
"informers/apps/v1beta1",
"informers/apps/v1beta2",
"informers/auditregistration",
"informers/auditregistration/v1alpha1",
"informers/autoscaling",
"informers/autoscaling/v1",
"informers/autoscaling/v2beta1",
"informers/autoscaling/v2beta2",
"informers/batch",
"informers/batch/v1",
"informers/batch/v1beta1",
"informers/batch/v2alpha1",
"informers/certificates",
"informers/certificates/v1beta1",
"informers/coordination",
"informers/coordination/v1beta1",
"informers/core",
"informers/core/v1",
"informers/events",
"informers/events/v1beta1",
"informers/extensions",
"informers/extensions/v1beta1",
"informers/internalinterfaces",
"informers/networking",
"informers/networking/v1",
"informers/policy",
"informers/policy/v1beta1",
"informers/rbac",
"informers/rbac/v1",
"informers/rbac/v1alpha1",
"informers/rbac/v1beta1",
"informers/scheduling",
"informers/scheduling/v1alpha1",
"informers/scheduling/v1beta1",
"informers/settings",
"informers/settings/v1alpha1",
"informers/storage",
"informers/storage/v1",
"informers/storage/v1alpha1",
"informers/storage/v1beta1",
"kubernetes",
"kubernetes/fake",
"kubernetes/scheme",
@@ -529,6 +947,8 @@
"kubernetes/typed/apps/v1beta1/fake",
"kubernetes/typed/apps/v1beta2",
"kubernetes/typed/apps/v1beta2/fake",
"kubernetes/typed/auditregistration/v1alpha1",
"kubernetes/typed/auditregistration/v1alpha1/fake",
"kubernetes/typed/authentication/v1",
"kubernetes/typed/authentication/v1/fake",
"kubernetes/typed/authentication/v1beta1",
@@ -541,6 +961,8 @@
"kubernetes/typed/autoscaling/v1/fake",
"kubernetes/typed/autoscaling/v2beta1",
"kubernetes/typed/autoscaling/v2beta1/fake",
"kubernetes/typed/autoscaling/v2beta2",
"kubernetes/typed/autoscaling/v2beta2/fake",
"kubernetes/typed/batch/v1",
"kubernetes/typed/batch/v1/fake",
"kubernetes/typed/batch/v1beta1",
@@ -549,6 +971,8 @@
"kubernetes/typed/batch/v2alpha1/fake",
"kubernetes/typed/certificates/v1beta1",
"kubernetes/typed/certificates/v1beta1/fake",
"kubernetes/typed/coordination/v1beta1",
"kubernetes/typed/coordination/v1beta1/fake",
"kubernetes/typed/core/v1",
"kubernetes/typed/core/v1/fake",
"kubernetes/typed/events/v1beta1",
@@ -577,6 +1001,34 @@
"kubernetes/typed/storage/v1alpha1/fake",
"kubernetes/typed/storage/v1beta1",
"kubernetes/typed/storage/v1beta1/fake",
"listers/admissionregistration/v1alpha1",
"listers/admissionregistration/v1beta1",
"listers/apps/v1",
"listers/apps/v1beta1",
"listers/apps/v1beta2",
"listers/auditregistration/v1alpha1",
"listers/autoscaling/v1",
"listers/autoscaling/v2beta1",
"listers/autoscaling/v2beta2",
"listers/batch/v1",
"listers/batch/v1beta1",
"listers/batch/v2alpha1",
"listers/certificates/v1beta1",
"listers/coordination/v1beta1",
"listers/core/v1",
"listers/events/v1beta1",
"listers/extensions/v1beta1",
"listers/networking/v1",
"listers/policy/v1beta1",
"listers/rbac/v1",
"listers/rbac/v1alpha1",
"listers/rbac/v1beta1",
"listers/scheduling/v1alpha1",
"listers/scheduling/v1beta1",
"listers/settings/v1alpha1",
"listers/storage/v1",
"listers/storage/v1alpha1",
"listers/storage/v1beta1",
"pkg/apis/clientauthentication",
"pkg/apis/clientauthentication/v1alpha1",
"pkg/apis/clientauthentication/v1beta1",
@@ -609,11 +1061,11 @@
"util/workqueue",
]
pruneopts = "NUT"
revision = "7d04d0e2a0a1a4d4a1cd6baa432a2301492e4e65"
version = "kubernetes-1.11.0"
revision = "8d9ed539ba3134352c586810e749e58df4e94e4f"
version = "kubernetes-1.13.1"
[[projects]]
digest = "1:8ab487a323486c8bbbaa3b689850487fdccc6cbea8690620e083b2d230a4447e"
digest = "1:dc1ae99dcab96913d81ae970b1f7a7411a54199b14bfb17a7e86f9a56979c720"
name = "k8s.io/code-generator"
packages = [
"cmd/client-gen",
@@ -637,12 +1089,12 @@
"pkg/util",
]
pruneopts = "T"
revision = "6702109cc68eb6fe6350b83e14407c8d7309fd1a"
version = "kubernetes-1.11.0"
revision = "c2090bec4d9b1fb25de3812f868accc2bc9ecbae"
version = "kubernetes-1.13.1"
[[projects]]
branch = "master"
digest = "1:61024ed77a53ac618effed55043bf6a9afbdeb64136bd6a5b0c992d4c0363766"
digest = "1:39912eb5f8eaf46486faae0839586c27c93423e552f76875defa048f52c15c15"
name = "k8s.io/gengo"
packages = [
"args",
@@ -655,23 +1107,32 @@
"types",
]
pruneopts = "NUT"
revision = "0689ccc1d7d65d9dd1bedcc3b0b1ed7df91ba266"
[[projects]]
digest = "1:c263611800c3a97991dbcf9d3bc4de390f6224aaa8ca0a7226a9d734f65a416a"
name = "k8s.io/klog"
packages = ["."]
pruneopts = "NUT"
revision = "71442cd4037d612096940ceb0f3fec3f7fff66e0"
version = "v0.2.0"
revision = "e17681d19d3ac4837a019ece36c2a0ec31ffe985"
[[projects]]
branch = "master"
digest = "1:03a96603922fc1f6895ae083e1e16d943b55ef0656b56965351bd87e7d90485f"
digest = "1:755d83f10748295646cf74cd19611ebffad37807e49632feb8e3f47d43210c3d"
name = "k8s.io/klog"
packages = ["."]
pruneopts = "NUT"
revision = "9cbb78b20423182f9e5b2a214dd255f5e117d2d1"
source = "github.com/stefanprodan/klog"
[[projects]]
branch = "master"
digest = "1:42674e29bf0cf4662d49bd9528e24b9ecc4895b32d0be281f9cf04d3a7671846"
name = "k8s.io/kube-openapi"
packages = ["pkg/util/proto"]
pruneopts = "NUT"
revision = "b3a7cee44a305be0a69e1b9ac03018307287e1b0"
revision = "6b3d3b2d5666c5912bab8b7bf26bf50f75a8f887"
[[projects]]
digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c"
name = "sigs.k8s.io/yaml"
packages = ["."]
pruneopts = "NUT"
revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480"
version = "v1.1.0"
[solve-meta]
analyzer-name = "dep"
@@ -679,11 +1140,20 @@
input-imports = [
"github.com/google/go-cmp/cmp",
"github.com/google/go-cmp/cmp/cmpopts",
"github.com/istio/glog",
"github.com/prometheus/client_golang/prometheus",
"github.com/prometheus/client_golang/prometheus/promhttp",
"github.com/solo-io/gloo/projects/gloo/pkg/api/v1",
"github.com/solo-io/solo-kit/pkg/api/v1/clients",
"github.com/solo-io/solo-kit/pkg/api/v1/clients/factory",
"github.com/solo-io/solo-kit/pkg/api/v1/clients/kube",
"github.com/solo-io/solo-kit/pkg/api/v1/clients/memory",
"github.com/solo-io/solo-kit/pkg/api/v1/resources/core",
"github.com/solo-io/solo-kit/pkg/errors",
"github.com/solo-io/supergloo/pkg/api/v1",
"github.com/stefanprodan/klog",
"go.uber.org/zap",
"go.uber.org/zap/zapcore",
"gopkg.in/h2non/gock.v1",
"k8s.io/api/apps/v1",
"k8s.io/api/autoscaling/v1",
"k8s.io/api/autoscaling/v2beta1",

View File

@@ -11,31 +11,37 @@ required = [
name = "go.uber.org/zap"
version = "v1.9.1"
[[constraint]]
name = "gopkg.in/h2non/gock.v1"
version = "v1.0.14"
[[override]]
name = "gopkg.in/yaml.v2"
version = "v2.2.1"
[[override]]
name = "k8s.io/api"
version = "kubernetes-1.11.0"
version = "kubernetes-1.13.1"
[[override]]
name = "k8s.io/apimachinery"
version = "kubernetes-1.11.0"
version = "kubernetes-1.13.1"
[[override]]
name = "k8s.io/code-generator"
version = "kubernetes-1.11.0"
version = "kubernetes-1.13.1"
[[override]]
name = "k8s.io/client-go"
version = "kubernetes-1.11.0"
version = "kubernetes-1.13.1"
[[override]]
name = "github.com/json-iterator/go"
# This is the commit at which k8s depends on this in 1.11
# It seems to be broken at HEAD.
revision = "f2b4162afba35581b6d4a50d3b8f34e33c144682"
name = "k8s.io/apiextensions-apiserver"
version = "kubernetes-1.13.1"
[[override]]
name = "k8s.io/apiserver"
version = "kubernetes-1.13.1"
[[constraint]]
name = "github.com/prometheus/client_golang"
@@ -46,8 +52,8 @@ required = [
version = "v0.2.0"
[[override]]
name = "github.com/golang/glog"
source = "github.com/istio/glog"
name = "k8s.io/klog"
source = "github.com/stefanprodan/klog"
[prune]
go-tests = true
@@ -58,3 +64,11 @@ required = [
name = "k8s.io/code-generator"
unused-packages = false
non-go = false
[[constraint]]
name = "github.com/solo-io/supergloo"
version = "v0.3.11"
[[constraint]]
name = "github.com/solo-io/solo-kit"
version = "v0.6.3"

5
MAINTAINERS Normal file
View File

@@ -0,0 +1,5 @@
The maintainers are generally available in Slack at
https://weave-community.slack.com/messages/flagger/ (obtain an invitation
at https://slack.weave.works/).
Stefan Prodan, Weaveworks <stefan@weave.works> (Slack: @stefan Twitter: @stefanprodan)

View File

@@ -4,6 +4,7 @@ VERSION_MINOR:=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4
PATCH:=$(shell grep 'VERSION' pkg/version/version.go | awk '{ print $$4 }' | tr -d '"' | awk -F. '{print $$NF}')
SOURCE_DIRS = cmd pkg/apis pkg/controller pkg/server pkg/logging pkg/version
LT_VERSION?=$(shell grep 'VERSION' cmd/loadtester/main.go | awk '{ print $$4 }' | tr -d '"' | head -n1)
TS=$(shell date +%Y-%m-%d_%H-%M-%S)
run:
go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info \
@@ -11,12 +12,36 @@ run:
-slack-url=https://hooks.slack.com/services/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 \
-slack-channel="devops-alerts"
run-appmesh:
go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=appmesh \
-metrics-server=http://acfc235624ca911e9a94c02c4171f346-1585187926.us-west-2.elb.amazonaws.com:9090 \
-slack-url=https://hooks.slack.com/services/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 \
-slack-channel="devops-alerts"
run-nginx:
go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=nginx -namespace=nginx \
-metrics-server=http://prometheus-weave.istio.weavedx.com \
-slack-url=https://hooks.slack.com/services/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 \
-slack-channel="devops-alerts"
run-smi:
go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=smi:istio -namespace=smi \
-metrics-server=https://prometheus.istio.weavedx.com \
-slack-url=https://hooks.slack.com/services/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 \
-slack-channel="devops-alerts"
run-gloo:
go run cmd/flagger/* -kubeconfig=$$HOME/.kube/config -log-level=info -mesh-provider=gloo -namespace=gloo \
-metrics-server=https://prometheus.istio.weavedx.com \
-slack-url=https://hooks.slack.com/services/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 \
-slack-channel="devops-alerts"
build:
docker build -t stefanprodan/flagger:$(TAG) . -f Dockerfile
docker build -t weaveworks/flagger:$(TAG) . -f Dockerfile
push:
docker tag stefanprodan/flagger:$(TAG) quay.io/stefanprodan/flagger:$(VERSION)
docker push quay.io/stefanprodan/flagger:$(VERSION)
docker tag weaveworks/flagger:$(TAG) weaveworks/flagger:$(VERSION)
docker push weaveworks/flagger:$(VERSION)
fmt:
gofmt -l -s -w $(SOURCE_DIRS)
@@ -33,7 +58,7 @@ test: test-fmt test-codegen
helm-package:
cd charts/ && helm package ./*
mv charts/*.tgz docs/
helm repo index docs --url https://stefanprodan.github.io/flagger --merge ./docs/index.yaml
helm repo index docs --url https://weaveworks.github.io/flagger --merge ./docs/index.yaml
helm-up:
helm upgrade --install flagger ./charts/flagger --namespace=istio-system --set crd.create=false
@@ -82,5 +107,5 @@ reset-test:
kubectl apply -f ./artifacts/canaries
loadtester-push:
docker build -t quay.io/stefanprodan/flagger-loadtester:$(LT_VERSION) . -f Dockerfile.loadtester
docker push quay.io/stefanprodan/flagger-loadtester:$(LT_VERSION)
docker build -t weaveworks/flagger-loadtester:$(LT_VERSION) . -f Dockerfile.loadtester
docker push weaveworks/flagger-loadtester:$(LT_VERSION)

127
README.md
View File

@@ -1,71 +1,58 @@
# flagger
[![build](https://travis-ci.org/stefanprodan/flagger.svg?branch=master)](https://travis-ci.org/stefanprodan/flagger)
[![report](https://goreportcard.com/badge/github.com/stefanprodan/flagger)](https://goreportcard.com/report/github.com/stefanprodan/flagger)
[![codecov](https://codecov.io/gh/stefanprodan/flagger/branch/master/graph/badge.svg)](https://codecov.io/gh/stefanprodan/flagger)
[![license](https://img.shields.io/github/license/stefanprodan/flagger.svg)](https://github.com/stefanprodan/flagger/blob/master/LICENSE)
[![release](https://img.shields.io/github/release/stefanprodan/flagger/all.svg)](https://github.com/stefanprodan/flagger/releases)
[![build](https://travis-ci.org/weaveworks/flagger.svg?branch=master)](https://travis-ci.org/weaveworks/flagger)
[![report](https://goreportcard.com/badge/github.com/weaveworks/flagger)](https://goreportcard.com/report/github.com/weaveworks/flagger)
[![codecov](https://codecov.io/gh/weaveworks/flagger/branch/master/graph/badge.svg)](https://codecov.io/gh/weaveworks/flagger)
[![license](https://img.shields.io/github/license/weaveworks/flagger.svg)](https://github.com/weaveworks/flagger/blob/master/LICENSE)
[![release](https://img.shields.io/github/release/weaveworks/flagger/all.svg)](https://github.com/weaveworks/flagger/releases)
Flagger is a Kubernetes operator that automates the promotion of canary deployments
using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
The canary analysis can be extended with webhooks for running acceptance tests,
using Istio, App Mesh or NGINX routing for traffic shifting and Prometheus metrics for canary analysis.
The canary analysis can be extended with webhooks for running acceptance tests,
load tests or any other custom validation.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance
indicators like HTTP requests success rate, requests average duration and pods health.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance
indicators like HTTP requests success rate, requests average duration and pods health.
Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack.
![flagger-overview](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-overview.png)
![flagger-overview](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-overview.png)
### Documentation
## Documentation
Flagger documentation can be found at [docs.flagger.app](https://docs.flagger.app)
* Install
* [Flagger install on Kubernetes](https://docs.flagger.app/install/flagger-install-on-kubernetes)
* [Flagger install on GKE](https://docs.flagger.app/install/flagger-install-on-google-cloud)
* [Flagger install on Kubernetes](https://docs.flagger.app/install/flagger-install-on-kubernetes)
* [Flagger install on GKE Istio](https://docs.flagger.app/install/flagger-install-on-google-cloud)
* [Flagger install on EKS App Mesh](https://docs.flagger.app/install/flagger-install-on-eks-appmesh)
* [Flagger install with SuperGloo](https://docs.flagger.app/install/flagger-install-with-supergloo)
* How it works
* [Canary custom resource](https://docs.flagger.app/how-it-works#canary-custom-resource)
* [Routing](https://docs.flagger.app/how-it-works#istio-routing)
* [Canary deployment stages](https://docs.flagger.app/how-it-works#canary-deployment)
* [Canary analysis](https://docs.flagger.app/how-it-works#canary-analysis)
* [HTTP metrics](https://docs.flagger.app/how-it-works#http-metrics)
* [Custom metrics](https://docs.flagger.app/how-it-works#custom-metrics)
* [Webhooks](https://docs.flagger.app/how-it-works#webhooks)
* [Load testing](https://docs.flagger.app/how-it-works#load-testing)
* [Canary custom resource](https://docs.flagger.app/how-it-works#canary-custom-resource)
* [Routing](https://docs.flagger.app/how-it-works#istio-routing)
* [Canary deployment stages](https://docs.flagger.app/how-it-works#canary-deployment)
* [Canary analysis](https://docs.flagger.app/how-it-works#canary-analysis)
* [HTTP metrics](https://docs.flagger.app/how-it-works#http-metrics)
* [Custom metrics](https://docs.flagger.app/how-it-works#custom-metrics)
* [Webhooks](https://docs.flagger.app/how-it-works#webhooks)
* [Load testing](https://docs.flagger.app/how-it-works#load-testing)
* Usage
* [Canary promotions and rollbacks](https://docs.flagger.app/usage/progressive-delivery)
* [Monitoring](https://docs.flagger.app/usage/monitoring)
* [Alerting](https://docs.flagger.app/usage/alerting)
* [Istio canary deployments](https://docs.flagger.app/usage/progressive-delivery)
* [Istio A/B testing](https://docs.flagger.app/usage/ab-testing)
* [App Mesh canary deployments](https://docs.flagger.app/usage/appmesh-progressive-delivery)
* [NGINX ingress controller canary deployments](https://docs.flagger.app/usage/nginx-progressive-delivery)
* [Gloo Canary Deployments](https://docs.flagger.app/usage/gloo-progressive-delivery.md)
* [Monitoring](https://docs.flagger.app/usage/monitoring)
* [Alerting](https://docs.flagger.app/usage/alerting)
* Tutorials
* [Canary deployments with Helm charts and Weave Flux](https://docs.flagger.app/tutorials/canary-helm-gitops)
* [Canary deployments with Helm charts and Weave Flux](https://docs.flagger.app/tutorials/canary-helm-gitops)
### Install
Before installing Flagger make sure you have Istio setup up with Prometheus enabled.
If you are new to Istio you can follow my [Istio service mesh walk-through](https://github.com/stefanprodan/istio-gke).
Deploy Flagger in the `istio-system` namespace using Helm:
```bash
# add the Helm repository
helm repo add flagger https://flagger.app
# install or upgrade
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set metricsServer=http://prometheus.istio-system:9090
```
Flagger is compatible with Kubernetes >1.11.0 and Istio >1.0.0.
### Canary CRD
## Canary CRD
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
then creates a series of objects (Kubernetes deployments, ClusterIP services and Istio virtual services).
then creates a series of objects (Kubernetes deployments, ClusterIP services and Istio or App Mesh virtual services).
These objects expose the application on the mesh and drive the canary analysis and promotion.
Flagger keeps track of ConfigMaps and Secrets referenced by a Kubernetes Deployment and triggers a canary analysis if any of those objects change.
Flagger keeps track of ConfigMaps and Secrets referenced by a Kubernetes Deployment and triggers a canary analysis if any of those objects change.
When promoting a workload in production, both code (container images) and configuration (config maps and secrets) are being synchronised.
For a deployment named _podinfo_, a canary promotion can be defined using Flagger's custom resource:
@@ -106,17 +93,12 @@ spec:
# HTTP rewrite (optional)
rewrite:
uri: /
# Envoy timeout and retry policy (optional)
headers:
request:
add:
x-envoy-upstream-rq-timeout-ms: "15000"
x-envoy-max-retries: "10"
x-envoy-retry-on: "gateway-error,connect-failure,refused-stream"
# cross-origin resource sharing policy (optional)
corsPolicy:
allowOrigin:
- example.com
# request timeout (optional)
timeout: 5s
# promote the canary without analysing it (default false)
skipAnalysis: false
# define the canary analysis timing and KPIs
@@ -133,13 +115,13 @@ spec:
stepWeight: 5
# Istio Prometheus checks
metrics:
# builtin Istio checks
- name: istio_requests_total
# builtin checks
- name: request-success-rate
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: istio_request_duration_seconds_bucket
- name: request-duration
# maximum req duration P99
# milliseconds
threshold: 500
@@ -165,32 +147,45 @@ spec:
For more details on how the canary analysis and promotion works please [read the docs](https://docs.flagger.app/how-it-works).
### Roadmap
## Features
* Add A/B testing capabilities using fixed routing based on HTTP headers and cookies match conditions
* Integrate with other service mesh technologies like AWS AppMesh and Linkerd v2
| Feature | Istio | App Mesh | SuperGloo | NGINX Ingress |
| -------------------------------------------- | ------------------ | ------------------ |------------------ |------------------ |
| Canary deployments (weighted traffic) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| A/B testing (headers and cookies filters) | :heavy_check_mark: | :heavy_minus_sign: | :heavy_minus_sign: | :heavy_check_mark: |
| Load testing | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Webhooks (custom acceptance tests) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Request success rate check (L7 metric) | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Request duration check (L7 metric) | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_check_mark: |
| Custom promql checks | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: |
| Ingress gateway (CORS, retries and timeouts) | :heavy_check_mark: | :heavy_minus_sign: | :heavy_check_mark: | :heavy_check_mark: |
## Roadmap
* Integrate with other service mesh technologies like Linkerd v2
* Add support for comparing the canary metrics to the primary ones and do the validation based on the derivation between the two
### Contributing
## Contributing
Flagger is Apache 2.0 licensed and accepts contributions via GitHub pull requests.
When submitting bug reports please include as much details as possible:
When submitting bug reports please include as much details as possible:
* which Flagger version
* which Flagger CRD version
* which Kubernetes/Istio version
* what configuration (canary, virtual service and workloads definitions)
* what happened (Flagger, Istio Pilot and Proxy logs)
### Getting Help
## Getting Help
If you have any questions about Flagger and progressive delivery:
* Read the Flagger [docs](https://docs.flagger.app).
* Invite yourself to the [Weave community slack](https://slack.weave.works/)
* Invite yourself to the [Weave community slack](https://slack.weave.works/)
and join the [#flagger](https://weave-community.slack.com/messages/flagger/) channel.
* Join the [Weave User Group](https://www.meetup.com/pro/Weave/) and get invited to online talks,
* Join the [Weave User Group](https://www.meetup.com/pro/Weave/) and get invited to online talks,
hands-on training and meetups in your area.
* File an [issue](https://github.com/stefanprodan/flagger/issues/new).
* File an [issue](https://github.com/weaveworks/flagger/issues/new).
Your feedback is always welcome!

View File

@@ -0,0 +1,62 @@
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: abtest
namespace: test
spec:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: abtest
# the maximum time in seconds for the canary deployment
# to make progress before it is rollback (default 600s)
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: abtest
service:
# container port
port: 9898
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
# Istio virtual service host names (optional)
hosts:
- abtest.istio.weavedx.com
canaryAnalysis:
# schedule interval (default 60s)
interval: 10s
# max number of failed metric checks before rollback
threshold: 10
# total number of iterations
iterations: 10
# canary match condition
match:
- headers:
user-agent:
regex: "^(?!.*Chrome)(?=.*\bSafari\b).*$"
- headers:
cookie:
regex: "^(.*?;)?(type=insider)(;.*)?$"
metrics:
- name: request-success-rate
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: request-duration
# maximum req duration P99
# milliseconds
threshold: 500
interval: 30s
# external checks (optional)
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 1m -q 10 -c 2 -H 'Cookie: type=insider' http://podinfo.test:9898/"

View File

@@ -0,0 +1,67 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: abtest
namespace: test
labels:
app: abtest
spec:
minReadySeconds: 5
revisionHistoryLimit: 5
progressDeadlineSeconds: 60
strategy:
rollingUpdate:
maxUnavailable: 0
type: RollingUpdate
selector:
matchLabels:
app: abtest
template:
metadata:
annotations:
prometheus.io/scrape: "true"
labels:
app: abtest
spec:
containers:
- name: podinfod
image: quay.io/stefanprodan/podinfo:1.4.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9898
name: http
protocol: TCP
command:
- ./podinfo
- --port=9898
- --level=info
- --random-delay=false
- --random-error=false
env:
- name: PODINFO_UI_COLOR
value: blue
livenessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/healthz
initialDelaySeconds: 5
timeoutSeconds: 5
readinessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/readyz
initialDelaySeconds: 5
timeoutSeconds: 5
resources:
limits:
cpu: 2000m
memory: 512Mi
requests:
cpu: 100m
memory: 64Mi

View File

@@ -0,0 +1,19 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: abtest
namespace: test
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: abtest
minReplicas: 2
maxReplicas: 4
metrics:
- type: Resource
resource:
name: cpu
# scale up if usage is above
# 99% of the requested CPU (100m)
targetAverageUtilization: 99

View File

@@ -0,0 +1,50 @@
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
# the maximum time in seconds for the canary deployment
# to make progress before it is rollback (default 600s)
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: podinfo
service:
# container port
port: 9898
# App Mesh reference
meshName: global
# define the canary analysis timing and KPIs
canaryAnalysis:
# schedule interval (default 60s)
interval: 10s
# max number of failed metric checks before rollback
threshold: 10
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 5
# App Mesh Prometheus checks
metrics:
- name: request-success-rate
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
# external checks (optional)
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"

View File

@@ -0,0 +1,65 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: podinfo
namespace: test
labels:
app: podinfo
spec:
minReadySeconds: 5
revisionHistoryLimit: 5
progressDeadlineSeconds: 60
strategy:
rollingUpdate:
maxUnavailable: 0
type: RollingUpdate
selector:
matchLabels:
app: podinfo
template:
metadata:
annotations:
prometheus.io/scrape: "true"
labels:
app: podinfo
spec:
containers:
- name: podinfod
image: quay.io/stefanprodan/podinfo:1.4.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9898
name: http
protocol: TCP
command:
- ./podinfo
- --port=9898
- --level=info
env:
- name: PODINFO_UI_COLOR
value: blue
livenessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/healthz
initialDelaySeconds: 5
timeoutSeconds: 5
readinessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/readyz
initialDelaySeconds: 5
timeoutSeconds: 5
resources:
limits:
cpu: 2000m
memory: 512Mi
requests:
cpu: 100m
memory: 64Mi

View File

@@ -0,0 +1,6 @@
apiVersion: appmesh.k8s.aws/v1beta1
kind: Mesh
metadata:
name: global
spec:
serviceDiscoveryType: dns

View File

@@ -0,0 +1,19 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: podinfo
namespace: test
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
minReplicas: 2
maxReplicas: 4
metrics:
- type: Resource
resource:
name: cpu
# scale up if usage is above
# 99% of the requested CPU (100m)
targetAverageUtilization: 99

View File

@@ -0,0 +1,177 @@
---
kind: ConfigMap
apiVersion: v1
metadata:
name: ingress-config
namespace: test
labels:
app: ingress
data:
envoy.yaml: |
static_resources:
listeners:
- address:
socket_address:
address: 0.0.0.0
port_value: 80
filter_chains:
- filters:
- name: envoy.http_connection_manager
config:
access_log:
- name: envoy.file_access_log
config:
path: /dev/stdout
codec_type: auto
stat_prefix: ingress_http
http_filters:
- name: envoy.router
config: {}
route_config:
name: local_route
virtual_hosts:
- name: local_service
domains: ["*"]
routes:
- match:
prefix: "/"
route:
cluster: podinfo
host_rewrite: podinfo.test
timeout: 15s
retry_policy:
retry_on: "gateway-error,connect-failure,refused-stream"
num_retries: 10
per_try_timeout: 5s
clusters:
- name: podinfo
connect_timeout: 0.30s
type: strict_dns
lb_policy: round_robin
http2_protocol_options: {}
hosts:
- socket_address:
address: podinfo.test
port_value: 9898
admin:
access_log_path: /dev/null
address:
socket_address:
address: 0.0.0.0
port_value: 9999
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: ingress
namespace: test
labels:
app: ingress
spec:
replicas: 1
selector:
matchLabels:
app: ingress
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 0
template:
metadata:
labels:
app: ingress
annotations:
prometheus.io/path: "/stats/prometheus"
prometheus.io/port: "9999"
prometheus.io/scrape: "true"
# dummy port to exclude ingress from mesh traffic
# only egress should go over the mesh
appmesh.k8s.aws/ports: "444"
spec:
terminationGracePeriodSeconds: 30
containers:
- name: ingress
image: "envoyproxy/envoy-alpine:d920944aed67425f91fc203774aebce9609e5d9a"
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
command:
- /usr/bin/dumb-init
- --
args:
- /usr/local/bin/envoy
- --base-id 30
- --v2-config-only
- -l
- $loglevel
- -c
- /config/envoy.yaml
ports:
- name: admin
containerPort: 9999
protocol: TCP
- name: http
containerPort: 80
protocol: TCP
- name: https
containerPort: 443
protocol: TCP
livenessProbe:
initialDelaySeconds: 5
tcpSocket:
port: admin
readinessProbe:
initialDelaySeconds: 5
tcpSocket:
port: admin
resources:
requests:
cpu: 100m
memory: 64Mi
volumeMounts:
- name: config
mountPath: /config
volumes:
- name: config
configMap:
name: ingress-config
---
kind: Service
apiVersion: v1
metadata:
name: ingress
namespace: test
spec:
selector:
app: ingress
ports:
- protocol: TCP
name: http
port: 80
targetPort: 80
- protocol: TCP
name: https
port: 443
targetPort: 443
type: LoadBalancer
---
apiVersion: appmesh.k8s.aws/v1beta1
kind: VirtualNode
metadata:
name: ingress
namespace: test
spec:
meshName: global
listeners:
- portMapping:
port: 80
protocol: http
serviceDiscovery:
dns:
hostName: ingress.test
backends:
- virtualService:
virtualServiceName: podinfo.test

View File

@@ -23,6 +23,7 @@ spec:
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
# Istio virtual service host names (optional)
hosts:
- app.istio.weavedx.com
@@ -55,12 +56,12 @@ spec:
stepWeight: 5
# Istio Prometheus checks
metrics:
- name: istio_requests_total
- name: request-success-rate
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: istio_request_duration_seconds_bucket
- name: request-duration
# maximum req duration P99
# milliseconds
threshold: 500
@@ -71,4 +72,6 @@ spec:
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
type: cmd
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
logCmdOutput: "true"

View File

@@ -23,6 +23,7 @@ spec:
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
# Istio virtual service host names (optional)
hosts:
- app.iowa.weavedx.com
@@ -39,12 +40,12 @@ spec:
stepWeight: 5
# Istio Prometheus checks
metrics:
- name: istio_requests_total
- name: request-success-rate
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: istio_request_duration_seconds_bucket
- name: request-duration
# maximum req duration P99
# milliseconds
threshold: 500

View File

@@ -0,0 +1,264 @@
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: prometheus
labels:
app: prometheus
rules:
- apiGroups: [""]
resources:
- nodes
- services
- endpoints
- pods
- nodes/proxy
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources:
- configmaps
verbs: ["get"]
- nonResourceURLs: ["/metrics"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: prometheus
labels:
app: prometheus
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: prometheus
subjects:
- kind: ServiceAccount
name: prometheus
namespace: appmesh-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: prometheus
namespace: appmesh-system
labels:
app: prometheus
---
apiVersion: v1
kind: ConfigMap
metadata:
name: prometheus
namespace: appmesh-system
labels:
app: prometheus
data:
prometheus.yml: |-
global:
scrape_interval: 5s
scrape_configs:
# Scrape config for AppMesh Envoy sidecar
- job_name: 'appmesh-envoy'
metrics_path: /stats/prometheus
kubernetes_sd_configs:
- role: pod
relabel_configs:
- source_labels: [__meta_kubernetes_pod_container_name]
action: keep
regex: '^envoy$'
- source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
action: replace
regex: ([^:]+)(?::\d+)?;(\d+)
replacement: ${1}:9901
target_label: __address__
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_pod_name]
action: replace
target_label: kubernetes_pod_name
# Exclude high cardinality metrics
metric_relabel_configs:
- source_labels: [ cluster_name ]
regex: '(outbound|inbound|prometheus_stats).*'
action: drop
- source_labels: [ tcp_prefix ]
regex: '(outbound|inbound|prometheus_stats).*'
action: drop
- source_labels: [ listener_address ]
regex: '(.+)'
action: drop
- source_labels: [ http_conn_manager_listener_prefix ]
regex: '(.+)'
action: drop
- source_labels: [ http_conn_manager_prefix ]
regex: '(.+)'
action: drop
- source_labels: [ __name__ ]
regex: 'envoy_tls.*'
action: drop
- source_labels: [ __name__ ]
regex: 'envoy_tcp_downstream.*'
action: drop
- source_labels: [ __name__ ]
regex: 'envoy_http_(stats|admin).*'
action: drop
- source_labels: [ __name__ ]
regex: 'envoy_cluster_(lb|retry|bind|internal|max|original).*'
action: drop
# Scrape config for API servers
- job_name: 'kubernetes-apiservers'
kubernetes_sd_configs:
- role: endpoints
namespaces:
names:
- default
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
relabel_configs:
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
action: keep
regex: kubernetes;https
# Scrape config for nodes
- job_name: 'kubernetes-nodes'
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
kubernetes_sd_configs:
- role: node
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- target_label: __address__
replacement: kubernetes.default.svc:443
- source_labels: [__meta_kubernetes_node_name]
regex: (.+)
target_label: __metrics_path__
replacement: /api/v1/nodes/${1}/proxy/metrics
# scrape config for cAdvisor
- job_name: 'kubernetes-cadvisor'
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
kubernetes_sd_configs:
- role: node
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- target_label: __address__
replacement: kubernetes.default.svc:443
- source_labels: [__meta_kubernetes_node_name]
regex: (.+)
target_label: __metrics_path__
replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
# scrape config for pods
- job_name: kubernetes-pods
kubernetes_sd_configs:
- role: pod
relabel_configs:
- action: keep
regex: true
source_labels:
- __meta_kubernetes_pod_annotation_prometheus_io_scrape
- source_labels: [ __address__ ]
regex: '.*9901.*'
action: drop
- action: replace
regex: (.+)
source_labels:
- __meta_kubernetes_pod_annotation_prometheus_io_path
target_label: __metrics_path__
- action: replace
regex: ([^:]+)(?::\d+)?;(\d+)
replacement: $1:$2
source_labels:
- __address__
- __meta_kubernetes_pod_annotation_prometheus_io_port
target_label: __address__
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- action: replace
source_labels:
- __meta_kubernetes_namespace
target_label: kubernetes_namespace
- action: replace
source_labels:
- __meta_kubernetes_pod_name
target_label: kubernetes_pod_name
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: prometheus
namespace: appmesh-system
labels:
app: prometheus
spec:
replicas: 1
selector:
matchLabels:
app: prometheus
template:
metadata:
labels:
app: prometheus
annotations:
version: "appmesh-v1alpha1"
spec:
serviceAccountName: prometheus
containers:
- name: prometheus
image: "docker.io/prom/prometheus:v2.7.1"
imagePullPolicy: IfNotPresent
args:
- '--storage.tsdb.retention=6h'
- '--config.file=/etc/prometheus/prometheus.yml'
ports:
- containerPort: 9090
name: http
livenessProbe:
httpGet:
path: /-/healthy
port: 9090
readinessProbe:
httpGet:
path: /-/ready
port: 9090
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- name: config-volume
mountPath: /etc/prometheus
volumes:
- name: config-volume
configMap:
name: prometheus
---
apiVersion: v1
kind: Service
metadata:
name: prometheus
namespace: appmesh-system
labels:
name: prometheus
spec:
selector:
app: prometheus
ports:
- name: http
protocol: TCP
port: 9090

View File

@@ -13,73 +13,76 @@ metadata:
labels:
app: flagger
rules:
- apiGroups:
- ""
resources:
- configmaps
- secrets
- events
verbs:
- create
- get
- patch
- update
- apiGroups:
- ""
resources:
- services
verbs:
- create
- get
- patch
- update
- apiGroups:
- apps
resources:
- deployments
verbs:
- create
- get
- patch
- update
- apiGroups:
- autoscaling
resources:
- horizontalpodautoscalers
verbs:
- create
- get
- patch
- update
- apiGroups:
- flagger.app
resources:
- canaries/status
verbs:
- get
- patch
- update
- apiGroups:
- networking.istio.io
resources:
- virtualservices
verbs:
- create
- get
- patch
- update
- apiGroups:
- flagger.app
resources:
- canaries
verbs:
- get
- list
- watch
- nonResourceURLs:
- /version
verbs:
- get
- apiGroups:
- ""
resources:
- events
- configmaps
- secrets
- services
verbs: ["*"]
- apiGroups:
- apps
resources:
- deployments
verbs: ["*"]
- apiGroups:
- autoscaling
resources:
- horizontalpodautoscalers
verbs: ["*"]
- apiGroups:
- "extensions"
resources:
- ingresses
- ingresses/status
verbs: ["*"]
- apiGroups:
- flagger.app
resources:
- canaries
- canaries/status
verbs: ["*"]
- apiGroups:
- networking.istio.io
resources:
- virtualservices
- virtualservices/status
verbs: ["*"]
- apiGroups:
- appmesh.k8s.aws
resources:
- meshes
- meshes/status
- virtualnodes
- virtualnodes/status
- virtualservices
- virtualservices/status
verbs: ["*"]
- apiGroups:
- split.smi-spec.io
resources:
- trafficsplits
verbs: ["*"]
- apiGroups:
- gloo.solo.io
resources:
- settings
- upstreams
- upstreamgroups
- proxies
- virtualservices
verbs: ["*"]
- apiGroups:
- gateway.solo.io
resources:
- virtualservices
- gateways
verbs: ["*"]
- nonResourceURLs:
- /version
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding

View File

@@ -2,6 +2,8 @@ apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: canaries.flagger.app
annotations:
helm.sh/resource-policy: keep
spec:
group: flagger.app
version: v1alpha3
@@ -39,9 +41,9 @@ spec:
properties:
spec:
required:
- targetRef
- service
- canaryAnalysis
- targetRef
- service
- canaryAnalysis
properties:
progressDeadlineSeconds:
type: number
@@ -67,12 +69,28 @@ spec:
type: string
name:
type: string
ingressRef:
anyOf:
- type: string
- type: object
required: ['apiVersion', 'kind', 'name']
properties:
apiVersion:
type: string
kind:
type: string
name:
type: string
service:
type: object
required: ['port']
properties:
port:
type: number
portName:
type: string
meshName:
type: string
timeout:
type: string
skipAnalysis:
@@ -82,6 +100,8 @@ spec:
interval:
type: string
pattern: "^[0-9]+(m|s)"
iterations:
type: number
threshold:
type: number
maxWeight:
@@ -113,9 +133,36 @@ spec:
properties:
name:
type: string
type:
type: string
enum:
- ""
- pre-rollout
- rollout
- post-rollout
url:
type: string
format: url
timeout:
type: string
pattern: "^[0-9]+(m|s)"
status:
properties:
phase:
type: string
enum:
- ""
- Initialized
- Progressing
- Succeeded
- Failed
canaryWeight:
type: number
failedChecks:
type: number
iterations:
type: number
lastAppliedSpec:
type: string
lastTransitionTime:
type: string

View File

@@ -22,7 +22,7 @@ spec:
serviceAccountName: flagger
containers:
- name: flagger
image: quay.io/stefanprodan/flagger:0.8.0
image: weaveworks/flagger:0.14.0
imagePullPolicy: IfNotPresent
ports:
- name: http
@@ -31,6 +31,7 @@ spec:
- ./flagger
- -log-level=info
- -control-loop-interval=10s
- -mesh-provider=$(MESH_PROVIDER)
- -metrics-server=http://prometheus.istio-system.svc.cluster.local:9090
livenessProbe:
exec:

View File

@@ -0,0 +1,36 @@
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
progressDeadlineSeconds: 60
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: podinfo
service:
port: 9898
canaryAnalysis:
interval: 10s
threshold: 10
maxWeight: 50
stepWeight: 5
metrics:
- name: request-success-rate
threshold: 99
interval: 1m
- name: request-duration
threshold: 500
interval: 30s
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
type: cmd
cmd: "hey -z 1m -q 10 -c 2 http://gloo.example.com/"

View File

@@ -0,0 +1,67 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: podinfo
namespace: test
labels:
app: podinfo
spec:
minReadySeconds: 5
revisionHistoryLimit: 5
progressDeadlineSeconds: 60
strategy:
rollingUpdate:
maxUnavailable: 0
type: RollingUpdate
selector:
matchLabels:
app: podinfo
template:
metadata:
annotations:
prometheus.io/scrape: "true"
labels:
app: podinfo
spec:
containers:
- name: podinfod
image: quay.io/stefanprodan/podinfo:1.4.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9898
name: http
protocol: TCP
command:
- ./podinfo
- --port=9898
- --level=info
- --random-delay=false
- --random-error=false
env:
- name: PODINFO_UI_COLOR
value: blue
livenessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/healthz
initialDelaySeconds: 5
timeoutSeconds: 5
readinessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/readyz
initialDelaySeconds: 5
timeoutSeconds: 5
resources:
limits:
cpu: 2000m
memory: 512Mi
requests:
cpu: 100m
memory: 64Mi

19
artifacts/gloo/hpa.yaml Normal file
View File

@@ -0,0 +1,19 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: podinfo
namespace: test
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
minReplicas: 1
maxReplicas: 4
metrics:
- type: Resource
resource:
name: cpu
# scale up if usage is above
# 99% of the requested CPU (100m)
targetAverageUtilization: 99

View File

@@ -0,0 +1,17 @@
apiVersion: gateway.solo.io/v1
kind: VirtualService
metadata:
name: podinfo
namespace: test
spec:
virtualHost:
domains:
- '*'
name: podinfo.default
routes:
- matcher:
prefix: /
routeAction:
upstreamGroup:
name: podinfo
namespace: gloo

View File

@@ -0,0 +1,19 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: flagger-loadtester-bats
data:
tests: |
#!/usr/bin/env bats
@test "check message" {
curl -sS http://${URL} | jq -r .message | {
run cut -d $' ' -f1
[ $output = "greetings" ]
}
}
@test "check headers" {
curl -sS http://${URL}/headers | grep X-Request-Id
}

View File

@@ -17,7 +17,7 @@ spec:
spec:
containers:
- name: loadtester
image: quay.io/stefanprodan/flagger-loadtester:0.1.0
image: weaveworks/flagger-loadtester:0.3.0
imagePullPolicy: IfNotPresent
ports:
- name: http
@@ -27,7 +27,6 @@ spec:
- -port=8080
- -log-level=info
- -timeout=1h
- -log-cmd-output=true
livenessProbe:
exec:
command:
@@ -58,3 +57,11 @@ spec:
securityContext:
readOnlyRootFilesystem: true
runAsUser: 10001
# volumeMounts:
# - name: tests
# mountPath: /bats
# readOnly: true
# volumes:
# - name: tests
# configMap:
# name: flagger-loadtester-bats

View File

@@ -4,3 +4,4 @@ metadata:
name: test
labels:
istio-injection: enabled
appmesh.k8s.aws/sidecarInjectorWebhook: enabled

View File

@@ -0,0 +1,68 @@
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
# ingress reference
ingressRef:
apiVersion: extensions/v1beta1
kind: Ingress
name: podinfo
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: podinfo
# the maximum time in seconds for the canary deployment
# to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
service:
# container port
port: 9898
canaryAnalysis:
# schedule interval (default 60s)
interval: 10s
# max number of failed metric checks before rollback
threshold: 10
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 5
# NGINX Prometheus checks
metrics:
- name: request-success-rate
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: "latency"
threshold: 0.5
interval: 1m
query: |
histogram_quantile(0.99,
sum(
rate(
http_request_duration_seconds_bucket{
kubernetes_namespace="test",
kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"
}[1m]
)
) by (le)
)
# external checks (optional)
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
type: cmd
cmd: "hey -z 1m -q 10 -c 2 http://app.example.com/"
logCmdOutput: "true"

View File

@@ -0,0 +1,69 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: podinfo
namespace: test
labels:
app: podinfo
spec:
replicas: 1
strategy:
rollingUpdate:
maxUnavailable: 0
type: RollingUpdate
selector:
matchLabels:
app: podinfo
template:
metadata:
annotations:
prometheus.io/scrape: "true"
labels:
app: podinfo
spec:
containers:
- name: podinfod
image: quay.io/stefanprodan/podinfo:1.4.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9898
name: http
protocol: TCP
command:
- ./podinfo
- --port=9898
- --level=info
- --random-delay=false
- --random-error=false
env:
- name: PODINFO_UI_COLOR
value: green
livenessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/healthz
failureThreshold: 3
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 2
readinessProbe:
exec:
command:
- podcli
- check
- http
- localhost:9898/readyz
failureThreshold: 3
periodSeconds: 3
successThreshold: 1
timeoutSeconds: 2
resources:
limits:
cpu: 1000m
memory: 256Mi
requests:
cpu: 100m
memory: 16Mi

19
artifacts/nginx/hpa.yaml Normal file
View File

@@ -0,0 +1,19 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: podinfo
namespace: test
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
minReplicas: 2
maxReplicas: 4
metrics:
- type: Resource
resource:
name: cpu
# scale up if usage is above
# 99% of the requested CPU (100m)
targetAverageUtilization: 99

View File

@@ -0,0 +1,17 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: podinfo
namespace: test
labels:
app: podinfo
annotations:
kubernetes.io/ingress.class: "nginx"
spec:
rules:
- host: app.example.com
http:
paths:
- backend:
serviceName: podinfo
servicePort: 9898

View File

@@ -0,0 +1,131 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: trafficsplits.split.smi-spec.io
spec:
additionalPrinterColumns:
- JSONPath: .spec.service
description: The service
name: Service
type: string
group: split.smi-spec.io
names:
kind: TrafficSplit
listKind: TrafficSplitList
plural: trafficsplits
singular: trafficsplit
scope: Namespaced
subresources:
status: {}
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: smi-adapter-istio
namespace: istio-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: smi-adapter-istio
rules:
- apiGroups:
- ""
resources:
- pods
- services
- endpoints
- persistentvolumeclaims
- events
- configmaps
- secrets
verbs:
- '*'
- apiGroups:
- apps
resources:
- deployments
- daemonsets
- replicasets
- statefulsets
verbs:
- '*'
- apiGroups:
- monitoring.coreos.com
resources:
- servicemonitors
verbs:
- get
- create
- apiGroups:
- apps
resourceNames:
- smi-adapter-istio
resources:
- deployments/finalizers
verbs:
- update
- apiGroups:
- split.smi-spec.io
resources:
- '*'
verbs:
- '*'
- apiGroups:
- networking.istio.io
resources:
- '*'
verbs:
- '*'
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: smi-adapter-istio
subjects:
- kind: ServiceAccount
name: smi-adapter-istio
namespace: istio-system
roleRef:
kind: ClusterRole
name: smi-adapter-istio
apiGroup: rbac.authorization.k8s.io
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: smi-adapter-istio
namespace: istio-system
spec:
replicas: 1
selector:
matchLabels:
name: smi-adapter-istio
template:
metadata:
labels:
name: smi-adapter-istio
annotations:
sidecar.istio.io/inject: "false"
spec:
serviceAccountName: smi-adapter-istio
containers:
- name: smi-adapter-istio
image: docker.io/stefanprodan/smi-adapter-istio:0.0.2-beta.1
command:
- smi-adapter-istio
imagePullPolicy: Always
env:
- name: WATCH_NAMESPACE
value: ""
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: OPERATOR_NAME
value: "smi-adapter-istio"

View File

@@ -23,7 +23,7 @@ spec:
spec:
containers:
- name: podinfod
image: quay.io/stefanprodan/podinfo:1.2.0
image: quay.io/stefanprodan/podinfo:1.4.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9898
@@ -67,9 +67,3 @@ spec:
requests:
cpu: 100m
memory: 16Mi
volumeMounts:
- mountPath: /data
name: data
volumes:
- emptyDir: {}
name: data

View File

@@ -1,10 +1,8 @@
apiVersion: v1
kind: Service
metadata:
name: podinfo
name: podinfo-canary
namespace: test
labels:
app: podinfo
spec:
type: ClusterIP
selector:

View File

@@ -23,7 +23,7 @@ spec:
spec:
containers:
- name: podinfod
image: quay.io/stefanprodan/podinfo:1.1.1
image: quay.io/stefanprodan/podinfo:1.4.1
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9898

View File

@@ -0,0 +1,14 @@
apiVersion: v1
kind: Service
metadata:
name: podinfo
namespace: test
spec:
type: ClusterIP
selector:
app: podinfo-primary
ports:
- name: http
port: 9898
protocol: TCP
targetPort: http

View File

@@ -1,14 +1,14 @@
apiVersion: v1
name: flagger
version: 0.8.0
appVersion: 0.8.0
version: 0.14.0
appVersion: 0.14.0
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
description: Flagger is a Kubernetes operator that automates the promotion of canary deployments using Istio, App Mesh or NGINX routing for traffic shifting and Prometheus metrics for canary analysis.
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/flagger-icon.png
sources:
- https://github.com/stefanprodan/flagger
- https://github.com/weaveworks/flagger
maintainers:
- name: stefanprodan
url: https://github.com/stefanprodan
@@ -16,4 +16,5 @@ maintainers:
keywords:
- canary
- istio
- appmesh
- gitops

View File

@@ -1,6 +1,6 @@
# Flagger
[Flagger](https://github.com/stefanprodan/flagger) is a Kubernetes operator that automates the promotion of
[Flagger](https://github.com/weaveworks/flagger) is a Kubernetes operator that automates the promotion of
canary deployments using Istio routing for traffic shifting and Prometheus metrics for canary analysis.
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators
like HTTP requests success rate, requests average duration and pods health.
@@ -45,7 +45,7 @@ The following tables lists the configurable parameters of the Flagger chart and
Parameter | Description | Default
--- | --- | ---
`image.repository` | image repository | `quay.io/stefanprodan/flagger`
`image.repository` | image repository | `weaveworks/flagger`
`image.tag` | image tag | `<VERSION>`
`image.pullPolicy` | image pull policy | `IfNotPresent`
`metricsServer` | Prometheus URL | `http://prometheus.istio-system:9090`

View File

@@ -3,6 +3,8 @@ apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: canaries.flagger.app
annotations:
helm.sh/resource-policy: keep
spec:
group: flagger.app
version: v1alpha3
@@ -68,12 +70,28 @@ spec:
type: string
name:
type: string
ingressRef:
anyOf:
- type: string
- type: object
required: ['apiVersion', 'kind', 'name']
properties:
apiVersion:
type: string
kind:
type: string
name:
type: string
service:
type: object
required: ['port']
properties:
port:
type: number
portName:
type: string
meshName:
type: string
timeout:
type: string
skipAnalysis:
@@ -83,6 +101,8 @@ spec:
interval:
type: string
pattern: "^[0-9]+(m|s)"
iterations:
type: number
threshold:
type: number
maxWeight:
@@ -114,10 +134,37 @@ spec:
properties:
name:
type: string
type:
type: string
enum:
- ""
- pre-rollout
- rollout
- post-rollout
url:
type: string
format: url
timeout:
type: string
pattern: "^[0-9]+(m|s)"
status:
properties:
phase:
type: string
enum:
- ""
- Initialized
- Progressing
- Succeeded
- Failed
canaryWeight:
type: number
failedChecks:
type: number
iterations:
type: number
lastAppliedSpec:
type: string
lastTransitionTime:
type: string
{{- end }}

View File

@@ -35,7 +35,14 @@ spec:
command:
- ./flagger
- -log-level=info
{{- if .Values.meshProvider }}
- -mesh-provider={{ .Values.meshProvider }}
{{- end }}
{{- if .Values.prometheus.install }}
- -metrics-server=http://{{ template "flagger.fullname" . }}-prometheus:9090
{{- else }}
- -metrics-server={{ .Values.metricsServer }}
{{- end }}
{{- if .Values.namespace }}
- -namespace={{ .Values.namespace }}
{{- end }}

View File

@@ -0,0 +1,292 @@
{{- if .Values.prometheus.install }}
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: {{ template "flagger.fullname" . }}-prometheus
labels:
helm.sh/chart: {{ template "flagger.chart" . }}
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
rules:
- apiGroups: [""]
resources:
- nodes
- services
- endpoints
- pods
- nodes/proxy
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources:
- configmaps
verbs: ["get"]
- nonResourceURLs: ["/metrics"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: {{ template "flagger.fullname" . }}-prometheus
labels:
helm.sh/chart: {{ template "flagger.chart" . }}
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "flagger.fullname" . }}-prometheus
subjects:
- kind: ServiceAccount
name: {{ template "flagger.serviceAccountName" . }}-prometheus
namespace: {{ .Release.Namespace }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "flagger.serviceAccountName" . }}-prometheus
namespace: {{ .Release.Namespace }}
labels:
helm.sh/chart: {{ template "flagger.chart" . }}
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "flagger.fullname" . }}-prometheus
namespace: {{ .Release.Namespace }}
labels:
helm.sh/chart: {{ template "flagger.chart" . }}
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
data:
prometheus.yml: |-
global:
scrape_interval: 5s
scrape_configs:
# Scrape config for AppMesh Envoy sidecar
- job_name: 'appmesh-envoy'
metrics_path: /stats/prometheus
kubernetes_sd_configs:
- role: pod
relabel_configs:
- source_labels: [__meta_kubernetes_pod_container_name]
action: keep
regex: '^envoy$'
- source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
action: replace
regex: ([^:]+)(?::\d+)?;(\d+)
replacement: ${1}:9901
target_label: __address__
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- source_labels: [__meta_kubernetes_namespace]
action: replace
target_label: kubernetes_namespace
- source_labels: [__meta_kubernetes_pod_name]
action: replace
target_label: kubernetes_pod_name
# Exclude high cardinality metrics
metric_relabel_configs:
- source_labels: [ cluster_name ]
regex: '(outbound|inbound|prometheus_stats).*'
action: drop
- source_labels: [ tcp_prefix ]
regex: '(outbound|inbound|prometheus_stats).*'
action: drop
- source_labels: [ listener_address ]
regex: '(.+)'
action: drop
- source_labels: [ http_conn_manager_listener_prefix ]
regex: '(.+)'
action: drop
- source_labels: [ http_conn_manager_prefix ]
regex: '(.+)'
action: drop
- source_labels: [ __name__ ]
regex: 'envoy_tls.*'
action: drop
- source_labels: [ __name__ ]
regex: 'envoy_tcp_downstream.*'
action: drop
- source_labels: [ __name__ ]
regex: 'envoy_http_(stats|admin).*'
action: drop
- source_labels: [ __name__ ]
regex: 'envoy_cluster_(lb|retry|bind|internal|max|original).*'
action: drop
# Scrape config for API servers
- job_name: 'kubernetes-apiservers'
kubernetes_sd_configs:
- role: endpoints
namespaces:
names:
- default
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
relabel_configs:
- source_labels: [__meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
action: keep
regex: kubernetes;https
# Scrape config for nodes
- job_name: 'kubernetes-nodes'
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
kubernetes_sd_configs:
- role: node
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- target_label: __address__
replacement: kubernetes.default.svc:443
- source_labels: [__meta_kubernetes_node_name]
regex: (.+)
target_label: __metrics_path__
replacement: /api/v1/nodes/${1}/proxy/metrics
# scrape config for cAdvisor
- job_name: 'kubernetes-cadvisor'
scheme: https
tls_config:
ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
kubernetes_sd_configs:
- role: node
relabel_configs:
- action: labelmap
regex: __meta_kubernetes_node_label_(.+)
- target_label: __address__
replacement: kubernetes.default.svc:443
- source_labels: [__meta_kubernetes_node_name]
regex: (.+)
target_label: __metrics_path__
replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor
# scrape config for pods
- job_name: kubernetes-pods
kubernetes_sd_configs:
- role: pod
relabel_configs:
- action: keep
regex: true
source_labels:
- __meta_kubernetes_pod_annotation_prometheus_io_scrape
- source_labels: [ __address__ ]
regex: '.*9901.*'
action: drop
- action: replace
regex: (.+)
source_labels:
- __meta_kubernetes_pod_annotation_prometheus_io_path
target_label: __metrics_path__
- action: replace
regex: ([^:]+)(?::\d+)?;(\d+)
replacement: $1:$2
source_labels:
- __address__
- __meta_kubernetes_pod_annotation_prometheus_io_port
target_label: __address__
- action: labelmap
regex: __meta_kubernetes_pod_label_(.+)
- action: replace
source_labels:
- __meta_kubernetes_namespace
target_label: kubernetes_namespace
- action: replace
source_labels:
- __meta_kubernetes_pod_name
target_label: kubernetes_pod_name
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "flagger.fullname" . }}-prometheus
namespace: {{ .Release.Namespace }}
labels:
helm.sh/chart: {{ template "flagger.chart" . }}
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: {{ template "flagger.name" . }}-prometheus
app.kubernetes.io/instance: {{ .Release.Name }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ template "flagger.name" . }}-prometheus
app.kubernetes.io/instance: {{ .Release.Name }}
annotations:
appmesh.k8s.aws/sidecarInjectorWebhook: disabled
sidecar.istio.io/inject: "false"
spec:
serviceAccountName: {{ template "flagger.serviceAccountName" . }}-prometheus
containers:
- name: prometheus
image: "docker.io/prom/prometheus:v2.7.1"
imagePullPolicy: IfNotPresent
args:
- '--storage.tsdb.retention=6h'
- '--config.file=/etc/prometheus/prometheus.yml'
ports:
- containerPort: 9090
name: http
livenessProbe:
httpGet:
path: /-/healthy
port: 9090
readinessProbe:
httpGet:
path: /-/ready
port: 9090
resources:
requests:
cpu: 10m
memory: 128Mi
volumeMounts:
- name: config-volume
mountPath: /etc/prometheus
- name: data-volume
mountPath: /prometheus/data
volumes:
- name: config-volume
configMap:
name: {{ template "flagger.fullname" . }}-prometheus
- name: data-volume
emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
name: {{ template "flagger.fullname" . }}-prometheus
namespace: {{ .Release.Namespace }}
labels:
helm.sh/chart: {{ template "flagger.chart" . }}
app.kubernetes.io/name: {{ template "flagger.name" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
spec:
selector:
app.kubernetes.io/name: {{ template "flagger.name" . }}-prometheus
app.kubernetes.io/instance: {{ .Release.Name }}
ports:
- name: http
protocol: TCP
port: 9090
{{- end }}

View File

@@ -9,73 +9,76 @@ metadata:
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
rules:
- apiGroups:
- ""
resources:
- configmaps
- secrets
- events
verbs:
- create
- get
- patch
- update
- apiGroups:
- ""
resources:
- services
verbs:
- create
- get
- patch
- update
- apiGroups:
- apps
resources:
- deployments
verbs:
- create
- get
- patch
- update
- apiGroups:
- autoscaling
resources:
- horizontalpodautoscalers
verbs:
- create
- get
- patch
- update
- apiGroups:
- flagger.app
resources:
- canaries/status
verbs:
- get
- patch
- update
- apiGroups:
- networking.istio.io
resources:
- virtualservices
verbs:
- create
- get
- patch
- update
- apiGroups:
- flagger.app
resources:
- canaries
verbs:
- get
- list
- watch
- nonResourceURLs:
- /version
verbs:
- get
- apiGroups:
- ""
resources:
- events
- configmaps
- secrets
- services
verbs: ["*"]
- apiGroups:
- apps
resources:
- deployments
verbs: ["*"]
- apiGroups:
- autoscaling
resources:
- horizontalpodautoscalers
verbs: ["*"]
- apiGroups:
- "extensions"
resources:
- ingresses
- ingresses/status
verbs: ["*"]
- apiGroups:
- flagger.app
resources:
- canaries
- canaries/status
verbs: ["*"]
- apiGroups:
- networking.istio.io
resources:
- virtualservices
- virtualservices/status
verbs: ["*"]
- apiGroups:
- appmesh.k8s.aws
resources:
- meshes
- meshes/status
- virtualnodes
- virtualnodes/status
- virtualservices
- virtualservices/status
verbs: ["*"]
- apiGroups:
- split.smi-spec.io
resources:
- trafficsplits
verbs: ["*"]
- apiGroups:
- gloo.solo.io
resources:
- settings
- upstreams
- upstreamgroups
- proxies
- virtualservices
verbs: ["*"]
- apiGroups:
- gateway.solo.io
resources:
- virtualservices
- gateways
verbs: ["*"]
- nonResourceURLs:
- /version
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding

View File

@@ -1,13 +1,16 @@
# Default values for flagger.
image:
repository: quay.io/stefanprodan/flagger
tag: 0.8.0
repository: weaveworks/flagger
tag: 0.14.0
pullPolicy: IfNotPresent
metricsServer: "http://prometheus.istio-system.svc.cluster.local:9090"
metricsServer: "http://prometheus:9090"
# Namespace that flagger will watch for Canary objects
# accepted values are istio, appmesh, nginx or supergloo:mesh.namespace (defaults to istio)
meshProvider: ""
# single namespace restriction
namespace: ""
slack:
@@ -46,3 +49,7 @@ nodeSelector: {}
tolerations: []
affinity: {}
prometheus:
# to be used with AppMesh or nginx ingress
install: false

View File

@@ -1,12 +1,12 @@
apiVersion: v1
name: grafana
version: 1.0.0
version: 1.2.0
appVersion: 5.4.3
description: Grafana dashboards for monitoring Flagger canary deployments
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/flagger-icon.png
home: https://flagger.app
sources:
- https://github.com/stefanprodan/flagger
- https://github.com/weaveworks/flagger
maintainers:
- name: stefanprodan
url: https://github.com/stefanprodan

View File

@@ -2,7 +2,7 @@
Grafana dashboards for monitoring progressive deployments powered by Istio, Prometheus and Flagger.
![flagger-grafana](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/grafana-canary-analysis.png)
![flagger-grafana](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/grafana-canary-analysis.png)
## Prerequisites

File diff suppressed because it is too large Load Diff

View File

@@ -1583,11 +1583,7 @@
"list": [
{
"allValue": null,
"current": {
"selected": true,
"text": "test",
"value": "test"
},
"current": null,
"datasource": "prometheus",
"definition": "",
"hide": 0,
@@ -1609,11 +1605,7 @@
},
{
"allValue": null,
"current": {
"selected": false,
"text": "backend-primary",
"value": "backend-primary"
},
"current": null,
"datasource": "prometheus",
"definition": "",
"hide": 0,
@@ -1622,9 +1614,9 @@
"multi": false,
"name": "primary",
"options": [],
"query": "query_result(sum(istio_requests_total{destination_workload_namespace=~\"$namespace\"}) by (destination_service_name))",
"query": "query_result(sum(istio_requests_total{destination_workload_namespace=~\"$namespace\"}) by (destination_workload))",
"refresh": 1,
"regex": "/.*destination_service_name=\"([^\"]*).*/",
"regex": "/.*destination_workload=\"([^\"]*).*/",
"skipUrlSync": false,
"sort": 1,
"tagValuesQuery": "",
@@ -1635,11 +1627,7 @@
},
{
"allValue": null,
"current": {
"selected": true,
"text": "backend",
"value": "backend"
},
"current": null,
"datasource": "prometheus",
"definition": "",
"hide": 0,
@@ -1648,9 +1636,9 @@
"multi": false,
"name": "canary",
"options": [],
"query": "query_result(sum(istio_requests_total{destination_workload_namespace=~\"$namespace\"}) by (destination_service_name))",
"query": "query_result(sum(istio_requests_total{destination_workload_namespace=~\"$namespace\"}) by (destination_workload))",
"refresh": 1,
"regex": "/.*destination_service_name=\"([^\"]*).*/",
"regex": "/.*destination_workload=\"([^\"]*).*/",
"skipUrlSync": false,
"sort": 1,
"tagValuesQuery": "",
@@ -1691,7 +1679,7 @@
]
},
"timezone": "",
"title": "Flagger canary",
"uid": "RdykD7tiz",
"title": "Istio Canary",
"uid": "flagger-istio",
"version": 3
}

View File

@@ -1,15 +1,7 @@
1. Get the application URL by running these commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "grafana.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get svc -w {{ template "grafana.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "grafana.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "grafana.name" . }},release={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl port-forward $POD_NAME 8080:80
{{- end }}
1. Run the port forward command:
kubectl -n {{ .Release.Namespace }} port-forward svc/{{ .Release.Name }} 3000:80
2. Navigate to:
http://localhost:3000

View File

@@ -1,14 +1,14 @@
apiVersion: v1
name: loadtester
version: 0.1.0
appVersion: 0.1.0
version: 0.4.0
appVersion: 0.3.0
kubeVersion: ">=1.11.0-0"
engine: gotpl
description: Flagger's load testing service based on rakyll/hey that generates traffic during canary analysis when configured as a webhook.
home: https://docs.flagger.app
icon: https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/logo/flagger-icon.png
icon: https://raw.githubusercontent.com/weaveworks/flagger/master/docs/logo/flagger-icon.png
sources:
- https://github.com/stefanprodan/flagger
- https://github.com/weaveworks/flagger
maintainers:
- name: stefanprodan
url: https://github.com/stefanprodan
@@ -16,5 +16,6 @@ maintainers:
keywords:
- canary
- istio
- appmesh
- gitops
- load testing

View File

@@ -1,6 +1,6 @@
# Flagger load testing service
[Flagger's](https://github.com/stefanprodan/flagger) load testing service is based on
[Flagger's](https://github.com/weaveworks/flagger) load testing service is based on
[rakyll/hey](https://github.com/rakyll/hey)
and can be used to generate traffic during canary analysis when configured as a webhook.
@@ -56,15 +56,15 @@ Parameter | Description | Default
`nodeSelector` | node labels for pod assignment | `{}`
`service.type` | type of service | `ClusterIP`
`service.port` | ClusterIP port | `80`
`cmd.logOutput` | Log the command output to stderr | `true`
`cmd.timeout` | Command execution timeout | `1h`
`logLevel` | Log level can be debug, info, warning, error or panic | `info`
`meshName` | AWS App Mesh name | `none`
`backends` | AWS App Mesh virtual services | `none`
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
helm install flagger/loadtester --name flagger-loadtester \
--set cmd.logOutput=false
helm install flagger/loadtester --name flagger-loadtester
```
Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example,

View File

@@ -16,6 +16,8 @@ spec:
metadata:
labels:
app: {{ include "loadtester.name" . }}
annotations:
appmesh.k8s.aws/ports: "444"
spec:
containers:
- name: {{ .Chart.Name }}
@@ -29,7 +31,6 @@ spec:
- -port=8080
- -log-level={{ .Values.logLevel }}
- -timeout={{ .Values.cmd.timeout }}
- -log-cmd-output={{ .Values.cmd.logOutput }}
livenessProbe:
exec:
command:

View File

@@ -0,0 +1,27 @@
{{- if .Values.meshName }}
apiVersion: appmesh.k8s.aws/v1beta1
kind: VirtualNode
metadata:
name: {{ include "loadtester.fullname" . }}
labels:
app.kubernetes.io/name: {{ include "loadtester.name" . }}
helm.sh/chart: {{ include "loadtester.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
spec:
meshName: {{ .Values.meshName }}
listeners:
- portMapping:
port: 444
protocol: http
serviceDiscovery:
dns:
hostName: {{ include "loadtester.fullname" . }}.{{ .Release.Namespace }}
{{- if .Values.backends }}
backends:
{{- range .Values.backends }}
- virtualService:
virtualServiceName: {{ . }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -1,13 +1,12 @@
replicaCount: 1
image:
repository: quay.io/stefanprodan/flagger-loadtester
tag: 0.1.0
repository: weaveworks/flagger-loadtester
tag: 0.3.0
pullPolicy: IfNotPresent
logLevel: info
cmd:
logOutput: true
timeout: 1h
nameOverride: ""
@@ -27,3 +26,9 @@ nodeSelector: {}
tolerations: []
affinity: {}
# App Mesh virtual node settings
meshName: ""
#backends:
# - app1.namespace
# - app2.namespace

View File

@@ -1,12 +1,12 @@
apiVersion: v1
version: 2.0.0
version: 2.0.1
appVersion: 1.4.0
name: podinfo
engine: gotpl
description: Flagger canary deployment demo chart
home: https://github.com/stefanprodan/flagger
home: https://github.com/weaveworks/flagger
maintainers:
- email: stefanprodan@users.noreply.github.com
name: stefanprodan
sources:
- https://github.com/stefanprodan/flagger
- https://github.com/weaveworks/flagger

View File

@@ -32,10 +32,10 @@ spec:
maxWeight: {{ .Values.canary.analysis.maxWeight }}
stepWeight: {{ .Values.canary.analysis.stepWeight }}
metrics:
- name: istio_requests_total
- name: request-success-rate
threshold: {{ .Values.canary.thresholds.successRate }}
interval: 1m
- name: istio_request_duration_seconds_bucket
- name: request-duration
threshold: {{ .Values.canary.thresholds.latency }}
interval: 1m
{{- if .Values.canary.loadtest.enabled }}

View File

@@ -2,22 +2,26 @@ package main
import (
"flag"
_ "github.com/istio/glog"
clientset "github.com/stefanprodan/flagger/pkg/client/clientset/versioned"
informers "github.com/stefanprodan/flagger/pkg/client/informers/externalversions"
"github.com/stefanprodan/flagger/pkg/controller"
"github.com/stefanprodan/flagger/pkg/logging"
"github.com/stefanprodan/flagger/pkg/notifier"
"github.com/stefanprodan/flagger/pkg/server"
"github.com/stefanprodan/flagger/pkg/signals"
"github.com/stefanprodan/flagger/pkg/version"
"log"
"strings"
"time"
_ "github.com/stefanprodan/klog"
clientset "github.com/weaveworks/flagger/pkg/client/clientset/versioned"
informers "github.com/weaveworks/flagger/pkg/client/informers/externalversions"
"github.com/weaveworks/flagger/pkg/controller"
"github.com/weaveworks/flagger/pkg/logger"
"github.com/weaveworks/flagger/pkg/metrics"
"github.com/weaveworks/flagger/pkg/notifier"
"github.com/weaveworks/flagger/pkg/router"
"github.com/weaveworks/flagger/pkg/server"
"github.com/weaveworks/flagger/pkg/signals"
"github.com/weaveworks/flagger/pkg/version"
"go.uber.org/zap"
"k8s.io/client-go/kubernetes"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
"log"
"time"
)
var (
@@ -34,13 +38,15 @@ var (
zapReplaceGlobals bool
zapEncoding string
namespace string
meshProvider string
selectorLabels string
)
func init() {
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
flag.StringVar(&masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
flag.StringVar(&metricsServer, "metrics-server", "http://prometheus:9090", "Prometheus URL")
flag.DurationVar(&controlLoopInterval, "control-loop-interval", 10*time.Second, "Kubernetes API sync interval")
flag.StringVar(&metricsServer, "metrics-server", "http://prometheus:9090", "Prometheus URL.")
flag.DurationVar(&controlLoopInterval, "control-loop-interval", 10*time.Second, "Kubernetes API sync interval.")
flag.StringVar(&logLevel, "log-level", "debug", "Log level can be: debug, info, warning, error.")
flag.StringVar(&port, "port", "8080", "Port to listen on.")
flag.StringVar(&slackURL, "slack-url", "", "Slack hook URL.")
@@ -49,13 +55,15 @@ func init() {
flag.IntVar(&threadiness, "threadiness", 2, "Worker concurrency.")
flag.BoolVar(&zapReplaceGlobals, "zap-replace-globals", false, "Whether to change the logging level of the global zap logger.")
flag.StringVar(&zapEncoding, "zap-encoding", "json", "Zap logger encoding.")
flag.StringVar(&namespace, "namespace", "", "Namespace that flagger would watch canary object")
flag.StringVar(&namespace, "namespace", "", "Namespace that flagger would watch canary object.")
flag.StringVar(&meshProvider, "mesh-provider", "istio", "Service mesh provider, can be istio, appmesh, supergloo, nginx or smi.")
flag.StringVar(&selectorLabels, "selector-labels", "app,name,app.kubernetes.io/name", "List of pod labels that Flagger uses to create pod selectors.")
}
func main() {
flag.Parse()
logger, err := logging.NewLoggerWithEncoding(logLevel, zapEncoding)
logger, err := logger.NewLoggerWithEncoding(logLevel, zapEncoding)
if err != nil {
log.Fatalf("Error creating logger: %v", err)
}
@@ -77,36 +85,43 @@ func main() {
logger.Fatalf("Error building kubernetes clientset: %v", err)
}
istioClient, err := clientset.NewForConfig(cfg)
meshClient, err := clientset.NewForConfig(cfg)
if err != nil {
logger.Fatalf("Error building istio clientset: %v", err)
logger.Fatalf("Error building mesh clientset: %v", err)
}
flaggerClient, err := clientset.NewForConfig(cfg)
if err != nil {
logger.Fatalf("Error building example clientset: %s", err.Error())
}
if namespace == "" {
logger.Infof("Flagger Canary's Watcher is on all namespace")
} else {
logger.Infof("Flagger Canary's Watcher is on namespace %s", namespace)
logger.Fatalf("Error building flagger clientset: %s", err.Error())
}
flaggerInformerFactory := informers.NewSharedInformerFactoryWithOptions(flaggerClient, time.Second*30, informers.WithNamespace(namespace))
canaryInformer := flaggerInformerFactory.Flagger().V1alpha3().Canaries()
logger.Infof("Starting flagger version %s revision %s", version.VERSION, version.REVISION)
logger.Infof("Starting flagger version %s revision %s mesh provider %s", version.VERSION, version.REVISION, meshProvider)
ver, err := kubeClient.Discovery().ServerVersion()
if err != nil {
logger.Fatalf("Error calling Kubernetes API: %v", err)
}
logger.Infof("Connected to Kubernetes API %s", ver)
labels := strings.Split(selectorLabels, ",")
if len(labels) < 1 {
logger.Fatalf("At least one selector label is required")
}
ok, err := controller.CheckMetricsServer(metricsServer)
logger.Infof("Connected to Kubernetes API %s", ver)
if namespace != "" {
logger.Infof("Watching namespace %s", namespace)
}
observerFactory, err := metrics.NewFactory(metricsServer, meshProvider, 5*time.Second)
if err != nil {
logger.Fatalf("Error building prometheus client: %s", err.Error())
}
ok, err := observerFactory.Client.IsOnline()
if ok {
logger.Infof("Connected to metrics server %s", metricsServer)
} else {
@@ -126,15 +141,22 @@ func main() {
// start HTTP server
go server.ListenAndServe(port, 3*time.Second, logger, stopCh)
routerFactory := router.NewFactory(cfg, kubeClient, flaggerClient, logger, meshClient)
c := controller.NewController(
kubeClient,
istioClient,
meshClient,
flaggerClient,
canaryInformer,
controlLoopInterval,
metricsServer,
logger,
slack,
routerFactory,
observerFactory,
meshProvider,
version.VERSION,
labels,
)
flaggerInformerFactory.Start(stopCh)

View File

@@ -2,20 +2,19 @@ package main
import (
"flag"
"github.com/stefanprodan/flagger/pkg/loadtester"
"github.com/stefanprodan/flagger/pkg/logging"
"github.com/stefanprodan/flagger/pkg/signals"
"github.com/weaveworks/flagger/pkg/loadtester"
"github.com/weaveworks/flagger/pkg/logger"
"github.com/weaveworks/flagger/pkg/signals"
"go.uber.org/zap"
"log"
"time"
)
var VERSION = "0.1.0"
var VERSION = "0.3.0"
var (
logLevel string
port string
timeout time.Duration
logCmdOutput bool
zapReplaceGlobals bool
zapEncoding string
)
@@ -23,8 +22,7 @@ var (
func init() {
flag.StringVar(&logLevel, "log-level", "debug", "Log level can be: debug, info, warning, error.")
flag.StringVar(&port, "port", "9090", "Port to listen on.")
flag.DurationVar(&timeout, "timeout", time.Hour, "Command exec timeout.")
flag.BoolVar(&logCmdOutput, "log-cmd-output", true, "Log command output to stderr")
flag.DurationVar(&timeout, "timeout", time.Hour, "Load test exec timeout.")
flag.BoolVar(&zapReplaceGlobals, "zap-replace-globals", false, "Whether to change the logging level of the global zap logger.")
flag.StringVar(&zapEncoding, "zap-encoding", "json", "Zap logger encoding.")
}
@@ -32,7 +30,7 @@ func init() {
func main() {
flag.Parse()
logger, err := logging.NewLoggerWithEncoding(logLevel, zapEncoding)
logger, err := logger.NewLoggerWithEncoding(logLevel, zapEncoding)
if err != nil {
log.Fatalf("Error creating logger: %v", err)
}
@@ -44,7 +42,7 @@ func main() {
stopCh := signals.SetupSignalHandler()
taskRunner := loadtester.NewTaskRunner(logger, timeout, logCmdOutput)
taskRunner := loadtester.NewTaskRunner(logger, timeout)
go taskRunner.Start(100*time.Millisecond, stopCh)

Binary file not shown.

After

Width:  |  Height:  |  Size: 158 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 207 KiB

After

Width:  |  Height:  |  Size: 46 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 32 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 35 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 41 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 40 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 183 KiB

After

Width:  |  Height:  |  Size: 46 KiB

View File

@@ -1,11 +1,11 @@
---
description: Flagger is an Istio progressive delivery Kubernetes operator
description: Flagger is a progressive delivery Kubernetes operator
---
# Introduction
[Flagger](https://github.com/stefanprodan/flagger) is a **Kubernetes** operator that automates the promotion of canary
deployments using **Istio** routing for traffic shifting and **Prometheus** metrics for canary analysis.
[Flagger](https://github.com/weaveworks/flagger) is a **Kubernetes** operator that automates the promotion of canary
deployments using **Istio**, **App Mesh** or **NGINX** routing for traffic shifting and **Prometheus** metrics for canary analysis.
The canary analysis can be extended with webhooks for running
system integration/acceptance tests, load tests, or any other custom validation.
@@ -13,7 +13,7 @@ Flagger implements a control loop that gradually shifts traffic to the canary wh
indicators like HTTP requests success rate, requests average duration and pods health.
Based on analysis of the **KPIs** a canary is promoted or aborted, and the analysis result is published to **Slack**.
![Flagger overview diagram](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-overview.png)
![Flagger overview diagram](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-overview.png)
Flagger can be configured with Kubernetes custom resources and is compatible with
any CI/CD solutions made for Kubernetes. Since Flagger is declarative and reacts to Kubernetes events,

View File

@@ -6,15 +6,22 @@
## Install
* [Flagger Install on Kubernetes](install/flagger-install-on-kubernetes.md)
* [Flagger Install on Google Cloud](install/flagger-install-on-google-cloud.md)
* [Flagger Install on GKE Istio](install/flagger-install-on-google-cloud.md)
* [Flagger Install on EKS App Mesh](install/flagger-install-on-eks-appmesh.md)
* [Flagger Install with SuperGloo](install/flagger-install-with-supergloo.md)
## Usage
* [Canary Deployments](usage/progressive-delivery.md)
* [Istio Canary Deployments](usage/progressive-delivery.md)
* [Istio A/B Testing](usage/ab-testing.md)
* [App Mesh Canary Deployments](usage/appmesh-progressive-delivery.md)
* [NGINX Canary Deployments](usage/nginx-progressive-delivery.md)
* [Gloo Canary Deployments](usage/gloo-progressive-delivery.md)
* [Monitoring](usage/monitoring.md)
* [Alerting](usage/alerting.md)
## Tutorials
* [SMI Istio Canary Deployments](tutorials/flagger-smi-istio.md)
* [Canaries with Helm charts and GitOps](tutorials/canary-helm-gitops.md)
* [Zero downtime deployments](tutorials/zero-downtime-deployments.md)

21
docs/gitbook/faq.md Normal file
View File

@@ -0,0 +1,21 @@
# Frequently asked questions
**Can Flagger be part of my integration tests?**
> Yes, Flagger supports webhooks to do integration testing.
**What if I only want to target beta testers?**
> That's a feature in Flagger, not in App Mesh. It's on the App Mesh roadmap.
**When do I use A/B testing instead of a canary release?**
> One advantage of using A/B testing is that each version remains separated and routes aren't mixed.
>
> Using a Canary deployment can lead to behaviour like this one observed by a
> user:
>
> [..] during a canary deployment of our nodejs app, the version that is being served <50% traffic reports mime type mismatch errors in the browser (js as "text/html")
> When the deployment Passes/ Fails (doesn't really matter) the version that stays alive works as expected. If anyone has any tips or direction I would greatly appreciate it. Even if its as simple as I'm looking in the wrong place. Thanks in advance!
>
> The issue was that we were not maintaining session affinity while serving files for our frontend. Which resulted in any redirects or refreshes occasionally returning a mismatched app.*.js file (generated from vue)
>
> Read up on [A/B testing](https://docs.flagger.app/usage/ab-testing).

View File

@@ -1,10 +1,10 @@
# How it works
[Flagger](https://github.com/stefanprodan/flagger) takes a Kubernetes deployment and optionally
[Flagger](https://github.com/weaveworks/flagger) takes a Kubernetes deployment and optionally
a horizontal pod autoscaler \(HPA\) and creates a series of objects
\(Kubernetes deployments, ClusterIP services and Istio virtual services\) to drive the canary analysis and promotion.
\(Kubernetes deployments, ClusterIP services and Istio or App Mesh virtual services\) to drive the canary analysis and promotion.
![Flagger Canary Process](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-hpa.png)
![Flagger Canary Process](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-hpa.png)
### Canary Custom Resource
@@ -33,6 +33,8 @@ spec:
service:
# container port
port: 9898
# service port name (optional, will default to "http")
portName: http-podinfo
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
@@ -53,14 +55,14 @@ spec:
# canary increment step
# percentage (0-100)
stepWeight: 5
# Istio Prometheus checks
# Prometheus checks
metrics:
- name: istio_requests_total
- name: request-success-rate
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: istio_request_duration_seconds_bucket
- name: request-duration
# maximum req duration P99
# milliseconds
threshold: 500
@@ -93,6 +95,9 @@ spec:
app: podinfo
```
Besides `app` Flagger supports `name` and `app.kubernetes.io/name` selectors. If you use a different
convention you can specify your label with the `-selector-labels` flag.
The target deployment should expose a TCP port that will be used by Flagger to create the ClusterIP Service and
the Istio Virtual Service. The container port from the target deployment should match the `service.port` value.
@@ -115,9 +120,12 @@ spec:
service:
# container port
port: 9898
# service port name (optional, will default to "http")
portName: http-frontend
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
- mesh
# Istio virtual service host names (optional)
hosts:
- frontend.example.com
@@ -247,7 +255,7 @@ and for backend HTTP APIs that are accessible only from inside the mesh.
### Canary Stages
![Flagger Canary Stages](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-canary-steps.png)
![Flagger Canary Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-steps.png)
A canary deployment is triggered by changes in any of the following objects:
@@ -262,16 +270,22 @@ Gated canary promotion stages:
* check primary and canary deployments status
* halt advancement if a rolling update is underway
* halt advancement if pods are unhealthy
* call pre-rollout webhooks and check results
* halt advancement if any hook returned a non HTTP 2xx result
* increment the failed checks counter
* increase canary traffic weight percentage from 0% to 5% (step weight)
* call webhooks and check results
* call rollout webhooks and check results
* check canary HTTP request success rate and latency
* halt advancement if any metric is under the specified threshold
* increment the failed checks counter
* check if the number of failed checks reached the threshold
* route all traffic to primary
* scale to zero the canary deployment and mark it as failed
* call post-rollout webhooks
* post the analysis result to Slack
* wait for the canary deployment to be updated and start over
* increase canary traffic weight by 5% (step weight) till it reaches 50% (max weight)
* halt advancement if any webhook call fails
* halt advancement while canary request success rate is under the threshold
* halt advancement while canary request duration P99 is over the threshold
* halt advancement if the primary or canary deployment becomes unhealthy
@@ -284,6 +298,8 @@ Gated canary promotion stages:
* route all traffic to primary
* scale to zero the canary deployment
* mark rollout as finished
* call post-rollout webhooks
* post the analysis result to Slack
* wait for the canary deployment to be updated and start over
### Canary Analysis
@@ -327,6 +343,49 @@ At any time you can set the `spec.skipAnalysis: true`.
When skip analysis is enabled, Flagger checks if the canary deployment is healthy and
promotes it without analysing it. If an analysis is underway, Flagger cancels it and runs the promotion.
### A/B Testing
Besides weighted routing, Flagger can be configured to route traffic to the canary based on HTTP match conditions.
In an A/B testing scenario, you'll be using HTTP headers or cookies to target a certain segment of your users.
This is particularly useful for frontend applications that require session affinity.
You can enable A/B testing by specifying the HTTP match conditions and the number of iterations:
```yaml
canaryAnalysis:
# schedule interval (default 60s)
interval: 1m
# total number of iterations
iterations: 10
# max number of failed iterations before rollback
threshold: 2
# canary match condition
match:
- headers:
user-agent:
regex: "^(?!.*Chrome).*Safari.*"
- headers:
cookie:
regex: "^(.*?;)?(user=test)(;.*)?$"
```
If Flagger finds a HTTP match condition, it will ignore the `maxWeight` and `stepWeight` settings.
The above configuration will run an analysis for ten minutes targeting the Safari users and those that have a test cookie.
You can determine the minimum time that it takes to validate and promote a canary deployment using this formula:
```
interval * iterations
```
And the time it takes for a canary to be rolled back when the metrics or webhook checks are failing:
```
interval * threshold
```
Make sure that the analysis threshold is lower than the number of iterations.
### HTTP Metrics
The canary analysis is using the following Prometheus queries:
@@ -338,14 +397,14 @@ Spec:
```yaml
canaryAnalysis:
metrics:
- name: istio_requests_total
- name: request-success-rate
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
```
Query:
Istio query:
```javascript
sum(
@@ -370,6 +429,29 @@ sum(
)
```
App Mesh query:
```javascript
sum(
rate(
envoy_cluster_upstream_rq{
kubernetes_namespace="$namespace",
kubernetes_pod_name=~"$workload",
response_code!~"5.*"
}[$interval]
)
)
/
sum(
rate(
envoy_cluster_upstream_rq{
kubernetes_namespace="$namespace",
kubernetes_pod_name=~"$workload"
}[$interval]
)
)
```
**HTTP requests milliseconds duration P99**
Spec:
@@ -377,14 +459,14 @@ Spec:
```yaml
canaryAnalysis:
metrics:
- name: istio_request_duration_seconds_bucket
- name: request-duration
# maximum req duration P99
# milliseconds
threshold: 500
interval: 1m
```
Query:
Istio query:
```javascript
histogram_quantile(0.99,
@@ -400,6 +482,21 @@ histogram_quantile(0.99,
)
```
App Mesh query:
```javascript
histogram_quantile(0.99,
sum(
irate(
envoy_cluster_upstream_rq_time_bucket{
kubernetes_pod_name=~"$workload",
kubernetes_namespace=~"$namespace"
}[$interval]
)
) by (le)
)
```
> **Note** that the metric interval should be lower or equal to the control loop interval.
### Custom Metrics
@@ -441,42 +538,89 @@ The above configuration validates the canary by checking
if the HTTP 404 req/sec percentage is below 5 percent of the total traffic.
If the 404s rate reaches the 5% threshold, then the canary fails.
```yaml
canaryAnalysis:
threshold: 1
maxWeight: 50
stepWeight: 5
metrics:
- name: "rpc error rate"
threshold: 5
query: |
100 - (sum
rate(
grpc_server_handled_total{
grpc_service="my.TestService",
grpc_code!="OK"
}[1m]
)
)
/
sum(
rate(
grpc_server_started_total{
grpc_service="my.TestService"
}[1m]
)
) * 100
```
The above configuration validates the canary by checking if the percentage of
non-OK GRPC req/sec is below 5 percent of the total requests. If the non-OK
rate reaches the 5% threshold, then the canary fails.
When specifying a query, Flagger will run the promql query and convert the result to float64.
Then it compares the query result value with the metric threshold value.
### Webhooks
The canary analysis can be extended with webhooks.
Flagger will call each webhook URL and determine from the response status code (HTTP 2xx) if the canary is failing or not.
The canary analysis can be extended with webhooks. Flagger will call each webhook URL and
determine from the response status code (HTTP 2xx) if the canary is failing or not.
There are three types of hooks:
* Pre-rollout hooks are executed before routing traffic to canary.
The canary advancement is paused if a pre-rollout hook fails and if the number of failures reaches the
threshold the canary will be rolled back.
* Rollout hooks are executed during the analysis on each iteration before the metric checks.
If a rollout hook call fails the canary advancement is paused and eventually rolled back.
* Post-rollout hooks are executed after the canary has been promoted or rolled back.
If a post rollout hook fails the error is logged.
Spec:
```yaml
canaryAnalysis:
webhooks:
- name: integration-test
url: http://int-runner.test:8080/
timeout: 30s
metadata:
test: "all"
token: "16688eb5e9f289f1991c"
- name: db-test
- name: "smoke test"
type: pre-rollout
url: http://migration-check.db/query
timeout: 30s
metadata:
key1: "val1"
key2: "val2"
- name: "load test"
type: rollout
url: http://flagger-loadtester.test/
timeout: 15s
metadata:
cmd: "hey -z 1m -q 5 -c 2 http://podinfo-canary.test:9898/"
- name: "notify"
type: post-rollout
url: http://telegram.bot:8080/
timeout: 5s
metadata:
some: "message"
```
> **Note** that the sum of all webhooks timeouts should be lower than the control loop interval.
> **Note** that the sum of all rollout webhooks timeouts should be lower than the analysis interval.
Webhook payload (HTTP POST):
```json
{
"name": "podinfo",
"namespace": "test",
"namespace": "test",
"phase": "Progressing",
"metadata": {
"test": "all",
"token": "16688eb5e9f289f1991c"
@@ -501,12 +645,12 @@ Flagger metric checks will fail with "no values found for metric istio_requests_
Flagger comes with a load testing service based on [rakyll/hey](https://github.com/rakyll/hey)
that generates traffic during analysis when configured as a webhook.
![Flagger Load Testing Webhook](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-load-testing.png)
![Flagger Load Testing Webhook](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-load-testing.png)
First you need to deploy the load test runner in a namespace with Istio sidecar injection enabled:
```bash
export REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
@@ -519,7 +663,6 @@ helm repo add flagger https://flagger.app
helm upgrade -i flagger-loadtester flagger/loadtester \
--namespace=test \
--set cmd.logOutput=true \
--set cmd.timeout=1h
```
@@ -533,17 +676,19 @@ webhooks:
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
type: cmd
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
- name: load-test-post
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
type: cmd
cmd: "hey -z 1m -q 10 -c 2 -m POST -d '{test: 2}' http://podinfo.test:9898/echo"
```
When the canary analysis starts, Flagger will call the webhooks and the load tester will run the `hey` commands
in the background, if they are not already running. This will ensure that during the
analysis, the `podinfo.test` virtual service will receive a steady steam of GET and POST requests.
analysis, the `podinfo.test` virtual service will receive a steady stream of GET and POST requests.
If your workload is exposed outside the mesh with the Istio Gateway and TLS you can point `hey` to the
public URL and use HTTP2.
@@ -554,6 +699,7 @@ webhooks:
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
type: cmd
cmd: "hey -z 1m -q 10 -c 2 -h2 https://podinfo.example.com/"
```
@@ -566,3 +712,32 @@ FROM quay.io/stefanprodan/flagger-loadtester:<VER>
RUN curl -Lo /usr/local/bin/my-cli https://github.com/user/repo/releases/download/ver/my-cli \
&& chmod +x /usr/local/bin/my-cli
```
### Load Testing Delegation
The load tester can also forward testing tasks to external tools; currently [nGrinder](https://github.com/naver/ngrinder)
is supported.
To use this feature, add a load test task of type 'ngrinder' to the canary analysis spec:
```yaml
webhooks:
- name: load-test-post
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
# type of this load test task, cmd or ngrinder
type: ngrinder
# base url of your nGrinder controller server
server: http://ngrinder-server:port
# id of the test to clone from, the test must have been defined.
clone: 100
# user name and base64 encoded password to authenticate against the nGrinder server
username: admin
passwd: YWRtaW4=
# the interval between nGrinder test status polls, defaults to 1s
pollInterval: 5s
```
When the canary analysis starts, the load tester will initiate a [clone_and_start request](https://github.com/naver/ngrinder/wiki/REST-API-PerfTest)
to the nGrinder server and start a new performance test. The load tester will periodically poll the nGrinder server
for the status of the test, and prevent duplicate requests from being sent in subsequent analysis loops.

View File

@@ -0,0 +1,176 @@
# Flagger install on AWS
This guide walks you through setting up Flagger and AWS App Mesh on EKS.
### App Mesh
The App Mesh integration with EKS is made out of the following components:
* Kubernetes custom resources
* `mesh.appmesh.k8s.aws` defines a logical boundary for network traffic between the services
* `virtualnode.appmesh.k8s.aws` defines a logical pointer to a Kubernetes workload
* `virtualservice.appmesh.k8s.aws` defines the routing rules for a workload inside the mesh
* CRD controller - keeps the custom resources in sync with the App Mesh control plane
* Admission controller - injects the Envoy sidecar and assigns Kubernetes pods to App Mesh virtual nodes
* Metrics server - Prometheus instance that collects and stores Envoy's metrics
Prerequisites:
* jq
* homebrew
* openssl
* kubectl
* AWS CLI (default region us-west-2)
### Create a Kubernetes cluster
In order to create an EKS cluster you can use [eksctl](https://eksctl.io).
Eksctl is an open source command-line utility made by Weaveworks in collaboration with Amazon,
it's a Kubernetes-native tool written in Go.
On MacOS you can install eksctl with Homebrew:
```bash
brew tap weaveworks/tap
brew install weaveworks/tap/eksctl
```
Create an EKS cluster:
```bash
eksctl create cluster --name=appmesh \
--region=us-west-2 \
--appmesh-access
```
The above command will create a two-node cluster with App Mesh
[IAM policy](https://docs.aws.amazon.com/app-mesh/latest/userguide/MESH_IAM_user_policies.html)
attached to the EKS node instance role.
Verify the install with:
```bash
kubectl get nodes
```
### Install Helm
Install the [Helm](https://docs.helm.sh/using_helm/#installing-helm) command-line tool:
```text
brew install kubernetes-helm
```
Create a service account and a cluster role binding for Tiller:
```bash
kubectl -n kube-system create sa tiller
kubectl create clusterrolebinding tiller-cluster-rule \
--clusterrole=cluster-admin \
--serviceaccount=kube-system:tiller
```
Deploy Tiller in the `kube-system` namespace:
```bash
helm init --service-account tiller
```
You should consider using SSL between Helm and Tiller, for more information on securing your Helm
installation see [docs.helm.sh](https://docs.helm.sh/using_helm/#securing-your-helm-installation).
### Enable horizontal pod auto-scaling
Install the Horizontal Pod Autoscaler (HPA) metrics provider:
```bash
helm upgrade -i metrics-server stable/metrics-server \
--namespace kube-system
```
After a minute, the metrics API should report CPU and memory usage for pods.
You can verify the metrics API with:
```bash
kubectl -n kube-system top pods
```
### Install the App Mesh components
Run the App Mesh installer:
```bash
curl -fsSL https://git.io/get-app-mesh-eks.sh | bash -
```
The installer does the following:
* creates the `appmesh-system` namespace
* generates a certificate signed by Kubernetes CA
* registers the App Mesh mutating webhook
* deploys the App Mesh webhook in `appmesh-system` namespace
* deploys the App Mesh CRDs
* deploys the App Mesh controller in `appmesh-system` namespace
* creates a mesh called `global`
Verify that the global mesh is active:
```bash
kubectl describe mesh
Status:
Mesh Condition:
Status: True
Type: MeshActive
```
### Install Flagger and Grafana
Add Flagger Helm repository:
```bash
helm repo add flagger https://flagger.app
```
Deploy Flagger and Prometheus in the _**appmesh-system**_ namespace:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=appmesh-system \
--set meshProvider=appmesh \
--set prometheus.install=true
```
In order to collect the App Mesh metrics that Flagger needs to run the canary analysis,
you'll need to setup a Prometheus instance to scrape the Envoy sidecars.
You can enable **Slack** notifications with:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=appmesh-system \
--set meshProvider=appmesh \
--set metricsServer=http://prometheus.appmesh:9090 \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general \
--set slack.user=flagger
```
Flagger comes with a Grafana dashboard made for monitoring the canary analysis.
Deploy Grafana in the _**appmesh-system**_ namespace:
```bash
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=appmesh-system \
--set url=http://flagger-prometheus.appmesh-system:9090
```
You can access Grafana using port forwarding:
```bash
kubectl -n appmesh-system port-forward svc/flagger-grafana 3000:80
```
Now that you have Flagger running you can try the
[App Mesh canary deployments tutorial](https://docs.flagger.app/usage/appmesh-progressive-delivery).

View File

@@ -2,7 +2,7 @@
This guide walks you through setting up Flagger and Istio on Google Kubernetes Engine.
![GKE Cluster Overview](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-gke-istio.png)
![GKE Cluster Overview](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-gke-istio.png)
### Prerequisites
@@ -186,7 +186,7 @@ Install cert-manager's CRDs:
```bash
CERT_REPO=https://raw.githubusercontent.com/jetstack/cert-manager
kubectl apply -f ${CERT_REPO}/release-0.6/deploy/manifests/00-crds.yaml
kubectl apply -f ${CERT_REPO}/release-0.7/deploy/manifests/00-crds.yaml
```
Create the cert-manager namespace and disable resource validation:
@@ -200,20 +200,22 @@ kubectl label namespace cert-manager certmanager.k8s.io/disable-validation=true
Install cert-manager with Helm:
```bash
helm repo update && helm upgrade -i cert-manager \
helm repo add jetstack https://charts.jetstack.io && \
helm repo update && \
helm upgrade -i cert-manager \
--namespace cert-manager \
--version v0.6.0 \
stable/cert-manager
--version v0.7.0 \
jetstack/cert-manager
```
### Istio Gateway TLS setup
![Istio Let&apos;s Encrypt](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/istio-cert-manager-gke.png)
![Istio Let&apos;s Encrypt](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/istio-cert-manager-gke.png)
Create a generic Istio Gateway to expose services outside the mesh on HTTPS:
```bash
REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master
REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
kubectl apply -f ${REPO}/artifacts/gke/istio-gateway.yaml
```
@@ -284,7 +286,7 @@ metadata:
name: istio-gateway
namespace: istio-system
spec:
secretname: istio-ingressgateway-certs
secretName: istio-ingressgateway-certs
issuerRef:
name: letsencrypt-prod
commonName: "*.example.com"
@@ -332,7 +334,7 @@ Because Flagger uses the Istio HTTP metrics to run the canary analysis you have
Prometheus configuration that's similar to the one that comes with the official Istio Helm chart.
```bash
REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master
REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
kubectl apply -f ${REPO}/artifacts/gke/istio-prometheus.yaml
```

View File

@@ -52,7 +52,8 @@ If you don't have Tiller you can use the helm template command and apply the gen
```bash
# generate
helm template flagger/flagger \
helm fetch --untar --untardir . flagger/flagger &&
helm template flagger \
--name flagger \
--namespace=istio-system \
--set metricsServer=http://prometheus.istio-system:9090 \
@@ -98,12 +99,10 @@ Or use helm template command and apply the generated yaml with kubectl:
```bash
# generate
helm template flagger/grafana \
helm fetch --untar --untardir . flagger/grafana &&
helm template grafana \
--name flagger-grafana \
--namespace=istio-system \
--set url=http://prometheus.istio-system:9090 \
--set user=admin \
--set password=change-me \
> $HOME/flagger-grafana.yaml
# apply
@@ -113,7 +112,7 @@ kubectl apply -f $HOME/flagger-grafana.yaml
You can access Grafana using port forwarding:
```bash
kubectl -n istio-system port-forward svc/flagger-grafana 3000:3000
kubectl -n istio-system port-forward svc/flagger-grafana 3000:80
```
### Install Load Tester
@@ -126,17 +125,20 @@ Deploy the load test runner with Helm:
```bash
helm upgrade -i flagger-loadtester flagger/loadtester \
--namespace=test \
--set cmd.logOutput=true \
--set cmd.timeout=1h
```
Deploy with kubectl:
```bash
export REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master
helm fetch --untar --untardir . flagger/loadtester &&
helm template loadtester \
--name flagger-loadtester \
--namespace=test
> $HOME/flagger-loadtester.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
# apply
kubectl apply -f $HOME/flagger-loadtester.yaml
```
> **Note** that the load tester should be deployed in a namespace with Istio sidecar injection enabled.

View File

@@ -0,0 +1,184 @@
# Flagger install on Kubernetes with SuperGloo
This guide walks you through setting up Flagger on a Kubernetes cluster using [SuperGloo](https://github.com/solo-io/supergloo).
SuperGloo by [Solo.io](https://solo.io) is an opinionated abstraction layer that simplifies the installation, management, and operation of your service mesh.
It supports running multiple ingresses with multiple meshes (Istio, App Mesh, Consul Connect and Linkerd 2) in the same cluster.
### Prerequisites
Flagger requires a Kubernetes cluster **v1.11** or newer with the following admission controllers enabled:
* MutatingAdmissionWebhook
* ValidatingAdmissionWebhook
### Install Istio with SuperGloo
#### Install SuperGloo command line interface helper
SuperGloo includes a command line helper (CLI) that makes operation of SuperGloo easier.
The CLI is not required for SuperGloo to function correctly.
If you use [Homebrew](https://brew.sh) package manager run the following
commands to install the SuperGloo CLI.
```bash
brew tap solo-io/tap
brew install solo-io/tap/supergloo
```
Or you can download SuperGloo CLI and add it to your path:
```bash
curl -sL https://run.solo.io/supergloo/install | sh
export PATH=$HOME/.supergloo/bin:$PATH
```
#### Install SuperGloo controller
Deploy the SuperGloo controller in the `supergloo-system` namespace:
```bash
supergloo init
```
This is equivalent to installing SuperGloo using its Helm chart
```bash
helm repo add supergloo http://storage.googleapis.com/supergloo-helm
helm upgrade --install supergloo supergloo/supergloo --namespace supergloo-system
```
#### Install Istio using SuperGloo
Create the `istio-system` namespace and install Istio with traffic management, telemetry and Prometheus enabled:
```bash
ISTIO_VER="1.0.6"
kubectl create namespace istio-system
supergloo install istio --name istio \
--namespace=supergloo-system \
--auto-inject=true \
--installation-namespace=istio-system \
--mtls=false \
--prometheus=true \
--version=${ISTIO_VER}
```
This creates a Kubernetes custom resource like the following.
```yaml
apiVersion: supergloo.solo.io/v1
kind: Install
metadata:
name: istio
namespace: supergloo-system
spec:
installationNamespace: istio-system
mesh:
installedMesh:
name: istio
namespace: supergloo-system
istioMesh:
enableAutoInject: true
enableMtls: false
installGrafana: false
installJaeger: false
installPrometheus: true
istioVersion: 1.0.6
```
#### Allow Flagger to manipulate SuperGloo
Create a cluster role binding so that Flagger can manipulate SuperGloo custom resources:
```bash
kubectl create clusterrolebinding flagger-supergloo \
--clusterrole=mesh-discovery \
--serviceaccount=istio-system:flagger
```
Wait for the Istio control plane to become available:
```bash
kubectl --namespace istio-system rollout status deployment/istio-sidecar-injector
kubectl --namespace istio-system rollout status deployment/prometheus
```
### Install Flagger
Add Flagger Helm repository:
```bash
helm repo add flagger https://flagger.app
```
Deploy Flagger in the _**istio-system**_ namespace and set the service mesh provider to SuperGloo:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set metricsServer=http://prometheus.istio-system:9090 \
--set meshProvider=supergloo:istio.supergloo-system
```
When using SuperGloo the mesh provider format is `supergloo:<MESH-NAME>.<SUPERGLOO-NAMESPACE>`.
Optionally you can enable **Slack** notifications:
```bash
helm upgrade -i flagger flagger/flagger \
--reuse-values \
--namespace=istio-system \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general \
--set slack.user=flagger
```
### Install Grafana
Flagger comes with a Grafana dashboard made for monitoring the canary analysis.
Deploy Grafana in the _**istio-system**_ namespace:
```bash
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=istio-system \
--set url=http://prometheus.istio-system:9090
```
You can access Grafana using port forwarding:
```bash
kubectl -n istio-system port-forward svc/flagger-grafana 3000:80
```
### Install Load Tester
Flagger comes with an optional load testing service that generates traffic
during canary analysis when configured as a webhook.
Deploy the load test runner with Helm:
```bash
helm upgrade -i flagger-loadtester flagger/loadtester \
--namespace=test \
--set cmd.timeout=1h
```
Deploy with kubectl:
```bash
helm fetch --untar --untardir . flagger/loadtester &&
helm template loadtester \
--name flagger-loadtester \
--namespace=test
> $HOME/flagger-loadtester.yaml
# apply
kubectl apply -f $HOME/flagger-loadtester.yaml
```
> **Note** that the load tester should be deployed in a namespace with Istio sidecar injection enabled.

View File

@@ -29,7 +29,7 @@ You can find the chart source [here](https://github.com/stefanprodan/flagger/tre
Create a test namespace with Istio sidecar injection enabled:
```bash
export REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
```
@@ -79,7 +79,7 @@ Flagger will route all traffic to the primary pods and scale to zero the `fronte
Open your browser and navigate to the frontend URL:
![Podinfo Frontend](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/demo-frontend.png)
![Podinfo Frontend](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/demo-frontend.png)
Now let's install the `backend` release without exposing it outside the mesh:
@@ -104,7 +104,7 @@ frontend Initialized 0 2019-02-12T17:50:50Z
Click on the ping button in the `frontend` UI to trigger a HTTP POST request
that will reach the `backend` app:
![Jaeger Tracing](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/demo-frontend-jaeger.png)
![Jaeger Tracing](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/demo-frontend-jaeger.png)
We'll use the `/echo` endpoint (same as the one the ping button calls)
to generate load on both apps during a canary deployment.
@@ -155,7 +155,7 @@ You can monitor the canary deployment with Grafana. Open the Flagger dashboard,
select `test` from the namespace dropdown, `frontend-primary` from the primary dropdown and `frontend` from the
canary dropdown.
![Flagger Grafana Dashboard](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/demo-frontend-dashboard.png)
![Flagger Grafana Dashboard](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/demo-frontend-dashboard.png)
Now trigger a canary deployment for the `backend` app, but this time you'll change a value in the configmap:
@@ -213,7 +213,7 @@ Copying backend.test template spec to backend-primary.test
Promotion completed! Scaling down backend.test
```
![Flagger Grafana Dashboard](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/demo-backend-dashboard.png)
![Flagger Grafana Dashboard](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/demo-backend-dashboard.png)
If the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
the canary is scaled to zero and the rollout is marked as failed.
@@ -237,7 +237,7 @@ In the [GitOps model](https://www.weave.works/technologies/gitops/),
any change to production must be committed in source control
prior to being applied on the cluster. This way rollback and audit logs are provided by Git.
![Helm GitOps Canary Deployment](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/diagrams/flagger-flux-gitops.png)
![Helm GitOps Canary Deployment](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-flux-gitops.png)
In order to apply the GitOps pipeline model to Flagger canary deployments you'll need
a Git repository with your workloads definitions in YAML format,

View File

@@ -0,0 +1,332 @@
# Flagger SMI
This guide shows you how to use the SMI Istio adapter and Flagger to automate canary deployments.
### Prerequisites
Flagger requires a Kubernetes cluster **v1.11** or newer with the following admission controllers enabled:
* MutatingAdmissionWebhook
* ValidatingAdmissionWebhook
Flagger depends on [Istio](https://istio.io/docs/setup/kubernetes/quick-start/) **v1.0.3** or newer
with traffic management, telemetry and Prometheus enabled.
A minimal Istio installation should contain the following services:
* istio-pilot
* istio-ingressgateway
* istio-sidecar-injector
* istio-telemetry
* prometheus
### Install Istio and the SMI adapter
Add Istio Helm repository:
```bash
helm repo add istio.io https://storage.googleapis.com/istio-release/releases/1.1.5/charts
```
Install Istio CRDs:
```bash
helm upgrade -i istio-init istio.io/istio-init --wait --namespace istio-system
kubectl -n istio-system wait --for=condition=complete job/istio-init-crd-11
```
Install Istio:
```bash
helm upgrade -i istio istio.io/istio --wait --namespace istio-system
```
Create a generic Istio gateway to expose services outside the mesh on HTTP:
```yaml
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
name: public-gateway
namespace: istio-system
spec:
selector:
istio: ingressgateway
servers:
- port:
number: 80
name: http
protocol: HTTP
hosts:
- "*"
```
Save the above resource as public-gateway.yaml and then apply it:
```bash
kubectl apply -f ./public-gateway.yaml
```
Find the Gateway load balancer IP and add a DNS record for it:
```bash
kubectl -n istio-system get svc/istio-ingressgateway -ojson | jq -r .status.loadBalancer.ingress[0].ip
```
Install the SMI adapter:
```bash
REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
kubectl apply -f ${REPO}/artifacts/smi/istio-adapter.yaml
```
### Install Flagger and Grafana
Add Flagger Helm repository:
```bash
helm repo add flagger https://flagger.app
```
Deploy Flagger in the _**istio-system**_ namespace:
```bash
helm upgrade -i flagger flagger/flagger \
--namespace=istio-system \
--set image.tag=master-12d84b2 \
--set meshProvider=smi:istio
```
Flagger comes with a Grafana dashboard made for monitoring the canary deployments.
Deploy Grafana in the _**istio-system**_ namespace:
```bash
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=istio-system \
--set url=http://prometheus.istio-system:9090
```
You can access Grafana using port forwarding:
```bash
kubectl -n istio-system port-forward svc/flagger-grafana 3000:80
```
### Workloads bootstrap
Create a test namespace with Istio sidecar injection enabled:
```bash
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
```
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -f ${REPO}/artifacts/canaries/deployment.yaml
kubectl apply -f ${REPO}/artifacts/canaries/hpa.yaml
```
Deploy the load testing service to generate traffic during the canary analysis:
```bash
kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
```
Create a canary custom resource (replace example.com with your own domain):
```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
# the maximum time in seconds for the canary deployment
  # to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: podinfo
service:
# container port
port: 9898
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
# Istio virtual service host names (optional)
hosts:
- app.example.com
canaryAnalysis:
# schedule interval (default 60s)
interval: 10s
# max number of failed metric checks before rollback
threshold: 5
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 10
metrics:
- name: request-success-rate
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: request-duration
# maximum req duration P99
# milliseconds
threshold: 500
interval: 30s
# generate traffic during analysis
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
```
Save the above resource as podinfo-canary.yaml and then apply it:
```bash
kubectl apply -f ./podinfo-canary.yaml
```
After a couple of seconds Flagger will create the canary objects:
```bash
# applied
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
canary.flagger.app/podinfo
# generated
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
trafficsplits.split.smi-spec.io/podinfo
```
### Automated canary promotion
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators
like HTTP requests success rate, requests average duration and pod health.
Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack.
![Flagger Canary Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-steps.png)
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.1
```
Flagger detects that the deployment revision changed and starts a new rollout:
```text
kubectl -n istio-system logs deployment/flagger -f | jq .msg
New revision detected podinfo.test
Scaling up podinfo.test
Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Advance podinfo.test canary weight 20
Advance podinfo.test canary weight 25
Advance podinfo.test canary weight 30
Advance podinfo.test canary weight 35
Advance podinfo.test canary weight 40
Advance podinfo.test canary weight 45
Advance podinfo.test canary weight 50
Copying podinfo.test template spec to podinfo-primary.test
Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
Promotion completed! Scaling down podinfo.test
```
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
During the analysis the canary's progress can be monitored with Grafana. The Istio dashboard URL is
http://localhost:3000/d/flagger-istio/istio-canary?refresh=10s&orgId=1&var-namespace=test&var-primary=podinfo-primary&var-canary=podinfo
You can monitor all canaries with:
```bash
watch kubectl get canaries --all-namespaces
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
test podinfo Progressing 15 2019-05-16T14:05:07Z
prod frontend Succeeded 0 2019-05-15T16:15:07Z
prod backend Failed 0 2019-05-14T17:05:07Z
```
### Automated rollback
During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses the rollout.
Create a tester pod and exec into it:
```bash
kubectl -n test run tester \
--image=quay.io/stefanprodan/podinfo:1.2.1 \
-- ./podinfo --port=9898
kubectl -n test exec -it tester-xx-xx sh
```
Generate HTTP 500 errors:
```bash
watch curl http://podinfo-canary:9898/status/500
```
Generate latency:
```bash
watch curl http://podinfo-canary:9898/delay/1
```
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
the canary is scaled to zero and the rollout is marked as failed.
```text
kubectl -n test describe canary/podinfo
Status:
Canary Weight: 0
Failed Checks: 10
Phase: Failed
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger Starting canary deployment for podinfo.test
Normal Synced 3m flagger Advance podinfo.test canary weight 5
Normal Synced 3m flagger Advance podinfo.test canary weight 10
Normal Synced 3m flagger Advance podinfo.test canary weight 15
Normal Synced 3m flagger Halt podinfo.test advancement success rate 69.17% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 61.39% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 55.06% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 47.00% < 99%
Normal Synced 2m flagger (combined from similar events): Halt podinfo.test advancement success rate 38.08% < 99%
Warning Synced 1m flagger Rolling back podinfo.test failed checks threshold reached 10
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
```

View File

@@ -0,0 +1,213 @@
# Istio A/B Testing
This guide shows you how to automate A/B testing with Istio and Flagger.
Besides weighted routing, Flagger can be configured to route traffic to the canary based on HTTP match conditions.
In an A/B testing scenario, you'll be using HTTP headers or cookies to target a certain segment of your users.
This is particularly useful for frontend applications that require session affinity.
![Flagger A/B Testing Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-abtest-steps.png)
### Bootstrap
Create a test namespace with Istio sidecar injection enabled:
```bash
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
```
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -f ${REPO}/artifacts/ab-testing/deployment.yaml
kubectl apply -f ${REPO}/artifacts/ab-testing/hpa.yaml
```
Deploy the load testing service to generate traffic during the canary analysis:
```bash
kubectl -n test apply -f ${REPO}/artifacts/loadtester/deployment.yaml
kubectl -n test apply -f ${REPO}/artifacts/loadtester/service.yaml
```
Create a canary custom resource (replace example.com with your own domain):
```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: abtest
namespace: test
spec:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: abtest
# the maximum time in seconds for the canary deployment
# to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: abtest
service:
# container port
port: 9898
# Istio gateways (optional)
gateways:
- public-gateway.istio-system.svc.cluster.local
# Istio virtual service host names (optional)
hosts:
- app.example.com
canaryAnalysis:
# schedule interval (default 60s)
interval: 1m
# total number of iterations
iterations: 10
# max number of failed iterations before rollback
threshold: 2
# canary match condition
match:
- headers:
user-agent:
regex: "^(?!.*Chrome).*Safari.*"
- headers:
cookie:
regex: "^(.*?;)?(type=insider)(;.*)?$"
metrics:
- name: request-success-rate
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: request-duration
# maximum req duration P99
# milliseconds
threshold: 500
interval: 30s
# generate traffic during analysis
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 1m -q 10 -c 2 -H 'Cookie: type=insider' http://podinfo.test:9898/"
```
The above configuration will run an analysis for ten minutes targeting Safari users and those that have an insider cookie.
Save the above resource as podinfo-abtest.yaml and then apply it:
```bash
kubectl apply -f ./podinfo-abtest.yaml
```
After a couple of seconds Flagger will create the canary objects:
```bash
# applied
deployment.apps/abtest
horizontalpodautoscaler.autoscaling/abtest
canary.flagger.app/abtest
# generated
deployment.apps/abtest-primary
horizontalpodautoscaler.autoscaling/abtest-primary
service/abtest
service/abtest-canary
service/abtest-primary
virtualservice.networking.istio.io/abtest
```
### Automated canary promotion
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/abtest \
podinfod=quay.io/stefanprodan/podinfo:1.4.1
```
Flagger detects that the deployment revision changed and starts a new rollout:
```text
kubectl -n test describe canary/abtest
Status:
Failed Checks: 0
Phase: Succeeded
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger New revision detected abtest.test
Normal Synced 3m flagger Scaling up abtest.test
Warning Synced 3m flagger Waiting for abtest.test rollout to finish: 0 of 1 updated replicas are available
Normal Synced 3m flagger Advance abtest.test canary iteration 1/10
Normal Synced 3m flagger Advance abtest.test canary iteration 2/10
Normal Synced 3m flagger Advance abtest.test canary iteration 3/10
Normal Synced 2m flagger Advance abtest.test canary iteration 4/10
Normal Synced 2m flagger Advance abtest.test canary iteration 5/10
Normal Synced 1m flagger Advance abtest.test canary iteration 6/10
Normal Synced 1m flagger Advance abtest.test canary iteration 7/10
Normal Synced 55s flagger Advance abtest.test canary iteration 8/10
Normal Synced 45s flagger Advance abtest.test canary iteration 9/10
Normal Synced 35s flagger Advance abtest.test canary iteration 10/10
Normal Synced 25s flagger Copying abtest.test template spec to abtest-primary.test
Warning Synced 15s flagger Waiting for abtest-primary.test rollout to finish: 1 of 2 updated replicas are available
Normal Synced 5s flagger Promotion completed! Scaling down abtest.test
```
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
You can monitor all canaries with:
```bash
watch kubectl get canaries --all-namespaces
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
test abtest Progressing 100 2019-03-16T14:05:07Z
prod frontend Succeeded 0 2019-03-15T16:15:07Z
prod backend Failed 0 2019-03-14T17:05:07Z
```
### Automated rollback
During the canary analysis you can generate HTTP 500 errors and high latency to test Flagger's rollback.
Generate HTTP 500 errors:
```bash
watch curl -b 'type=insider' http://app.example.com/status/500
```
Generate latency:
```bash
watch curl -b 'type=insider' http://app.example.com/delay/1
```
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
the canary is scaled to zero and the rollout is marked as failed.
```text
kubectl -n test describe canary/abtest
Status:
Failed Checks: 2
Phase: Failed
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger Starting canary deployment for abtest.test
Normal Synced 3m flagger Advance abtest.test canary iteration 1/10
Normal Synced 3m flagger Advance abtest.test canary iteration 2/10
Normal Synced 3m flagger Advance abtest.test canary iteration 3/10
Normal Synced 3m flagger Halt abtest.test advancement success rate 69.17% < 99%
Normal Synced 2m flagger Halt abtest.test advancement success rate 61.39% < 99%
Warning Synced 2m flagger Rolling back abtest.test failed checks threshold reached 2
Warning Synced 1m flagger Canary failed! Scaling down abtest.test
```

View File

@@ -15,12 +15,12 @@ helm upgrade -i flagger flagger/flagger \
Once configured with a Slack incoming **webhook**, Flagger will post messages when a canary deployment
has been initialised, when a new revision has been detected and if the canary analysis failed or succeeded.
![flagger-slack](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/slack-canary-notifications.png)
![Slack Notifications](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/slack-canary-notifications.png)
A canary deployment will be rolled back if the progress deadline exceeded or if the analysis reached the
maximum number of failed checks:
![flagger-slack-errors](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/slack-canary-failed.png)
![Slack Notifications](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/slack-canary-failed.png)
### Prometheus Alert Manager

View File

@@ -0,0 +1,286 @@
# App Mesh Canary Deployments
This guide shows you how to use App Mesh and Flagger to automate canary deployments.
You'll need an EKS cluster configured with App Mesh, you can find the install guide
[here](https://docs.flagger.app/install/flagger-install-on-eks-appmesh).
### Bootstrap
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
then creates a series of objects (Kubernetes deployments, ClusterIP services, App Mesh virtual nodes and services).
These objects expose the application on the mesh and drive the canary analysis and promotion.
The only App Mesh object you need to create by yourself is the mesh resource.
Create a mesh called `global`:
```bash
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
kubectl apply -f ${REPO}/artifacts/appmesh/global-mesh.yaml
```
Create a test namespace with App Mesh sidecar injection enabled:
```bash
kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
```
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -f ${REPO}/artifacts/appmesh/deployment.yaml
kubectl apply -f ${REPO}/artifacts/appmesh/hpa.yaml
```
Deploy the load testing service to generate traffic during the canary analysis:
```bash
helm upgrade -i flagger-loadtester flagger/loadtester \
--namespace=test \
--set meshName=global.appmesh-system \
--set "backends[0]=podinfo.test"
```
Create a canary custom resource:
```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
# the maximum time in seconds for the canary deployment
# to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: podinfo
service:
# container port
port: 9898
# App Mesh reference
meshName: global.appmesh-system
# App Mesh egress (optional)
backends:
- backend.test
# define the canary analysis timing and KPIs
canaryAnalysis:
# schedule interval (default 60s)
interval: 10s
# max number of failed metric checks before rollback
threshold: 10
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 5
# App Mesh Prometheus checks
metrics:
- name: request-success-rate
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
# external checks (optional)
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
cmd: "hey -z 1m -q 10 -c 2 http://podinfo.test:9898/"
```
Save the above resource as podinfo-canary.yaml and then apply it:
```bash
kubectl apply -f ./podinfo-canary.yaml
```
After a couple of seconds Flagger will create the canary objects:
```bash
# applied
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
canary.flagger.app/podinfo
# generated Kubernetes objects
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
# generated App Mesh objects
virtualnode.appmesh.k8s.aws/podinfo
virtualnode.appmesh.k8s.aws/podinfo-canary
virtualnode.appmesh.k8s.aws/podinfo-primary
virtualservice.appmesh.k8s.aws/podinfo.test
```
The App Mesh specific settings are:
```yaml
service:
port: 9898
meshName: global.appmesh-system
backends:
- backend1.test
- backend2.test
```
App Mesh blocks all egress traffic by default. If your application needs to call another service, you have to create an
App Mesh virtual service for it and add the virtual service name to the backend list.
### Setup App Mesh ingress (optional)
In order to expose the podinfo app outside the mesh you'll be using an Envoy ingress and an AWS classic load balancer.
The ingress binds to an internet domain and forwards the calls into the mesh through the App Mesh sidecar.
If podinfo becomes unavailable due to a HPA downscaling or a node restart,
the ingress will retry the calls for a short period of time.
Deploy the ingress and the AWS ELB service:
```bash
kubectl apply -f ${REPO}/artifacts/appmesh/ingress.yaml
```
Find the ingress public address:
```bash
kubectl -n test describe svc/ingress | grep Ingress
LoadBalancer Ingress: yyy-xx.us-west-2.elb.amazonaws.com
```
Wait for the ELB to become active:
```bash
watch curl -sS ${INGRESS_URL}
```
Open your browser and navigate to the ingress address to access podinfo UI.
### Automated canary promotion
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.1
```
Flagger detects that the deployment revision changed and starts a new rollout:
```text
kubectl -n test describe canary/podinfo
Status:
Canary Weight: 0
Failed Checks: 0
Phase: Succeeded
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger New revision detected podinfo.test
Normal Synced 3m flagger Scaling up podinfo.test
Warning Synced 3m flagger Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
Normal Synced 3m flagger Advance podinfo.test canary weight 5
Normal Synced 3m flagger Advance podinfo.test canary weight 10
Normal Synced 3m flagger Advance podinfo.test canary weight 15
Normal Synced 2m flagger Advance podinfo.test canary weight 20
Normal Synced 2m flagger Advance podinfo.test canary weight 25
Normal Synced 1m flagger Advance podinfo.test canary weight 30
Normal Synced 1m flagger Advance podinfo.test canary weight 35
Normal Synced 55s flagger Advance podinfo.test canary weight 40
Normal Synced 45s flagger Advance podinfo.test canary weight 45
Normal Synced 35s flagger Advance podinfo.test canary weight 50
Normal Synced 25s flagger Copying podinfo.test template spec to podinfo-primary.test
Warning Synced 15s flagger Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
Normal Synced 5s flagger Promotion completed! Scaling down podinfo.test
```
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
During the analysis the canary's progress can be monitored with Grafana. The App Mesh dashboard URL is
http://localhost:3000/d/flagger-appmesh/appmesh-canary?refresh=10s&orgId=1&var-namespace=test&var-primary=podinfo-primary&var-canary=podinfo
![App Mesh Canary Dashboard](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/flagger-grafana-appmesh.png)
You can monitor all canaries with:
```bash
watch kubectl get canaries --all-namespaces
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
test podinfo Progressing 15 2019-03-16T14:05:07Z
prod frontend Succeeded 0 2019-03-15T16:15:07Z
prod backend Failed 0 2019-03-14T17:05:07Z
```
If you've enabled the Slack notifications, you should receive the following messages:
![Flagger Slack Notifications](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/slack-canary-notifications.png)
### Automated rollback
During the canary analysis you can generate HTTP 500 errors to test if Flagger pauses the rollout.
Trigger a canary deployment:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.2
```
Exec into the load tester pod with:
```bash
kubectl -n test exec -it flagger-loadtester-xx-xx sh
```
Generate HTTP 500 errors:
```bash
hey -z 1m -c 5 -q 5 http://podinfo.test:9898/status/500
```
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
the canary is scaled to zero and the rollout is marked as failed.
```text
kubectl -n test describe canary/podinfo
Status:
Canary Weight: 0
Failed Checks: 10
Phase: Failed
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger Starting canary deployment for podinfo.test
Normal Synced 3m flagger Advance podinfo.test canary weight 5
Normal Synced 3m flagger Advance podinfo.test canary weight 10
Normal Synced 3m flagger Advance podinfo.test canary weight 15
Normal Synced 3m flagger Halt podinfo.test advancement success rate 69.17% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 61.39% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 55.06% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 47.00% < 99%
Normal Synced 2m flagger (combined from similar events): Halt podinfo.test advancement success rate 38.08% < 99%
Warning Synced 1m flagger Rolling back podinfo.test failed checks threshold reached 10
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
```
If you've enabled the Slack notifications, you'll receive a message if the progress deadline is exceeded,
or if the analysis reached the maximum number of failed checks:
![Flagger Slack Notifications](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/slack-canary-failed.png)

View File

@@ -0,0 +1,366 @@
# Gloo Ingress Controller Canary Deployments
This guide shows you how to use the [Gloo](https://gloo.solo.io/) ingress controller and Flagger to automate canary deployments.
![Flagger Gloo Ingress Controller](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-gloo-overview.png)
### Prerequisites
Flagger requires a Kubernetes cluster **v1.11** or newer and Gloo ingress **0.13.29** or newer.
Install Gloo with Helm:
```bash
helm repo add gloo https://storage.googleapis.com/solo-public-helm
helm upgrade -i gloo gloo/gloo \
--namespace gloo-system
```
Install Flagger and the Prometheus add-on in the same namespace as Gloo:
```bash
helm repo add flagger https://flagger.app
helm upgrade -i flagger flagger/flagger \
--namespace gloo-system \
--set prometheus.install=true \
--set meshProvider=gloo
```
Optionally you can enable Slack notifications:
```bash
helm upgrade -i flagger flagger/flagger \
--reuse-values \
--namespace gloo-system \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general \
--set slack.user=flagger
```
### Bootstrap
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
then creates a series of objects (Kubernetes deployments, ClusterIP services and Gloo upstream groups).
These objects expose the application outside the cluster and drive the canary analysis and promotion.
Create a test namespace:
```bash
kubectl create ns test
```
Create a deployment and a horizontal pod autoscaler:
```bash
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
kubectl apply -f ${REPO}/artifacts/gloo/deployment.yaml
kubectl apply -f ${REPO}/artifacts/gloo/hpa.yaml
```
Deploy the load testing service to generate traffic during the canary analysis:
```bash
helm upgrade -i flagger-loadtester flagger/loadtester \
--namespace=test
```
Create a virtual service definition that references an upstream group that will be generated by Flagger
(replace `app.example.com` with your own domain):
```yaml
apiVersion: gateway.solo.io/v1
kind: VirtualService
metadata:
name: podinfo
namespace: test
spec:
virtualHost:
domains:
- 'app.example.com'
name: podinfo.test
routes:
- matcher:
prefix: /
routeAction:
upstreamGroup:
name: podinfo
namespace: test
```
Save the above resource as podinfo-virtualservice.yaml and then apply it:
```bash
kubectl apply -f ./podinfo-virtualservice.yaml
```
Create a canary custom resource (replace `app.example.com` with your own domain):
```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: podinfo
# the maximum time in seconds for the canary deployment
# to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
service:
# container port
port: 9898
canaryAnalysis:
# schedule interval (default 60s)
interval: 10s
# max number of failed metric checks before rollback
threshold: 5
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 5
# Gloo Prometheus checks
metrics:
- name: request-success-rate
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: request-duration
# maximum req duration P99
# milliseconds
threshold: 500
interval: 30s
# load testing (optional)
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
type: cmd
cmd: "hey -z 1m -q 10 -c 2 http://app.example.com/"
```
Save the above resource as podinfo-canary.yaml and then apply it:
```bash
kubectl apply -f ./podinfo-canary.yaml
```
After a couple of seconds Flagger will create the canary objects:
```bash
# applied
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
virtualservices.gateway.solo.io/podinfo
canary.flagger.app/podinfo
# generated
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
upstreamgroups.gloo.solo.io/podinfo
```
When the bootstrap finishes Flagger will set the canary status to initialized:
```bash
kubectl -n test get canary podinfo
NAME STATUS WEIGHT LASTTRANSITIONTIME
podinfo Initialized 0 2019-05-17T08:09:51Z
```
### Automated canary promotion
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators
like HTTP requests success rate, requests average duration and pod health.
Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack.
![Flagger Canary Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-steps.png)
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.1
```
Flagger detects that the deployment revision changed and starts a new rollout:
```text
kubectl -n test describe canary/podinfo
Status:
Canary Weight: 0
Failed Checks: 0
Phase: Succeeded
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger New revision detected podinfo.test
Normal Synced 3m flagger Scaling up podinfo.test
Warning Synced 3m flagger Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
Normal Synced 3m flagger Advance podinfo.test canary weight 5
Normal Synced 3m flagger Advance podinfo.test canary weight 10
Normal Synced 3m flagger Advance podinfo.test canary weight 15
Normal Synced 2m flagger Advance podinfo.test canary weight 20
Normal Synced 2m flagger Advance podinfo.test canary weight 25
Normal Synced 1m flagger Advance podinfo.test canary weight 30
Normal Synced 1m flagger Advance podinfo.test canary weight 35
Normal Synced 55s flagger Advance podinfo.test canary weight 40
Normal Synced 45s flagger Advance podinfo.test canary weight 45
Normal Synced 35s flagger Advance podinfo.test canary weight 50
Normal Synced 25s flagger Copying podinfo.test template spec to podinfo-primary.test
Warning Synced 15s flagger Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
Normal Synced 5s flagger Promotion completed! Scaling down podinfo.test
```
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
You can monitor all canaries with:
```bash
watch kubectl get canaries --all-namespaces
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
test podinfo Progressing 15 2019-05-17T14:05:07Z
prod frontend Succeeded 0 2019-05-17T16:15:07Z
prod backend Failed 0 2019-05-17T17:05:07Z
```
### Automated rollback
During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses and rolls back the faulted version.
Trigger another canary deployment:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.2
```
Generate HTTP 500 errors:
```bash
watch curl http://app.example.com/status/500
```
Generate high latency:
```bash
watch curl http://app.example.com/delay/2
```
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
the canary is scaled to zero and the rollout is marked as failed.
```text
kubectl -n test describe canary/podinfo
Status:
Canary Weight: 0
Failed Checks: 10
Phase: Failed
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger Starting canary deployment for podinfo.test
Normal Synced 3m flagger Advance podinfo.test canary weight 5
Normal Synced 3m flagger Advance podinfo.test canary weight 10
Normal Synced 3m flagger Advance podinfo.test canary weight 15
Normal Synced 3m flagger Halt podinfo.test advancement success rate 69.17% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 61.39% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 55.06% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 47.00% < 99%
Normal Synced 2m flagger (combined from similar events): Halt podinfo.test advancement success rate 38.08% < 99%
Warning Synced 1m flagger Rolling back podinfo.test failed checks threshold reached 10
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
```
### Custom metrics
The canary analysis can be extended with Prometheus queries.
The demo app is instrumented with Prometheus so you can create a custom check that will use the HTTP request duration
histogram to validate the canary.
Edit the canary analysis and add the following metric:
```yaml
canaryAnalysis:
metrics:
- name: "404s percentage"
threshold: 5
query: |
100 - sum(
rate(
http_request_duration_seconds_count{
kubernetes_namespace="test",
kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)",
status!="404"
}[1m]
)
)
/
sum(
rate(
http_request_duration_seconds_count{
kubernetes_namespace="test",
kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"
}[1m]
)
) * 100
```
The above configuration validates the canary by checking if the HTTP 404 req/sec percentage is below 5
percent of the total traffic. If the 404s rate reaches the 5% threshold, then the canary fails.
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.3
```
Generate 404s:
```bash
watch curl http://app.example.com/status/404
```
Watch Flagger logs:
```
kubectl -n gloo-system logs deployment/flagger -f | jq .msg
Starting canary deployment for podinfo.test
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Halt podinfo.test advancement 404s percentage 6.20 > 5
Halt podinfo.test advancement 404s percentage 6.45 > 5
Halt podinfo.test advancement 404s percentage 7.60 > 5
Halt podinfo.test advancement 404s percentage 8.69 > 5
Halt podinfo.test advancement 404s percentage 9.70 > 5
Rolling back podinfo.test failed checks threshold reached 5
Canary failed! Scaling down podinfo.test
```
If you have Slack configured, Flagger will send a notification with the reason why the canary failed.

View File

@@ -6,15 +6,13 @@ Flagger comes with a Grafana dashboard made for canary analysis. Install Grafana
```bash
helm upgrade -i flagger-grafana flagger/grafana \
--namespace=istio-system \
--set url=http://prometheus:9090 \
--set user=admin \
--set password=admin
--namespace=istio-system \ # or appmesh-system
--set url=http://prometheus:9090
```
The dashboard shows the RED and USE metrics for the primary and canary workloads:
![canary dashboard](https://raw.githubusercontent.com/stefanprodan/flagger/master/docs/screens/grafana-canary-analysis.png)
![Canary Dashboard](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/screens/grafana-canary-analysis.png)
### Logging
@@ -48,6 +46,9 @@ Flagger exposes Prometheus metrics that can be used to determine the canary anal
the destination weight values:
```bash
# Flagger version and mesh provider gauge
flagger_info{version="0.10.0", mesh_provider="istio"} 1
# Canaries total gauge
flagger_canary_total{namespace="test"} 1

View File

@@ -0,0 +1,421 @@
# NGINX Ingress Controller Canary Deployments
This guide shows you how to use the NGINX ingress controller and Flagger to automate canary deployments and A/B testing.
![Flagger NGINX Ingress Controller](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-nginx-overview.png)
### Prerequisites
Flagger requires a Kubernetes cluster **v1.11** or newer and NGINX ingress **0.24** or newer.
Install NGINX with Helm:
```bash
helm upgrade -i nginx-ingress stable/nginx-ingress \
--namespace ingress-nginx \
--set controller.stats.enabled=true \
--set controller.metrics.enabled=true \
--set controller.podAnnotations."prometheus\.io/scrape"=true \
--set controller.podAnnotations."prometheus\.io/port"=10254
```
Install Flagger and the Prometheus add-on in the same namespace as NGINX:
```bash
helm repo add flagger https://flagger.app
helm upgrade -i flagger flagger/flagger \
--namespace ingress-nginx \
--set prometheus.install=true \
--set meshProvider=nginx
```
Optionally you can enable Slack notifications:
```bash
helm upgrade -i flagger flagger/flagger \
--reuse-values \
--namespace ingress-nginx \
--set slack.url=https://hooks.slack.com/services/YOUR/SLACK/WEBHOOK \
--set slack.channel=general \
--set slack.user=flagger
```
### Bootstrap
Flagger takes a Kubernetes deployment and optionally a horizontal pod autoscaler (HPA),
then creates a series of objects (Kubernetes deployments, ClusterIP services and canary ingress).
These objects expose the application outside the cluster and drive the canary analysis and promotion.
Create a test namespace:
```bash
kubectl create ns test
```
Create a deployment and a horizontal pod autoscaler:
```bash
kubectl apply -f ${REPO}/artifacts/nginx/deployment.yaml
kubectl apply -f ${REPO}/artifacts/nginx/hpa.yaml
```
Deploy the load testing service to generate traffic during the canary analysis:
```bash
helm upgrade -i flagger-loadtester flagger/loadtester \
--namespace=test
```
Create an ingress definition (replace `app.example.com` with your own domain):
```yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: podinfo
namespace: test
labels:
app: podinfo
annotations:
kubernetes.io/ingress.class: "nginx"
spec:
rules:
- host: app.example.com
http:
paths:
- backend:
serviceName: podinfo
servicePort: 9898
```
Save the above resource as podinfo-ingress.yaml and then apply it:
```bash
kubectl apply -f ./podinfo-ingress.yaml
```
Create a canary custom resource (replace `app.example.com` with your own domain):
```yaml
apiVersion: flagger.app/v1alpha3
kind: Canary
metadata:
name: podinfo
namespace: test
spec:
# deployment reference
targetRef:
apiVersion: apps/v1
kind: Deployment
name: podinfo
# ingress reference
ingressRef:
apiVersion: extensions/v1beta1
kind: Ingress
name: podinfo
# HPA reference (optional)
autoscalerRef:
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
name: podinfo
# the maximum time in seconds for the canary deployment
# to make progress before it is rolled back (default 600s)
progressDeadlineSeconds: 60
service:
# container port
port: 9898
canaryAnalysis:
# schedule interval (default 60s)
interval: 10s
# max number of failed metric checks before rollback
threshold: 10
# max traffic percentage routed to canary
# percentage (0-100)
maxWeight: 50
# canary increment step
# percentage (0-100)
stepWeight: 5
# NGINX Prometheus checks
metrics:
- name: request-success-rate
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
# load testing (optional)
webhooks:
- name: load-test
url: http://flagger-loadtester.test/
timeout: 5s
metadata:
type: cmd
cmd: "hey -z 1m -q 10 -c 2 http://app.example.com/"
```
Save the above resource as podinfo-canary.yaml and then apply it:
```bash
kubectl apply -f ./podinfo-canary.yaml
```
After a couple of seconds Flagger will create the canary objects:
```bash
# applied
deployment.apps/podinfo
horizontalpodautoscaler.autoscaling/podinfo
ingresses.extensions/podinfo
canary.flagger.app/podinfo
# generated
deployment.apps/podinfo-primary
horizontalpodautoscaler.autoscaling/podinfo-primary
service/podinfo
service/podinfo-canary
service/podinfo-primary
ingresses.extensions/podinfo-canary
```
### Automated canary promotion
Flagger implements a control loop that gradually shifts traffic to the canary while measuring key performance indicators
like HTTP requests success rate, requests average duration and pod health.
Based on analysis of the KPIs a canary is promoted or aborted, and the analysis result is published to Slack.
![Flagger Canary Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-canary-steps.png)
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.1
```
Flagger detects that the deployment revision changed and starts a new rollout:
```text
kubectl -n test describe canary/podinfo
Status:
Canary Weight: 0
Failed Checks: 0
Phase: Succeeded
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger New revision detected podinfo.test
Normal Synced 3m flagger Scaling up podinfo.test
Warning Synced 3m flagger Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
Normal Synced 3m flagger Advance podinfo.test canary weight 5
Normal Synced 3m flagger Advance podinfo.test canary weight 10
Normal Synced 3m flagger Advance podinfo.test canary weight 15
Normal Synced 2m flagger Advance podinfo.test canary weight 20
Normal Synced 2m flagger Advance podinfo.test canary weight 25
Normal Synced 1m flagger Advance podinfo.test canary weight 30
Normal Synced 1m flagger Advance podinfo.test canary weight 35
Normal Synced 55s flagger Advance podinfo.test canary weight 40
Normal Synced 45s flagger Advance podinfo.test canary weight 45
Normal Synced 35s flagger Advance podinfo.test canary weight 50
Normal Synced 25s flagger Copying podinfo.test template spec to podinfo-primary.test
Warning Synced 15s flagger Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
Normal Synced 5s flagger Promotion completed! Scaling down podinfo.test
```
**Note** that if you apply new changes to the deployment during the canary analysis, Flagger will restart the analysis.
You can monitor all canaries with:
```bash
watch kubectl get canaries --all-namespaces
NAMESPACE NAME STATUS WEIGHT LASTTRANSITIONTIME
test podinfo Progressing 15 2019-05-06T14:05:07Z
prod frontend Succeeded 0 2019-05-05T16:15:07Z
prod backend Failed 0 2019-05-04T17:05:07Z
```
### Automated rollback
During the canary analysis you can generate HTTP 500 errors to test if Flagger pauses and rolls back the faulted version.
Trigger another canary deployment:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.2
```
Generate HTTP 500 errors:
```bash
watch curl http://app.example.com/status/500
```
When the number of failed checks reaches the canary analysis threshold, the traffic is routed back to the primary,
the canary is scaled to zero and the rollout is marked as failed.
```text
kubectl -n test describe canary/podinfo
Status:
Canary Weight: 0
Failed Checks: 10
Phase: Failed
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger Starting canary deployment for podinfo.test
Normal Synced 3m flagger Advance podinfo.test canary weight 5
Normal Synced 3m flagger Advance podinfo.test canary weight 10
Normal Synced 3m flagger Advance podinfo.test canary weight 15
Normal Synced 3m flagger Halt podinfo.test advancement success rate 69.17% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 61.39% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 55.06% < 99%
Normal Synced 2m flagger Halt podinfo.test advancement success rate 47.00% < 99%
Normal Synced 2m flagger (combined from similar events): Halt podinfo.test advancement success rate 38.08% < 99%
Warning Synced 1m flagger Rolling back podinfo.test failed checks threshold reached 10
Warning Synced 1m flagger Canary failed! Scaling down podinfo.test
```
### Custom metrics
The canary analysis can be extended with Prometheus queries.
The demo app is instrumented with Prometheus so you can create a custom check that will use the HTTP request duration
histogram to validate the canary.
Edit the canary analysis and add the following metric:
```yaml
canaryAnalysis:
metrics:
- name: "latency"
threshold: 0.5
interval: 1m
query: |
histogram_quantile(0.99,
sum(
rate(
http_request_duration_seconds_bucket{
kubernetes_namespace="test",
kubernetes_pod_name=~"podinfo-[0-9a-zA-Z]+(-[0-9a-zA-Z]+)"
}[1m]
)
) by (le)
)
```
The threshold is set to 500ms, so if the 99th percentile of the request duration over the last minute
goes above half a second, the analysis will fail and the canary will not be promoted.
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.4.3
```
Generate high response latency:
```bash
watch curl http://app.example.com/delay/2
```
Watch Flagger logs:
```
kubectl -n nginx-ingress logs deployment/flagger -f | jq .msg
Starting canary deployment for podinfo.test
Advance podinfo.test canary weight 5
Advance podinfo.test canary weight 10
Advance podinfo.test canary weight 15
Halt podinfo.test advancement latency 1.20 > 0.5
Halt podinfo.test advancement latency 1.45 > 0.5
Halt podinfo.test advancement latency 1.60 > 0.5
Halt podinfo.test advancement latency 1.69 > 0.5
Halt podinfo.test advancement latency 1.70 > 0.5
Rolling back podinfo.test failed checks threshold reached 5
Canary failed! Scaling down podinfo.test
```
If you have Slack configured, Flagger will send a notification with the reason why the canary failed.
### A/B Testing
Besides weighted routing, Flagger can be configured to route traffic to the canary based on HTTP match conditions.
In an A/B testing scenario, you'll be using HTTP headers or cookies to target a certain segment of your users.
This is particularly useful for frontend applications that require session affinity.
![Flagger A/B Testing Stages](https://raw.githubusercontent.com/weaveworks/flagger/master/docs/diagrams/flagger-abtest-steps.png)
Edit the canary analysis, remove the max/step weight and add the match conditions and iterations:
```yaml
canaryAnalysis:
interval: 1m
threshold: 10
iterations: 10
match:
# curl -H 'X-Canary: insider' http://app.example.com
- headers:
x-canary:
exact: "insider"
# curl -b 'canary=always' http://app.example.com
- headers:
cookie:
exact: "canary"
metrics:
- name: request-success-rate
threshold: 99
interval: 1m
webhooks:
- name: load-test
url: http://localhost:8888/
timeout: 5s
metadata:
type: cmd
cmd: "hey -z 1m -q 10 -c 2 -H 'Cookie: canary=always' http://app.example.com/"
logCmdOutput: "true"
```
The above configuration will run an analysis for ten minutes targeting users that have a `canary` cookie set to `always` or
those that call the service using the `X-Canary: insider` header.
Trigger a canary deployment by updating the container image:
```bash
kubectl -n test set image deployment/podinfo \
podinfod=quay.io/stefanprodan/podinfo:1.5.0
```
Flagger detects that the deployment revision changed and starts the A/B testing:
```text
kubectl -n test describe canary/podinfo
Status:
Failed Checks: 0
Phase: Succeeded
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Synced 3m flagger New revision detected podinfo.test
Normal Synced 3m flagger Scaling up podinfo.test
Warning Synced 3m flagger Waiting for podinfo.test rollout to finish: 0 of 1 updated replicas are available
Normal Synced 3m flagger Advance podinfo.test canary iteration 1/10
Normal Synced 3m flagger Advance podinfo.test canary iteration 2/10
Normal Synced 3m flagger Advance podinfo.test canary iteration 3/10
Normal Synced 2m flagger Advance podinfo.test canary iteration 4/10
Normal Synced 2m flagger Advance podinfo.test canary iteration 5/10
Normal Synced 1m flagger Advance podinfo.test canary iteration 6/10
Normal Synced 1m flagger Advance podinfo.test canary iteration 7/10
Normal Synced 55s flagger Advance podinfo.test canary iteration 8/10
Normal Synced 45s flagger Advance podinfo.test canary iteration 9/10
Normal Synced 35s flagger Advance podinfo.test canary iteration 10/10
Normal Synced 25s flagger Copying podinfo.test template spec to podinfo-primary.test
Warning Synced 15s flagger Waiting for podinfo-primary.test rollout to finish: 1 of 2 updated replicas are available
Normal Synced 5s flagger Promotion completed! Scaling down podinfo.test
```

View File

@@ -1,11 +1,13 @@
# Canary Deployments
# Istio Canary Deployments
This guide shows you how to use Istio and Flagger to automate canary deployments.
### Bootstrap
Create a test namespace with Istio sidecar injection enabled:
```bash
export REPO=https://raw.githubusercontent.com/stefanprodan/flagger/master
export REPO=https://raw.githubusercontent.com/weaveworks/flagger/master
kubectl apply -f ${REPO}/artifacts/namespaces/test.yaml
```
@@ -67,12 +69,12 @@ spec:
# percentage (0-100)
stepWeight: 10
metrics:
- name: istio_requests_total
- name: request-success-rate
# minimum req success rate (non 5xx responses)
# percentage (0-100)
threshold: 99
interval: 1m
- name: istio_request_duration_seconds_bucket
- name: request-duration
# maximum req duration P99
# milliseconds
threshold: 500
@@ -109,6 +111,8 @@ service/podinfo-primary
virtualservice.networking.istio.io/podinfo
```
### Automated canary promotion
Trigger a canary deployment by updating the container image:
```bash
@@ -159,6 +163,8 @@ prod frontend Succeeded 0 2019-01-15T16:15:07Z
prod backend Failed 0 2019-01-14T17:05:07Z
```
### Automated rollback
During the canary analysis you can generate HTTP 500 errors and high latency to test if Flagger pauses the rollout.
Create a tester pod and exec into it:

Binary file not shown.

Before

Width:  |  Height:  |  Size: 19 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 18 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 31 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 28 KiB

View File

@@ -0,0 +1,71 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 16.0.3, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
width="900px" height="900px" viewBox="0 0 900 900" enable-background="new 0 0 900 900" xml:space="preserve">
<g>
<defs>
<rect id="SVGID_1_" width="900" height="900"/>
</defs>
<clipPath id="SVGID_2_">
<use xlink:href="#SVGID_1_" overflow="visible"/>
</clipPath>
<path clip-path="url(#SVGID_2_)" fill="#33324B" d="M356.003,268.915L586.001,63.471c-18.262-14.35-38.76-25.978-60.934-34.188
L356.003,180.298V268.915z"/>
<path clip-path="url(#SVGID_2_)" fill="#33324B" d="M289.914,239.333l-62.307,55.655c5.674,23.087,14.884,44.774,27.05,64.454
l35.257-31.493V239.333z"/>
<path clip-path="url(#SVGID_2_)" fill="#40C6F1" d="M479.023,70.412V17.631c-10.554-1.513-21.329-2.338-32.302-2.338
c-11.484,0-22.764,0.871-33.787,2.527v111.626L479.023,70.412z"/>
<path clip-path="url(#SVGID_2_)" fill="#40C6F1" d="M479.023,380.863V159.029l-66.089,59.033v246.046
c11.023,1.655,22.303,2.526,33.787,2.526c13.449,0,26.601-1.244,39.404-3.5l176.98-158.086c6.01-20.321,9.287-41.815,9.287-64.085
c0-10.488-0.774-20.79-2.159-30.898L479.023,380.863z"/>
<path clip-path="url(#SVGID_2_)" fill="#F04E27" d="M479.023,315.734l177.305-158.376c-8.446-21.157-19.978-40.739-34.079-58.175
L479.023,227.118V315.734z"/>
<path clip-path="url(#SVGID_2_)" fill="#F04E27" d="M412.935,286.152l-56.931,50.854V34.328
c-24.657,10.841-46.992,25.96-66.09,44.426v324.42c14.897,14.403,31.742,26.797,50.147,36.688l72.874-65.094V286.152z"/>
<polygon clip-path="url(#SVGID_2_)" fill="#33324B" points="175.804,603.774 207.413,485.167 249.859,485.167 202.897,650.136
152.926,650.136 124.93,539.054 96.933,650.136 46.961,650.136 0,485.167 42.446,485.167 74.055,603.774 105.362,485.167
144.497,485.167 "/>
<path clip-path="url(#SVGID_2_)" fill="#33324B" d="M406.409,595.948c-4.816,32.812-30.103,56.293-73.452,56.293
c-46.059,0-80.076-27.695-80.076-83.686c0-56.293,35.222-85.495,80.076-85.495c49.67,0,75.861,36.426,75.861,85.191v8.73H295.025
c0,18.365,7.526,41.543,37.328,41.543c16.558,0,29.502-8.127,32.814-23.479L406.409,595.948z M365.77,547.784
c-1.506-22.88-16.256-31.308-33.114-31.308c-19.266,0-34.017,11.738-36.124,31.308H365.77z"/>
<path clip-path="url(#SVGID_2_)" fill="#33324B" d="M530.318,543.269v-5.721c0-12.944-5.42-21.374-29.803-21.374
c-30.404,0-31.308,16.859-31.308,22.58h-39.134c0-24.386,10.234-55.693,71.646-55.693c58.702,0,69.238,29.201,69.238,52.982v90.912
c0,6.624,0.301,12.041,0.903,16.256l1.204,6.925h-41.544v-16.557c-11.739,11.738-27.995,18.662-51.176,18.662
c-32.511,0-57.196-18.965-57.196-51.777c0-37.93,35.824-46.059,50.273-48.465L530.318,543.269z M530.318,574.275l-15.955,2.406
c-8.73,1.506-19.267,3.011-25.588,4.215c-6.925,1.205-12.945,2.71-17.46,6.321c-4.215,3.313-6.321,7.829-6.321,13.546
c0,4.818,1.806,19.87,24.385,19.87c13.243,0,24.984-4.816,31.307-11.14c8.128-8.127,9.633-18.362,9.633-26.49V574.275z"/>
<polygon clip-path="url(#SVGID_2_)" fill="#33324B" points="687.764,650.136 632.675,650.136 578.488,485.167 623.041,485.167
660.972,611.303 699.203,485.167 742.552,485.167 "/>
<path clip-path="url(#SVGID_2_)" fill="#33324B" d="M897.591,595.948c-4.816,32.812-30.103,56.293-73.452,56.293
c-46.059,0-80.075-27.695-80.075-83.686c0-56.293,35.222-85.495,80.075-85.495c49.67,0,75.861,36.426,75.861,85.191v8.73H786.208
c0,18.365,7.526,41.543,37.328,41.543c16.558,0,29.502-8.127,32.812-23.479L897.591,595.948z M856.952,547.784
c-1.506-22.88-16.256-31.308-33.114-31.308c-19.266,0-34.017,11.738-36.124,31.308H856.952z"/>
</g>
<g id="flagger1">
<path id="fagger" fill="#566991" d="M138.27,737.32h-30.625v113.925H87.8V737.32H64.77v-16.905H87.8v-22.05
c0-18.62,11.515-29.89,32.095-29.89c8.575,0,20.09,0.489,20.09,0.489v17.15c-1.225,0-10.535-0.245-13.965-0.245
c-10.535,0-18.375,1.225-18.375,14.945v19.6h30.625V737.32z M179.675,851.245h-20.09v-181.3h20.09V851.245z M293.519,800.285
V785.34c0,0-28.42,3.675-42.875,5.88c-13.965,1.96-25.235,6.615-25.235,22.051c0,12.984,8.82,22.784,26.705,22.784
C276.858,836.055,293.519,821.845,293.519,800.285z M239.618,776.521c12.495-2.45,53.9-7.351,53.9-7.351v-7.104
c0-18.13-7.84-26.705-30.87-26.705c-24.745,0-31.605,11.27-32.585,24.5h-20.09c0-16.905,9.555-41.16,52.185-41.16
c44.835,0,51.45,23.275,51.45,44.1v67.375c0,6.125,0.245,15.926,1.715,21.07h-20.335c0,0-0.49-7.105-0.49-17.64
c-6.37,8.33-19.11,19.354-45.815,19.354c-25.235,0-44.1-14.7-44.1-39.2C204.583,786.565,226.388,779.215,239.618,776.521z
M338.271,782.646c0-42.386,24.99-63.945,55.86-63.945c22.295,0,36.015,10.535,40.425,20.58v-18.865h19.845V850.51
c0,25.235-12.25,49.49-58.31,49.49c-34.3,0-55.125-13.72-55.86-42.63h20.335c0,14.945,10.535,25.97,36.015,25.97
c29.399,0,37.729-13.475,37.729-35.035v-22.784c-4.41,10.045-18.62,20.58-40.915,20.58
C362.527,846.101,338.271,825.03,338.271,782.646z M434.312,782.4c0-33.32-14.7-46.55-38.22-46.55
c-20.825,0-37.485,11.76-37.485,46.305s16.66,46.795,37.485,46.795C419.611,828.95,434.312,815.72,434.312,782.4z M478.575,782.646
c0-42.386,24.99-63.945,55.859-63.945c22.296,0,36.016,10.535,40.426,20.58v-18.865h19.845V850.51
c0,25.235-12.25,49.49-58.311,49.49c-34.3,0-55.125-13.72-55.859-42.63h20.335c0,14.945,10.535,25.97,36.015,25.97
c29.4,0,37.73-13.475,37.73-35.035v-22.784c-4.41,10.045-18.62,20.58-40.915,20.58C502.83,846.101,478.575,825.03,478.575,782.646z
M574.615,782.4c0-33.32-14.7-46.55-38.221-46.55c-20.824,0-37.484,11.76-37.484,46.305s16.66,46.795,37.484,46.795
C559.915,828.95,574.615,815.72,574.615,782.4z M714.674,806.165l20.335,0.245c-2.695,23.03-18.62,46.55-56.105,46.55
c-34.79,0-60.025-23.765-60.025-66.885c0-40.67,24.99-67.375,59.78-67.375c36.75,0,57.085,27.439,57.085,70.56h-96.285
c0.49,24.745,10.78,46.795,39.69,46.795C700.463,836.055,712.958,822.825,714.674,806.165z M640.193,773.335h73.745
c-1.96-27.685-17.885-37.729-35.771-37.729C658.078,735.605,642.644,749.08,640.193,773.335z M829.497,740.995h-8.82
c-23.521,0-39.936,8.085-39.936,38.955v71.295h-20.09v-130.83h19.846v22.54c8.574-16.66,25.97-22.54,39.689-22.54h9.311V740.995z"
/>
</g>
</svg>

After

Width:  |  Height:  |  Size: 6.3 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 133 KiB

View File

@@ -22,7 +22,6 @@ SCRIPT_ROOT=$(dirname ${BASH_SOURCE})/..
CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)}
${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \
github.com/stefanprodan/flagger/pkg/client github.com/stefanprodan/flagger/pkg/apis \
"istio:v1alpha3 flagger:v1alpha3" \
github.com/weaveworks/flagger/pkg/client github.com/weaveworks/flagger/pkg/apis \
"appmesh:v1beta1 istio:v1alpha3 flagger:v1alpha3 smi:v1alpha1" \
--go-header-file ${SCRIPT_ROOT}/hack/boilerplate.go.txt

View File

@@ -0,0 +1,5 @@
package appmesh
const (
GroupName = "appmesh.k8s.aws"
)

View File

@@ -0,0 +1,5 @@
// +k8s:deepcopy-gen=package
// Package v1beta1 is the v1beta1 version of the API.
// +groupName=appmesh.k8s.aws
package v1beta1

View File

@@ -0,0 +1,41 @@
package v1beta1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/weaveworks/flagger/pkg/apis/appmesh"
)
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: appmesh.GroupName, Version: "v1beta1"}
// Kind takes an unqualified kind and returns back a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
return SchemeGroupVersion.WithKind(kind).GroupKind()
}
// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
var (
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
AddToScheme = SchemeBuilder.AddToScheme
)
// Adds the list of known types to api.Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&Mesh{},
&MeshList{},
&VirtualService{},
&VirtualServiceList{},
&VirtualNode{},
&VirtualNodeList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}

View File

@@ -0,0 +1,286 @@
package v1beta1
import (
api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// App Mesh Custom Resource API types.
// This API follows the conventions described in
// https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Mesh is a specification for a Mesh resource
type Mesh struct {
metav1.TypeMeta `json:",inline"`
// +optional
metav1.ObjectMeta `json:"metadata,omitempty"`
// +optional
Spec MeshSpec `json:"spec,omitempty"`
// +optional
Status MeshStatus `json:"status,omitempty"`
}
type MeshServiceDiscoveryType string
const (
Dns MeshServiceDiscoveryType = "Dns"
)
// MeshSpec is the spec for a Mesh resource
type MeshSpec struct {
// +optional
ServiceDiscoveryType *MeshServiceDiscoveryType `json:"serviceDiscoveryType,omitempty"`
}
// MeshStatus is the status for a Mesh resource
type MeshStatus struct {
// MeshArn is the AppMesh Mesh object's Amazon Resource Name
// +optional
MeshArn *string `json:"meshArn,omitempty"`
Conditions []MeshCondition `json:"meshCondition"`
}
type MeshConditionType string
const (
// MeshActive is Active when the Appmesh Mesh has been created or found via the API
MeshActive MeshConditionType = "MeshActive"
)
type MeshCondition struct {
// Type of mesh condition.
Type MeshConditionType `json:"type"`
// Status of the condition, one of True, False, Unknown.
Status api.ConditionStatus `json:"status"`
// Last time the condition transitioned from one status to another.
// +optional
LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
// The reason for the condition's last transition.
// +optional
Reason *string `json:"reason,omitempty"`
// A human readable message indicating details about the transition.
// +optional
Message *string `json:"message,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// MeshList is a list of Mesh resources
type MeshList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []Mesh `json:"items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// VirtualService is a specification for a VirtualService resource
type VirtualService struct {
metav1.TypeMeta `json:",inline"`
// +optional
metav1.ObjectMeta `json:"metadata,omitempty"`
// +optional
Spec VirtualServiceSpec `json:"spec,omitempty"`
// +optional
Status VirtualServiceStatus `json:"status,omitempty"`
}
// VirtualServiceSpec is the spec for a VirtualService resource
type VirtualServiceSpec struct {
MeshName string `json:"meshName"`
// +optional
VirtualRouter *VirtualRouter `json:"virtualRouter,omitempty"`
// +optional
Routes []Route `json:"routes,omitempty"`
}
type VirtualRouter struct {
Name string `json:"name"`
}
type Route struct {
Name string `json:"name"`
Http HttpRoute `json:"http"`
}
type HttpRoute struct {
Match HttpRouteMatch `json:"match"`
Action HttpRouteAction `json:"action"`
}
type HttpRouteMatch struct {
Prefix string `json:"prefix"`
}
type HttpRouteAction struct {
WeightedTargets []WeightedTarget `json:"weightedTargets"`
}
type WeightedTarget struct {
VirtualNodeName string `json:"virtualNodeName"`
Weight int64 `json:"weight"`
}
// VirtualServiceStatus is the status for a VirtualService resource
type VirtualServiceStatus struct {
// VirtualServiceArn is the AppMesh VirtualService object's Amazon Resource Name
// +optional
VirtualServiceArn *string `json:"virtualServiceArn,omitempty"`
// VirtualRouterArn is the AppMesh VirtualRouter object's Amazon Resource Name
// +optional
VirtualRouterArn *string `json:"virtualRouterArn,omitempty"`
// RouteArns is a list of AppMesh Route objects' Amazon Resource Names
// +optional
RouteArns []string `json:"routeArns,omitempty"`
Conditions []VirtualServiceCondition `json:"conditions"`
}
type VirtualServiceConditionType string
const (
// VirtualServiceActive is Active when the Appmesh Service has been created or found via the API
VirtualServiceActive VirtualServiceConditionType = "VirtualServiceActive"
VirtualRouterActive VirtualServiceConditionType = "VirtualRouterActive"
RoutesActive VirtualServiceConditionType = "RoutesActive"
VirtualServiceMeshMarkedForDeletion VirtualServiceConditionType = "MeshMarkedForDeletion"
)
type VirtualServiceCondition struct {
// Type of mesh service condition.
Type VirtualServiceConditionType `json:"type"`
// Status of the condition, one of True, False, Unknown.
Status api.ConditionStatus `json:"status"`
// Last time the condition transitioned from one status to another.
// +optional
LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
// The reason for the condition's last transition.
// +optional
Reason *string `json:"reason,omitempty"`
// A human readable message indicating details about the transition.
// +optional
Message *string `json:"message,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// VirtualServiceList is a list of VirtualService resources
type VirtualServiceList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []VirtualService `json:"items"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// VirtualNode is a specification for a VirtualNode resource
type VirtualNode struct {
metav1.TypeMeta `json:",inline"`
// +optional
metav1.ObjectMeta `json:"metadata,omitempty"`
// +optional
Spec VirtualNodeSpec `json:"spec,omitempty"`
// +optional
Status VirtualNodeStatus `json:"status,omitempty"`
}
// VirtualNodeSpec is the spec for a VirtualNode resource
type VirtualNodeSpec struct {
MeshName string `json:"meshName"`
// +optional
Listeners []Listener `json:"listeners,omitempty"`
// +optional
ServiceDiscovery *ServiceDiscovery `json:"serviceDiscovery,omitempty"`
// +optional
Backends []Backend `json:"backends,omitempty"`
}
type Listener struct {
PortMapping PortMapping `json:"portMapping"`
}
type PortMapping struct {
Port int64 `json:"port"`
Protocol string `json:"protocol"`
}
type ServiceDiscovery struct {
// +optional
CloudMap *CloudMapServiceDiscovery `json:"cloudMap,omitempty"`
// +optional
Dns *DnsServiceDiscovery `json:"dns,omitempty"`
}
type CloudMapServiceDiscovery struct {
CloudMapServiceName string `json:"cloudMapServiceName"`
}
type DnsServiceDiscovery struct {
HostName string `json:"hostName"`
}
type Backend struct {
VirtualService VirtualServiceBackend `json:"virtualService"`
}
type VirtualServiceBackend struct {
VirtualServiceName string `json:"virtualServiceName"`
}
// VirtualNodeStatus is the status for a VirtualNode resource
type VirtualNodeStatus struct {
MeshArn *string `json:"meshArn,omitempty"`
// VirtualNodeArn is the AppMesh VirtualNode object's Amazon Resource Name
// +optional
VirtualNodeArn *string `json:"virtualNodeArn,omitempty"`
// CloudMapServiceArn is a CloudMap Service object's Amazon Resource Name
// +optional
CloudMapServiceArn *string `json:"cloudMapServiceArn,omitempty"`
// +optional
QueryParameters map[string]string `json:"queryParameters,omitempty"`
Conditions []VirtualNodeCondition `json:"conditions"`
}
type VirtualNodeConditionType string
const (
// VirtualNodeActive is Active when the Appmesh Node has been created or found via the API
VirtualNodeActive VirtualNodeConditionType = "VirtualNodeActive"
VirtualNodeMeshMarkedForDeletion VirtualNodeConditionType = "MeshMarkedForDeletion"
)
type VirtualNodeCondition struct {
// Type of mesh node condition.
Type VirtualNodeConditionType `json:"type"`
// Status of the condition, one of True, False, Unknown.
Status api.ConditionStatus `json:"status"`
// Last time the condition transitioned from one status to another.
// +optional
LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"`
// The reason for the condition's last transition.
// +optional
Reason *string `json:"reason,omitempty"`
// A human readable message indicating details about the transition.
// +optional
Message *string `json:"reason,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// VirtualNodeList is a list of VirtualNode resources
type VirtualNodeList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []VirtualNode `json:"items"`
}

View File

@@ -0,0 +1,717 @@
// +build !ignore_autogenerated
/*
Copyright The Flagger Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1beta1
import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Backend) DeepCopyInto(out *Backend) {
	*out = *in
	// Plain value copy: the generator emits assignment (not a deep copy) only
	// for fields with no reference types inside them.
	out.VirtualService = in.VirtualService
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backend.
func (in *Backend) DeepCopy() *Backend {
	if in == nil {
		return nil
	}
	out := new(Backend)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CloudMapServiceDiscovery) DeepCopyInto(out *CloudMapServiceDiscovery) {
	// Struct has no reference fields; a shallow value copy is a deep copy.
	*out = *in
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudMapServiceDiscovery.
func (in *CloudMapServiceDiscovery) DeepCopy() *CloudMapServiceDiscovery {
	if in == nil {
		return nil
	}
	out := new(CloudMapServiceDiscovery)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DnsServiceDiscovery) DeepCopyInto(out *DnsServiceDiscovery) {
	// Struct has no reference fields; a shallow value copy is a deep copy.
	*out = *in
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DnsServiceDiscovery.
func (in *DnsServiceDiscovery) DeepCopy() *DnsServiceDiscovery {
	if in == nil {
		return nil
	}
	out := new(DnsServiceDiscovery)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HttpRoute) DeepCopyInto(out *HttpRoute) {
	*out = *in
	// Match is copied by value; Action needs a recursive deep copy because it
	// contains a slice (see HttpRouteAction.DeepCopyInto below).
	out.Match = in.Match
	in.Action.DeepCopyInto(&out.Action)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HttpRoute.
func (in *HttpRoute) DeepCopy() *HttpRoute {
	if in == nil {
		return nil
	}
	out := new(HttpRoute)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HttpRouteAction) DeepCopyInto(out *HttpRouteAction) {
	*out = *in
	if in.WeightedTargets != nil {
		in, out := &in.WeightedTargets, &out.WeightedTargets
		// Fresh backing array; element-wise copy() is sufficient because the
		// generator deep-copies WeightedTarget by plain assignment elsewhere.
		*out = make([]WeightedTarget, len(*in))
		copy(*out, *in)
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HttpRouteAction.
func (in *HttpRouteAction) DeepCopy() *HttpRouteAction {
	if in == nil {
		return nil
	}
	out := new(HttpRouteAction)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HttpRouteMatch) DeepCopyInto(out *HttpRouteMatch) {
	// Struct has no reference fields; a shallow value copy is a deep copy.
	*out = *in
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HttpRouteMatch.
func (in *HttpRouteMatch) DeepCopy() *HttpRouteMatch {
	if in == nil {
		return nil
	}
	out := new(HttpRouteMatch)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Listener) DeepCopyInto(out *Listener) {
	*out = *in
	out.PortMapping = in.PortMapping
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Listener.
func (in *Listener) DeepCopy() *Listener {
	if in == nil {
		return nil
	}
	out := new(Listener)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Mesh) DeepCopyInto(out *Mesh) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mesh.
func (in *Mesh) DeepCopy() *Mesh {
	if in == nil {
		return nil
	}
	out := new(Mesh)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Mesh) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	// Returning an untyped nil (rather than a typed-nil *Mesh boxed in the
	// interface) keeps `DeepCopyObject() == nil` checks working for callers.
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MeshCondition) DeepCopyInto(out *MeshCondition) {
	*out = *in
	// Each optional pointer field gets its own allocation so that the copy
	// shares no memory with the source.
	if in.LastTransitionTime != nil {
		in, out := &in.LastTransitionTime, &out.LastTransitionTime
		*out = (*in).DeepCopy()
	}
	if in.Reason != nil {
		in, out := &in.Reason, &out.Reason
		*out = new(string)
		**out = **in
	}
	if in.Message != nil {
		in, out := &in.Message, &out.Message
		*out = new(string)
		**out = **in
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshCondition.
func (in *MeshCondition) DeepCopy() *MeshCondition {
	if in == nil {
		return nil
	}
	out := new(MeshCondition)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MeshList) DeepCopyInto(out *MeshList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	out.ListMeta = in.ListMeta
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Mesh, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshList.
func (in *MeshList) DeepCopy() *MeshList {
	if in == nil {
		return nil
	}
	out := new(MeshList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *MeshList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MeshSpec) DeepCopyInto(out *MeshSpec) {
	*out = *in
	if in.ServiceDiscoveryType != nil {
		in, out := &in.ServiceDiscoveryType, &out.ServiceDiscoveryType
		*out = new(MeshServiceDiscoveryType)
		**out = **in
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshSpec.
func (in *MeshSpec) DeepCopy() *MeshSpec {
	if in == nil {
		return nil
	}
	out := new(MeshSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MeshStatus) DeepCopyInto(out *MeshStatus) {
	*out = *in
	if in.MeshArn != nil {
		in, out := &in.MeshArn, &out.MeshArn
		*out = new(string)
		**out = **in
	}
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		// Conditions contain pointer fields, so each element is deep-copied
		// individually instead of using copy().
		*out = make([]MeshCondition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MeshStatus.
func (in *MeshStatus) DeepCopy() *MeshStatus {
	if in == nil {
		return nil
	}
	out := new(MeshStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PortMapping) DeepCopyInto(out *PortMapping) {
	// Struct has no reference fields; a shallow value copy is a deep copy.
	*out = *in
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortMapping.
func (in *PortMapping) DeepCopy() *PortMapping {
	if in == nil {
		return nil
	}
	out := new(PortMapping)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Route) DeepCopyInto(out *Route) {
	*out = *in
	in.Http.DeepCopyInto(&out.Http)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Route.
func (in *Route) DeepCopy() *Route {
	if in == nil {
		return nil
	}
	out := new(Route)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceDiscovery) DeepCopyInto(out *ServiceDiscovery) {
	*out = *in
	if in.CloudMap != nil {
		in, out := &in.CloudMap, &out.CloudMap
		*out = new(CloudMapServiceDiscovery)
		**out = **in
	}
	if in.Dns != nil {
		in, out := &in.Dns, &out.Dns
		*out = new(DnsServiceDiscovery)
		**out = **in
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceDiscovery.
func (in *ServiceDiscovery) DeepCopy() *ServiceDiscovery {
	if in == nil {
		return nil
	}
	out := new(ServiceDiscovery)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualNode) DeepCopyInto(out *VirtualNode) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNode.
func (in *VirtualNode) DeepCopy() *VirtualNode {
	if in == nil {
		return nil
	}
	out := new(VirtualNode)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *VirtualNode) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	// Untyped nil so interface nil-checks by callers behave as expected.
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualNodeCondition) DeepCopyInto(out *VirtualNodeCondition) {
	*out = *in
	// Optional pointer fields are re-allocated so the copy shares no memory
	// with the source.
	if in.LastTransitionTime != nil {
		in, out := &in.LastTransitionTime, &out.LastTransitionTime
		*out = (*in).DeepCopy()
	}
	if in.Reason != nil {
		in, out := &in.Reason, &out.Reason
		*out = new(string)
		**out = **in
	}
	if in.Message != nil {
		in, out := &in.Message, &out.Message
		*out = new(string)
		**out = **in
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNodeCondition.
func (in *VirtualNodeCondition) DeepCopy() *VirtualNodeCondition {
	if in == nil {
		return nil
	}
	out := new(VirtualNodeCondition)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualNodeList) DeepCopyInto(out *VirtualNodeList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	out.ListMeta = in.ListMeta
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]VirtualNode, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNodeList.
func (in *VirtualNodeList) DeepCopy() *VirtualNodeList {
	if in == nil {
		return nil
	}
	out := new(VirtualNodeList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *VirtualNodeList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualNodeSpec) DeepCopyInto(out *VirtualNodeSpec) {
	*out = *in
	if in.Listeners != nil {
		in, out := &in.Listeners, &out.Listeners
		// Listener elements are copied shallowly; the generator uses copy()
		// only when the element type has no reference fields.
		*out = make([]Listener, len(*in))
		copy(*out, *in)
	}
	if in.ServiceDiscovery != nil {
		in, out := &in.ServiceDiscovery, &out.ServiceDiscovery
		*out = new(ServiceDiscovery)
		(*in).DeepCopyInto(*out)
	}
	if in.Backends != nil {
		in, out := &in.Backends, &out.Backends
		*out = make([]Backend, len(*in))
		copy(*out, *in)
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNodeSpec.
func (in *VirtualNodeSpec) DeepCopy() *VirtualNodeSpec {
	if in == nil {
		return nil
	}
	out := new(VirtualNodeSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualNodeStatus) DeepCopyInto(out *VirtualNodeStatus) {
	*out = *in
	if in.MeshArn != nil {
		in, out := &in.MeshArn, &out.MeshArn
		*out = new(string)
		**out = **in
	}
	if in.VirtualNodeArn != nil {
		in, out := &in.VirtualNodeArn, &out.VirtualNodeArn
		*out = new(string)
		**out = **in
	}
	if in.CloudMapServiceArn != nil {
		in, out := &in.CloudMapServiceArn, &out.CloudMapServiceArn
		*out = new(string)
		**out = **in
	}
	if in.QueryParameters != nil {
		in, out := &in.QueryParameters, &out.QueryParameters
		// New map with the same entries; string keys/values need no deeper copy.
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make([]VirtualNodeCondition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualNodeStatus.
func (in *VirtualNodeStatus) DeepCopy() *VirtualNodeStatus {
	if in == nil {
		return nil
	}
	out := new(VirtualNodeStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualRouter) DeepCopyInto(out *VirtualRouter) {
	// Struct has no reference fields; a shallow value copy is a deep copy.
	*out = *in
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualRouter.
func (in *VirtualRouter) DeepCopy() *VirtualRouter {
	if in == nil {
		return nil
	}
	out := new(VirtualRouter)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualService) DeepCopyInto(out *VirtualService) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualService.
func (in *VirtualService) DeepCopy() *VirtualService {
	if in == nil {
		return nil
	}
	out := new(VirtualService)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *VirtualService) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	// Untyped nil so interface nil-checks by callers behave as expected.
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualServiceBackend) DeepCopyInto(out *VirtualServiceBackend) {
	// Struct has no reference fields; a shallow value copy is a deep copy.
	*out = *in
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceBackend.
func (in *VirtualServiceBackend) DeepCopy() *VirtualServiceBackend {
	if in == nil {
		return nil
	}
	out := new(VirtualServiceBackend)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualServiceCondition) DeepCopyInto(out *VirtualServiceCondition) {
	*out = *in
	// Optional pointer fields are re-allocated so the copy shares no memory
	// with the source.
	if in.LastTransitionTime != nil {
		in, out := &in.LastTransitionTime, &out.LastTransitionTime
		*out = (*in).DeepCopy()
	}
	if in.Reason != nil {
		in, out := &in.Reason, &out.Reason
		*out = new(string)
		**out = **in
	}
	if in.Message != nil {
		in, out := &in.Message, &out.Message
		*out = new(string)
		**out = **in
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceCondition.
func (in *VirtualServiceCondition) DeepCopy() *VirtualServiceCondition {
	if in == nil {
		return nil
	}
	out := new(VirtualServiceCondition)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualServiceList) DeepCopyInto(out *VirtualServiceList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	out.ListMeta = in.ListMeta
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]VirtualService, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceList.
func (in *VirtualServiceList) DeepCopy() *VirtualServiceList {
	if in == nil {
		return nil
	}
	out := new(VirtualServiceList)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *VirtualServiceList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualServiceSpec) DeepCopyInto(out *VirtualServiceSpec) {
	*out = *in
	if in.VirtualRouter != nil {
		in, out := &in.VirtualRouter, &out.VirtualRouter
		*out = new(VirtualRouter)
		**out = **in
	}
	if in.Routes != nil {
		in, out := &in.Routes, &out.Routes
		// Route elements hold nested reference fields, so each is deep-copied
		// individually instead of using copy().
		*out = make([]Route, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceSpec.
func (in *VirtualServiceSpec) DeepCopy() *VirtualServiceSpec {
	if in == nil {
		return nil
	}
	out := new(VirtualServiceSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VirtualServiceStatus) DeepCopyInto(out *VirtualServiceStatus) {
	*out = *in
	if in.VirtualServiceArn != nil {
		in, out := &in.VirtualServiceArn, &out.VirtualServiceArn
		*out = new(string)
		**out = **in
	}
	if in.VirtualRouterArn != nil {
		in, out := &in.VirtualRouterArn, &out.VirtualRouterArn
		*out = new(string)
		**out = **in
	}
	if in.RouteArns != nil {
		in, out := &in.RouteArns, &out.RouteArns
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make([]VirtualServiceCondition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VirtualServiceStatus.
func (in *VirtualServiceStatus) DeepCopy() *VirtualServiceStatus {
	if in == nil {
		return nil
	}
	out := new(VirtualServiceStatus)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WeightedTarget) DeepCopyInto(out *WeightedTarget) {
	// Struct has no reference fields; a shallow value copy is a deep copy.
	*out = *in
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeightedTarget.
func (in *WeightedTarget) DeepCopy() *WeightedTarget {
	if in == nil {
		return nil
	}
	out := new(WeightedTarget)
	in.DeepCopyInto(out)
	return out
}

View File

@@ -21,7 +21,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
rollout "github.com/stefanprodan/flagger/pkg/apis/flagger"
rollout "github.com/weaveworks/flagger/pkg/apis/flagger"
)
// SchemeGroupVersion is group version used to register these objects

Some files were not shown because too many files have changed in this diff Show More